#ifndef _CTL_IO_H_
#define _CTL_IO_H_
#ifndef _KERNEL
#include <stdbool.h>
#endif
#include <sys/queue.h>
#include <cam/scsi/scsi_all.h>
#include <dev/nvme/nvme.h>
#define CTL_MAX_CDBLEN 32
#define CTL_TIME_IO
#ifdef CTL_TIME_IO
#define CTL_TIME_IO_DEFAULT_SECS 90
#endif
/*
 * Overall completion status of an I/O.  The low 12 bits carry the status
 * code proper (see CTL_STATUS_MASK); CTL_AUTOSENSE may be ORed in on top.
 */
typedef enum {
    CTL_STATUS_NONE,        /* no status yet / not completed */
    CTL_SUCCESS,            /* command completed successfully */
    CTL_CMD_TIMEOUT,        /* command timed out */
    CTL_SEL_TIMEOUT,        /* selection timeout: target did not respond */
    CTL_ERROR,              /* general CTL error */
    CTL_SCSI_ERROR,         /* SCSI error; see scsi_status / sense data */
    CTL_NVME_ERROR,         /* NVMe error; see the completion (cpl) */
    CTL_CMD_ABORTED,        /* command was aborted */
    CTL_STATUS_MASK = 0xfff,/* mask selecting the status-code bits */
    CTL_AUTOSENSE = 0x1000  /* flag: sense data is valid with the status */
} ctl_io_status;
/*
 * Flags kept in ctl_io_hdr.flags.  The two low bits encode the data
 * transfer direction; the remaining bits track the I/O's progress
 * through CTL and the HA (dual-controller) machinery.
 * NOTE(review): per-flag comments are best-effort readings of the
 * names; semantics live in ctl.c — confirm there before relying on them.
 */
typedef enum {
    CTL_FLAG_NONE = 0x00000000,             /* no flags set */
    CTL_FLAG_DATA_IN = 0x00000001,          /* DATA IN: device to initiator */
    CTL_FLAG_DATA_OUT = 0x00000002,         /* DATA OUT: initiator to device */
    CTL_FLAG_DATA_NONE = 0x00000003,        /* no data transfer */
    CTL_FLAG_DATA_MASK = 0x00000003,        /* mask for the direction bits */
    CTL_FLAG_USER_TAG = 0x00000020,         /* tag supplied by user — TODO confirm */
    CTL_FLAG_USER_REQ = 0x00000040,         /* request originated from userland */
    CTL_FLAG_ALLOCATED = 0x00000100,        /* data buffer allocated by CTL */
    CTL_FLAG_ABORT_STATUS = 0x00000400,     /* return TASK ABORTED status */
    CTL_FLAG_ABORT = 0x00000800,            /* this I/O should be aborted */
    CTL_FLAG_DMA_INPROG = 0x00001000,       /* datamove (DMA) in progress */
    CTL_FLAG_DELAY_DONE = 0x00004000,       /* injected delay already applied */
    CTL_FLAG_INT_COPY = 0x00008000,         /* internal copy — semantics not visible here */
    CTL_FLAG_SENT_2OTHER_SC = 0x00010000,   /* sent to the other HA controller */
    CTL_FLAG_FROM_OTHER_SC = 0x00020000,    /* received from the other HA controller */
    CTL_FLAG_IS_WAS_ON_RTR = 0x00040000,    /* is/was on the ready-to-run queue */
    CTL_FLAG_BUS_ADDR = 0x00080000,         /* S/G list holds bus addresses */
    CTL_FLAG_IO_CONT = 0x00100000,          /* invoke io_cont continuation */
#if 0
    CTL_FLAG_ALREADY_DONE = 0x00200000,     /* retired flag, value reserved */
#endif
    CTL_FLAG_NO_DATAMOVE = 0x00400000,      /* skip the datamove phase */
    CTL_FLAG_DMA_QUEUED = 0x00800000,       /* datamove queued, not started */
    CTL_FLAG_STATUS_QUEUED = 0x01000000,    /* status queued, not yet sent */
    CTL_FLAG_FAILOVER = 0x04000000,         /* HA failover in progress */
    CTL_FLAG_IO_ACTIVE = 0x08000000,        /* I/O active on this controller */
    CTL_FLAG_STATUS_SENT = 0x10000000,      /* status has been sent */
    CTL_FLAG_SERSEQ_DONE = 0x20000000       /* serialization sequence done — TODO confirm */
} ctl_io_flags;
/* LBA/length pair describing a contiguous range of logical blocks. */
struct ctl_lba_len {
    uint64_t lba;       /* starting logical block address */
    uint32_t len;       /* length, presumably in blocks — confirm at call sites */
};

/*
 * LBA/length pair plus per-request flags; the CTL_LLF_* values below are
 * the valid bits for the flags field (operation kind plus SCSI FUA/DPO
 * cache hints).
 */
struct ctl_lba_len_flags {
    uint64_t lba;
    uint32_t len;
    uint32_t flags;
#define CTL_LLF_FUA 0x04000000      /* Force Unit Access */
#define CTL_LLF_DPO 0x08000000      /* Disable Page Out */
#define CTL_LLF_READ 0x10000000
#define CTL_LLF_WRITE 0x20000000
#define CTL_LLF_VERIFY 0x40000000
#define CTL_LLF_COMPARE 0x80000000
};

/* Pointer/length pair with flags (byte-addressed buffer descriptor). */
struct ctl_ptr_len_flags {
    uint8_t *ptr;
    uint32_t len;
    uint32_t flags;
};

/*
 * One slot of per-I/O private storage (16 bytes).  Overlaid so a slot
 * can be viewed as raw bytes, one or two integers, or one or two
 * pointers, as each layer requires.
 */
union ctl_priv {
    uint8_t bytes[sizeof(uint64_t) * 2];
    uint64_t integer;
    uint64_t integers[2];
    void *ptr;
    void *ptrs[2];
};
/*
 * Assignment of the ctl_private[] slots in ctl_io_hdr.  Note that slot 1
 * is shared by two users (LBA/len and mode page bookkeeping), which is
 * only safe because they are never live at the same time — confirm in
 * ctl.c before adding new users.
 */
#define CTL_NUM_PRIV 6
#define CTL_PRIV_LUN 0              /* ptrs[0]=LUN, ptrs[1]=softc (see below) */
#define CTL_PRIV_LBA_LEN 1
#define CTL_PRIV_MODEPAGE 1         /* shares slot 1 with LBA_LEN */
#define CTL_PRIV_BACKEND 2          /* backend use; also RSGL/LSGL below */
#define CTL_PRIV_BACKEND_LUN 3
#define CTL_PRIV_FRONTEND 4
#define CTL_PRIV_FRONTEND2 5

/* Convenience accessors for the private slots of an I/O. */
#define CTL_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[0])
#define CTL_SOFTC(io) ((io)->io_hdr.ctl_private[CTL_PRIV_LUN].ptrs[1])
#define CTL_BACKEND_LUN(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptrs[0])
/* Port the I/O arrived on, looked up via the softc's port table. */
#define CTL_PORT(io) (((struct ctl_softc *)CTL_SOFTC(io))-> \
ctl_ports[(io)->io_hdr.nexus.targ_port])
/* Remote/local S/G lists stashed in the backend slot — presumably for
 * HA data movement; verify against users of these macros. */
#define CTL_RSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[0])
#define CTL_LSGL(io) ((io)->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptrs[1])
#define CTL_RSGLT(io) ((struct ctl_sg_entry *)CTL_RSGL(io))
#define CTL_LSGLT(io) ((struct ctl_sg_entry *)CTL_LSGL(io))

#define CTL_INVALID_PORTNAME 0xFF
#define CTL_UNMAPPED_IID 0xFF
/* A single scatter/gather list element. */
struct ctl_sg_entry {
    void *addr;     /* segment base address */
    size_t len;     /* segment length in bytes */
};

/* Which kind of I/O a union ctl_io carries (selects the valid member). */
typedef enum {
    CTL_IO_NONE,
    CTL_IO_SCSI,        /* SCSI command (ctl_scsiio) */
    CTL_IO_TASK,        /* task management (ctl_taskio) */
    CTL_IO_NVME,        /* NVMe I/O command (ctl_nvmeio) */
    CTL_IO_NVME_ADMIN,  /* NVMe admin command (ctl_nvmeio) */
} ctl_io_type;

/* Identifies the initiator/target-port/LUN nexus of an I/O. */
struct ctl_nexus {
    uint32_t initid;            /* initiator ID */
    uint32_t targ_port;         /* target port number */
    uint32_t targ_lun;          /* LUN as addressed by the initiator */
    uint32_t targ_mapped_lun;   /* LUN after port LUN mapping */
};
/*
 * HA (inter-controller) message types, also stored in ctl_io_hdr.msg_type.
 * NOTE(review): comments are inferred from the names; the authoritative
 * handling is in the HA message dispatch code.
 */
typedef enum {
    CTL_MSG_SERIALIZE,      /* serialize an I/O with the peer */
    CTL_MSG_R2R,            /* ready to run */
    CTL_MSG_FINISH_IO,      /* I/O finished on the original controller */
    CTL_MSG_BAD_JUJU,       /* error notification */
    CTL_MSG_MANAGE_TASKS,   /* task management request */
    CTL_MSG_PERS_ACTION,    /* persistent reservation action */
    CTL_MSG_DATAMOVE,       /* request data movement */
    CTL_MSG_DATAMOVE_DONE,  /* data movement complete */
    CTL_MSG_UA,             /* set/clear a unit attention */
    CTL_MSG_PORT_SYNC,      /* synchronize port state */
    CTL_MSG_LUN_SYNC,       /* synchronize LUN state */
    CTL_MSG_IID_SYNC,       /* synchronize initiator-ID state */
    CTL_MSG_LOGIN,          /* HA peer login/handshake */
    CTL_MSG_MODE_SYNC,      /* synchronize mode page state */
    CTL_MSG_FAILOVER        /* HA failover notification */
} ctl_msg_type;
struct ctl_scsiio;

/*
 * Common header shared by every CTL I/O type.  It must be the first
 * member of each specific I/O structure so a union ctl_io can always be
 * addressed through io_hdr.
 */
struct ctl_io_hdr {
    uint32_t version;               /* interface version — usage not visible here */
    ctl_io_type io_type;            /* selects the valid union ctl_io member */
    ctl_msg_type msg_type;          /* HA message type, for HA traffic */
    struct ctl_nexus nexus;         /* initiator/port/LUN addressing */
    uint32_t iid_indx;              /* initiator index — presumably into a per-port table */
    uint32_t flags;                 /* ctl_io_flags */
    uint32_t status;                /* ctl_io_status (| CTL_AUTOSENSE) */
    uint32_t port_status;           /* frontend-port-specific error code */
    uint32_t timeout;               /* timeout value — units not visible here */
    uint32_t retries;               /* retry count */
#ifdef CTL_IO_DELAY
    struct callout delay_callout;   /* callout used for injected delays */
#endif
#ifdef CTL_TIME_IO
    time_t start_time;              /* wall-clock start time */
    struct bintime start_bt;        /* I/O start timestamp */
    struct bintime dma_start_bt;    /* start of the current DMA cycle */
    struct bintime dma_bt;          /* accumulated DMA time */
#endif
    uint32_t num_dmas;              /* number of datamove cycles so far */
    union ctl_io *remote_io;        /* counterpart I/O on the HA peer */
    union ctl_io *blocker;          /* I/O this one is serialized behind */
    void *pool;                     /* owning I/O pool */
    union ctl_priv ctl_private[CTL_NUM_PRIV];   /* per-layer private slots */
    TAILQ_HEAD(, ctl_io_hdr) blocked_queue;     /* I/Os blocked behind this one */
    STAILQ_ENTRY(ctl_io_hdr) links;             /* linkage on work queues */
    LIST_ENTRY(ctl_io_hdr) ooa_links;           /* linkage on the order-of-arrival list */
    TAILQ_ENTRY(ctl_io_hdr) blocked_links;      /* linkage on a blocker's blocked_queue */
};
/* SCSI task attribute (queue tag) types. */
typedef enum {
    CTL_TAG_UNTAGGED,
    CTL_TAG_SIMPLE,
    CTL_TAG_ORDERED,
    CTL_TAG_HEAD_OF_QUEUE,
    CTL_TAG_ACA
} ctl_tag_type;

union ctl_io;

/* Adjust the reference count on backend-provided data by 'diff'. */
typedef void (*ctl_ref)(void *arg, int diff);
/*
 * Backend datamove-completion callback.  'samethr' presumably reports
 * whether it runs on the thread that started the move — confirm in the
 * datamove code before depending on it.
 */
typedef int (*ctl_be_move_done_t)(union ctl_io *io, bool samethr);
/* I/O continuation callback, invoked when CTL_FLAG_IO_CONT is set. */
typedef int (*ctl_io_cont)(union ctl_io *io);
/*
 * A SCSI command I/O.  The ext_* fields describe the frontend-supplied
 * data buffer; the kern_* fields describe the kernel buffer or S/G list
 * that the backend operates on.
 */
struct ctl_scsiio {
    struct ctl_io_hdr io_hdr;       /* common header; must be first */
    uint32_t ext_sg_entries;        /* external S/G entries; 0 = flat buffer */
    uint8_t *ext_data_ptr;          /* external buffer or S/G list */
    uint32_t ext_data_len;          /* external buffer total length */
    uint32_t ext_data_filled;       /* bytes already transferred externally */
    uint32_t kern_sg_entries;       /* kernel S/G entries; 0 = flat buffer */
    uint32_t rem_sg_entries;        /* remaining S/G entries — TODO confirm use */
    uint8_t *kern_data_ptr;         /* kernel buffer or S/G list */
    uint32_t kern_data_len;         /* length of the current kernel segment */
    uint32_t kern_total_len;        /* total kernel data length for the command */
    uint32_t kern_data_resid;       /* residual (untransferred) bytes */
    uint32_t kern_rel_offset;       /* offset of the current segment in the transfer */
    struct scsi_sense_data sense_data;  /* sense data returned with the status */
    uint8_t sense_len;              /* valid length of sense_data */
    uint8_t scsi_status;            /* SCSI status byte */
    uint8_t seridx;                 /* serialization index — TODO confirm */
    uint8_t priority;               /* command priority */
    uint64_t tag_num;               /* task (queue) tag number */
    ctl_tag_type tag_type;          /* task attribute */
    uint8_t cdb_len;                /* valid CDB length */
    uint8_t cdb[CTL_MAX_CDBLEN];    /* the CDB itself */
    ctl_be_move_done_t be_move_done;/* backend datamove-completion callback */
    ctl_io_cont io_cont;            /* continuation (with CTL_FLAG_IO_CONT) */
    ctl_ref kern_data_ref;          /* data-buffer reference callback */
    void *kern_data_arg;            /* argument passed to kern_data_ref */
};
/* SCSI task management functions (plus port login/logout). */
typedef enum {
    CTL_TASK_ABORT_TASK,
    CTL_TASK_ABORT_TASK_SET,
    CTL_TASK_CLEAR_ACA,
    CTL_TASK_CLEAR_TASK_SET,
    CTL_TASK_I_T_NEXUS_RESET,
    CTL_TASK_LUN_RESET,
    CTL_TASK_TARGET_RESET,
    CTL_TASK_BUS_RESET,
    CTL_TASK_PORT_LOGIN,
    CTL_TASK_PORT_LOGOUT,
    CTL_TASK_QUERY_TASK,
    CTL_TASK_QUERY_TASK_SET,
    CTL_TASK_QUERY_ASYNC_EVENT
} ctl_task_type;

/* Result of a task management function (cf. SAM TMF response codes). */
typedef enum {
    CTL_TASK_FUNCTION_COMPLETE,
    CTL_TASK_FUNCTION_SUCCEEDED,
    CTL_TASK_FUNCTION_REJECTED,
    CTL_TASK_LUN_DOES_NOT_EXIST,
    CTL_TASK_FUNCTION_NOT_SUPPORTED
} ctl_task_status;

/* A task management I/O (CTL_IO_TASK). */
struct ctl_taskio {
    struct ctl_io_hdr io_hdr;   /* common header; must be first */
    ctl_task_type task_action;  /* requested function */
    uint64_t tag_num;           /* tag of the task to act on (aborts/queries) */
    ctl_tag_type tag_type;      /* attribute of that task */
    uint8_t task_status;        /* ctl_task_status result */
    uint8_t task_resp[3];       /* additional response information */
};
/*
 * An NVMe I/O or admin command (CTL_IO_NVME / CTL_IO_NVME_ADMIN).
 * Buffer bookkeeping mirrors ctl_scsiio's ext_*/kern_* fields.
 */
struct ctl_nvmeio {
    struct ctl_io_hdr io_hdr;       /* common header; must be first */
    uint32_t ext_sg_entries;        /* external S/G entries; 0 = flat buffer */
    uint8_t *ext_data_ptr;          /* external buffer or S/G list */
    uint32_t ext_data_len;          /* external buffer total length */
    uint32_t ext_data_filled;       /* bytes already transferred externally */
    uint32_t kern_sg_entries;       /* kernel S/G entries; 0 = flat buffer */
    uint8_t *kern_data_ptr;         /* kernel buffer or S/G list */
    uint32_t kern_data_len;         /* length of the current kernel segment */
    uint32_t kern_total_len;        /* total data length for the command */
    uint32_t kern_data_resid;       /* residual (untransferred) bytes */
    uint32_t kern_rel_offset;       /* offset of the current segment in the transfer */
    struct nvme_command cmd;        /* submission queue entry */
    struct nvme_completion cpl;     /* completion queue entry */
    bool success_sent;              /* success already reported — TODO confirm */
    ctl_be_move_done_t be_move_done;/* backend datamove-completion callback */
    ctl_io_cont io_cont;            /* continuation (with CTL_FLAG_IO_CONT) */
    ctl_ref kern_data_ref;          /* data-buffer reference callback */
    void *kern_data_arg;            /* argument passed to kern_data_ref */
};
/* Version of the HA (dual-controller) peer protocol. */
#define CTL_HA_VERSION 4

/* HA login/handshake message: capabilities of the sending controller. */
struct ctl_ha_msg_login {
    ctl_msg_type msg_type;
    int version;            /* sender's CTL_HA_VERSION */
    int ha_mode;            /* configured HA mode */
    int ha_id;              /* sender's HA node ID */
    int max_luns;           /* sender's limits, for compatibility checks */
    int max_ports;
    int max_init_per_port;
};

/* Persistent reservation actions carried between controllers. */
typedef enum {
    CTL_PR_REG_KEY,         /* register a key */
    CTL_PR_UNREG_KEY,       /* unregister a key */
    CTL_PR_PREEMPT,
    CTL_PR_CLEAR,
    CTL_PR_RESERVE,
    CTL_PR_RELEASE
} ctl_pr_action;

/* Parameters of a persistent reservation action. */
struct ctl_pr_info {
    ctl_pr_action action;
    uint8_t sa_res_key[8];  /* service action reservation key */
    uint8_t res_type;       /* reservation type */
    uint32_t residx;        /* registrant/initiator index */
};

/* Header common to every HA message. */
struct ctl_ha_msg_hdr {
    ctl_msg_type msg_type;
    uint32_t status;                /* transaction status */
    union ctl_io *original_sc;      /* I/O pointer on the originating controller */
    union ctl_io *serializing_sc;   /* I/O pointer on the serializing controller */
    struct ctl_nexus nexus;         /* nexus of the affected I/O */
};
#define CTL_HA_MAX_SG_ENTRIES 16        /* S/G entries per datamove message */
#define CTL_HA_DATAMOVE_SEGMENT 131072  /* bytes per HA datamove segment */

/* HA message: persistent reservation action. */
struct ctl_ha_msg_pr {
    struct ctl_ha_msg_hdr hdr;
    struct ctl_pr_info pr_info;
};

/* HA message: set or clear a unit attention. */
struct ctl_ha_msg_ua {
    struct ctl_ha_msg_hdr hdr;
    int ua_all;             /* apply to all initiators */
    int ua_set;             /* nonzero = set, zero = clear — TODO confirm */
    int ua_type;            /* which unit attention */
    uint8_t ua_info[8];     /* UA-specific payload */
};

/*
 * HA message: data movement.  A transfer larger than
 * CTL_HA_MAX_SG_ENTRIES segments is split across several messages,
 * sequenced by sg_sequence and terminated by sg_last.
 */
struct ctl_ha_msg_dt {
    struct ctl_ha_msg_hdr hdr;
    ctl_io_flags flags;             /* direction and related flags */
    uint32_t sg_sequence;           /* sequence number of this message */
    uint8_t sg_last;                /* nonzero on the final message */
    uint32_t sent_sg_entries;       /* entries sent before this message */
    uint32_t cur_sg_entries;        /* entries carried in this message */
    uint32_t kern_sg_entries;       /* total kernel S/G entries */
    uint32_t kern_data_len;         /* mirrors the ctl_scsiio/ctl_nvmeio fields */
    uint32_t kern_total_len;
    uint32_t kern_data_resid;
    uint32_t kern_rel_offset;
    struct ctl_sg_entry sg_list[CTL_HA_MAX_SG_ENTRIES];
};
/* HA message: SCSI command state/completion (mirrors ctl_scsiio). */
struct ctl_ha_msg_scsi {
    struct ctl_ha_msg_hdr hdr;
    uint64_t tag_num;               /* task tag number */
    ctl_tag_type tag_type;          /* task attribute */
    uint8_t cdb[CTL_MAX_CDBLEN];    /* the CDB */
    uint8_t cdb_len;                /* valid CDB length */
    uint8_t scsi_status;            /* SCSI status byte */
    uint8_t sense_len;              /* valid length of sense_data */
    uint8_t priority;               /* command priority */
    uint32_t port_status;           /* frontend-specific error code */
    uint32_t kern_data_resid;       /* residual bytes */
    struct scsi_sense_data sense_data;
};

/* HA message: task management request. */
struct ctl_ha_msg_task {
    struct ctl_ha_msg_hdr hdr;
    ctl_task_type task_action;  /* requested function */
    uint64_t tag_num;           /* tag of the task to act on */
    ctl_tag_type tag_type;      /* attribute of that task */
};
/*
 * HA message: port state synchronization.  The *_len fields give the
 * sizes of variable-length blobs packed back-to-back into data[]
 * (name, LUN map, device IDs) — confirm packing order in the sender.
 */
struct ctl_ha_msg_port {
    struct ctl_ha_msg_hdr hdr;
    int port_type;
    int physical_port;
    int virtual_port;
    int status;
    int name_len;
    int lun_map_len;
    int port_devid_len;
    int target_devid_len;
    int init_devid_len;
    uint8_t data[];     /* variable-length blobs described above */
};

/*
 * HA message: LUN state synchronization.  data[] carries the LUN devid
 * followed by pr_key_count ctl_ha_msg_lun_pr_key records — confirm
 * packing in the sender.
 */
struct ctl_ha_msg_lun {
    struct ctl_ha_msg_hdr hdr;
    int flags;
    unsigned int pr_generation;     /* persistent reservation generation */
    uint32_t pr_res_idx;            /* reservation holder index */
    uint8_t pr_res_type;            /* reservation type */
    int lun_devid_len;
    int pr_key_count;
    uint8_t data[];
};

/* One registered PR key, packed into ctl_ha_msg_lun data[]. */
struct ctl_ha_msg_lun_pr_key {
    uint32_t pr_iid;    /* initiator index */
    uint64_t pr_key;    /* registered reservation key */
};
/* HA message: initiator-ID state sync; the name follows in data[]. */
struct ctl_ha_msg_iid {
    struct ctl_ha_msg_hdr hdr;
    int in_use;         /* initiator slot currently in use */
    int name_len;       /* length of the name in data[] */
    uint64_t wwpn;      /* initiator world-wide port name */
    uint8_t data[];
};

/* HA message: mode page sync; the page contents follow in data[]. */
struct ctl_ha_msg_mode {
    struct ctl_ha_msg_hdr hdr;
    uint8_t page_code;
    uint8_t subpage;
    uint16_t page_len;  /* length of the page data in data[] */
    uint8_t data[];
};

/* Union of all HA message formats; hdr is common to every variant. */
union ctl_ha_msg {
    struct ctl_ha_msg_hdr hdr;
    struct ctl_ha_msg_task task;
    struct ctl_ha_msg_scsi scsi;
    struct ctl_ha_msg_dt dt;
    struct ctl_ha_msg_pr pr;
    struct ctl_ha_msg_ua ua;
    struct ctl_ha_msg_port port;
    struct ctl_ha_msg_lun lun;
    struct ctl_ha_msg_iid iid;
    struct ctl_ha_msg_login login;
    struct ctl_ha_msg_mode mode;
};

/* Pseudo-I/O wrapping a received persistent reservation HA message. */
struct ctl_prio {
    struct ctl_io_hdr io_hdr;
    struct ctl_ha_msg_pr pr_msg;
};

/*
 * The I/O object passed throughout CTL.  io_hdr.io_type selects which
 * member is valid; io_hdr is the first member of every variant, so the
 * header may always be accessed through any of them.
 */
union ctl_io {
    struct ctl_io_hdr io_hdr;   /* common header, must be first */
    struct ctl_scsiio scsiio;   /* CTL_IO_SCSI */
    struct ctl_taskio taskio;   /* CTL_IO_TASK */
    struct ctl_nvmeio nvmeio;   /* CTL_IO_NVME / CTL_IO_NVME_ADMIN */
    struct ctl_prio presio;     /* persistent reservation pseudo-I/O */
};
#ifdef _KERNEL
/*
 * CTL_IO_ASSERT(io, TYPE[, TYPE2]) asserts that 'io' has one of the
 * given CTL_IO_* types (the CTL_IO_ prefix is pasted on automatically).
 * _CTL_IO_ASSERT_MACRO is an argument-counting selector: with one type
 * argument NAME lands on _CTL_IO_ASSERT_1, with two on _CTL_IO_ASSERT_2.
 */
#define _CTL_IO_ASSERT_1(io, _1) \
KASSERT((io)->io_hdr.io_type == CTL_IO_##_1, \
("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))
#define _CTL_IO_ASSERT_2(io, _1, _2) \
KASSERT((io)->io_hdr.io_type == CTL_IO_##_1 || \
(io)->io_hdr.io_type == CTL_IO_##_2, \
("%s: unexpected I/O type %x", __func__, (io)->io_hdr.io_type))
#define _CTL_IO_ASSERT_MACRO(io, _1, _2, NAME, ...) \
NAME
#define CTL_IO_ASSERT(...) \
_CTL_IO_ASSERT_MACRO(__VA_ARGS__, _CTL_IO_ASSERT_2, \
_CTL_IO_ASSERT_1)(__VA_ARGS__)
static __inline uint32_t
ctl_kern_sg_entries(union ctl_io *io)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
return (io->scsiio.kern_sg_entries);
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
return (io->nvmeio.kern_sg_entries);
default:
__assert_unreachable();
}
}
static __inline uint8_t *
ctl_kern_data_ptr(union ctl_io *io)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
return (io->scsiio.kern_data_ptr);
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
return (io->nvmeio.kern_data_ptr);
default:
__assert_unreachable();
}
}
static __inline uint32_t
ctl_kern_data_len(union ctl_io *io)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
return (io->scsiio.kern_data_len);
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
return (io->nvmeio.kern_data_len);
default:
__assert_unreachable();
}
}
static __inline uint32_t
ctl_kern_total_len(union ctl_io *io)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
return (io->scsiio.kern_total_len);
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
return (io->nvmeio.kern_total_len);
default:
__assert_unreachable();
}
}
static __inline uint32_t
ctl_kern_data_resid(union ctl_io *io)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
return (io->scsiio.kern_data_resid);
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
return (io->nvmeio.kern_data_resid);
default:
__assert_unreachable();
}
}
static __inline uint32_t
ctl_kern_rel_offset(union ctl_io *io)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
return (io->scsiio.kern_rel_offset);
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
return (io->nvmeio.kern_rel_offset);
default:
__assert_unreachable();
}
}
static __inline void
ctl_add_kern_rel_offset(union ctl_io *io, uint32_t offset)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_rel_offset += offset;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_rel_offset += offset;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_kern_sg_entries(union ctl_io *io, uint32_t kern_sg_entries)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_sg_entries = kern_sg_entries;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_sg_entries = kern_sg_entries;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_kern_data_ptr(union ctl_io *io, void *kern_data_ptr)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_data_ptr = kern_data_ptr;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_data_ptr = kern_data_ptr;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_kern_data_len(union ctl_io *io, uint32_t kern_data_len)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_data_len = kern_data_len;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_data_len = kern_data_len;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_kern_total_len(union ctl_io *io, uint32_t kern_total_len)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_total_len = kern_total_len;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_total_len = kern_total_len;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_kern_data_resid(union ctl_io *io, uint32_t kern_data_resid)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_data_resid = kern_data_resid;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_data_resid = kern_data_resid;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_kern_rel_offset(union ctl_io *io, uint32_t kern_rel_offset)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_rel_offset = kern_rel_offset;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_rel_offset = kern_rel_offset;
break;
default:
__assert_unreachable();
}
}
static __inline void
ctl_set_be_move_done(union ctl_io *io, ctl_be_move_done_t be_move_done)
{
	/* Install the datamove-completion callback for this I/O type. */
	const ctl_io_type t = io->io_hdr.io_type;

	if (t == CTL_IO_SCSI)
		io->scsiio.be_move_done = be_move_done;
	else if (t == CTL_IO_NVME || t == CTL_IO_NVME_ADMIN)
		io->nvmeio.be_move_done = be_move_done;
	else
		__assert_unreachable();
}
static __inline void
ctl_set_io_cont(union ctl_io *io, ctl_io_cont io_cont)
{
	/* Install the I/O continuation callback for this I/O type. */
	const ctl_io_type t = io->io_hdr.io_type;

	if (t == CTL_IO_SCSI)
		io->scsiio.io_cont = io_cont;
	else if (t == CTL_IO_NVME || t == CTL_IO_NVME_ADMIN)
		io->nvmeio.io_cont = io_cont;
	else
		__assert_unreachable();
}
static __inline void
ctl_set_kern_data_ref(union ctl_io *io, ctl_ref kern_data_ref)
{
	/* Install the data-buffer reference callback for this I/O type. */
	const ctl_io_type t = io->io_hdr.io_type;

	if (t == CTL_IO_SCSI)
		io->scsiio.kern_data_ref = kern_data_ref;
	else if (t == CTL_IO_NVME || t == CTL_IO_NVME_ADMIN)
		io->nvmeio.kern_data_ref = kern_data_ref;
	else
		__assert_unreachable();
}
static __inline void
ctl_set_kern_data_arg(union ctl_io *io, void *kern_data_arg)
{
switch (io->io_hdr.io_type) {
case CTL_IO_SCSI:
io->scsiio.kern_data_arg = kern_data_arg;
break;
case CTL_IO_NVME:
case CTL_IO_NVME_ADMIN:
io->nvmeio.kern_data_arg = kern_data_arg;
break;
default:
__assert_unreachable();
}
}
/*
 * Allocate an I/O from the given pool.  The _nowait variant presumably
 * does not sleep and may return NULL — confirm semantics in ctl.c.
 */
union ctl_io *ctl_alloc_io(void *pool_ref);
union ctl_io *ctl_alloc_io_nowait(void *pool_ref);
/* Return an I/O to its owning pool (io_hdr.pool). */
void ctl_free_io(union ctl_io *io);
/* Zero an I/O while preserving its pool linkage — confirm in ctl.c. */
void ctl_zero_io(union ctl_io *io);
#endif
#endif