/* torvalds/linux: drivers/block/mtip32xx/mtip32xx.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Micron P320 SSD
 *   Copyright (C) 2011 Micron Technology, Inc.
 *
 * Portions of this code were derived from works subjected to the
 * following copyright:
 *    Copyright (C) 2009 Integrated Device Technology, Inc.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ata.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/prefetch.h>
#include <linux/numa.h>
#include "mtip32xx.h"

#define HW_CMD_SLOT_SZ		(MTIP_MAX_COMMAND_SLOTS * 32)

/* DMA region containing RX Fis, Identify, RLE10, and SMART buffers */
#define AHCI_RX_FIS_SZ		0x100
#define AHCI_RX_FIS_OFFSET	0x0
#define AHCI_IDFY_SZ		ATA_SECT_SIZE
#define AHCI_IDFY_OFFSET	0x400
#define AHCI_SECTBUF_SZ		ATA_SECT_SIZE
#define AHCI_SECTBUF_OFFSET	0x800
#define AHCI_SMARTBUF_SZ	ATA_SECT_SIZE
#define AHCI_SMARTBUF_OFFSET	0xC00
/* 0x100 + 0x200 + 0x200 + 0x200 is smaller than 4k but we pad it out */
#define BLOCK_DMA_ALLOC_SZ	4096

/* DMA region containing command table (should be 8192 bytes) */
#define AHCI_CMD_SLOT_SZ	sizeof(struct mtip_cmd_hdr)
#define AHCI_CMD_TBL_SZ		(MTIP_MAX_COMMAND_SLOTS * AHCI_CMD_SLOT_SZ)
#define AHCI_CMD_TBL_OFFSET	0x0

/* DMA region per command (contains header and SGL) */
#define AHCI_CMD_TBL_HDR_SZ	0x80
#define AHCI_CMD_TBL_HDR_OFFSET	0x0
#define AHCI_CMD_TBL_SGL_SZ	(MTIP_MAX_SG * sizeof(struct mtip_cmd_sg))
#define AHCI_CMD_TBL_SGL_OFFSET	AHCI_CMD_TBL_HDR_SZ
#define CMD_DMA_ALLOC_SZ	(AHCI_CMD_TBL_SGL_SZ + AHCI_CMD_TBL_HDR_SZ)

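/*
 * Illustrative sketch (editor's note, not part of the driver source): the
 * sub-buffers above are carved out of a single BLOCK_DMA_ALLOC_SZ coherent
 * allocation, assuming a dma_alloc_coherent() result in (base, base_dma):
 *
 *	port->rxfis        = base + AHCI_RX_FIS_OFFSET;
 *	port->rxfis_dma    = base_dma + AHCI_RX_FIS_OFFSET;
 *	port->identify     = base + AHCI_IDFY_OFFSET;
 *	port->identify_dma = base_dma + AHCI_IDFY_OFFSET;
 */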
#define HOST_CAP_NZDMA		(1 << 19)
#define HOST_HSORG		0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
#define HSORG_HWREV		0xFF00
#define HSORG_STYLE		0x8
#define HSORG_SLOTGROUPS	0x7

#define PORT_COMMAND_ISSUE	0x38
#define PORT_SDBV		0x7C

#define PORT_OFFSET		0x100
#define PORT_MEM_SIZE		0x80

#define PORT_IRQ_ERR \
	(PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
	 PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
	 PORT_IRQ_OVERFLOW)
#define PORT_IRQ_LEGACY \
	(PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
#define PORT_IRQ_HANDLED \
	(PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
	 PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
	 PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
#define DEF_PORT_IRQ \
	(PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)

/* product numbers */
#define MTIP_PRODUCT_UNKNOWN	0x00
#define MTIP_PRODUCT_ASICFPGA	0x11

/* Device instance number, incremented each time a device is probed. */
static int instance;

/*
 * Global variable used to hold the major block device number
 * allocated in mtip_init().
 */
static int mtip_major;
static struct dentry *dfs_parent;

static u32 cpu_use[NR_CPUS];

static DEFINE_IDA(rssd_index_ida);

static int mtip_block_initialize(struct driver_data *dd);

#ifdef CONFIG_COMPAT
struct mtip_compat_ide_task_request_s {
	__u8		io_ports[8];
	__u8		hob_ports[8];
	ide_reg_valid_t	out_flags;
	ide_reg_valid_t	in_flags;
	int		data_phase;
	int		req_cmd;
	compat_ulong_t	out_size;
	compat_ulong_t	in_size;
};
#endif

/*
 * mtip_check_surprise_removal is called when the card may have been
 * removed from the system; it reads the vendor ID from PCI configuration
 * space to determine whether the device is still present.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	true if device removed, else false
 */
static bool mtip_check_surprise_removal(struct driver_data *dd)
{
	u16 vendor_id = 0;

	if (dd->sr)
		return true;

	/* Read the vendorID from the configuration space */
	pci_read_config_word(dd->pdev, 0x00, &vendor_id);
	if (vendor_id == 0xFFFF) {
		dd->sr = true;
		if (dd->disk)
			blk_mark_disk_dead(dd->disk);
		return true; /* device removed */
	}

	return false; /* device present */
}

static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
					  unsigned int tag)
{
	return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(dd->tags.tags[0], tag));
}

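/*
 * Editor's note (an assumption about the blk-mq contract, not stated in
 * this file): blk-mq allocates the driver's per-command pdu immediately
 * after each struct request, so the tag -> command lookup above is plain
 * pointer arithmetic and cannot fail for a valid tag:
 *
 *	struct request *rq = blk_mq_tag_to_rq(dd->tags.tags[0], tag);
 *	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);	// == (void *)(rq + 1)
 */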
/*
 * Reset the HBA (without sleeping)
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	The reset was successful.
 *	-1	The HBA Reset bit did not clear.
 */
static int mtip_hba_reset(struct driver_data *dd)
{
	unsigned long timeout;

	/* Set the reset bit */
	writel(HOST_RESET, dd->mmio + HOST_CTL);

	/* Flush */
	readl(dd->mmio + HOST_CTL);

	/*
	 * Spin for up to 10 seconds waiting for reset acknowledgement. Spec
	 * is 1 sec but in LUN failure conditions, up to 10 secs are required
	 */
	timeout = jiffies + msecs_to_jiffies(10000);
	do {
		mdelay(10);
		if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
			return -1;

	} while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
		 && time_before(jiffies, timeout));

	if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
		return -1;

	return 0;
}

/*
 * Issue a command to the hardware.
 *
 * Set the appropriate bit in the s_active and Command Issue hardware
 * registers, causing hardware command processing to begin.
 *
 * @port Pointer to the port structure.
 * @tag  The tag of the command to be issued.
 *
 * return value
 *	None
 */
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
	int group = tag >> 5;

	/* guard SACT and CI registers */
	spin_lock(&port->cmd_issue_lock[group]);
	writel((1 << MTIP_TAG_BIT(tag)),
	       port->s_active[MTIP_TAG_INDEX(tag)]);
	writel((1 << MTIP_TAG_BIT(tag)),
	       port->cmd_issue[MTIP_TAG_INDEX(tag)]);
	spin_unlock(&port->cmd_issue_lock[group]);
}

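/*
 * Editor's sketch (assuming MTIP_TAG_INDEX(tag) == (tag) >> 5 and
 * MTIP_TAG_BIT(tag) == (tag) & 0x1f, the usual 32-slots-per-group split
 * defined in mtip32xx.h): tag 37 lands in slot group 1, bit 5, so issuing
 * it performs
 *
 *	writel(1 << 5, port->s_active[1]);
 *	writel(1 << 5, port->cmd_issue[1]);
 */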
/*
 * Enable/disable the reception of FIS
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled
 */
static int mtip_enable_fis(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable FIS reception */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);

	/* Flush */
	readl(port->mmio + PORT_CMD);

	return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
}

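/*
 * Editor's note (general MMIO practice; an assumption about intent here):
 * the bare readl() after writel() flushes the posted write so the device
 * has observed the new PORT_CMD value before the caller proceeds:
 *
 *	writel(val, addr);
 *	(void)readl(addr);	// force the posted write out to the device
 */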
/*
 * Enable/disable the DMA engine
 *
 * @port   Pointer to the port data structure
 * @enable 1 to enable, 0 to disable
 *
 * return value
 *	Previous state: 1 enabled, 0 disabled.
 */
static int mtip_enable_engine(struct mtip_port *port, int enable)
{
	u32 tmp;

	/* enable/disable the DMA engine */
	tmp = readl(port->mmio + PORT_CMD);
	if (enable)
		writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
	else
		writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);

	readl(port->mmio + PORT_CMD);
	return (((tmp & PORT_CMD_START) == PORT_CMD_START));
}

/*
 * Enables the port DMA engine and FIS reception.
 *
 * return value
 *	None
 */
static inline void mtip_start_port(struct mtip_port *port)
{
	/* Enable FIS reception */
	mtip_enable_fis(port, 1);

	/* Enable the DMA engine */
	mtip_enable_engine(port, 1);
}

/*
 * Deinitialize a port by disabling port interrupts, the DMA engine,
 * and FIS reception.
 *
 * @port Pointer to the port structure
 *
 * return value
 *	None
 */
static inline void mtip_deinit_port(struct mtip_port *port)
{
	/* Disable interrupts on this port */
	writel(0, port->mmio + PORT_IRQ_MASK);

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Disable FIS reception */
	mtip_enable_fis(port, 0);
}

/*
 * Initialize a port.
 *
 * This function deinitializes the port by calling mtip_deinit_port() and
 * then initializes it by setting the command header and RX FIS addresses,
 * clearing the SError register and any pending port interrupts before
 * re-enabling the default set of port interrupts.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	None
 */
static void mtip_init_port(struct mtip_port *port)
{
	int i;

	mtip_deinit_port(port);

	/* Program the command list base and FIS base addresses */
	if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
		writel((port->command_list_dma >> 16) >> 16,
		       port->mmio + PORT_LST_ADDR_HI);
		writel((port->rxfis_dma >> 16) >> 16,
		       port->mmio + PORT_FIS_ADDR_HI);
		set_bit(MTIP_PF_HOST_CAP_64, &port->flags);
	}

	writel(port->command_list_dma & 0xFFFFFFFF,
	       port->mmio + PORT_LST_ADDR);
	writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);

	/* Clear SError */
	writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);

	/* reset the completed registers.*/
	for (i = 0; i < port->dd->slot_groups; i++)
		writel(0xFFFFFFFF, port->completed[i]);

	/* Clear any pending interrupts for this port */
	writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);

	/* Clear any pending interrupts on the HBA. */
	writel(readl(port->dd->mmio + HOST_IRQ_STAT),
	       port->dd->mmio + HOST_IRQ_STAT);

	/* Enable port interrupts */
	writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
}

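/*
 * Editor's note: the (addr >> 16) >> 16 double shift above is the portable
 * way to take the upper 32 bits of a dma_addr_t, which may be only 32 bits
 * wide (where a direct >> 32 would be undefined behaviour). For example,
 * with addr = 0x0000001234567000:
 *
 *	lower = addr & 0xFFFFFFFF;	// 0x34567000
 *	upper = (addr >> 16) >> 16;	// 0x00000012
 */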
/*
 * Restart a port
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_restart_port(struct mtip_port *port)
{
	unsigned long timeout;

	/* Disable the DMA engine */
	mtip_enable_engine(port, 0);

	/* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
	timeout = jiffies + msecs_to_jiffies(500);
	while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
		&& time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/*
	 * Chip quirk: escalate to hba reset if
	 * PxCMD.CR not clear after 500 ms
	 */
	if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
		dev_warn(&port->dd->pdev->dev,
			"PxCMD.CR not clear, escalating reset\n");

		if (mtip_hba_reset(port->dd))
			dev_err(&port->dd->pdev->dev,
				"HBA reset escalation failed.\n");

		/* 30 ms delay before com reset to quiesce chip */
		mdelay(30);
	}

	dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");

	/* Set PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) |
			 1, port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 1 ms to quiesce chip function */
	timeout = jiffies + msecs_to_jiffies(1);
	while (time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	/* Clear PxSCTL.DET */
	writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
			 port->mmio + PORT_SCR_CTL);
	readl(port->mmio + PORT_SCR_CTL);

	/* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
	timeout = jiffies + msecs_to_jiffies(500);
	while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
			 && time_before(jiffies, timeout))
		;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return;

	if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
		dev_warn(&port->dd->pdev->dev,
			"COM reset failed\n");

	mtip_init_port(port);
	mtip_start_port(port);
}

static int mtip_device_reset(struct driver_data *dd)
{
	int rv = 0;

	if (mtip_check_surprise_removal(dd))
		return 0;

	if (mtip_hba_reset(dd) < 0)
		rv = -EFAULT;

	mdelay(1);
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
	       dd->mmio + HOST_CTL);
	return rv;
}

/*
 * Helper function for tag logging
 */
static void print_tags(struct driver_data *dd,
		       char *msg,
		       unsigned long *tagbits,
		       int cnt)
{
	unsigned char tagmap[128];
	int group, tagmap_len = 0;

	memset(tagmap, 0, sizeof(tagmap));
	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
			tagbits[group-1]);
	dev_warn(&dd->pdev->dev,
		"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}

static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
			      dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
			       struct smart_attr *attrib);

static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	cmd->status = status;
	if (likely(!blk_should_fake_timeout(req->q)))
		blk_mq_complete_request(req);
}

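/*
 * Editor's note (an assumption about the blk-mq completion flow, not shown
 * in this excerpt): blk_mq_complete_request() raises a softirq that invokes
 * the driver's .complete handler, which typically just forwards the status
 * recorded above:
 *
 *	blk_mq_end_request(req, cmd->status);
 */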
/*
 * Handle an error.
 *
 * @dd Pointer to the DRIVER_DATA structure.
 *
 * return value
 *	None
 */
static void mtip_handle_tfe(struct driver_data *dd)
{
	int group, tag, bit, reissue, rv;
	struct mtip_port *port;
	struct mtip_cmd *cmd;
	u32 completed;
	struct host_to_dev_fis *fis;
	unsigned long tagaccum[SLOTBITS_IN_LONGS];
	unsigned int cmd_cnt = 0;
	unsigned char *buf;
	char *fail_reason = NULL;
	int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;

	dev_warn(&dd->pdev->dev, "Taskfile error\n");

	port = dd->port;

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
		mtip_complete_command(cmd, BLK_STS_IOERR);
		return;
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		completed = readl(port->completed[group]);

		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);

		/* clear completed status register in the hardware.*/
		writel(completed, port->completed[group]);

		/* Process successfully completed commands */
		for (bit = 0; bit < 32 && completed; bit++) {
			if (!(completed & (1<<bit)))
				continue;
			tag = (group << 5) + bit;

			/* Skip the internal command slot */
			if (tag == MTIP_TAG_INTERNAL)
				continue;

			cmd = mtip_cmd_from_tag(dd, tag);
			mtip_complete_command(cmd, 0);
			set_bit(tag, tagaccum);
			cmd_cnt++;
		}
	}

	print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);

	/* Restart the port */
	mdelay(20);
	mtip_restart_port(port);

	/* Trying to determine the cause of the error */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
			fail_all_ncq_write = 1;
			fail_reason = "write protect";
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
			fail_all_ncq_cmds = 1;
			fail_reason = "thermal shutdown";
		}
		if (buf[288] == 0xBF) {
			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed. Secure erase required.\n");
			fail_all_ncq_cmds = 1;
			fail_reason = "rebuild failed";
		}
	}

	/* clear the tag accumulator */
	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));

	/* Loop through all the groups */
	for (group = 0; group < dd->slot_groups; group++) {
		for (bit = 0; bit < 32; bit++) {
			reissue = 1;
			tag = (group << 5) + bit;
			cmd = mtip_cmd_from_tag(dd, tag);

			fis = (struct host_to_dev_fis *)cmd->command;

			/* Should re-issue? */
			if (tag == MTIP_TAG_INTERNAL ||
			    fis->command == ATA_CMD_SET_FEATURES)
				reissue = 0;
			else {
				if (fail_all_ncq_cmds ||
					(fail_all_ncq_write &&
					fis->command == ATA_CMD_FPDMA_WRITE)) {
					dev_warn(&dd->pdev->dev,
					" Fail: %s w/tag %d [%s].\n",
					fis->command == ATA_CMD_FPDMA_WRITE ?
						"write" : "read",
					tag,
					fail_reason != NULL ?
						fail_reason : "unknown");
					mtip_complete_command(cmd, BLK_STS_MEDIUM);
					continue;
				}
			}

			/*
			 * First check if this command has
			 * exceeded its retries.
			 */
			if (reissue && (cmd->retries-- > 0)) {

				set_bit(tag, tagaccum);

				/* Re-issue the command. */
				mtip_issue_ncq_command(port, tag);

				continue;
			}

			/* Retire a command that will not be reissued */
			dev_warn(&port->dd->pdev->dev,
				"retiring tag %d\n", tag);

			mtip_complete_command(cmd, BLK_STS_IOERR);
		}
	}
	print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
}

/*
 * Handle a set device bits interrupt
 */
static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
							u32 completed)
{
	struct driver_data *dd = port->dd;
	int tag, bit;
	struct mtip_cmd *command;

	if (!completed) {
		WARN_ON_ONCE(!completed);
		return;
	}
	/* clear completed status register in the hardware.*/
	writel(completed, port->completed[group]);

	/* Process completed commands. */
	for (bit = 0; (bit < 32) && completed; bit++) {
		if (completed & 0x01) {
			tag = (group << 5) | bit;

			/* skip internal command slot. */
			if (unlikely(tag == MTIP_TAG_INTERNAL))
				continue;

			command = mtip_cmd_from_tag(dd, tag);
			mtip_complete_command(command, 0);
		}
		completed >>= 1;
	}

	/* If last, re-enable interrupts */
	if (atomic_dec_return(&dd->irq_workers_active) == 0)
		writel(0xffffffff, dd->mmio + HOST_IRQ_STAT);
}

/*
 * Process legacy pio and d2h interrupts
 */
static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
{
	struct mtip_port *port = dd->port;
	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);

	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
		int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
		int status = readl(port->cmd_issue[group]);

		if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
			mtip_complete_command(cmd, 0);
	}
}

/*
 * Demux and handle errors
 */
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
	if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.x\n");
		writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
		dev_warn(&dd->pdev->dev,
			"Clearing PxSERR.DIAG.n\n");
		writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
	}

	if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
		dev_warn(&dd->pdev->dev,
			"Port stat errors %x unhandled\n",
			(port_stat & ~PORT_IRQ_HANDLED));
		if (mtip_check_surprise_removal(dd))
			return;
	}
	if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
		set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
	}
}

static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
{
	struct driver_data *dd = (struct driver_data *) data;
	struct mtip_port *port = dd->port;
	u32 hba_stat, port_stat;
	int rv = IRQ_NONE;
	int do_irq_enable = 1, i, workers;
	struct mtip_work *twork;

	hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
	if (hba_stat) {
		rv = IRQ_HANDLED;

		/* Acknowledge the interrupt status on the port.*/
		port_stat = readl(port->mmio + PORT_IRQ_STAT);
		if (unlikely(port_stat == 0xFFFFFFFF)) {
			mtip_check_surprise_removal(dd);
			return IRQ_HANDLED;
		}
		writel(port_stat, port->mmio + PORT_IRQ_STAT);

		/* Demux port status */
		if (likely(port_stat & PORT_IRQ_SDB_FIS)) {
			do_irq_enable = 0;
			WARN_ON_ONCE(atomic_read(&dd->irq_workers_active) != 0);

			/* Start at 1: group zero is always local? */
			for (i = 0, workers = 0; i < MTIP_MAX_SLOT_GROUPS;
									i++) {
				twork = &dd->work[i];
				twork->completed = readl(port->completed[i]);
				if (twork->completed)
					workers++;
			}

			atomic_set(&dd->irq_workers_active, workers);
			if (workers) {
				for (i = 1; i < MTIP_MAX_SLOT_GROUPS; i++) {
					twork = &dd->work[i];
					if (twork->completed)
						queue_work_on(
							twork->cpu_binding,
							dd->isr_workq,
							&twork->work);
				}

				if (likely(dd->work[0].completed))
					mtip_workq_sdbfx(port, 0,
							dd->work[0].completed);

			} else {
				/*
				 * Chip quirk: SDB interrupt but nothing
				 * to complete
				 */
				do_irq_enable = 1;
			}
		}

		if (unlikely(port_stat & PORT_IRQ_ERR)) {
			if (unlikely(mtip_check_surprise_removal(dd))) {
				/* don't proceed further */
				return IRQ_HANDLED;
			}
			if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
							&dd->dd_flag))
				return rv;

			mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
		}

		if (unlikely(port_stat & PORT_IRQ_LEGACY))
			mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
	}

	/* acknowledge interrupt */
	if (unlikely(do_irq_enable))
		writel(hba_stat, dd->mmio + HOST_IRQ_STAT);

	return rv;
}

/*
 * HBA interrupt subroutine.
 *
 * @irq      IRQ number.
 * @instance Pointer to the driver data structure.
 *
 * return value
 *	IRQ_HANDLED	A HBA interrupt was pending and handled.
 *	IRQ_NONE	This interrupt was not for the HBA.
 */
static irqreturn_t mtip_irq_handler(int irq, void *instance)
{
	struct driver_data *dd = instance;

	return mtip_handle_irq(dd);
}

static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
	writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}

static bool mtip_pause_ncq(struct mtip_port *port,
			   struct host_to_dev_fis *fis)
{
	unsigned long task_file_data;

	task_file_data = readl(port->mmio + PORT_TFDATA);
	if ((task_file_data & 1))
		return false;

	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
					(fis->features == 0x03)) {
		set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
		port->ic_pause_timer = jiffies;
		return true;
	} else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
		((fis->command == 0xFC) &&
			(fis->features == 0x27 || fis->features == 0x72 ||
			 fis->features == 0x62 || fis->features == 0x26))) {
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
		/* Com reset after secure erase or lowlevel format */
		mtip_restart_port(port);
		clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
		return false;
	}

	return false;
}

static bool mtip_commands_active(struct mtip_port *port)
{
	unsigned int active;
	unsigned int n;

	/*
	 * Ignore s_active bit 0 of array element 0.
	 * This bit will always be set
	 */
	active = readl(port->s_active[0]) & 0xFFFFFFFE;
	for (n = 1; n < port->dd->slot_groups; n++)
		active |= readl(port->s_active[n]);

	return active != 0;
}

/*
 * Wait for port to quiesce
 *
 * @port    Pointer to port data structure
 * @timeout Max duration to wait (ms)
 *
 * return value
 *	0	Success
 *	-EBUSY	Commands still active
 */
static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
	unsigned long to;
	bool active = true;

	blk_mq_quiesce_queue(port->dd->queue);

	to = jiffies + msecs_to_jiffies(timeout);
	do {
		if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
			test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			msleep(20);
			continue; /* svc thd is actively issuing commands */
		}

		msleep(100);

		if (mtip_check_surprise_removal(port->dd))
			goto err_fault;

		active = mtip_commands_active(port);
		if (!active)
			break;
	} while (time_before(jiffies, to));

	blk_mq_unquiesce_queue(port->dd->queue);
	return active ? -EBUSY : 0;
err_fault:
	blk_mq_unquiesce_queue(port->dd->queue);
	return -EFAULT;
}

struct mtip_int_cmd {
	int fis_len;
	dma_addr_t buffer;
	int buf_len;
	u32 opts;
};

/*
 * Execute an internal command and wait for the completion.
 *
 * @port    Pointer to the port data structure.
 * @fis     Pointer to the FIS that describes the command.
 * @fis_len Length in WORDS of the FIS.
 * @buffer  DMA accessible for command data.
 * @buf_len Length, in bytes, of the data buffer.
 * @opts    Command header options, excluding the FIS length
 *          and the number of PRD entries.
 * @timeout Time in ms to wait for the command to complete.
 *
 * return value
 *	0	 Command completed successfully.
 *	-EFAULT  The buffer address is not correctly aligned.
 *	-EBUSY   Internal command or other IO in progress.
 *	-EAGAIN  Time out waiting for command to complete.
 */
static int mtip_exec_internal_command(struct mtip_port *port,
				      struct host_to_dev_fis *fis,
				      int fis_len,
				      dma_addr_t buffer,
				      int buf_len,
				      u32 opts,
				      unsigned long timeout)
{
	struct mtip_cmd *int_cmd;
	struct driver_data *dd = port->dd;
	struct request *rq;
	struct mtip_int_cmd icmd = {
		.fis_len = fis_len,
		.buffer = buffer,
		.buf_len = buf_len,
		.opts = opts
	};
	int rv = 0;

	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
	if (buffer & 0x00000007) {
		dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
		return -EFAULT;
	}

	if (mtip_check_surprise_removal(dd))
		return -EFAULT;

	rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq)) {
		dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
		return -EFAULT;
	}

	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);

	if (fis->command == ATA_CMD_SEC_ERASE_PREP)
		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);

	clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);

	if (fis->command != ATA_CMD_STANDBYNOW1) {
		/* wait for io to complete if non atomic */
		if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
			dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
			blk_mq_free_request(rq);
			clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
			wake_up_interruptible(&port->svc_wait);
			return -EBUSY;
		}
	}

	/* Copy the command to the command table */
	int_cmd = blk_mq_rq_to_pdu(rq);
	int_cmd->icmd = &icmd;
	memcpy(int_cmd->command, fis, fis_len*4);

	rq->timeout = timeout;

	/* insert request and run queue */
	blk_execute_rq(rq, true);

	if (int_cmd->status) {
		dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
				fis->command, int_cmd->status);
		rv = -EIO;

		if (mtip_check_surprise_removal(dd) ||
			test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
					&dd->dd_flag)) {
			dev_err(&dd->pdev->dev,
				"Internal command [%02X] wait returned due to SR\n",
				fis->command);
			rv = -ENXIO;
			goto exec_ic_exit;
		}
		mtip_device_reset(dd); /* recover from timeout issue */
		rv = -EAGAIN;
		goto exec_ic_exit;
	}

	if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
			& (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
		rv = -ENXIO;
		if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
			mtip_device_reset(dd);
			rv = -EAGAIN;
		}
	}
exec_ic_exit:
	/* Clear the allocated and active bits for the internal command. */
	blk_mq_free_request(rq);
	clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
		/* NCQ paused */
		return rv;
	}
	wake_up_interruptible(&port->svc_wait);

	return rv;
}

/*
 * Byte-swap ATA ID strings.
 *
 * ATA identify data contains strings in byte-swapped 16-bit words.
 * They must be swapped (on all architectures) to be usable as C strings.
 * This function swaps bytes in-place.
 *
 * @buf The buffer location of the string
 * @len The number of bytes to swap
 *
 * return value
 *	None
 */
static inline void ata_swap_string(u16 *buf, unsigned int len)
{
	int i;

	for (i = 0; i < (len/2); i++)
		be16_to_cpus(&buf[i]);
}

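/*
 * Example (editor's illustration): each IDENTIFY word holds two characters
 * with the first character in the high byte, so on a little-endian host
 * the raw bytes arrive pairwise swapped:
 *
 *	u16 buf[2] = { 0x5033, 0x3230 };   // raw bytes spell "3P02"
 *	ata_swap_string(buf, 4);           // bytes now spell "P320"
 */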
static void mtip_set_timeout(struct driver_data *dd,
			     struct host_to_dev_fis *fis,
			     unsigned int *timeout, u8 erasemode)
{
	switch (fis->command) {
	case ATA_CMD_DOWNLOAD_MICRO:
		*timeout = 120000; /* 2 minutes */
		break;
	case ATA_CMD_SEC_ERASE_UNIT:
	case 0xFC:
		if (erasemode)
			*timeout = ((*(dd->port->identify + 90) * 2) * 60000);
		else
			*timeout = ((*(dd->port->identify + 89) * 2) * 60000);
		break;
	case ATA_CMD_STANDBYNOW1:
		*timeout = 120000; /* 2 minutes */
		break;
	case 0xF7:
	case 0xFA:
		*timeout = 60000; /* 60 seconds */
		break;
	case ATA_CMD_SMART:
		*timeout = 15000; /* 15 seconds */
		break;
	default:
		*timeout = MTIP_IOCTL_CMD_TIMEOUT_MS;
		break;
	}
}

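/*
 * Editor's note: IDENTIFY word 89 (word 90 for enhanced erase) reports the
 * secure-erase time in units of 2 minutes, which the case above converts
 * to milliseconds. A reported value of 60, for example, gives
 *
 *	60 * 2 * 60000 = 7,200,000 ms (two hours)
 */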
/*
 * Request the device identity information.
 *
 * If a user space buffer is not specified, i.e. is NULL, the
 * identify information is still read from the drive and placed
 * into the identify data buffer (@e port->identify) in the
 * port data structure.
 * When the identify buffer contains valid identify information @e
 * port->identify_valid is non-zero.
 *
 * @port        Pointer to the port structure.
 * @user_buffer A user space buffer where the identify data should be
 *              copied.
 *
 * return value
 *	0	Command completed successfully.
 *	-EFAULT	An error occurred while copying data to the user buffer.
 *	-1	Command failed.
 */
static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
{
	int rv = 0;
	struct host_to_dev_fis fis;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
		return -EFAULT;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_ID_ATA;

	/* Set the identify information as invalid. */
	port->identify_valid = 0;

	/* Clear the identify information. */
	memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				port->identify_dma,
				sizeof(u16) * ATA_ID_WORDS,
				0,
				MTIP_INT_CMD_TIMEOUT_MS)
				< 0) {
		rv = -1;
		goto out;
	}

	/*
	 * Perform any necessary byte-swapping. Yes, the kernel does in fact
	 * perform field-sensitive swapping on the string fields.
	 * See the kernel use of ata_id_string() for proof of this.
	 */
#ifdef __LITTLE_ENDIAN
	ata_swap_string(port->identify + 27, 40);  /* model string*/
	ata_swap_string(port->identify + 23, 8);   /* firmware string*/
	ata_swap_string(port->identify + 10, 20);  /* serial# string*/
#else
	{
		int i;
		for (i = 0; i < ATA_ID_WORDS; i++)
			port->identify[i] = le16_to_cpu(port->identify[i]);
	}
#endif

	/* Check security locked state */
	if (port->identify[128] & 0x4)
		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
	else
		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);

	/* Set the identify buffer as valid. */
	port->identify_valid = 1;

	if (user_buffer) {
		if (copy_to_user(
			user_buffer,
			port->identify,
			ATA_ID_WORDS * sizeof(u16))) {
			rv = -EFAULT;
			goto out;
		}
	}

out:
	return rv;
}

/*
 * Issue a standby immediate command to the device.
 *
 * @port Pointer to the port structure.
 *
 * return value
 *	0	Command was executed successfully.
 *	-1	An error occurred while executing the command.
 */
static int mtip_standby_immediate(struct mtip_port *port)
{
	int rv;
	struct host_to_dev_fis fis;
	unsigned long __maybe_unused start;
	unsigned int timeout;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_STANDBYNOW1;

	mtip_set_timeout(port->dd, &fis, &timeout, 0);

	start = jiffies;
	rv = mtip_exec_internal_command(port, &fis, 5, 0, 0, 0, timeout);
	dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
			jiffies_to_msecs(jiffies - start));
	if (rv)
		dev_warn(&port->dd->pdev->dev,
			"STANDBY IMMEDIATE command failed.\n");

	return rv;
}

/*
 * Issue a READ LOG EXT command to the device.
 *
 * @port       pointer to the port structure.
 * @page       page number to fetch
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 * @sectors    page length to fetch, in sectors
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
				dma_addr_t buffer_dma, unsigned int sectors)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_READ_LOG_EXT;
	fis.sect_count	= sectors & 0xFF;
	fis.sect_cnt_ex	= (sectors >> 8) & 0xFF;
	fis.lba_low	= page;
	fis.lba_mid	= 0;
	fis.device	= ATA_DEVICE_OBS;

	memset(buffer, 0, sectors * ATA_SECT_SIZE);

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					sectors * ATA_SECT_SIZE,
					0,
					MTIP_INT_CMD_TIMEOUT_MS);
}

/*
 * Issue a SMART READ DATA command to the device.
 *
 * @port       pointer to the port structure.
 * @buffer     pointer to buffer
 * @buffer_dma dma address corresponding to @buffer
 *
 * return value
 *	@rv return value from mtip_exec_internal_command()
 */
static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
					dma_addr_t buffer_dma)
{
	struct host_to_dev_fis fis;

	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= ATA_CMD_SMART;
	fis.features	= 0xD0;
	fis.sect_count	= 1;
	fis.lba_mid	= 0x4F;
	fis.lba_hi	= 0xC2;
	fis.device	= ATA_DEVICE_OBS;

	return mtip_exec_internal_command(port,
					&fis,
					5,
					buffer_dma,
					ATA_SECT_SIZE,
					0,
					15000);
}

/*
 * Get the value of a smart attribute
 *
 * @port   pointer to the port structure
 * @id     attribute number
 * @attrib pointer to return attrib information corresponding to @id
 *
 * return value
 *	-EINVAL	NULL buffer passed or unsupported attribute @id.
 *	-EPERM	Identify data not valid, SMART not supported or not enabled
 */
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
						struct smart_attr *attrib)
{
	int rv, i;
	struct smart_attr *pattr;

	if (!attrib)
		return -EINVAL;

	if (!port->identify_valid) {
		dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
		return -EPERM;
	}
	if (!(port->identify[82] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
		return -EPERM;
	}
	if (!(port->identify[85] & 0x1)) {
		dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
		return -EPERM;
	}

	memset(port->smart_buf, 0, ATA_SECT_SIZE);
	rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
	if (rv) {
		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
		return rv;
	}

	pattr = (struct smart_attr *)(port->smart_buf + 2);
	for (i = 0; i < 29; i++, pattr++)
		if (pattr->attr_id == id) {
			memcpy(attrib, pattr, sizeof(struct smart_attr));
			break;
		}

	if (i == 29) {
		dev_warn(&port->dd->pdev->dev,
			"Query for invalid SMART attribute ID\n");
		rv = -EINVAL;
	}

	return rv;
}

/*
 * Get the drive capacity.
 *
 * @dd      Pointer to the device data structure.
 * @sectors Pointer to the variable that will receive the sector count.
 *
 * return value
 *	1 Capacity was returned successfully.
 *	0 The identify information is invalid.
 */
static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
{
	struct mtip_port *port = dd->port;
	u64 total, raw0, raw1, raw2, raw3;

	raw0 = port->identify[100];
	raw1 = port->identify[101];
	raw2 = port->identify[102];
	raw3 = port->identify[103];
	total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
	*sectors = total;
	return (bool) !!port->identify_valid;
}

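/*
 * Example (editor's illustration): IDENTIFY words 100-103 hold the 48-bit
 * maximum LBA, least-significant word first. A drive reporting
 * 1,000,215,216 sectors (0x3B9E12B0, a nominal 512 GB) would return
 *
 *	raw0 = 0x12B0, raw1 = 0x3B9E, raw2 = 0, raw3 = 0;
 *	total = 0x12B0 | (0x3B9E << 16) = 0x3B9E12B0
 */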
/*
 * Display the identify command data.
 *
 * @port Pointer to the port data structure.
 *
 * return value
 *	None
 */
static void mtip_dump_identify(struct mtip_port *port)
{
	sector_t sectors;
	unsigned short revid;
	char cbuf[42];

	if (!port->identify_valid)
		return;

	strscpy(cbuf, (char *)(port->identify + 10), 21);
	dev_info(&port->dd->pdev->dev,
		"Serial No.: %s\n", cbuf);

	strscpy(cbuf, (char *)(port->identify + 23), 9);
	dev_info(&port->dd->pdev->dev,
		"Firmware Ver.: %s\n", cbuf);

	strscpy(cbuf, (char *)(port->identify + 27), 41);
	dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);

	dev_info(&port->dd->pdev->dev, "Security: %04x %s\n",
		port->identify[128],
		port->identify[128] & 0x4 ? "(LOCKED)" : "");

	if (mtip_hw_get_capacity(port->dd, &sectors))
		dev_info(&port->dd->pdev->dev,
			"Capacity: %llu sectors (%llu MB)\n",
			(u64)sectors,
			((u64)sectors) * ATA_SECT_SIZE >> 20);

	pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
	switch (revid & 0xFF) {
	case 0x1:
		strscpy(cbuf, "A0", 3);
		break;
	case 0x3:
		strscpy(cbuf, "A2", 3);
		break;
	default:
		strscpy(cbuf, "?", 2);
		break;
	}
	dev_info(&port->dd->pdev->dev,
		"Card Type: %s\n", cbuf);
}

/*
 * Map the commands scatter list into the command table.
 *
 * @command Pointer to the command.
 * @nents   Number of scatter list entries.
 *
 * return value
 *	None
 */
static inline void fill_command_sg(struct driver_data *dd,
				struct mtip_cmd *command,
				int nents)
{
	int n;
	unsigned int dma_len;
	struct mtip_cmd_sg *command_sg;
	struct scatterlist *sg;

	command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;

	for_each_sg(command->sg, sg, nents, n) {
		dma_len = sg_dma_len(sg);
		if (dma_len > 0x400000)
			dev_err(&dd->pdev->dev,
				"DMA segment length truncated\n");
		command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
		command_sg->dba	= cpu_to_le32(sg_dma_address(sg));
		command_sg->dba_upper =
			cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		command_sg++;
	}
}

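/*
 * Editor's note: the AHCI PRD "info" field stores the byte count minus one
 * in its low 22 bits, so a 4096-byte segment is encoded as 0xFFF and the
 * largest describable segment is 0x400000 bytes (4 MiB):
 *
 *	command_sg->info = cpu_to_le32((4096 - 1) & 0x3FFFFF);
 */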
/*
 * @brief Execute a drive command.
 *
 * return value 0  The command completed successfully.
 * return value -1 An error occurred while executing the command.
 */
static int exec_drive_task(struct mtip_port *port, u8 *command)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
	unsigned int to;

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[1];
	fis.sect_count	= command[2];
	fis.sector	= command[3];
	fis.cyl_low	= command[4];
	fis.cyl_hi	= command[5];
	fis.device	= command[6] & ~0x10; /* Clear the dev bit*/

	mtip_set_timeout(port->dd, &fis, &to, 0);

	dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3],
		command[4],
		command[5],
		command[6]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port, &fis, 5, 0, 0, 0, to) < 0)
		return -1;

	command[0] = reply->command;  /* Status*/
	command[1] = reply->features; /* Error*/
	command[4] = reply->cyl_low;
	command[5] = reply->cyl_hi;

	dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
		__func__,
		command[0],
		command[1],
		command[4],
		command[5]);

	return 0;
}

/*
 * @brief Execute a drive command.
 *
 * @param port        Pointer to the port data structure.
 * @param command     Pointer to the user specified command parameters.
 * @param user_buffer Pointer to the user space buffer where read sector
 *                    data should be copied.
 *
 * return value 0       The command completed successfully.
 * return value -EFAULT An error occurred while copying the completion
 *                      data to the user space buffer.
 * return value -1      An error occurred while executing the command.
 */
static int exec_drive_command(struct mtip_port *port, u8 *command,
				void __user *user_buffer)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *buf = NULL;
	dma_addr_t dma_addr = 0;
	int rv = 0, xfer_sz = command[3];
	unsigned int to;

	if (xfer_sz) {
		if (!user_buffer)
			return -EFAULT;

		buf = dma_alloc_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz,
				&dma_addr,
				GFP_KERNEL);
		if (!buf) {
			dev_err(&port->dd->pdev->dev,
				"Memory allocation failed (%d bytes)\n",
				ATA_SECT_SIZE * xfer_sz);
			return -ENOMEM;
		}
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));
	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= command[0];
	fis.features	= command[2];
	fis.sect_count	= command[3];
	if (fis.command == ATA_CMD_SMART) {
		fis.sector	= command[1];
		fis.cyl_low	= 0x4F;
		fis.cyl_hi	= 0xC2;
	}

	mtip_set_timeout(port->dd, &fis, &to, 0);

	if (xfer_sz)
		reply = (port->rxfis + RX_FIS_PIO_SETUP);
	else
		reply = (port->rxfis + RX_FIS_D2H_REG);

	dbg_printk(MTIP_DRV_NAME
		" %s: User Command: cmd %x, sect %x, "
		"feat %x, sectcnt %x\n",
		__func__,
		command[0],
		command[1],
		command[2],
		command[3]);

	/* Execute the command. */
	if (mtip_exec_internal_command(port,
				&fis,
				5,
				(xfer_sz ? dma_addr : 0),
				(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
				0,
				to)
				< 0) {
		rv = -EFAULT;
		goto exit_drive_command;
	}

	/* Collect the completion status. */
	command[0] = reply->command;  /* Status*/
	command[1] = reply->features; /* Error*/
	command[2] = reply->sect_count;

	dbg_printk(MTIP_DRV_NAME
		" %s: Completion Status: stat %x, "
		"err %x, nsect %x\n",
		__func__,
		command[0],
		command[1],
		command[2]);

	if (xfer_sz) {
		if (copy_to_user(user_buffer,
				 buf,
				 ATA_SECT_SIZE * command[3])) {
			rv = -EFAULT;
			goto exit_drive_command;
		}
	}
exit_drive_command:
	if (buf)
		dma_free_coherent(&port->dd->pdev->dev,
				ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
	return rv;
}

/*
 * Indicates whether a command has a single sector payload.
 *
 * @command  Command passed to the device.
 * @features Features passed to the device.
 *
 * return value
 *	1 command is one that always has a single sector payload,
 *	  regardless of the value in the Sector Count field.
 *	0 otherwise
 */
static unsigned int implicit_sector(unsigned char command,
					unsigned char features)
{
	unsigned int rv = 0;

	/* list of commands that have an implicit sector count of 1 */
	switch (command) {
	case ATA_CMD_SEC_SET_PASS:
	case ATA_CMD_SEC_UNLOCK:
	case ATA_CMD_SEC_ERASE_PREP:
	case ATA_CMD_SEC_ERASE_UNIT:
	case ATA_CMD_SEC_FREEZE_LOCK:
	case ATA_CMD_SEC_DISABLE_PASS:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_PMP_WRITE:
		rv = 1;
		break;
	case ATA_CMD_SET_MAX:
		if (features == ATA_SET_MAX_UNLOCK)
			rv = 1;
		break;
	case ATA_CMD_SMART:
		if ((features == ATA_SMART_READ_VALUES) ||
				(features == ATA_SMART_READ_THRESHOLDS))
			rv = 1;
		break;
	case ATA_CMD_CONF_OVERLAY:
		if ((features == ATA_DCO_IDENTIFY) ||
				(features == ATA_DCO_SET))
			rv = 1;
		break;
	}
	return rv;
}

/*
 * Executes a taskfile
 * See ide_taskfile_ioctl() for derivation
 */
static int exec_drive_taskfile(struct driver_data *dd,
			       void __user *buf,
			       ide_task_request_t *req_task,
			       int outtotal)
{
	struct host_to_dev_fis	fis;
	struct host_to_dev_fis *reply;
	u8 *outbuf = NULL;
	u8 *inbuf = NULL;
	dma_addr_t outbuf_dma = 0;
	dma_addr_t inbuf_dma = 0;
	dma_addr_t dma_buffer = 0;
	int err = 0;
	unsigned int taskin = 0;
	unsigned int taskout = 0;
	u8 nsect = 0;
	unsigned int timeout;
	unsigned int force_single_sector;
	unsigned int transfer_size;
	unsigned long task_file_data;
	int intotal = outtotal + req_task->out_size;
	int erasemode = 0;

	taskout = req_task->out_size;
	taskin = req_task->in_size;
	/* 130560 = 512 * 0xFF*/
	if (taskin > 130560 || taskout > 130560)
		return -EINVAL;

	if (taskout) {
		outbuf = memdup_user(buf + outtotal, taskout);
		if (IS_ERR(outbuf))
			return PTR_ERR(outbuf);

		outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
					    taskout, DMA_TO_DEVICE);
		if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
			err = -ENOMEM;
			goto abort;
		}
		dma_buffer = outbuf_dma;
	}

	if (taskin) {
		inbuf = memdup_user(buf + intotal, taskin);
		if (IS_ERR(inbuf)) {
			err = PTR_ERR(inbuf);
			inbuf = NULL;
			goto abort;
		}
		inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
					   taskin, DMA_FROM_DEVICE);
		if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
			err = -ENOMEM;
			goto abort;
		}
		dma_buffer = inbuf_dma;
	}

	/* only supports PIO and non-data commands from this ioctl. */
	switch (req_task->data_phase) {
	case TASKFILE_OUT:
		nsect = taskout / ATA_SECT_SIZE;
		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
		break;
	case TASKFILE_IN:
		reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
		break;
	case TASKFILE_NO_DATA:
		reply = (dd->port->rxfis + RX_FIS_D2H_REG);
		break;
	default:
		err = -EINVAL;
		goto abort;
	}

	/* Build the FIS. */
	memset(&fis, 0, sizeof(struct host_to_dev_fis));

	fis.type	= 0x27;
	fis.opts	= 1 << 7;
	fis.command	= req_task->io_ports[7];
	fis.features	= req_task->io_ports[1];
	fis.sect_count	= req_task->io_ports[2];
	fis.lba_low	= req_task->io_ports[3];
	fis.lba_mid	= req_task->io_ports[4];
	fis.lba_hi	= req_task->io_ports[5];
	/* Clear the dev bit*/
	fis.device	= req_task->io_ports[6] & ~0x10;

	if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
		req_task->in_flags.all	=
			IDE_TASKFILE_STD_IN_FLAGS |
			(IDE_HOB_STD_IN_FLAGS << 8);
		fis.lba_low_ex	= req_task->hob_ports[3];
		fis.lba_mid_ex	= req_task->hob_ports[4];
		fis.lba_hi_ex	= req_task->hob_ports[5];
		fis.features_ex	= req_task->hob_ports[1];
		fis.sect_cnt_ex	= req_task->hob_ports[2];

	} else {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
	}

	force_single_sector = implicit_sector(fis.command, fis.features);

	if ((taskin || taskout) && (!fis.sect_count)) {
		if (nsect)
			fis.sect_count = nsect;
		else {
			if (!force_single_sector) {
				dev_warn(&dd->pdev->dev,
					"data movement but "
					"sect_count is 0\n");
				err = -EINVAL;
				goto abort;
			}
		}
	}

	dbg_printk(MTIP_DRV_NAME
		" %s: cmd %x, feat %x, nsect %x,"
		" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
		" head/dev %x\n",
		__func__,
		fis.command,
		fis.features,
		fis.sect_count,
		fis.lba_low,
		fis.lba_mid,
		fis.lba_hi,
		fis.device);

	/* check for erase mode support during secure erase.*/
	if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
					(outbuf[0] & MTIP_SEC_ERASE_MODE)) {
		erasemode = 1;
	}

	mtip_set_timeout(dd, &fis, &timeout, erasemode);

	/* Determine the correct transfer size.*/
	if (force_single_sector)
		transfer_size = ATA_SECT_SIZE;
	else
		transfer_size = ATA_SECT_SIZE * fis.sect_count;

	/* Execute the command.*/
	if (mtip_exec_internal_command(dd->port,
				&fis,
				5,
				dma_buffer,
				transfer_size,
				0,
				timeout) < 0) {
		err = -EIO;
		goto abort;
	}

	task_file_data = readl(dd->port->mmio + PORT_TFDATA);

	if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
		reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
		req_task->io_ports[7] = reply->control;
	} else {
		reply = dd->port->rxfis + RX_FIS_D2H_REG;
		req_task->io_ports[7] = reply->command;
	}

	/* reclaim the DMA buffers.*/
	if (inbuf_dma)
		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
				 DMA_FROM_DEVICE);
	if (outbuf_dma)
		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
				 DMA_TO_DEVICE);
	inbuf_dma  = 0;
	outbuf_dma = 0;

	/* return the ATA registers to the caller.*/
	req_task->io_ports[1] = reply->features;
	req_task->io_ports[2] = reply->sect_count;
	req_task->io_ports[3] = reply->lba_low;
	req_task->io_ports[4] = reply->lba_mid;
	req_task->io_ports[5] = reply->lba_hi;
	req_task->io_ports[6] = reply->device;

	if (req_task->out_flags.all & 1) {

		req_task->hob_ports[3] = reply->lba_low_ex;
		req_task->hob_ports[4] = reply->lba_mid_ex;
		req_task->hob_ports[5] = reply->lba_hi_ex;
		req_task->hob_ports[1] = reply->features_ex;
		req_task->hob_ports[2] = reply->sect_cnt_ex;
	}
	dbg_printk(MTIP_DRV_NAME
		" %s: Completion: stat %x, "
		"err %x, sect_cnt %x, lbalo %x, "
		"lbamid %x, lbahi %x, dev %x\n",
		__func__,
		req_task->io_ports[7],
		req_task->io_ports[1],
		req_task->io_ports[2],
		req_task->io_ports[3],
		req_task->io_ports[4],
		req_task->io_ports[5],
		req_task->io_ports[6]);

	if (taskout) {
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	if (inbuf_dma)
		dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
				 DMA_FROM_DEVICE);
	if (outbuf_dma)
		dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
				 DMA_TO_DEVICE);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}

/*
 * Handle IOCTL calls from the Block Layer.
 *
 * This function is called by the Block Layer when it receives an IOCTL
 * command that it does not understand. If the IOCTL command is not
 * supported this function returns -EINVAL.
 *
 * @dd  Pointer to the driver data structure.
 * @cmd IOCTL command passed from the Block Layer.
 * @arg IOCTL argument passed from the Block Layer.
 *
 * return value
 *	0	The IOCTL completed successfully.
 *	-EINVAL	The specified command is not supported.
 *	-EFAULT	An error occurred copying data to a user space buffer.
 *	-EIO	An error occurred while executing the command.
 */
static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
			 unsigned long arg)
{
	switch (cmd) {
	case HDIO_GET_IDENTITY:
	{
		if (copy_to_user((void __user *)arg, dd->port->identify,
						sizeof(u16) * ATA_ID_WORDS))
			return -EFAULT;
		break;
	}
	case HDIO_DRIVE_CMD:
	{
		u8 drive_command[4];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					 (void __user *) arg,
					 sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_command(dd->port,
					 drive_command,
					 (void __user *) (arg+4)))
			return -EIO;

		/* Copy the status back to the user's buffer. */
		if (copy_to_user((void __user *) arg,
					 drive_command,
					 sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASK:
	{
		u8 drive_command[7];

		/* Copy the user command info to our buffer. */
		if (copy_from_user(drive_command,
					 (void __user *) arg,
					 sizeof(drive_command)))
			return -EFAULT;

		/* Execute the drive command. */
		if (exec_drive_task(dd->port, drive_command))
			return -EIO;

		/* Copy the status back to the user's buffer. */
		if (copy_to_user((void __user *) arg,
					 drive_command,
					 sizeof(drive_command)))
			return -EFAULT;

		break;
	}
	case HDIO_DRIVE_TASKFILE: {
		ide_task_request_t req_task;
		int ret, outtotal;

		if (copy_from_user(&req_task, (void __user *) arg,
					sizeof(req_task)))
			return -EFAULT;

		outtotal = sizeof(req_task);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
					  &req_task, outtotal);

		if (copy_to_user((void __user *) arg, &req_task,
					sizeof(req_task)))
			return -EFAULT;

		return ret;
	}

	default:
		return -EINVAL;
	}
	return 0;
}

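/*
 * Illustrative userspace sketch (editor's assumption, following the
 * HDIO_DRIVE_CMD convention handled above): args are
 * { command, sector, feature, nsect } and any read data lands after the
 * 4-byte header, e.g. a one-sector SMART READ VALUES:
 *
 *	unsigned char args[4 + 512] = { 0xB0, 0, 0xD0, 1 };
 *	ioctl(fd, HDIO_DRIVE_CMD, args);   // status returned in args[0..3]
 */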
2025
/*
2026
* Submit an IO to the hw
2027
*
2028
* This function is called by the block layer to issue an io
2029
* to the device. Upon completion, the callback function will
2030
* be called with the data parameter passed as the callback data.
2031
*
2032
* @dd Pointer to the driver data structure.
2033
* @start First sector to read.
2034
* @nsect Number of sectors to read.
2035
* @tag The tag of this read command.
2036
* @callback Pointer to the function that should be called
2037
* when the read completes.
2038
* @data Callback data passed to the callback function
2039
* when the read completes.
2040
* @dir Direction (read or write)
2041
*
2042
* return value
2043
* 0 The IO completed successfully.
2044
* -ENOMEM The DMA mapping failed.
2045
*/
2046
static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
2047
struct mtip_cmd *command,
2048
struct blk_mq_hw_ctx *hctx)
2049
{
2050
struct mtip_cmd_hdr *hdr =
2051
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
2052
struct host_to_dev_fis *fis;
2053
struct mtip_port *port = dd->port;
2054
int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2055
u64 start = blk_rq_pos(rq);
2056
unsigned int nsect = blk_rq_sectors(rq);
2057
unsigned int nents;
2058
2059
/* Map the scatter list for DMA access */
2060
command->scatter_ents = blk_rq_map_sg(rq, command->sg);
2061
nents = dma_map_sg(&dd->pdev->dev, command->sg,
2062
command->scatter_ents, dma_dir);
2063
if (!nents)
2064
return -ENOMEM;
2065
2066
2067
prefetch(&port->flags);
2068
2069
/*
2070
* The number of retries for this command before it is
2071
* reported as a failure to the upper layers.
2072
*/
2073
command->retries = MTIP_MAX_RETRIES;
2074
2075
/* Fill out fis */
2076
fis = command->command;
2077
fis->type = 0x27;
2078
fis->opts = 1 << 7;
2079
if (dma_dir == DMA_FROM_DEVICE)
2080
fis->command = ATA_CMD_FPDMA_READ;
2081
else
2082
fis->command = ATA_CMD_FPDMA_WRITE;
2083
fis->lba_low = start & 0xFF;
2084
fis->lba_mid = (start >> 8) & 0xFF;
2085
fis->lba_hi = (start >> 16) & 0xFF;
2086
fis->lba_low_ex = (start >> 24) & 0xFF;
2087
fis->lba_mid_ex = (start >> 32) & 0xFF;
2088
fis->lba_hi_ex = (start >> 40) & 0xFF;
2089
fis->device = 1 << 6;
2090
fis->features = nsect & 0xFF;
2091
fis->features_ex = (nsect >> 8) & 0xFF;
2092
fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
2093
fis->sect_cnt_ex = 0;
2094
fis->control = 0;
2095
fis->res2 = 0;
2096
fis->res3 = 0;
2097
fill_command_sg(dd, command, nents);
2098
2099
if (unlikely(command->unaligned))
2100
fis->device |= 1 << 7;
2101
2102
/* Populate the command header */
2103
hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
2104
if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
2105
hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
2106
hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
2107
hdr->byte_count = 0;
2108
2109
command->direction = dma_dir;
2110
2111
/*
2112
* To prevent this command from being issued
2113
* if an internal command is in progress or error handling is active.
2114
*/
2115
if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
2116
set_bit(rq->tag, port->cmds_to_issue);
2117
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2118
return 0;
2119
}
2120
2121
/* Issue the command to the hardware */
2122
mtip_issue_ncq_command(port, rq->tag);
2123
2124
return 0;
2125
}
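
/*
 * Worked example (illustrative): for an FPDMA command starting at
 * LBA 0x0123456789 the 48-bit address is split across the FIS above as
 *
 *	lba_low    = 0x89	lba_low_ex = 0x23
 *	lba_mid    = 0x67	lba_mid_ex = 0x01
 *	lba_hi     = 0x45	lba_hi_ex  = 0x00
 *
 * and, for NCQ, the sector count travels in features/features_ex while
 * sect_count carries the queue tag (tag << 3, as encoded above).
 */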

/*
 * Sysfs status dump.
 *
 * @dev  Pointer to the device structure, passed by the kernel.
 * @attr Pointer to the device_attribute structure passed by the kernel.
 * @buf  Pointer to the char buffer that will receive the stats info.
 *
 * return value
 *	The size, in bytes, of the data copied into buf.
 */
static ssize_t mtip_hw_show_status(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct driver_data *dd = dev_to_disk(dev)->private_data;
	int size = 0;

	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
		size += sprintf(buf, "%s", "thermal_shutdown\n");
	else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
		size += sprintf(buf, "%s", "write_protect\n");
	else
		size += sprintf(buf, "%s", "online\n");

	return size;
}

static DEVICE_ATTR(status, 0444, mtip_hw_show_status, NULL);

static struct attribute *mtip_disk_attrs[] = {
	&dev_attr_status.attr,
	NULL,
};

static const struct attribute_group mtip_disk_attr_group = {
	.attrs = mtip_disk_attrs,
};

static const struct attribute_group *mtip_disk_attr_groups[] = {
	&mtip_disk_attr_group,
	NULL,
};
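
/*
 * Usage sketch (path assumed): the attribute group above is registered
 * through device_add_disk(), so the device state can be read with e.g.
 *
 *	$ cat /sys/block/rssda/status
 *	online
 *
 * where the possible values are online, write_protect and
 * thermal_shutdown, as produced by mtip_hw_show_status().
 */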

static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
				  size_t len, loff_t *offset)
{
	struct driver_data *dd = (struct driver_data *)f->private_data;
	char *buf;
	u32 group_allocated;
	int size = *offset;
	int n, rv = 0;

	if (!len || size)
		return 0;

	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	size += sprintf(&buf[size], "H/ S ACTive : [ 0x");

	for (n = dd->slot_groups-1; n >= 0; n--)
		size += sprintf(&buf[size], "%08X ",
					 readl(dd->port->s_active[n]));

	size += sprintf(&buf[size], "]\n");
	size += sprintf(&buf[size], "H/ Command Issue : [ 0x");

	for (n = dd->slot_groups-1; n >= 0; n--)
		size += sprintf(&buf[size], "%08X ",
					readl(dd->port->cmd_issue[n]));

	size += sprintf(&buf[size], "]\n");
	size += sprintf(&buf[size], "H/ Completed : [ 0x");

	for (n = dd->slot_groups-1; n >= 0; n--)
		size += sprintf(&buf[size], "%08X ",
				readl(dd->port->completed[n]));

	size += sprintf(&buf[size], "]\n");
	size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
				readl(dd->port->mmio + PORT_IRQ_STAT));
	size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
				readl(dd->mmio + HOST_IRQ_STAT));
	size += sprintf(&buf[size], "\n");

	size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");

	for (n = dd->slot_groups-1; n >= 0; n--) {
		if (sizeof(long) > sizeof(u32))
			group_allocated =
				dd->port->cmds_to_issue[n/2] >> (32*(n&1));
		else
			group_allocated = dd->port->cmds_to_issue[n];
		size += sprintf(&buf[size], "%08X ", group_allocated);
	}
	size += sprintf(&buf[size], "]\n");

	*offset = size <= len ? size : len;
	size = copy_to_user(ubuf, buf, *offset);
	if (size)
		rv = -EFAULT;

	kfree(buf);
	return rv ? rv : *offset;
}

static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
				  size_t len, loff_t *offset)
{
	struct driver_data *dd = (struct driver_data *)f->private_data;
	char *buf;
	int size = *offset;
	int rv = 0;

	if (!len || size)
		return 0;

	buf = kzalloc(MTIP_DFS_MAX_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
							dd->port->flags);
	size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
							dd->dd_flag);

	*offset = size <= len ? size : len;
	size = copy_to_user(ubuf, buf, *offset);
	if (size)
		rv = -EFAULT;

	kfree(buf);
	return rv ? rv : *offset;
}

static const struct file_operations mtip_regs_fops = {
	.owner  = THIS_MODULE,
	.open   = simple_open,
	.read   = mtip_hw_read_registers,
};

static const struct file_operations mtip_flags_fops = {
	.owner  = THIS_MODULE,
	.open   = simple_open,
	.read   = mtip_hw_read_flags,
};

static void mtip_hw_debugfs_init(struct driver_data *dd)
{
	dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
	debugfs_create_file("flags", 0444, dd->dfs_node, dd, &mtip_flags_fops);
	debugfs_create_file("registers", 0444, dd->dfs_node, dd,
			    &mtip_regs_fops);
}

static void mtip_hw_debugfs_exit(struct driver_data *dd)
{
	debugfs_remove_recursive(dd->dfs_node);
}
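
/*
 * Usage sketch (paths assumed from the "rssd" debugfs directory created
 * in mtip_init()): with debugfs mounted at /sys/kernel/debug, these dumps
 * can be read with
 *
 *	$ cat /sys/kernel/debug/rssd/rssda/registers
 *	$ cat /sys/kernel/debug/rssd/rssda/flags
 */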

/*
 * Perform any init/resume time hardware setup
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static inline void hba_setup(struct driver_data *dd)
{
	u32 hwdata;

	hwdata = readl(dd->mmio + HOST_HSORG);

	/* interrupt bug workaround: use only 1 IS bit. */
	writel(hwdata |
		HSORG_DISABLE_SLOTGRP_INTR |
		HSORG_DISABLE_SLOTGRP_PXIS,
		dd->mmio + HOST_HSORG);
}

static int mtip_device_unaligned_constrained(struct driver_data *dd)
{
	return (dd->pdev->device == P420M_DEVICE_ID ? 1 : 0);
}

/*
 * Detect the details of the product, and store anything needed
 * into the driver data structure. This includes the product type and
 * version, and the number of slot groups.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	None
 */
static void mtip_detect_product(struct driver_data *dd)
{
	u32 hwdata;
	unsigned int rev, slotgroups;

	/*
	 * HBA base + 0xFC [15:0] - vendor-specific hardware interface
	 * info register:
	 * [15:8] hardware/software interface rev#
	 * [   3] asic-style interface
	 * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
	 */
	hwdata = readl(dd->mmio + HOST_HSORG);

	dd->product_type = MTIP_PRODUCT_UNKNOWN;
	dd->slot_groups = 1;

	if (hwdata & 0x8) {
		dd->product_type = MTIP_PRODUCT_ASICFPGA;
		rev = (hwdata & HSORG_HWREV) >> 8;
		slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
		dev_info(&dd->pdev->dev,
			"ASIC-FPGA design, HS rev 0x%x, "
			"%i slot groups [%i slots]\n",
			rev,
			slotgroups,
			slotgroups * 32);

		if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
			dev_warn(&dd->pdev->dev,
				"Warning: driver only supports "
				"%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
			slotgroups = MTIP_MAX_SLOT_GROUPS;
		}
		dd->slot_groups = slotgroups;
		return;
	}

	dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
}
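
/*
 * Worked example (illustrative): if the low 16 bits of HSORG read back
 * as 0x021B, then
 *
 *	rev        = (0x021B & HSORG_HWREV) >> 8 = 0x02
 *	asic-style = bit 3 set (0x021B & 0x8)
 *	slotgroups = (0x021B & HSORG_SLOTGROUPS) + 1 = 3 + 1 = 4
 *
 * giving 4 slot groups, i.e. 128 command slots.
 */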

/*
 * Blocking wait for FTL rebuild to complete
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	FTL rebuild completed successfully
 *	-EFAULT	FTL rebuild error/timeout/interruption
 */
static int mtip_ftl_rebuild_poll(struct driver_data *dd)
{
	unsigned long timeout, cnt = 0, start;

	dev_warn(&dd->pdev->dev,
		"FTL rebuild in progress. Polling for completion.\n");

	start = jiffies;
	timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);

	do {
		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			return -EFAULT;
		if (mtip_check_surprise_removal(dd))
			return -EFAULT;

		if (mtip_get_identify(dd->port, NULL) < 0)
			return -EFAULT;

		if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
			MTIP_FTL_REBUILD_MAGIC) {
			ssleep(1);
			/* Print message every 3 minutes */
			if (cnt++ >= 180) {
				dev_warn(&dd->pdev->dev,
				"FTL rebuild in progress (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
				cnt = 0;
			}
		} else {
			dev_warn(&dd->pdev->dev,
				"FTL rebuild complete (%d secs).\n",
				jiffies_to_msecs(jiffies - start) / 1000);
			mtip_block_initialize(dd);
			return 0;
		}
	} while (time_before(jiffies, timeout));

	/* Check for timeout */
	dev_err(&dd->pdev->dev,
		"Timed out waiting for FTL rebuild to complete (%d secs).\n",
		jiffies_to_msecs(jiffies - start) / 1000);
	return -EFAULT;
}

static void mtip_softirq_done_fn(struct request *rq)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct driver_data *dd = rq->q->queuedata;

	/* Unmap the DMA scatter list entries */
	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents,
							cmd->direction);

	if (unlikely(cmd->unaligned))
		atomic_inc(&dd->port->cmd_slot_unal);

	blk_mq_end_request(rq, cmd->status);
}

static bool mtip_abort_cmd(struct request *req, void *data)
{
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct driver_data *dd = data;

	dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);

	clear_bit(req->tag, dd->port->cmds_to_issue);
	cmd->status = BLK_STS_IOERR;
	mtip_softirq_done_fn(req);
	return true;
}

static bool mtip_queue_cmd(struct request *req, void *data)
{
	struct driver_data *dd = data;

	set_bit(req->tag, dd->port->cmds_to_issue);
	blk_abort_request(req);
	return true;
}

/*
 * service thread to issue queued commands
 *
 * @data Pointer to the driver data structure.
 *
 * return value
 *	0
 */

static int mtip_service_thread(void *data)
{
	struct driver_data *dd = (struct driver_data *)data;
	unsigned long slot, slot_start, slot_wrap, to;
	unsigned int num_cmd_slots = dd->slot_groups * 32;
	struct mtip_port *port = dd->port;

	while (1) {
		if (kthread_should_stop() ||
			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
			goto st_out;
		clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);

		/*
		 * Sleep until woken with work to do: the condition checks
		 * that neither an internal command is in progress nor
		 * error handling is active.
		 */
		wait_event_interruptible(port->svc_wait, (port->flags) &&
			(port->flags & MTIP_PF_SVC_THD_WORK));

		if (kthread_should_stop() ||
			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
			goto st_out;

		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
				&dd->dd_flag)))
			goto st_out;

		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);

restart_eh:
		/* Demux bits: start with error handling */
		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
			mtip_handle_tfe(dd);
			clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
		}

		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags))
			goto restart_eh;

		if (test_bit(MTIP_PF_TO_ACTIVE_BIT, &port->flags)) {
			to = jiffies + msecs_to_jiffies(5000);

			do {
				mdelay(100);
			} while (atomic_read(&dd->irq_workers_active) != 0 &&
				time_before(jiffies, to));

			if (atomic_read(&dd->irq_workers_active) != 0)
				dev_warn(&dd->pdev->dev,
					"Completion workers still active!");

			blk_mq_quiesce_queue(dd->queue);

			blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);

			set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);

			if (mtip_device_reset(dd))
				blk_mq_tagset_busy_iter(&dd->tags,
							mtip_abort_cmd, dd);

			clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags);

			blk_mq_unquiesce_queue(dd->queue);
		}

		if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
			slot = 1;
			/* used to restrict the loop to one iteration */
			slot_start = num_cmd_slots;
			slot_wrap = 0;
			while (1) {
				slot = find_next_bit(port->cmds_to_issue,
						num_cmd_slots, slot);
				if (slot_wrap == 1) {
					if ((slot_start >= slot) ||
						(slot >= num_cmd_slots))
						break;
				}
				if (unlikely(slot_start == num_cmd_slots))
					slot_start = slot;

				if (unlikely(slot == num_cmd_slots)) {
					slot = 1;
					slot_wrap = 1;
					continue;
				}

				/* Issue the command to the hardware */
				mtip_issue_ncq_command(port, slot);

				clear_bit(slot, port->cmds_to_issue);
			}

			clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
		}

		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
			if (mtip_ftl_rebuild_poll(dd) == 0)
				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
		}
	}

st_out:
	return 0;
}
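
/*
 * Note on the issue loop above (assumption): the scan starts at slot 1
 * because slot 0 appears to be reserved for the driver's internal
 * (non-NCQ) command. The loop walks the cmds_to_issue bitmap once and
 * wraps back to slot 1 exactly one time, so bits set below the starting
 * point are still picked up before the loop exits.
 */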

/*
 * DMA region teardown
 *
 * @dd Pointer to driver_data structure
 *
 * return value
 *	None
 */
static void mtip_dma_free(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;

	if (port->block1)
		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					port->block1, port->block1_dma);

	if (port->command_list) {
		dma_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
				port->command_list, port->command_list_dma);
	}
}

/*
 * DMA region setup
 *
 * @dd Pointer to driver_data structure
 *
 * return value
 *	0	Success
 *	-ENOMEM	Not enough free DMA region space to initialize driver
 */
static int mtip_dma_alloc(struct driver_data *dd)
{
	struct mtip_port *port = dd->port;

	/* Allocate dma memory for RX Fis, Identify, and Sector Buffer */
	port->block1 =
		dma_alloc_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					&port->block1_dma, GFP_KERNEL);
	if (!port->block1)
		return -ENOMEM;

	/* Allocate dma memory for command list */
	port->command_list =
		dma_alloc_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
					&port->command_list_dma, GFP_KERNEL);
	if (!port->command_list) {
		dma_free_coherent(&dd->pdev->dev, BLOCK_DMA_ALLOC_SZ,
					port->block1, port->block1_dma);
		port->block1 = NULL;
		port->block1_dma = 0;
		return -ENOMEM;
	}

	/* Setup all pointers into first DMA region */
	port->rxfis		= port->block1 + AHCI_RX_FIS_OFFSET;
	port->rxfis_dma		= port->block1_dma + AHCI_RX_FIS_OFFSET;
	port->identify		= port->block1 + AHCI_IDFY_OFFSET;
	port->identify_dma	= port->block1_dma + AHCI_IDFY_OFFSET;
	port->log_buf		= port->block1 + AHCI_SECTBUF_OFFSET;
	port->log_buf_dma	= port->block1_dma + AHCI_SECTBUF_OFFSET;
	port->smart_buf		= port->block1 + AHCI_SMARTBUF_OFFSET;
	port->smart_buf_dma	= port->block1_dma + AHCI_SMARTBUF_OFFSET;

	return 0;
}

static int mtip_hw_get_identify(struct driver_data *dd)
{
	struct smart_attr attr242;
	unsigned char *buf;
	int rv;

	if (mtip_get_identify(dd->port, NULL) < 0)
		return -EFAULT;

	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
		MTIP_FTL_REBUILD_MAGIC) {
		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
		return MTIP_FTL_REBUILD_MAGIC;
	}
	mtip_dump_identify(dd->port);

	/* check write protect, over temp and rebuild statuses */
	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
				dd->port->log_buf,
				dd->port->log_buf_dma, 1);
	if (rv) {
		dev_warn(&dd->pdev->dev,
			"Error in READ LOG EXT (10h) command\n");
		/* non-critical error, don't fail the load */
	} else {
		buf = (unsigned char *)dd->port->log_buf;
		if (buf[259] & 0x1) {
			dev_info(&dd->pdev->dev,
				"Write protect bit is set.\n");
			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
		}
		if (buf[288] == 0xF7) {
			dev_info(&dd->pdev->dev,
				"Exceeded Tmax, drive in thermal shutdown.\n");
			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
		}
		if (buf[288] == 0xBF) {
			dev_info(&dd->pdev->dev,
				"Drive indicates rebuild has failed.\n");
			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
		}
	}

	/* get write protect progress */
	memset(&attr242, 0, sizeof(struct smart_attr));
	if (mtip_get_smart_attr(dd->port, 242, &attr242))
		dev_warn(&dd->pdev->dev,
				"Unable to check write protect progress\n");
	else
		dev_info(&dd->pdev->dev,
				"Write protect progress: %u%% (%u blocks)\n",
				attr242.cur, le32_to_cpu(attr242.data));

	return rv;
}

/*
 * Called once for each card.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success, else an error code.
 */
static int mtip_hw_init(struct driver_data *dd)
{
	int i;
	int rv;
	unsigned long timeout, timetaken;

	dd->mmio = pcim_iomap_region(dd->pdev, MTIP_ABAR, MTIP_DRV_NAME);
	if (IS_ERR(dd->mmio)) {
		dev_err(&dd->pdev->dev, "Unable to request / ioremap PCI region\n");
		return PTR_ERR(dd->mmio);
	}

	mtip_detect_product(dd);
	if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
		rv = -EIO;
		goto out1;
	}

	hba_setup(dd);

	dd->port = kzalloc_node(sizeof(struct mtip_port), GFP_KERNEL,
				dd->numa_node);
	if (!dd->port)
		return -ENOMEM;

	/* Continue workqueue setup */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		dd->work[i].port = dd->port;

	/* Enable unaligned IO constraints for some devices */
	if (mtip_device_unaligned_constrained(dd))
		dd->unal_qdepth = MTIP_MAX_UNALIGNED_SLOTS;
	else
		dd->unal_qdepth = 0;

	atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);

	/* Spinlock to prevent concurrent issue */
	for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
		spin_lock_init(&dd->port->cmd_issue_lock[i]);

	/* Set the port mmio base address. */
	dd->port->mmio	= dd->mmio + PORT_OFFSET;
	dd->port->dd	= dd;

	/* DMA allocations */
	rv = mtip_dma_alloc(dd);
	if (rv < 0)
		goto out1;

	/* Setup the pointers to the extended s_active and CI registers. */
	for (i = 0; i < dd->slot_groups; i++) {
		dd->port->s_active[i] =
			dd->port->mmio + i*0x80 + PORT_SCR_ACT;
		dd->port->cmd_issue[i] =
			dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
		dd->port->completed[i] =
			dd->port->mmio + i*0x80 + PORT_SDBV;
	}

	timetaken = jiffies;
	timeout = jiffies + msecs_to_jiffies(30000);
	while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
		 time_before(jiffies, timeout)) {
		mdelay(100);
	}
	if (unlikely(mtip_check_surprise_removal(dd))) {
		timetaken = jiffies - timetaken;
		dev_warn(&dd->pdev->dev,
			"Surprise removal detected at %u ms\n",
			jiffies_to_msecs(timetaken));
		rv = -ENODEV;
		goto out2;
	}
	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
		timetaken = jiffies - timetaken;
		dev_warn(&dd->pdev->dev,
			"Removal detected at %u ms\n",
			jiffies_to_msecs(timetaken));
		rv = -EFAULT;
		goto out2;
	}

	/* Conditionally reset the HBA. */
	if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
		if (mtip_hba_reset(dd) < 0) {
			dev_err(&dd->pdev->dev,
				"Card did not reset within timeout\n");
			rv = -EIO;
			goto out2;
		}
	} else {
		/* Clear any pending interrupts on the HBA */
		writel(readl(dd->mmio + HOST_IRQ_STAT),
			dd->mmio + HOST_IRQ_STAT);
	}

	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Setup the ISR and enable interrupts. */
	rv = request_irq(dd->pdev->irq, mtip_irq_handler, IRQF_SHARED,
			 dev_driver_string(&dd->pdev->dev), dd);
	if (rv) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate IRQ %d\n", dd->pdev->irq);
		goto out2;
	}
	irq_set_affinity_hint(dd->pdev->irq, get_cpu_mask(dd->isr_binding));

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
					dd->mmio + HOST_CTL);

	init_waitqueue_head(&dd->port->svc_wait);

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
		rv = -EFAULT;
		goto out3;
	}

	return rv;

out3:
	/* Disable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);

	/* Release the IRQ. */
	irq_set_affinity_hint(dd->pdev->irq, NULL);
	free_irq(dd->pdev->irq, dd);

out2:
	mtip_deinit_port(dd->port);
	mtip_dma_free(dd);

out1:
	/* Free the memory allocated for the port structure. */
	kfree(dd->port);

	return rv;
}

static int mtip_standby_drive(struct driver_data *dd)
{
	int rv = 0;

	if (dd->sr || !dd->port)
		return -ENODEV;
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
		rv = mtip_standby_immediate(dd->port);
		if (rv)
			dev_warn(&dd->pdev->dev,
				"STANDBY IMMEDIATE failed\n");
	}
	return rv;
}

/*
 * Called to deinitialize an interface.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_exit(struct driver_data *dd)
{
	if (!dd->sr) {
		/* de-initialize the port. */
		mtip_deinit_port(dd->port);

		/* Disable interrupts on the HBA. */
		writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
				dd->mmio + HOST_CTL);
	}

	/* Release the IRQ. */
	irq_set_affinity_hint(dd->pdev->irq, NULL);
	free_irq(dd->pdev->irq, dd);
	msleep(1000);

	/* Free dma regions */
	mtip_dma_free(dd);

	/* Free the memory allocated for the port structure. */
	kfree(dd->port);
	dd->port = NULL;

	return 0;
}

/*
 * Issue a Standby Immediate command to the device.
 *
 * This function is called by the Block Layer just before the
 * system powers off during a shutdown.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_hw_shutdown(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive so that it
	 * saves its state.
	 */
	mtip_standby_drive(dd);

	return 0;
}

/*
 * Suspend function
 *
 * This function is called by the Block Layer just before the
 * system hibernates.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	Suspend was successful
 *	-EFAULT	Suspend was not successful
 */
static int mtip_hw_suspend(struct driver_data *dd)
{
	/*
	 * Send standby immediate (E0h) to the drive
	 * so that it saves its state.
	 */
	if (mtip_standby_drive(dd) != 0) {
		dev_err(&dd->pdev->dev,
			"Failed standby-immediate command\n");
		return -EFAULT;
	}

	/* Disable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
			dd->mmio + HOST_CTL);
	mtip_deinit_port(dd->port);

	return 0;
}

/*
 * Resume function
 *
 * This function is called by the Block Layer as the
 * system resumes.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0	Resume was successful
 *	-EFAULT	Resume was not successful
 */
static int mtip_hw_resume(struct driver_data *dd)
{
	/* Perform any needed hardware setup steps */
	hba_setup(dd);

	/* Reset the HBA */
	if (mtip_hba_reset(dd) != 0) {
		dev_err(&dd->pdev->dev,
			"Unable to reset the HBA\n");
		return -EFAULT;
	}

	/*
	 * Enable the port, DMA engine, and FIS reception specific
	 * h/w in controller.
	 */
	mtip_init_port(dd->port);
	mtip_start_port(dd->port);

	/* Enable interrupts on the HBA. */
	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
			dd->mmio + HOST_CTL);

	return 0;
}

/*
 * Helper function for reusing disk name
 * upon hot insertion.
 */
static int rssd_disk_name_format(char *prefix,
				 int index,
				 char *buf,
				 int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}
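
/*
 * Worked example (illustrative): with prefix "rssd" the index is
 * rendered in bijective base 26, so
 *
 *	index 0  -> "rssda"
 *	index 25 -> "rssdz"
 *	index 26 -> "rssdaa"
 *	index 27 -> "rssdab"
 *
 * mirroring the sd/sdaa naming convention used for SCSI disks.
 */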

/*
 * Block layer IOCTL handler.
 *
 * @dev  Pointer to the block_device structure.
 * @mode Ignored.
 * @cmd  IOCTL command passed from the user application.
 * @arg  Argument passed from the user application.
 *
 * return value
 *	0	IOCTL completed successfully.
 *	-ENOTTY	IOCTL not supported or invalid driver data
 *		structure pointer.
 */
static int mtip_block_ioctl(struct block_device *dev,
			    blk_mode_t mode,
			    unsigned cmd,
			    unsigned long arg)
{
	struct driver_data *dd = dev->bd_disk->private_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!dd)
		return -ENOTTY;

	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
		return -ENOTTY;

	switch (cmd) {
	case BLKFLSBUF:
		return -ENOTTY;
	default:
		return mtip_hw_ioctl(dd, cmd, arg);
	}
}

#ifdef CONFIG_COMPAT
/*
 * Block layer compat IOCTL handler.
 *
 * @dev  Pointer to the block_device structure.
 * @mode Ignored.
 * @cmd  IOCTL command passed from the user application.
 * @arg  Argument passed from the user application.
 *
 * return value
 *	0	IOCTL completed successfully.
 *	-ENOTTY	IOCTL not supported or invalid driver data
 *		structure pointer.
 */
static int mtip_block_compat_ioctl(struct block_device *dev,
				   blk_mode_t mode,
				   unsigned cmd,
				   unsigned long arg)
{
	struct driver_data *dd = dev->bd_disk->private_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!dd)
		return -ENOTTY;

	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
		return -ENOTTY;

	switch (cmd) {
	case BLKFLSBUF:
		return -ENOTTY;
	case HDIO_DRIVE_TASKFILE: {
		struct mtip_compat_ide_task_request_s __user *compat_req_task;
		ide_task_request_t req_task;
		int compat_tasksize, outtotal, ret;

		compat_tasksize =
			sizeof(struct mtip_compat_ide_task_request_s);

		compat_req_task =
			(struct mtip_compat_ide_task_request_s __user *) arg;

		if (copy_from_user(&req_task, (void __user *) arg,
			compat_tasksize - (2 * sizeof(compat_long_t))))
			return -EFAULT;

		if (get_user(req_task.out_size, &compat_req_task->out_size))
			return -EFAULT;

		if (get_user(req_task.in_size, &compat_req_task->in_size))
			return -EFAULT;

		outtotal = sizeof(struct mtip_compat_ide_task_request_s);

		ret = exec_drive_taskfile(dd, (void __user *) arg,
						&req_task, outtotal);

		if (copy_to_user((void __user *) arg, &req_task,
				compat_tasksize -
				(2 * sizeof(compat_long_t))))
			return -EFAULT;

		if (put_user(req_task.out_size, &compat_req_task->out_size))
			return -EFAULT;

		if (put_user(req_task.in_size, &compat_req_task->in_size))
			return -EFAULT;

		return ret;
	}
	default:
		return mtip_hw_ioctl(dd, cmd, arg);
	}
}
#endif

/*
 * Obtain the geometry of the device.
 *
 * You may think that this function is obsolete, but some applications,
 * fdisk for example, still use CHS values. This function describes the
 * device as having 224 heads and 56 sectors per cylinder. These values are
 * chosen so that each cylinder is aligned on a 4KB boundary. Since a
 * partition is described in terms of a start and end cylinder this means
 * that each partition is also 4KB aligned. Non-aligned partitions adversely
 * affect performance.
 *
 * @dev Pointer to the block_device structure.
 * @geo Pointer to a hd_geometry structure.
 *
 * return value
 *	0	Operation completed successfully.
 *	-ENOTTY	An error occurred while reading the drive capacity.
 */
static int mtip_block_getgeo(struct block_device *dev,
				struct hd_geometry *geo)
{
	struct driver_data *dd = dev->bd_disk->private_data;
	sector_t capacity;

	if (!dd)
		return -ENOTTY;

	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not get drive capacity.\n");
		return -ENOTTY;
	}

	geo->heads = 224;
	geo->sectors = 56;
	sector_div(capacity, (geo->heads * geo->sectors));
	geo->cylinders = capacity;
	return 0;
}
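
/*
 * Worked example (illustrative): one cylinder spans
 *
 *	224 heads * 56 sectors = 12544 sectors = 12544 * 512 B
 *	                       = 6422528 B = 1568 * 4096 B
 *
 * so every cylinder (and hence every CHS-described partition)
 * starts on a 4 KiB boundary.
 */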

static void mtip_block_free_disk(struct gendisk *disk)
{
	struct driver_data *dd = disk->private_data;

	ida_free(&rssd_index_ida, dd->index);
	kfree(dd);
}

/*
 * Block device operation function.
 *
 * This structure contains pointers to the functions required by the block
 * layer.
 */
static const struct block_device_operations mtip_block_ops = {
	.ioctl		= mtip_block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtip_block_compat_ioctl,
#endif
	.getgeo		= mtip_block_getgeo,
	.free_disk	= mtip_block_free_disk,
	.owner		= THIS_MODULE
};

static inline bool is_se_active(struct driver_data *dd)
{
	if (unlikely(test_bit(MTIP_PF_SE_ACTIVE_BIT, &dd->port->flags))) {
		if (dd->port->ic_pause_timer) {
			unsigned long to = dd->port->ic_pause_timer +
							msecs_to_jiffies(1000);
			if (time_after(jiffies, to)) {
				clear_bit(MTIP_PF_SE_ACTIVE_BIT,
							&dd->port->flags);
				clear_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
				dd->port->ic_pause_timer = 0;
				wake_up_interruptible(&dd->port->svc_wait);
				return false;
			}
		}
		return true;
	}
	return false;
}

static inline bool is_stopped(struct driver_data *dd, struct request *rq)
{
	if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
		return false;

	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
		return true;
	if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
		return true;
	if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
	    rq_data_dir(rq))
		return true;
	if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
		return true;
	if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
		return true;

	return false;
}

static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
				  struct request *rq)
{
	struct driver_data *dd = hctx->queue->queuedata;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (rq_data_dir(rq) == READ || !dd->unal_qdepth)
		return false;

	/*
	 * If unaligned depth must be limited on this controller, mark it
	 * as unaligned if the IO isn't on a 4k boundary (start or length).
	 */
	if (blk_rq_sectors(rq) <= 64) {
		if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
			cmd->unaligned = 1;
	}

	if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) >= 0)
		return true;

	return false;
}
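
/*
 * Worked example (illustrative): with 512 B sectors, 8 sectors equal
 * 4 KiB, so a 64-sector write starting at sector 8 passes both "& 7"
 * checks and stays on the normal path, while a write starting at
 * sector 9, or one of 63 sectors, is flagged unaligned and must take
 * one of the limited cmd_slot_unal slots.
 */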

static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
					    struct request *rq)
{
	struct driver_data *dd = hctx->queue->queuedata;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct mtip_int_cmd *icmd = cmd->icmd;
	struct mtip_cmd_hdr *hdr =
		dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
	struct mtip_cmd_sg *command_sg;

	if (mtip_commands_active(dd->port))
		return BLK_STS_DEV_RESOURCE;

	hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
	if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
		hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);
	/* Populate the SG list */
	hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len);
	if (icmd->buf_len) {
		command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;

		command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
		command_sg->dba	= cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
		command_sg->dba_upper =
			cpu_to_le32((icmd->buffer >> 16) >> 16);

		hdr->opts |= cpu_to_le32((1 << 16));
	}

	/* Populate the command header */
	hdr->byte_count = 0;

	blk_mq_start_request(rq);
	mtip_issue_non_ncq_command(dd->port, rq->tag);
	return BLK_STS_OK;
}

static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct driver_data *dd = hctx->queue->queuedata;
	struct request *rq = bd->rq;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (blk_rq_is_passthrough(rq))
		return mtip_issue_reserved_cmd(hctx, rq);

	if (unlikely(mtip_check_unal_depth(hctx, rq)))
		return BLK_STS_DEV_RESOURCE;

	if (is_se_active(dd) || is_stopped(dd, rq))
		return BLK_STS_IOERR;

	blk_mq_start_request(rq);

	if (mtip_hw_submit_io(dd, rq, cmd, hctx))
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
			  unsigned int hctx_idx)
{
	struct driver_data *dd = set->driver_data;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (!cmd->command)
		return;

	dma_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, cmd->command,
			  cmd->command_dma);
}

static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
			 unsigned int hctx_idx, unsigned int numa_node)
{
	struct driver_data *dd = set->driver_data;
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->command = dma_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
			&cmd->command_dma, GFP_KERNEL);
	if (!cmd->command)
		return -ENOMEM;

	sg_init_table(cmd->sg, MTIP_MAX_SG);
	return 0;
}

static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req)
{
	struct driver_data *dd = req->q->queuedata;

	if (blk_mq_is_reserved_rq(req)) {
		struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);

		cmd->status = BLK_STS_TIMEOUT;
		blk_mq_complete_request(req);
		return BLK_EH_DONE;
	}

	if (test_bit(req->tag, dd->port->cmds_to_issue))
		goto exit_handler;

	if (test_and_set_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags))
		goto exit_handler;

	wake_up_interruptible(&dd->port->svc_wait);
exit_handler:
	return BLK_EH_RESET_TIMER;
}

static const struct blk_mq_ops mtip_mq_ops = {
	.queue_rq	= mtip_queue_rq,
	.init_request	= mtip_init_cmd,
	.exit_request	= mtip_free_cmd,
	.complete	= mtip_softirq_done_fn,
	.timeout	= mtip_cmd_timeout,
};

/*
 * Block layer initialization function.
 *
 * This function is called once by the PCI layer for each P320
 * device that is connected to the system.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_block_initialize(struct driver_data *dd)
{
	struct queue_limits lim = {
		.physical_block_size	= 4096,
		.max_hw_sectors		= 0xffff,
		.max_segments		= MTIP_MAX_SG,
		.max_segment_size	= 0x400000,
	};
	int rv = 0, wait_for_rebuild = 0;
	sector_t capacity;
	unsigned int index = 0;

	if (dd->disk)
		goto skip_create_disk; /* hw init done, before rebuild */

	if (mtip_hw_init(dd)) {
		rv = -EINVAL;
		goto protocol_init_error;
	}

	memset(&dd->tags, 0, sizeof(dd->tags));
	dd->tags.ops = &mtip_mq_ops;
	dd->tags.nr_hw_queues = 1;
	dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
	dd->tags.reserved_tags = 1;
	dd->tags.cmd_size = sizeof(struct mtip_cmd);
	dd->tags.numa_node = dd->numa_node;
	dd->tags.driver_data = dd;
	dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;

	rv = blk_mq_alloc_tag_set(&dd->tags);
	if (rv) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate request queue\n");
		goto block_queue_alloc_tag_error;
	}

	dd->disk = blk_mq_alloc_disk(&dd->tags, &lim, dd);
	if (IS_ERR(dd->disk)) {
		dev_err(&dd->pdev->dev,
			"Unable to allocate request queue\n");
		rv = -ENOMEM;
		goto block_queue_alloc_init_error;
	}
	dd->queue = dd->disk->queue;

	rv = ida_alloc(&rssd_index_ida, GFP_KERNEL);
	if (rv < 0)
		goto ida_get_error;
	index = rv;

	rv = rssd_disk_name_format("rssd",
				index,
				dd->disk->disk_name,
				DISK_NAME_LEN);
	if (rv)
		goto disk_index_error;

	dd->disk->major		= dd->major;
	dd->disk->first_minor	= index * MTIP_MAX_MINORS;
	dd->disk->minors	= MTIP_MAX_MINORS;
	dd->disk->fops		= &mtip_block_ops;
	dd->disk->private_data	= dd;
	dd->index		= index;

	mtip_hw_debugfs_init(dd);

skip_create_disk:
	/* Initialize the protocol layer. */
	wait_for_rebuild = mtip_hw_get_identify(dd);
	if (wait_for_rebuild < 0) {
		dev_err(&dd->pdev->dev,
			"Protocol layer initialization failed\n");
		rv = -EINVAL;
		goto init_hw_cmds_error;
	}

	/*
	 * if rebuild pending, start the service thread, and delay the block
	 * queue creation and device_add_disk()
	 */
	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
		goto start_service_thread;

	/* Set device limits. */
	dma_set_max_seg_size(&dd->pdev->dev, 0x400000);

	/* Set the capacity of the device in 512 byte sectors. */
	if (!(mtip_hw_get_capacity(dd, &capacity))) {
		dev_warn(&dd->pdev->dev,
			"Could not read drive capacity\n");
		rv = -EIO;
		goto read_capacity_error;
	}
	set_capacity(dd->disk, capacity);

	/* Enable the block device and add it to /dev */
	rv = device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
	if (rv)
		goto read_capacity_error;

	if (dd->mtip_svc_handler) {
		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
		return rv; /* service thread created for handling rebuild */
	}

start_service_thread:
	dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread,
						dd, dd->numa_node,
						"mtip_svc_thd_%02d", index);

	if (IS_ERR(dd->mtip_svc_handler)) {
		dev_err(&dd->pdev->dev, "service thread failed to start\n");
		dd->mtip_svc_handler = NULL;
		rv = -EFAULT;
		goto kthread_run_error;
	}
	wake_up_process(dd->mtip_svc_handler);
	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
		rv = wait_for_rebuild;

	return rv;

kthread_run_error:
	/* Delete our gendisk. This also removes the device from /dev */
	del_gendisk(dd->disk);
read_capacity_error:
init_hw_cmds_error:
	mtip_hw_debugfs_exit(dd);
disk_index_error:
	ida_free(&rssd_index_ida, index);
ida_get_error:
	put_disk(dd->disk);
block_queue_alloc_init_error:
	blk_mq_free_tag_set(&dd->tags);
block_queue_alloc_tag_error:
	mtip_hw_exit(dd); /* De-initialize the protocol layer. */
protocol_init_error:
	return rv;
}

/*
 * Function called by the PCI layer just before the
 * machine shuts down.
 *
 * If a protocol layer shutdown function is present it will be called
 * by this function.
 *
 * @dd Pointer to the driver data structure.
 *
 * return value
 *	0
 */
static int mtip_block_shutdown(struct driver_data *dd)
{
	mtip_hw_shutdown(dd);

	dev_info(&dd->pdev->dev,
		"Shutting down %s ...\n", dd->disk->disk_name);

	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
		del_gendisk(dd->disk);

	blk_mq_free_tag_set(&dd->tags);
	put_disk(dd->disk);
	return 0;
}

static int mtip_block_suspend(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev,
		"Suspending %s ...\n", dd->disk->disk_name);
	mtip_hw_suspend(dd);
	return 0;
}

static int mtip_block_resume(struct driver_data *dd)
{
	dev_info(&dd->pdev->dev, "Resuming %s ...\n",
		dd->disk->disk_name);
	mtip_hw_resume(dd);
	return 0;
}

static void drop_cpu(int cpu)
{
	cpu_use[cpu]--;
}

static int get_least_used_cpu_on_node(int node)
{
	int cpu, least_used_cpu, least_cnt;
	const struct cpumask *node_mask;

	node_mask = cpumask_of_node(node);
	least_used_cpu = cpumask_first(node_mask);
	least_cnt = cpu_use[least_used_cpu];
	cpu = least_used_cpu;

	for_each_cpu(cpu, node_mask) {
		if (cpu_use[cpu] < least_cnt) {
			least_used_cpu = cpu;
			least_cnt = cpu_use[cpu];
		}
	}
	cpu_use[least_used_cpu]++;
	return least_used_cpu;
}

/* Helper for selecting a node in round robin mode */
static inline int mtip_get_next_rr_node(void)
{
	static int next_node = NUMA_NO_NODE;

	if (next_node == NUMA_NO_NODE) {
		next_node = first_online_node;
		return next_node;
	}

	next_node = next_online_node(next_node);
	if (next_node == MAX_NUMNODES)
		next_node = first_online_node;
	return next_node;
}
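
/*
 * Worked example (illustrative): with nodes 0 and 1 online, successive
 * calls return 0, 1, 0, 1, ... so devices that report no NUMA locality
 * are spread evenly across the online nodes.
 */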

static DEFINE_HANDLER(0);
static DEFINE_HANDLER(1);
static DEFINE_HANDLER(2);
static DEFINE_HANDLER(3);
static DEFINE_HANDLER(4);
static DEFINE_HANDLER(5);
static DEFINE_HANDLER(6);
static DEFINE_HANDLER(7);

static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev)
{
	unsigned short pcie_dev_ctrl;

	if (pci_is_pcie(pdev)) {
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &pcie_dev_ctrl);
		if (pcie_dev_ctrl & PCI_EXP_DEVCTL_NOSNOOP_EN ||
		    pcie_dev_ctrl & PCI_EXP_DEVCTL_RELAX_EN) {
			dev_info(&dd->pdev->dev,
				"Disabling ERO/No-Snoop on bridge device %04x:%04x\n",
				pdev->vendor, pdev->device);
			pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN |
						PCI_EXP_DEVCTL_RELAX_EN);
			pcie_capability_write_word(pdev, PCI_EXP_DEVCTL,
						pcie_dev_ctrl);
		}
	}
}

static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev)
{
	/*
	 * This workaround is specific to AMD/ATI chipset with a PCI upstream
	 * device with device id 0x5aXX
	 */
	if (pdev->bus && pdev->bus->self) {
		if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI &&
		    ((pdev->bus->self->device & 0xff00) == 0x5a00)) {
			mtip_disable_link_opts(dd, pdev->bus->self);
		} else {
			/* Check further up the topology */
			struct pci_dev *parent_dev = pdev->bus->self;
			if (parent_dev->bus &&
				parent_dev->bus->parent &&
				parent_dev->bus->parent->self &&
				parent_dev->bus->parent->self->vendor ==
					 PCI_VENDOR_ID_ATI &&
				(parent_dev->bus->parent->self->device &
					0xff00) == 0x5a00) {
				mtip_disable_link_opts(dd,
					parent_dev->bus->parent->self);
			}
		}
	}
}

/*
 * Called for each supported PCI device detected.
 *
 * This function allocates the private data structure, enables the
 * PCI device and then calls the block layer initialization function.
 *
 * return value
 *	0 on success else an error code.
 */
static int mtip_pci_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	int rv = 0;
	struct driver_data *dd = NULL;
	char cpu_list[256];
	const struct cpumask *node_mask;
	int cpu, i = 0, j = 0;
	int my_node = NUMA_NO_NODE;

	/* Allocate memory for this device's private data. */
	my_node = pcibus_to_node(pdev->bus);
	if (my_node != NUMA_NO_NODE) {
		if (!node_online(my_node))
			my_node = mtip_get_next_rr_node();
	} else {
		dev_info(&pdev->dev, "Kernel not reporting proximity, choosing a node\n");
		my_node = mtip_get_next_rr_node();
	}
	dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
		my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
		cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());

	dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
	if (!dd)
		return -ENOMEM;

	/* Attach the private data to this PCI device. */
	pci_set_drvdata(pdev, dd);

	rv = pcim_enable_device(pdev);
	if (rv < 0) {
		dev_err(&pdev->dev, "Unable to enable device\n");
		goto iomap_err;
	}

	rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rv) {
		dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
		goto iomap_err;
	}

	/* Copy the info we may need later into the private data structure. */
	dd->major	= mtip_major;
	dd->instance	= instance;
	dd->pdev	= pdev;
	dd->numa_node	= my_node;

	memset(dd->workq_name, 0, 32);
	snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);

	dd->isr_workq = create_workqueue(dd->workq_name);
	if (!dd->isr_workq) {
		dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
		rv = -ENOMEM;
		goto iomap_err;
	}

	memset(cpu_list, 0, sizeof(cpu_list));

	node_mask = cpumask_of_node(dd->numa_node);
	if (!cpumask_empty(node_mask)) {
		for_each_cpu(cpu, node_mask)
		{
			snprintf(&cpu_list[j], 256 - j, "%d ", cpu);
			j = strlen(cpu_list);
		}

		dev_info(&pdev->dev, "Node %d on package %d has %d cpu(s): %s\n",
			dd->numa_node,
			topology_physical_package_id(cpumask_first(node_mask)),
			nr_cpus_node(dd->numa_node),
			cpu_list);
	} else
		dev_dbg(&pdev->dev, "mtip32xx: node_mask empty\n");

	dd->isr_binding = get_least_used_cpu_on_node(dd->numa_node);
	dev_info(&pdev->dev, "Initial IRQ binding node:cpu %d:%d\n",
		cpu_to_node(dd->isr_binding), dd->isr_binding);

	/* first worker context always runs in ISR */
	dd->work[0].cpu_binding = dd->isr_binding;
	dd->work[1].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
	dd->work[2].cpu_binding = get_least_used_cpu_on_node(dd->numa_node);
	dd->work[3].cpu_binding = dd->work[0].cpu_binding;
	dd->work[4].cpu_binding = dd->work[1].cpu_binding;
	dd->work[5].cpu_binding = dd->work[2].cpu_binding;
	dd->work[6].cpu_binding = dd->work[2].cpu_binding;
	dd->work[7].cpu_binding = dd->work[1].cpu_binding;

	/* Log the bindings */
	for_each_present_cpu(cpu) {
		memset(cpu_list, 0, sizeof(cpu_list));
		for (i = 0, j = 0; i < MTIP_MAX_SLOT_GROUPS; i++) {
			if (dd->work[i].cpu_binding == cpu) {
				snprintf(&cpu_list[j], 256 - j, "%d ", i);
				j = strlen(cpu_list);
			}
		}
		if (j)
			dev_info(&pdev->dev, "CPU %d: WQs %s\n", cpu, cpu_list);
	}

	INIT_WORK(&dd->work[0].work, mtip_workq_sdbf0);
	INIT_WORK(&dd->work[1].work, mtip_workq_sdbf1);
	INIT_WORK(&dd->work[2].work, mtip_workq_sdbf2);
	INIT_WORK(&dd->work[3].work, mtip_workq_sdbf3);
	INIT_WORK(&dd->work[4].work, mtip_workq_sdbf4);
	INIT_WORK(&dd->work[5].work, mtip_workq_sdbf5);
	INIT_WORK(&dd->work[6].work, mtip_workq_sdbf6);
	INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);

	pci_set_master(pdev);
	rv = pci_enable_msi(pdev);
	if (rv) {
		dev_warn(&pdev->dev,
			"Unable to enable MSI interrupt.\n");
		goto msi_initialize_err;
	}

	mtip_fix_ero_nosnoop(dd, pdev);

	/* Initialize the block layer. */
	rv = mtip_block_initialize(dd);
	if (rv < 0) {
		dev_err(&pdev->dev,
			"Unable to initialize block layer\n");
		goto block_initialize_err;
	}

	/*
	 * Increment the instance count so that each device has a unique
	 * instance number.
	 */
	instance++;
	if (rv != MTIP_FTL_REBUILD_MAGIC)
		set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
	else
		rv = 0; /* device in rebuild state, return 0 from probe */

	goto done;

block_initialize_err:
	pci_disable_msi(pdev);

msi_initialize_err:
	if (dd->isr_workq) {
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}

iomap_err:
	kfree(dd);
	pci_set_drvdata(pdev, NULL);
	return rv;
done:
	return rv;
}

/*
 * Called for each probed device when the device is removed or the
 * driver is unloaded.
 *
 * return value
 *	None
 */
static void mtip_pci_remove(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);
	unsigned long to;

	mtip_check_surprise_removal(dd);
	synchronize_irq(dd->pdev->irq);

	/* Spin until workers are done */
	to = jiffies + msecs_to_jiffies(4000);
	do {
		msleep(20);
	} while (atomic_read(&dd->irq_workers_active) != 0 &&
		time_before(jiffies, to));

	if (atomic_read(&dd->irq_workers_active) != 0) {
		dev_warn(&dd->pdev->dev,
			"Completion workers still active!\n");
	}

	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);

	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
		del_gendisk(dd->disk);

	mtip_hw_debugfs_exit(dd);

	if (dd->mtip_svc_handler) {
		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
		wake_up_interruptible(&dd->port->svc_wait);
		kthread_stop(dd->mtip_svc_handler);
	}

	if (!dd->sr) {
		/*
		 * Explicitly wait here for IOs to quiesce,
		 * as mtip_standby_drive usually won't wait for IOs.
		 */
		if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
			mtip_standby_drive(dd);
	} else
		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
						dd->disk->disk_name);

	blk_mq_free_tag_set(&dd->tags);

	/* De-initialize the protocol layer. */
	mtip_hw_exit(dd);

	if (dd->isr_workq) {
		destroy_workqueue(dd->isr_workq);
		drop_cpu(dd->work[0].cpu_binding);
		drop_cpu(dd->work[1].cpu_binding);
		drop_cpu(dd->work[2].cpu_binding);
	}

	pci_disable_msi(pdev);

	pci_set_drvdata(pdev, NULL);

	put_disk(dd->disk);
}

/*
 * Called for each probed device when the device is suspended.
 *
 * return value
 *	0	Success
 *	<0	Error
 */
static int __maybe_unused mtip_pci_suspend(struct device *dev)
{
	int rv = 0;
	struct driver_data *dd = dev_get_drvdata(dev);

	set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);

	/* Disable ports & interrupts then send standby immediate */
	rv = mtip_block_suspend(dd);
	if (rv < 0)
		dev_err(dev, "Failed to suspend controller\n");

	return rv;
}

/*
 * Called for each probed device when the device is resumed.
 *
 * return value
 *	0	Success
 *	<0	Error
 */
static int __maybe_unused mtip_pci_resume(struct device *dev)
{
	int rv = 0;
	struct driver_data *dd = dev_get_drvdata(dev);

	/*
	 * Calls hbaReset, initPort, & startPort function
	 * then enables interrupts
	 */
	rv = mtip_block_resume(dd);
	if (rv < 0)
		dev_err(dev, "Unable to resume\n");

	clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);

	return rv;
}

/*
 * Shutdown routine
 *
 * return value
 *	None
 */
static void mtip_pci_shutdown(struct pci_dev *pdev)
{
	struct driver_data *dd = pci_get_drvdata(pdev);

	if (dd)
		mtip_block_shutdown(dd);
}

/* Table of device ids supported by this driver. */
static const struct pci_device_id mtip_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
	{ 0 }
};

static SIMPLE_DEV_PM_OPS(mtip_pci_pm_ops, mtip_pci_suspend, mtip_pci_resume);

/* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = {
	.name			= MTIP_DRV_NAME,
	.id_table		= mtip_pci_tbl,
	.probe			= mtip_pci_probe,
	.remove			= mtip_pci_remove,
	.driver.pm		= &mtip_pci_pm_ops,
	.shutdown		= mtip_pci_shutdown,
};

MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);

/*
 * Module initialization function.
 *
 * Called once when the module is loaded. This function allocates a major
 * block device number to the Cyclone devices and registers the PCI layer
 * of the driver.
 *
 * Return value
 *	0 on success else error code.
 */
static int __init mtip_init(void)
{
	int error;

	pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");

	/* Allocate a major block device number to use with this driver. */
	error = register_blkdev(0, MTIP_DRV_NAME);
	if (error <= 0) {
		pr_err("Unable to register block device (%d)\n",
		error);
		return -EBUSY;
	}
	mtip_major = error;

	dfs_parent = debugfs_create_dir("rssd", NULL);

	/* Register our PCI operations. */
	error = pci_register_driver(&mtip_pci_driver);
	if (error) {
		debugfs_remove(dfs_parent);
		unregister_blkdev(mtip_major, MTIP_DRV_NAME);
	}

	return error;
}

/*
 * Module de-initialization function.
 *
 * Called once when the module is unloaded. This function deallocates
 * the major block device number allocated by mtip_init() and
 * unregisters the PCI layer of the driver.
 *
 * Return value
 *	none
 */
static void __exit mtip_exit(void)
{
	/* Release the allocated major block device number. */
	unregister_blkdev(mtip_major, MTIP_DRV_NAME);

	/* Unregister the PCI driver. */
	pci_unregister_driver(&mtip_pci_driver);

	debugfs_remove_recursive(dfs_parent);
}

MODULE_AUTHOR("Micron Technology, Inc");
MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MTIP_DRV_VERSION);

module_init(mtip_init);
module_exit(mtip_exit);