GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/ide/ide-disk.c
/*
 * Copyright (C) 1994-1998   Linus Torvalds & authors (see below)
 * Copyright (C) 1998-2002   Linux ATA Development
 *                           Andre Hedrick <[email protected]>
 * Copyright (C) 2003        Red Hat
 * Copyright (C) 2003-2005, 2007  Bartlomiej Zolnierkiewicz
 */

/*
 * Mostly written by Mark Lord <[email protected]>
 *                and Gadi Oxman <[email protected]>
 *                and Andre Hedrick <[email protected]>
 *
 * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/leds.h>
#include <linux/ide.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>

#include "ide-disk.h"

static const u8 ide_rw_cmds[] = {
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
};
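
/*
 * ide_rw_cmds[] is laid out as {read, write} pairs: multi-sector PIO
 * (28-bit, then 48-bit), single-sector PIO (28-bit, then 48-bit) and
 * DMA (28-bit, then 48-bit).  ide_tf_set_cmd() below picks a base index
 * of 0 (multi-sector PIO), 4 (PIO) or 8 (DMA) and adds 2 for LBA48 plus
 * 1 for writes to select the opcode.
 */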
static void ide_tf_set_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 dma)
{
        u8 index, lba48, write;

        lba48 = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
        write = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;

        if (dma) {
                cmd->protocol = ATA_PROT_DMA;
                index = 8;
        } else {
                cmd->protocol = ATA_PROT_PIO;
                if (drive->mult_count) {
                        cmd->tf_flags |= IDE_TFLAG_MULTI_PIO;
                        index = 0;
                } else
                        index = 4;
        }

        cmd->tf.command = ide_rw_cmds[index + lba48 + write];
}

/*
 * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
 * using LBA if supported, or CHS otherwise, to address sectors.
 */
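/*
 * On hosts flagged IDE_HFLAG_NO_LBA48_DMA the transfer is downgraded
 * before the taskfile is built: requests that cross the 28-bit boundary
 * are sent by PIO, everything else falls back to LBA28.  If submitting
 * the DMA taskfile returns ide_stopped, the request is retried once in
 * PIO mode (IDE_TFLAG_DMA_PIO_FALLBACK).
 */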
static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                                        sector_t block)
{
        ide_hwif_t *hwif = drive->hwif;
        u16 nsectors = (u16)blk_rq_sectors(rq);
        u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
        u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
        struct ide_cmd cmd;
        struct ide_taskfile *tf = &cmd.tf;
        ide_startstop_t rc;

        if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
                if (block + blk_rq_sectors(rq) > 1ULL << 28)
                        dma = 0;
                else
                        lba48 = 0;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

        if (drive->dev_flags & IDE_DFLAG_LBA) {
                if (lba48) {
                        pr_debug("%s: LBA=0x%012llx\n", drive->name,
                                 (unsigned long long)block);

                        tf->nsect  = nsectors & 0xff;
                        tf->lbal   = (u8) block;
                        tf->lbam   = (u8)(block >>  8);
                        tf->lbah   = (u8)(block >> 16);
                        tf->device = ATA_LBA;

                        tf = &cmd.hob;
                        tf->nsect = (nsectors >> 8) & 0xff;
                        tf->lbal  = (u8)(block >> 24);
                        if (sizeof(block) != 4) {
                                tf->lbam = (u8)((u64)block >> 32);
                                tf->lbah = (u8)((u64)block >> 40);
                        }

                        cmd.valid.out.hob = IDE_VALID_OUT_HOB;
                        cmd.valid.in.hob  = IDE_VALID_IN_HOB;
                        cmd.tf_flags |= IDE_TFLAG_LBA48;
                } else {
                        tf->nsect  = nsectors & 0xff;
                        tf->lbal   = block;
                        tf->lbam   = block >>= 8;
                        tf->lbah   = block >>= 8;
                        tf->device = ((block >> 8) & 0xf) | ATA_LBA;
                }
        } else {
                unsigned int sect, head, cyl, track;

                track = (int)block / drive->sect;
                sect  = (int)block % drive->sect + 1;
                head  = track % drive->head;
                cyl   = track / drive->head;

                pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);

                tf->nsect  = nsectors & 0xff;
                tf->lbal   = sect;
                tf->lbam   = cyl;
                tf->lbah   = cyl >> 8;
                tf->device = head;
        }

        cmd.tf_flags |= IDE_TFLAG_FS;

        if (rq_data_dir(rq))
                cmd.tf_flags |= IDE_TFLAG_WRITE;

        ide_tf_set_cmd(drive, &cmd, dma);
        cmd.rq = rq;

        if (dma == 0) {
                ide_init_sg_cmd(&cmd, nsectors << 9);
                ide_map_sg(drive, &cmd);
        }

        rc = do_rw_taskfile(drive, &cmd);

        if (rc == ide_stopped && dma) {
                /* fallback to PIO */
                cmd.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
                ide_tf_set_cmd(drive, &cmd, 0);
                ide_init_sg_cmd(&cmd, nsectors << 9);
                rc = do_rw_taskfile(drive, &cmd);
        }

        return rc;
}

/*
 * 268435455  == 137439 MB or 28bit limit
 * 320173056  == 163929 MB or 48bit addressing
 * 1073741822 == 549756 MB or 48bit addressing fake drive
 */

static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
                                      sector_t block)
{
        ide_hwif_t *hwif = drive->hwif;

        BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
        BUG_ON(rq->cmd_type != REQ_TYPE_FS);

        ledtrig_ide_activity();

        pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
                 drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
                 (unsigned long long)block, blk_rq_sectors(rq),
                 (unsigned long)rq->buffer);

        if (hwif->rw_disk)
                hwif->rw_disk(drive, rq);

        return __ide_do_rw_disk(drive, rq, block);
}

/*
 * Queries for true maximum capacity of the drive.
 * Returns maximum LBA address (> 0) of the drive, 0 if failed.
 */
static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
{
        struct ide_cmd cmd;
        struct ide_taskfile *tf = &cmd.tf;
        u64 addr = 0;

        memset(&cmd, 0, sizeof(cmd));
        if (lba48)
                tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
        else
                tf->command = ATA_CMD_READ_NATIVE_MAX;
        tf->device = ATA_LBA;

        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        if (lba48) {
                cmd.valid.out.hob = IDE_VALID_OUT_HOB;
                cmd.valid.in.hob  = IDE_VALID_IN_HOB;
                cmd.tf_flags = IDE_TFLAG_LBA48;
        }

        ide_no_data_taskfile(drive, &cmd);

        /* if OK, compute maximum address value */
        if (!(tf->status & ATA_ERR))
                addr = ide_get_lba_addr(&cmd, lba48) + 1;

        return addr;
}

/*
 * Sets maximum virtual LBA address of the drive.
 * Returns new maximum virtual LBA address (> 0) or 0 on failure.
 */
static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
{
        struct ide_cmd cmd;
        struct ide_taskfile *tf = &cmd.tf;
        u64 addr_set = 0;

        addr_req--;

        memset(&cmd, 0, sizeof(cmd));
        tf->lbal = (addr_req >>  0) & 0xff;
        tf->lbam = (addr_req >>= 8) & 0xff;
        tf->lbah = (addr_req >>= 8) & 0xff;
        if (lba48) {
                cmd.hob.lbal = (addr_req >>= 8) & 0xff;
                cmd.hob.lbam = (addr_req >>= 8) & 0xff;
                cmd.hob.lbah = (addr_req >>= 8) & 0xff;
                tf->command = ATA_CMD_SET_MAX_EXT;
        } else {
                tf->device = (addr_req >>= 8) & 0x0f;
                tf->command = ATA_CMD_SET_MAX;
        }
        tf->device |= ATA_LBA;

        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
        if (lba48) {
                cmd.valid.out.hob = IDE_VALID_OUT_HOB;
                cmd.valid.in.hob  = IDE_VALID_IN_HOB;
                cmd.tf_flags = IDE_TFLAG_LBA48;
        }

        ide_no_data_taskfile(drive, &cmd);

        /* if OK, compute maximum address value */
        if (!(tf->status & ATA_ERR))
                addr_set = ide_get_lba_addr(&cmd, lba48) + 1;

        return addr_set;
}
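
/* Convert a count of 512-byte sectors to decimal megabytes (10^6 bytes). */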
static unsigned long long sectors_to_MB(unsigned long long n)
{
        n <<= 9;                /* make it bytes */
        do_div(n, 1000000);     /* make it MB */
        return n;
}

/*
 * Some disks report total number of sectors instead of
 * maximum sector address. We list them here.
 */
static const struct drive_list_entry hpa_list[] = {
        { "ST340823A", NULL },
        { "ST320413A", NULL },
        { "ST310211A", NULL },
        { NULL, NULL }
};

static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48)
{
        u64 capacity, set_max;

        capacity = drive->capacity64;
        set_max  = idedisk_read_native_max_address(drive, lba48);

        if (ide_in_drive_list(drive->id, hpa_list)) {
                /*
                 * Since we are inclusive wrt firmware revisions, do this
                 * extra check and apply the workaround only when needed.
                 */
                if (set_max == capacity + 1)
                        set_max--;
        }

        return set_max;
}

static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48)
{
        set_max = idedisk_set_max_address(drive, set_max, lba48);
        if (set_max)
                drive->capacity64 = set_max;

        return set_max;
}
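
/*
 * Compare the current capacity with the native capacity reported by
 * READ NATIVE MAX ADDRESS.  If the native capacity is larger, a Host
 * Protected Area is hiding part of the disk: remember the native size
 * in probed_capacity and, if IDE_DFLAG_NOHPA is set, try to unhide it
 * by raising the drive's max address to the native value.
 */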
static void idedisk_check_hpa(ide_drive_t *drive)
{
        u64 capacity, set_max;
        int lba48 = ata_id_lba48_enabled(drive->id);

        capacity = drive->capacity64;
        set_max  = ide_disk_hpa_get_native_capacity(drive, lba48);

        if (set_max <= capacity)
                return;

        drive->probed_capacity = set_max;

        printk(KERN_INFO "%s: Host Protected Area detected.\n"
                         "\tcurrent capacity is %llu sectors (%llu MB)\n"
                         "\tnative capacity is %llu sectors (%llu MB)\n",
                         drive->name,
                         capacity, sectors_to_MB(capacity),
                         set_max, sectors_to_MB(set_max));

        if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0)
                return;

        set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48);
        if (set_max)
                printk(KERN_INFO "%s: Host Protected Area disabled.\n",
                       drive->name);
}
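
/*
 * Determine the usable capacity from the IDENTIFY data: the 48-bit LBA
 * count when LBA48 is enabled, the 28-bit LBA count when plain LBA looks
 * sane, or the cylinders * heads * sectors product for pure CHS drives.
 * The result may then be corrected for a Host Protected Area and clamped
 * to 2^28 sectors when LBA48 cannot be used.
 */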
static int ide_disk_get_capacity(ide_drive_t *drive)
{
        u16 *id = drive->id;
        int lba;

        if (ata_id_lba48_enabled(id)) {
                /* drive speaks 48-bit LBA */
                lba = 1;
                drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
        } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
                /* drive speaks 28-bit LBA */
                lba = 1;
                drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        } else {
                /* drive speaks boring old 28-bit CHS */
                lba = 0;
                drive->capacity64 = drive->cyl * drive->head * drive->sect;
        }

        drive->probed_capacity = drive->capacity64;

        if (lba) {
                drive->dev_flags |= IDE_DFLAG_LBA;

                /*
                 * If this device supports the Host Protected Area feature set,
                 * then we may need to change our opinion about its capacity.
                 */
                if (ata_id_hpa_enabled(id))
                        idedisk_check_hpa(drive);
        }

        /* limit drive capacity to 137GB if LBA48 cannot be used */
        if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
            drive->capacity64 > 1ULL << 28) {
                printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
                       "%llu sectors (%llu MB)\n",
                       drive->name, (unsigned long long)drive->capacity64,
                       sectors_to_MB(drive->capacity64));
                drive->probed_capacity = drive->capacity64 = 1ULL << 28;
        }

        if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
            (drive->dev_flags & IDE_DFLAG_LBA48)) {
                if (drive->capacity64 > 1ULL << 28) {
                        printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
                               " will be used for accessing sectors "
                               "> %u\n", drive->name, 1 << 28);
                } else
                        drive->dev_flags &= ~IDE_DFLAG_LBA48;
        }

        return 0;
}

static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
{
        u16 *id = drive->id;
        int lba48 = ata_id_lba48_enabled(id);

        if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 ||
            ata_id_hpa_enabled(id) == 0)
                return;

        /*
         * according to the spec the SET MAX ADDRESS command shall be
         * immediately preceded by a READ NATIVE MAX ADDRESS command
         */
        if (!ide_disk_hpa_get_native_capacity(drive, lba48))
                return;

        if (ide_disk_hpa_set_capacity(drive, drive->probed_capacity, lba48))
                drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
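
/*
 * Request-queue prep function: ordinary requests pass through untouched,
 * while flush requests (REQ_FLUSH) are turned into ATA cache-flush
 * taskfiles, using FLUSH CACHE EXT when the drive supports it and the
 * capacity needs 48-bit addressing.
 */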
static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
{
        ide_drive_t *drive = q->queuedata;
        struct ide_cmd *cmd;

        if (!(rq->cmd_flags & REQ_FLUSH))
                return BLKPREP_OK;

        cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

        /* FIXME: map struct ide_taskfile on rq->cmd[] */
        BUG_ON(cmd == NULL);

        if (ata_id_flush_ext_enabled(drive->id) &&
            (drive->capacity64 >= (1UL << 28)))
                cmd->tf.command = ATA_CMD_FLUSH_EXT;
        else
                cmd->tf.command = ATA_CMD_FLUSH;
        cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd->tf_flags = IDE_TFLAG_DYN;
        cmd->protocol = ATA_PROT_NODATA;

        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq->special = cmd;
        cmd->rq = rq;

        return BLKPREP_OK;
}

ide_devset_get(multcount, mult_count);

/*
 * This is tightly woven into the driver->do_special handling and can not
 * be touched.  DON'T do it again until a total personality rewrite is
 * committed.
 */
static int set_multcount(ide_drive_t *drive, int arg)
{
        struct request *rq;
        int error;

        if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
                return -EINVAL;

        if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
                return -EBUSY;

        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;

        drive->mult_req = arg;
        drive->special_flags |= IDE_SFLAG_SET_MULTMODE;
        error = blk_execute_rq(drive->queue, NULL, rq, 0);
        blk_put_request(rq);

        return (drive->mult_count == arg) ? 0 : -EIO;
}

ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);

static int set_nowerr(ide_drive_t *drive, int arg)
{
        if (arg < 0 || arg > 1)
                return -EINVAL;

        if (arg)
                drive->dev_flags |= IDE_DFLAG_NOWERR;
        else
                drive->dev_flags &= ~IDE_DFLAG_NOWERR;

        drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;

        return 0;
}
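
/*
 * Issue SET FEATURES: 'feature' is the subcommand (Features register) and
 * 'nsect' its argument (Sector Count register), e.g. the acoustic
 * management level for SETFEATURES_AAM_ON.
 */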
static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.feature = feature;
        cmd.tf.nsect   = nsect;
        cmd.tf.command = ATA_CMD_SET_FEATURES;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

        return ide_no_data_taskfile(drive, &cmd);
}
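
/*
 * Decide whether the block layer may send cache-flush requests to this
 * drive: only when write caching is on, the drive advertises FLUSH CACHE,
 * flushes have not been disabled (IDE_DFLAG_NOFLUSH), and either LBA48 is
 * not in use, the capacity fits in 28 bits, or FLUSH CACHE EXT is
 * available.  If so, REQ_FLUSH support is enabled and idedisk_prep_fn()
 * installed on the queue.
 */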
static void update_flush(ide_drive_t *drive)
{
        u16 *id = drive->id;
        unsigned flush = 0;

        if (drive->dev_flags & IDE_DFLAG_WCACHE) {
                unsigned long long capacity;
                int barrier;
                /*
                 * We must avoid issuing commands a drive does not
                 * understand or we may crash it. We check flush cache
                 * is supported. We also check we have the LBA48 flush
                 * cache if the drive capacity is too large. By this
                 * time we have trimmed the drive capacity if LBA48 is
                 * not available so we don't need to recheck that.
                 */
                capacity = ide_gd_capacity(drive);
                barrier = ata_id_flush_enabled(id) &&
                        (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
                        ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
                         capacity <= (1ULL << 28) ||
                         ata_id_flush_ext_enabled(id));

                printk(KERN_INFO "%s: cache flushes %ssupported\n",
                       drive->name, barrier ? "" : "not ");

                if (barrier) {
                        flush = REQ_FLUSH;
                        blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
                }
        }

        blk_queue_flush(drive->queue, flush);
}

ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);

static int set_wcache(ide_drive_t *drive, int arg)
{
        int err = 1;

        if (arg < 0 || arg > 1)
                return -EINVAL;

        if (ata_id_flush_enabled(drive->id)) {
                err = ide_do_setfeature(drive,
                        arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
                if (err == 0) {
                        if (arg)
                                drive->dev_flags |= IDE_DFLAG_WCACHE;
                        else
                                drive->dev_flags &= ~IDE_DFLAG_WCACHE;
                }
        }

        update_flush(drive);

        return err;
}

static int do_idedisk_flushcache(ide_drive_t *drive)
{
        struct ide_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        if (ata_id_flush_ext_enabled(drive->id))
                cmd.tf.command = ATA_CMD_FLUSH_EXT;
        else
                cmd.tf.command = ATA_CMD_FLUSH;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

        return ide_no_data_taskfile(drive, &cmd);
}

ide_devset_get(acoustic, acoustic);

static int set_acoustic(ide_drive_t *drive, int arg)
{
        if (arg < 0 || arg > 254)
                return -EINVAL;

        ide_do_setfeature(drive,
                arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);

        drive->acoustic = arg;

        return 0;
}

ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);

/*
 * drive->addressing:
 *   0: 28-bit
 *   1: 48-bit
 *   2: 48-bit capable doing 28-bit
 */
static int set_addressing(ide_drive_t *drive, int arg)
{
        if (arg < 0 || arg > 2)
                return -EINVAL;

        if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
            ata_id_lba48_enabled(drive->id) == 0))
                return -EIO;

        if (arg == 2)
                arg = 0;

        if (arg)
                drive->dev_flags |= IDE_DFLAG_LBA48;
        else
                drive->dev_flags &= ~IDE_DFLAG_LBA48;

        return 0;
}
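
/*
 * These macros (defined in <linux/ide.h>) register the get/set helpers
 * above as generic IDE device settings.
 */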
ide_ext_devset_rw(acoustic, acoustic);
ide_ext_devset_rw(address, addressing);
ide_ext_devset_rw(multcount, multcount);
ide_ext_devset_rw(wcache, wcache);

ide_ext_devset_rw_sync(nowerr, nowerr);

static int ide_disk_check(ide_drive_t *drive, const char *s)
{
        return 1;
}

static void ide_disk_setup(ide_drive_t *drive)
{
        struct ide_disk_obj *idkp = drive->driver_data;
        struct request_queue *q = drive->queue;
        ide_hwif_t *hwif = drive->hwif;
        u16 *id = drive->id;
        char *m = (char *)&id[ATA_ID_PROD];
        unsigned long long capacity;

        ide_proc_register_driver(drive, idkp->driver);

        if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
                return;

        if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
                /*
                 * Removable disks (eg. SYQUEST); ignore 'WD' drives
                 */
                if (m[0] != 'W' || m[1] != 'D')
                        drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
        }

        (void)set_addressing(drive, 1);

        if (drive->dev_flags & IDE_DFLAG_LBA48) {
                int max_s = 2048;

                if (max_s > hwif->rqsize)
                        max_s = hwif->rqsize;

                blk_queue_max_hw_sectors(q, max_s);
        }

        printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
               queue_max_sectors(q) / 2);

        if (ata_id_is_ssd(id))
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

        /* calculate drive capacity, and select LBA if possible */
        ide_disk_get_capacity(drive);

        /*
         * if possible, give fdisk access to more of the drive,
         * by correcting bios_cyls:
         */
        capacity = ide_gd_capacity(drive);

        if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
                if (ata_id_lba48_enabled(drive->id)) {
                        /* compatibility */
                        drive->bios_sect = 63;
                        drive->bios_head = 255;
                }

                if (drive->bios_sect && drive->bios_head) {
                        unsigned int cap0 = capacity; /* truncate to 32 bits */
                        unsigned int cylsz, cyl;

                        if (cap0 != capacity)
                                drive->bios_cyl = 65535;
                        else {
                                cylsz = drive->bios_sect * drive->bios_head;
                                cyl = cap0 / cylsz;
                                if (cyl > 65535)
                                        cyl = 65535;
                                if (cyl > drive->bios_cyl)
                                        drive->bios_cyl = cyl;
                        }
                }
        }
        printk(KERN_INFO "%s: %llu sectors (%llu MB)",
               drive->name, capacity, sectors_to_MB(capacity));

        /* Only print cache size when it was specified */
        if (id[ATA_ID_BUF_SIZE])
                printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);

        printk(KERN_CONT ", CHS=%d/%d/%d\n",
               drive->bios_cyl, drive->bios_head, drive->bios_sect);

        /* write cache enabled? */
        if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
                drive->dev_flags |= IDE_DFLAG_WCACHE;

        set_wcache(drive, 1);

        if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
            (drive->head == 0 || drive->head > 16)) {
                printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
                       drive->name, drive->head);
                drive->dev_flags &= ~IDE_DFLAG_ATTACH;
        } else
                drive->dev_flags |= IDE_DFLAG_ATTACH;
}
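
/*
 * Flush the drive's write cache: a no-op unless the drive supports
 * FLUSH CACHE and write caching is enabled; a failed flush is only
 * logged.  Exposed through ide_ata_disk_ops below.
 */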
static void ide_disk_flush(ide_drive_t *drive)
{
        if (ata_id_flush_enabled(drive->id) == 0 ||
            (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
                return;

        if (do_idedisk_flushcache(drive))
                printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
}

static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
{
        return 0;
}

static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
                                 int on)
{
        struct ide_cmd cmd;
        int ret;

        if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
                return 0;

        memset(&cmd, 0, sizeof(cmd));
        cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
        cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
        cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

        ret = ide_no_data_taskfile(drive, &cmd);

        if (ret)
                drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;

        return ret;
}
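
/*
 * Method table handed to the generic IDE disk driver (ide-gd), mapping
 * the common disk operations onto the ATA-specific helpers above.
 */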
const struct ide_disk_ops ide_ata_disk_ops = {
        .check                  = ide_disk_check,
        .unlock_native_capacity = ide_disk_unlock_native_capacity,
        .get_capacity           = ide_disk_get_capacity,
        .setup                  = ide_disk_setup,
        .flush                  = ide_disk_flush,
        .init_media             = ide_disk_init_media,
        .set_doorlock           = ide_disk_set_doorlock,
        .do_request             = ide_do_rw_disk,
        .ioctl                  = ide_disk_ioctl,
};