GitHub Repository: torvalds/linux
Path: blob/master/block/bdev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <[email protected]> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

/* Should we allow writing to mounted block devices? */
static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED);

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

static inline struct inode *BD_INODE(struct block_device *bdev)
{
	return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

struct block_device *file_bdev(struct file *bdev_file)
{
	return I_BDEV(bdev_file->f_mapping->host);
}
EXPORT_SYMBOL(file_bdev);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = BD_INODE(bdev);
	int ret;

	spin_lock(&inode->i_lock);
	while (inode_state_read(inode) & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * with error if bdev has other exclusive owner (such as filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under live filesystem.
	 */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(BD_INODE(bdev));

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
	mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping,
				    get_order(bsize));
}

/**
 * bdev_validate_blocksize - check that this block size is acceptable
 * @bdev: blockdevice to check
 * @block_size: block size to check
 *
 * For block device users that do not use buffer heads or the block device
 * page cache, make sure that this block size can be used with the device.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_validate_blocksize(struct block_device *bdev, int block_size)
{
	if (blk_validate_block_size(block_size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (block_size < bdev_logical_block_size(bdev))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(bdev_validate_blocksize);

int set_blocksize(struct file *file, int size)
{
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev = I_BDEV(inode);
	int ret;

	ret = bdev_validate_blocksize(bdev, size);
	if (ret)
		return ret;

	if (!file->private_data)
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (inode->i_blkbits != blksize_bits(size)) {
		/*
		 * Flush and truncate the pagecache before we reconfigure the
		 * mapping geometry because folio sizes are variable now. If a
		 * reader has already allocated a folio whose size is smaller
		 * than the new min_order but invokes readahead after the new
		 * min_order becomes visible, readahead will think there are
		 * "zero" blocks per folio and crash. Take the inode and
		 * invalidation locks to avoid racing with
		 * read/write/fallocate.
		 */
		inode_lock(inode);
		filemap_invalidate_lock(inode->i_mapping);

		sync_blockdev(bdev);
		kill_bdev(bdev);

		inode->i_blkbits = blksize_bits(size);
		mapping_set_folio_min_order(inode->i_mapping, get_order(size));
		kill_bdev(bdev);
		filemap_invalidate_unlock(inode->i_mapping);
		inode_unlock(inode);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

static int sb_validate_large_blocksize(struct super_block *sb, int size)
{
	const char *err_str = NULL;

	if (!(sb->s_type->fs_flags & FS_LBS))
		err_str = "not supported by filesystem";
	else if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		err_str = "is only supported with CONFIG_TRANSPARENT_HUGEPAGE";

	if (!err_str)
		return 0;

	pr_warn_ratelimited("%s: block size(%d) > page size(%lu) %s\n",
			sb->s_type->name, size, PAGE_SIZE, err_str);
	return -EINVAL;
}

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (size > PAGE_SIZE && sb_validate_large_blocksize(sb, size))
		return 0;
	if (set_blocksize(sb->s_bdev_file, size))
		return 0;
	/* If we get here, we know size is validated */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int __must_check sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
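
/*
 * Illustrative usage sketch (not part of the original file): a filesystem's
 * fill_super callback typically picks its block size via sb_min_blocksize()
 * or sb_set_blocksize() and treats a zero return as failure, e.g.:
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		...
 *	}
 *
 * "examplefs" is a made-up name used only for illustration.
 */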

int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_mapping,
			lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);

/**
 * bdev_freeze - lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in bdev_freeze() and
 * down in bdev_thaw(). When it reaches 0, bdev_thaw() actually unfreezes the
 * filesystem.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_freeze(struct block_device *bdev)
{
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);

	if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return 0;
	}

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) {
		error = bdev->bd_holder_ops->freeze(bdev);
		lockdep_assert_not_held(&bdev->bd_holder_lock);
	} else {
		mutex_unlock(&bdev->bd_holder_lock);
		error = sync_blockdev(bdev);
	}

	if (error)
		atomic_dec(&bdev->bd_fsfreeze_count);

	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(bdev_freeze);

/**
 * bdev_thaw - unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after bdev_freeze().
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_thaw(struct block_device *bdev)
{
	int error = -EINVAL, nr_freeze;

	mutex_lock(&bdev->bd_fsfreeze_mutex);

	/*
	 * If this returns < 0 it means that @bd_fsfreeze_count was
	 * already 0 and no decrement was performed.
	 */
	nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count);
	if (nr_freeze < 0)
		goto out;

	error = 0;
	if (nr_freeze > 0)
		goto out;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) {
		error = bdev->bd_holder_ops->thaw(bdev);
		lockdep_assert_not_held(&bdev->bd_holder_lock);
	} else {
		mutex_unlock(&bdev->bd_holder_lock);
	}

	if (error)
		atomic_inc(&bdev->bd_fsfreeze_count);
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(bdev_thaw);
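
/*
 * Illustrative usage sketch (not part of the original file): callers that
 * want a consistent on-disk image, e.g. before taking a snapshot, pair
 * bdev_freeze() with bdev_thaw() on the same device:
 *
 *	error = bdev_freeze(bdev);
 *	if (error)
 *		return error;
 *	... take the snapshot ...
 *	error = bdev_thaw(bdev);
 *
 * Because of the bd_fsfreeze_count counting above, freeze/thaw pairs from
 * several callers may nest; only the last thaw actually unfreezes.
 */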

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __ro_after_init;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));

	if (security_bdev_alloc(&ei->bdev)) {
		kmem_cache_free(bdev_cachep, ei);
		return NULL;
	}
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);
	security_bdev_free(bdev);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = inode_just_drop,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name = "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __ro_after_init;
static struct vfsmount *blockdev_mnt __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	blockdev_mnt = kern_mount(&bd_type);
	if (IS_ERR(blockdev_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = blockdev_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	mutex_init(&bdev->bd_holder_lock);
	atomic_set(&bdev->__bd_flags, partno);
	bdev->bd_mapping = &inode->i_data;
	bdev->bd_queue = disk->queue;
	if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO))
		bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO);
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	struct inode *inode = BD_INODE(bdev);
	if (bdev_stable_writes(bdev))
		mapping_set_stable_writes(bdev->bd_mapping);
	bdev->bd_dev = dev;
	inode->i_rdev = dev;
	inode->i_ino = dev;
	insert_inode_hash(inode);
}

void bdev_unhash(struct block_device *bdev)
{
	remove_inode_hash(BD_INODE(bdev));
}

void bdev_drop(struct block_device *bdev)
{
	iput(BD_INODE(bdev));
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	lockdep_assert_held(&bdev_lock);

	if (bdev->bd_holder) {
		/*
		 * The same holder can always re-claim.
		 */
		if (bdev->bd_holder == holder) {
			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
				return false;
			return true;
		}
		return false;
	}

	/*
	 * If the whole device's holder is set to bd_may_claim, a partition on
	 * the device is claimed, but not the whole device.
	 */
	if (whole != bdev &&
	    whole->bd_holder && whole->bd_holder != bd_may_claim)
		return false;
	return true;
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops.
 *
 * Claim @bdev. This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	mutex_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, holder, hops)) {
		mutex_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	mutex_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_var(&whole->bd_claiming);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	mutex_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, holder, hops));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	mutex_lock(&bdev->bd_holder_lock);
	bdev->bd_holder = holder;
	bdev->bd_holder_ops = hops;
	mutex_unlock(&bdev->bd_holder_lock);
	bd_clear_claiming(whole, holder);
	mutex_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	mutex_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
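
/*
 * Illustrative usage sketch (not part of the original file): the claiming
 * helpers can also be used to temporarily block other exclusive openers
 * without performing an open, as truncate_bdev_range() above does:
 *
 *	err = bd_prepare_to_claim(bdev, some_holder, NULL);
 *	if (err)
 *		return err;
 *	... do work that must not race with exclusive openers ...
 *	bd_abort_claiming(bdev, some_holder);
 *
 * "some_holder" stands for any unique cookie owned by the caller.
 */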

static void bd_end_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);
	bool unblock = false;

	/*
	 * Release a claim on the device. The holder fields are protected with
	 * bdev_lock. open_mutex is used to synchronize disk_holder unlinking.
	 */
	mutex_lock(&bdev_lock);
	WARN_ON_ONCE(bdev->bd_holder != holder);
	WARN_ON_ONCE(--bdev->bd_holders < 0);
	WARN_ON_ONCE(--whole->bd_holders < 0);
	if (!bdev->bd_holders) {
		mutex_lock(&bdev->bd_holder_lock);
		bdev->bd_holder = NULL;
		bdev->bd_holder_ops = NULL;
		mutex_unlock(&bdev->bd_holder_lock);
		if (bdev_test_flag(bdev, BD_WRITE_HOLDER))
			unblock = true;
	}
	if (!whole->bd_holders)
		whole->bd_holder = NULL;
	mutex_unlock(&bdev_lock);

	/*
	 * If this was the last claim, remove holder link and unblock event
	 * polling if it was a write holder.
	 */
	if (unblock) {
		disk_unblock_events(bdev->bd_disk);
		bdev_clear_flag(bdev, BD_WRITE_HOLDER);
	}
}

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static void blkdev_put_whole(struct block_device *bdev)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk);
}

static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(disk, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	atomic_inc(&bdev->bd_openers);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state)) {
		/*
		 * Only return scanning errors if we are called from contexts
		 * that explicitly want them, e.g. the BLKRRPART ioctl.
		 */
		ret = bdev_disk_changed(disk, false);
		if (ret && (mode & BLK_OPEN_STRICT_SCAN)) {
			blkdev_put_whole(bdev);
			return ret;
		}
	}
	return 0;
}

static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	if (!atomic_read(&part->bd_openers)) {
		disk->open_partitions++;
		set_init_blocksize(part);
	}
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part));
	return ret;
}

int bdev_permission(dev_t dev, blk_mode_t mode, void *holder)
{
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
			((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ret;

	/* Blocking writes requires an exclusive opener */
	if (mode & BLK_OPEN_RESTRICT_WRITES && !holder)
		return -EINVAL;

	/*
	 * We're using error pointers to indicate to ->release() when we
	 * failed to open that block device. Also this doesn't make sense.
	 */
	if (WARN_ON_ONCE(IS_ERR(holder)))
		return -EINVAL;

	return 0;
}

static void blkdev_put_part(struct block_device *part)
{
	struct block_device *whole = bdev_whole(part);

	if (atomic_dec_and_test(&part->bd_openers)) {
		blkdev_flush_mapping(part);
		whole->bd_disk->open_partitions--;
	}
	blkdev_put_whole(whole);
}

struct block_device *blkdev_get_no_open(dev_t dev, bool autoload)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device mode one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}

static bool bdev_writes_blocked(struct block_device *bdev)
{
	return bdev->bd_writers < 0;
}

static void bdev_block_writes(struct block_device *bdev)
{
	bdev->bd_writers--;
}

static void bdev_unblock_writes(struct block_device *bdev)
{
	bdev->bd_writers++;
}

static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
{
	if (bdev_allow_write_mounted)
		return true;
	/* Writes blocked? */
	if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev))
		return false;
	if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0)
		return false;
	return true;
}

static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
{
	if (bdev_allow_write_mounted)
		return;

	/* Claim exclusive or shared write access. */
	if (mode & BLK_OPEN_RESTRICT_WRITES)
		bdev_block_writes(bdev);
	else if (mode & BLK_OPEN_WRITE)
		bdev->bd_writers++;
}

static inline bool bdev_unclaimed(const struct file *bdev_file)
{
	return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
}

static void bdev_yield_write_access(struct file *bdev_file)
{
	struct block_device *bdev;

	if (bdev_allow_write_mounted)
		return;

	if (bdev_unclaimed(bdev_file))
		return;

	bdev = file_bdev(bdev_file);

	if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
		bdev_unblock_writes(bdev);
	else if (bdev_file->f_mode & FMODE_WRITE)
		bdev->bd_writers--;
}

/**
 * bdev_open - open a block device
 * @bdev: block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 * @bdev_file: file for the block device
 *
 * Open the block device. If @holder is not %NULL, the block device is opened
 * with exclusive access. Exclusive opens may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * zero on success, -errno on failure.
 */
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file)
{
	bool unblock_events = true;
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (holder) {
		mode |= BLK_OPEN_EXCL;
		ret = bd_prepare_to_claim(bdev, holder, hops);
		if (ret)
			return ret;
	} else {
		if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL))
			return -EIO;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	ret = -EBUSY;
	if (!bdev_may_open(bdev, mode))
		goto put_module;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	bdev_claim_write_access(bdev, mode);
	if (holder) {
		bd_finish_claiming(bdev, holder, hops);

		/*
		 * Block event polling for write claims if requested. Any write
		 * holder makes the write_holder state stick until all are
		 * released. This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & BLK_OPEN_WRITE) &&
		    !bdev_test_flag(bdev, BD_WRITE_HOLDER) &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev_set_flag(bdev, BD_WRITE_HOLDER);
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);

	bdev_file->f_flags |= O_LARGEFILE;
	bdev_file->f_mode |= FMODE_CAN_ODIRECT;
	if (bdev_nowait(bdev))
		bdev_file->f_mode |= FMODE_NOWAIT;
	if (mode & BLK_OPEN_RESTRICT_WRITES)
		bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
	bdev_file->f_mapping = bdev->bd_mapping;
	bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
	bdev_file->private_data = holder;

	return 0;
put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (holder)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
	return ret;
}

/*
 * If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk
 * associated with the floppy driver where it has allowed ioctls if the
 * file was opened for writing, but does not allow reads or writes.
 * Make sure that this quirk is reflected in @f_flags.
 *
 * It can also happen if a block device is opened as O_RDWR | O_WRONLY.
 */
static unsigned blk_to_file_flags(blk_mode_t mode)
{
	unsigned int flags = 0;

	if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) ==
	    (BLK_OPEN_READ | BLK_OPEN_WRITE))
		flags |= O_RDWR;
	else if (mode & BLK_OPEN_WRITE_IOCTL)
		flags |= O_RDWR | O_WRONLY;
	else if (mode & BLK_OPEN_WRITE)
		flags |= O_WRONLY;
	else if (mode & BLK_OPEN_READ)
		flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */
	else
		WARN_ON_ONCE(true);

	if (mode & BLK_OPEN_NDELAY)
		flags |= O_NDELAY;

	return flags;
}

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
				   const struct blk_holder_ops *hops)
{
	struct file *bdev_file;
	struct block_device *bdev;
	unsigned int flags;
	int ret;

	ret = bdev_permission(dev, mode, holder);
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev, true);
	if (!bdev)
		return ERR_PTR(-ENXIO);

	flags = blk_to_file_flags(mode);
	bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
			blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
	if (IS_ERR(bdev_file)) {
		blkdev_put_no_open(bdev);
		return bdev_file;
	}
	ihold(BD_INODE(bdev));

	ret = bdev_open(bdev, mode, holder, hops, bdev_file);
	if (ret) {
		/* We failed to open the block device. Let ->release() know. */
		bdev_file->private_data = ERR_PTR(ret);
		fput(bdev_file);
		return ERR_PTR(ret);
	}
	return bdev_file;
}
EXPORT_SYMBOL(bdev_file_open_by_dev);

struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
				    void *holder,
				    const struct blk_holder_ops *hops)
{
	struct file *file;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	file = bdev_file_open_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) {
		if (bdev_read_only(file_bdev(file))) {
			fput(file);
			file = ERR_PTR(-EACCES);
		}
	}

	return file;
}
EXPORT_SYMBOL(bdev_file_open_by_path);
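
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * exclusive open by path is paired with bdev_fput() (defined below), with
 * the holder cookie identifying the owner:
 *
 *	struct file *bdev_file;
 *
 *	bdev_file = bdev_file_open_by_path("/dev/sda1",
 *			BLK_OPEN_READ | BLK_OPEN_WRITE, my_holder, NULL);
 *	if (IS_ERR(bdev_file))
 *		return PTR_ERR(bdev_file);
 *	...
 *	bdev_fput(bdev_file);
 *
 * "/dev/sda1" and "my_holder" are placeholders used only for illustration.
 */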

static inline void bd_yield_claim(struct file *bdev_file)
{
	struct block_device *bdev = file_bdev(bdev_file);
	void *holder = bdev_file->private_data;

	lockdep_assert_held(&bdev->bd_disk->open_mutex);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
		return;

	if (!bdev_unclaimed(bdev_file))
		bd_end_claim(bdev, holder);
}

void bdev_release(struct file *bdev_file)
{
	struct block_device *bdev = file_bdev(bdev_file);
	void *holder = bdev_file->private_data;
	struct gendisk *disk = bdev->bd_disk;

	/* We failed to open that block device. */
	if (IS_ERR(holder))
		goto put_no_open;

	/*
	 * Sync early if it looks like we're the last one. If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	bdev_yield_write_access(bdev_file);

	if (holder)
		bd_yield_claim(bdev_file);

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev);
	else
		blkdev_put_whole(bdev);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
put_no_open:
	blkdev_put_no_open(bdev);
}

/**
 * bdev_fput - yield claim to the block device and put the file
 * @bdev_file: open block device
 *
 * Yield claim on the block device and put the file. Ensure that the
 * block device can be reclaimed before the file is closed, which is a
 * deferred operation.
 */
void bdev_fput(struct file *bdev_file)
{
	if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
		return;

	if (bdev_file->private_data) {
		struct block_device *bdev = file_bdev(bdev_file);
		struct gendisk *disk = bdev->bd_disk;

		mutex_lock(&disk->open_mutex);
		bdev_yield_write_access(bdev_file);
		bd_yield_claim(bdev_file);
		/*
		 * Tell release we already gave up our hold on the
		 * device and if write restrictions are available that
		 * we already gave up write access to the device.
		 */
		bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
		mutex_unlock(&disk->open_mutex);
	}

	fput(bdev_file);
}
EXPORT_SYMBOL(bdev_fput);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
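
/*
 * Illustrative usage sketch (not part of the original file): lookup_bdev()
 * only translates a path into a dev_t; it does not open the device, so a
 * caller that needs the device usually follows it with an open by dev_t:
 *
 *	dev_t dev;
 *	int err = lookup_bdev("/dev/vdb", &dev);
 *
 *	if (err)
 *		return err;
 *	... e.g. pass @dev to bdev_file_open_by_dev() ...
 *
 * "/dev/vdb" is a placeholder path used only for illustration.
 */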

/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or media is dead. If @surprise is set
 * to %true the device or media is already gone, if not we are preparing for an
 * orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty
 * data and writes back inodes and then invalidates any cached data in the
 * inodes on the file system. In addition we also invalidate the block device
 * mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
		bdev->bd_holder_ops->mark_dead(bdev, surprise);
	else {
		mutex_unlock(&bdev->bd_holder_lock);
		sync_blockdev(bdev);
	}

	invalidate_bdev(bdev);
}
/*
 * New drivers should not use this directly. There are some drivers, however,
 * that need this for historical reasons. For example, the DASD driver has
 * historically had a shutdown to offline mode that doesn't actually remove the
 * gendisk that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);
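
/*
 * Illustrative sketch (not part of the original file): bdev_mark_dead(),
 * bdev_freeze() and bdev_thaw() above dispatch to the holder's
 * blk_holder_ops when one is registered, so a filesystem-like holder would
 * wire them up roughly like this:
 *
 *	static const struct blk_holder_ops example_holder_ops = {
 *		.mark_dead	= example_mark_dead,
 *		.freeze		= example_freeze,
 *		.thaw		= example_thaw,
 *	};
 *
 * The "example_*" callbacks are hypothetical names; only the ->mark_dead(),
 * ->freeze() and ->thaw() hooks used in this file are shown.
 */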

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}

/*
 * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices.
 */
void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask)
{
	struct block_device *bdev;

	/*
	 * Note that d_backing_inode() returns the block device node inode, not
	 * the block device's internal inode. Therefore it is *not* valid to
	 * use I_BDEV() here; the block device has to be looked up by i_rdev
	 * instead.
	 */
	bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false);
	if (!bdev)
		return;

	if (request_mask & STATX_DIOALIGN) {
		stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
		stat->dio_offset_align = bdev_logical_block_size(bdev);
		stat->result_mask |= STATX_DIOALIGN;
	}

	if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) {
		struct request_queue *bd_queue = bdev->bd_queue;

		generic_fill_statx_atomic_writes(stat,
			queue_atomic_write_unit_min_bytes(bd_queue),
			queue_atomic_write_unit_max_bytes(bd_queue),
			0);
	}

	stat->blksize = bdev_io_min(bdev);

	blkdev_put_no_open(bdev);
}

bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(BD_INODE(disk->part0));
}
EXPORT_SYMBOL_GPL(disk_live);

unsigned int block_size(struct block_device *bdev)
{
	return 1 << BD_INODE(bdev)->i_blkbits;
}
EXPORT_SYMBOL_GPL(block_size);

static int __init setup_bdev_allow_write_mounted(char *str)
{
	if (kstrtobool(str, &bdev_allow_write_mounted))
		pr_warn("Invalid option string for bdev_allow_write_mounted:"
			" '%s'\n", str);
	return 1;
}
__setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);