// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * the crypto API. See Documentation/block/inline-encryption.rst. fscrypt still
 * provides the key and IV to use.
 */

#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

static struct block_device **fscrypt_get_devices(struct super_block *sb,
						 unsigned int *num_devs)
{
	struct block_device **devs;

	if (sb->s_cop->get_devices) {
		devs = sb->s_cop->get_devices(sb, num_devs);
		if (devs)
			return devs;
	}
	devs = kmalloc(sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return ERR_PTR(-ENOMEM);
	devs[0] = sb->s_bdev;
	*num_devs = 1;
	return devs;
}
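
/*
 * Illustrative sketch, not part of fscrypt: roughly what a multi-device
 * filesystem's ->get_devices() hook looks like, given how fscrypt_get_devices()
 * above uses it. The hook returns a kmalloc()'d array of every block device
 * that encrypted file contents may be stored on (the caller kfree()s it),
 * NULL to fall back to just sb->s_bdev, or an ERR_PTR() on failure. All
 * "example_" names are hypothetical.
 */
#if 0	/* example only; not part of fscrypt */
static struct block_device **example_get_devices(struct super_block *sb,
						 unsigned int *num_devs)
{
	struct example_fs_info *fsi = sb->s_fs_info;	/* hypothetical */
	struct block_device **devs;
	unsigned int i;

	if (fsi->nr_devs == 1)
		return NULL;	/* single device: fscrypt will use sb->s_bdev */

	devs = kmalloc_array(fsi->nr_devs, sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < fsi->nr_devs; i++)
		devs[i] = fsi->devs[i].bdev;
	*num_devs = fsi->nr_devs;
	return devs;
}
#endif
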
static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_inode_info *ci)
{
	const struct super_block *sb = ci->ci_inode->i_sb;
	unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
	int dun_bits;

	if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
		return offsetofend(union fscrypt_iv, nonce);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
		return sizeof(__le64);

	if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
		return sizeof(__le32);

	/* Default case: IVs are just the file data unit index */
	dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
	return DIV_ROUND_UP(dun_bits, 8);
}

/*
 * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback
 * for an encryption mode for the first time. This is the blk-crypto
 * counterpart to the message logged when starting to use the crypto API for the
 * first time. A limitation is that these messages don't convey which specific
 * filesystems or files are using each implementation. However, *usually*
 * systems use just one implementation per mode, which makes these messages
 * helpful for debugging problems where the "wrong" implementation is used.
 */
static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
					struct block_device **devs,
					unsigned int num_devs,
					const struct blk_crypto_config *cfg)
{
	unsigned int i;

	for (i = 0; i < num_devs; i++) {
		if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
		    blk_crypto_config_supported_natively(devs[i], cfg)) {
			if (!xchg(&mode->logged_blk_crypto_native, 1))
				pr_info("fscrypt: %s using blk-crypto (native)\n",
					mode->friendly_name);
		} else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
			pr_info("fscrypt: %s using blk-crypto-fallback\n",
				mode->friendly_name);
		}
	}
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_inode_info *ci,
				   bool is_hw_wrapped_key)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	struct blk_crypto_config crypto_cfg;
	struct block_device **devs;
	unsigned int num_devs;
	unsigned int i;

	/* The file must need contents encryption, not filenames encryption */
	if (!S_ISREG(inode->i_mode))
		return 0;

	/* The crypto mode must have a blk-crypto counterpart */
	if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
		return 0;

	/* The filesystem must be mounted with -o inlinecrypt */
	if (!(sb->s_flags & SB_INLINECRYPT))
		return 0;

	/*
	 * When a page contains multiple logically contiguous filesystem blocks,
	 * some filesystem code only calls fscrypt_mergeable_bio() for the first
	 * block in the page. This is fine for most of fscrypt's IV generation
	 * strategies, where contiguous blocks imply contiguous IVs. But it
	 * doesn't work with IV_INO_LBLK_32. For now, simply exclude
	 * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
	 */
	if ((fscrypt_policy_flags(&ci->ci_policy) &
	     FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
	    sb->s_blocksize != PAGE_SIZE)
		return 0;

	/*
	 * On all the filesystem's block devices, blk-crypto must support the
	 * crypto configuration that the file would use.
	 */
	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
	crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
	crypto_cfg.key_type = is_hw_wrapped_key ?
		BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;

	devs = fscrypt_get_devices(sb, &num_devs);
	if (IS_ERR(devs))
		return PTR_ERR(devs);

	for (i = 0; i < num_devs; i++) {
		if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
			goto out_free_devs;
	}

	fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);

	ci->ci_inlinecrypt = true;
out_free_devs:
	kfree(devs);

	return 0;
}

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
				     const u8 *key_bytes, size_t key_size,
				     bool is_hw_wrapped,
				     const struct fscrypt_inode_info *ci)
{
	const struct inode *inode = ci->ci_inode;
	struct super_block *sb = inode->i_sb;
	enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
	enum blk_crypto_key_type key_type = is_hw_wrapped ?
		BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_RAW;
	struct blk_crypto_key *blk_key;
	struct block_device **devs;
	unsigned int num_devs;
	unsigned int i;
	int err;

	blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL);
	if (!blk_key)
		return -ENOMEM;

	err = blk_crypto_init_key(blk_key, key_bytes, key_size, key_type,
				  crypto_mode, fscrypt_get_dun_bytes(ci),
				  1U << ci->ci_data_unit_bits);
	if (err) {
		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
		goto fail;
	}

	/* Start using blk-crypto on all the filesystem's block devices. */
	devs = fscrypt_get_devices(sb, &num_devs);
	if (IS_ERR(devs)) {
		err = PTR_ERR(devs);
		goto fail;
	}
	for (i = 0; i < num_devs; i++) {
		err = blk_crypto_start_using_key(devs[i], blk_key);
		if (err)
			break;
	}
	kfree(devs);
	if (err) {
		fscrypt_err(inode, "error %d starting to use blk-crypto", err);
		goto fail;
	}

	/*
	 * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
	 * I.e., here we publish ->blk_key with a RELEASE barrier so that
	 * concurrent tasks can ACQUIRE it. Note that this concurrency is only
	 * possible for per-mode keys, not for per-file keys.
	 */
	smp_store_release(&prep_key->blk_key, blk_key);
	return 0;

fail:
	kfree_sensitive(blk_key);
	return err;
}
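
/*
 * Illustrative sketch of the ACQUIRE side of the publish above: the reader,
 * fscrypt_is_key_prepared() in fscrypt_private.h, checks for the key roughly
 * as below (a simplified approximation, not the exact fscrypt code). The
 * smp_load_acquire() guarantees that a reader which sees a non-NULL ->blk_key
 * also sees all of the key initialization done before the smp_store_release().
 */
#if 0	/* example only; not part of fscrypt */
static bool example_inline_key_prepared(struct fscrypt_prepared_key *prep_key)
{
	/* Pairs with the smp_store_release() in the function above. */
	return smp_load_acquire(&prep_key->blk_key) != NULL;
}
#endif
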
void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
				      struct fscrypt_prepared_key *prep_key)
{
	struct blk_crypto_key *blk_key = prep_key->blk_key;
	struct block_device **devs;
	unsigned int num_devs;
	unsigned int i;

	if (!blk_key)
		return;

	/* Evict the key from all the filesystem's block devices. */
	devs = fscrypt_get_devices(sb, &num_devs);
	if (!IS_ERR(devs)) {
		for (i = 0; i < num_devs; i++)
			blk_crypto_evict_key(devs[i], blk_key);
		kfree(devs);
	}
	kfree_sensitive(blk_key);
}

/*
 * Ask the inline encryption hardware to derive the software secret from a
 * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't
 * supported on this filesystem or hardware.
 */
int fscrypt_derive_sw_secret(struct super_block *sb,
			     const u8 *wrapped_key, size_t wrapped_key_size,
			     u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
	int err;

	/* The filesystem must be mounted with -o inlinecrypt. */
	if (!(sb->s_flags & SB_INLINECRYPT)) {
		fscrypt_warn(NULL,
			     "%s: filesystem not mounted with inlinecrypt\n",
			     sb->s_id);
		return -EOPNOTSUPP;
	}

	err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
					  wrapped_key_size, sw_secret);
	if (err == -EOPNOTSUPP)
		fscrypt_warn(NULL,
			     "%s: block device doesn't support hardware-wrapped keys\n",
			     sb->s_id);
	return err;
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
	return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
				 u64 lblk_num,
				 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	u64 index = lblk_num << ci->ci_data_units_per_block_bits;
	union fscrypt_iv iv;
	int i;

	fscrypt_generate_iv(&iv, index, ci);

	BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
	memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
	for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
		dun[i] = le64_to_cpu(iv.dun[i]);
}

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *	      bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
			       u64 first_lblk, gfp_t gfp_mask)
{
	const struct fscrypt_inode_info *ci;
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return;
	ci = inode->i_crypt_info;

	fscrypt_generate_dun(ci, first_lblk, dun);
	bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
				      const struct inode **inode_ret,
				      u64 *lblk_num_ret)
{
	struct folio *folio = bh->b_folio;
	const struct address_space *mapping;
	const struct inode *inode;

	/*
	 * The ext4 journal (jbd2) can submit a buffer_head it directly created
	 * for a non-pagecache page. fscrypt doesn't care about these.
	 */
	mapping = folio_mapping(folio);
	if (!mapping)
		return false;
	inode = mapping->host;

	*inode_ret = inode;
	*lblk_num_ret = ((u64)folio->index << (PAGE_SHIFT - inode->i_blkbits)) +
			(bh_offset(bh) >> inode->i_blkbits);
	return true;
}
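
/*
 * Worked example for the calculation above (illustrative numbers): with 4K
 * pages (PAGE_SHIFT == 12) and a filesystem block size of 1K (i_blkbits ==
 * 10), each page holds 4 blocks. For folio->index == 5 and a buffer_head at
 * byte offset 2048 within the folio, the logical block number is
 * (5 << 2) + (2048 >> 10) == 20 + 2 == 22.
 */
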
/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
				  const struct buffer_head *first_bh,
				  gfp_t gfp_mask)
{
	const struct inode *inode;
	u64 first_lblk;

	if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
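
/*
 * Illustrative sketch, not from any real filesystem: roughly how a read path
 * is expected to use fscrypt_set_bio_crypt_ctx() and fscrypt_mergeable_bio()
 * (defined below) when batching logically contiguous blocks into bios. All
 * "example_" names are hypothetical; the folios are assumed to be order-0 and
 * to map to physically contiguous sectors starting at @first_sector, and
 * @nr_folios is assumed small enough to fit in one bio.
 */
#if 0	/* example only; not part of fscrypt */
static void example_read_folios(struct inode *inode, struct folio **folios,
				unsigned int nr_folios, u64 first_lblk,
				sector_t first_sector)
{
	const unsigned int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct bio *bio;
	unsigned int i;

	bio = bio_alloc(bdev, nr_folios, REQ_OP_READ, GFP_NOFS);
	bio->bi_iter.bi_sector = first_sector;
	/* Set the crypt context before any data is added to the bio. */
	fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);

	for (i = 0; i < nr_folios; i++) {
		u64 lblk = first_lblk + (u64)i * blocks_per_page;

		/*
		 * If the next folio's DUN wouldn't be contiguous with the data
		 * already in the bio, submit the bio and start a new one.
		 */
		if (i != 0 && !fscrypt_mergeable_bio(bio, inode, lblk)) {
			submit_bio(bio);
			bio = bio_alloc(bdev, nr_folios - i, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector = first_sector +
				((sector_t)i << (PAGE_SHIFT - SECTOR_SHIFT));
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
		}
		bio_add_folio_nofail(bio, folios[i], PAGE_SIZE, 0);
	}
	submit_bio(bio);
}
#endif
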
/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function isn't required in cases where crypto-mergeability is ensured in
 * another way, such as I/O targeting only a single file (and thus a single key)
 * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
			   u64 next_lblk)
{
	const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
		return false;
	if (!bc)
		return true;

	/*
	 * Comparing the key pointers is good enough, as all I/O for each key
	 * uses the same pointer. I.e., there's currently no need to support
	 * merging requests where the keys are the same but the pointers differ.
	 */
	if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key)
		return false;

	fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
	return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
			      const struct buffer_head *next_bh)
{
	const struct inode *inode;
	u64 next_lblk;

	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
		return !bio->bi_crypt_context;

	return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an
 *			     inode, as far as encryption is concerned
 * @inode: the inode in question
 *
 * Return: %true if there are no encryption constraints that prevent DIO from
 *	   being supported; %false if DIO is unsupported. (Note that in the
 *	   %true case, the filesystem might have other, non-encryption-related
 *	   constraints that prevent DIO from actually being supported. Also, on
 *	   encrypted files the filesystem is still responsible for only allowing
 *	   DIO when requests are filesystem-block-aligned.)
 */
bool fscrypt_dio_supported(struct inode *inode)
{
	int err;

	/* If the file is unencrypted, no veto from us. */
	if (!fscrypt_needs_contents_encryption(inode))
		return true;

	/*
	 * We only support DIO with inline crypto, not fs-layer crypto.
	 *
	 * To determine whether the inode is using inline crypto, we have to set
	 * up the key if it wasn't already done. This is because in the current
	 * design of fscrypt, the decision of whether to use inline crypto or
	 * not isn't made until the inode's encryption key is being set up. In
	 * the DIO read/write case, the key will always be set up already, since
	 * the file will be open. But in the case of statx(), the key might not
	 * be set up yet, as the file might not have been opened yet.
	 */
	err = fscrypt_require_key(inode);
	if (err) {
		/*
		 * Key unavailable or couldn't be set up. This edge case isn't
		 * worth worrying about; just report that DIO is unsupported.
		 */
		return false;
	}
	return fscrypt_inode_uses_inline_crypto(inode);
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
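
/*
 * Illustrative sketch, not from any real filesystem: how fscrypt_dio_supported()
 * is meant to be combined with the filesystem's own checks before it decides to
 * issue direct I/O. The "example_" name is hypothetical.
 */
#if 0	/* example only; not part of fscrypt */
static bool example_dio_allowed(struct inode *inode, loff_t pos, size_t count)
{
	/* Encryption constraints: DIO on encrypted files needs inline crypto. */
	if (!fscrypt_dio_supported(inode))
		return false;

	/* On encrypted files, DIO must also be filesystem-block-aligned. */
	if (fscrypt_needs_contents_encryption(inode) &&
	    !IS_ALIGNED(pos | count, i_blocksize(inode)))
		return false;

	/* ... the filesystem's other, non-encryption constraints go here ... */
	return true;
}
#endif
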
/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in a bio
 * targeting @lblk without causing a data unit number (DUN) discontiguity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks. (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur. If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of a bio, which would
 * cause encryption/decryption to produce wrong results.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
	const struct fscrypt_inode_info *ci;
	u32 dun;

	if (!fscrypt_inode_uses_inline_crypto(inode))
		return nr_blocks;

	if (nr_blocks <= 1)
		return nr_blocks;

	ci = inode->i_crypt_info;
	if (!(fscrypt_policy_flags(&ci->ci_policy) &
	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
		return nr_blocks;

	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */

	dun = ci->ci_hashed_ino + lblk;

	return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
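
/*
 * Worked example for fscrypt_limit_io_blocks() (illustrative numbers): with
 * IV_INO_LBLK_32, the DUN for @lblk is the 32-bit value ci_hashed_ino + lblk.
 * If that DUN is 0xfffffffe and @nr_blocks is 8, only min(8, 0x100000000 -
 * 0xfffffffe) == 2 blocks are allowed, since the third block's DUN would wrap
 * around to 0. Filesystems typically call this from their block-mapping code
 * before building bios, so that the wraparound lands on a bio boundary.
 */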