From: Linus Torvalds Date: Mon, 21 Mar 2016 18:03:02 +0000 (-0700) Subject: Merge tag 'for-f2fs-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk... X-Git-Tag: v4.6-rc1~66 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d407574e7948210223a7adca5ff26e3b0ec8143e;p=platform%2Fkernel%2Flinux-exynos.git Merge tag 'for-f2fs-4.6' of git://git./linux/kernel/git/jaegeuk/f2fs Pull f2fs updates from Jaegeuk Kim: "New Features: - uplift filesystem encryption into fs/crypto/ - add sysfs entries to control memory consumption Enhancements: - improve aio performance by preallocating blocks in ->write_iter - take the writepages lock only for WB_SYNC_ALL - avoid redundant inline_data conversion - enhance foreground GC - use wait_for_stable_page where possible - speed up SEEK_DATA and fiemap Bug Fixes: - corner case in terms of -ENOSPC for inline_data - hung task caused by long latency in shrinker - corruption between atomic write and f2fs_trace_pid - avoid garbage lengths in dentries - revoke atomically written pages if an error occurs In addition, there are various minor bug fixes and clean-ups" * tag 'for-f2fs-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (81 commits) f2fs: submit node page write bios when really required f2fs: add missing argument to f2fs_setxattr stub f2fs: fix to avoid unneeded unlock_new_inode f2fs: clean up opened code with f2fs_update_dentry f2fs: declare static functions f2fs: use cryptoapi crc32 functions f2fs: modify the readahead method in ra_node_page() f2fs crypto: sync ext4_lookup and ext4_file_open fs crypto: move per-file encryption from f2fs tree to fs/crypto f2fs: mutex can't be used by down_write_nest_lock() f2fs: recovery missing dot dentries in root directory f2fs: fix to avoid deadlock when merging inline data f2fs: introduce f2fs_flush_merged_bios for cleanup f2fs: introduce f2fs_update_data_blkaddr for cleanup f2fs crypto: fix incorrect positioning for GCing encrypted data page f2fs: fix incorrect upper bound when iterating inode mapping tree f2fs: avoid hungtask problem caused by losing wake_up f2fs: trace old block address for CoWed page f2fs: try to flush inode after merging inline data f2fs: show more info about superblock recovery ... --- d407574e7948210223a7adca5ff26e3b0ec8143e diff --cc fs/crypto/crypto.c index 0000000,d45c331..aed9ccc mode 000000,100644..100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@@ -1,0 -1,556 +1,555 @@@ + /* + * This contains encryption functions for per-file encryption. + * + * Copyright (C) 2015, Google, Inc. + * Copyright (C) 2015, Motorola Mobility + * + * Written by Michael Halcrow, 2014. + * + * Filename encryption additions + * Uday Savagaonkar, 2014 + * Encryption policy handling additions + * Ildar Muslukhov, 2014 + * Add fscrypt_pullback_bio_page() + * Jaegeuk Kim, 2015. + * + * This has not yet undergone a rigorous security audit. + * + * The usage of AES-XTS should conform to recommendations in NIST + * Special Publication 800-38E and IEEE P1619/D16. 
+ */ + -#include -#include + #include + #include + #include + #include + #include + #include + #include + #include ++#include + + static unsigned int num_prealloc_crypto_pages = 32; + static unsigned int num_prealloc_crypto_ctxs = 128; + + module_param(num_prealloc_crypto_pages, uint, 0444); + MODULE_PARM_DESC(num_prealloc_crypto_pages, + "Number of crypto pages to preallocate"); + module_param(num_prealloc_crypto_ctxs, uint, 0444); + MODULE_PARM_DESC(num_prealloc_crypto_ctxs, + "Number of crypto contexts to preallocate"); + + static mempool_t *fscrypt_bounce_page_pool = NULL; + + static LIST_HEAD(fscrypt_free_ctxs); + static DEFINE_SPINLOCK(fscrypt_ctx_lock); + + static struct workqueue_struct *fscrypt_read_workqueue; + static DEFINE_MUTEX(fscrypt_init_mutex); + + static struct kmem_cache *fscrypt_ctx_cachep; + struct kmem_cache *fscrypt_info_cachep; + + /** + * fscrypt_release_ctx() - Releases an encryption context + * @ctx: The encryption context to release. + * + * If the encryption context was allocated from the pre-allocated pool, returns + * it to that pool. Else, frees it. + * + * If there's a bounce page in the context, this frees that. + */ + void fscrypt_release_ctx(struct fscrypt_ctx *ctx) + { + unsigned long flags; + + if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) { + mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool); + ctx->w.bounce_page = NULL; + } + ctx->w.control_page = NULL; + if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) { + kmem_cache_free(fscrypt_ctx_cachep, ctx); + } else { + spin_lock_irqsave(&fscrypt_ctx_lock, flags); + list_add(&ctx->free_list, &fscrypt_free_ctxs); + spin_unlock_irqrestore(&fscrypt_ctx_lock, flags); + } + } + EXPORT_SYMBOL(fscrypt_release_ctx); + + /** + * fscrypt_get_ctx() - Gets an encryption context + * @inode: The inode for which we are doing the crypto + * + * Allocates and initializes an encryption context. + * + * Return: An allocated and initialized encryption context on success; error + * value or NULL otherwise. + */ + struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode) + { + struct fscrypt_ctx *ctx = NULL; + struct fscrypt_info *ci = inode->i_crypt_info; + unsigned long flags; + + if (ci == NULL) + return ERR_PTR(-ENOKEY); + + /* + * We first try getting the ctx from a free list because in + * the common case the ctx will have an allocated and + * initialized crypto tfm, so it's probably a worthwhile + * optimization. For the bounce page, we first try getting it + * from the kernel allocator because that's just about as fast + * as getting it from a list and because a cache of free pages + * should generally be a "last resort" option for a filesystem + * to be able to do its job. 
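+ *
+ * Illustrative caller pairing (a sketch, not code from this merge):
+ *
+ *	ctx = fscrypt_get_ctx(inode);
+ *	if (IS_ERR(ctx))
+ *		return PTR_ERR(ctx);
+ *	... do the encryption or decryption work ...
+ *	fscrypt_release_ctx(ctx);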
+ */ + spin_lock_irqsave(&fscrypt_ctx_lock, flags); + ctx = list_first_entry_or_null(&fscrypt_free_ctxs, + struct fscrypt_ctx, free_list); + if (ctx) + list_del(&ctx->free_list); + spin_unlock_irqrestore(&fscrypt_ctx_lock, flags); + if (!ctx) { + ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS); + if (!ctx) + return ERR_PTR(-ENOMEM); + ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL; + } else { + ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL; + } + ctx->flags &= ~FS_WRITE_PATH_FL; + return ctx; + } + EXPORT_SYMBOL(fscrypt_get_ctx); + + /** + * fscrypt_complete() - The completion callback for page encryption + * @req: The asynchronous encryption request context + * @res: The result of the encryption operation + */ + static void fscrypt_complete(struct crypto_async_request *req, int res) + { + struct fscrypt_completion_result *ecr = req->data; + + if (res == -EINPROGRESS) + return; + ecr->res = res; + complete(&ecr->completion); + } + + typedef enum { + FS_DECRYPT = 0, + FS_ENCRYPT, + } fscrypt_direction_t; + + static int do_page_crypto(struct inode *inode, + fscrypt_direction_t rw, pgoff_t index, + struct page *src_page, struct page *dest_page) + { + u8 xts_tweak[FS_XTS_TWEAK_SIZE]; - struct ablkcipher_request *req = NULL; ++ struct skcipher_request *req = NULL; + DECLARE_FS_COMPLETION_RESULT(ecr); + struct scatterlist dst, src; + struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_ablkcipher *tfm = ci->ci_ctfm; ++ struct crypto_skcipher *tfm = ci->ci_ctfm; + int res = 0; + - req = ablkcipher_request_alloc(tfm, GFP_NOFS); ++ req = skcipher_request_alloc(tfm, GFP_NOFS); + if (!req) { + printk_ratelimited(KERN_ERR + "%s: crypto_request_alloc() failed\n", + __func__); + return -ENOMEM; + } + - ablkcipher_request_set_callback( ++ skcipher_request_set_callback( + req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + fscrypt_complete, &ecr); + + BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index)); + memcpy(xts_tweak, &index, sizeof(index)); + memset(&xts_tweak[sizeof(index)], 0, + FS_XTS_TWEAK_SIZE - sizeof(index)); + + sg_init_table(&dst, 1); + sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); + sg_init_table(&src, 1); + sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); - ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, ++ skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, + xts_tweak); + if (rw == FS_DECRYPT) - res = crypto_ablkcipher_decrypt(req); ++ res = crypto_skcipher_decrypt(req); + else - res = crypto_ablkcipher_encrypt(req); ++ res = crypto_skcipher_encrypt(req); + if (res == -EINPROGRESS || res == -EBUSY) { + BUG_ON(req->base.data != &ecr); + wait_for_completion(&ecr.completion); + res = ecr.res; + } - ablkcipher_request_free(req); ++ skcipher_request_free(req); + if (res) { + printk_ratelimited(KERN_ERR - "%s: crypto_ablkcipher_encrypt() returned %d\n", ++ "%s: crypto_skcipher_encrypt() returned %d\n", + __func__, res); + return res; + } + return 0; + } + + static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx) + { + ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, + GFP_NOWAIT); + if (ctx->w.bounce_page == NULL) + return ERR_PTR(-ENOMEM); + ctx->flags |= FS_WRITE_PATH_FL; + return ctx->w.bounce_page; + } + + /** + * fscrypt_encrypt_page() - Encrypts a page + * @inode: The inode for which the encryption should take place + * @plaintext_page: The page to encrypt. Must be locked. + * + * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx + * encryption context. 
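+ *
+ * A write-path sketch (illustrative only; submit_page_for_io() is a
+ * hypothetical stand-in for the filesystem's own bio submission):
+ *
+ *	ciphertext_page = fscrypt_encrypt_page(inode, plaintext_page);
+ *	if (IS_ERR(ciphertext_page))
+ *		return PTR_ERR(ciphertext_page);
+ *	submit_page_for_io(ciphertext_page);
+ *	fscrypt_restore_control_page(ciphertext_page);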
+ * + * Called on the page write path. The caller must call + * fscrypt_restore_control_page() on the returned ciphertext page to + * release the bounce buffer and the encryption context. + * + * Return: An allocated page with the encrypted content on success. Else, an + * error value or NULL. + */ + struct page *fscrypt_encrypt_page(struct inode *inode, + struct page *plaintext_page) + { + struct fscrypt_ctx *ctx; + struct page *ciphertext_page = NULL; + int err; + + BUG_ON(!PageLocked(plaintext_page)); + + ctx = fscrypt_get_ctx(inode); + if (IS_ERR(ctx)) + return (struct page *)ctx; + + /* The encryption operation will require a bounce page. */ + ciphertext_page = alloc_bounce_page(ctx); + if (IS_ERR(ciphertext_page)) + goto errout; + + ctx->w.control_page = plaintext_page; + err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index, + plaintext_page, ciphertext_page); + if (err) { + ciphertext_page = ERR_PTR(err); + goto errout; + } + SetPagePrivate(ciphertext_page); + set_page_private(ciphertext_page, (unsigned long)ctx); + lock_page(ciphertext_page); + return ciphertext_page; + + errout: + fscrypt_release_ctx(ctx); + return ciphertext_page; + } + EXPORT_SYMBOL(fscrypt_encrypt_page); + + /** + * fscrypt_decrypt_page() - Decrypts a page in-place + * @page: The page to decrypt. Must be locked. + * + * Decrypts page in-place using the ctx encryption context. + * + * Called from the read completion callback. + * + * Return: Zero on success, non-zero otherwise. + */ + int fscrypt_decrypt_page(struct page *page) + { + BUG_ON(!PageLocked(page)); + + return do_page_crypto(page->mapping->host, + FS_DECRYPT, page->index, page, page); + } + EXPORT_SYMBOL(fscrypt_decrypt_page); + + int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len) + { + struct fscrypt_ctx *ctx; + struct page *ciphertext_page = NULL; + struct bio *bio; + int ret, err = 0; + + BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + + ctx = fscrypt_get_ctx(inode); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ciphertext_page = alloc_bounce_page(ctx); + if (IS_ERR(ciphertext_page)) { + err = PTR_ERR(ciphertext_page); + goto errout; + } + + while (len--) { + err = do_page_crypto(inode, FS_ENCRYPT, lblk, + ZERO_PAGE(0), ciphertext_page); + if (err) + goto errout; + + bio = bio_alloc(GFP_KERNEL, 1); + if (!bio) { + err = -ENOMEM; + goto errout; + } + bio->bi_bdev = inode->i_sb->s_bdev; + bio->bi_iter.bi_sector = + pblk << (inode->i_sb->s_blocksize_bits - 9); + ret = bio_add_page(bio, ciphertext_page, + inode->i_sb->s_blocksize, 0); + if (ret != inode->i_sb->s_blocksize) { + /* should never happen! */ + WARN_ON(1); + bio_put(bio); + err = -EIO; + goto errout; + } + err = submit_bio_wait(WRITE, bio); + if ((err == 0) && bio->bi_error) + err = -EIO; + bio_put(bio); + if (err) + goto errout; + lblk++; + pblk++; + } + err = 0; + errout: + fscrypt_release_ctx(ctx); + return err; + } + EXPORT_SYMBOL(fscrypt_zeroout_range); + + /* + * Validate dentries for encrypted directories to make sure we aren't + * potentially caching stale data after a key has been added or + * removed. 
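+ *
+ * A keyring key that has been revoked, invalidated, or marked dead is
+ * treated the same as an absent key: the KEY_FLAG_* test below clears
+ * ci so that dir_has_key evaluates to false.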
+ */ + static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) + { + struct inode *dir = d_inode(dentry->d_parent); + struct fscrypt_info *ci = dir->i_crypt_info; + int dir_has_key, cached_with_key; + + if (!dir->i_sb->s_cop->is_encrypted(dir)) + return 0; + + if (ci && ci->ci_keyring_key && + (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED) | + (1 << KEY_FLAG_DEAD)))) + ci = NULL; + + /* this should eventually be a flag in d_flags */ + spin_lock(&dentry->d_lock); + cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; + spin_unlock(&dentry->d_lock); + dir_has_key = (ci != NULL); + + /* + * If the dentry was cached without the key, and it is a + * negative dentry, it might be a valid name. We can't check + * if the key has since been made available due to locking + * reasons, so we fail the validation so ext4_lookup() can do + * this check. + * + * We also fail the validation if the dentry was created with + * the key present, but we no longer have the key, or vice versa. + */ + if ((!cached_with_key && d_is_negative(dentry)) || + (!cached_with_key && dir_has_key) || + (cached_with_key && !dir_has_key)) + return 0; + return 1; + } + + const struct dentry_operations fscrypt_d_ops = { + .d_revalidate = fscrypt_d_revalidate, + }; + EXPORT_SYMBOL(fscrypt_d_ops); + + /* + * Call fscrypt_decrypt_page on every single page, reusing the encryption + * context. + */ + static void completion_pages(struct work_struct *work) + { + struct fscrypt_ctx *ctx = + container_of(work, struct fscrypt_ctx, r.work); + struct bio *bio = ctx->r.bio; + struct bio_vec *bv; + int i; + + bio_for_each_segment_all(bv, bio, i) { + struct page *page = bv->bv_page; + int ret = fscrypt_decrypt_page(page); + + if (ret) { + WARN_ON_ONCE(1); + SetPageError(page); + } else { + SetPageUptodate(page); + } + unlock_page(page); + } + fscrypt_release_ctx(ctx); + bio_put(bio); + } + + void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio) + { + INIT_WORK(&ctx->r.work, completion_pages); + ctx->r.bio = bio; + queue_work(fscrypt_read_workqueue, &ctx->r.work); + } + EXPORT_SYMBOL(fscrypt_decrypt_bio_pages); + + void fscrypt_pullback_bio_page(struct page **page, bool restore) + { + struct fscrypt_ctx *ctx; + struct page *bounce_page; + + /* The bounce data pages are unmapped. */ + if ((*page)->mapping) + return; + + /* The bounce data page is unmapped. */ + bounce_page = *page; + ctx = (struct fscrypt_ctx *)page_private(bounce_page); + + /* restore control page */ + *page = ctx->w.control_page; + + if (restore) + fscrypt_restore_control_page(bounce_page); + } + EXPORT_SYMBOL(fscrypt_pullback_bio_page); + + void fscrypt_restore_control_page(struct page *page) + { + struct fscrypt_ctx *ctx; + + ctx = (struct fscrypt_ctx *)page_private(page); + set_page_private(page, (unsigned long)NULL); + ClearPagePrivate(page); + unlock_page(page); + fscrypt_release_ctx(ctx); + } + EXPORT_SYMBOL(fscrypt_restore_control_page); + + static void fscrypt_destroy(void) + { + struct fscrypt_ctx *pos, *n; + + list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list) + kmem_cache_free(fscrypt_ctx_cachep, pos); + INIT_LIST_HEAD(&fscrypt_free_ctxs); + mempool_destroy(fscrypt_bounce_page_pool); + fscrypt_bounce_page_pool = NULL; + } + + /** + * fscrypt_initialize() - allocate major buffers for fs encryption. + * + * We only call this when we start accessing encrypted files, since it + * results in memory getting allocated that wouldn't otherwise be used. 
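+ *
+ * The pools are set up with a check/lock/recheck sequence so that
+ * racing first users allocate them only once.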
+ * + * Return: Zero on success, non-zero otherwise. + */ + int fscrypt_initialize(void) + { + int i, res = -ENOMEM; + + if (fscrypt_bounce_page_pool) + return 0; + + mutex_lock(&fscrypt_init_mutex); + if (fscrypt_bounce_page_pool) + goto already_initialized; + + for (i = 0; i < num_prealloc_crypto_ctxs; i++) { + struct fscrypt_ctx *ctx; + + ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS); + if (!ctx) + goto fail; + list_add(&ctx->free_list, &fscrypt_free_ctxs); + } + + fscrypt_bounce_page_pool = + mempool_create_page_pool(num_prealloc_crypto_pages, 0); + if (!fscrypt_bounce_page_pool) + goto fail; + + already_initialized: + mutex_unlock(&fscrypt_init_mutex); + return 0; + fail: + fscrypt_destroy(); + mutex_unlock(&fscrypt_init_mutex); + return res; + } + EXPORT_SYMBOL(fscrypt_initialize); + + /** + * fscrypt_init() - Set up for fs encryption. + */ + static int __init fscrypt_init(void) + { + fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue", + WQ_HIGHPRI, 0); + if (!fscrypt_read_workqueue) + goto fail; + + fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT); + if (!fscrypt_ctx_cachep) + goto fail_free_queue; + + fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT); + if (!fscrypt_info_cachep) + goto fail_free_ctx; + + return 0; + + fail_free_ctx: + kmem_cache_destroy(fscrypt_ctx_cachep); + fail_free_queue: + destroy_workqueue(fscrypt_read_workqueue); + fail: + return -ENOMEM; + } + module_init(fscrypt_init) + + /** + * fscrypt_exit() - Shutdown the fs encryption system + */ + static void __exit fscrypt_exit(void) + { + fscrypt_destroy(); + + if (fscrypt_read_workqueue) + destroy_workqueue(fscrypt_read_workqueue); + kmem_cache_destroy(fscrypt_ctx_cachep); + kmem_cache_destroy(fscrypt_info_cachep); + } + module_exit(fscrypt_exit); + + MODULE_LICENSE("GPL"); diff --cc fs/crypto/fname.c index 16aec66,5e4ddee..5d6d491 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@@ -15,30 -9,27 +9,24 @@@ * * This has not yet undergone a rigorous security audit. */ - #include + -#include -#include #include #include - #include - #include - #include - #include - #include - #include -#include #include - #include - #include #include + #include - #include "f2fs.h" - #include "f2fs_crypto.h" - #include "xattr.h" + static u32 size_round_up(size_t size, size_t blksize) + { + return ((size + blksize - 1) / blksize) * blksize; + } /** - * f2fs_dir_crypt_complete() - + * dir_crypt_complete() - */ - static void f2fs_dir_crypt_complete(struct crypto_async_request *req, int res) + static void dir_crypt_complete(struct crypto_async_request *req, int res) { - struct f2fs_completion_result *ecr = req->data; + struct fscrypt_completion_result *ecr = req->data; if (res == -EINPROGRESS) return; @@@ -64,21 -44,22 +41,22 @@@ * ciphertext. Errors are returned as negative numbers. We trust the caller to * allocate sufficient memory to oname string. 
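*
* Worked padding example (illustrative): with FS_POLICY_FLAGS_PAD_32 the
* padding below is 4 << 3 = 32 bytes, so a 13-byte name is first widened
* to FS_CRYPTO_BLOCK_SIZE (16) and size_round_up() then yields a 32-byte
* ciphertext.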
*/ - static int f2fs_fname_encrypt(struct inode *inode, - const struct qstr *iname, struct f2fs_str *oname) + static int fname_encrypt(struct inode *inode, + const struct qstr *iname, struct fscrypt_str *oname) { u32 ciphertext_len; - struct ablkcipher_request *req = NULL; + struct skcipher_request *req = NULL; - DECLARE_F2FS_COMPLETION_RESULT(ecr); - struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info; + DECLARE_FS_COMPLETION_RESULT(ecr); + struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_ablkcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; - char iv[F2FS_CRYPTO_BLOCK_SIZE]; + char iv[FS_CRYPTO_BLOCK_SIZE]; struct scatterlist src_sg, dst_sg; - int padding = 4 << (ci->ci_flags & F2FS_POLICY_FLAGS_PAD_MASK); + int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); char *workbuf, buf[32], *alloc_buf = NULL; - unsigned lim = max_name_len(inode); + unsigned lim; + lim = inode->i_sb->s_cop->max_namelen(inode); if (iname->len <= 0 || iname->len > lim) return -EIO; @@@ -104,9 -85,9 +82,9 @@@ kfree(alloc_buf); return -ENOMEM; } - ablkcipher_request_set_callback(req, + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - f2fs_dir_crypt_complete, &ecr); + dir_crypt_complete, &ecr); /* Copy the input */ memcpy(workbuf, iname->name, iname->len); @@@ -119,19 -100,18 +97,18 @@@ /* Create encryption request */ sg_init_one(&src_sg, workbuf, ciphertext_len); sg_init_one(&dst_sg, oname->name, ciphertext_len); - ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv); - res = crypto_ablkcipher_encrypt(req); + skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv); + res = crypto_skcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { - BUG_ON(req->base.data != &ecr); wait_for_completion(&ecr.completion); res = ecr.res; } kfree(alloc_buf); - ablkcipher_request_free(req); + skcipher_request_free(req); - if (res < 0) { + if (res < 0) printk_ratelimited(KERN_ERR "%s: Error (error code %d)\n", __func__, res); - } + oname->len = ciphertext_len; return res; } @@@ -143,18 -123,20 +120,20 @@@ * Errors are returned as negative numbers. * We trust the caller to allocate sufficient memory to oname string. 
*/ - static int f2fs_fname_decrypt(struct inode *inode, - const struct f2fs_str *iname, struct f2fs_str *oname) + static int fname_decrypt(struct inode *inode, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) { - struct ablkcipher_request *req = NULL; + struct skcipher_request *req = NULL; - DECLARE_F2FS_COMPLETION_RESULT(ecr); + DECLARE_FS_COMPLETION_RESULT(ecr); struct scatterlist src_sg, dst_sg; - struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info; + struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_ablkcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; - char iv[F2FS_CRYPTO_BLOCK_SIZE]; - unsigned lim = max_name_len(inode); + char iv[FS_CRYPTO_BLOCK_SIZE]; + unsigned lim; + lim = inode->i_sb->s_cop->max_namelen(inode); if (iname->len <= 0 || iname->len > lim) return -EIO; @@@ -165,28 -147,26 +144,26 @@@ "%s: crypto_request_alloc() failed\n", __func__); return -ENOMEM; } - ablkcipher_request_set_callback(req, + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - f2fs_dir_crypt_complete, &ecr); + dir_crypt_complete, &ecr); /* Initialize IV */ - memset(iv, 0, F2FS_CRYPTO_BLOCK_SIZE); + memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); /* Create decryption request */ sg_init_one(&src_sg, iname->name, iname->len); sg_init_one(&dst_sg, oname->name, oname->len); - ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); - res = crypto_ablkcipher_decrypt(req); + skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv); + res = crypto_skcipher_decrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { - BUG_ON(req->base.data != &ecr); wait_for_completion(&ecr.completion); res = ecr.res; } - ablkcipher_request_free(req); + skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR - "%s: Error in f2fs_fname_decrypt (error code %d)\n", - __func__, res); + "%s: Error (error code %d)\n", __func__, res); return res; } diff --cc fs/crypto/keyinfo.c index 0000000,cb61842..06f5aa4 mode 000000,100644..100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@@ -1,0 -1,278 +1,272 @@@ + /* + * key management facility for FS encryption support. + * + * Copyright (C) 2015, Google, Inc. + * + * This contains encryption key functions. + * + * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015. + */ + + #include + #include + #include + #include + #include -#include + #include + + static void derive_crypt_complete(struct crypto_async_request *req, int rc) + { + struct fscrypt_completion_result *ecr = req->data; + + if (rc == -EINPROGRESS) + return; + + ecr->res = rc; + complete(&ecr->completion); + } + + /** + * derive_key_aes() - Derive a key using AES-128-ECB + * @deriving_key: Encryption key used for derivation. + * @source_key: Source key to which to apply derivation. + * @derived_key: Derived key. + * + * Return: Zero on success; non-zero otherwise. 
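+ *
+ * Equivalently: derived_key = AES-128-ECB(key = deriving_key,
+ * plaintext = source_key). The 16-byte nonce from the on-disk context
+ * acts as the ECB key and encrypts the 64-byte master key into the
+ * per-inode key (see the derive_key_aes(ctx.nonce, master_key->raw,
+ * raw_key) call below).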
+ */ + static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE], + u8 source_key[FS_AES_256_XTS_KEY_SIZE], + u8 derived_key[FS_AES_256_XTS_KEY_SIZE]) + { + int res = 0; - struct ablkcipher_request *req = NULL; ++ struct skcipher_request *req = NULL; + DECLARE_FS_COMPLETION_RESULT(ecr); + struct scatterlist src_sg, dst_sg; - struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, - 0); ++ struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0); + + if (IS_ERR(tfm)) { + res = PTR_ERR(tfm); + tfm = NULL; + goto out; + } - crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); - req = ablkcipher_request_alloc(tfm, GFP_NOFS); ++ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY); ++ req = skcipher_request_alloc(tfm, GFP_NOFS); + if (!req) { + res = -ENOMEM; + goto out; + } - ablkcipher_request_set_callback(req, ++ skcipher_request_set_callback(req, + CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, + derive_crypt_complete, &ecr); - res = crypto_ablkcipher_setkey(tfm, deriving_key, ++ res = crypto_skcipher_setkey(tfm, deriving_key, + FS_AES_128_ECB_KEY_SIZE); + if (res < 0) + goto out; + + sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE); + sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE); - ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ++ skcipher_request_set_crypt(req, &src_sg, &dst_sg, + FS_AES_256_XTS_KEY_SIZE, NULL); - res = crypto_ablkcipher_encrypt(req); ++ res = crypto_skcipher_encrypt(req); + if (res == -EINPROGRESS || res == -EBUSY) { + wait_for_completion(&ecr.completion); + res = ecr.res; + } + out: - if (req) - ablkcipher_request_free(req); - if (tfm) - crypto_free_ablkcipher(tfm); ++ skcipher_request_free(req); ++ crypto_free_skcipher(tfm); + return res; + } + + static void put_crypt_info(struct fscrypt_info *ci) + { + if (!ci) + return; + - if (ci->ci_keyring_key) - key_put(ci->ci_keyring_key); - crypto_free_ablkcipher(ci->ci_ctfm); ++ key_put(ci->ci_keyring_key); ++ crypto_free_skcipher(ci->ci_ctfm); + kmem_cache_free(fscrypt_info_cachep, ci); + } + + int get_crypt_info(struct inode *inode) + { + struct fscrypt_info *crypt_info; + u8 full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE + + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1]; + struct key *keyring_key = NULL; + struct fscrypt_key *master_key; + struct fscrypt_context ctx; + const struct user_key_payload *ukp; - struct crypto_ablkcipher *ctfm; ++ struct crypto_skcipher *ctfm; + const char *cipher_str; + u8 raw_key[FS_MAX_KEY_SIZE]; + u8 mode; + int res; + + res = fscrypt_initialize(); + if (res) + return res; + + if (!inode->i_sb->s_cop->get_context) + return -EOPNOTSUPP; + retry: + crypt_info = ACCESS_ONCE(inode->i_crypt_info); + if (crypt_info) { + if (!crypt_info->ci_keyring_key || + key_validate(crypt_info->ci_keyring_key) == 0) + return 0; + fscrypt_put_encryption_info(inode, crypt_info); + goto retry; + } + + res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (res < 0) { + if (!fscrypt_dummy_context_enabled(inode)) + return res; + ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; + ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; + ctx.flags = 0; + } else if (res != sizeof(ctx)) { + return -EINVAL; + } + res = 0; + + crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS); + if (!crypt_info) + return -ENOMEM; + + crypt_info->ci_flags = ctx.flags; + crypt_info->ci_data_mode = ctx.contents_encryption_mode; + crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; + crypt_info->ci_ctfm = NULL; + 
crypt_info->ci_keyring_key = NULL; + memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, + sizeof(crypt_info->ci_master_key)); + if (S_ISREG(inode->i_mode)) + mode = crypt_info->ci_data_mode; + else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + mode = crypt_info->ci_filename_mode; + else + BUG(); + + switch (mode) { + case FS_ENCRYPTION_MODE_AES_256_XTS: + cipher_str = "xts(aes)"; + break; + case FS_ENCRYPTION_MODE_AES_256_CTS: + cipher_str = "cts(cbc(aes))"; + break; + default: + printk_once(KERN_WARNING + "%s: unsupported key mode %d (ino %u)\n", + __func__, mode, (unsigned) inode->i_ino); + res = -ENOKEY; + goto out; + } + if (fscrypt_dummy_context_enabled(inode)) { + memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); + goto got_key; + } + memcpy(full_key_descriptor, FS_KEY_DESC_PREFIX, + FS_KEY_DESC_PREFIX_SIZE); + sprintf(full_key_descriptor + FS_KEY_DESC_PREFIX_SIZE, + "%*phN", FS_KEY_DESCRIPTOR_SIZE, + ctx.master_key_descriptor); + full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE + + (2 * FS_KEY_DESCRIPTOR_SIZE)] = '\0'; + keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); + if (IS_ERR(keyring_key)) { + res = PTR_ERR(keyring_key); + keyring_key = NULL; + goto out; + } + crypt_info->ci_keyring_key = keyring_key; + if (keyring_key->type != &key_type_logon) { + printk_once(KERN_WARNING + "%s: key type must be logon\n", __func__); + res = -ENOKEY; + goto out; + } + down_read(&keyring_key->sem); + ukp = user_key_payload(keyring_key); + if (ukp->datalen != sizeof(struct fscrypt_key)) { + res = -EINVAL; + up_read(&keyring_key->sem); + goto out; + } + master_key = (struct fscrypt_key *)ukp->data; + BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE); + + if (master_key->size != FS_AES_256_XTS_KEY_SIZE) { + printk_once(KERN_WARNING + "%s: key size incorrect: %d\n", + __func__, master_key->size); + res = -ENOKEY; + up_read(&keyring_key->sem); + goto out; + } + res = derive_key_aes(ctx.nonce, master_key->raw, raw_key); + up_read(&keyring_key->sem); + if (res) + goto out; + got_key: - ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0); ++ ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); + if (!ctfm || IS_ERR(ctfm)) { + res = ctfm ? 
PTR_ERR(ctfm) : -ENOMEM; + printk(KERN_DEBUG + "%s: error %d (inode %u) allocating crypto tfm\n", + __func__, res, (unsigned) inode->i_ino); + goto out; + } + crypt_info->ci_ctfm = ctfm; - crypto_ablkcipher_clear_flags(ctfm, ~0); - crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm), - CRYPTO_TFM_REQ_WEAK_KEY); - res = crypto_ablkcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode)); ++ crypto_skcipher_clear_flags(ctfm, ~0); ++ crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY); ++ res = crypto_skcipher_setkey(ctfm, raw_key, fscrypt_key_size(mode)); + if (res) + goto out; + + memzero_explicit(raw_key, sizeof(raw_key)); + if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { + put_crypt_info(crypt_info); + goto retry; + } + return 0; + + out: + if (res == -ENOKEY) + res = 0; + put_crypt_info(crypt_info); + memzero_explicit(raw_key, sizeof(raw_key)); + return res; + } + + void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) + { + struct fscrypt_info *prev; + + if (ci == NULL) + ci = ACCESS_ONCE(inode->i_crypt_info); + if (ci == NULL) + return; + + prev = cmpxchg(&inode->i_crypt_info, ci, NULL); + if (prev != ci) + return; + + put_crypt_info(ci); + } + EXPORT_SYMBOL(fscrypt_put_encryption_info); + + int fscrypt_get_encryption_info(struct inode *inode) + { + struct fscrypt_info *ci = inode->i_crypt_info; + + if (!ci || + (ci->ci_keyring_key && + (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED) | + (1 << KEY_FLAG_DEAD))))) + return get_crypt_info(inode); + return 0; + } + EXPORT_SYMBOL(fscrypt_get_encryption_info); diff --cc include/linux/fscrypto.h index 0000000,895cdac..cd91f75 mode 000000,100644..100644 --- a/include/linux/fscrypto.h +++ b/include/linux/fscrypto.h @@@ -1,0 -1,433 +1,434 @@@ + /* + * General per-file encryption definition + * + * Copyright (C) 2015, Google, Inc. + * + * Written by Michael Halcrow, 2015. + * Modified by Jaegeuk Kim, 2015. 
+ */ + + #ifndef _LINUX_FSCRYPTO_H + #define _LINUX_FSCRYPTO_H + + #include + #include + #include + #include + #include ++#include + #include + + #define FS_KEY_DERIVATION_NONCE_SIZE 16 + #define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 + + #define FS_POLICY_FLAGS_PAD_4 0x00 + #define FS_POLICY_FLAGS_PAD_8 0x01 + #define FS_POLICY_FLAGS_PAD_16 0x02 + #define FS_POLICY_FLAGS_PAD_32 0x03 + #define FS_POLICY_FLAGS_PAD_MASK 0x03 + #define FS_POLICY_FLAGS_VALID 0x03 + + /* Encryption algorithms */ + #define FS_ENCRYPTION_MODE_INVALID 0 + #define FS_ENCRYPTION_MODE_AES_256_XTS 1 + #define FS_ENCRYPTION_MODE_AES_256_GCM 2 + #define FS_ENCRYPTION_MODE_AES_256_CBC 3 + #define FS_ENCRYPTION_MODE_AES_256_CTS 4 + + /** + * Encryption context for inode + * + * Protector format: + * 1 byte: Protector format (1 = this version) + * 1 byte: File contents encryption mode + * 1 byte: File names encryption mode + * 1 byte: Flags + * 8 bytes: Master Key descriptor + * 16 bytes: Encryption Key derivation nonce + */ + struct fscrypt_context { + u8 format; + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + } __packed; + + /* Encryption parameters */ + #define FS_XTS_TWEAK_SIZE 16 + #define FS_AES_128_ECB_KEY_SIZE 16 + #define FS_AES_256_GCM_KEY_SIZE 32 + #define FS_AES_256_CBC_KEY_SIZE 32 + #define FS_AES_256_CTS_KEY_SIZE 32 + #define FS_AES_256_XTS_KEY_SIZE 64 + #define FS_MAX_KEY_SIZE 64 + + #define FS_KEY_DESC_PREFIX "fscrypt:" + #define FS_KEY_DESC_PREFIX_SIZE 8 + + /* This is passed in from userspace into the kernel keyring */ + struct fscrypt_key { + u32 mode; + u8 raw[FS_MAX_KEY_SIZE]; + u32 size; + } __packed; + + struct fscrypt_info { + u8 ci_data_mode; + u8 ci_filename_mode; + u8 ci_flags; - struct crypto_ablkcipher *ci_ctfm; ++ struct crypto_skcipher *ci_ctfm; + struct key *ci_keyring_key; + u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; + }; + + #define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001 + #define FS_WRITE_PATH_FL 0x00000002 + + struct fscrypt_ctx { + union { + struct { + struct page *bounce_page; /* Ciphertext page */ + struct page *control_page; /* Original page */ + } w; + struct { + struct bio *bio; + struct work_struct work; + } r; + struct list_head free_list; /* Free list */ + }; + u8 flags; /* Flags */ + u8 mode; /* Encryption mode for tfm */ + }; + + struct fscrypt_completion_result { + struct completion completion; + int res; + }; + + #define DECLARE_FS_COMPLETION_RESULT(ecr) \ + struct fscrypt_completion_result ecr = { \ + COMPLETION_INITIALIZER((ecr).completion), 0 } + + static inline int fscrypt_key_size(int mode) + { + switch (mode) { + case FS_ENCRYPTION_MODE_AES_256_XTS: + return FS_AES_256_XTS_KEY_SIZE; + case FS_ENCRYPTION_MODE_AES_256_GCM: + return FS_AES_256_GCM_KEY_SIZE; + case FS_ENCRYPTION_MODE_AES_256_CBC: + return FS_AES_256_CBC_KEY_SIZE; + case FS_ENCRYPTION_MODE_AES_256_CTS: + return FS_AES_256_CTS_KEY_SIZE; + default: + BUG(); + } + return 0; + } + + #define FS_FNAME_NUM_SCATTER_ENTRIES 4 + #define FS_CRYPTO_BLOCK_SIZE 16 + #define FS_FNAME_CRYPTO_DIGEST_SIZE 32 + + /** + * For encrypted symlinks, the ciphertext length is stored at the beginning + * of the string in little-endian format. + */ + struct fscrypt_symlink_data { + __le16 len; + char encrypted_path[1]; + } __packed; + + /** + * This function is used to calculate the disk space required to + * store a filename of length l in encrypted symlink format. 
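+ *
+ * Worked example: the __packed struct above is 3 bytes (a __le16 length
+ * plus a 1-byte array), so a 5-byte target is padded to
+ * FS_CRYPTO_BLOCK_SIZE (16) and needs 16 + 3 - 1 = 18 bytes on disk.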
+ */ + static inline u32 fscrypt_symlink_data_len(u32 l) + { + if (l < FS_CRYPTO_BLOCK_SIZE) + l = FS_CRYPTO_BLOCK_SIZE; + return (l + sizeof(struct fscrypt_symlink_data) - 1); + } + + struct fscrypt_str { + unsigned char *name; + u32 len; + }; + + struct fscrypt_name { + const struct qstr *usr_fname; + struct fscrypt_str disk_name; + u32 hash; + u32 minor_hash; + struct fscrypt_str crypto_buf; + }; + + #define FSTR_INIT(n, l) { .name = n, .len = l } + #define FSTR_TO_QSTR(f) QSTR_INIT((f)->name, (f)->len) + #define fname_name(p) ((p)->disk_name.name) + #define fname_len(p) ((p)->disk_name.len) + + /* + * crypto operations for filesystems + */ + struct fscrypt_operations { + int (*get_context)(struct inode *, void *, size_t); + int (*prepare_context)(struct inode *); + int (*set_context)(struct inode *, const void *, size_t, void *); + int (*dummy_context)(struct inode *); + bool (*is_encrypted)(struct inode *); + bool (*empty_dir)(struct inode *); + unsigned (*max_namelen)(struct inode *); + }; + + static inline bool fscrypt_dummy_context_enabled(struct inode *inode) + { + if (inode->i_sb->s_cop->dummy_context && + inode->i_sb->s_cop->dummy_context(inode)) + return true; + return false; + } + + static inline bool fscrypt_valid_contents_enc_mode(u32 mode) + { + return (mode == FS_ENCRYPTION_MODE_AES_256_XTS); + } + + static inline bool fscrypt_valid_filenames_enc_mode(u32 mode) + { + return (mode == FS_ENCRYPTION_MODE_AES_256_CTS); + } + + static inline u32 fscrypt_validate_encryption_key_size(u32 mode, u32 size) + { + if (size == fscrypt_key_size(mode)) + return size; + return 0; + } + + static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) + { + if (str->len == 1 && str->name[0] == '.') + return true; + + if (str->len == 2 && str->name[0] == '.' 
&& str->name[1] == '.') + return true; + + return false; + } + + static inline struct page *fscrypt_control_page(struct page *page) + { + #if IS_ENABLED(CONFIG_FS_ENCRYPTION) + return ((struct fscrypt_ctx *)page_private(page))->w.control_page; + #else + WARN_ON_ONCE(1); + return ERR_PTR(-EINVAL); + #endif + } + + static inline int fscrypt_has_encryption_key(struct inode *inode) + { + #if IS_ENABLED(CONFIG_FS_ENCRYPTION) + return (inode->i_crypt_info != NULL); + #else + return 0; + #endif + } + + static inline void fscrypt_set_encrypted_dentry(struct dentry *dentry) + { + #if IS_ENABLED(CONFIG_FS_ENCRYPTION) + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_ENCRYPTED_WITH_KEY; + spin_unlock(&dentry->d_lock); + #endif + } + + #if IS_ENABLED(CONFIG_FS_ENCRYPTION) + extern const struct dentry_operations fscrypt_d_ops; + #endif + + static inline void fscrypt_set_d_op(struct dentry *dentry) + { + #if IS_ENABLED(CONFIG_FS_ENCRYPTION) + d_set_d_op(dentry, &fscrypt_d_ops); + #endif + } + + #if IS_ENABLED(CONFIG_FS_ENCRYPTION) + /* crypto.c */ + extern struct kmem_cache *fscrypt_info_cachep; + int fscrypt_initialize(void); + + extern struct fscrypt_ctx *fscrypt_get_ctx(struct inode *); + extern void fscrypt_release_ctx(struct fscrypt_ctx *); + extern struct page *fscrypt_encrypt_page(struct inode *, struct page *); + extern int fscrypt_decrypt_page(struct page *); + extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *); + extern void fscrypt_pullback_bio_page(struct page **, bool); + extern void fscrypt_restore_control_page(struct page *); + extern int fscrypt_zeroout_range(struct inode *, pgoff_t, sector_t, + unsigned int); + /* policy.c */ + extern int fscrypt_process_policy(struct inode *, + const struct fscrypt_policy *); + extern int fscrypt_get_policy(struct inode *, struct fscrypt_policy *); + extern int fscrypt_has_permitted_context(struct inode *, struct inode *); + extern int fscrypt_inherit_context(struct inode *, struct inode *, + void *, bool); + /* keyinfo.c */ + extern int get_crypt_info(struct inode *); + extern int fscrypt_get_encryption_info(struct inode *); + extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); + + /* fname.c */ + extern int fscrypt_setup_filename(struct inode *, const struct qstr *, + int lookup, struct fscrypt_name *); + extern void fscrypt_free_filename(struct fscrypt_name *); + extern u32 fscrypt_fname_encrypted_size(struct inode *, u32); + extern int fscrypt_fname_alloc_buffer(struct inode *, u32, + struct fscrypt_str *); + extern void fscrypt_fname_free_buffer(struct fscrypt_str *); + extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, + const struct fscrypt_str *, struct fscrypt_str *); + extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, + struct fscrypt_str *); + #endif + + /* crypto.c */ + static inline struct fscrypt_ctx *fscrypt_notsupp_get_ctx(struct inode *i) + { + return ERR_PTR(-EOPNOTSUPP); + } + + static inline void fscrypt_notsupp_release_ctx(struct fscrypt_ctx *c) + { + return; + } + + static inline struct page *fscrypt_notsupp_encrypt_page(struct inode *i, + struct page *p) + { + return ERR_PTR(-EOPNOTSUPP); + } + + static inline int fscrypt_notsupp_decrypt_page(struct page *p) + { + return -EOPNOTSUPP; + } + + static inline void fscrypt_notsupp_decrypt_bio_pages(struct fscrypt_ctx *c, + struct bio *b) + { + return; + } + + static inline void fscrypt_notsupp_pullback_bio_page(struct page **p, bool b) + { + return; + } + + static inline void 
fscrypt_notsupp_restore_control_page(struct page *p) + { + return; + } + + static inline int fscrypt_notsupp_zeroout_range(struct inode *i, pgoff_t p, + sector_t s, unsigned int f) + { + return -EOPNOTSUPP; + } + + /* policy.c */ + static inline int fscrypt_notsupp_process_policy(struct inode *i, + const struct fscrypt_policy *p) + { + return -EOPNOTSUPP; + } + + static inline int fscrypt_notsupp_get_policy(struct inode *i, + struct fscrypt_policy *p) + { + return -EOPNOTSUPP; + } + + static inline int fscrypt_notsupp_has_permitted_context(struct inode *p, + struct inode *i) + { + return 0; + } + + static inline int fscrypt_notsupp_inherit_context(struct inode *p, + struct inode *i, void *v, bool b) + { + return -EOPNOTSUPP; + } + + /* keyinfo.c */ + static inline int fscrypt_notsupp_get_encryption_info(struct inode *i) + { + return -EOPNOTSUPP; + } + + static inline void fscrypt_notsupp_put_encryption_info(struct inode *i, + struct fscrypt_info *f) + { + return; + } + + /* fname.c */ + static inline int fscrypt_notsupp_setup_filename(struct inode *dir, + const struct qstr *iname, + int lookup, struct fscrypt_name *fname) + { + if (dir->i_sb->s_cop->is_encrypted(dir)) + return -EOPNOTSUPP; + + memset(fname, 0, sizeof(struct fscrypt_name)); + fname->usr_fname = iname; + fname->disk_name.name = (unsigned char *)iname->name; + fname->disk_name.len = iname->len; + return 0; + } + + static inline void fscrypt_notsupp_free_filename(struct fscrypt_name *fname) + { + return; + } + + static inline u32 fscrypt_notsupp_fname_encrypted_size(struct inode *i, u32 s) + { + /* never happens */ + WARN_ON(1); + return 0; + } + + static inline int fscrypt_notsupp_fname_alloc_buffer(struct inode *inode, + u32 ilen, struct fscrypt_str *crypto_str) + { + return -EOPNOTSUPP; + } + + static inline void fscrypt_notsupp_fname_free_buffer(struct fscrypt_str *c) + { + return; + } + + static inline int fscrypt_notsupp_fname_disk_to_usr(struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname) + { + return -EOPNOTSUPP; + } + + static inline int fscrypt_notsupp_fname_usr_to_disk(struct inode *inode, + const struct qstr *iname, + struct fscrypt_str *oname) + { + return -EOPNOTSUPP; + } + #endif /* _LINUX_FSCRYPTO_H */
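
For context, everything a filesystem must supply to use fs/crypto is the fscrypt_operations table declared above, hooked up through its super_block. Below is a minimal sketch of that wiring, assuming hypothetical myfs_* callbacks; none of these names appear in this merge, and a real filesystem would load and persist the fscrypt_context (typically via an xattr) instead of returning stub values.

#include <linux/fs.h>
#include <linux/fscrypto.h>

/* Hypothetical stubs: real implementations read/write the on-disk
 * fscrypt_context and test a per-inode encryption flag. */
static int myfs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return -ENODATA;		/* no context stored yet */
}

static int myfs_set_context(struct inode *inode, const void *ctx,
			    size_t len, void *fs_data)
{
	return -EOPNOTSUPP;		/* persist ctx here */
}

static bool myfs_is_encrypted(struct inode *inode)
{
	return false;			/* check the inode's encrypt flag */
}

static bool myfs_empty_dir(struct inode *inode)
{
	return true;			/* scan the directory in real code */
}

static unsigned myfs_max_namelen(struct inode *inode)
{
	return 255;			/* the filesystem's NAME_MAX equivalent */
}

static const struct fscrypt_operations myfs_cryptops = {
	.get_context	= myfs_get_context,
	.set_context	= myfs_set_context,
	.is_encrypted	= myfs_is_encrypted,
	.empty_dir	= myfs_empty_dir,
	.max_namelen	= myfs_max_namelen,
};

/* At mount time, before any encrypted inode is opened: */
static void myfs_enable_crypto(struct super_block *sb)
{
	sb->s_cop = &myfs_cryptops;
}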