#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
-#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
return max(start, end_page);
}
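+/* Map a generic crypto_alg back to the skcipher_alg that embeds it. */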
+static inline struct skcipher_alg *__crypto_skcipher_alg(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct skcipher_alg, base);
+}
+
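+/*
+ * Return the algorithm's statistics block, or NULL when the kernel is
+ * built without CONFIG_CRYPTO_STATS.  Callers check IS_ENABLED() before
+ * dereferencing the result.
+ */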
+static inline struct crypto_istat_cipher *skcipher_get_stat(
+ struct skcipher_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ return &alg->stat;
+#else
+ return NULL;
+#endif
+}
+
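+/*
+ * Account a failed request in err_cnt when statistics are enabled.
+ * -EINPROGRESS and -EBUSY only mean the request was queued, not that it
+ * failed, so they are not counted.  The return value is passed through
+ * unchanged.
+ */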
+static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
+{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+ return err;
+
+ if (err && err != -EINPROGRESS && err != -EBUSY)
+ atomic64_inc(&istat->err_cnt);
+
+ return err;
+}
+
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
u8 *addr;
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
- crypto_stats_get(alg);
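+ /*
+  * Count the operation and its length up front; failures are accounted
+  * separately in crypto_skcipher_errstat() on the way out.
+  */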
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->encrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->encrypt_tlen);
+ }
+
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_skcipher_alg(tfm)->encrypt(req);
- crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
- return ret;
+ ret = alg->encrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);
int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_alg *alg = tfm->base.__crt_alg;
- unsigned int cryptlen = req->cryptlen;
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
int ret;
- crypto_stats_get(alg);
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
+
+ atomic64_inc(&istat->decrypt_cnt);
+ atomic64_add(req->cryptlen, &istat->decrypt_tlen);
+ }
+
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
- ret = crypto_skcipher_alg(tfm)->decrypt(req);
- crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
- return ret;
+ ret = alg->decrypt(req);
+
+ return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
seq_printf(m, "type : skcipher\n");
seq_printf(m, "async : %s\n",
#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
struct crypto_report_blkcipher rblkcipher;
- struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
- base);
memset(&rblkcipher, 0, sizeof(rblkcipher));
}
#endif
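+/*
+ * Report the accumulated counters to userspace as a CRYPTOCFGA_STAT_CIPHER
+ * netlink attribute.  Marked __maybe_unused because it is only wired into
+ * crypto_skcipher_type below when CONFIG_CRYPTO_STATS is enabled.
+ */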
+static int __maybe_unused crypto_skcipher_report_stat(
+ struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
+ struct crypto_istat_cipher *istat;
+ struct crypto_stat_cipher rcipher;
+
+ istat = skcipher_get_stat(skcipher);
+
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
+}
+
static const struct crypto_type crypto_skcipher_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_skcipher_init_tfm,
.show = crypto_skcipher_show,
#endif
.report = crypto_skcipher_report,
+#ifdef CONFIG_CRYPTO_STATS
+ .report_stat = crypto_skcipher_report_stat,
+#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SKCIPHER,
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
+ struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;
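+ /* Start every newly registered algorithm with zeroed statistics. */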
+ if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+ memset(istat, 0, sizeof(*istat));
+
return 0;
}
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
+#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/slab.h>
struct crypto_skcipher base;
};
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
* @walksize: Equal to the chunk size except in cases where the algorithm is
* considerably more efficient if it can operate on multiple chunks
* in parallel. Should be a multiple of chunksize.
+ * @stat: Statistics for cipher algorithm
* @base: Definition of a generic crypto algorithm.
*
* All fields except @ivsize are mandatory and must be filled.
unsigned int chunksize;
unsigned int walksize;
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_istat_cipher stat;
+#endif
+
struct crypto_alg base;
};
#ifdef CONFIG_CRYPTO_STATS
/*
- * struct crypto_istat_cipher - statistics for cipher algorithm
- * @encrypt_cnt: number of encrypt requests
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @err_cnt: number of error for cipher requests
- */
-struct crypto_istat_cipher {
- atomic64_t encrypt_cnt;
- atomic64_t encrypt_tlen;
- atomic64_t decrypt_cnt;
- atomic64_t decrypt_tlen;
- atomic64_t err_cnt;
-};
-
-/*
* struct crypto_istat_rng: statistics for RNG algorithm
* @generate_cnt: number of RNG generate requests
* @generate_tlen: total data size of generated data by the RNG
* @cra_destroy: internally used
*
* @stats: union of all possible crypto_istat_xxx structures
- * @stats.cipher: statistics for cipher algorithm
* @stats.rng: statistics for rng algorithm
*
* The struct crypto_alg describes a generic Crypto API algorithm and is common
#ifdef CONFIG_CRYPTO_STATS
union {
- struct crypto_istat_cipher cipher;
struct crypto_istat_rng rng;
} stats;
#endif /* CONFIG_CRYPTO_STATS */
void crypto_stats_get(struct crypto_alg *alg);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
-void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
-void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
#else
static inline void crypto_stats_init(struct crypto_alg *alg)
{}
{}
static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
{}
-static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
-{}
#endif
/*
* A helper struct for waiting for completion of async crypto ops