/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Asynchronous Compression operations
 *
 * Copyright (c) 2016, Intel Corporation
 * Authors: Weigang Li <weigang.li@intel.com>
 *          Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */
#ifndef _CRYPTO_ACOMP_H
#define _CRYPTO_ACOMP_H

#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/crypto.h>

#define CRYPTO_ACOMP_ALLOC_OUTPUT	0x00000001
#define CRYPTO_ACOMP_DST_MAX		131072

/**
 * struct acomp_req - asynchronous (de)compression request
 *
 * @base: Common attributes for asynchronous crypto requests
 * @src: Source data
 * @dst: Destination data
 * @slen: Size of the input buffer
 * @dlen: Size of the output buffer and number of bytes produced
 * @flags: Internal flags
 * @__ctx: Start of private context data
 */
struct acomp_req {
	struct crypto_async_request base;
	struct scatterlist *src;
	struct scatterlist *dst;
	unsigned int slen;
	unsigned int dlen;
	u32 flags;
	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

/**
 * struct crypto_acomp - user-instantiated objects which encapsulate
 * algorithms and core processing logic
 *
 * @compress: Function performs a compress operation
 * @decompress: Function performs a de-compress operation
 * @dst_free: Frees destination buffer if allocated inside the algorithm
 * @reqsize: Context size for (de)compression requests
 * @base: Common crypto API algorithm data structure
 */
struct crypto_acomp {
	int (*compress)(struct acomp_req *req);
	int (*decompress)(struct acomp_req *req);
	void (*dst_free)(struct scatterlist *dst);
	unsigned int reqsize;
	struct crypto_tfm base;
};

/**
 * struct crypto_istat_compress - statistics for compress algorithm
 * @compress_cnt: number of compress requests
 * @compress_tlen: total data size handled by compress requests
 * @decompress_cnt: number of decompress requests
 * @decompress_tlen: total data size handled by decompress requests
 * @err_cnt: number of errors for compress requests
 */
struct crypto_istat_compress {
	atomic64_t compress_cnt;
	atomic64_t compress_tlen;
	atomic64_t decompress_cnt;
	atomic64_t decompress_tlen;
	atomic64_t err_cnt;
};

#ifdef CONFIG_CRYPTO_STATS
#define COMP_ALG_COMMON_STATS struct crypto_istat_compress stat;
#else
#define COMP_ALG_COMMON_STATS
#endif

#define COMP_ALG_COMMON {		\
	COMP_ALG_COMMON_STATS		\
					\
	struct crypto_alg base;		\
}
struct comp_alg_common COMP_ALG_COMMON;

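/*
 * For reference, with CONFIG_CRYPTO_STATS enabled the definition above
 * expands to roughly:
 *
 *	struct comp_alg_common {
 *		struct crypto_istat_compress stat;
 *		struct crypto_alg base;
 *	};
 *
 * Without CONFIG_CRYPTO_STATS the stat member is simply omitted.
 */
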
/**
 * DOC: Asynchronous Compression API
 *
 * The Asynchronous Compression API is used with the algorithms of type
 * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
 */

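/*
 * Typical call sequence (an illustrative sketch only; "deflate", src_sg,
 * dst_sg, slen and dlen are assumptions of the example, and error handling
 * is omitted):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *
 *	acomp_request_free(req);
 *	crypto_free_acomp(tfm);
 */
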
/**
 * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      compression algorithm e.g. "deflate"
 * @type: specifies the type of the algorithm
 * @mask: specifies the mask for the algorithm
 *
 * Allocate a handle for a compression algorithm. The returned struct
 * crypto_acomp is the handle that is required for any subsequent
 * API invocation for the compression operations.
 *
 * Return: allocated handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
					u32 mask);

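/*
 * Illustrative allocation check (sketch only; "deflate" is just an example
 * algorithm name):
 *
 *	struct crypto_acomp *tfm = crypto_alloc_acomp("deflate", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */
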
/**
 * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      compression algorithm e.g. "deflate"
 * @type: specifies the type of the algorithm
 * @mask: specifies the mask for the algorithm
 * @node: specifies the NUMA node the ZIP hardware belongs to
 *
 * Allocate a handle for a compression algorithm. Drivers should try to use
 * (de)compressors on the specified NUMA node.
 * The returned struct crypto_acomp is the handle that is required for any
 * subsequent API invocation for the compression operations.
 *
 * Return: allocated handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					     u32 mask, int node);

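/*
 * NUMA-aware allocation sketch; "deflate" and the hypothetical device
 * pointer "dev" are assumptions of the example:
 *
 *	struct crypto_acomp *tfm;
 *
 *	tfm = crypto_alloc_acomp_node("deflate", 0, 0, dev_to_node(dev));
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */
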
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{
	return &tfm->base;
}

static inline struct comp_alg_common *__crypto_comp_alg_common(
	struct crypto_alg *alg)
{
	return container_of(alg, struct comp_alg_common, base);
}

static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_acomp, base);
}

static inline struct comp_alg_common *crypto_comp_alg_common(
	struct crypto_acomp *tfm)
{
	return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg);
}

static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
{
	return tfm->reqsize;
}

static inline void acomp_request_set_tfm(struct acomp_req *req,
					 struct crypto_acomp *tfm)
{
	req->base.tfm = crypto_acomp_tfm(tfm);
}

static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
{
	return __crypto_acomp_tfm(req->base.tfm);
}

/**
 * crypto_free_acomp() -- free ACOMPRESS tfm handle
 *
 * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * If @tfm is a NULL or error pointer, this function does nothing.
 */
static inline void crypto_free_acomp(struct crypto_acomp *tfm)
{
	crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
}

static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_ACOMPRESS;
	mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

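/*
 * Example availability probe before allocating a handle (sketch only;
 * "lz4" is just an example algorithm name):
 *
 *	if (!crypto_has_acomp("lz4", 0, 0))
 *		return -ENOENT;
 */
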
/**
 * acomp_request_alloc() -- allocates asynchronous (de)compression request
 *
 * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
 *
 * Return: allocated handle in case of success or NULL in case of an error
 */
struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);

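/*
 * Example request allocation (sketch only):
 *
 *	struct acomp_req *req = acomp_request_alloc(tfm);
 *
 *	if (!req)
 *		return -ENOMEM;
 */
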
/**
 * acomp_request_free() -- zeroize and free asynchronous (de)compression
 *			   request as well as the output buffer if allocated
 *			   inside the algorithm
 *
 * @req: request to free
 */
void acomp_request_free(struct acomp_req *req);

/**
 * acomp_request_set_callback() -- Sets an asynchronous callback
 *
 * Callback will be called when an asynchronous operation on a given
 * request is finished.
 *
 * @req: request that the callback will be set for
 * @flgs: specify for instance if the operation may backlog
 * @cmpl: callback which will be called
 * @data: private data used by the caller
 */
static inline void acomp_request_set_callback(struct acomp_req *req,
					      u32 flgs,
					      crypto_completion_t cmpl,
					      void *data)
{
	req->base.complete = cmpl;
	req->base.data = data;
	req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
	req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
}

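/*
 * A common pattern is to drive the request synchronously with the
 * crypto_wait helpers from <linux/crypto.h>; a sketch only:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				   crypto_req_done, &wait);
 */
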
/**
 * acomp_request_set_params() -- Sets request parameters
 *
 * Sets parameters required by an acomp operation
 *
 * @req: asynchronous compress request
 * @src: pointer to input buffer scatterlist
 * @dst: pointer to output buffer scatterlist. If this is NULL, the
 *	 acomp layer will allocate the output memory
 * @slen: size of the input buffer
 * @dlen: size of the output buffer. If dst is NULL, this can be used by
 *	  the user to specify the maximum amount of memory to allocate
 */
static inline void acomp_request_set_params(struct acomp_req *req,
					    struct scatterlist *src,
					    struct scatterlist *dst,
					    unsigned int slen,
					    unsigned int dlen)
{
	req->src = src;
	req->dst = dst;
	req->slen = slen;
	req->dlen = dlen;

	req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
	if (!req->dst)
		req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}

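/*
 * Example of setting parameters from two linear buffers (sketch only;
 * src_buf, dst_buf and their sizes are assumptions of the example):
 *
 *	struct scatterlist src_sg, dst_sg;
 *
 *	sg_init_one(&src_sg, src_buf, src_len);
 *	sg_init_one(&dst_sg, dst_buf, dst_size);
 *	acomp_request_set_params(req, &src_sg, &dst_sg, src_len, dst_size);
 *
 * Passing dst == NULL instead asks the acomp layer to allocate the output
 * buffer itself, with dlen capping how much memory it may allocate.
 */
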
static inline struct crypto_istat_compress *comp_get_stat(
	struct comp_alg_common *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}

static inline int crypto_comp_errstat(struct comp_alg_common *alg, int err)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&comp_get_stat(alg)->err_cnt);

	return err;
}

/**
 * crypto_acomp_compress() -- Invoke asynchronous compress operation
 *
 * Function invokes the asynchronous compress operation
 *
 * @req: asynchronous compress request
 *
 * Return: zero on success; error code in case of error
 */
static inline int crypto_acomp_compress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct comp_alg_common *alg;

	alg = crypto_comp_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_compress *istat = comp_get_stat(alg);

		atomic64_inc(&istat->compress_cnt);
		atomic64_add(req->slen, &istat->compress_tlen);
	}

	return crypto_comp_errstat(alg, tfm->compress(req));
}

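/*
 * Example of invoking the operation and waiting for completion (sketch
 * only; assumes the request was set up with crypto_req_done and &wait as
 * in the callback example above):
 *
 *	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
 *	if (err)
 *		return err;
 *
 * On success, req->dlen has been updated to the number of bytes produced.
 */
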
/**
 * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
 *
 * Function invokes the asynchronous decompress operation
 *
 * @req: asynchronous decompress request
 *
 * Return: zero on success; error code in case of error
 */
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	struct comp_alg_common *alg;

	alg = crypto_comp_alg_common(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_compress *istat = comp_get_stat(alg);

		atomic64_inc(&istat->decompress_cnt);
		atomic64_add(req->slen, &istat->decompress_tlen);
	}

	return crypto_comp_errstat(alg, tfm->decompress(req));
}

#endif