--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -73,7 +73,6 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
- acomp->dst_free = alg->dst_free;
acomp->reqsize = alg->reqsize;
if (alg->exit)
@@ -146,11 +145,6 @@ void acomp_request_free(struct acomp_req *req)
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
crypto_acomp_scomp_free_ctx(req);
- if (req->base.flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
- acomp->dst_free(req->dst);
- req->dst = NULL;
- }
-
__acomp_request_free(req);
}
EXPORT_SYMBOL_GPL(acomp_request_free);
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -18,15 +18,18 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/vmalloc.h>
#include <net/netlink.h>
#include "compress.h"
+#define SCOMP_SCRATCH_SIZE PAGE_SIZE
+
struct scomp_scratch {
spinlock_t lock;
- void *src;
- void *dst;
+ union {
+ void *src;
+ unsigned long saddr;
+ };
};
static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
@@ -66,10 +69,8 @@ static void crypto_scomp_free_scratches(void)
for_each_possible_cpu(i) {
scratch = per_cpu_ptr(&scomp_scratch, i);
- vfree(scratch->src);
- vfree(scratch->dst);
+ free_page(scratch->saddr);
scratch->src = NULL;
- scratch->dst = NULL;
}
}
@@ -79,18 +80,14 @@ static int crypto_scomp_alloc_scratches(void)
int i;
for_each_possible_cpu(i) {
- void *mem;
+ unsigned long mem;
scratch = per_cpu_ptr(&scomp_scratch, i);
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ mem = __get_free_page(GFP_KERNEL);
if (!mem)
goto error;
- scratch->src = mem;
- mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
- if (!mem)
- goto error;
- scratch->dst = mem;
+ scratch->saddr = mem;
}
return 0;
error:
@@ -113,72 +110,58 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
void **ctx = acomp_request_ctx(req);
struct scomp_scratch *scratch;
+ unsigned int slen = req->slen;
+ unsigned int dlen = req->dlen;
void *src, *dst;
- unsigned int dlen;
int ret;
- if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
+ if (!req->src || !slen)
return -EINVAL;
- if (req->dst && !req->dlen)
+ if (!req->dst || !dlen)
return -EINVAL;
- if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
- req->dlen = SCOMP_SCRATCH_SIZE;
+ if (sg_nents(req->dst) > 1)
+ return -ENOSYS;
- dlen = req->dlen;
+ if (req->dst->offset >= PAGE_SIZE)
+ return -ENOSYS;
+
+ if (req->dst->offset + dlen > PAGE_SIZE)
+ dlen = PAGE_SIZE - req->dst->offset;
+
scratch = raw_cpu_ptr(&scomp_scratch);
+
+ if (sg_nents(req->src) == 1 && (!PageHighMem(sg_page(req->src)) ||
+ req->src->offset + slen <= PAGE_SIZE)) {
+ src = kmap_local_page(sg_page(req->src)) + req->src->offset;
+ } else {
+ /* Source is linearised into a single-page scratch buffer. */
+ if (slen > SCOMP_SCRATCH_SIZE)
+ return -EINVAL;
+ src = scratch->src;
+ }
+
+ dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
+
spin_lock(&scratch->lock);
- if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
- src = page_to_virt(sg_page(req->src)) + req->src->offset;
- } else {
- scatterwalk_map_and_copy(scratch->src, req->src, 0,
- req->slen, 0);
- src = scratch->src;
- }
-
- if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
- dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
- else
- dst = scratch->dst;
+ if (src == scratch->src)
+ memcpy_from_sglist(src, req->src, 0, req->slen);
if (dir)
- ret = crypto_scomp_compress(scomp, src, req->slen,
- dst, &req->dlen, *ctx);
+ ret = crypto_scomp_compress(scomp, src, slen,
+ dst, &dlen, *ctx);
else
- ret = crypto_scomp_decompress(scomp, src, req->slen,
- dst, &req->dlen, *ctx);
+ ret = crypto_scomp_decompress(scomp, src, slen,
+ dst, &dlen, *ctx);
- if (!ret) {
- if (!req->dst) {
- req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
- if (!req->dst) {
- ret = -ENOMEM;
- goto out;
- }
- } else if (req->dlen > dlen) {
- ret = -ENOSPC;
- goto out;
- }
- if (dst == scratch->dst) {
- scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
- req->dlen, 1);
- } else {
- int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
- int i;
- struct page *dst_page = sg_page(req->dst);
- for (i = 0; i < nr_pages; i++)
- flush_dcache_page(dst_page + i);
- }
- }
-out:
spin_unlock(&scratch->lock);
+
+ req->dlen = dlen;
+
+ kunmap_local(dst);
+ if (src != scratch->src)
+ kunmap_local(src);
+ flush_dcache_page(sg_page(req->dst));
+
return ret;
}
@@ -225,7 +208,6 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
- crt->dst_free = sgl_free;
crt->reqsize = sizeof(void *);
return 0;
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -15,8 +15,6 @@
#include <linux/scatterlist.h>
#include <linux/types.h>
-#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
-
/* Set this bit for virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_VIRT 0x00000002
@@ -75,15 +73,12 @@ struct acomp_req {
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
- * @dst_free: Frees destination buffer if allocated inside the
- * algorithm
* @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
*/
struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
- void (*dst_free)(struct scatterlist *dst);
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -234,7 +229,7 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
- u32 keep = CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_VIRT;
+ u32 keep = CRYPTO_ACOMP_REQ_VIRT;
req->base.complete = cmpl;
req->base.data = data;
@@ -268,9 +263,7 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
- req->base.flags &= ~(CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_VIRT);
- if (!req->dst)
- req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+ req->base.flags &= ~CRYPTO_ACOMP_REQ_VIRT;
}
/**
@@ -294,7 +287,6 @@ static inline void acomp_request_set_virt(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;
- req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
req->base.flags |= CRYPTO_ACOMP_REQ_VIRT;
}
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -12,8 +12,6 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>
-#define SCOMP_SCRATCH_SIZE 131072
-
struct acomp_req;
struct crypto_scomp {
As the only user of acomp/scomp uses a trivial single-page SG list,
remove support for everything else in preparation for the addition
of virtual address support.

However, keep support for non-trivial source SG lists as that user
is currently jumping through hoops in order to linearise the source
data.

Limit the source SG linearisation buffer to a single page as that
user never goes over that.  The only other potential user (IPComp)
is also unlikely to exceed a page, and it can easily do its own
linearisation if necessary.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/acompress.c                  |  6 --
 crypto/scompress.c                  | 98 ++++++++++++-----------------
 include/crypto/acompress.h          | 12 +---
 include/crypto/internal/scompress.h |  2 -
 4 files changed, 42 insertions(+), 76 deletions(-)
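For callers, the visible effect of removing CRYPTO_ACOMP_ALLOC_OUTPUT is
that a destination scatterlist must now always be supplied up front, and
after this patch it must be a single entry that stays within one page.
The sketch below shows what a synchronous acomp compress call looks like
under those constraints; the helper name, the "deflate" algorithm choice
and the error handling are illustrative assumptions, not part of this
patch.

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper (not from this patch): compress one linear,
 * kmalloc'ed source buffer into a caller-supplied destination buffer.
 * The destination SG is a single entry and should stay within one page
 * now that the NULL-dst (ALLOC_OUTPUT) path is gone.
 */
static int example_compress_buf(const void *src_buf, unsigned int slen,
				void *dst_buf, unsigned int *dlen)
{
	struct scatterlist src_sg, dst_sg;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&src_sg, src_buf, slen);
	sg_init_one(&dst_sg, dst_buf, *dlen);

	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	/* A destination SG is now mandatory; *dlen bounds the output. */
	acomp_request_set_params(req, &src_sg, &dst_sg, slen, *dlen);

	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!ret)
		*dlen = req->dlen;	/* actual compressed length */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}

On success req->dlen still reports the produced length as before; the
only thing that has gone away is the path where the API allocated the
output SG list on the caller's behalf.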