@@ -97,7 +97,7 @@ struct aead_request {
struct scatterlist *src;
struct scatterlist *dst;
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ void *__ctx[] CRYPTO_REQ_MINALIGN_ATTR;
};
/**
@@ -62,7 +62,8 @@ static inline void *aead_instance_ctx(struct aead_instance *inst)
static inline void *aead_request_ctx(struct aead_request *req)
{
- return req->__ctx;
+ return PTR_ALIGN(&req->__ctx,
+ crypto_tfm_alg_req_alignmask(req->base.tfm) + 1);
}
static inline void aead_request_complete(struct aead_request *req, int err)
@@ -105,7 +106,15 @@ static inline struct crypto_aead *crypto_spawn_aead(
static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
unsigned int reqsize)
{
- aead->reqsize = reqsize;
+ unsigned int align = crypto_tfm_alg_req_alignmask(&aead->base) + 1;
+
+ /*
+ * The request structure itself is only aligned to CRYPTO_REQ_MINALIGN,
+ * so we need to add some headroom, allowing us to return a suitably
+ * aligned context buffer pointer. We also need to round up the size so
+ * we don't end up sharing a cacheline at the end of the buffer.
+ */
+ aead->reqsize = ALIGN(reqsize, align) + align - CRYPTO_REQ_MINALIGN;
}
static inline void aead_init_queue(struct aead_queue *queue,
AEAD request structures are currently aligned to minimal DMA alignment for the architecture, which defaults to 128 bytes on arm64. This is excessive, and rarely needed, i.e., only when doing non-coherent inbound DMA on the contents of the request context buffer. So let's relax this requirement, and only use this alignment if the CRYPTO_ALG_NEED_DMA_ALIGNMENT flag is set by the implementation. Signed-off-by: Ard Biesheuvel <ardb@kernel.org> --- include/crypto/aead.h | 2 +- include/crypto/internal/aead.h | 13 +++++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-)