| Message ID | 20221018230412.886349-1-nhuck@google.com (mailing list archive) |
|---|---|
| State | Accepted |
| Delegated to: | Herbert Xu |
| Series | [v3] crypto: x86/polyval - Fix crashes when keys are not 16-byte aligned |
On Tue, Oct 18, 2022 at 04:04:12PM -0700, Nathan Huckleberry wrote:
> crypto_tfm::__crt_ctx is not guaranteed to be 16-byte aligned on x86-64.
> This causes crashes due to movaps instructions in clmul_polyval_update.
>
> Add logic to align polyval_tfm_ctx to 16 bytes.
>
> Fixes: 34f7f6c30112 ("crypto: x86/polyval - Add PCLMULQDQ accelerated implementation of POLYVAL")
> Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
> Signed-off-by: Nathan Huckleberry <nhuck@google.com>

Reviewed-by: Eric Biggers <ebiggers@google.com>

Please add 'Cc: stable@vger.kernel.org' as well.  (Herbert can do it when
applying this patch, if you don't happen to send another version.)

- Eric
On Wed, 19 Oct 2022 at 01:04, Nathan Huckleberry <nhuck@google.com> wrote:
>
> crypto_tfm::__crt_ctx is not guaranteed to be 16-byte aligned on x86-64.
> This causes crashes due to movaps instructions in clmul_polyval_update.
>
> Add logic to align polyval_tfm_ctx to 16 bytes.
>
> Fixes: 34f7f6c30112 ("crypto: x86/polyval - Add PCLMULQDQ accelerated implementation of POLYVAL")
> Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
> Signed-off-by: Nathan Huckleberry <nhuck@google.com>
> ---
>  arch/x86/crypto/polyval-clmulni_glue.c | 19 ++++++++++++++-----
>  1 file changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c
> index b7664d018851..8fa58b0f3cb3 100644
> --- a/arch/x86/crypto/polyval-clmulni_glue.c
> +++ b/arch/x86/crypto/polyval-clmulni_glue.c
> @@ -27,13 +27,17 @@
>  #include <asm/cpu_device_id.h>
>  #include <asm/simd.h>
>
> +#define POLYVAL_ALIGN	16
> +#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN)
> +#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
> +#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA)
>  #define NUM_KEY_POWERS	8
>
>  struct polyval_tfm_ctx {
>  	/*
>  	 * These powers must be in the order h^8, ..., h^1.
>  	 */
> -	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE];
> +	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR;
>  };
>
>  struct polyval_desc_ctx {
> @@ -45,6 +49,11 @@ asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys,
>  	const u8 *in, size_t nblocks, u8 *accumulator);
>  asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2);
>
> +static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm)
> +{
> +	return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN);
> +}
> +
>  static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
>  	const u8 *in, size_t nblocks, u8 *accumulator)
>  {
> @@ -72,7 +81,7 @@ static void internal_polyval_mul(u8 *op1, const u8 *op2)
>  static int polyval_x86_setkey(struct crypto_shash *tfm,
>  			const u8 *key, unsigned int keylen)
>  {
> -	struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm);
> +	struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm);
>  	int i;
>
>  	if (keylen != POLYVAL_BLOCK_SIZE)
> @@ -102,7 +111,7 @@ static int polyval_x86_update(struct shash_desc *desc,
>  			 const u8 *src, unsigned int srclen)
>  {
>  	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
> -	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
> +	const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
>  	u8 *pos;
>  	unsigned int nblocks;
>  	unsigned int n;
> @@ -143,7 +152,7 @@ static int polyval_x86_update(struct shash_desc *desc,
>  static int polyval_x86_final(struct shash_desc *desc, u8 *dst)
>  {
>  	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
> -	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
> +	const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
>
>  	if (dctx->bytes) {
>  		internal_polyval_mul(dctx->buffer,
> @@ -167,7 +176,7 @@ static struct shash_alg polyval_alg = {
>  		.cra_driver_name	= "polyval-clmulni",
>  		.cra_priority		= 200,
>  		.cra_blocksize		= POLYVAL_BLOCK_SIZE,
> -		.cra_ctxsize		= sizeof(struct polyval_tfm_ctx),
> +		.cra_ctxsize		= POLYVAL_CTX_SIZE,
>  		.cra_module		= THIS_MODULE,
>  	},
>  };
> --
> 2.38.0.413.g74048e4d9e-goog
>

Thanks, this patch worked well for me.

Bruno
On Tue, Oct 18, 2022 at 04:04:12PM -0700, Nathan Huckleberry wrote:
> crypto_tfm::__crt_ctx is not guaranteed to be 16-byte aligned on x86-64.
> This causes crashes due to movaps instructions in clmul_polyval_update.
>
> Add logic to align polyval_tfm_ctx to 16 bytes.
>
> Fixes: 34f7f6c30112 ("crypto: x86/polyval - Add PCLMULQDQ accelerated implementation of POLYVAL")
> Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
> Signed-off-by: Nathan Huckleberry <nhuck@google.com>
> ---
>  arch/x86/crypto/polyval-clmulni_glue.c | 19 ++++++++++++++-----
>  1 file changed, 14 insertions(+), 5 deletions(-)

Patch applied.  Thanks.
diff --git a/arch/x86/crypto/polyval-clmulni_glue.c b/arch/x86/crypto/polyval-clmulni_glue.c
index b7664d018851..8fa58b0f3cb3 100644
--- a/arch/x86/crypto/polyval-clmulni_glue.c
+++ b/arch/x86/crypto/polyval-clmulni_glue.c
@@ -27,13 +27,17 @@
 #include <asm/cpu_device_id.h>
 #include <asm/simd.h>

+#define POLYVAL_ALIGN	16
+#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN)
+#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA)
 #define NUM_KEY_POWERS	8

 struct polyval_tfm_ctx {
 	/*
 	 * These powers must be in the order h^8, ..., h^1.
 	 */
-	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE];
+	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR;
 };

 struct polyval_desc_ctx {
@@ -45,6 +49,11 @@ asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys,
 	const u8 *in, size_t nblocks, u8 *accumulator);
 asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2);

+static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm)
+{
+	return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN);
+}
+
 static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
 	const u8 *in, size_t nblocks, u8 *accumulator)
 {
@@ -72,7 +81,7 @@ static void internal_polyval_mul(u8 *op1, const u8 *op2)
 static int polyval_x86_setkey(struct crypto_shash *tfm,
 			const u8 *key, unsigned int keylen)
 {
-	struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+	struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm);
 	int i;

 	if (keylen != POLYVAL_BLOCK_SIZE)
@@ -102,7 +111,7 @@ static int polyval_x86_update(struct shash_desc *desc,
 			 const u8 *src, unsigned int srclen)
 {
 	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
 	u8 *pos;
 	unsigned int nblocks;
 	unsigned int n;
@@ -143,7 +152,7 @@ static int polyval_x86_update(struct shash_desc *desc,
 static int polyval_x86_final(struct shash_desc *desc, u8 *dst)
 {
 	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);

 	if (dctx->bytes) {
 		internal_polyval_mul(dctx->buffer,
@@ -167,7 +176,7 @@ static struct shash_alg polyval_alg = {
 		.cra_driver_name	= "polyval-clmulni",
 		.cra_priority		= 200,
 		.cra_blocksize		= POLYVAL_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct polyval_tfm_ctx),
+		.cra_ctxsize		= POLYVAL_CTX_SIZE,
 		.cra_module		= THIS_MODULE,
 	},
 };
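For context on the idiom used above: the driver can no longer rely on the crypto API handing it a 16-byte-aligned context, so it reserves POLYVAL_ALIGN_EXTRA spare bytes in .cra_ctxsize and rounds the context pointer up with PTR_ALIGN() before every use. The userspace sketch below illustrates the same over-allocate-and-round-up idea; ptr_align(), the struct layout, MINALIGN, and the malloc()-based allocation are simplified stand-ins for illustration, not the kernel's crypto_shash_ctx()/PTR_ALIGN()/CRYPTO_MINALIGN machinery.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POLYVAL_ALIGN       16
#define MINALIGN            8   /* stand-in for CRYPTO_MINALIGN (assumed 8 on x86-64) */
#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(MINALIGN - 1))

struct polyval_ctx {
	uint8_t key_powers[8][16];   /* h^8, ..., h^1, as in the driver */
};

/* Round ptr up to the next 'align' boundary; align must be a power of two. */
static void *ptr_align(void *ptr, uintptr_t align)
{
	return (void *)(((uintptr_t)ptr + align - 1) & ~(align - 1));
}

int main(void)
{
	/* Over-allocate, mirroring .cra_ctxsize = sizeof(ctx) + POLYVAL_ALIGN_EXTRA. */
	void *raw = malloc(sizeof(struct polyval_ctx) + POLYVAL_ALIGN_EXTRA);
	if (!raw)
		return 1;

	/* Mirror polyval_tfm_ctx(): round the raw context pointer up to 16 bytes. */
	struct polyval_ctx *ctx = ptr_align(raw, POLYVAL_ALIGN);

	/* ctx is now safe to hand to code that issues aligned 16-byte loads (movaps). */
	printf("raw=%p aligned=%p extra=%d\n", raw, (void *)ctx, POLYVAL_ALIGN_EXTRA);
	free(raw);
	return 0;
}
```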
crypto_tfm::__crt_ctx is not guaranteed to be 16-byte aligned on x86-64.
This causes crashes due to movaps instructions in clmul_polyval_update.

Add logic to align polyval_tfm_ctx to 16 bytes.

Fixes: 34f7f6c30112 ("crypto: x86/polyval - Add PCLMULQDQ accelerated implementation of POLYVAL")
Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
Signed-off-by: Nathan Huckleberry <nhuck@google.com>
---
 arch/x86/crypto/polyval-clmulni_glue.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
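A worked check of the new size macros (the CRYPTO_MINALIGN and POLYVAL_BLOCK_SIZE values here are assumptions for x86-64, not taken from this patch): with CRYPTO_MINALIGN == 8, POLYVAL_ALIGN_EXTRA expands to (16 - 1) & ~(8 - 1) == 8, and because the crypto API already hands out contexts with at least CRYPTO_MINALIGN alignment, 8 spare bytes are always enough for PTR_ALIGN() to reach the next 16-byte boundary.

```c
#include <assert.h>

#define CRYPTO_MINALIGN     8    /* assumed value on x86-64 */
#define POLYVAL_BLOCK_SIZE  16   /* assumed; one POLYVAL block */
#define NUM_KEY_POWERS      8
#define POLYVAL_ALIGN       16
#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))

struct polyval_tfm_ctx {
	unsigned char key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE]
		__attribute__((aligned(POLYVAL_ALIGN)));
};

#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA)

int main(void)
{
	assert(POLYVAL_ALIGN_EXTRA == 8);               /* 15 & ~7 */
	assert(sizeof(struct polyval_tfm_ctx) == 128);  /* 8 * 16 bytes of key powers */
	assert(POLYVAL_CTX_SIZE == 136);                /* 128 + 8 spare alignment bytes */
	return 0;
}
```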