@@ -20,43 +20,40 @@ struct ah_skb_cb {
void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
unsigned int size)
{
unsigned int len;
- len = size + crypto_ahash_digestsize(ahash) +
- (crypto_ahash_alignmask(ahash) &
- ~(crypto_tfm_ctx_alignment() - 1));
+ len = size + crypto_ahash_digestsize(ahash);
len = ALIGN(len, crypto_tfm_ctx_alignment());
len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
len = ALIGN(len, __alignof__(struct scatterlist));
len += sizeof(struct scatterlist) * nfrags;
return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
return tmp + offset;
}

-static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
- unsigned int offset)
+static inline u8 *ah_tmp_icv(void *tmp, unsigned int offset)
{
- return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
+ return tmp + offset;
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
u8 *icv)
{
struct ahash_request *req;
req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
crypto_tfm_ctx_alignment());
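For context: a minimal userspace sketch (not kernel code) of the tmp buffer that the patched ah_alloc_tmp() produces and that ah_tmp_icv(), ah_tmp_req() and ah_req_sg() carve up once the alignmask padding is gone. The ALIGN macro and all sizes below are illustrative stand-ins, not the real crypto_tfm_ctx_alignment()/crypto_ahash_digestsize()/crypto_ahash_reqsize() values; only the pointer arithmetic mirrors the patched helpers.

/*
 * Userspace sketch of the ah4 tmp buffer layout after this change.
 * Stand-in sizes only; the real values come from the crypto API.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

#define CTX_ALIGN	8	/* stand-in: crypto_tfm_ctx_alignment() */
#define DIGEST_SIZE	20	/* stand-in: crypto_ahash_digestsize() */
#define REQ_SIZE	64	/* stand-in: sizeof(struct ahash_request) + reqsize */
#define SG_ALIGN	8	/* stand-in: __alignof__(struct scatterlist) */
#define SG_SIZE		32	/* stand-in: sizeof(struct scatterlist) */

int main(void)
{
	unsigned int size = 20 + 4;	/* e.g. IPv4 header + seqhi, as in ah_output() */
	int nfrags = 3;
	unsigned int len;
	unsigned char *tmp, *icv, *req, *sg;

	/* Same arithmetic as the patched ah_alloc_tmp(): no alignmask term. */
	len = size + DIGEST_SIZE;
	len = ALIGN(len, CTX_ALIGN);
	len += REQ_SIZE;
	len = ALIGN(len, SG_ALIGN);
	len += SG_SIZE * nfrags;

	tmp = malloc(len);
	if (!tmp)
		return 1;

	icv = tmp + size;	/* ah_tmp_icv(): now a plain offset */
	req = (void *)ALIGN((uintptr_t)(icv + DIGEST_SIZE), CTX_ALIGN);	/* ah_tmp_req() */
	sg  = (void *)ALIGN((uintptr_t)(req + REQ_SIZE), SG_ALIGN);	/* ah_req_sg() */

	printf("len=%u icv=+%td req=+%td sg=+%td\n",
	       len, icv - tmp, req - tmp, sg - tmp);
	free(tmp);
	return 0;
}

With the alignmask term dropped, the ICV simply starts right after the saved header/seqhi area, which is why ah_tmp_icv() no longer needs the tfm argument.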
@@ -122,21 +119,21 @@ static void ah_output_done(void *data, int err)
u8 *icv;
struct iphdr *iph;
struct sk_buff *skb = data;
struct xfrm_state *x = skb_dst(skb)->xfrm;
struct ah_data *ahp = x->data;
struct iphdr *top_iph = ip_hdr(skb);
struct ip_auth_hdr *ah = ip_auth_hdr(skb);
int ihl = ip_hdrlen(skb);
iph = AH_SKB_CB(skb)->tmp;
- icv = ah_tmp_icv(ahp->ahash, iph, ihl);
+ icv = ah_tmp_icv(iph, ihl);
memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
top_iph->tos = iph->tos;
top_iph->ttl = iph->ttl;
top_iph->frag_off = iph->frag_off;
if (top_iph->ihl != 5) {
top_iph->daddr = iph->daddr;
memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
}
@@ -175,21 +172,21 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
if (x->props.flags & XFRM_STATE_ESN) {
sglists = 1;
seqhi_len = sizeof(*seqhi);
}
err = -ENOMEM;
iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
if (!iph)
goto out;
seqhi = (__be32 *)((char *)iph + ihl);
- icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
+ icv = ah_tmp_icv(seqhi, seqhi_len);
req = ah_tmp_req(ahash, icv);
sg = ah_req_sg(ahash, req);
seqhisg = sg + nfrags;
memset(ah->auth_data, 0, ahp->icv_trunc_len);
top_iph = ip_hdr(skb);
iph->tos = top_iph->tos;
iph->ttl = top_iph->ttl;
@@ -272,21 +269,21 @@ static void ah_input_done(void *data, int err)
struct ah_data *ahp = x->data;
struct ip_auth_hdr *ah = ip_auth_hdr(skb);
int ihl = ip_hdrlen(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
if (err)
goto out;
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, ihl);
- icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
+ icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);
err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
if (err)
goto out;
err = ah->nexthdr;
skb->network_header += ah_hlen;
memcpy(skb_network_header(skb), work_iph, ihl);
__skb_pull(skb, ah_hlen + ihl);
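Aside on the ICV check in ah_input_done(): crypto_memneq() is used instead of memcmp() so that the comparison time does not depend on where the received ICV first differs from the recomputed one. A rough userspace stand-in for that idea follows; the kernel's crypto_memneq() implementation differs, this only illustrates the branch-free accumulation.

/* Userspace sketch of a constant-time "not equal" check in the spirit of
 * crypto_memneq(); not the kernel implementation. */
#include <stddef.h>
#include <stdio.h>

static int memneq_sketch(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;

	/* Accumulate differences over every byte; no data-dependent branch,
	 * so timing does not reveal the first mismatching byte. */
	while (n--)
		diff |= *pa++ ^ *pb++;

	return diff != 0;
}

int main(void)
{
	unsigned char icv[12]      = "abcdefghijk";
	unsigned char received[12] = "abcdefghijk";

	printf("mismatch: %d\n", memneq_sketch(icv, received, sizeof(icv)));
	received[3] ^= 1;
	printf("mismatch: %d\n", memneq_sketch(icv, received, sizeof(icv)));
	return 0;
}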
@@ -367,21 +364,21 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
ahp->icv_trunc_len + seqhi_len);
if (!work_iph) {
err = -ENOMEM;
goto out;
}
seqhi = (__be32 *)((char *)work_iph + ihl);
auth_data = ah_tmp_auth(seqhi, seqhi_len);
- icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
+ icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);
req = ah_tmp_req(ahash, icv);
sg = ah_req_sg(ahash, req);
seqhisg = sg + nfrags;
memcpy(work_iph, iph, ihl);
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
memset(ah->auth_data, 0, ahp->icv_trunc_len);
iph->ttl = 0;
iph->tos = 0;