kernel-2.6.18-238.el5.src.rpm

From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Sun, 6 Jan 2008 16:09:38 +1100
Subject: [ipsec] use crypto_aead and authenc in ESP
Message-id: E1JBNlS-0001CR-00@gondolin.me.apana.org.au
O-Subject: [PATCH 23/32] [IPSEC]: Use crypto_aead and authenc in ESP
Bugzilla: 253051

[IPSEC]: Use crypto_aead and authenc in ESP

This patch converts ESP to use the crypto_aead interface and in particular
the authenc algorithm.  This lays the foundations for future support of
combined mode algorithms.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Acked-by: "David S. Miller" <davem@redhat.com>
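[Note: the conversion below keys the cipher and the HMAC through a single
authenc(<auth>,<enc>) AEAD transform, so esp_init_state() packs both keys,
preceded by an rtattr-framed enckeylen parameter, into one blob handed to
crypto_aead_setkey().  The following is only a minimal userspace sketch of
that blob layout: the rtattr_hdr structure, the RTA_* macros and the
KEYA_PARAM value are simplified stand-ins for the kernel definitions, and
the key sizes are made-up examples.]

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* htonl() as a stand-in for cpu_to_be32() */

struct rtattr_hdr { uint16_t rta_len; uint16_t rta_type; };

#define RTA_ALIGN(len)  (((len) + 3UL) & ~3UL)
#define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr_hdr)) + (len))
#define RTA_SPACE(len)  RTA_ALIGN(RTA_LENGTH(len))
#define KEYA_PARAM      1       /* assumed CRYPTO_AUTHENC_KEYA_PARAM */

int main(void)
{
	uint8_t auth_key[20] = { 0xaa };        /* e.g. an HMAC-SHA1 key */
	uint8_t enc_key[16]  = { 0xbb };        /* e.g. an AES-128 key   */
	uint8_t blob[RTA_SPACE(sizeof(uint32_t)) +
		     sizeof(auth_key) + sizeof(enc_key)];
	uint8_t *p = blob;

	/* rtattr header framing the big-endian enckeylen parameter */
	struct rtattr_hdr rta = {
		.rta_len  = RTA_LENGTH(sizeof(uint32_t)),
		.rta_type = KEYA_PARAM,
	};
	uint32_t enckeylen = htonl((uint32_t)sizeof(enc_key));

	memcpy(p, &rta, sizeof(rta));
	memcpy(p + sizeof(rta), &enckeylen, sizeof(enckeylen));
	p += RTA_SPACE(sizeof(uint32_t));

	memcpy(p, auth_key, sizeof(auth_key)); /* authentication key first */
	p += sizeof(auth_key);
	memcpy(p, enc_key, sizeof(enc_key));   /* encryption key last */

	printf("authenc key blob: %zu bytes\n", sizeof(blob));
	return 0;
}

[The real layout is built in esp_init_state()/esp6_init_state() below using
the kernel's RTA_LENGTH/RTA_SPACE macros and cpu_to_be32().]
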

diff --git a/include/net/esp.h b/include/net/esp.h
index 90cd94f..1fcd747 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -1,59 +1,27 @@
 #ifndef _NET_ESP_H
 #define _NET_ESP_H
 
-#include <net/xfrm.h>
-#include <asm/scatterlist.h>
+#include <linux/skbuff.h>
 
-#define ESP_NUM_FAST_SG		4
+struct crypto_aead;
 
-struct esp_data
-{
-	struct scatterlist		sgbuf[ESP_NUM_FAST_SG];
-
-	/* Confidentiality */
-	struct {
-		u8			*key;		/* Key */
-		int			key_len;	/* Key length */
-		u8			*ivec;		/* ivec buffer */
-		/* ivlen is offset from enc_data, where encrypted data start.
-		 * It is logically different of crypto_tfm_alg_ivsize(tfm).
-		 * We assume that it is either zero (no ivec), or
-		 * >= crypto_tfm_alg_ivsize(tfm). */
-		int			ivlen;
-		int			padlen;		/* 0..255 */
-		struct crypto_tfm	*tfm;		/* crypto handle */
-	} conf;
-
-	/* Integrity. It is active when icv_full_len != 0 */
-	struct {
-		u8			*key;		/* Key */
-		int			key_len;	/* Length of the key */
-		u8			*work_icv;
-		int			icv_full_len;
-		int			icv_trunc_len;
-		void			(*icv)(struct esp_data*,
-		                               struct sk_buff *skb,
-		                               int offset, int len, u8 *icv);
-		struct crypto_tfm	*tfm;
-	} auth;
+struct esp_data {
+	/* 0..255 */
+	int padlen;
+
+	/* Confidentiality & Integrity */
+	struct crypto_aead *aead;
 };
 
 extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len);
 extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
 
-static inline void
-esp_hmac_digest(struct esp_data *esp, struct sk_buff *skb, int offset,
-                int len, u8 *auth_data)
+struct ip_esp_hdr;
+
+static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
 {
-	struct crypto_tfm *tfm = esp->auth.tfm;
-	char *icv = esp->auth.work_icv;
-
-	memset(auth_data, 0, esp->auth.icv_trunc_len);
-	crypto_hmac_init(tfm, esp->auth.key, &esp->auth.key_len);
-	skb_icv_walk(skb, tfm, offset, len, crypto_hmac_update);
-	crypto_hmac_final(tfm, esp->auth.key, &esp->auth.key_len, icv);
-	memcpy(auth_data, icv, esp->auth.icv_trunc_len);
+	return (struct ip_esp_hdr *)skb->h.raw;
 }
 
 #endif
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 90f9136..bd14894 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -384,7 +384,9 @@ config INET_AH
 config INET_ESP
 	tristate "IP: ESP transformation"
 	select XFRM
+	select XFRM_NALGO
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index fc2f8ce..a2a6294 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -1,24 +1,117 @@
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
-#include <asm/scatterlist.h>
-#include <linux/crypto.h>
+#include <linux/scatterlist.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
-#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
 #include <net/icmp.h>
 #include <net/protocol.h>
 #include <net/udp.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(ncrypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, ncrypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ? 
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				ncrypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				ncrypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm4_output_resume(skb, err);
+}
+
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	struct iphdr *top_iph;
 	struct ip_esp_hdr *esph;
-	struct crypto_tfm *tfm;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct esp_data *esp;
 	struct sk_buff *trailer;
+	void *tmp;
+	u8 *iv;
 	int blksize;
 	int clen;
 	int alen;
@@ -34,16 +127,27 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	clen = skb->len;
 
 	esp = x->data;
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	aead = esp->aead;
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
+
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+		goto error;
+	nfrags = err;
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0)
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
 		goto error;
 
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
+
 	/* Fill padding... */
 	do {
 		int i;
@@ -51,13 +155,13 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 			*(u8*)(trailer->tail + i) = i+1;
 	} while (0);
 	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
-	pskb_put(skb, trailer, clen - skb->len);
+	*(u8*)(trailer->tail + clen-skb->len - 1) = skb->nh.iph->protocol;
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	__skb_push(skb, skb->data - skb->nh.raw);
 	top_iph = skb->nh.iph;
 	esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4);
-	top_iph->tot_len = htons(skb->len + alen);
-	*(u8*)(trailer->tail - 1) = top_iph->protocol;
+	top_iph->tot_len = htons(skb->len);
 
 	/* this is non-NULL only with UDP Encapsulation */
 	if (x->encap) {
@@ -68,7 +172,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 		uh = (struct udphdr *)esph;
 		uh->source = encap->encap_sport;
 		uh->dest = encap->encap_dport;
-		uh->len = htons(skb->len + alen - top_iph->ihl*4);
+		uh->len = htons(skb->len - top_iph->ihl*4);
 		uh->check = 0;
 
 		switch (encap->encap_type) {
@@ -91,112 +195,59 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 	esph->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
 
-	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, x->replay.oseq);
 
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto error;
-		}
-		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
 
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-	}
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
 
-	if (esp->auth.icv_full_len) {
-		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
-		              sizeof(struct ip_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, alen);
-	}
+	kfree(tmp);
 
 	ip_send_check(top_iph);
 
-	err = 0;
-
 error:
 	return err;
 }
 
-/*
- * Note: detecting truncated vs. non-truncated authentication data is very
- * expensive, so we only support truncated data, which is the recommended
- * and common case.
- */
-static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+static int esp_input_done2(struct xfrm_state *x, struct sk_buff *skb, int err)
 {
 	struct iphdr *iph;
-	struct ip_esp_hdr *esph;
 	struct esp_data *esp = x->data;
-	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen;
-	int nfrags;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
 	int ihl;
 	u8 nexthdr[2];
-	struct scatterlist *sg;
 	int padlen;
 
-	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
-		goto out;
-
-	if (elen <= 0 || (elen & (blksize-1)))
-		goto out;
-
-	/* If integrity check is required, do this. */
-	if (esp->auth.icv_full_len) {
-		u8 sum[esp->auth.icv_full_len];
-		u8 sum1[alen];
-		
-		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
+	kfree(ESP_SKB_CB(skb)->tmp);
 
-		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
-			BUG();
-
-		if (unlikely(memcmp(sum, sum1, alen))) {
+	if (unlikely(err)) {
+		if (err == -EBADMSG)
 			x->stats.integrity_failed++;
-			goto out;
-		}
-	}
-
-	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
-
-	skb->ip_summed = CHECKSUM_NONE;
-
-	esph = (struct ip_esp_hdr*)skb->data;
-
-	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
-
-	sg = &esp->sgbuf[0];
-
-	if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-		sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-		if (!sg)
-			goto out;
 	}
-	skb_to_sgvec(skb, sg, sizeof(struct ip_esp_hdr) + esp->conf.ivlen, elen);
-	crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
-	if (unlikely(sg != &esp->sgbuf[0]))
-		kfree(sg);
 
 	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
 		BUG();
 
+	err = -EINVAL;
 	padlen = nexthdr[0];
-	if (padlen+2 >= elen)
+	if (padlen + 2 + alen >= elen)
 		goto out;
 
 	/* ... check padding bits here. Silly. :-) */ 
@@ -243,18 +294,92 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	iph->protocol = nexthdr[1];
 	pskb_trim(skb, skb->len - alen - padlen - 2);
-	skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - ihl;
+	skb->h.raw = __skb_pull(skb, hlen) - ihl;
 
-	return 0;
+	err = 0;
 
 out:
-	return -EINVAL;
+	return err;
+}
+
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm4_input_resume(skb,
+			   esp_input_done2(xfrm_input_state(skb), skb, err));
+}
+
+/*
+ * Note: detecting truncated vs. non-truncated authentication data is very
+ * expensive, so we only support truncated data, which is the recommended
+ * and common case.
+ */
+static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
+	struct sk_buff *trailer;
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
+	int nfrags;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
+	int err = -EINVAL;
+
+	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr)))
+		goto out;
+
+	if (elen <= 0)
+		goto out;
+
+	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
+		goto out;
+	nfrags = err;
+
+	err = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	esph = (struct ip_esp_hdr *)skb->data;
+
+	/* Get ivec. This can be wrong, check against another impls. */
+	iv = esph->enc_data;
+
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
+
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
+
+	err = crypto_aead_decrypt(req);
+	if (err == -EINPROGRESS)
+		goto out;
+
+	err = esp_input_done2(x, skb, err);
+
+out:
+	return err;
 }
 
 static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
 
 	if (x->props.mode) {
 		mtu = ALIGN(mtu + 2, blksize);
@@ -262,10 +387,10 @@ static u32 esp4_get_max_size(struct xfrm_state *x, int mtu)
 		/* The worst case. */
 		mtu = ALIGN(mtu + 2, 4) + blksize - 4;
 	}
-	if (esp->conf.padlen)
-		mtu = ALIGN(mtu, esp->conf.padlen);
+	if (esp->padlen)
+		mtu = ALIGN(mtu, esp->padlen);
 
-	return mtu + x->props.header_len + esp->auth.icv_trunc_len;
+	return mtu + x->props.header_len + crypto_aead_authsize(esp->aead);
 }
 
 static void esp4_err(struct sk_buff *skb, u32 info)
@@ -293,81 +418,107 @@ static void esp_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_tfm(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_tfm(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct xfrm_nalgo_desc *aalg_desc;
+	struct xfrm_nalgo_desc *ealg_desc;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int keylen;
+	int err;
 
-	/* null auth and encryption can have zero length keys */
+	if (x->ealg == NULL)
+		return -EINVAL;
+
+	aalg_desc = NULL;
 	if (x->aalg) {
-		if (x->aalg->alg_key_len > 512)
-			goto error;
+		aalg_desc = xfrm_naalg_get_byname(x->aalg->alg_name, 1);
+		if (!aalg_desc)
+			return -ENOENT;
 	}
-	if (x->ealg == NULL)
-		goto error;
+
+	ealg_desc = xfrm_nealg_get_byname(x->ealg->alg_name, 1);
+	if (!ealg_desc)
+		return -ENOENT;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     aalg_desc ? aalg_desc->name : "digest_null",
+		     ealg_desc->name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
-	if (x->aalg) {
-		struct xfrm_algo_desc *aalg_desc;
+	x->data = esp;
 
-		esp->auth.key = x->aalg->alg_key;
-		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
-		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-		if (esp->auth.tfm == NULL)
-			goto error;
-		esp->auth.icv = esp_hmac_digest;
+	aead = crypto_alloc_aead(authenc_name, 0,
+				 strcmp(ealg_desc->name, x->ealg->alg_name) ?
+				 NCRYPTO_ALG_ASYNC : 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
 
-		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
-		BUG_ON(!aalg_desc);
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
 
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
+	if (aalg_desc) {
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
+
+		err = -EINVAL;
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-		    crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+		    crypto_aead_authsize(aead)) {
 			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 				 x->aalg->alg_name,
-				 crypto_tfm_alg_digestsize(esp->auth.tfm),
+				 crypto_aead_authsize(aead),
 				 aalg_desc->uinfo.auth.icv_fullbits/8);
-			goto error;
+			goto free_key;
 		}
 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
-
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
-	esp->conf.key = x->ealg->alg_key;
-	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
-	if (x->props.ealgo == SADB_EALG_NULL)
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
-	else
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
-	if (esp->conf.tfm == NULL)
-		goto error;
-	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
-	}
-	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ip_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct iphdr);
 	if (x->encap) {
@@ -384,15 +535,10 @@ static int esp_init_state(struct xfrm_state *x)
 			break;
 		}
 	}
-	x->data = esp;
 	x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len;
-	return 0;
 
 error:
-	x->data = esp;
-	esp_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp_type =
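
[Note: the esp4.c helpers above (esp_alloc_tmp, esp_tmp_iv, esp_tmp_givreq,
esp_tmp_req, esp_givreq_sg, esp_req_sg) carve one kmalloc'd scratch buffer
into IV, AEAD request and scatterlist regions, so the async completion
callbacks can release everything with a single kfree of
ESP_SKB_CB(skb)->tmp.  A rough userspace sketch of the size and offset
arithmetic follows; all sizes are assumed placeholders standing in for
crypto_aead_ivsize(), crypto_aead_alignmask(), ncrypto_tfm_ctx_alignment(),
crypto_aead_reqsize() and the scatterlist type.]

#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	/* All sizes below are assumed placeholders for this sketch. */
	size_t ivsize    = 16;  /* crypto_aead_ivsize()                   */
	size_t alignmask = 15;  /* crypto_aead_alignmask()                */
	size_t ctxalign  = 8;   /* ncrypto_tfm_ctx_alignment()            */
	size_t reqsize   = 128; /* request struct + crypto_aead_reqsize() */
	size_t sgsize    = 32;  /* sizeof(struct scatterlist)             */
	size_t sgalign   = 8;   /* __alignof__(struct scatterlist)        */
	int nfrags       = 3;   /* fragments reported by skb_cow_data()   */

	size_t len = ivsize;
	if (len) {
		/* worst-case slack for aligning the IV to the alignmask,
		 * rounded up so the request that follows is ctx-aligned */
		len += alignmask & ~(ctxalign - 1);
		len = ALIGN(len, ctxalign);
	}

	size_t req_off = len;                           /* request region */
	size_t sg_off  = ALIGN(req_off + reqsize, sgalign);
	size_t total   = sg_off + sgsize * (nfrags + 1); /* +1: assoc SG  */

	printf("iv@0 req@%zu sg@%zu total=%zu bytes\n", req_off, sg_off, total);
	return 0;
}

[The extra scatterlist entry corresponds to asg, which covers the ESP header
as associated data in the output and input paths above.]
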
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 5127d6d..36fe967 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -72,6 +72,7 @@ config INET6_AH
 	depends on IPV6
 	select XFRM
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
@@ -84,7 +85,9 @@ config INET6_ESP
 	tristate "IPv6: ESP transformation"
 	depends on IPV6
 	select XFRM
+	select XFRM_NALGO
 	select CRYPTO
+	select CRYPTO_AEAD
 	select CRYPTO_HMAC
 	select CRYPTO_MD5
 	select CRYPTO_SHA1
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index a278d5e..c0185a9 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -24,37 +24,131 @@
  * 	This file is derived from net/ipv4/esp.c
  */
 
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
-#include <asm/scatterlist.h>
-#include <linux/crypto.h>
+#include <linux/scatterlist.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
 #include <linux/random.h>
+#include <linux/slab.h>
 #include <net/icmp.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
 #include <linux/icmpv6.h>
 
+struct esp_skb_cb {
+	struct xfrm_skb_cb xfrm;
+	void *tmp;
+};
+
+#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
+
+/*
+ * Allocate an AEAD request structure with extra space for SG and IV.
+ *
+ * For alignment considerations the IV is placed at the front, followed
+ * by the request and finally the SG list.
+ *
+ * TODO: Use spare space in skb for this where possible.
+ */
+static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags)
+{
+	unsigned int len;
+
+	len = crypto_aead_ivsize(aead);
+	if (len) {
+		len += crypto_aead_alignmask(aead) &
+		       ~(ncrypto_tfm_ctx_alignment() - 1);
+		len = ALIGN(len, ncrypto_tfm_ctx_alignment());
+	}
+
+	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
+	len = ALIGN(len, __alignof__(struct scatterlist));
+
+	len += sizeof(struct scatterlist) * nfrags;
+
+	return kmalloc(len, GFP_ATOMIC);
+}
+
+static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp)
+{
+	return crypto_aead_ivsize(aead) ? 
+	       PTR_ALIGN((u8 *)tmp, crypto_aead_alignmask(aead) + 1) : tmp;
+}
+
+static inline struct aead_givcrypt_request *esp_tmp_givreq(
+	struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_givcrypt_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				ncrypto_tfm_ctx_alignment());
+	aead_givcrypt_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
+{
+	struct aead_request *req;
+
+	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
+				ncrypto_tfm_ctx_alignment());
+	aead_request_set_tfm(req, aead);
+	return req;
+}
+
+static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
+					     struct aead_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static inline struct scatterlist *esp_givreq_sg(
+	struct crypto_aead *aead, struct aead_givcrypt_request *req)
+{
+	return (void *)ALIGN((unsigned long)(req + 1) +
+			     crypto_aead_reqsize(aead),
+			     __alignof__(struct scatterlist));
+}
+
+static void esp_output_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+	xfrm6_output_resume(skb, err);
+}
+
 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	int err;
 	int hdr_len;
 	struct ipv6hdr *top_iph;
 	struct ipv6_esp_hdr *esph;
-	struct crypto_tfm *tfm;
 	struct esp_data *esp;
+	struct crypto_aead *aead;
+	struct aead_givcrypt_request *req;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 	struct sk_buff *trailer;
+	void *tmp;
 	int blksize;
 	int clen;
 	int alen;
 	int nfrags;
+	u8 *iv;
 
 	esp = x->data;
+	aead = esp->aead;
 	hdr_len = skb->h.raw - skb->data +
-		  sizeof(*esph) + esp->conf.ivlen;
+		  sizeof(*esph) + crypto_aead_ivsize(aead);
 
 	/* Strip IP+ESP header. */
 	__skb_pull(skb, hdr_len);
@@ -65,16 +159,25 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 	/* Round to block size */
 	clen = skb->len;
 
-	alen = esp->auth.icv_trunc_len;
-	tfm = esp->conf.tfm;
-	blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4);
+	alen = crypto_aead_authsize(aead);
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
 	clen = ALIGN(clen + 2, blksize);
-	if (esp->conf.padlen)
-		clen = ALIGN(clen, esp->conf.padlen);
+	if (esp->padlen)
+		clen = ALIGN(clen, esp->padlen);
 
-	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
+	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
 		goto error;
-	}
+	nfrags = err;
+
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto error;
+
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_givreq(aead, iv);
+	asg = esp_givreq_sg(aead, req);
+	sg = asg + 1;
 
 	/* Fill padding... */
 	do {
@@ -83,140 +186,155 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 			*(u8*)(trailer->tail + i) = i+1;
 	} while (0);
 	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
-	pskb_put(skb, trailer, clen - skb->len);
+	*(u8*)(trailer->tail + clen-skb->len - 1) = *skb->nh.raw;
+	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len);
 	esph = (struct ipv6_esp_hdr *)skb->h.raw;
-	top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph));
-	*(u8*)(trailer->tail - 1) = *skb->nh.raw;
+	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));
 	*skb->nh.raw = IPPROTO_ESP;
 
 	esph->spi = x->id.spi;
 	esph->seq_no = htonl(++x->replay.oseq);
 	xfrm_aevent_doreplay(x);
 
-	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg,
+		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+		     clen + alen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	do {
-		struct scatterlist *sg = &esp->sgbuf[0];
+	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
+	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
+	aead_givcrypt_set_assoc(req, asg, sizeof(*esph));
+	aead_givcrypt_set_giv(req, esph->enc_data, x->replay.oseq);
 
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg)
-				goto error;
-		}
-		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
-		crypto_cipher_encrypt(tfm, sg, sg, clen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-	} while (0);
+	ESP_SKB_CB(skb)->tmp = tmp;
+	err = crypto_aead_givencrypt(req);
+	if (err == -EINPROGRESS)
+		goto error;
+
+	if (err == -EBUSY)
+		err = NET_XMIT_DROP;
 
-	if (esp->conf.ivlen) {
-		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
-		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
+	kfree(tmp);
+
+error:
+	return err;
+}
+
+static int esp_input_done2(struct xfrm_state *x, struct sk_buff *skb, int err)
+{
+	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	int alen = crypto_aead_authsize(aead);
+	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
+	int elen = skb->len - hlen;
+	int hdr_len = skb->h.raw - skb->nh.raw;
+	int padlen;
+	u8 nexthdr[2];
+
+	kfree(ESP_SKB_CB(skb)->tmp);
+
+	if (unlikely(err)) {
+		if (err == -EBADMSG)
+			x->stats.integrity_failed++;
+		goto out;
 	}
 
-	if (esp->auth.icv_full_len) {
-		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
-			sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
-		pskb_put(skb, trailer, alen);
+	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
+		BUG();
+
+	err = -EINVAL;
+	padlen = nexthdr[0];
+	if (padlen + 2 + alen >= elen) {
+		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
+			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
+		goto out;
 	}
 
-	err = 0;
+	/* ... check padding bits here. Silly. :-) */
 
-error:
+	pskb_trim(skb, skb->len - alen - padlen - 2);
+	skb->h.raw = __skb_pull(skb, hlen) - hdr_len;
+
+	err = nexthdr[1];
+
+out:
 	return err;
 }
 
+static void esp_input_done(struct crypto_async_request *base, int err)
+{
+	struct sk_buff *skb = base->data;
+
+	xfrm6_input_resume(skb,
+			   esp_input_done2(xfrm_input_state(skb), skb, err));
+}
+
 static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct ipv6hdr *iph;
 	struct ipv6_esp_hdr *esph;
 	struct esp_data *esp = x->data;
+	struct crypto_aead *aead = esp->aead;
+	struct aead_request *req;
 	struct sk_buff *trailer;
-	int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
-	int alen = esp->auth.icv_trunc_len;
-	int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen;
-
-	int hdr_len = skb->h.raw - skb->nh.raw;
+	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
 	int nfrags;
 	int ret = 0;
+	void *tmp;
+	u8 *iv;
+	struct scatterlist *sg;
+	struct scatterlist *asg;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6_esp_hdr))) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (elen <= 0 || (elen & (blksize-1))) {
+	if (elen <= 0) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	/* If integrity check is required, do this. */
-        if (esp->auth.icv_full_len) {
-		u8 sum[esp->auth.icv_full_len];
-		u8 sum1[alen];
-
-		esp->auth.icv(esp, skb, 0, skb->len-alen, sum);
-
-		if (skb_copy_bits(skb, skb->len-alen, sum1, alen))
-			BUG();
-
-		if (unlikely(memcmp(sum, sum1, alen))) {
-			x->stats.integrity_failed++;
-			ret = -EINVAL;
-			goto out;
-		}
-	}
-
 	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	ret = -ENOMEM;
+	tmp = esp_alloc_tmp(aead, nfrags + 1);
+	if (!tmp)
+		goto out;
+
+	ESP_SKB_CB(skb)->tmp = tmp;
+	iv = esp_tmp_iv(aead, tmp);
+	req = esp_tmp_req(aead, iv);
+	asg = esp_req_sg(aead, req);
+	sg = asg + 1;
+
 	skb->ip_summed = CHECKSUM_NONE;
 
 	esph = (struct ipv6_esp_hdr*)skb->data;
 	iph = skb->nh.ipv6h;
 
 	/* Get ivec. This can be wrong, check against another impls. */
-	if (esp->conf.ivlen)
-		crypto_cipher_set_iv(esp->conf.tfm, esph->enc_data, crypto_tfm_alg_ivsize(esp->conf.tfm));
-
-        {
-		u8 nexthdr[2];
-		struct scatterlist *sg = &esp->sgbuf[0];
-		u8 padlen;
-
-		if (unlikely(nfrags > ESP_NUM_FAST_SG)) {
-			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
-			if (!sg) {
-				ret = -ENOMEM;
-				goto out;
-			}
-		}
-		skb_to_sgvec(skb, sg, sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen, elen);
-		crypto_cipher_decrypt(esp->conf.tfm, sg, sg, elen);
-		if (unlikely(sg != &esp->sgbuf[0]))
-			kfree(sg);
-
-		if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
-			BUG();
-
-		padlen = nexthdr[0];
-		if (padlen+2 >= elen) {
-			LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage padlen=%d, elen=%d\n", padlen+2, elen);
-			ret = -EINVAL;
-			goto out;
-		}
-		/* ... check padding bits here. Silly. :-) */ 
+	iv = esph->enc_data;
 
-		pskb_trim(skb, skb->len - alen - padlen - 2);
-		ret = nexthdr[1];
-	}
+	sg_init_table(sg, nfrags);
+	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+	sg_init_one(asg, esph, sizeof(*esph));
 
-	skb->h.raw = __skb_pull(skb, sizeof(*esph) + esp->conf.ivlen) - hdr_len;
+	aead_request_set_callback(req, 0, esp_input_done, skb);
+	aead_request_set_crypt(req, sg, sg, elen, iv);
+	aead_request_set_assoc(req, asg, sizeof(*esph));
+
+	ret = crypto_aead_decrypt(req);
+	if (ret == -EINPROGRESS)
+		goto out;
+
+	ret = esp_input_done2(x, skb, ret);
 
 out:
 	return ret;
@@ -225,7 +343,7 @@ out:
 static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
 {
 	struct esp_data *esp = x->data;
-	u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4);
+	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
 
 	if (x->props.mode) {
 		mtu = ALIGN(mtu + 2, blksize);
@@ -234,10 +352,10 @@ static u32 esp6_get_max_size(struct xfrm_state *x, int mtu)
 		u32 padsize = ((blksize - 1) & 7) + 1;
 		mtu = ALIGN(mtu + 2, padsize) + blksize - padsize;
 	}
-	if (esp->conf.padlen)
-		mtu = ALIGN(mtu, esp->conf.padlen);
+	if (esp->padlen)
+		mtu = ALIGN(mtu, esp->padlen);
 
-	return mtu + x->props.header_len + esp->auth.icv_trunc_len;
+	return mtu + x->props.header_len + crypto_aead_authsize(esp->aead);
 }
 
 static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -266,94 +384,114 @@ static void esp6_destroy(struct xfrm_state *x)
 	if (!esp)
 		return;
 
-	crypto_free_tfm(esp->conf.tfm);
-	esp->conf.tfm = NULL;
-	kfree(esp->conf.ivec);
-	esp->conf.ivec = NULL;
-	crypto_free_tfm(esp->auth.tfm);
-	esp->auth.tfm = NULL;
-	kfree(esp->auth.work_icv);
-	esp->auth.work_icv = NULL;
+	crypto_free_aead(esp->aead);
 	kfree(esp);
 }
 
 static int esp6_init_state(struct xfrm_state *x)
 {
 	struct esp_data *esp = NULL;
+	struct crypto_aead *aead;
+	struct crypto_authenc_key_param *param;
+	struct xfrm_nalgo_desc *aalg_desc;
+	struct xfrm_nalgo_desc *ealg_desc;
+	struct rtattr *rta;
+	char *key;
+	char *p;
+	char authenc_name[CRYPTO_MAX_ALG_NAME];
+	unsigned int keylen;
+	int err;
 
-	/* null auth and encryption can have zero length keys */
-	if (x->aalg) {
-		if (x->aalg->alg_key_len > 512)
-			goto error;
-	}
 	if (x->ealg == NULL)
-		goto error;
+		return -EINVAL;
 
 	if (x->encap)
-		goto error;
+		return -EINVAL;
+
+	aalg_desc = NULL;
+	if (x->aalg) {
+		aalg_desc = xfrm_naalg_get_byname(x->aalg->alg_name, 1);
+		if (!aalg_desc)
+			return -ENOENT;
+	}
+
+	ealg_desc = xfrm_nealg_get_byname(x->ealg->alg_name, 1);
+	if (!ealg_desc)
+		return -ENOENT;
+
+	if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME, "authenc(%s,%s)",
+		     x->aalg ? x->aalg->alg_name : "digest_null",
+		     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
+		return -ENAMETOOLONG;
 
 	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
 	if (esp == NULL)
 		return -ENOMEM;
 
-	if (x->aalg) {
-		struct xfrm_algo_desc *aalg_desc;
-
-		esp->auth.key = x->aalg->alg_key;
-		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
-		esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0);
-		if (esp->auth.tfm == NULL)
-			goto error;
-		esp->auth.icv = esp_hmac_digest;
- 
-		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
-		BUG_ON(!aalg_desc);
+	x->data = esp;
+
+	aead = crypto_alloc_aead(authenc_name, 0,
+				 strcmp(ealg_desc->name, x->ealg->alg_name) ?
+				 NCRYPTO_ALG_ASYNC : 0);
+	err = PTR_ERR(aead);
+	if (IS_ERR(aead))
+		goto error;
+
+	esp->aead = aead;
+
+	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
+		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
+	err = -ENOMEM;
+	key = kmalloc(keylen, GFP_KERNEL);
+	if (!key)
+		goto error;
+
+	p = key;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*param));
+
+	if (aalg_desc) {
+		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
+		p += (x->aalg->alg_key_len + 7) / 8;
  
 		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
-			crypto_tfm_alg_digestsize(esp->auth.tfm)) {
+		    crypto_aead_authsize(aead)) {
 				printk(KERN_INFO "ESP: %s digestsize %u != %hu\n",
 					x->aalg->alg_name,
-					crypto_tfm_alg_digestsize(esp->auth.tfm),
+					crypto_aead_authsize(aead),
 					aalg_desc->uinfo.auth.icv_fullbits/8);
 				goto error;
 		}
- 
-		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
-		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
- 
-		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
-		if (!esp->auth.work_icv)
-			goto error;
-	}
-	esp->conf.key = x->ealg->alg_key;
-	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
-	if (x->props.ealgo == SADB_EALG_NULL)
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB);
-	else
-		esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC);
-	if (esp->conf.tfm == NULL)
-		goto error;
-	esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm);
-	esp->conf.padlen = 0;
-	if (esp->conf.ivlen) {
-		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
-		if (unlikely(esp->conf.ivec == NULL))
-			goto error;
-		get_random_bytes(esp->conf.ivec, esp->conf.ivlen);
+
+		err = crypto_aead_setauthsize(
+			aead, aalg_desc->uinfo.auth.icv_truncbits / 8);
+		if (err)
+			goto free_key;
 	}
-	if (crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len))
+
+	esp->padlen = 0;
+
+	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
+	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);
+
+	err = crypto_aead_setkey(aead, key, keylen);
+
+free_key:
+	kfree(key);
+
+	if (err)
 		goto error;
-	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
+
+	x->props.header_len = sizeof(struct ipv6_esp_hdr) +
+			      crypto_aead_ivsize(aead);
 	if (x->props.mode)
 		x->props.header_len += sizeof(struct ipv6hdr);
-	x->data = esp;
-	return 0;
 
 error:
-	x->data = esp;
-	esp6_destroy(x);
-	x->data = NULL;
-	return -EINVAL;
+	return err;
 }
 
 static struct xfrm_type esp6_type =