Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fixes from Herbert Xu:
 "This push fixes a number of crashes triggered by a previous crypto
  self-test update.  It also fixes a build problem in the caam driver,
  as well as a concurrency issue in s390.

  Finally, there is a pair of fixes for bugs in the crypto
  scatterwalk code and in authenc that may lead to crashes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: testmgr - fix sglen in test_aead for case 'dst != src'
  crypto: talitos - fix aead sglen for case 'dst != src'
  crypto: caam - fix aead sglen for case 'dst != src'
  crypto: ccm - Fix handling of zero plaintext when computing mac
  crypto: s390 - Fix aes-xts parameter corruption
  crypto: talitos - correctly handle zero-length assoc data
  crypto: scatterwalk - Set the chain pointer indication bit
  crypto: authenc - Find proper IV address in ablkcipher callback
  crypto: caam - Add missing Job Ring include
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 4363528..b3feabd 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -55,8 +55,7 @@
 
 struct s390_xts_ctx {
 	u8 key[32];
-	u8 xts_param[16];
-	struct pcc_param pcc;
+	u8 pcc_key[32];
 	long enc;
 	long dec;
 	int key_len;
@@ -591,7 +590,7 @@
 		xts_ctx->enc = KM_XTS_128_ENCRYPT;
 		xts_ctx->dec = KM_XTS_128_DECRYPT;
 		memcpy(xts_ctx->key + 16, in_key, 16);
-		memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
 		break;
 	case 48:
 		xts_ctx->enc = 0;
@@ -602,7 +601,7 @@
 		xts_ctx->enc = KM_XTS_256_ENCRYPT;
 		xts_ctx->dec = KM_XTS_256_DECRYPT;
 		memcpy(xts_ctx->key, in_key, 32);
-		memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
 		break;
 	default:
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -621,29 +620,33 @@
 	unsigned int nbytes = walk->nbytes;
 	unsigned int n;
 	u8 *in, *out;
-	void *param;
+	struct pcc_param pcc_param;
+	struct {
+		u8 key[32];
+		u8 init[16];
+	} xts_param;
 
 	if (!nbytes)
 		goto out;
 
-	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
-	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
-	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
-	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
-	param = xts_ctx->pcc.key + offset;
-	ret = crypt_s390_pcc(func, param);
+	memset(pcc_param.block, 0, sizeof(pcc_param.block));
+	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
 	if (ret < 0)
 		return -EIO;
 
-	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
-	param = xts_ctx->key + offset;
+	memcpy(xts_param.key, xts_ctx->key, 32);
+	memcpy(xts_param.init, pcc_param.xts, 16);
 	do {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
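The s390 hunks above are the concurrency fix from the pull message:
the PCC parameter block and the XTS parameter lived in struct
s390_xts_ctx, which is shared by every request on a given tfm, so
requests running in parallel could corrupt each other's tweak
state. The fix keeps only the key material in the context and
builds the parameter blocks on the stack of each call. A minimal
sketch of the pattern, with hypothetical names (not the driver
code itself):

  #include <string.h>

  struct tfm_ctx {
          unsigned char key[32];          /* long-lived, read-only */
  };

  static void crypt_one_request(const struct tfm_ctx *ctx,
                                const unsigned char iv[16])
  {
          unsigned char param[48];        /* per-request scratch */

          /* Building the parameter block on the stack keeps it
           * private to this request; keeping it in *ctx would let
           * a concurrent request overwrite it before the hardware
           * instruction consumes it. */
          memcpy(param, ctx->key, sizeof(ctx->key));
          memcpy(param + 32, iv, 16);
          /* ... issue the hardware op on param ... */
  }
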
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 1875e70..e122355 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -380,9 +380,10 @@
 	if (!err) {
 		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-		struct ablkcipher_request *abreq = aead_request_ctx(areq);
-		u8 *iv = (u8 *)(abreq + 1) +
-			 crypto_ablkcipher_reqsize(ctx->enc);
+		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
+		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+							    + ctx->reqoff);
+		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
 
 		err = crypto_authenc_genicv(areq, iv, 0);
 	}
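
The authenc fix corrects where the encryption completion callback
looks for the IV. The old code assumed the ablkcipher request sat
at the start of the aead request context with the IV right behind
it; the actual layout places the ablkcipher request at ctx->reqoff
inside the context's tail buffer, with the IV stored in the ivsize
bytes immediately before it. A sketch of the assumed layout:

  /*
   * areq_ctx->tail:
   *
   *   [ ... | IV (ivsize bytes) | struct ablkcipher_request ... ]
   *                             ^
   *                             tail + ctx->reqoff
   *
   * so the IV is recovered by stepping back from abreq:
   */
  u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
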
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 3e05499..1df8421 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -271,7 +271,8 @@
 	}
 
 	/* compute plaintext into mac */
-	get_data_to_compute(cipher, pctx, plain, cryptlen);
+	if (cryptlen)
+		get_data_to_compute(cipher, pctx, plain, cryptlen);
 
 out:
 	return err;
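
The ccm hunk guards the plaintext pass of the CBC-MAC: with a
zero-length plaintext there are no message blocks to fold in (the
MAC still covers B0 and any associated data, which are handled
earlier), and walking the scatterlist for zero bytes can touch an
empty or unmapped list. The guard pattern, sketched with a
hypothetical walker:

  if (len)                        /* nothing to hash: skip the walk */
          process_sg(sg, len);    /* hypothetical sg walker */
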
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 1ab8258..001f07c 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1242,6 +1242,10 @@
 		ret += tcrypt_test("cmac(des3_ede)");
 		break;
 
+	case 155:
+		ret += tcrypt_test("authenc(hmac(sha1),cbc(aes))");
+		break;
+
 	case 200:
 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
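
The new tcrypt case gives the fixed authenc template a quick
self-test entry point; like the other cases it is selected through
the module's mode parameter (e.g. "modprobe tcrypt mode=155" -
tcrypt runs its tests at load time and then deliberately fails to
load).
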
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 432afc0..7795550 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -503,16 +503,16 @@
 				goto out;
 			}
 
-			sg_init_one(&sg[0], input,
-				    template[i].ilen + (enc ? authsize : 0));
-
 			if (diff_dst) {
 				output = xoutbuf[0];
 				output += align_offset;
+				sg_init_one(&sg[0], input, template[i].ilen);
 				sg_init_one(&sgout[0], output,
+					    template[i].rlen);
+			} else {
+				sg_init_one(&sg[0], input,
 					    template[i].ilen +
 						(enc ? authsize : 0));
-			} else {
 				output = input;
 			}
 
@@ -612,12 +612,6 @@
 				memcpy(q, template[i].input + temp,
 				       template[i].tap[k]);
 
-				n = template[i].tap[k];
-				if (k == template[i].np - 1 && enc)
-					n += authsize;
-				if (offset_in_page(q) + n < PAGE_SIZE)
-					q[n] = 0;
-
 				sg_set_buf(&sg[k], q, template[i].tap[k]);
 
 				if (diff_dst) {
@@ -625,13 +619,17 @@
 					    offset_in_page(IDX[k]);
 
 					memset(q, 0, template[i].tap[k]);
-					if (offset_in_page(q) + n < PAGE_SIZE)
-						q[n] = 0;
 
 					sg_set_buf(&sgout[k], q,
 						   template[i].tap[k]);
 				}
 
+				n = template[i].tap[k];
+				if (k == template[i].np - 1 && enc)
+					n += authsize;
+				if (offset_in_page(q) + n < PAGE_SIZE)
+					q[n] = 0;
+
 				temp += template[i].tap[k];
 			}
 
@@ -650,10 +648,10 @@
 					goto out;
 				}
 
-				sg[k - 1].length += authsize;
-
 				if (diff_dst)
 					sgout[k - 1].length += authsize;
+				else
+					sg[k - 1].length += authsize;
 			}
 
 			sg_init_table(asg, template[i].anp);
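
The testmgr hunks size the scatterlists asymmetrically when the
test uses distinct source and destination buffers: on encryption
the tag is written to the destination, so the source list covers
only the input (ilen) while the destination list covers the full
result (rlen, i.e. ilen plus the tag). The trailing guard byte
(q[n] = 0) is moved for the same reason, so it lands past whichever
buffer actually receives the extra authsize bytes. A sketch with
concrete numbers, assuming a 16-byte tag:

  /* encrypting 32 bytes with authsize = 16, dst != src */
  sg_init_one(&sg[0], input, 32);         /* plaintext only   */
  sg_init_one(&sgout[0], output, 48);     /* ciphertext + tag */
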
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4f44b71..4cf5dec 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -818,7 +818,7 @@
 		       ivsize, 1);
 	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen, 1);
+		       req->cryptlen - ctx->authsize, 1);
 #endif
 
 	if (err) {
@@ -972,12 +972,9 @@
 				 (edesc->src_nents ? : 1);
 		in_options = LDST_SGF;
 	}
-	if (encrypt)
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen - authsize, in_options);
-	else
-		append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-				  req->cryptlen, in_options);
+
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (likely(req->src == req->dst)) {
 		if (all_contig) {
@@ -998,7 +995,8 @@
 		}
 	}
 	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+				   out_options);
 	else
 		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
 				   out_options);
@@ -1048,8 +1046,8 @@
 		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
 		in_options = LDST_SGF;
 	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
-			  req->cryptlen - authsize, in_options);
+	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+			  in_options);
 
 	if (contig & GIV_DST_CONTIG) {
 		dst_dma = edesc->iv_dma;
@@ -1066,7 +1064,8 @@
 		}
 	}
 
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+			   out_options);
 }
 
 /*
@@ -1130,7 +1129,8 @@
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-					   int desc_bytes, bool *all_contig_ptr)
+					   int desc_bytes, bool *all_contig_ptr,
+					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1145,12 +1145,22 @@
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	unsigned int authsize = ctx->authsize;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+	if (unlikely(req->dst != req->src)) {
+		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+		dst_nents = sg_count(req->dst,
+				     req->cryptlen +
+					(encrypt ? authsize : (-authsize)),
+				     &dst_chained);
+	} else {
+		src_nents = sg_count(req->src,
+				     req->cryptlen +
+					(encrypt ? authsize : 0),
+				     &src_chained);
+	}
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1234,11 +1244,9 @@
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1275,7 +1283,7 @@
 
 	/* allocate extended descriptor */
 	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				 CAAM_CMD_SZ, &all_contig);
+				 CAAM_CMD_SZ, &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1332,7 +1340,8 @@
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
 
 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+				     &dst_chained);
 
 	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
 				 DMA_TO_DEVICE, assoc_chained);
@@ -1426,8 +1435,6 @@
 	u32 *desc;
 	int ret = 0;
 
-	req->cryptlen += ctx->authsize;
-
 	/* allocate extended descriptor */
 	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
 				     CAAM_CMD_SZ, &contig);
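
The caamalg changes stop encrypt/decrypt from temporarily inflating
req->cryptlen by the tag size and instead thread an explicit
encrypt flag down to descriptor construction, so the in/out
sequence lengths and the dst != src scatterlist counts account for
the ICV on the correct side. Under this aead API the convention is:
on encrypt, cryptlen is the plaintext length and the tag is
appended to the output; on decrypt, cryptlen already includes the
tag. In sketch form:

  in_len  = req->assoclen + ivsize + req->cryptlen;
  out_len = encrypt ? req->cryptlen + authsize    /* tag appended */
                    : req->cryptlen - authsize;   /* tag stripped */
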
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index d23356d2..1d80bd3 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/of_irq.h>
+#include <linux/of_address.h>
 
 #include "compat.h"
 #include "regs.h"
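
The jr.c change is the build fix from the pull message: the job
ring setup uses OF address helpers such as of_iomap(), which are
declared in <linux/of_address.h>, so the header is now included
explicitly rather than relied upon indirectly.
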
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 905de44..b44f4dd 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -790,7 +790,7 @@
 
 	if (edesc->assoc_chained)
 		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
-	else
+	else if (areq->assoclen)
 		/* assoc_nents counts also for IV in non-contiguous cases */
 		dma_unmap_sg(dev, areq->assoc,
 			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
@@ -973,7 +973,11 @@
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
 	} else {
-		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->assoc));
+		if (areq->assoclen)
+			to_talitos_ptr(&desc->ptr[1],
+				       sg_dma_address(areq->assoc));
+		else
+			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
 		desc->ptr[1].j_extent = 0;
 	}
 
@@ -1108,7 +1112,8 @@
 						 unsigned int authsize,
 						 unsigned int ivsize,
 						 int icv_stashing,
-						 u32 cryptoflags)
+						 u32 cryptoflags,
+						 bool encrypt)
 {
 	struct talitos_edesc *edesc;
 	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
@@ -1122,10 +1127,10 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (iv)
+	if (ivsize)
 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 
-	if (assoc) {
+	if (assoclen) {
 		/*
 		 * Currently it is assumed that iv is provided whenever assoc
 		 * is.
@@ -1141,19 +1146,17 @@
 			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
 	}
 
-	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
-	src_nents = (src_nents == 1) ? 0 : src_nents;
-
-	if (!dst) {
-		dst_nents = 0;
-	} else {
-		if (dst == src) {
-			dst_nents = src_nents;
-		} else {
-			dst_nents = sg_count(dst, cryptlen + authsize,
-					     &dst_chained);
-			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
-		}
+	if (!dst || dst == src) {
+		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = dst ? src_nents : 0;
+	} else { /* dst && dst != src */
+		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+				     &src_chained);
+		src_nents = (src_nents == 1) ? 0 : src_nents;
+		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+				     &dst_chained);
+		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
 	/*
@@ -1173,9 +1176,16 @@
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+		if (assoc_chained)
+			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
+		else if (assoclen)
+			dma_unmap_sg(dev, assoc,
+				     assoc_nents ? assoc_nents - 1 : 1,
+				     DMA_TO_DEVICE);
+
 		if (iv_dma)
 			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
 		dev_err(dev, "could not allocate edescriptor\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1197,7 +1207,7 @@
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
-					      int icv_stashing)
+					      int icv_stashing, bool encrypt)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
@@ -1206,7 +1216,7 @@
 	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
 				   iv, areq->assoclen, areq->cryptlen,
 				   ctx->authsize, ivsize, icv_stashing,
-				   areq->base.flags);
+				   areq->base.flags, encrypt);
 }
 
 static int aead_encrypt(struct aead_request *req)
@@ -1216,7 +1226,7 @@
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, req->iv, 0);
+	edesc = aead_edesc_alloc(req, req->iv, 0, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1239,7 +1249,7 @@
 	req->cryptlen -= authsize;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(req, req->iv, 1);
+	edesc = aead_edesc_alloc(req, req->iv, 1, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1285,7 +1295,7 @@
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(areq, req->giv, 0);
+	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1441,7 +1451,7 @@
 }
 
 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
-						    areq)
+						    areq, bool encrypt)
 {
 	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
@@ -1449,7 +1459,7 @@
 
 	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
-				   areq->base.flags);
+				   areq->base.flags, encrypt);
 }
 
 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
@@ -1459,7 +1469,7 @@
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(areq);
+	edesc = ablkcipher_edesc_alloc(areq, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1476,7 +1486,7 @@
 	struct talitos_edesc *edesc;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(areq);
+	edesc = ablkcipher_edesc_alloc(areq, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
@@ -1628,7 +1638,7 @@
 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
 	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
-				   nbytes, 0, 0, 0, areq->base.flags);
+				   nbytes, 0, 0, 0, areq->base.flags, false);
 }
 
 static int ahash_init(struct ahash_request *areq)
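
The talitos changes mirror the caam ones and add the zero-length
handling: an encrypt flag is threaded into talitos_edesc_alloc() so
the dst != src scatterlist counts put the ICV on the correct side,
the IV is mapped based on ivsize rather than on the iv pointer, and
requests with assoclen == 0 neither map nor unmap the assoc
scatterlist (the descriptor's assoc pointer falls back to the IV's
DMA address instead). The length rule for distinct buffers, in
sketch form:

  src_len = cryptlen + (encrypt ? 0 : authsize);  /* ICV read on decrypt */
  dst_len = cryptlen + (encrypt ? authsize : 0);  /* ICV written on encrypt */
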
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 13621cc..64ebede 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -36,6 +36,7 @@
 {
 	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
 	sg1[num - 1].page_link &= ~0x02;
+	sg1[num - 1].page_link |= 0x01;
 }
 
 static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
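
The scatterwalk fix sets the chain bit that sg_chain() would set:
the low two bits of page_link encode the entry type (bit 0 marks a
chain entry, bit 1 marks the end of a table), and the old code only
cleared the end bit, so sg_next() treated the entry as a plain one
instead of following the chain. The bit layout, for reference:

  /*
   * page_link = page pointer | flags; the pointer is at least
   * 4-byte aligned, so the low two bits are free:
   *
   *   bit 0 (0x01): entry chains to another sg table
   *   bit 1 (0x02): entry terminates this table
   *
   * The core's sg_is_chain() tests exactly this bit:
   *
   *   #define sg_is_chain(sg)  ((sg)->page_link & 0x01)
   */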