Commit 27a03b1a authored by Linus Torvalds
Browse files
Pull s390 fixes from Heiko Carstens:

 - Farewell Martin Schwidefsky: add Martin to CREDITS and remove him
   from MAINTAINERS

 - Vasily Gorbik and Christian Borntraeger join as maintainers for s390

 - Fix locking bug in ctr(aes) and ctr(des) s390 specific ciphers

 - A rather large patch which fixes gcm-aes-s390 scatter gather handling

 - Fix zcrypt wrong dispatching for control domain CPRBs

 - Fix assignment of bus resources in PCI code

 - Fix structure definition for set PCI function

 - Fix one compile error and one compile warning seen when
   CONFIG_OPTIMIZE_INLINING is enabled

* tag 's390-5.2-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  MAINTAINERS: add Vasily Gorbik and Christian Borntraeger for s390
  MAINTAINERS: Farewell Martin Schwidefsky
  s390/crypto: fix possible sleep during spinlock aquired
  s390/crypto: fix gcm-aes-s390 selftest failures
  s390/zcrypt: Fix wrong dispatching for control domain CPRBs
  s390/pci: fix assignment of bus resources
  s390/pci: fix struct definition for set PCI function
  s390: mark __cpacf_check_opcode() and cpacf_query_func() as __always_inline
  s390: add unreachable() to dump_fault_info() to fix -Wmaybe-uninitialized
parents 702c31e8 674459be
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -3364,6 +3364,14 @@ S: Braunschweiger Strasse 79
S: 31134 Hildesheim
S: Germany

N: Martin Schwidefsky
D: Martin was the most significant contributor to the initial s390
D: port of the Linux Kernel and later the maintainer of the s390
D: architecture backend for almost two decades.
D: He passed away in 2019, and will be greatly missed.
S: Germany
W: https://lwn.net/Articles/789028/

N: Marcel Selhorst
E: tpmdd@selhorst.net
D: TPM driver
+4 −2
Original line number Diff line number Diff line
@@ -3049,8 +3049,9 @@ S: Maintained
F:	arch/riscv/net/

BPF JIT for S390
M:	Martin Schwidefsky <schwidefsky@de.ibm.com>
M:	Heiko Carstens <heiko.carstens@de.ibm.com>
M:	Vasily Gorbik <gor@linux.ibm.com>
M:	Christian Borntraeger <borntraeger@de.ibm.com>
L:	netdev@vger.kernel.org
L:	bpf@vger.kernel.org
S:	Maintained
@@ -13614,8 +13615,9 @@ S: Maintained
F:	drivers/video/fbdev/savage/

S390
M:	Martin Schwidefsky <schwidefsky@de.ibm.com>
M:	Heiko Carstens <heiko.carstens@de.ibm.com>
M:	Vasily Gorbik <gor@linux.ibm.com>
M:	Christian Borntraeger <borntraeger@de.ibm.com>
L:	linux-s390@vger.kernel.org
W:	http://www.ibm.com/developerworks/linux/linux390/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
+111 −45
Original line number Diff line number Diff line
@@ -27,14 +27,14 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);
	locked = mutex_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
@@ -826,7 +826,7 @@ static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
	return 0;
}

static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
@@ -834,11 +834,37 @@ static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
	scatterwalk_start(&gw->walk, sg);
}

static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
/*
 * Map the current scatterlist position of the gcm walk, skipping any
 * zero-length sg entries until data is found or the list is exhausted.
 *
 * Returns the number of contiguous bytes mapped at gw->walk_ptr, or 0
 * if there are no more sg entries with data (nothing is mapped then).
 */
static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	/* clamp to current sg entry length and overall bytes remaining */
	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		/* current entry is exhausted - step to the next sg entry */
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

/*
 * Unmap the currently mapped scatterlist position and advance the walk
 * past the nbytes that were consumed, updating the remaining byte count.
 * gw->walk_ptr is invalidated; a new _gcm_sg_clamp_and_map() is needed
 * before the walk data can be touched again.
 */
static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(&gw->walk);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	/* minbytesneeded <= AES_BLOCK_SIZE */
	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
@@ -851,13 +877,11 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
		goto out;
	}

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	if (!gw->walk_bytes) {
		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
@@ -869,51 +893,90 @@ static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		gw->walk_bytes_remain -= n;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, n);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);

		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}

		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
		if (!gw->walk_bytes) {
			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
							gw->walk_bytes_remain);
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
		gw->walk_ptr = scatterwalk_map(&gw->walk);
	}

out:
	return gw->nbytes;
}

static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return;
		return 0;

	if (gw->ptr == gw->buf) {
		n = gw->buf_bytes - bytesdone;
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes -= n;
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else {
		gw->walk_bytes_remain -= bytesdone;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, bytesdone);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

/*
 * Complete one output walk step of bytesdone bytes.
 *
 * If the output was produced into the local bounce buffer
 * (gw->ptr == gw->buf), copy it out piecewise into the scatterlist,
 * mapping/unmapping each sg segment as needed.  Otherwise the data was
 * written in place into the mapped sg area, so just unmap and advance.
 *
 * Returns the number of bytes actually committed to the scatterlist;
 * this is less than bytesdone only if the scatterlist ran out of space
 * during the bounce-buffer copy-out.
 */
static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		/* flush bounce buffer contents out to the scatterlist */
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i; /* sg list exhausted early */
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
@@ -926,7 +989,7 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int len, in_bytes, out_bytes,
	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];
@@ -963,14 +1026,14 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_sg_walk_start(&gw_in, req->src, len);
	gcm_sg_walk_start(&gw_out, req->dst, len);
	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
@@ -997,8 +1060,11 @@ static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);
+4 −3
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/cpacf.h>
@@ -21,7 +22,7 @@
#define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

@@ -374,7 +375,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);
	locked = mutex_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -391,7 +392,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
		mutex_unlock(&ctrblk_lock);
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
+2 −2
Original line number Diff line number Diff line
@@ -160,8 +160,8 @@ struct ap_config_info {
	unsigned char Nd;		/* max # of Domains - 1 */
	unsigned char _reserved3[10];
	unsigned int apm[8];		/* AP ID mask */
	unsigned int aqm[8];		/* AP queue mask */
	unsigned int adm[8];		/* AP domain mask */
	unsigned int aqm[8];		/* AP (usage) queue mask */
	unsigned int adm[8];		/* AP (control) domain mask */
	unsigned char _reserved4[16];
} __aligned(8);

Loading