Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    |   1
-rw-r--r--  arch/s390/crypto/aes_s390.c          | 118
-rw-r--r--  arch/s390/crypto/des_s390.c          |  98
-rw-r--r--  arch/s390/include/asm/ccwdev.h       |   2
-rw-r--r--  arch/s390/include/asm/jump_label.h   |   2
-rw-r--r--  arch/s390/include/asm/lowcore.h      |  11
-rw-r--r--  arch/s390/include/uapi/asm/statfs.h  |  10
-rw-r--r--  arch/s390/kernel/entry.S             |   1
-rw-r--r--  arch/s390/kernel/entry64.S           |   1
-rw-r--r--  arch/s390/kernel/head64.S            |   7
-rw-r--r--  arch/s390/kernel/ptrace.c            |   9
-rw-r--r--  arch/s390/kernel/smp.c               |   4
-rw-r--r--  arch/s390/kernel/vtime.c             |   2
-rw-r--r--  arch/s390/kvm/diag.c                 |   2
-rw-r--r--  arch/s390/lib/uaccess_pt.c           |   3
-rw-r--r--  arch/s390/mm/page-states.c           |  10
-rw-r--r--  arch/s390/net/bpf_jit_comp.c         |  30
17 files changed, 206 insertions, 105 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 97dcbea97a1c..d8d6eeca56b0 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,6 +116,7 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b4dbade8ca24..fd104db9cea1 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
#include "crypt_s390.h"
#define AES_KEYLEN_128 1
@@ -32,10 +33,10 @@
#define AES_KEYLEN_256 4
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;
struct s390_aes_ctx {
- u8 iv[AES_BLOCK_SIZE];
u8 key[AES_MAX_KEY_SIZE];
long enc;
long dec;
@@ -56,8 +57,7 @@ struct pcc_param {
struct s390_xts_ctx {
u8 key[32];
- u8 xts_param[16];
- struct pcc_param pcc;
+ u8 pcc_key[32];
long enc;
long dec;
int key_len;
@@ -441,30 +441,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
struct blkcipher_walk *walk)
{
+ struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[AES_BLOCK_SIZE];
+ u8 key[AES_MAX_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(param, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+ memcpy(param.key, sctx->key, sctx->key_len);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, param, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, param, AES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
out:
return ret;
@@ -481,7 +487,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
return fallback_blk_enc(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->enc, &walk);
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +501,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
return fallback_blk_dec(desc, dst, src, nbytes);
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+ return cbc_aes_crypt(desc, sctx->dec, &walk);
}
static struct crypto_alg cbc_aes_alg = {
@@ -586,7 +592,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_128_ENCRYPT;
xts_ctx->dec = KM_XTS_128_DECRYPT;
memcpy(xts_ctx->key + 16, in_key, 16);
- memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+ memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
break;
case 48:
xts_ctx->enc = 0;
@@ -597,7 +603,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
xts_ctx->enc = KM_XTS_256_ENCRYPT;
xts_ctx->dec = KM_XTS_256_DECRYPT;
memcpy(xts_ctx->key, in_key, 32);
- memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+ memcpy(xts_ctx->pcc_key, in_key + 32, 32);
break;
default:
*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -616,29 +622,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
unsigned int nbytes = walk->nbytes;
unsigned int n;
u8 *in, *out;
- void *param;
+ struct pcc_param pcc_param;
+ struct {
+ u8 key[32];
+ u8 init[16];
+ } xts_param;
if (!nbytes)
goto out;
- memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
- memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
- memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
- memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
- param = xts_ctx->pcc.key + offset;
- ret = crypt_s390_pcc(func, param);
+ memset(pcc_param.block, 0, sizeof(pcc_param.block));
+ memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+ memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+ memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+ memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+ ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
if (ret < 0)
return -EIO;
- memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
- param = xts_ctx->key + offset;
+ memcpy(xts_param.key, xts_ctx->key, 32);
+ memcpy(xts_param.init, pcc_param.xts, 16);
do {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
- ret = crypt_s390_km(func, param, out, in, n);
+ ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
if (ret < 0 || ret != n)
return -EIO;
@@ -748,43 +758,70 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[AES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
if (!walk->nbytes)
return ret;
- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= AES_BLOCK_SIZE) {
- /* only use complete blocks, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = AES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, sctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > AES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr, AES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -792,14 +829,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrblk);
+ AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
return ret;
}
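
Note: the ctr mode rework above replaces unserialized use of the shared ctrblk page with a trylock-or-fallback scheme, and des_s390.c below receives the identical treatment. A user-space sketch of the two building blocks follows; ctr_inc/ctrblk_init and the 16-byte/4096-byte sizes are stand-ins for illustration, not kernel API:

#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16u    /* AES_BLOCK_SIZE */
#define PAGE_SZ    4096u

/* Stand-in for the kernel's crypto_inc(): big-endian counter increment. */
static void ctr_inc(uint8_t *ctr, unsigned int size)
{
	unsigned int i = size;

	while (i-- > 0)
		if (++ctr[i] != 0)
			break;
}

/* Same logic as __ctrblk_init() above: replicate the first counter
 * block across the buffer, incrementing each copy, so one KMCTR call
 * can process up to a page of data.  Callers that lose the
 * spin_trylock(&ctrblk_lock) race skip this and process a single
 * on-stack block per iteration, so nobody ever blocks on the lock. */
static unsigned int ctrblk_init(uint8_t *ctrptr, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. one page */
	n = (nbytes > PAGE_SZ) ? PAGE_SZ : nbytes & ~(BLOCK_SIZE - 1);
	for (i = BLOCK_SIZE; i < n; i += BLOCK_SIZE) {
		memcpy(ctrptr + i, ctrptr + i - BLOCK_SIZE, BLOCK_SIZE);
		ctr_inc(ctrptr + i, BLOCK_SIZE);
	}
	return n;
}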
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index bcca01c9989d..f2d6cccddcf8 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *iv, struct blkcipher_walk *walk)
+ struct blkcipher_walk *walk)
{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, iv, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
out:
return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,83 @@ static struct crypto_alg cbc_des3_alg = {
}
};
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+ struct s390_des_ctx *ctx,
+ struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[DES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= DES_BLOCK_SIZE) {
- /* align to block size, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = DES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, ctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > DES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr, DES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
-
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ } else {
+ if (!nbytes)
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrblk);
+ DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
return ret;
}
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f201af8be580..31b5ca8f8c3d 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -219,7 +219,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
#define to_ccwdev(n) container_of(n, struct ccw_device, dev)
#define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_probe_console(struct ccw_driver *);
extern void ccw_device_wait_idle(struct ccw_device *);
extern int ccw_device_force_console(struct ccw_device *);
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73e..346b1c85ffb4 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm goto("0: brcl 0,0\n"
+ asm_volatile_goto("0: brcl 0,0\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..2bed4f02a558 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -142,9 +142,9 @@ struct _lowcore {
__u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
/* Interrupt response block */
- __u8 irb[64]; /* 0x0300 */
+ __u8 irb[96]; /* 0x0300 */
- __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
+ __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -288,12 +288,13 @@ struct _lowcore {
__u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
/* Interrupt response block. */
- __u8 irb[64]; /* 0x0400 */
+ __u8 irb[96]; /* 0x0400 */
+ __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */
/* Per cpu primary space access list */
- __u32 paste[16]; /* 0x0440 */
+ __u32 paste[16]; /* 0x0480 */
- __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
+ __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */
/*
* 0xe00 contains the address of the IPL Parameter Information
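
Note: growing irb[] from 64 to 96 bytes ripples through the rest of struct _lowcore, because each pad_* field is named after its starting offset; the pads and paste[] therefore move in lockstep. A miniature of the same fixed-offset discipline (illustrative only, not code from this patch):

#include <assert.h>
#include <stddef.h>

struct mini_lowcore {
	unsigned char irb[96];                  /* 0x000 */
	unsigned char pad_0x060[0x080 - 0x060]; /* 0x060 */
	unsigned int paste[16];                 /* 0x080 */
};

/* If irb[] grows again without adjusting the pad, this trips. */
static_assert(offsetof(struct mini_lowcore, paste) == 0x80,
	      "paste[] must stay at its architected offset");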
diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h
index a61d538756f2..471eb09184d4 100644
--- a/arch/s390/include/uapi/asm/statfs.h
+++ b/arch/s390/include/uapi/asm/statfs.h
@@ -35,11 +35,11 @@ struct statfs {
struct statfs64 {
unsigned int f_type;
unsigned int f_bsize;
- unsigned long f_blocks;
- unsigned long f_bfree;
- unsigned long f_bavail;
- unsigned long f_files;
- unsigned long f_ffree;
+ unsigned long long f_blocks;
+ unsigned long long f_bfree;
+ unsigned long long f_bavail;
+ unsigned long long f_files;
+ unsigned long long f_ffree;
__kernel_fsid_t f_fsid;
unsigned int f_namelen;
unsigned int f_frsize;
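
Note: the statfs64 widening matters because 31-bit s390 userland has 32-bit longs, so with plain "unsigned long" the f_blocks family silently shrank to 32 bits in compat builds; "unsigned long long" pins the fields to 64 bits on both ABIs. A quick illustration (assumes an ILP32-style compat environment for the first line of output):

#include <stdio.h>

int main(void)
{
	/* On a 31/32-bit build: 4 vs. 8.  On 64-bit: 8 vs. 8.  Only
	 * "unsigned long long" gives both the same statfs64 layout,
	 * which is what a uapi header must guarantee. */
	printf("unsigned long:      %zu bytes\n", sizeof(unsigned long));
	printf("unsigned long long: %zu bytes\n", sizeof(unsigned long long));
	return 0;
}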
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4d5e6f8a7978..32bb7bf29397 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -265,6 +265,7 @@ sysc_sigpending:
tm __TI_flags+3(%r12),_TIF_SYSCALL
jno sysc_return
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
+ l %r10,__TI_sysc_table(%r12) # 31 bit system call table
xr %r8,%r8 # svc 0 returns -ENOSYS
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
jnl sysc_nr_ok # invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 4c17eece707e..2e3befddecee 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -293,6 +293,7 @@ sysc_sigpending:
tm __TI_flags+7(%r12),_TIF_SYSCALL
jno sysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
+ lg %r10,__TI_sysc_table(%r12) # address of system call table
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
+ .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
ENTRY(_ehead)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..9677d935583c 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -314,7 +314,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* psw and gprs are stored on the stack
*/
if (addr == (addr_t) &dummy->regs.psw.mask &&
- ((data & ~PSW_MASK_USER) != psw_user_bits ||
+ (((data^psw_user_bits) & ~PSW_MASK_USER) ||
+ (((data^psw_user_bits) & PSW_MASK_ASC) &&
+ ((data|psw_user_bits) & PSW_MASK_ASC) == PSW_MASK_ASC) ||
((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
/* Invalid psw mask. */
return -EINVAL;
@@ -627,7 +629,10 @@ static int __poke_user_compat(struct task_struct *child,
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Build a 64 bit psw mask from 31 bit mask. */
- if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+ if (((tmp^psw32_user_bits) & ~PSW32_MASK_USER) ||
+ (((tmp^psw32_user_bits) & PSW32_MASK_ASC) &&
+ ((tmp|psw32_user_bits) & PSW32_MASK_ASC)
+ == PSW32_MASK_ASC))
/* Invalid psw mask. */
return -EINVAL;
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
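
Note: one reading of the tightened PSW checks (a sketch; the masks are the kernel's, the helper is hypothetical): a poked mask is rejected if it differs from psw_user_bits anywhere outside the user-modifiable bits, or if it changes the address-space-control field such that the combined (OR'ed) bit pattern equals the full ASC mask, i.e. the home-space encoding:

/* Hypothetical helper mirroring the condition added above. */
static int psw_mask_invalid(unsigned long data, unsigned long user_bits,
			    unsigned long mask_user, unsigned long mask_asc)
{
	if ((data ^ user_bits) & ~mask_user)
		return 1;	/* touches a non-user-changeable bit */
	if (((data ^ user_bits) & mask_asc) &&
	    ((data | user_bits) & mask_asc) == mask_asc)
		return 1;	/* disallowed ASC transition */
	return 0;
}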
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 4f977d0d25c2..14647fe09d0c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -933,7 +933,7 @@ static ssize_t show_idle_count(struct device *dev,
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -951,7 +951,7 @@ static ssize_t show_idle_time(struct device *dev,
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
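
Note: the ACCESS_ONCE fixes here and in vtime.c below all harden the same lockless reader: the closing comparison must re-read idle->sequence from memory, otherwise the compiler may reuse the value loaded at the top of the loop and the retry never fires. Self-contained sketch of the pattern, with ACCESS_ONCE modeled as a volatile cast:

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct idle_stats {
	unsigned int sequence;          /* odd while a writer is active */
	unsigned long long idle_time;
};

static unsigned long long snapshot_idle_time(struct idle_stats *idle)
{
	unsigned long long t;
	unsigned int seq;

	do {
		seq = ACCESS_ONCE(idle->sequence);
		t = ACCESS_ONCE(idle->idle_time);
		/* the final ACCESS_ONCE below is the point of the fix:
		 * it forces a fresh load for the consistency re-check */
	} while ((seq & 1) || (ACCESS_ONCE(idle->sequence) != seq));
	return t;
}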
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3fb09359eda6..737d50caa4fe 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -190,7 +190,7 @@ cputime64_t s390_get_idle_time(int cpu)
sequence = ACCESS_ONCE(idle->sequence);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
- } while ((sequence & 1) || (idle->sequence != sequence));
+ } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence));
return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 1c01a9912989..6acb24d606fd 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -130,7 +130,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
+ int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
trace_kvm_s390_handle_diag(vcpu, code);
switch (code) {
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 50ea137a2d3c..1bf40ef57cf6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
* contains the (negative) exception code.
*/
#ifdef CONFIG_64BIT
+
static unsigned long follow_table(struct mm_struct *mm,
unsigned long address, int write)
{
unsigned long *table = (unsigned long *)__pa(mm->pgd);
+ if (unlikely(address > mm->context.asce_limit - 1))
+ return -0x38UL;
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff);
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
if (!cmma_flag)
return;
+ /*
+ * Disable CMM for dump, otherwise the tprot based memory
+ * detection can fail because of unstable pages.
+ */
+ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ cmma_flag = 0;
+ return;
+ }
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..3bc6b7e43b24 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -243,7 +243,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
case BPF_S_LD_W_IND:
case BPF_S_LD_H_IND:
case BPF_S_LD_B_IND:
- case BPF_S_LDX_B_MSH:
case BPF_S_LD_IMM:
case BPF_S_LD_MEM:
case BPF_S_MISC_TXA:
@@ -335,14 +334,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
- /* m %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
- /* lr %r5,%r4 */
- EMIT2(0x1854);
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break;
case BPF_S_ALU_MOD_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -352,16 +353,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_K: /* A %= K */
+ if (K == 1) {
+ /* lhi %r5,0 */
+ EMIT4(0xa7580000);
+ break;
+ }
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* d %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
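
Note: the JIT switches from the signed divide instructions (dr/d, plus a reciprocal-multiply trick for DIV_K) to the unsigned dlr/dl, because BPF's A and X registers are unsigned 32-bit values. The K == 1 shortcuts also avoid emitting a division that cannot change A (DIV) or that always yields 0 (MOD). A stand-alone demonstration of why the signedness matters:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t a = 0x80000004, x = 2;

	/* unsigned divide -- what BPF semantics require */
	printf("dlr-style: %u\n", (unsigned)(a / x));        /* 1073741826 */
	/* signed divide -- what the old dr/d instructions did */
	printf("dr-style:  %d\n", (int)((int32_t)a / (int32_t)x)); /* -1073741822 */
	return 0;
}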