author		Johannes Thumshirn <jthumshirn@suse.de>	2019-02-20 12:57:50 +0100
committer	Johannes Thumshirn <jthumshirn@suse.de>	2019-02-20 12:57:50 +0100
commit		c1dfd5cd9ebb83baaeaeeb686198560ea4a79459 (patch)
tree		237b50aa03aaffb5d00ff17baa56d497c08b8f5e
parent		58a0f30a828df6886d912ed22d595d9caebf0617 (diff)
parent		0046431e7da894b49f75c1f485b32c1f7a344804 (diff)
Merge remote-tracking branch 'origin/SLE15' into SLE12-SP4

Conflicts:
	patches.kabi/nvme-kABI-fix-for-scan_lock.patch
	series.conf

suse-commit: 2056933076eabef7607b53073bc8e6b47fcf2155
-rw-r--r--	drivers/nvme/host/core.c	 8
-rw-r--r--	drivers/nvme/host/nvme.h	 4
-rw-r--r--	include/linux/filter.h		10
-rw-r--r--	kernel/bpf/core.c		58
4 files changed, 53 insertions(+), 27 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 26c0da4fcb4b..3768f26a1617 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1168,6 +1168,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
* effects say only one namespace is affected.
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ mutex_lock(&ctrl->scan_lock);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
@@ -1196,8 +1197,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
*/
if (effects & NVME_CMD_EFFECTS_LBCC)
nvme_update_formats(ctrl);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
+ mutex_unlock(&ctrl->scan_lock);
+ }
if (effects & NVME_CMD_EFFECTS_CCC)
nvme_init_identify(ctrl);
if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -3263,6 +3266,7 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_identify_ctrl(ctrl, &id))
return;
+ mutex_lock(&ctrl->scan_lock);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3271,6 +3275,7 @@ static void nvme_scan_work(struct work_struct *work)
}
nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
+ mutex_unlock(&ctrl->scan_lock);
kfree(id);
mutex_lock(&ctrl->namespaces_mutex);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3497,6 +3502,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->state = NVME_CTRL_NEW;
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
mutex_init(&ctrl->namespaces_mutex);
ctrl->dev = dev;
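Taken together, the core.c hunks make passthrough commands whose effects can change LBA contents (LBCC) or enable/disable namespaces (CSE) mutually exclusive with the namespace scan worker: both paths now take the new ctrl->scan_lock, so a Format NVM issued through the ioctl path can no longer race a rescan. A minimal userspace model of that exclusion, with a pthread mutex standing in for the kernel mutex and passthru()/scan() as illustrative stand-ins rather than driver functions:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for nvme_passthru_start()/nvme_passthru_end() around a
 * command with LBCC/CSE effects: exclusive with scanning. */
static void *passthru(void *arg)
{
	pthread_mutex_lock(&scan_lock);
	puts("passthru: format in flight, scanner excluded");
	pthread_mutex_unlock(&scan_lock);
	return NULL;
}

/* Stands in for nvme_scan_work(): rescans only under scan_lock. */
static void *scan(void *arg)
{
	pthread_mutex_lock(&scan_lock);
	puts("scan: namespaces observed in a consistent state");
	pthread_mutex_unlock(&scan_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, passthru, NULL);
	pthread_create(&b, NULL, scan, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}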
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index be7aafccc6e5..d319aad50f67 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -234,6 +234,10 @@ struct nvme_ctrl {
u16 maxcmd;
int nr_reconnects;
struct nvmf_ctrl_options *opts;
+
+#ifndef __GENKSYMS__
+ struct mutex scan_lock;
+#endif
};
#ifdef CONFIG_NVME_MULTIPATH
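The #ifndef __GENKSYMS__ guard is the usual kABI trick: genksyms computes symbol CRCs with __GENKSYMS__ defined, so it never sees the appended member and the checksums of everything referencing struct nvme_ctrl stay stable (presumably what the patches.kabi/nvme-kABI-fix-for-scan_lock.patch named in the merge conflicts carries). A sketch of the pattern, using a hypothetical struct:

struct example {
	int old_member;		/* part of the CRC'd layout */
#ifndef __GENKSYMS__
	struct mutex new_lock;	/* genksyms never sees this, so the
				 * symbol CRCs, and hence the kABI,
				 * are unchanged */
#endif
};

Hiding the member only fools the checksum; the layout change itself must still be benign, e.g. the member is appended and the structure is allocated by the driver itself rather than embedded in external code.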
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 512711fcd8bb..1e0b786e32e2 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -48,14 +48,10 @@ struct bpf_prog_aux;
#define BPF_REG_X BPF_REG_7
#define BPF_REG_TMP BPF_REG_8
-/* Kernel hidden auxiliary/helper register for hardening step.
- * Only used by eBPF JITs. It's nothing more than a temporary
- * register that JITs use internally, only that here it's part
- * of eBPF instructions that have been rewritten for blinding
- * constants. See JIT pre-step in bpf_jit_blind_constants().
- */
+/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX MAX_BPF_REG
-#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1)
+#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
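For orientation: BPF_REG_AX sits just past the user-visible register space, the new MAX_BPF_EXT_REG names the size of kernel-internal register files that must also hold AX, and MAX_BPF_JIT_REG is kept as an alias so existing JITs compile unchanged. A self-contained mirror of the resulting numbering (values copied here for illustration, not taken from the headers):

enum {
	MIRROR_MAX_BPF_REG	= 11,			  /* R0..R10 are user-visible */
	MIRROR_BPF_REG_AX	= MIRROR_MAX_BPF_REG,	  /* hidden scratch register  */
	MIRROR_MAX_BPF_EXT_REG	= MIRROR_MAX_BPF_REG + 1, /* sizes regs[] arrays      */
	MIRROR_MAX_BPF_JIT_REG	= MIRROR_MAX_BPF_EXT_REG, /* alias kept for JITs      */
};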
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index fca6acd0e889..5f37dc7a7684 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -51,6 +51,7 @@
#define DST regs[insn->dst_reg]
#define SRC regs[insn->src_reg]
#define FP regs[BPF_REG_FP]
+#define AX regs[BPF_REG_AX]
#define ARG1 regs[BPF_REG_ARG1]
#define CTX regs[BPF_REG_CTX]
#define IMM insn->imm
@@ -556,6 +557,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
+ /* Constraints on AX register:
+ *
+ * AX register is inaccessible from user space. It is mapped in
+ * all JITs, and used here for constant blinding rewrites. It is
+ * typically "stateless" meaning its contents are only valid within
+ * the executed instruction, but not across several instructions.
+ * There are a few exceptions however which are further detailed
+ * below.
+ *
+ * Constant blinding is only used by JITs, not in the interpreter.
+ * The interpreter uses AX in some occasions as a local temporary
+ * register e.g. in DIV or MOD instructions.
+ *
+ * In restricted circumstances, the verifier can also use the AX
+ * register for rewrites as long as they do not interfere with
+ * the above cases!
+ */
+ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
+ goto out;
+
if (from->imm == 0 &&
(from->code == (BPF_ALU | BPF_MOV | BPF_K) ||
from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
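The new bail-out above (goto out for any instruction that already uses BPF_REG_AX) exists because the blinding rewrite itself uses AX as scratch: blinding such an instruction would clobber its own operand. A self-contained userspace model of the rewrite that bpf_jit_blind_insn() applies to a MOV with an immediate (imm and rnd are arbitrary example values):

#include <stdint.h>
#include <stdio.h>

/* Userspace model of constant blinding: the raw immediate never
 * appears in the emitted program; only imm ^ rnd and rnd do. */
int main(void)
{
	uint32_t imm = 0x12345678;	/* constant from the original insn */
	uint32_t rnd = 0x9e3779b9;	/* per-rewrite random (fixed here) */

	uint32_t ax  = imm ^ rnd;	/* MOV AX, imm ^ rnd */
	ax ^= rnd;			/* XOR AX, rnd       */
	uint32_t dst = ax;		/* MOV DST, AX       */

	printf("dst == imm? %s\n", dst == imm ? "yes" : "no");
	return 0;
}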
@@ -771,7 +792,6 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn,
u64 *stack)
{
- u64 tmp;
static const void *jumptable[256] = {
[0 ... 255] = &&default_label,
/* Now overwrite non-defaults ... */
@@ -945,22 +965,22 @@ select_insn:
ALU64_MOD_X:
if (unlikely(SRC == 0))
return 0;
- div64_u64_rem(DST, SRC, &tmp);
- DST = tmp;
+ div64_u64_rem(DST, SRC, &AX);
+ DST = AX;
CONT;
ALU_MOD_X:
if (unlikely((u32)SRC == 0))
return 0;
- tmp = (u32) DST;
- DST = do_div(tmp, (u32) SRC);
+ AX = (u32) DST;
+ DST = do_div(AX, (u32) SRC);
CONT;
ALU64_MOD_K:
- div64_u64_rem(DST, IMM, &tmp);
- DST = tmp;
+ div64_u64_rem(DST, IMM, &AX);
+ DST = AX;
CONT;
ALU_MOD_K:
- tmp = (u32) DST;
- DST = do_div(tmp, (u32) IMM);
+ AX = (u32) DST;
+ DST = do_div(AX, (u32) IMM);
CONT;
ALU64_DIV_X:
if (unlikely(SRC == 0))
@@ -970,17 +990,17 @@ select_insn:
ALU_DIV_X:
if (unlikely((u32)SRC == 0))
return 0;
- tmp = (u32) DST;
- do_div(tmp, (u32) SRC);
- DST = (u32) tmp;
+ AX = (u32) DST;
+ do_div(AX, (u32) SRC);
+ DST = (u32) AX;
CONT;
ALU64_DIV_K:
DST = div64_u64(DST, IMM);
CONT;
ALU_DIV_K:
- tmp = (u32) DST;
- do_div(tmp, (u32) IMM);
- DST = (u32) tmp;
+ AX = (u32) DST;
+ do_div(AX, (u32) IMM);
+ DST = (u32) AX;
CONT;
ALU_END_TO_BE:
switch (IMM) {
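The divmod hunks above are mechanical: the on-stack tmp becomes the AX slot of the register file, so all interpreter scratch state now lives in regs[] where the hidden register is uniformly visible. Reading them needs the kernel do_div() contract: it divides in place, leaving the quotient in its first argument and returning the remainder, which is why the MOD paths assign the return value to DST while the DIV paths keep the updated variable. A userspace model (model_do_div is a stand-in for the kernel macro, not the real thing):

#include <stdint.h>
#include <stdio.h>

/* model_do_div(): like the kernel macro, replaces *n with the
 * quotient and returns the remainder. */
static uint32_t model_do_div(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t ax = 10;			/* AX = (u32) DST        */
	uint32_t mod = model_do_div(&ax, 3);	/* DST = do_div(AX, SRC) */

	printf("MOD path: 10 %% 3 = %u\n", mod);	/* remainder -> DST   */
	printf("DIV path: 10 / 3 = %llu\n",
	       (unsigned long long)ax);			/* quotient left in AX */
	return 0;
}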
@@ -1235,7 +1255,7 @@ load_word:
* BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
*/
- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &AX);
if (likely(ptr != NULL)) {
BPF_R0 = get_unaligned_be32(ptr);
CONT;
@@ -1245,7 +1265,7 @@ load_word:
LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
off = IMM;
load_half:
- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &AX);
if (likely(ptr != NULL)) {
BPF_R0 = get_unaligned_be16(ptr);
CONT;
@@ -1255,7 +1275,7 @@ load_half:
LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
off = IMM;
load_byte:
- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+ ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &AX);
if (likely(ptr != NULL)) {
BPF_R0 = *(u8 *)ptr;
CONT;
@@ -1284,7 +1304,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
- u64 regs[MAX_BPF_REG]; \
+ u64 regs[MAX_BPF_EXT_REG]; \
\
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
ARG1 = (u64) (unsigned long) ctx; \