author     Kernel Build Daemon <kbuild@suse.de>    2018-04-07 07:22:50 +0200
committer  Kernel Build Daemon <kbuild@suse.de>    2018-04-07 07:22:50 +0200
commit     dab9e46d401d314d2bcfc2e528bec855bcb7a1f2 (patch)
tree       2ba91566b214c628d2a8a1e31420be9011b1df3a
parent     f3c6fe23f549dd452d4cf69cb61e9d6057033d5e (diff)
parent     2cae95f0b927c94257716cfbf3ceb84386918e06 (diff)
Merge branch 'SLE12-SP3' into openSUSE-42.3 (tag: rpm-4.4.126-48)
-rw-r--r--  arch/s390/include/asm/mmu.h          |   4
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |   4
-rw-r--r--  arch/s390/include/asm/tlbflush.h     |  90
-rw-r--r--  arch/x86/kernel/cpu/spec_ctrl.c      |  10
-rw-r--r--  drivers/hv/hv_balloon.c              | 115
5 files changed, 105 insertions(+), 118 deletions(-)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 081b2ad99d73..0ea085731552 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -20,9 +20,13 @@ typedef struct {
unsigned int has_pgste:1;
/* The mmu context uses storage keys. */
unsigned int use_skey:1;
+#ifndef __GENKSYMS__
+ spinlock_t lock;
+#endif
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
+ .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
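The new spinlock is wrapped in #ifndef __GENKSYMS__ because genksyms computes the kABI symbol CRCs from the preprocessed headers with __GENKSYMS__ defined; the added member is therefore invisible to the checksum pass while still present in the compiled kernel. A minimal sketch of the pattern, using a hypothetical struct (example_ctx is not from the patch):

    #include <linux/spinlock.h>

    /*
     * Illustrative only: the member inside #ifndef __GENKSYMS__ is
     * hidden from the CRC calculation but compiled into the real
     * kernel. Whether the resulting size change is tolerable depends
     * on who embeds or allocates the struct.
     */
    struct example_ctx {
            unsigned int flags;
    #ifndef __GENKSYMS__
            spinlock_t lock;        /* new member, invisible to genksyms */
    #endif
    };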
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index bed863c65928..73acbba93bd4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ spin_lock_init(&mm->context.lock);
spin_lock_init(&mm->context.list_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
INIT_LIST_HEAD(&mm->context.gmap_list);
@@ -114,8 +115,7 @@ static inline void finish_arch_post_lock_switch(void)
cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- if (mm->context.flush_mm)
- __tlb_flush_mm(mm);
+ __tlb_flush_mm_lazy(mm);
preempt_enable();
}
set_fs(current->thread.mm_segment);
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 6cf195c548b9..492d552e7622 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -26,17 +26,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
: : "a" (2048), "a" (asce) : "cc");
}
-/*
- * Flush TLB entries for a specific ASCE on the local CPU
- */
-static inline void __tlb_flush_idte_local(unsigned long asce)
-{
- /* Local TLB flush for the mm */
- asm volatile(
- " .insn rrf,0xb98e0000,0,%0,%1,1"
- : : "a" (2048), "a" (asce) : "cc");
-}
-
#ifdef CONFIG_SMP
void smp_ptlb_all(void);
@@ -51,51 +40,26 @@ static inline void __tlb_flush_global(void)
}
/*
- * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
- * this implicates multiple ASCEs!).
+ * Flush TLB entries for a specific ASCE on all CPUs.
*/
-static inline void __tlb_flush_full(struct mm_struct *mm)
+static inline void __tlb_flush_mm(struct mm_struct * mm)
{
+ /*
+ * If the machine has IDTE we prefer to do a per mm flush
+ * on all cpus instead of doing a local flush if the mm
+ * only ran on the local cpu.
+ */
preempt_disable();
atomic_add(0x10000, &mm->context.attach_count);
- if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- /* Local TLB flush */
- __tlb_flush_local();
+ /* Reset TLB flush mask */
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+ barrier();
+ if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) {
+ __tlb_flush_idte(mm->context.asce);
} else {
/* Global TLB flush */
__tlb_flush_global();
- /* Reset TLB flush mask */
- if (MACHINE_HAS_TLB_LC)
- cpumask_copy(mm_cpumask(mm),
- &mm->context.cpu_attach_mask);
- }
- atomic_sub(0x10000, &mm->context.attach_count);
- preempt_enable();
-}
-
-/*
- * Flush TLB entries for a specific ASCE on all CPUs. Should never be used
- * when more than one asce (e.g. gmap) ran on this mm.
- */
-static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
-{
- int active, count;
-
- preempt_disable();
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
- cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
- __tlb_flush_idte_local(asce);
- } else {
- if (MACHINE_HAS_IDTE)
- __tlb_flush_idte(asce);
- else
- __tlb_flush_global();
- /* Reset TLB flush mask */
- if (MACHINE_HAS_TLB_LC)
- cpumask_copy(mm_cpumask(mm),
- &mm->context.cpu_attach_mask);
}
atomic_sub(0x10000, &mm->context.attach_count);
preempt_enable();
@@ -110,47 +74,35 @@ static inline void __tlb_flush_kernel(void)
}
#else
#define __tlb_flush_global() __tlb_flush_local()
-#define __tlb_flush_full(mm) __tlb_flush_local()
/*
* Flush TLB entries for a specific ASCE on all CPUs.
*/
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
- if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(asce);
- else
- __tlb_flush_local();
+ __tlb_flush_local();
}
static inline void __tlb_flush_kernel(void)
{
- if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local(init_mm.context.asce);
- else
- __tlb_flush_local();
+ __tlb_flush_local();
}
-#endif
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
- /*
- * If the machine has IDTE we prefer to do a per mm flush
- * on all cpus instead of doing a local flush if the mm
- * only ran on the local cpu.
- */
- if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, mm->context.asce);
- else
- __tlb_flush_full(mm);
+ __tlb_flush_local();
}
+#endif
+
static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
+ spin_lock(&mm->context.lock);
if (mm->context.flush_mm) {
- __tlb_flush_mm(mm);
mm->context.flush_mm = 0;
+ __tlb_flush_mm(mm);
}
+ spin_unlock(&mm->context.lock);
}
/*
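The reordering in __tlb_flush_mm_lazy() is the heart of this file's change: flush_mm is now cleared before the flush, and both steps sit under the new context.lock, so a flush request raised while the IDTE or global flush is in flight can no longer be lost to a clear that runs afterwards. Routing finish_arch_post_lock_switch() (see the mmu_context.h hunk above) through the same helper puts every reader of flush_mm under that lock. A userspace model of the fixed ordering, assuming nothing beyond POSIX threads (all names below are made up; do_flush() stands in for __tlb_flush_mm()):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool flush_pending;

    static void do_flush(void)
    {
            /* model of the real TLB flush */
    }

    static void flush_lazy(void)
    {
            pthread_mutex_lock(&ctx_lock);
            if (flush_pending) {
                    flush_pending = false;  /* clear first ... */
                    do_flush();             /* ... then flush, still locked */
            }
            pthread_mutex_unlock(&ctx_lock);
    }

    static void request_flush(void)
    {
            pthread_mutex_lock(&ctx_lock);
            flush_pending = true;   /* cannot race with the clear above */
            pthread_mutex_unlock(&ctx_lock);
    }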
diff --git a/arch/x86/kernel/cpu/spec_ctrl.c b/arch/x86/kernel/cpu/spec_ctrl.c
index 7f7c7208d864..3f50f45f86ee 100644
--- a/arch/x86/kernel/cpu/spec_ctrl.c
+++ b/arch/x86/kernel/cpu/spec_ctrl.c
@@ -76,16 +76,8 @@ void x86_spec_check(void)
ibpb_state = 1;
printk_once(KERN_INFO "IBPB: Initialized\n");
} else {
- switch (boot_cpu_data.x86) {
- case 0x10:
- case 0x12:
- case 0x16:
- printk_once(KERN_INFO
- "IBPB: Disabling indirect branch predictor support\n");
- msr_set_bit(MSR_F15H_IC_CFG, 14);
- break;
- }
ibpb_state = 0;
+ printk_once(KERN_INFO "IBPB: Disabling indirect branch predictor support\n");
}
}
}
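For reference, the deleted branch used msr_set_bit(MSR_F15H_IC_CFG, 14) to switch off the indirect branch predictor on AMD families 0x10/0x12/0x16 when no IBPB microcode is available; after the patch the kernel only logs that IBPB is disabled. A hedged sketch of the read-modify-write such an MSR helper performs (illustrative only, not the arch/x86/lib/msr.c implementation):

    #include <linux/bits.h>
    #include <asm/msr.h>

    /* Set a single MSR bit, skipping the write if it is already set. */
    static void example_msr_set_bit(u32 msr, int bit)
    {
            u64 val;

            rdmsrl(msr, val);
            if (!(val & BIT_ULL(bit)))
                    wrmsrl(msr, val | BIT_ULL(bit));
    }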
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index c16ee188f519..4c9c29581518 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -576,11 +576,65 @@ static struct hv_dynmem_device dm_device;
static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG
+static inline bool has_pfn_is_backed(struct hv_hotadd_state *has,
+ unsigned long pfn)
+{
+ struct hv_hotadd_gap *gap;
+
+ /* The page is not backed. */
+ if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
+ return false;
+
+ /* Check for gaps. */
+ list_for_each_entry(gap, &has->gap_list, list) {
+ if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
+ return false;
+ }
+
+ return true;
+}
+
+static unsigned long hv_page_offline_check(unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ unsigned long pfn = start_pfn, count = 0;
+ struct hv_hotadd_state *has;
+ bool found;
+
+ while (pfn < start_pfn + nr_pages) {
+ /*
+ * Search for HAS which covers the pfn and when we find one
+ * count how many consecutive PFNs are covered.
+ */
+ found = false;
+ list_for_each_entry(has, &dm_device.ha_region_list, list) {
+ while ((pfn >= has->start_pfn) &&
+ (pfn < has->end_pfn) &&
+ (pfn < start_pfn + nr_pages)) {
+ found = true;
+ if (has_pfn_is_backed(has, pfn))
+ count++;
+ pfn++;
+ }
+ }
+
+ /*
+ * This PFN is not in any HAS (e.g. we're offlining a region
+ * which was present at boot), no need to account for it. Go
+ * to the next one.
+ */
+ if (!found)
+ pfn++;
+ }
+
+ return count;
+}
+
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
void *v)
{
struct memory_notify *mem = (struct memory_notify *)v;
- unsigned long flags;
+ unsigned long flags, pfn_count;
switch (val) {
case MEM_ONLINE:
@@ -593,7 +647,19 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
case MEM_OFFLINE:
spin_lock_irqsave(&dm_device.ha_lock, flags);
- dm_device.num_pages_onlined -= mem->nr_pages;
+ pfn_count = hv_page_offline_check(mem->start_pfn,
+ mem->nr_pages);
+ if (pfn_count <= dm_device.num_pages_onlined) {
+ dm_device.num_pages_onlined -= pfn_count;
+ } else {
+ /*
+ * We're offlining more pages than we managed to online.
+ * This is unexpected. In any case don't let
+ * num_pages_onlined wrap around zero.
+ */
+ WARN_ON_ONCE(1);
+ dm_device.num_pages_onlined = 0;
+ }
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
break;
case MEM_GOING_ONLINE:
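The MEM_OFFLINE arm above refuses to let num_pages_onlined wrap below zero: the subtraction is clamped and the inconsistency reported exactly once. The idiom, reduced to a hypothetical helper (sub_clamped is not in the patch):

    /* Clamp a counter decrement and warn once on underflow. */
    static unsigned long sub_clamped(unsigned long total, unsigned long dec)
    {
            if (dec <= total)
                    return total - dec;
            WARN_ON_ONCE(1);        /* offlined more than was onlined */
            return 0;
    }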
@@ -612,30 +678,9 @@ static struct notifier_block hv_memory_nb = {
/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
- unsigned long cur_start_pgp;
- unsigned long cur_end_pgp;
- struct hv_hotadd_gap *gap;
-
- cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
-
- /* The page is not backed. */
- if (((unsigned long)pg < cur_start_pgp) ||
- ((unsigned long)pg >= cur_end_pgp))
+ if (!has_pfn_is_backed(has, page_to_pfn(pg)))
return;
- /* Check for gaps. */
- list_for_each_entry(gap, &has->gap_list, list) {
- cur_start_pgp = (unsigned long)
- pfn_to_page(gap->start_pfn);
- cur_end_pgp = (unsigned long)
- pfn_to_page(gap->end_pfn);
- if (((unsigned long)pg >= cur_start_pgp) &&
- ((unsigned long)pg < cur_end_pgp)) {
- return;
- }
- }
-
/* This frame is currently backed; online the page. */
__online_page_set_limits(pg);
__online_page_increment_counters(pg);
@@ -691,7 +736,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
(HA_CHUNK << PAGE_SHIFT));
if (ret) {
- pr_warn("hot_add memory failed error is %d\n", ret);
+ pr_err("hot_add memory failed error is %d\n", ret);
if (ret == -EEXIST) {
/*
* This error indicates that the error
@@ -723,19 +768,13 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
static void hv_online_page(struct page *pg)
{
struct hv_hotadd_state *has;
- unsigned long cur_start_pgp;
- unsigned long cur_end_pgp;
unsigned long flags;
+ unsigned long pfn = page_to_pfn(pg);
spin_lock_irqsave(&dm_device.ha_lock, flags);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
- cur_start_pgp = (unsigned long)
- pfn_to_page(has->start_pfn);
- cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
-
/* The page belongs to a different HAS. */
- if (((unsigned long)pg < cur_start_pgp) ||
- ((unsigned long)pg >= cur_end_pgp))
+ if ((pfn < has->start_pfn) || (pfn >= has->end_pfn))
continue;
hv_page_online_one(has, pg);
@@ -1011,7 +1050,7 @@ static void hot_add_req(struct work_struct *dummy)
resp.result = 0;
if (!do_hot_add || (resp.page_count == 0))
- pr_info("Memory hot add failed\n");
+ pr_err("Memory hot add failed\n");
dm->state = DM_INITIALIZED;
resp.hdr.trans_id = atomic_inc_return(&trans_id);
@@ -1038,7 +1077,7 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
break;
default:
- pr_info("Received Unknown type: %d\n", info_hdr->type);
+ pr_warn("Received Unknown type: %d\n", info_hdr->type);
}
}
@@ -1287,7 +1326,7 @@ static void balloon_up(struct work_struct *dummy)
/*
* Free up the memory we allocated.
*/
- pr_info("Balloon response failed\n");
+ pr_err("Balloon response failed\n");
for (i = 0; i < bl_resp->range_count; i++)
free_balloon_pages(&dm_device,
@@ -1420,7 +1459,7 @@ static void cap_resp(struct hv_dynmem_device *dm,
struct dm_capabilities_resp_msg *cap_resp)
{
if (!cap_resp->is_accepted) {
- pr_info("Capabilities not accepted by host\n");
+ pr_err("Capabilities not accepted by host\n");
dm->state = DM_INIT_ERROR;
}
complete(&dm->host_event);
@@ -1507,7 +1546,7 @@ static void balloon_onchannelcallback(void *context)
break;
default:
- pr_err("Unhandled message: type: %d\n", dm_hdr->type);
+ pr_warn("Unhandled message: type: %d\n", dm_hdr->type);
}
}
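Taken together, has_pfn_is_backed() and hv_page_offline_check() make the offline accounting skip PFNs that were never backed, either because they lie outside a region's covered range or because they fall into a gap. A standalone model of that counting, with simplified stand-ins for hv_hotadd_state and hv_hotadd_gap (all names below are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct gap { unsigned long start, end; };   /* [start, end) */

    struct region {
            unsigned long start, end;           /* hot-added PFN range */
            unsigned long cov_start, cov_end;   /* backed subrange */
            const struct gap *gaps;
            int nr_gaps;
    };

    static bool pfn_is_backed(const struct region *r, unsigned long pfn)
    {
            if (pfn < r->cov_start || pfn >= r->cov_end)
                    return false;               /* not backed at all */
            for (int i = 0; i < r->nr_gaps; i++)
                    if (pfn >= r->gaps[i].start && pfn < r->gaps[i].end)
                            return false;       /* falls into a gap */
            return true;
    }

    int main(void)
    {
            static const struct gap g = { 110, 120 };
            static const struct region r = { 100, 200, 100, 180, &g, 1 };
            unsigned long count = 0;

            /* Offline PFNs 100..159; 110..119 sit in the gap. */
            for (unsigned long pfn = 100; pfn < 160; pfn++)
                    if (pfn_is_backed(&r, pfn))
                            count++;

            printf("backed pages offlined: %lu\n", count);  /* prints 50 */
            return 0;
    }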