Home Home > GIT Browse
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Beulich <jbeulich@novell.com>2010-10-25 12:53:38 +0200
committerJan Beulich <jbeulich@novell.com>2010-10-25 12:53:38 +0200
commit6145dfaac7d86fcdd3052072b3ba9597b088ca9d (patch)
treedfe3dff48dd8b6430364b108bcf0190a3a7d4596
parentc4f0e0061c405b9737a9feea0df56ea5ee3bc8db (diff)
- Update Xen patches to 2.6.36 and c/s 1043.
- xen: netback: take net_schedule_list_lock when removing entry from net_schedule_list.
- Update Xen config files.
suse-commit: b772cd8b959ee9b73e64aa8c27efb8ddc037d8ab
-rw-r--r--arch/x86/include/mach-xen/asm/smp.h16
-rw-r--r--arch/x86/kernel/apic/apic-xen.c7
-rw-r--r--arch/x86/kernel/apic/io_apic-xen.c4
-rw-r--r--arch/x86/kernel/cpu/intel.c6
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/head-xen.c20
-rw-r--r--arch/x86/kernel/head32-xen.c16
-rw-r--r--arch/x86/kernel/mpparse-xen.c10
-rw-r--r--drivers/acpi/processor_core.c10
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/ioat/Makefile3
-rw-r--r--drivers/dma/ioat/dca.c12
-rw-r--r--drivers/dma/ioat/dma.h17
-rw-r--r--drivers/dma/ioat/dma_v2.h6
-rw-r--r--drivers/dma/ioat/hw.h4
-rw-r--r--drivers/dma/ioat/pci.c7
-rw-r--r--drivers/xen/core/evtchn.c2
-rw-r--r--drivers/xen/core/smpboot.c13
-rw-r--r--drivers/xen/netback/common.h4
-rw-r--r--drivers/xen/netback/netback.c54
-rw-r--r--drivers/xen/xenbus/xenbus_client.c41
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c11
-rw-r--r--fs/btrfs/extent_io.c9
-rw-r--r--fs/mpage.c8
-rw-r--r--mm/Kconfig28
-rw-r--r--mm/Makefile3
26 files changed, 199 insertions(+), 116 deletions(-)
diff --git a/arch/x86/include/mach-xen/asm/smp.h b/arch/x86/include/mach-xen/asm/smp.h
index 650685c66d5a..2cd16b2e4843 100644
--- a/arch/x86/include/mach-xen/asm/smp.h
+++ b/arch/x86/include/mach-xen/asm/smp.h
@@ -20,7 +20,11 @@
extern unsigned int num_processors;
+#ifndef CONFIG_XEN
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
+#endif
DECLARE_PER_CPU(int, cpu_number);
static inline const struct cpumask *cpu_sibling_mask(int cpu)
@@ -33,8 +37,10 @@ static inline const struct cpumask *cpu_core_mask(int cpu)
return cpumask_of(cpu);
}
-DECLARE_PER_CPU(u16, x86_cpu_to_apicid);
-DECLARE_PER_CPU(u16, x86_bios_cpu_apicid);
+#ifndef CONFIG_XEN
+DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+#endif
#ifdef CONFIG_SMP
@@ -135,6 +141,9 @@ void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
+void smp_store_cpu_info(int id);
+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
+
#else /* CONFIG_XEN */
extern int __cpu_disable(void);
@@ -153,9 +162,6 @@ void play_dead(void);
#endif /* CONFIG_XEN */
-void smp_store_cpu_info(int id);
-#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-
/* We don't mark CPUs online until __cpu_up(), so we need another measure */
static inline int num_booting_cpus(void)
{
diff --git a/arch/x86/kernel/apic/apic-xen.c b/arch/x86/kernel/apic/apic-xen.c
index b17b3e251ddb..95a20e09e06b 100644
--- a/arch/x86/kernel/apic/apic-xen.c
+++ b/arch/x86/kernel/apic/apic-xen.c
@@ -4,7 +4,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
#include <asm/smp.h>
#include <asm/proto.h>
@@ -13,12 +12,6 @@
unsigned int num_processors;
/*
- * Map cpu index to physical APIC ID
- */
-DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-
-/*
* Debug level, exported for io_apic.c
*/
unsigned int apic_verbosity;
diff --git a/arch/x86/kernel/apic/io_apic-xen.c b/arch/x86/kernel/apic/io_apic-xen.c
index 1ca2117e519f..6545480843a4 100644
--- a/arch/x86/kernel/apic/io_apic-xen.c
+++ b/arch/x86/kernel/apic/io_apic-xen.c
@@ -4136,10 +4136,9 @@ u8 __init io_apic_unique_id(u8 id)
#endif
}
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && !defined(CONFIG_XEN)
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
-#ifndef CONFIG_XEN
union IO_APIC_reg_00 reg_00;
static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
physid_mask_t tmp;
@@ -4208,7 +4207,6 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
apic_printk(APIC_VERBOSE, KERN_INFO
"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-#endif /* !CONFIG_XEN */
return apic_id;
}
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 68381225e67e..f3d654c5ad6d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -288,6 +288,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
}
#endif
+#ifndef CONFIG_XEN
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
@@ -308,7 +309,6 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
#endif
}
-#ifndef CONFIG_XEN
/*
* find out the number of processor cores on the die
*/
@@ -326,7 +326,6 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
else
return 1;
}
-#endif
static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
@@ -365,6 +364,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_VPID);
}
}
+#endif
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
@@ -461,13 +461,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
detect_ht(c);
#endif
}
-#endif
/* Work around errata */
srat_detect_node(c);
if (cpu_has(c, X86_FEATURE_VMX))
detect_vmx_virtcap(c);
+#endif
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d49079515122..aaae0e468112 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -40,10 +40,12 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 },
{ X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
+#ifndef CONFIG_XEN
{ X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
{ X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
{ X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
{ X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
+#endif
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/head-xen.c b/arch/x86/kernel/head-xen.c
index 15ca07385137..5d011dc1998e 100644
--- a/arch/x86/kernel/head-xen.c
+++ b/arch/x86/kernel/head-xen.c
@@ -85,11 +85,6 @@ void __init xen_start_kernel(void)
unsigned int i;
struct xen_machphys_mapping mapping;
unsigned long machine_to_phys_nr_ents;
-#ifdef CONFIG_X86_32
- struct xen_platform_parameters pp;
- extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
- unsigned long addr;
-#endif
xen_setup_features();
@@ -114,17 +109,9 @@ void __init xen_start_kernel(void)
"Xen provided");
#ifdef CONFIG_X86_32
- WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_4gb_segments));
-
- init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
-
- if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
- hypervisor_virt_start = pp.virt_start;
- reserve_top_address(0UL - pp.virt_start);
- }
-
- BUG_ON(pte_index(hypervisor_virt_start));
+{
+ extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
+ unsigned long addr;
/* Do an early initialization of the fixmap area */
make_lowmem_page_readonly(swapper_pg_fixmap, XENFEAT_writable_page_tables);
@@ -133,6 +120,7 @@ void __init xen_start_kernel(void)
addr),
addr),
__pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
+}
#else
x86_configure_nx();
xen_init_pt();
diff --git a/arch/x86/kernel/head32-xen.c b/arch/x86/kernel/head32-xen.c
index 7fd485a5359c..2db8b4cc0970 100644
--- a/arch/x86/kernel/head32-xen.c
+++ b/arch/x86/kernel/head32-xen.c
@@ -33,6 +33,22 @@ static void __init i386_default_early_setup(void)
void __init i386_start_kernel(void)
{
+#ifdef CONFIG_XEN
+ struct xen_platform_parameters pp;
+
+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments));
+
+ init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
+
+ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
+ hypervisor_virt_start = pp.virt_start;
+ reserve_top_address(0UL - pp.virt_start);
+ }
+
+ BUG_ON(pte_index(hypervisor_virt_start));
+#endif
+
#ifdef CONFIG_X86_TRAMPOLINE
/*
* But first pinch a few for the stack/trampoline stuff
diff --git a/arch/x86/kernel/mpparse-xen.c b/arch/x86/kernel/mpparse-xen.c
index 2db2a6f6a910..9f6cf38b55fa 100644
--- a/arch/x86/kernel/mpparse-xen.c
+++ b/arch/x86/kernel/mpparse-xen.c
@@ -623,6 +623,11 @@ void __init default_get_smp_config(unsigned int early)
if (!mpf)
return;
+#ifdef CONFIG_XEN
+ BUG_ON(early);
+#define early 0
+#endif
+
if (acpi_lapic && early)
return;
@@ -648,15 +653,15 @@ void __init default_get_smp_config(unsigned int early)
* Now see if we need to read further.
*/
if (mpf->feature1 != 0) {
- if (early) {
#ifndef CONFIG_XEN
+ if (early) {
/*
* local APIC has default address
*/
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-#endif
return;
}
+#endif
printk(KERN_INFO "Default MP configuration #%d\n",
mpf->feature1);
@@ -673,6 +678,7 @@ void __init default_get_smp_config(unsigned int early)
/*
* Only use the first configuration found.
*/
+#undef early
}
#ifndef CONFIG_XEN
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 6353a400c9f4..d74786fb0419 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -190,10 +190,20 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
if (apic_id == -1 || i)
return apic_id;
+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
for_each_possible_cpu(i) {
if (cpu_physical_id(i) == apic_id)
return i;
}
+#else
+ /*
+ * Use of cpu_physical_id() is bogus here. Rather than defining a
+ * stub enforcing a 1:1 mapping, we keep it undefined to catch bad
+ * uses. Return as if there was a 1:1 mapping.
+ */
+ if (apic_id < nr_cpu_ids && cpu_possible(apic_id))
+ return apic_id;
+#endif
return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..ca70c22f8689 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,7 +53,7 @@ config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
- select DCA
+ select DCA if !XEN
select ASYNC_TX_DISABLE_CHANNEL_SWITCH
select ASYNC_TX_DISABLE_PQ_VAL_DMA
select ASYNC_TX_DISABLE_XOR_VAL_DMA
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 8997d3fb9051..da5802523aac 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o
+dca-$(CONFIG_DCA) := dca.o
+ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o $(dca-y) $(dca-m)
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index abd9038e06b1..fb188d19e6bc 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -682,3 +682,15 @@ ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
return dca;
}
+
+void ioat_remove_dca_provider(struct pci_dev *pdev)
+{
+ struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+ if (!device->dca)
+ return;
+
+ unregister_dca_provider(device->dca, &pdev->dev);
+ free_dca_provider(device->dca);
+ device->dca = NULL;
+}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a92a21..6a8f2234d86c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -347,4 +347,21 @@ void ioat_kobject_del(struct ioatdma_device *device);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
+
+#ifndef CONFIG_XEN
+void ioat_remove_dca_provider(struct pci_dev *);
+#else
+static inline void ioat_remove_dca_provider(struct pci_dev *pdev)
+{
+ struct ioatdma_device *device = pci_get_drvdata(pdev);
+ BUG_ON(device->dca);
+}
+static inline struct dca_provider *__devinit
+__ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+ return NULL;
+}
+#define ioat_dca_init __ioat_dca_init
+#endif
+
#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b2b8d8..1a61a4235604 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -176,4 +176,10 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
extern struct kobj_type ioat2_ktype;
extern struct kmem_cache *ioat2_cache;
+
+#ifdef CONFIG_XEN
+#define ioat2_dca_init __ioat_dca_init
+#define ioat3_dca_init __ioat_dca_init
+#endif
+
#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 60e675455b6a..3d0dca6cb0ce 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -39,7 +39,11 @@
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
#define IOAT_VER_3_2 0x32 /* Version 3.2 */
+#ifndef CONFIG_XEN
int system_has_dca_enabled(struct pci_dev *pdev);
+#else
+static inline int system_has_dca_enabled(struct pci_dev *pdev) { return 0; }
+#endif
struct ioat_dma_descriptor {
uint32_t size;
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index fab37d1cf48d..a74f3815791b 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/dca.h>
#include <linux/slab.h>
#include "dma.h"
#include "dma_v2.h"
@@ -170,11 +169,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev)
return;
dev_err(&pdev->dev, "Removing dma and dca services\n");
- if (device->dca) {
- unregister_dca_provider(device->dca, &pdev->dev);
- free_dca_provider(device->dca);
- device->dca = NULL;
- }
+ ioat_remove_dca_provider(pdev);
ioat_dma_remove(device);
}
diff --git a/drivers/xen/core/evtchn.c b/drivers/xen/core/evtchn.c
index 670e5a762e6c..40ea13318739 100644
--- a/drivers/xen/core/evtchn.c
+++ b/drivers/xen/core/evtchn.c
@@ -370,6 +370,7 @@ asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs)
barrier();
#endif
+#ifndef CONFIG_NO_HZ
/*
* Handle timer interrupts before all others, so that all
* hardirq handlers see an up-to-date system time even if we
@@ -395,6 +396,7 @@ asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs)
BUG();
}
}
+#endif /* CONFIG_NO_HZ */
l1 = vcpu_info_xchg(evtchn_pending_sel, 0);
diff --git a/drivers/xen/core/smpboot.c b/drivers/xen/core/smpboot.c
index 7fc8cf41de62..7244d6097f85 100644
--- a/drivers/xen/core/smpboot.c
+++ b/drivers/xen/core/smpboot.c
@@ -40,12 +40,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
static int __read_mostly ipi_irq = -1;
-#ifdef CONFIG_X86_LOCAL_APIC
-#define set_cpu_to_apicid(cpu, apicid) (per_cpu(x86_cpu_to_apicid, cpu) = (apicid))
-#else
-#define set_cpu_to_apicid(cpu, apicid)
-#endif
-
void __init prefill_possible_map(void)
{
int i, rc;
@@ -227,9 +221,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0)
apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
cpu_data(0) = boot_cpu_data;
-
- set_cpu_to_apicid(0, apicid);
-
current_thread_info()->cpu = 0;
if (xen_smp_intr_init(0))
@@ -241,7 +232,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
/* Restrict the possible_map according to max_cpus. */
while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
- for (cpu = nr_cpu_ids-1; !cpumask_test_cpu(cpu, cpu_possible_mask); cpu--)
+ for (cpu = nr_cpu_ids-1; !cpu_possible(cpu); cpu--)
continue;
set_cpu_possible(cpu, false);
}
@@ -263,8 +254,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpu_data(cpu) = boot_cpu_data;
cpu_data(cpu).cpu_index = cpu;
- set_cpu_to_apicid(cpu, apicid);
-
#ifdef __x86_64__
clear_tsk_thread_flag(idle, TIF_FORK);
per_cpu(kernel_stack, cpu) =
diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
index 4c76f12a161d..2f06d658aaf5 100644
--- a/drivers/xen/netback/common.h
+++ b/drivers/xen/netback/common.h
@@ -259,9 +259,9 @@ struct xen_netbk {
pending_ring_idx_t dealloc_cons;
struct list_head pending_inuse_head;
- struct list_head net_schedule_list;
+ struct list_head schedule_list;
- spinlock_t net_schedule_list_lock;
+ spinlock_t schedule_list_lock;
spinlock_t release_lock;
struct page **mmap_pages;
diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
index 353db714d13c..a3abe0ed6c45 100644
--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
@@ -177,7 +177,7 @@ static inline void maybe_schedule_tx_action(unsigned int group)
smp_mb();
if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
- !list_empty(&netbk->net_schedule_list)) {
+ !list_empty(&netbk->schedule_list)) {
if (use_kthreads)
wake_up(&netbk->netbk_action_wq);
else
@@ -817,17 +817,28 @@ static int __on_net_schedule_list(netif_t *netif)
return netif->list.next != NULL;
}
+/* Must be called with netbk->schedule_list_lock held. */
static void remove_from_net_schedule_list(netif_t *netif)
{
- struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)];
-
- spin_lock_irq(&netbk->net_schedule_list_lock);
if (likely(__on_net_schedule_list(netif))) {
list_del(&netif->list);
netif->list.next = NULL;
netif_put(netif);
}
- spin_unlock_irq(&netbk->net_schedule_list_lock);
+}
+
+static netif_t *poll_net_schedule_list(struct xen_netbk *netbk)
+{
+ netif_t *netif = NULL;
+
+ spin_lock_irq(&netbk->schedule_list_lock);
+ if (!list_empty(&netbk->schedule_list)) {
+ netif = list_first_entry(&netbk->schedule_list, netif_t, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+ }
+ spin_unlock_irq(&netbk->schedule_list_lock);
+ return netif;
}
static void add_to_net_schedule_list_tail(netif_t *netif)
@@ -838,13 +849,13 @@ static void add_to_net_schedule_list_tail(netif_t *netif)
if (__on_net_schedule_list(netif))
return;
- spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
+ spin_lock_irqsave(&netbk->schedule_list_lock, flags);
if (!__on_net_schedule_list(netif) &&
likely(netif_schedulable(netif))) {
- list_add_tail(&netif->list, &netbk->net_schedule_list);
+ list_add_tail(&netif->list, &netbk->schedule_list);
netif_get(netif);
}
- spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
+ spin_unlock_irqrestore(&netbk->schedule_list_lock, flags);
}
/*
@@ -873,7 +884,11 @@ void netif_schedule_work(netif_t *netif)
void netif_deschedule_work(netif_t *netif)
{
+ struct xen_netbk *netbk = &xen_netbk[GET_GROUP_INDEX(netif)];
+
+ spin_lock_irq(&netbk->schedule_list_lock);
remove_from_net_schedule_list(netif);
+ spin_unlock_irq(&netbk->schedule_list_lock);
}
@@ -1298,12 +1313,11 @@ static void net_tx_action(unsigned long group)
mop = netbk->tx_map_ops;
BUILD_BUG_ON(MAX_SKB_FRAGS >= MAX_PENDING_REQS);
while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list)) {
+ !list_empty(&netbk->schedule_list)) {
/* Get a netif from the list with work to do. */
- netif = list_first_entry(&netbk->net_schedule_list,
- netif_t, list);
- netif_get(netif);
- remove_from_net_schedule_list(netif);
+ netif = poll_net_schedule_list(netbk);
+ if (!netif)
+ continue;
RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
if (!work_to_do) {
@@ -1653,7 +1667,6 @@ static netif_rx_response_t *make_rx_response(netif_t *netif,
#ifdef NETBE_DEBUG_INTERRUPT
static irqreturn_t netif_be_dbg(int irq, void *dev_id)
{
- struct list_head *ent;
netif_t *netif;
unsigned int i = 0, group;
@@ -1662,10 +1675,9 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id)
for (group = 0; group < netbk_nr_groups; ++group) {
struct xen_netbk *netbk = &xen_netbk[group];
- spin_lock_irq(&netbk->net_schedule_list_lock);
+ spin_lock_irq(&netbk->schedule_list_lock);
- list_for_each(ent, &netbk->net_schedule_list) {
- netif = list_entry(ent, netif_t, list);
+ list_for_each_entry(netif, &netbk->schedule_list, list) {
pr_alert(" %d: private(rx_req_cons=%08x "
"rx_resp_prod=%08x\n", i,
netif->rx.req_cons, netif->rx.rsp_prod_pvt);
@@ -1684,7 +1696,7 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id)
i++;
}
- spin_unlock_irq(&netbk->net_schedule_list_lock);
+ spin_unlock_irq(&netbk->schedule_list_lock);
}
pr_alert(" ** End of netif_schedule_list **\n");
@@ -1710,7 +1722,7 @@ static inline int tx_work_todo(struct xen_netbk *netbk)
return 1;
if (nr_pending_reqs(netbk) + MAX_SKB_FRAGS < MAX_PENDING_REQS &&
- !list_empty(&netbk->net_schedule_list))
+ !list_empty(&netbk->schedule_list))
return 1;
return 0;
@@ -1788,9 +1800,9 @@ static int __init netback_init(void)
netbk->pending_prod = MAX_PENDING_REQS;
INIT_LIST_HEAD(&netbk->pending_inuse_head);
- INIT_LIST_HEAD(&netbk->net_schedule_list);
+ INIT_LIST_HEAD(&netbk->schedule_list);
- spin_lock_init(&netbk->net_schedule_list_lock);
+ spin_lock_init(&netbk->schedule_list_lock);
spin_lock_init(&netbk->release_lock);
netbk->mmap_pages =
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 7beec151c5a8..5cc3a25d8275 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -258,41 +258,22 @@ static char *error_path(struct xenbus_device *dev)
static void _dev_error(struct xenbus_device *dev, int err,
- const char *fmt, va_list ap)
+ const char *fmt, va_list *ap)
{
- int ret;
- unsigned int len;
- char *printf_buffer = NULL, *path_buffer = NULL;
+ char *printf_buffer, *path_buffer;
+ struct va_format vaf = { .fmt = fmt, .va = ap };
-#define PRINTF_BUFFER_SIZE 4096
- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
- if (printf_buffer == NULL)
- goto fail;
-
- len = sprintf(printf_buffer, "%i ", -err);
- ret = vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);
-
- BUG_ON(len + ret > PRINTF_BUFFER_SIZE-1);
-
- dev_err(&dev->dev, "%s\n", printf_buffer);
+ printf_buffer = kasprintf(GFP_KERNEL, "%i %pV", -err, &vaf);
+ if (printf_buffer)
+ dev_err(&dev->dev, "%s\n", printf_buffer);
path_buffer = error_path(dev);
-
- if (path_buffer == NULL) {
- dev_err(&dev->dev,
- "xenbus: failed to write error node for %s (%s)\n",
- dev->nodename, printf_buffer);
- goto fail;
- }
-
- if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
+ if (!printf_buffer || !path_buffer
+ || xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer))
dev_err(&dev->dev,
"xenbus: failed to write error node for %s (%s)\n",
dev->nodename, printf_buffer);
- goto fail;
- }
-fail:
kfree(printf_buffer);
kfree(path_buffer);
}
@@ -312,7 +293,7 @@ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
- _dev_error(dev, err, fmt, ap);
+ _dev_error(dev, err, fmt, &ap);
va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);
@@ -333,7 +314,7 @@ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
- _dev_error(dev, err, fmt, ap);
+ _dev_error(dev, err, fmt, &ap);
va_end(ap);
xenbus_switch_state(dev, XenbusStateClosing);
@@ -350,7 +331,7 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
va_list ap;
va_start(ap, fmt);
- _dev_error(dev, err, fmt, ap);
+ _dev_error(dev, err, fmt, &ap);
va_end(ap);
if (!depth)
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index c73d2e665338..0aaa54a65837 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -541,18 +541,15 @@ int xenbus_printf(struct xenbus_transaction t,
{
va_list ap;
int ret;
-#define PRINTF_BUFFER_SIZE 4096
char *printf_buffer;
- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
- if (printf_buffer == NULL)
- return -ENOMEM;
-
va_start(ap, fmt);
- ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+ printf_buffer = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
va_end(ap);
- BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
+ if (!printf_buffer)
+ return -ENOMEM;
+
ret = xenbus_write(t, dir, node, printf_buffer);
kfree(printf_buffer);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d74e6af9b53a..120c82a7791d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -10,6 +10,7 @@
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
+#include <linux/precache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
@@ -2027,6 +2028,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
set_page_extent_mapped(page);
+ if (!PageUptodate(page)) {
+ if (precache_get(page->mapping, page->index, page) == 1) {
+ BUG_ON(blocksize != PAGE_SIZE);
+ goto out;
+ }
+ }
+
end = page_end;
while (1) {
lock_extent(tree, start, end, GFP_NOFS);
@@ -2151,6 +2159,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
cur = cur + iosize;
page_offset += iosize;
}
+out:
if (!nr) {
if (!PageError(page))
SetPageUptodate(page);
diff --git a/fs/mpage.c b/fs/mpage.c
index fd56ca2ea556..f1adb55b0743 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -27,6 +27,7 @@
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
+#include <linux/precache.h>
/*
* I/O completion handler for multipage BIOs.
@@ -286,6 +287,13 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
SetPageMappedToDisk(page);
}
+ if (fully_mapped &&
+ blocks_per_page == 1 && !PageUptodate(page) &&
+ precache_get(page->mapping, page->index, page) == 1) {
+ SetPageUptodate(page);
+ goto confused;
+ }
+
/*
* This page will go to BIO. Do we need to send this BIO off first?
*/
diff --git a/mm/Kconfig b/mm/Kconfig
index f0fb9124e410..7fb426f24ba5 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -301,3 +301,31 @@ config NOMMU_INITIAL_TRIM_EXCESS
of 1 says that all excess pages should be trimmed.
See Documentation/nommu-mmap.txt for more information.
+
+#
+# support for transcendent memory
+#
+config TMEM
+ bool
+ help
+ In a virtualized environment, allows unused and underutilized
+ system physical memory to be made accessible through a narrow
+ well-defined page-copy-based API. If unsure, say Y.
+
+config PRECACHE
+ bool "Cache clean pages in transcendent memory"
+ depends on XEN
+ select TMEM
+ help
+ Allows the transcendent memory pool to be used to store clean
+ page-cache pages which, under some circumstances, will greatly
+ reduce paging and thus improve performance. If unsure, say Y.
+
+config PRESWAP
+ bool "Swap pages to transcendent memory"
+ depends on XEN
+ select TMEM
+ help
+ Allows the transcendent memory pool to be used as a pseudo-swap
+ device which, under some circumstances, will greatly reduce
+ swapping and thus improve performance. If unsure, say Y.
diff --git a/mm/Makefile b/mm/Makefile
index 787187be0fba..0cb833f2a872 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -19,6 +19,9 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_TMEM) += tmem.o
+obj-$(CONFIG_PRESWAP) += preswap.o
+obj-$(CONFIG_PRECACHE) += precache.o
obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o