author    Jan Beulich <jbeulich@suse.com>    2013-02-05 14:35:19 +0100
committer Jan Beulich <jbeulich@suse.com>    2013-02-05 14:35:19 +0100
commit    b432da4d8c0657669377e1828ebb0a07410f56ca (patch)
tree      8a71299421cddd8af00873b05b31f04d1029fda7
parent    221586743064fb0043762f03f0be59da570de9bc (diff)
- Update Xen patches to 3.7.6 and c/s 1222.
- xen-pciback: rate limit error messages from xen_pcibk_enable_msi{,x}()
  (CVE-2013-0231 XSA-43 bnc#801178).

suse-commit: ffecceb3b8f76a7ea44c3439f0a082c0dc3946a7
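The XSA-43 / CVE-2013-0231 part of this commit throttles log messages on error
paths that a guest can trigger at will in xen_pcibk_enable_msi{,x}() (see the
pciback_ops.c hunks at the end of the diff). A minimal sketch of the pattern,
using a hypothetical helper name and guest-id parameter rather than the exact
pciback code:

#include <linux/printk.h>	/* pr_warn_ratelimited() */
#include <linux/pci.h>		/* pci_enable_msi(), pci_name() */

/*
 * Hypothetical error path: report a guest-triggerable failure without
 * letting the guest flood the host log; pr_warn_ratelimited() suppresses
 * further messages once the printk rate limit is exceeded.
 */
static int example_enable_msi(struct pci_dev *dev, unsigned int guest_id)
{
	int status = pci_enable_msi(dev);

	if (status)
		pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
				    pci_name(dev), guest_id, status);
	return status;
}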
 arch/x86/kernel/entry_32-xen.S        |   8
 arch/x86/kernel/msr-xen.c             |   3
 arch/x86/kernel/setup-xen.c           | 108
 arch/x86/platform/efi/efi-xen.c       |  45
 drivers/xen/blkback/blkback.c         |   9
 drivers/xen/blktap/blktap.c           |   8
 drivers/xen/blktap/common.h           |   3
 drivers/xen/console/console.c         |  40
 drivers/xen/core/cpu_hotplug.c        |   4
 drivers/xen/fbfront/xenfb.c           |   4
 drivers/xen/netback/common.h          |   3
 drivers/xen/netback/interface.c       |  22
 drivers/xen/netback/netback.c         | 117
 drivers/xen/netfront/netfront.c       |  63
 drivers/xen/pcifront/pci_op.c         |   4
 drivers/xen/scsiback/scsiback.c       |  50
 drivers/xen/usbback/usbback.c         |  11
 drivers/xen/xen-pciback/pciback_ops.c |  14
 18 files changed, 330 insertions(+), 186 deletions(-)
diff --git a/arch/x86/kernel/entry_32-xen.S b/arch/x86/kernel/entry_32-xen.S
index 800c592e67a7..c2f3db8ced5f 100644
--- a/arch/x86/kernel/entry_32-xen.S
+++ b/arch/x86/kernel/entry_32-xen.S
@@ -1057,12 +1057,10 @@ ENTRY(failsafe_callback)
4: mov 16(%esp),%gs
testl %eax,%eax
popl %eax
- jz 5f
- addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
- jmp iret_exc
-5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
+ leal 16(%esp),%esp
RING0_INT_FRAME
- pushl_cfi $-1
+ jnz iret_exc # EAX != 0 => Category 2 (Bad IRET)
+ pushl_cfi $-1 # EAX == 0 => Category 1 (Bad segment)
SAVE_ALL
jmp ret_from_exception
.section .fixup,"ax"; \
diff --git a/arch/x86/kernel/msr-xen.c b/arch/x86/kernel/msr-xen.c
index c0330abc12a4..63453a3da597 100644
--- a/arch/x86/kernel/msr-xen.c
+++ b/arch/x86/kernel/msr-xen.c
@@ -156,6 +156,9 @@ static int pmsr_open(struct inode *inode, struct file *file)
{
unsigned int cpu;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
cpu = pmsr_minor(file->f_path.dentry->d_inode);
if (cpu >= nr_xen_cpu_ids || !test_bit(cpu, xen_cpu_online_map))
return -ENXIO; /* No such CPU */
diff --git a/arch/x86/kernel/setup-xen.c b/arch/x86/kernel/setup-xen.c
index 1b1c07517c91..2da3c3673605 100644
--- a/arch/x86/kernel/setup-xen.c
+++ b/arch/x86/kernel/setup-xen.c
@@ -737,6 +737,83 @@ static __init void reserve_ibft_region(void)
#ifndef CONFIG_XEN
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
+static bool __init snb_gfx_workaround_needed(void)
+{
+#ifdef CONFIG_PCI
+ int i;
+ u16 vendor, devid;
+ static const u16 snb_ids[] = {
+ 0x0102,
+ 0x0112,
+ 0x0122,
+ 0x0106,
+ 0x0116,
+ 0x0126,
+ 0x010a,
+ };
+
+ /* Assume no if something weird is going on with PCI */
+ if (!early_pci_allowed())
+ return false;
+
+ vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
+ if (vendor != 0x8086)
+ return false;
+
+ devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
+ for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
+ if (devid == snb_ids[i])
+ return true;
+#endif
+
+ return false;
+}
+
+/*
+ * Sandy Bridge graphics has trouble with certain ranges, exclude
+ * them from allocation.
+ */
+static void __init trim_snb_memory(void)
+{
+ static const unsigned long bad_pages[] = {
+ 0x20050000,
+ 0x20110000,
+ 0x20130000,
+ 0x20138000,
+ 0x40004000,
+ };
+ int i;
+
+ if (!snb_gfx_workaround_needed())
+ return;
+
+ printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
+
+ /*
+ * Reserve all memory below the 1 MB mark that has not
+ * already been reserved.
+ */
+ memblock_reserve(0, 1<<20);
+
+ for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
+ if (memblock_reserve(bad_pages[i], PAGE_SIZE))
+ printk(KERN_WARNING "failed to reserve 0x%08lx\n",
+ bad_pages[i]);
+ }
+}
+
+/*
+ * Here we put platform-specific memory range workarounds, i.e.
+ * memory known to be corrupt or otherwise in need to be reserved on
+ * specific platforms.
+ *
+ * If this gets used more widely it could use a real dispatch mechanism.
+ */
+static void __init trim_platform_memory_ranges(void)
+{
+ trim_snb_memory();
+}
+
static void __init trim_bios_range(void)
{
/*
@@ -757,6 +834,7 @@ static void __init trim_bios_range(void)
* take them out.
*/
e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
+
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
@@ -880,15 +958,15 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_EFI
if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL32", 4)) {
- efi_enabled = 1;
- efi_64bit = false;
+ set_bit(EFI_BOOT, &x86_efi_facility);
} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
"EL64", 4)) {
- efi_enabled = 1;
- efi_64bit = true;
+ set_bit(EFI_BOOT, &x86_efi_facility);
+ set_bit(EFI_64BIT, &x86_efi_facility);
}
- if (efi_enabled && efi_memblock_x86_reserve_range())
- efi_enabled = 0;
+
+ if (efi_enabled(EFI_BOOT))
+ efi_memblock_x86_reserve_range();
#endif
#else /* CONFIG_XEN */
#ifdef CONFIG_X86_32
@@ -987,7 +1065,7 @@ void __init setup_arch(char **cmdline_p)
finish_e820_parsing();
- if (efi_enabled)
+ if (efi_enabled(EFI_BOOT))
efi_init();
if (is_initial_xendomain())
@@ -1078,7 +1156,7 @@ void __init setup_arch(char **cmdline_p)
* The EFI specification says that boot service code won't be called
* after ExitBootServices(). This is, in fact, a lie.
*/
- if (efi_enabled)
+ if (efi_enabled(EFI_MEMMAP))
efi_reserve_boot_services();
/* preallocate 4k for mptable mpc */
@@ -1093,6 +1171,8 @@ void __init setup_arch(char **cmdline_p)
#ifndef CONFIG_XEN
setup_real_mode();
+
+ trim_platform_memory_ranges();
#endif
init_gbpages();
@@ -1418,7 +1498,7 @@ void __init setup_arch(char **cmdline_p)
;
else
#endif
- if (!efi_enabled || efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)
+ if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
conswitchp = &vga_con;
#endif
#endif
@@ -1433,14 +1513,14 @@ void __init setup_arch(char **cmdline_p)
register_refined_jiffies(CLOCK_TICK_RATE);
#if defined(CONFIG_EFI) && !defined(CONFIG_XEN)
- /* Once setup is done above, disable efi_enabled on mismatched
- * firmware/kernel archtectures since there is no support for
- * runtime services.
+ /* Once setup is done above, unmap the EFI memory map on
+ * mismatched firmware/kernel archtectures since there is no
+ * support for runtime services.
*/
- if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+ if (efi_enabled(EFI_BOOT) &&
+ IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
efi_unmap_memmap();
- efi_enabled = 0;
}
#endif
}
diff --git a/arch/x86/platform/efi/efi-xen.c b/arch/x86/platform/efi/efi-xen.c
index a41faf0be830..f56767e01a9b 100644
--- a/arch/x86/platform/efi/efi-xen.c
+++ b/arch/x86/platform/efi/efi-xen.c
@@ -48,9 +48,24 @@
#define EFI_DEBUG 1
-int __read_mostly efi_enabled;
+unsigned long x86_efi_facility;
+
+/*
+ * Returns 1 if 'facility' is enabled, 0 otherwise.
+ */
+int efi_enabled(int facility)
+{
+ return test_bit(facility, &x86_efi_facility) != 0;
+}
EXPORT_SYMBOL(efi_enabled);
+static int __init setup_noefi(char *arg)
+{
+ clear_bit(EFI_BOOT, &x86_efi_facility);
+ return 0;
+}
+early_param("noefi", setup_noefi);
+
#define call op.u.efi_runtime_call
#define DECLARE_CALL(what) \
struct xen_platform_op op; \
@@ -306,13 +321,6 @@ struct efi __read_mostly efi = {
};
EXPORT_SYMBOL(efi);
-static int __init setup_noefi(char *arg)
-{
- efi_enabled = 0;
- return 0;
-}
-early_param("noefi", setup_noefi);
-
int efi_set_rtc_mmss(unsigned long nowtime)
{
@@ -369,8 +377,13 @@ void __init efi_probe(void)
}
};
- if (HYPERVISOR_platform_op(&op) == 0)
- efi_enabled = 1;
+ if (HYPERVISOR_platform_op(&op) == 0) {
+ __set_bit(EFI_BOOT, &x86_efi_facility);
+ __set_bit(EFI_64BIT, &x86_efi_facility);
+ __set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+ __set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+ __set_bit(EFI_MEMMAP, &x86_efi_facility);
+ }
}
void __init efi_reserve_boot_services(void) { }
@@ -468,10 +481,10 @@ void __init efi_init(void)
op.u.firmware_info.index = XEN_FW_EFI_CONFIG_TABLE;
if (HYPERVISOR_platform_op(&op))
BUG();
- if (efi_config_init(info->cfg.addr, info->cfg.nent)) {
- efi_enabled = 0;
+ if (efi_config_init(info->cfg.addr, info->cfg.nent))
return;
- }
+
+ set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
x86_platform.get_wallclock = efi_get_time;
x86_platform.set_wallclock = efi_set_rtc_mmss;
@@ -493,7 +506,8 @@ static struct platform_device rtc_efi_dev = {
static int __init rtc_init(void)
{
- if (efi_enabled && platform_device_register(&rtc_efi_dev) < 0)
+ if (efi_enabled(EFI_RUNTIME_SERVICES)
+ && platform_device_register(&rtc_efi_dev) < 0)
pr_err("unable to register rtc device...\n");
/* not necessarily an error */
@@ -509,6 +523,9 @@ u32 efi_mem_type(unsigned long phys_addr)
struct xen_platform_op op;
union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
op.cmd = XENPF_firmware_info;
op.u.firmware_info.type = XEN_FW_EFI_INFO;
op.u.firmware_info.index = XEN_FW_EFI_MEM_INFO;
diff --git a/drivers/xen/blkback/blkback.c b/drivers/xen/blkback/blkback.c
index 4e7cd912ad8a..6fdeb1d88799 100644
--- a/drivers/xen/blkback/blkback.c
+++ b/drivers/xen/blkback/blkback.c
@@ -64,10 +64,10 @@ module_param_named(reqs, blkif_reqs, uint, 0444);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
/* Run-time switchable: /sys/module/blkback/parameters/ */
-static unsigned int log_stats = 0;
-static unsigned int debug_lvl = 0;
-module_param(log_stats, int, 0644);
-module_param(debug_lvl, int, 0644);
+static bool log_stats;
+static unsigned int debug_lvl;
+module_param(log_stats, bool, 0644);
+module_param(debug_lvl, uint, 0644);
/* Order of maximum shared ring size advertised to the front end. */
unsigned int blkif_max_ring_page_order/* XXX = sizeof(long) / 4*/;
@@ -561,7 +561,6 @@ static void dispatch_rw_block_io(blkif_t *blkif,
goto fail_response;
}
- preq.dev = req->handle;
preq.sector_number = req->sector_number;
preq.nr_sects = 0;
diff --git a/drivers/xen/blktap/blktap.c b/drivers/xen/blktap/blktap.c
index 5c3afb4b91a7..3f3e81b139c8 100644
--- a/drivers/xen/blktap/blktap.c
+++ b/drivers/xen/blktap/blktap.c
@@ -131,10 +131,10 @@ static struct tap_blkif *tapfds[MAX_TAP_DEV];
static int blktap_next_minor;
/* Run-time switchable: /sys/module/blktap/parameters/ */
-static unsigned int log_stats = 0;
-static unsigned int debug_lvl = 0;
-module_param(log_stats, int, 0644);
-module_param(debug_lvl, int, 0644);
+static bool log_stats;
+static unsigned int debug_lvl;
+module_param(log_stats, bool, 0644);
+module_param(debug_lvl, uint, 0644);
/*
* Each outstanding request that we've passed to the lower device layers has a
diff --git a/drivers/xen/blktap/common.h b/drivers/xen/blktap/common.h
index 126da802d069..5459d5c81eee 100644
--- a/drivers/xen/blktap/common.h
+++ b/drivers/xen/blktap/common.h
@@ -39,7 +39,8 @@
#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
__FILE__ , __LINE__ , ## _a )
-#define WPRINTK(fmt, args...) pr_warning("blktap: " fmt, ##args)
+#define WPRINTK(fmt, args...) \
+ ((void)(printk_ratelimit() && pr_warning("blktap: " fmt, ##args)))
struct backend_info;
diff --git a/drivers/xen/console/console.c b/drivers/xen/console/console.c
index a69f4791e7d8..496898a4f099 100644
--- a/drivers/xen/console/console.c
+++ b/drivers/xen/console/console.c
@@ -350,11 +350,13 @@ void __init dom0_init_screen_info(const struct dom0_vga_console_info *info, size
#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) && \
((_tty)->index != (xc_num - 1)))
-static struct ktermios *xencons_termios[MAX_NR_CONSOLES];
+static struct tty_port *xencons_ports;
static struct tty_struct *xencons_tty;
static int xencons_priv_irq;
static char x_char;
+static const struct tty_port_operations xencons_port_ops = {};
+
void xencons_rx(char *buf, unsigned len)
{
int i;
@@ -590,6 +592,25 @@ static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
set_current_state(TASK_RUNNING);
}
+static int xencons_install(struct tty_driver *drv, struct tty_struct *tty)
+{
+ struct tty_port *port = &xencons_ports[tty->index];
+ int rc;
+
+ if (DUMMY_TTY(tty))
+ return 0;
+
+ tty->driver_data = NULL;
+
+ tty_port_init(port);
+ port->ops = &xencons_port_ops;
+
+ rc = tty_port_install(port, drv, tty);
+ if (rc)
+ tty_port_put(port);
+ return rc;
+}
+
static int xencons_open(struct tty_struct *tty, struct file *filp)
{
unsigned long flags;
@@ -598,7 +619,6 @@ static int xencons_open(struct tty_struct *tty, struct file *filp)
return 0;
spin_lock_irqsave(&xencons_lock, flags);
- tty->driver_data = NULL;
if (xencons_tty == NULL)
xencons_tty = tty;
__xencons_tx_flush();
@@ -644,6 +664,7 @@ static void xencons_close(struct tty_struct *tty, struct file *filp)
}
static const struct tty_operations xencons_ops = {
+ .install = xencons_install,
.open = xencons_open,
.close = xencons_close,
.write = xencons_write,
@@ -674,10 +695,16 @@ static int __init xencons_init(void)
return rc;
}
- xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
- MAX_NR_CONSOLES : 1);
- if (xencons_driver == NULL)
+ rc = xc_mode == XC_TTY ? MAX_NR_CONSOLES : 1;
+ xencons_ports = kcalloc(rc, sizeof(*xencons_ports), GFP_KERNEL);
+ if (!xencons_ports)
+ return -ENOMEM;
+ xencons_driver = alloc_tty_driver(rc);
+ if (!xencons_driver) {
+ kfree(xencons_ports);
+ xencons_ports = NULL;
return -ENOMEM;
+ }
DRV(xencons_driver)->name = "xencons";
DRV(xencons_driver)->major = TTY_MAJOR;
@@ -687,7 +714,6 @@ static int __init xencons_init(void)
DRV(xencons_driver)->flags =
TTY_DRIVER_REAL_RAW |
TTY_DRIVER_RESET_TERMIOS;
- DRV(xencons_driver)->termios = xencons_termios;
switch (xc_mode) {
case XC_XVC:
@@ -723,6 +749,8 @@ static int __init xencons_init(void)
DRV(xencons_driver)->name_base);
put_tty_driver(xencons_driver);
xencons_driver = NULL;
+ kfree(xencons_ports);
+ xencons_ports = NULL;
return rc;
}
diff --git a/drivers/xen/core/cpu_hotplug.c b/drivers/xen/core/cpu_hotplug.c
index 171ab0e90632..c9831d6def32 100644
--- a/drivers/xen/core/cpu_hotplug.c
+++ b/drivers/xen/core/cpu_hotplug.c
@@ -28,13 +28,13 @@ static int local_cpu_hotplug_request(void)
static void __cpuinit vcpu_hotplug(unsigned int cpu, struct device *dev)
{
int err;
- char dir[32], state[32];
+ char dir[16], state[16];
if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
return;
sprintf(dir, "cpu/%u", cpu);
- err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
+ err = xenbus_scanf(XBT_NIL, dir, "availability", "%15s", state);
if (err != 1) {
pr_err("XENBUS: Unable to read cpu state\n");
return;
diff --git a/drivers/xen/fbfront/xenfb.c b/drivers/xen/fbfront/xenfb.c
index 8d36ef70f3b4..8c951fa69c76 100644
--- a/drivers/xen/fbfront/xenfb.c
+++ b/drivers/xen/fbfront/xenfb.c
@@ -141,8 +141,8 @@ struct xenfb_info
#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
-static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
-module_param_array(video, int, NULL, 0);
+static unsigned int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
+module_param_array(video, uint, NULL, 0);
MODULE_PARM_DESC(video,
"Size of video memory in MB and width,height in pixels, default = (2,800,600)");
diff --git a/drivers/xen/netback/common.h b/drivers/xen/netback/common.h
index cc539a76da95..e2c402ed4451 100644
--- a/drivers/xen/netback/common.h
+++ b/drivers/xen/netback/common.h
@@ -200,6 +200,9 @@ void netif_deschedule_work(netif_t *netif);
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
irqreturn_t netif_be_int(int irq, void *dev_id);
+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(netif_t *netif);
+
static inline int netbk_can_queue(struct net_device *dev)
{
netif_t *netif = netdev_priv(dev);
diff --git a/drivers/xen/netback/interface.c b/drivers/xen/netback/interface.c
index d01fef71143c..351aa2942a6c 100644
--- a/drivers/xen/netback/interface.c
+++ b/drivers/xen/netback/interface.c
@@ -329,19 +329,23 @@ err_rx:
return err;
}
+void xenvif_carrier_off(netif_t *netif)
+{
+ rtnl_lock();
+ netback_carrier_off(netif);
+ netif_carrier_off(netif->dev); /* discard queued packets */
+ if (netif_running(netif->dev))
+ __netif_down(netif);
+ rtnl_unlock();
+ netif_put(netif);
+}
+
void netif_disconnect(struct backend_info *be)
{
netif_t *netif = be->netif;
- if (netback_carrier_ok(netif)) {
- rtnl_lock();
- netback_carrier_off(netif);
- netif_carrier_off(netif->dev); /* discard queued packets */
- if (netif_running(netif->dev))
- __netif_down(netif);
- rtnl_unlock();
- netif_put(netif);
- }
+ if (netback_carrier_ok(netif))
+ xenvif_carrier_off(netif);
atomic_dec(&netif->refcnt);
wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
diff --git a/drivers/xen/netback/netback.c b/drivers/xen/netback/netback.c
index 18f68ba2849e..583afce02d28 100644
--- a/drivers/xen/netback/netback.c
+++ b/drivers/xen/netback/netback.c
@@ -191,17 +191,20 @@ static int check_mfn(struct xen_netbk *netbk, unsigned int nr)
return netbk->alloc_index >= nr ? 0 : -ENOMEM;
}
-static void netbk_schedule(struct xen_netbk *netbk)
+static void netbk_rx_schedule(struct xen_netbk *netbk)
{
if (use_kthreads)
wake_up(&netbk->netbk_action_wq);
else
- tasklet_schedule(&netbk->net_tx_tasklet);
+ tasklet_schedule(&netbk->net_rx_tasklet);
}
-static void netbk_schedule_group(unsigned long group)
+static void netbk_tx_schedule(struct xen_netbk *netbk)
{
- netbk_schedule(&xen_netbk[group]);
+ if (use_kthreads)
+ wake_up(&netbk->netbk_action_wq);
+ else
+ tasklet_schedule(&netbk->net_tx_tasklet);
}
static inline void maybe_schedule_tx_action(unsigned int group)
@@ -211,7 +214,7 @@ static inline void maybe_schedule_tx_action(unsigned int group)
smp_mb();
if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
!list_empty(&netbk->schedule_list))
- netbk_schedule(netbk);
+ netbk_tx_schedule(netbk);
}
static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
@@ -391,7 +394,7 @@ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
netbk = &xen_netbk[group];
skb_queue_tail(&netbk->rx_queue, skb);
- netbk_schedule(netbk);
+ netbk_rx_schedule(netbk);
return NETDEV_TX_OK;
@@ -844,13 +847,23 @@ static void net_rx_action(unsigned long group)
/* More work to do? */
if (!skb_queue_empty(&netbk->rx_queue) &&
!timer_pending(&netbk->net_timer))
- netbk_schedule(netbk);
+ netbk_rx_schedule(netbk);
#if 0
else
xen_network_done_notify();
#endif
}
+static void net_alarm(unsigned long group)
+{
+ netbk_rx_schedule(&xen_netbk[group]);
+}
+
+static void netbk_tx_pending_timeout(unsigned long group)
+{
+ netbk_tx_schedule(&xen_netbk[group]);
+}
+
static int __on_net_schedule_list(netif_t *netif)
{
return netif->list.next != NULL;
@@ -1094,7 +1107,7 @@ static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
do {
make_tx_response(netif, txp, XEN_NETIF_RSP_ERROR);
- if (cons >= end)
+ if (cons == end)
break;
txp = RING_GET_REQUEST(&netif->tx, cons++);
} while (1);
@@ -1103,6 +1116,14 @@ static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
netif_put(netif);
}
+static void netbk_fatal_tx_err(netif_t *netif)
+{
+ printk(KERN_ERR "%s: fatal error; disabling device\n",
+ netif->dev->name);
+ xenvif_carrier_off(netif);
+ netif_put(netif);
+}
+
static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
netif_tx_request_t *txp, int work_to_do)
{
@@ -1114,19 +1135,25 @@ static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
do {
if (frags >= work_to_do) {
- DPRINTK("Need more frags\n");
+ printk(KERN_ERR "%s: Need more frags\n",
+ netif->dev->name);
+ netbk_fatal_tx_err(netif);
return -frags;
}
if (unlikely(frags >= MAX_SKB_FRAGS)) {
- DPRINTK("Too many frags\n");
+ printk(KERN_ERR "%s: Too many frags\n",
+ netif->dev->name);
+ netbk_fatal_tx_err(netif);
return -frags;
}
memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
- DPRINTK("Frags galore\n");
+ printk(KERN_ERR "%s: Frag is bigger than frame.\n",
+ netif->dev->name);
+ netbk_fatal_tx_err(netif);
return -frags;
}
@@ -1134,8 +1161,9 @@ static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
frags++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- DPRINTK("txp->offset: %x, size: %u\n",
- txp->offset, txp->size);
+ printk(KERN_ERR "%s: txp->offset: %x, size: %u\n",
+ netif->dev->name, txp->offset, txp->size);
+ netbk_fatal_tx_err(netif);
return -frags;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -1284,7 +1312,9 @@ int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
do {
if (unlikely(work_to_do-- <= 0)) {
- DPRINTK("Missing extra info\n");
+ printk(KERN_ERR "%s: Missing extra info\n",
+ netif->dev->name);
+ netbk_fatal_tx_err(netif);
return -EBADR;
}
@@ -1293,7 +1323,9 @@ int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
netif->tx.req_cons = ++cons;
- DPRINTK("Invalid extra type: %d\n", extra.type);
+ printk(KERN_ERR "%s: Invalid extra type: %d\n",
+ netif->dev->name, extra.type);
+ netbk_fatal_tx_err(netif);
return -EINVAL;
}
@@ -1304,16 +1336,21 @@ int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
return work_to_do;
}
-static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
+static int netbk_set_skb_gso(netif_t *netif, struct sk_buff *skb,
+ struct netif_extra_info *gso)
{
if (!gso->u.gso.size) {
- DPRINTK("GSO size must not be zero.\n");
+ printk(KERN_ERR "%s: GSO size must not be zero.\n",
+ netif->dev->name);
+ netbk_fatal_tx_err(netif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
- DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
+ printk(KERN_ERR "%s: Bad GSO type %d.\n",
+ netif->dev->name, gso->u.gso.type);
+ netbk_fatal_tx_err(netif);
return -EINVAL;
}
@@ -1350,9 +1387,25 @@ static void net_tx_action(unsigned long group)
!list_empty(&netbk->schedule_list)) {
/* Get a netif from the list with work to do. */
netif = poll_net_schedule_list(netbk);
+ /*
+ * This can sometimes happen because the test of
+ * list_empty(net_schedule_list) at the top of the
+ * loop is unlocked. Just go back and have another
+ * look.
+ */
if (!netif)
continue;
+ if (netif->tx.sring->req_prod - netif->tx.req_cons >
+ NET_TX_RING_SIZE) {
+ printk(KERN_ERR "%s: Impossible number of requests. "
+ "req_prod %u, req_cons %u, size %lu\n",
+ netif->dev->name, netif->tx.sring->req_prod,
+ netif->tx.req_cons, NET_TX_RING_SIZE);
+ netbk_fatal_tx_err(netif);
+ continue;
+ }
+
RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
if (!work_to_do) {
netif_put(netif);
@@ -1403,17 +1456,14 @@ static void net_tx_action(unsigned long group)
work_to_do = netbk_get_extras(netif, extras,
work_to_do);
i = netif->tx.req_cons;
- if (unlikely(work_to_do < 0)) {
- netbk_tx_err(netif, &txreq, i);
+ if (unlikely(work_to_do < 0))
continue;
- }
}
ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
- if (unlikely(ret < 0)) {
- netbk_tx_err(netif, &txreq, i - ret);
+ if (unlikely(ret < 0))
continue;
- }
+
i += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1424,10 +1474,10 @@ static void net_tx_action(unsigned long group)
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
- txreq.offset, txreq.size,
- (txreq.offset &~PAGE_MASK) + txreq.size);
- netbk_tx_err(netif, &txreq, i);
+ printk(KERN_ERR "%s: txreq.offset: %x, size: %u, end: %lu\n",
+ netif->dev->name, txreq.offset, txreq.size,
+ (txreq.offset & ~PAGE_MASK) + txreq.size);
+ netbk_fatal_tx_err(netif);
continue;
}
@@ -1452,9 +1502,9 @@ static void net_tx_action(unsigned long group)
struct netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
- if (netbk_set_skb_gso(skb, gso)) {
+ if (netbk_set_skb_gso(netif, skb, gso)) {
+ /* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
- netbk_tx_err(netif, &txreq, i);
continue;
}
}
@@ -1602,7 +1652,7 @@ static void netif_idx_release(struct xen_netbk *netbk, u16 pending_idx)
netbk->dealloc_prod++;
spin_unlock_irqrestore(&netbk->release_lock, flags);
- netbk_schedule(netbk);
+ netbk_tx_schedule(netbk);
}
static void netif_page_release(struct page *page, unsigned int order)
@@ -1820,11 +1870,12 @@ static int __init netback_init(void)
init_timer(&netbk->net_timer);
netbk->net_timer.data = group;
- netbk->net_timer.function = netbk_schedule_group;
+ netbk->net_timer.function = net_alarm;
init_timer(&netbk->tx_pending_timer);
netbk->tx_pending_timer.data = group;
- netbk->tx_pending_timer.function = netbk_schedule_group;
+ netbk->tx_pending_timer.function =
+ netbk_tx_pending_timeout;
netbk->pending_prod = MAX_PENDING_REQS;
diff --git a/drivers/xen/netfront/netfront.c b/drivers/xen/netfront/netfront.c
index ff3dbc82b4c8..b0493fae2186 100644
--- a/drivers/xen/netfront/netfront.c
+++ b/drivers/xen/netfront/netfront.c
@@ -66,8 +66,7 @@
#include <xen/net-util.h>
struct netfront_cb {
- struct page *page;
- unsigned offset;
+ unsigned int pull_to;
};
#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@@ -1361,7 +1360,6 @@ static int netif_poll(struct napi_struct *napi, int budget)
struct sk_buff_head errq;
struct sk_buff_head tmpq;
unsigned long flags;
- unsigned int len;
int pages_flipped = 0;
int err;
@@ -1410,49 +1408,21 @@ err:
}
}
- NETFRONT_SKB_CB(skb)->page =
- skb_frag_page(skb_shinfo(skb)->frags);
- NETFRONT_SKB_CB(skb)->offset = rx->offset;
-
- len = rx->status;
- if (len > RX_COPY_THRESHOLD)
- len = RX_COPY_THRESHOLD;
- skb_put(skb, len);
-
- if (rx->status > len) {
- skb_shinfo(skb)->frags[0].page_offset =
- rx->offset + len;
- skb_frag_size_set(skb_shinfo(skb)->frags,
- rx->status - len);
- skb->data_len = rx->status - len;
- } else {
- __skb_fill_page_desc(skb, 0, NULL, 0, 0);
- skb_shinfo(skb)->nr_frags = 0;
- }
+ NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+ if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+ NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
+
+ skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+ skb_frag_size_set(skb_shinfo(skb)->frags, rx->status);
+ skb->data_len = rx->status;
i = xennet_fill_frags(np, skb, &tmpq);
/*
- * Truesize must approximates the size of true data plus
- * any supervisor overheads. Adding hypervisor overheads
- * has been shown to significantly reduce achievable
- * bandwidth with the default receive buffer size. It is
- * therefore not wise to account for it here.
- *
- * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
- * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
- * add the size of the data pulled in xennet_fill_frags().
- *
- * We also adjust for any unused space in the main data
- * area by subtracting (RX_COPY_THRESHOLD - len). This is
- * especially important with drivers which split incoming
- * packets into header and data, using only 66 bytes of
- * the main data area (see the e1000 driver for example.)
- * On such systems, without this last adjustement, our
- * achievable receive throughout using the standard receive
- * buffer size was cut by 25%(!!!).
- */
- skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+ * Truesize is the actual allocation size, even if the
+ * allocation is only partially used.
+ */
+ skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
skb->len += skb->data_len;
if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1492,14 +1462,9 @@ err:
__skb_queue_purge(&errq);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
- struct page *page = NETFRONT_SKB_CB(skb)->page;
- void *vaddr = page_address(page);
- unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
- memcpy(skb->data, vaddr + offset, skb_headlen(skb));
+ unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- if (page != skb_frag_page(skb_shinfo(skb)->frags))
- __free_page(page);
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
diff --git a/drivers/xen/pcifront/pci_op.c b/drivers/xen/pcifront/pci_op.c
index a901a350855a..54ee20344127 100644
--- a/drivers/xen/pcifront/pci_op.c
+++ b/drivers/xen/pcifront/pci_op.c
@@ -12,8 +12,8 @@
#include <xen/evtchn.h>
#include "pcifront.h"
-static int verbose_request = 0;
-module_param(verbose_request, int, 0644);
+static bool verbose_request;
+module_param(verbose_request, bool, 0644);
static void pcifront_init_sd(struct pcifront_sd *sd,
unsigned int domain, unsigned int bus,
diff --git a/drivers/xen/scsiback/scsiback.c b/drivers/xen/scsiback/scsiback.c
index 9991b70457bb..0d3b1f215c96 100644
--- a/drivers/xen/scsiback/scsiback.c
+++ b/drivers/xen/scsiback/scsiback.c
@@ -56,8 +56,8 @@ static unsigned int vscsiif_reqs = 128;
module_param_named(reqs, vscsiif_reqs, uint, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
-static unsigned int log_print_stat = 0;
-module_param(log_print_stat, int, 0644);
+static bool log_print_stat;
+module_param(log_print_stat, bool, 0644);
#define SCSIBACK_INVALID_HANDLE (~0)
@@ -215,10 +215,8 @@ static void scsiback_cmd_done(struct request *req, int uptodate)
resid = blk_rq_bytes(req);
errors = req->errors;
- if (errors != 0) {
- if (log_print_stat)
- scsiback_print_status(sense_buffer, errors, pending_req);
- }
+ if (errors && log_print_stat)
+ scsiback_print_status(sense_buffer, errors, pending_req);
/* The Host mode is through as for Emulation. */
if (pending_req->info->feature != VSCSI_TYPE_HOST)
@@ -588,40 +586,36 @@ static int _scsiback_do_cmd_fn(struct vscsibk_info *info)
err = prepare_pending_reqs(info, ring_req,
pending_req);
- if (err == -EINVAL) {
- scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
- 0, pending_req);
- continue;
- } else if (err == -ENODEV) {
- scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
- 0, pending_req);
- continue;
- }
-
- if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
-
+ switch (err ?: pending_req->act) {
+ case VSCSIIF_ACT_SCSI_CDB:
/* The Host mode is through as for Emulation. */
if (info->feature == VSCSI_TYPE_HOST)
scsiback_cmd_exec(pending_req);
else
scsiback_req_emulation_or_cmdexec(pending_req);
-
- } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
+ break;
+ case VSCSIIF_ACT_SCSI_RESET:
scsiback_device_reset_exec(pending_req);
- } else {
- pr_err("scsiback: invalid parameter for request\n");
- scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
- 0, pending_req);
- continue;
+ break;
+ default:
+ if(!err && printk_ratelimit())
+ pr_err("scsiback: invalid request\n");
+ scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
+ 0, pending_req);
+ break;
+ case -ENODEV:
+ scsiback_do_resp_with_sense(NULL, DID_NO_CONNECT << 16,
+ 0, pending_req);
+ break;
}
+
+ /* Yield point for this unbounded loop. */
+ cond_resched();
}
if (RING_HAS_UNCONSUMED_REQUESTS(ring))
more_to_do = 1;
- /* Yield point for this unbounded loop. */
- cond_resched();
-
return more_to_do;
}
diff --git a/drivers/xen/usbback/usbback.c b/drivers/xen/usbback/usbback.c
index b604f5dda824..5385ecf65058 100644
--- a/drivers/xen/usbback/usbback.c
+++ b/drivers/xen/usbback/usbback.c
@@ -53,8 +53,8 @@
#include "../../usb/core/hub.h"
#endif
-int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
-module_param_named(reqs, usbif_reqs, int, 0);
+static unsigned int usbif_reqs = 128;
+module_param_named(reqs, usbif_reqs, uint, 0);
MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
struct pending_req_segment {
@@ -1015,7 +1015,8 @@ static int usbbk_start_submit_urb(usbif_t *usbif)
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) {
- pr_warning("RING_REQUEST_CONS_OVERFLOW\n");
+ if(printk_ratelimit())
+ pr_warning("RING_REQUEST_CONS_OVERFLOW\n");
break;
}
@@ -1030,12 +1031,12 @@ static int usbbk_start_submit_urb(usbif_t *usbif)
dispatch_request_to_pending_reqs(usbif, req,
pending_req);
+
+ cond_resched();
}
RING_FINAL_CHECK_FOR_REQUESTS(&usbif->urb_ring, more_to_do);
- cond_resched();
-
return more_to_do;
}
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index af4907d64be3..546c7edd614b 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -148,7 +148,6 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
#ifndef CONFIG_XEN
struct xen_pcibk_dev_data *dev_data;
#endif
- int otherend = pdev->xdev->otherend_id;
int status;
if (unlikely(verbose_request))
@@ -157,8 +156,9 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
status = pci_enable_msi(dev);
if (status) {
- printk(KERN_ERR "error enable msi for guest %x status %x\n",
- otherend, status);
+ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI for guest %u: err %d\n",
+ pci_name(dev), pdev->xdev->otherend_id,
+ status);
op->value = 0;
return XEN_PCI_ERR_op_failed;
}
@@ -256,10 +256,10 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
pci_name(dev), i,
op->msix_entries[i].vector);
}
- } else {
- printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
- pci_name(dev), result);
- }
+ } else
+ pr_warn_ratelimited(DRV_NAME ": %s: error enabling MSI-X for guest %u: err %d!\n",
+ pci_name(dev), pdev->xdev->otherend_id,
+ result);
kfree(entries);
op->value = result;