author     Jiri Slaby <jslaby@suse.cz>    2012-10-21 23:17:25 +0200
committer  Jiri Slaby <jslaby@suse.cz>    2012-10-21 23:17:25 +0200
commit     cfee26b09e0d74495de404afe3d0bf9d90bc25b3 (patch)
tree       352b4e822d67452d7708ce782e98ee9a178bfa55
parent     3fa892ec9615adec76d6cc9117fc009d79163d26 (diff)
- Linux 3.6.3.
- Refresh patches.xen/xen3-fixup-xen.
- Refresh patches.xen/xen3-patch-2.6.23.
- Refresh patches.xen/xen3-patch-3.2.
- Refresh patches.xen/xen3-patch-3.6.
- Delete patches.fixes/hv-storvsc-reset-wait.patch.
- Delete patches.rpmify/x86-kbuild-archscripts-depends-on-scripts_basic.
-rw-r--r--  patches.fixes/hv-storvsc-reset-wait.patch                       | 34
-rw-r--r--  patches.kernel.org/patch-3.6.2-3                                | 3213
-rw-r--r--  patches.rpmify/x86-kbuild-archscripts-depends-on-scripts_basic  | 43
-rw-r--r--  patches.xen/xen3-fixup-xen                                      | 10
-rw-r--r--  patches.xen/xen3-patch-2.6.23                                   | 4
-rw-r--r--  patches.xen/xen3-patch-3.2                                      | 2
-rw-r--r--  patches.xen/xen3-patch-3.6                                      | 8
-rw-r--r--  series.conf                                                     | 3
8 files changed, 3228 insertions, 89 deletions
diff --git a/patches.fixes/hv-storvsc-reset-wait.patch b/patches.fixes/hv-storvsc-reset-wait.patch
deleted file mode 100644
index 06f5a5f7e0..0000000000
--- a/patches.fixes/hv-storvsc-reset-wait.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: K. Y. Srinivasan <kys@microsoft.com>
-Subject: Drivers: scsi: storvsc: Account for in-transit packets in the RESET path
-Patch-mainline: not yet
-
-Properly account for I/O in transit before returning from the RESET call.
-In the absence of this patch we could have a situation where the host may
-respond to a command that was issued prior to the issuance of the RESET
-command at some arbitrary time after responding to the RESET command.
-Currently, the host does not do anything with the RESET command and so
-it is ok to wait for the in-transit I/O to be accounted for. If the host
-side semantics changes, we will have to revisit this.
-
-Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
-Acked-by: jbeulich@suse.com (in lieu of ohering@suse.com)
-
----
- drivers/scsi/storvsc_drv.c | 5 +++++
- 1 files changed, 5 insertions(+), 0 deletions(-)
-
---- a/drivers/scsi/storvsc_drv.c
-+++ b/drivers/scsi/storvsc_drv.c
-@@ -1223,7 +1223,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
- /*
- * At this point, all outstanding requests in the adapter
- * should have been flushed out and return to us
-+ * There is a potential race here where the host may be in
-+ * the process of responding when we return from here.
-+ * Just wait for all in-transit packets to be accounted for
-+ * before we return from here.
- */
-+ storvsc_wait_to_drain(stor_device);
-
- return SUCCESS;
- }
diff --git a/patches.kernel.org/patch-3.6.2-3 b/patches.kernel.org/patch-3.6.2-3
new file mode 100644
index 0000000000..b118517263
--- /dev/null
+++ b/patches.kernel.org/patch-3.6.2-3
@@ -0,0 +1,3213 @@
+From: Jiri Slaby <jslaby@suse.cz>
+Subject: Linux 3.6.3
+Patch-mainline: 3.6.3
+Git-commit: 10f571d09106c3eb85951896522c9650596eff2e
+Git-commit: 733a48e5ae5bf28b046fad984d458c747cbb8c21
+Git-commit: 034940a6b3afbe79022ab6922dd9d2982b78e6d5
+Git-commit: a1b98e12b7f8fad2f0aa3c08a3302bcac7ae1ec7
+Git-commit: 5ae9eb4cbdfd640269dbd66aa3c92ea8e11cc838
+Git-commit: 57451e437796548d658d03c2c4aab659eafcd799
+Git-commit: 71aa5ebe36a4e936eff281b375a4707b6a8320f2
+Git-commit: 1f04661fde9deda4a2cd5845258715a22d8af197
+Git-commit: 128960a9ad67e2d119738f5211956e0304517551
+Git-commit: 9c6d196d5aa35e07482f23c3e37755e7a82140e0
+Git-commit: abce9ac292e13da367bbd22c1f7669f988d931ac
+Git-commit: 8edc0e624db3756783233e464879eb2e3b904c13
+Git-commit: bf7a01bf7987b63b121d572b240c132ec44129c4
+Git-commit: 09e05d4805e6c524c1af74e524e5d0528bb3fef3
+Git-commit: dabdaf0caa3af520dbc1df87b2fb4e77224037bd
+Git-commit: 0c96c65b48fba3ffe9822a554cbc0cd610765cd5
+Git-commit: 26b6e44afb58432a5e998da0343757404f9de9ee
+Git-commit: c77d7162a7ae451c2e895d7ef7fbeb0906107472
+Git-commit: 082918471139b07964967cfe5f70230909c82ae1
+Git-commit: ffd8d101a3a7d3f2e79deee1e342801703b6dc70
+Git-commit: d1c7d97ad58836affde6e39980b96527510b572e
+Git-commit: e7d491a19d3e3aac544070293891a2542ae0c565
+Git-commit: 3ce9e53e788881da0d5f3912f80e0dd6b501f304
+Git-commit: 97541ccfb9db2bb9cd1dde6344d5834438d14bda
+Git-commit: 5aa8b572007c4bca1e6d3dd4c4820f1ae49d6bb2
+Git-commit: 17b572e82032bc246324ce136696656b66d4e3f1
+Git-commit: 91502f099dfc5a1e8812898e26ee280713e1d002
+Git-commit: c3e7724b6bc2f25e46c38dbe68f09d71fafeafb8
+Git-commit: 249ee72249140fe5b9adc988f97298f0aa5db2fc
+Git-commit: 2b17c545a4cdbbbadcd7f1e9684c2d7db8f085a6
+Git-commit: 26cff4e2aa4d666dc6a120ea34336b5057e3e187
+Git-commit: 5b3900cd409466c0070b234d941650685ad0c791
+Git-commit: 012a1211845eab69a5488d59eb87d24cc518c627
+Git-commit: b8c4321f3d194469007f5f5f2b34ec278c264a04
+Git-commit: c99af3752bb52ba3aece5315279a57a477edfaf1
+Git-commit: 49999ab27eab6289a8e4f450e148bdab521361b2
+Git-commit: 60ea8226cbd5c8301f9a39edc574ddabcb8150e0
+Git-commit: cd0608e71e9757f4dae35bcfb4e88f4d1a03a8ab
+Git-commit: 1a7bbda5b1ab0e02622761305a32dc38735b90b2
+Git-commit: cb6b6df111e46b9d0f79eb971575fd50555f43f4
+Git-commit: 899649b7d4ead76c19e39251ca886eebe3f811a8
+Git-commit: 9dbdfd23b7638d054f3b0e70c64dfb9f297f2a9f
+Git-commit: e4b11b89f9039ca97b2ed1b6efeb6749fbdeb252
+Git-commit: b32f4c7ed85c5cee2a21a55c9f59ebc9d57a2463
+Git-commit: 37bb7899ca366dc212b71b150e78566d04808cc0
+Git-commit: 7a3f369ce31694017996524a1cdb08208a839077
+Git-commit: ffe7b0e9326d9c68f5688bef691dd49f1e0d3651
+Git-commit: a519fc7a70d1a918574bb826cc6905b87b482eb9
+Git-commit: 790198f74c9d1b46b6a89504361b1a844670d050
+Git-commit: 9d7d6e363b06934221b81a859d509844c97380df
+Git-commit: 7253b85cc62d6ff84143d96fe6cd54f73736f4d7
+Git-commit: 2e9c9dfde00a6466441e93033cf2c37f720bdacf
+Git-commit: bc977749e967daa56de1922cf4cb38525631c51c
+Git-commit: 5c1b10ab7f93d24f29b5630286e323d1c5802d5c
+Git-commit: cf0eb28d3ba60098865bf7dbcbfdd6b1cc483e3b
+Git-commit: 38b11bae6ba02da352340aff12ee25755977b222
+Git-commit: 904753da183566c71211d23c169a80184648c121
+Git-commit: f25590f39d543272f7ae7b00d533359c8d7ff331
+Git-commit: 35c2a7f4908d404c9124c2efc6ada4640ca4d5d5
+Git-commit: f0a996eeeda214f4293e234df33b29bec003b536
+Git-commit: c5e0b6dbad9b4d18c561af90b384d02373f1c994
+Git-commit: f7f4b2322bf7b8c5929b7eb5a667091f32592580
+Git-commit: 7819d1c70eb6a57e43554d86e10b39d1e106ed65
+Git-commit: 67bfa9b60bd689601554526d144b21d529f78a09
+Git-commit: a520d52e99b14ba7db135e916348f12f2a6e09be
+Git-commit: cb7323fffa85df37161f4d3be45e1f787808309c
+Git-commit: 303a7ce92064c285a04c870f2dc0192fdb2968cb
+Git-commit: e9406db20fecbfcab646bad157b4cfdc7cadddfb
+Git-commit: 9959ba0c241a71c7ed8133401cfbbee2720da0b5
+Git-commit: cf9182e90b2af04245ac4fae497fe73fc71285b4
+Git-commit: 68eb35081e297b37db49d854cda144c6a3397699
+Git-commit: 6938867edba929a65a167a97581231e76aeb10b4
+Git-commit: dc182549d439f60c332bf74d7f220a1bccf37da6
+Git-commit: fe6e1e8d9fad86873eb74a26e80a8f91f9e870b5
+Git-commit: 846a136881b8f73c1f74250bf6acfaa309cab1f2
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+diff --git a/Makefile b/Makefile
+index af5d6a9..6cdadf4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 6
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Terrified Chipmunk
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 2f88d8d..48c19d4 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1413,6 +1413,16 @@ config PL310_ERRATA_769419
+ on systems with an outer cache, the store buffer is drained
+ explicitly.
+
++config ARM_ERRATA_775420
++ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
++ depends on CPU_V7
++ help
++ This option enables the workaround for the 775420 Cortex-A9 (r2p2,
++ r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
++ operation aborts with MMU exception, it might cause the processor
++ to deadlock. This workaround puts DSB before executing ISB if
++ an abort may occur on cache maintenance.
++
+ endmenu
+
+ source "arch/arm/common/Kconfig"
+diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
+index 3d5fc41..bf53047 100644
+--- a/arch/arm/include/asm/vfpmacros.h
++++ b/arch/arm/include/asm/vfpmacros.h
+@@ -28,7 +28,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
++ ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+@@ -52,7 +52,7 @@
+ ldr \tmp, =elf_hwcap @ may not have MVFR regs
+ ldr \tmp, [\tmp, #0]
+ tst \tmp, #HWCAP_VFPv3D16
+- stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
++ stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31}
+ addne \base, \base, #32*4 @ step over unused register space
+ #else
+ VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0
+diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
+index 39e3fb3..3b17227 100644
+--- a/arch/arm/mm/cache-v7.S
++++ b/arch/arm/mm/cache-v7.S
+@@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
+ * isn't mapped, fail with -EFAULT.
+ */
+ 9001:
++#ifdef CONFIG_ARM_ERRATA_775420
++ dsb
++#endif
+ mov r0, #-EFAULT
+ mov pc, lr
+ UNWIND(.fnend )
+diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
+index dbf1e03..2bc51fb 100644
+--- a/arch/arm/plat-omap/counter_32k.c
++++ b/arch/arm/plat-omap/counter_32k.c
+@@ -55,22 +55,29 @@ static u32 notrace omap_32k_read_sched_clock(void)
+ * nsecs and adds to a monotonically increasing timespec.
+ */
+ static struct timespec persistent_ts;
+-static cycles_t cycles, last_cycles;
++static cycles_t cycles;
+ static unsigned int persistent_mult, persistent_shift;
++static DEFINE_SPINLOCK(read_persistent_clock_lock);
++
+ static void omap_read_persistent_clock(struct timespec *ts)
+ {
+ unsigned long long nsecs;
+- cycles_t delta;
+- struct timespec *tsp = &persistent_ts;
++ cycles_t last_cycles;
++ unsigned long flags;
++
++ spin_lock_irqsave(&read_persistent_clock_lock, flags);
+
+ last_cycles = cycles;
+ cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
+- delta = cycles - last_cycles;
+
+- nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
++ nsecs = clocksource_cyc2ns(cycles - last_cycles,
++ persistent_mult, persistent_shift);
++
++ timespec_add_ns(&persistent_ts, nsecs);
++
++ *ts = persistent_ts;
+
+- timespec_add_ns(tsp, nsecs);
+- *ts = *tsp;
++ spin_unlock_irqrestore(&read_persistent_clock_lock, flags);
+ }
+
+ /**
+diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
+index d272857..579f452 100644
+--- a/arch/mips/ath79/clock.c
++++ b/arch/mips/ath79/clock.c
+@@ -17,6 +17,8 @@
+ #include <linux/err.h>
+ #include <linux/clk.h>
+
++#include <asm/div64.h>
++
+ #include <asm/mach-ath79/ath79.h>
+ #include <asm/mach-ath79/ar71xx_regs.h>
+ #include "common.h"
+@@ -166,11 +168,34 @@ static void __init ar933x_clocks_init(void)
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
+ }
+
++static u32 __init ar934x_get_pll_freq(u32 ref, u32 ref_div, u32 nint, u32 nfrac,
++ u32 frac, u32 out_div)
++{
++ u64 t;
++ u32 ret;
++
++ t = ath79_ref_clk.rate;
++ t *= nint;
++ do_div(t, ref_div);
++ ret = t;
++
++ t = ath79_ref_clk.rate;
++ t *= nfrac;
++ do_div(t, ref_div * frac);
++ ret += t;
++
++ ret /= (1 << out_div);
++ return ret;
++}
++
+ static void __init ar934x_clocks_init(void)
+ {
+- u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
++ u32 pll, out_div, ref_div, nint, nfrac, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
++ void __iomem *dpll_base;
++
++ dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
+
+ bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+@@ -178,33 +203,59 @@ static void __init ar934x_clocks_init(void)
+ else
+ ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+- pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
+- out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
+- ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
+- nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_NINT_MASK;
+- frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+- AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
+-
+- cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+- cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
+- cpu_pll /= (1 << out_div);
+-
+- pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
+- out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
+- ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
+- nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_NINT_MASK;
+- frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+- AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
+-
+- ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+- ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
+- ddr_pll /= (1 << out_div);
++ pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL2_REG);
++ if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
++ out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
++ AR934X_SRIF_DPLL2_OUTDIV_MASK;
++ pll = __raw_readl(dpll_base + AR934X_SRIF_CPU_DPLL1_REG);
++ nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
++ AR934X_SRIF_DPLL1_NINT_MASK;
++ nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
++ ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
++ AR934X_SRIF_DPLL1_REFDIV_MASK;
++ frac = 1 << 18;
++ } else {
++ pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
++ out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
++ ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
++ nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_NINT_MASK;
++ nfrac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
++ AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
++ frac = 1 << 6;
++ }
++
++ cpu_pll = ar934x_get_pll_freq(ath79_ref_clk.rate, ref_div, nint,
++ nfrac, frac, out_div);
++
++ pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL2_REG);
++ if (pll & AR934X_SRIF_DPLL2_LOCAL_PLL) {
++ out_div = (pll >> AR934X_SRIF_DPLL2_OUTDIV_SHIFT) &
++ AR934X_SRIF_DPLL2_OUTDIV_MASK;
++ pll = __raw_readl(dpll_base + AR934X_SRIF_DDR_DPLL1_REG);
++ nint = (pll >> AR934X_SRIF_DPLL1_NINT_SHIFT) &
++ AR934X_SRIF_DPLL1_NINT_MASK;
++ nfrac = pll & AR934X_SRIF_DPLL1_NFRAC_MASK;
++ ref_div = (pll >> AR934X_SRIF_DPLL1_REFDIV_SHIFT) &
++ AR934X_SRIF_DPLL1_REFDIV_MASK;
++ frac = 1 << 18;
++ } else {
++ pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
++ out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
++ ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
++ nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_NINT_MASK;
++ nfrac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
++ AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
++ frac = 1 << 10;
++ }
++
++ ddr_pll = ar934x_get_pll_freq(ath79_ref_clk.rate, ref_div, nint,
++ nfrac, frac, out_div);
+
+ clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
+
+@@ -240,6 +291,8 @@ static void __init ar934x_clocks_init(void)
+
+ ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
++
++ iounmap(dpll_base);
+ }
+
+ void __init ath79_clocks_init(void)
+diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+index dde5044..31a9a7c 100644
+--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
++++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+@@ -63,6 +63,8 @@
+
+ #define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+ #define AR934X_WMAC_SIZE 0x20000
++#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
++#define AR934X_SRIF_SIZE 0x1000
+
+ /*
+ * DDR_CTRL block
+@@ -399,4 +401,25 @@
+ #define AR933X_GPIO_COUNT 30
+ #define AR934X_GPIO_COUNT 23
+
++/*
++ * SRIF block
++ */
++#define AR934X_SRIF_CPU_DPLL1_REG 0x1c0
++#define AR934X_SRIF_CPU_DPLL2_REG 0x1c4
++#define AR934X_SRIF_CPU_DPLL3_REG 0x1c8
++
++#define AR934X_SRIF_DDR_DPLL1_REG 0x240
++#define AR934X_SRIF_DDR_DPLL2_REG 0x244
++#define AR934X_SRIF_DDR_DPLL3_REG 0x248
++
++#define AR934X_SRIF_DPLL1_REFDIV_SHIFT 27
++#define AR934X_SRIF_DPLL1_REFDIV_MASK 0x1f
++#define AR934X_SRIF_DPLL1_NINT_SHIFT 18
++#define AR934X_SRIF_DPLL1_NINT_MASK 0x1ff
++#define AR934X_SRIF_DPLL1_NFRAC_MASK 0x0003ffff
++
++#define AR934X_SRIF_DPLL2_LOCAL_PLL BIT(30)
++#define AR934X_SRIF_DPLL2_OUTDIV_SHIFT 13
++#define AR934X_SRIF_DPLL2_OUTDIV_MASK 0x7
++
+ #endif /* __ASM_MACH_AR71XX_REGS_H */
+diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
+index f4546e9..23817a6 100644
+--- a/arch/mips/kernel/kgdb.c
++++ b/arch/mips/kernel/kgdb.c
+@@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+
++#ifdef CONFIG_KPROBES
++ /*
++ * Return immediately if the kprobes fault notifier has set
++ * DIE_PAGE_FAULT.
++ */
++ if (cmd == DIE_PAGE_FAULT)
++ return NOTIFY_DONE;
++#endif /* CONFIG_KPROBES */
++
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 58790bd..05afcca 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -142,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+ KBUILD_CFLAGS += $(mflags-y)
+ KBUILD_AFLAGS += $(mflags-y)
+
+-archscripts:
++archscripts: scripts_basic
+ $(Q)$(MAKE) $(build)=arch/x86/tools relocs
+
+ ###
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 1fbe75a..c1461de 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -984,7 +984,16 @@ static void xen_write_cr4(unsigned long cr4)
+
+ native_write_cr4(cr4);
+ }
+-
++#ifdef CONFIG_X86_64
++static inline unsigned long xen_read_cr8(void)
++{
++ return 0;
++}
++static inline void xen_write_cr8(unsigned long val)
++{
++ BUG_ON(val);
++}
++#endif
+ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
+ {
+ int ret;
+@@ -1153,6 +1162,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_cr4_safe = native_read_cr4_safe,
+ .write_cr4 = xen_write_cr4,
+
++#ifdef CONFIG_X86_64
++ .read_cr8 = xen_read_cr8,
++ .write_cr8 = xen_write_cr8,
++#endif
++
+ .wbinvd = native_wbinvd,
+
+ .read_msr = native_read_msr_safe,
+@@ -1161,6 +1175,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
+ .read_tsc = native_read_tsc,
+ .read_pmc = native_read_pmc,
+
++ .read_tscp = native_read_tscp,
++
+ .iret = xen_iret,
+ .irq_enable_sysexit = xen_sysexit,
+ #ifdef CONFIG_X86_64
+diff --git a/block/blk-core.c b/block/blk-core.c
+index ee3cb3a..8471fb7 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -696,7 +696,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
+ q->request_fn = rfn;
+ q->prep_rq_fn = NULL;
+ q->unprep_rq_fn = NULL;
+- q->queue_flags = QUEUE_FLAG_DEFAULT;
++ q->queue_flags |= QUEUE_FLAG_DEFAULT;
+
+ /* Override internal queue lock with supplied lock pointer */
+ if (lock)
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 7edaccc..a51df96 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -71,9 +71,6 @@ enum ec_command {
+ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
+ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+
+-#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts
+- per one transaction */
+-
+ enum {
+ EC_FLAGS_QUERY_PENDING, /* Query is pending */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+@@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
+ module_param(ec_delay, uint, 0644);
+ MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
+
++/*
++ * If the number of false interrupts per one transaction exceeds
++ * this threshold, will think there is a GPE storm happened and
++ * will disable the GPE for normal transaction.
++ */
++static unsigned int ec_storm_threshold __read_mostly = 8;
++module_param(ec_storm_threshold, uint, 0644);
++MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
++
+ /* If we find an EC via the ECDT, we need to keep a ptr to its context */
+ /* External interfaces use first EC only, so remember */
+ typedef int (*acpi_ec_query_func) (void *data);
+@@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
+ msleep(1);
+ /* It is safe to enable the GPE outside of the transaction. */
+ acpi_enable_gpe(NULL, ec->gpe);
+- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
++ } else if (t->irq_count > ec_storm_threshold) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+@@ -924,6 +930,17 @@ static int ec_flag_msi(const struct dmi_system_id *id)
+ return 0;
+ }
+
++/*
++ * Clevo M720 notebook actually works ok with IRQ mode, if we lifted
++ * the GPE storm threshold back to 20
++ */
++static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
++{
++ pr_debug("Setting the EC GPE storm threshold to 20\n");
++ ec_storm_threshold = 20;
++ return 0;
++}
++
+ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_skip_dsdt_scan, "Compal JFL92", {
+@@ -955,10 +972,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
++ {
++ ec_enlarge_storm_threshold, "CLEVO hardware", {
++ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL},
+ {},
+ };
+
+-
+ int __init acpi_ec_ecdt_probe(void)
+ {
+ acpi_status status;
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index 817f0ee..4dc8024 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1186,17 +1186,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *off)
+ {
+ struct tpm_chip *chip = file->private_data;
+- size_t in_size = size, out_size;
++ size_t in_size = size;
++ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+- either via tpm_read or a user_read_timer timeout */
+- while (atomic_read(&chip->data_pending) != 0)
+- msleep(TPM_TIMEOUT);
+-
+- mutex_lock(&chip->buffer_mutex);
++ either via tpm_read or a user_read_timer timeout.
++ This also prevents splitted buffered writes from blocking here.
++ */
++ if (atomic_read(&chip->data_pending) != 0)
++ return -EBUSY;
+
+ if (in_size > TPM_BUFSIZE)
+- in_size = TPM_BUFSIZE;
++ return -E2BIG;
++
++ mutex_lock(&chip->buffer_mutex);
+
+ if (copy_from_user
+ (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -1206,6 +1209,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf,
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++ if (out_size < 0) {
++ mutex_unlock(&chip->buffer_mutex);
++ return out_size;
++ }
+
+ atomic_set(&chip->data_pending, out_size);
+ mutex_unlock(&chip->buffer_mutex);
+diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
+index 2783f69..f8d2287 100644
+--- a/drivers/firewire/core-cdev.c
++++ b/drivers/firewire/core-cdev.c
+@@ -473,8 +473,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
+ fill_bus_reset_event(&bus_reset, client);
+- ret = copy_to_user(u64_to_uptr(a->bus_reset),
+- &bus_reset, sizeof(bus_reset));
++ /* unaligned size of bus_reset is 36 bytes */
++ ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
+ }
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 274d25d..97d4f4b 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -3893,7 +3893,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+
+ BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+- BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = drm_irq_install(dev);
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a3e53c5..f02cfad 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -513,7 +513,7 @@
+ */
+ # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+ #define _3D_CHICKEN3 0x02090
+-#define _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL (1 << 5)
++#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+
+ #define MI_MODE 0x0209c
+ # define VS_TIMER_DISPATCH (1 << 6)
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 0c7f4aa..b634f6f 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -4351,7 +4351,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+- if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
++ if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+@@ -4705,7 +4705,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+ /* determine panel color depth */
+ temp = I915_READ(PIPECONF(pipe));
+ temp &= ~PIPE_BPC_MASK;
+- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
++ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, adjusted_mode);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 8c73fae..c23c9ea 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3355,8 +3355,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
+ GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
+ /* Bspec says we need to always set all mask bits. */
+- I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
+- _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
++ I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
++ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
+
+ /*
+ * According to the spec the following bits should be
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 670e991..d16f50f 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -974,11 +974,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+ {
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+- struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+- if (tmds) {
+- if (tmds->i2c_bus)
+- radeon_i2c_destroy(tmds->i2c_bus);
+- }
++ /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
+ kfree(radeon_encoder->enc_priv);
+ drm_encoder_cleanup(encoder);
+ kfree(radeon_encoder);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 0138a72..a48c215 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3158,7 +3158,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+ else {
+ bad_sectors -= (sector - first_bad);
+ if (max_sync > bad_sectors)
+- max_sync = max_sync;
++ max_sync = bad_sectors;
+ continue;
+ }
+ }
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index a11253a..c429abd 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2914,8 +2914,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+ if (le16_to_cpu(p->features) & 1)
+ *busw = NAND_BUSWIDTH_16;
+
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
++ chip->options |= NAND_NO_READRDY;
+
+ pr_info("ONFI flash detected\n");
+ return 1;
+@@ -3080,9 +3079,8 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+ mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ }
+ }
+- /* Get chip options, preserve non chip based options */
+- chip->options &= ~NAND_CHIPOPTIONS_MSK;
+- chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
++ /* Get chip options */
++ chip->options |= type->options;
+
+ /*
+ * Check if chip is not a Samsung device. Do not clear the
+diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
+index cb3356c..04668b4 100644
+--- a/drivers/net/ethernet/intel/e1000e/e1000.h
++++ b/drivers/net/ethernet/intel/e1000e/e1000.h
+@@ -175,13 +175,13 @@ struct e1000_info;
+ /*
+ * in the case of WTHRESH, it appears at least the 82571/2 hardware
+ * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
+- * WTHRESH=4, and since we want 64 bytes at a time written back, set
+- * it to 5
++ * WTHRESH=4, so a setting of 5 gives the most efficient bus
++ * utilization but to avoid possible Tx stalls, set it to 1
+ */
+ #define E1000_TXDCTL_DMA_BURST_ENABLE \
+ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
+ E1000_TXDCTL_COUNT_DESC | \
+- (5 << 16) | /* wthresh must be +1 more than desired */\
++ (1 << 16) | /* wthresh must be +1 more than desired */\
+ (1 << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
+
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index d01a099..a46e75e 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -2831,7 +2831,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
+ * set up some performance related parameters to encourage the
+ * hardware to use the bus more efficiently in bursts, depends
+ * on the tx_int_delay to be enabled,
+- * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
++ * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
+ * hthresh = 1 ==> prefetch when one or more available
+ * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+ * BEWARE: this seems to work but should be considered first if
+diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
+index 03c2d8d..cc7e720 100644
+--- a/drivers/net/usb/mcs7830.c
++++ b/drivers/net/usb/mcs7830.c
+@@ -117,6 +117,7 @@ enum {
+ struct mcs7830_data {
+ u8 multi_filter[8];
+ u8 config;
++ u8 link_counter;
+ };
+
+ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
+@@ -632,20 +633,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+ {
+ u8 *buf = urb->transfer_buffer;
+- bool link;
++ bool link, link_changed;
++ struct mcs7830_data *data = mcs7830_get_data(dev);
+
+ if (urb->actual_length < 16)
+ return;
+
+ link = !(buf[1] & 0x20);
+- if (netif_carrier_ok(dev->net) != link) {
+- if (link) {
+- netif_carrier_on(dev->net);
+- usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+- } else
+- netif_carrier_off(dev->net);
+- netdev_dbg(dev->net, "Link Status is: %d\n", link);
+- }
++ link_changed = netif_carrier_ok(dev->net) != link;
++ if (link_changed) {
++ data->link_counter++;
++ /*
++ track link state 20 times to guard against erroneous
++ link state changes reported sometimes by the chip
++ */
++ if (data->link_counter > 20) {
++ data->link_counter = 0;
++ if (link) {
++ netif_carrier_on(dev->net);
++ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
++ } else
++ netif_carrier_off(dev->net);
++ netdev_dbg(dev->net, "Link Status is: %d\n", link);
++ }
++ } else
++ data->link_counter = 0;
+ }
+
+ static const struct driver_info moschip_info = {
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index 76f07d8..1b48414 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -120,7 +120,7 @@ static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, XMIT, "CABQ TX failed\n");
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+ }
+
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index a22df74..61e08e6 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -767,7 +767,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+
+ return;
+ exit:
+- dev_kfree_skb_any(skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+
+ static void ath9k_stop(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
+index 0d4155a..423a9f3 100644
+--- a/drivers/net/wireless/ath/ath9k/xmit.c
++++ b/drivers/net/wireless/ath/ath9k/xmit.c
+@@ -66,8 +66,7 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+- struct sk_buff *skb,
+- bool dequeue);
++ struct sk_buff *skb);
+
+ enum {
+ MCS_HT20,
+@@ -176,7 +175,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+- if (bf && fi->retries) {
++ if (!bf) {
++ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
++ if (!bf) {
++ ieee80211_free_txskb(sc->hw, skb);
++ continue;
++ }
++ }
++
++ if (fi->retries) {
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+@@ -785,10 +792,13 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+ if (!fi->bf)
+- bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
++ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+- if (!bf)
++ if (!bf) {
++ __skb_unlink(skb, &tid->buf_q);
++ ieee80211_free_txskb(sc->hw, skb);
+ continue;
++ }
+
+ bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
+ seqno = bf->bf_state.seqno;
+@@ -1731,9 +1741,11 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
+ return;
+ }
+
+- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+- if (!bf)
++ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++ if (!bf) {
++ ieee80211_free_txskb(sc->hw, skb);
+ return;
++ }
+
+ bf->bf_state.bf_type = BUF_AMPDU;
+ INIT_LIST_HEAD(&bf_head);
+@@ -1757,11 +1769,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_buf *bf;
+
+ bf = fi->bf;
+- if (!bf)
+- bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
+-
+- if (!bf)
+- return;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add_tail(&bf->list, &bf_head);
+@@ -1834,8 +1841,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
+ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ struct ath_txq *txq,
+ struct ath_atx_tid *tid,
+- struct sk_buff *skb,
+- bool dequeue)
++ struct sk_buff *skb)
+ {
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_frame_info *fi = get_frame_info(skb);
+@@ -1847,7 +1853,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ bf = ath_tx_get_buffer(sc);
+ if (!bf) {
+ ath_dbg(common, XMIT, "TX buffers are full\n");
+- goto error;
++ return NULL;
+ }
+
+ ATH_TXBUF_RESET(bf);
+@@ -1876,18 +1882,12 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+ ath_err(ath9k_hw_common(sc->sc_ah),
+ "dma_mapping_error() on TX\n");
+ ath_tx_return_buffer(sc, bf);
+- goto error;
++ return NULL;
+ }
+
+ fi->bf = bf;
+
+ return bf;
+-
+-error:
+- if (dequeue)
+- __skb_unlink(skb, &tid->buf_q);
+- dev_kfree_skb_any(skb);
+- return NULL;
+ }
+
+ /* FIXME: tx power */
+@@ -1916,9 +1916,14 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
+ */
+ ath_tx_send_ampdu(sc, tid, skb, txctl);
+ } else {
+- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
+- if (!bf)
++ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
++ if (!bf) {
++ if (txctl->paprd)
++ dev_kfree_skb_any(skb);
++ else
++ ieee80211_free_txskb(sc->hw, skb);
+ return;
++ }
+
+ bf->bf_state.bfs_paprd = txctl->paprd;
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 5b30132..41b74ba 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1403,7 +1403,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ ctio->u.status1.scsi_status =
+ __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+- ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
++ ctio->u.status1.sense_data[0] = resp_code;
+
+ qla2x00_start_iocbs(ha, ha->req);
+ }
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 182d5a5..f4cc413 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -2054,7 +2054,7 @@ static void unmap_region(sector_t lba, unsigned int len)
+ block = lba + alignment;
+ rem = do_div(block, granularity);
+
+- if (rem == 0 && lba + granularity <= end && block < map_size) {
++ if (rem == 0 && lba + granularity < end && block < map_size) {
+ clear_bit(block, map_storep);
+ if (scsi_debug_lbprz)
+ memset(fake_storep +
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 528d52b..0144078 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1221,7 +1221,12 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
+ /*
+ * At this point, all outstanding requests in the adapter
+ * should have been flushed out and return to us
++ * There is a potential race here where the host may be in
++ * the process of responding when we return from here.
++ * Just wait for all in-transit packets to be accounted for
++ * before we return from here.
+ */
++ storvsc_wait_to_drain(stor_device);
+
+ return SUCCESS;
+ }
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index 3e79a2f..7554d78 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -219,7 +219,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ struct scatterlist sg;
+ unsigned long flags;
+
+- sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
++ sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+
+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 97c0f78..dd4fce2 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -3271,7 +3271,6 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ len += 1;
+
+ if ((len + payload_len) > buffer_len) {
+- spin_unlock(&tiqn->tiqn_tpg_lock);
+ end_of_buf = 1;
+ goto eob;
+ }
+@@ -3424,6 +3423,7 @@ static int iscsit_send_reject(
+ hdr->opcode = ISCSI_OP_REJECT;
+ hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+ hton24(hdr->dlength, ISCSI_HDR_LEN);
++ hdr->ffffffff = 0xffffffff;
+ cmd->stat_sn = conn->stat_sn++;
+ hdr->statsn = cpu_to_be32(cmd->stat_sn);
+ hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 8a908b2..a90294f 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -25,10 +25,10 @@
+ #define NA_DATAOUT_TIMEOUT_RETRIES 5
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+ #define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+-#define NA_NOPIN_TIMEOUT 5
++#define NA_NOPIN_TIMEOUT 15
+ #define NA_NOPIN_TIMEOUT_MAX 60
+ #define NA_NOPIN_TIMEOUT_MIN 3
+-#define NA_NOPIN_RESPONSE_TIMEOUT 5
++#define NA_NOPIN_RESPONSE_TIMEOUT 30
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60
+ #define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3
+ #define NA_RANDOM_DATAIN_PDU_OFFSETS 0
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index a38a3f8..de9ea32 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -677,6 +677,12 @@ int iscsit_ta_generate_node_acls(
+ pr_debug("iSCSI_TPG[%hu] - Generate Initiator Portal Group ACLs: %s\n",
+ tpg->tpgt, (a->generate_node_acls) ? "Enabled" : "Disabled");
+
++ if (flag == 1 && a->cache_dynamic_acls == 0) {
++ pr_debug("Explicitly setting cache_dynamic_acls=1 when "
++ "generate_node_acls=1\n");
++ a->cache_dynamic_acls = 1;
++ }
++
+ return 0;
+ }
+
+@@ -716,6 +722,12 @@ int iscsit_ta_cache_dynamic_acls(
+ return -EINVAL;
+ }
+
++ if (a->generate_node_acls == 1 && flag == 0) {
++ pr_debug("Skipping cache_dynamic_acls=0 when"
++ " generate_node_acls=1\n");
++ return 0;
++ }
++
+ a->cache_dynamic_acls = flag;
+ pr_debug("iSCSI_TPG[%hu] - Cache Dynamic Initiator Portal Group"
+ " ACLs %s\n", tpg->tpgt, (a->cache_dynamic_acls) ?
+diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
+index 801efa8..06aca11 100644
+--- a/drivers/target/target_core_configfs.c
++++ b/drivers/target/target_core_configfs.c
+@@ -3132,6 +3132,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3147,6 +3148,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+ config_group_init_type_name(&alua_group,
+@@ -3162,6 +3164,7 @@ static int __init target_core_init_configfs(void)
+ GFP_KERNEL);
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+@@ -3173,14 +3176,17 @@ static int __init target_core_init_configfs(void)
+ * Add core/alua/lu_gps/default_lu_gp
+ */
+ lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
+- if (IS_ERR(lu_gp))
++ if (IS_ERR(lu_gp)) {
++ ret = -ENOMEM;
+ goto out_global;
++ }
+
+ lu_gp_cg = &alua_lu_gps_group;
+ lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+ GFP_KERNEL);
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
++ ret = -ENOMEM;
+ goto out_global;
+ }
+
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index cbb5aaf..5c5ed7a 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -125,6 +125,19 @@ static struct se_device *fd_create_virtdevice(
+ * of pure timestamp updates.
+ */
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
++ /*
++ * Optionally allow fd_buffered_io=1 to be enabled for people
++ * who want use the fs buffer cache as an WriteCache mechanism.
++ *
++ * This means that in event of a hard failure, there is a risk
++ * of silent data-loss if the SCSI client has *not* performed a
++ * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
++ * to write-out the entire device cache.
++ */
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
++ flags &= ~O_DSYNC;
++ }
+
+ file = filp_open(fd_dev->fd_dev_name, flags, 0600);
+ if (IS_ERR(file)) {
+@@ -188,6 +201,12 @@ static struct se_device *fd_create_virtdevice(
+ if (!dev)
+ goto fail;
+
++ if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
++ pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
++ " with FDBD_HAS_BUFFERED_IO_WCE\n");
++ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = 1;
++ }
++
+ fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
+ fd_dev->fd_queue_depth = dev->queue_depth;
+
+@@ -407,6 +426,7 @@ enum {
+ static match_table_t tokens = {
+ {Opt_fd_dev_name, "fd_dev_name=%s"},
+ {Opt_fd_dev_size, "fd_dev_size=%s"},
++ {Opt_fd_buffered_io, "fd_buffered_io=%d"},
+ {Opt_err, NULL}
+ };
+
+@@ -418,7 +438,7 @@ static ssize_t fd_set_configfs_dev_params(
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
+ char *orig, *ptr, *arg_p, *opts;
+ substring_t args[MAX_OPT_ARGS];
+- int ret = 0, token;
++ int ret = 0, arg, token;
+
+ opts = kstrdup(page, GFP_KERNEL);
+ if (!opts)
+@@ -459,6 +479,19 @@ static ssize_t fd_set_configfs_dev_params(
+ " bytes\n", fd_dev->fd_dev_size);
+ fd_dev->fbd_flags |= FBDF_HAS_SIZE;
+ break;
++ case Opt_fd_buffered_io:
++ match_int(args, &arg);
++ if (arg != 1) {
++ pr_err("bogus fd_buffered_io=%d value\n", arg);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ pr_debug("FILEIO: Using buffered I/O"
++ " operations for struct fd_dev\n");
++
++ fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
++ break;
+ default:
+ break;
+ }
+@@ -490,8 +523,10 @@ static ssize_t fd_show_configfs_dev_params(
+ ssize_t bl = 0;
+
+ bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
+- bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+- fd_dev->fd_dev_name, fd_dev->fd_dev_size);
++ bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
++ fd_dev->fd_dev_name, fd_dev->fd_dev_size,
++ (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
++ "Buffered-WCE" : "O_DSYNC");
+ return bl;
+ }
+
+diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
+index 70ce7fd..876ae53 100644
+--- a/drivers/target/target_core_file.h
++++ b/drivers/target/target_core_file.h
+@@ -14,6 +14,7 @@
+
+ #define FBDF_HAS_PATH 0x01
+ #define FBDF_HAS_SIZE 0x02
++#define FDBD_HAS_BUFFERED_IO_WCE 0x04
+
+ struct fd_dev {
+ u32 fbd_flags;
+diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
+index 388a922..9229bd9 100644
+--- a/drivers/target/target_core_spc.c
++++ b/drivers/target/target_core_spc.c
+@@ -600,30 +600,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
+ {
+ struct se_device *dev = cmd->se_dev;
+ struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
+- unsigned char *buf, *map_buf;
++ unsigned char *rbuf;
+ unsigned char *cdb = cmd->t_task_cdb;
++ unsigned char buf[SE_INQUIRY_BUF];
+ int p, ret;
+
+- map_buf = transport_kmap_data_sg(cmd);
+- /*
+- * If SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is not set, then we
+- * know we actually allocated a full page. Otherwise, if the
+- * data buffer is too small, allocate a temporary buffer so we
+- * don't have to worry about overruns in all our INQUIRY
+- * emulation handling.
+- */
+- if (cmd->data_length < SE_INQUIRY_BUF &&
+- (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
+- buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
+- if (!buf) {
+- transport_kunmap_data_sg(cmd);
+- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+- return -ENOMEM;
+- }
+- } else {
+- buf = map_buf;
+- }
+-
+ if (dev == tpg->tpg_virt_lun0.lun_se_dev)
+ buf[0] = 0x3f; /* Not connected */
+ else
+@@ -655,11 +636,11 @@ static int spc_emulate_inquiry(struct se_cmd *cmd)
+ ret = -EINVAL;
+
+ out:
+- if (buf != map_buf) {
+- memcpy(map_buf, buf, cmd->data_length);
+- kfree(buf);
++ rbuf = transport_kmap_data_sg(cmd);
++ if (rbuf) {
++ memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
++ transport_kunmap_data_sg(cmd);
+ }
+- transport_kunmap_data_sg(cmd);
+
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+@@ -803,7 +784,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ unsigned char *rbuf;
+ int type = dev->transport->get_device_type(dev);
+ int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
+- int offset = ten ? 8 : 4;
++ u32 offset = ten ? 8 : 4;
+ int length = 0;
+ unsigned char buf[SE_MODE_PAGE_BUF];
+
+@@ -836,6 +817,7 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ offset -= 2;
+ buf[0] = (offset >> 8) & 0xff;
+ buf[1] = offset & 0xff;
++ offset += 2;
+
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+@@ -845,13 +827,10 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[3], type);
+-
+- if ((offset + 2) > cmd->data_length)
+- offset = cmd->data_length;
+-
+ } else {
+ offset -= 1;
+ buf[0] = offset & 0xff;
++ offset += 1;
+
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ (cmd->se_deve &&
+@@ -861,14 +840,13 @@ static int spc_emulate_modesense(struct se_cmd *cmd)
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
+ spc_modesense_dpofua(&buf[2], type);
+-
+- if ((offset + 1) > cmd->data_length)
+- offset = cmd->data_length;
+ }
+
+ rbuf = transport_kmap_data_sg(cmd);
+- memcpy(rbuf, buf, offset);
+- transport_kunmap_data_sg(cmd);
++ if (rbuf) {
++ memcpy(rbuf, buf, min(offset, cmd->data_length));
++ transport_kunmap_data_sg(cmd);
++ }
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 84cbf29..a13f7e1 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3475,6 +3475,19 @@ int con_debug_enter(struct vc_data *vc)
+ kdb_set(2, setargs);
+ }
+ }
++ if (vc->vc_cols < 999) {
++ int colcount;
++ char cols[4];
++ const char *setargs[3] = {
++ "set",
++ "COLUMNS",
++ cols,
++ };
++ if (kdbgetintenv(setargs[0], &colcount)) {
++ snprintf(cols, 4, "%i", vc->vc_cols);
++ kdb_set(2, setargs);
++ }
++ }
+ #endif /* CONFIG_KGDB_KDB */
+ return ret;
+ }
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index f763ed7..e8007b8 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1551,6 +1551,9 @@ static const struct usb_device_id acm_ids[] = {
+ Maybe we should define a new
+ quirk for this. */
+ },
++ { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */
++ .driver_info = NO_UNION_NORMAL,
++ },
+ { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
+diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
+index 1e35963..660fd53 100644
+--- a/drivers/usb/gadget/at91_udc.c
++++ b/drivers/usb/gadget/at91_udc.c
+@@ -1699,7 +1699,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev)
+ int retval;
+ struct resource *res;
+
+- if (!dev->platform_data) {
++ if (!dev->platform_data && !pdev->dev.of_node) {
+ /* small (so we copy it) but critical! */
+ DBG("missing platform_data\n");
+ return -ENODEV;
+diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
+index d8dedc7..3639371 100644
+--- a/drivers/vfio/pci/vfio_pci_intrs.c
++++ b/drivers/vfio/pci/vfio_pci_intrs.c
+@@ -366,6 +366,17 @@ static int vfio_intx_enable(struct vfio_pci_device *vdev)
+ return -ENOMEM;
+
+ vdev->num_ctx = 1;
++
++ /*
++ * If the virtual interrupt is masked, restore it. Devices
++ * supporting DisINTx can be masked at the hardware level
++ * here, non-PCI-2.3 devices will have to wait until the
++ * interrupt is enabled.
++ */
++ vdev->ctx[0].masked = vdev->virq_disabled;
++ if (vdev->pci_2_3)
++ pci_intx(vdev->pdev, !vdev->ctx[0].masked);
++
+ vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+
+ return 0;
+@@ -400,25 +411,26 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
+ return PTR_ERR(trigger);
+ }
+
++ vdev->ctx[0].trigger = trigger;
++
+ if (!vdev->pci_2_3)
+ irqflags = 0;
+
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, vdev->ctx[0].name, vdev);
+ if (ret) {
++ vdev->ctx[0].trigger = NULL;
+ kfree(vdev->ctx[0].name);
+ eventfd_ctx_put(trigger);
+ return ret;
+ }
+
+- vdev->ctx[0].trigger = trigger;
+-
+ /*
+ * INTx disable will stick across the new irq setup,
+ * disable_irq won't.
+ */
+ spin_lock_irqsave(&vdev->irqlock, flags);
+- if (!vdev->pci_2_3 && (vdev->ctx[0].masked || vdev->virq_disabled))
++ if (!vdev->pci_2_3 && vdev->ctx[0].masked)
+ disable_irq_nosync(pdev->irq);
+ spin_unlock_irqrestore(&vdev->irqlock, flags);
+
+diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
+index 8af6414..38fcfff 100644
+--- a/drivers/video/udlfb.c
++++ b/drivers/video/udlfb.c
+@@ -647,7 +647,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
+ result = fb_sys_write(info, buf, count, ppos);
+
+ if (result > 0) {
+- int start = max((int)(offset / info->fix.line_length) - 1, 0);
++ int start = max((int)(offset / info->fix.line_length), 0);
+ int lines = min((u32)((result / info->fix.line_length) + 1),
+ (u32)info->var.yres);
+
+diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c
+index af8f26b..db1e392 100644
+--- a/drivers/video/via/via_clock.c
++++ b/drivers/video/via/via_clock.c
+@@ -25,6 +25,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/via-core.h>
++#include <asm/olpc.h>
+ #include "via_clock.h"
+ #include "global.h"
+ #include "debug.h"
+@@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config)
+ printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap);
+ }
+
++static void noop_set_clock_state(u8 state)
++{
++}
++
+ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ {
+ switch (gfx_chip) {
+@@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip)
+ break;
+
+ }
++
++ if (machine_is_olpc()) {
++ /* The OLPC XO-1.5 cannot suspend/resume reliably if the
++ * IGA1/IGA2 clocks are set as on or off (memory rot
++ * occasionally happens during suspend under such
++ * configurations).
++ *
++ * The only known stable scenario is to leave this bits as-is,
++ * which in their default states are documented to enable the
++ * clock only when it is needed.
++ */
++ clock->set_primary_clock_state = noop_set_clock_state;
++ clock->set_secondary_clock_state = noop_set_clock_state;
++ }
+ }
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index bce15cf..ca373d1 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -47,6 +47,7 @@
+ #include <xen/xenbus.h>
+ #include <xen/xen.h>
+ #include "xenbus_comms.h"
++#include <asm/xen/hypervisor.h>
+
+ struct xs_stored_msg {
+ struct list_head list;
+@@ -617,7 +618,24 @@ static struct xenbus_watch *find_watch(const char *token)
+
+ return NULL;
+ }
++/*
++ * Certain older XenBus toolstack cannot handle reading values that are
++ * not populated. Some Xen 3.4 installation are incapable of doing this
++ * so if we are running on anything older than 4 do not attempt to read
++ * control/platform-feature-xs_reset_watches.
++ */
++static bool xen_strict_xenbus_quirk()
++{
++ uint32_t eax, ebx, ecx, edx, base;
++
++ base = xen_cpuid_base();
++ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
++ if ((eax >> 16) < 4)
++ return true;
++ return false;
++
++}
+ static void xs_reset_watches(void)
+ {
+ int err, supported = 0;
+@@ -625,6 +643,9 @@ static void xs_reset_watches(void)
+ if (!xen_hvm_domain())
+ return;
+
++ if (xen_strict_xenbus_quirk())
++ return;
++
+ err = xenbus_scanf(XBT_NIL, "control",
+ "platform-feature-xs_reset_watches", "%d", &supported);
+ if (err != 1 || !supported)
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index e7396cf..91b1165 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
+ ino->flags |= AUTOFS_INF_PENDING;
+ spin_unlock(&sbi->fs_lock);
+ status = autofs4_mount_wait(dentry);
+- if (status)
+- return ERR_PTR(status);
+ spin_lock(&sbi->fs_lock);
+ ino->flags &= ~AUTOFS_INF_PENDING;
++ if (status) {
++ spin_unlock(&sbi->fs_lock);
++ return ERR_PTR(status);
++ }
+ }
+ done:
+ if (!(ino->flags & AUTOFS_INF_EXPIRING)) {
+diff --git a/fs/ceph/export.c b/fs/ceph/export.c
+index 8e1b60e..02ce909 100644
+--- a/fs/ceph/export.c
++++ b/fs/ceph/export.c
+@@ -99,7 +99,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
+ * FIXME: we should try harder by querying the mds for the ino.
+ */
+ static struct dentry *__fh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_fh *fh)
++ struct ceph_nfs_fh *fh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -107,6 +107,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*fh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__fh_to_dentry %llx\n", fh->ino);
+ vino.ino = fh->ino;
+ vino.snap = CEPH_NOSNAP;
+@@ -150,7 +153,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
+ * convert connectable fh to dentry
+ */
+ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+- struct ceph_nfs_confh *cfh)
++ struct ceph_nfs_confh *cfh, int fh_len)
+ {
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+ struct inode *inode;
+@@ -158,6 +161,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
+ struct ceph_vino vino;
+ int err;
+
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
++
+ dout("__cfh_to_dentry %llx (%llx/%x)\n",
+ cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
+
+@@ -207,9 +213,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
+ if (fh_type == 1)
+- return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
++ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw,
++ fh_len);
+ else
+- return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
++ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw,
++ fh_len);
+ }
+
+ /*
+@@ -230,6 +238,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
+
+ if (fh_type == 1)
+ return ERR_PTR(-ESTALE);
++ if (fh_len < sizeof(*cfh) / 4)
++ return ERR_PTR(-ESTALE);
+
+ pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
+ cfh->parent_name_hash);
+diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
+index e8ed6d4..4767774 100644
+--- a/fs/gfs2/export.c
++++ b/fs/gfs2/export.c
+@@ -161,6 +161,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ case GFS2_SMALL_FH_SIZE:
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_SMALL_FH_SIZE)
++ return NULL;
+ this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32;
+ this.no_formal_ino |= be32_to_cpu(fh[1]);
+ this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32;
+@@ -180,6 +182,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid,
+ switch (fh_type) {
+ case GFS2_LARGE_FH_SIZE:
+ case GFS2_OLD_FH_SIZE:
++ if (fh_len < GFS2_LARGE_FH_SIZE)
++ return NULL;
+ parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32;
+ parent.no_formal_ino |= be32_to_cpu(fh[5]);
+ parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32;
+diff --git a/fs/isofs/export.c b/fs/isofs/export.c
+index 1d38044..2b4f235 100644
+--- a/fs/isofs/export.c
++++ b/fs/isofs/export.c
+@@ -175,7 +175,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb,
+ {
+ struct isofs_fid *ifid = (struct isofs_fid *)fid;
+
+- if (fh_type != 2)
++ if (fh_len < 2 || fh_type != 2)
+ return NULL;
+
+ return isofs_export_iget(sb,
+diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
+index 52c15c7..86b39b1 100644
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -86,7 +86,12 @@ nope:
+ static void release_data_buffer(struct buffer_head *bh)
+ {
+ if (buffer_freed(bh)) {
++ WARN_ON_ONCE(buffer_dirty(bh));
+ clear_buffer_freed(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
+ release_buffer_page(bh);
+ } else
+ put_bh(bh);
+@@ -866,17 +871,35 @@ restart_loop:
+ * there's no point in keeping a checkpoint record for
+ * it. */
+
+- /* A buffer which has been freed while still being
+- * journaled by a previous transaction may end up still
+- * being dirty here, but we want to avoid writing back
+- * that buffer in the future after the "add to orphan"
+- * operation been committed, That's not only a performance
+- * gain, it also stops aliasing problems if the buffer is
+- * left behind for writeback and gets reallocated for another
+- * use in a different page. */
+- if (buffer_freed(bh) && !jh->b_next_transaction) {
+- clear_buffer_freed(bh);
+- clear_buffer_jbddirty(bh);
++ /*
++ * A buffer which has been freed while still being journaled by
++ * a previous transaction.
++ */
++ if (buffer_freed(bh)) {
++ /*
++ * If the running transaction is the one containing
++ * "add to orphan" operation (b_next_transaction !=
++ * NULL), we have to wait for that transaction to
++ * commit before we can really get rid of the buffer.
++ * So just clear b_modified to not confuse transaction
++ * credit accounting and refile the buffer to
++ * BJ_Forget of the running transaction. If the just
++ * committed transaction contains "add to orphan"
++ * operation, we can completely invalidate the buffer
++		 * now. We are rather thorough in that since the
++ * buffer may be still accessible when blocksize <
++ * pagesize and it is attached to the last partial
++ * page.
++ */
++ jh->b_modified = 0;
++ if (!jh->b_next_transaction) {
++ clear_buffer_freed(bh);
++ clear_buffer_jbddirty(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
++ }
+ }
+
+ if (buffer_jbddirty(bh)) {
+diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
+index febc10d..78b7f84 100644
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1843,15 +1843,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
+ * We're outside-transaction here. Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
++ int partial_page)
+ {
+ transaction_t *transaction;
+ struct journal_head *jh;
+ int may_free = 1;
+- int ret;
+
+ BUFFER_TRACE(bh, "entry");
+
++retry:
+ /*
+ * It is safe to proceed here without the j_list_lock because the
+ * buffers cannot be stolen by try_to_free_buffers as long as we are
+@@ -1879,10 +1880,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * clear the buffer dirty bit at latest at the moment when the
+ * transaction marking the buffer as freed in the filesystem
+ * structures is committed because from that moment on the
+- * buffer can be reallocated and used by a different page.
++ * block can be reallocated and used by a different page.
+ * Since the block hasn't been freed yet but the inode has
+ * already been added to orphan list, it is safe for us to add
+ * the buffer to BJ_Forget list of the newest transaction.
++ *
++ * Also we have to clear buffer_mapped flag of a truncated buffer
++ * because the buffer_head may be attached to the page straddling
++ * i_size (can happen only when blocksize < pagesize) and thus the
++ * buffer_head can be reused when the file is extended again. So we end
++ * up keeping around invalidated buffers attached to transactions'
++ * BJ_Forget list just to stop checkpointing code from cleaning up
++ * the transaction this buffer was modified in.
+ */
+ transaction = jh->b_transaction;
+ if (transaction == NULL) {
+@@ -1909,13 +1918,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * committed, the buffer won't be needed any
+ * longer. */
+ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_running_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* There is no currently-running transaction. So the
+ * orphan record which we wrote for this file must have
+@@ -1923,13 +1928,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ * the committing transaction, if it exists. */
+ if (journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "give to committing trans");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_committing_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* The orphan record's transaction has
+ * committed. We can cleanse this buffer */
+@@ -1950,10 +1951,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+ /*
+ * The buffer is committing, we simply cannot touch
+- * it. So we just set j_next_transaction to the
+- * running transaction (if there is one) and mark
+- * buffer as freed so that commit code knows it should
+- * clear dirty bits when it is done with the buffer.
++ * it. If the page is straddling i_size we have to wait
++ * for commit and try again.
++ */
++ if (partial_page) {
++ tid_t tid = journal->j_committing_transaction->t_tid;
++
++ journal_put_journal_head(jh);
++ spin_unlock(&journal->j_list_lock);
++ jbd_unlock_bh_state(bh);
++ spin_unlock(&journal->j_state_lock);
++ log_wait_commit(journal, tid);
++ goto retry;
++ }
++ /*
++ * OK, buffer won't be reachable after truncate. We just set
++ * j_next_transaction to the running transaction (if there is
++ * one) and mark buffer as freed so that commit code knows it
++ * should clear dirty bits when it is done with the buffer.
+ */
+ set_buffer_freed(bh);
+ if (journal->j_running_transaction && buffer_jbddirty(bh))
+@@ -1976,6 +1991,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
+ }
+
+ zap_buffer:
++ /*
++ * This is tricky. Although the buffer is truncated, it may be reused
++ * if blocksize < pagesize and it is attached to the page straddling
++ * EOF. Since the buffer might have been added to BJ_Forget list of the
++ * running transaction, journal_get_write_access() won't clear
++ * b_modified and credit accounting gets confused. So clear b_modified
++ * here. */
++ jh->b_modified = 0;
+ journal_put_journal_head(jh);
+ zap_buffer_no_jh:
+ spin_unlock(&journal->j_list_lock);
+@@ -2024,7 +2047,8 @@ void journal_invalidatepage(journal_t *journal,
+ if (offset <= curr_off) {
+ /* This block is wholly outside the truncation point */
+ lock_buffer(bh);
+- may_free &= journal_unmap_buffer(journal, bh);
++ may_free &= journal_unmap_buffer(journal, bh,
++ offset > 0);
+ unlock_buffer(bh);
+ }
+ curr_off = next_off;
+diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
+index 7ef14b3..e4fb3ba 100644
+--- a/fs/lockd/mon.c
++++ b/fs/lockd/mon.c
+@@ -7,7 +7,6 @@
+ */
+
+ #include <linux/types.h>
+-#include <linux/utsname.h>
+ #include <linux/kernel.h>
+ #include <linux/ktime.h>
+ #include <linux/slab.h>
+@@ -19,6 +18,8 @@
+
+ #include <asm/unaligned.h>
+
++#include "netns.h"
++
+ #define NLMDBG_FACILITY NLMDBG_MONITOR
+ #define NSM_PROGRAM 100024
+ #define NSM_VERSION 1
+@@ -40,6 +41,7 @@ struct nsm_args {
+ u32 proc;
+
+ char *mon_name;
++ char *nodename;
+ };
+
+ struct nsm_res {
+@@ -70,7 +72,7 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ };
+ struct rpc_create_args args = {
+ .net = net,
+- .protocol = XPRT_TRANSPORT_UDP,
++ .protocol = XPRT_TRANSPORT_TCP,
+ .address = (struct sockaddr *)&sin,
+ .addrsize = sizeof(sin),
+ .servername = "rpc.statd",
+@@ -83,10 +85,54 @@ static struct rpc_clnt *nsm_create(struct net *net)
+ return rpc_create(&args);
+ }
+
+-static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+- struct net *net)
++static struct rpc_clnt *nsm_client_get(struct net *net)
+ {
++ static DEFINE_MUTEX(nsm_create_mutex);
+ struct rpc_clnt *clnt;
++ struct lockd_net *ln = net_generic(net, lockd_net_id);
++
++ spin_lock(&ln->nsm_clnt_lock);
++ if (ln->nsm_users) {
++ ln->nsm_users++;
++ clnt = ln->nsm_clnt;
++ spin_unlock(&ln->nsm_clnt_lock);
++ goto out;
++ }
++ spin_unlock(&ln->nsm_clnt_lock);
++
++ mutex_lock(&nsm_create_mutex);
++ clnt = nsm_create(net);
++ if (!IS_ERR(clnt)) {
++ ln->nsm_clnt = clnt;
++ smp_wmb();
++ ln->nsm_users = 1;
++ }
++ mutex_unlock(&nsm_create_mutex);
++out:
++ return clnt;
++}
++
++static void nsm_client_put(struct net *net)
++{
++ struct lockd_net *ln = net_generic(net, lockd_net_id);
++ struct rpc_clnt *clnt = ln->nsm_clnt;
++ int shutdown = 0;
++
++ spin_lock(&ln->nsm_clnt_lock);
++ if (ln->nsm_users) {
++ if (--ln->nsm_users)
++ ln->nsm_clnt = NULL;
++ shutdown = !ln->nsm_users;
++ }
++ spin_unlock(&ln->nsm_clnt_lock);
++
++ if (shutdown)
++ rpc_shutdown_client(clnt);
++}
++
++static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
++ struct rpc_clnt *clnt)
++{
+ int status;
+ struct nsm_args args = {
+ .priv = &nsm->sm_priv,
+@@ -94,31 +140,24 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
+ .vers = 3,
+ .proc = NLMPROC_NSM_NOTIFY,
+ .mon_name = nsm->sm_mon_name,
++ .nodename = clnt->cl_nodename,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+ .rpc_resp = res,
+ };
+
+- clnt = nsm_create(net);
+- if (IS_ERR(clnt)) {
+- status = PTR_ERR(clnt);
+- dprintk("lockd: failed to create NSM upcall transport, "
+- "status=%d\n", status);
+- goto out;
+- }
++ BUG_ON(clnt == NULL);
+
+ memset(res, 0, sizeof(*res));
+
+ msg.rpc_proc = &clnt->cl_procinfo[proc];
+- status = rpc_call_sync(clnt, &msg, 0);
++ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
+ if (status < 0)
+ dprintk("lockd: NSM upcall RPC failed, status=%d\n",
+ status);
+ else
+ status = 0;
+- rpc_shutdown_client(clnt);
+- out:
+ return status;
+ }
+
+@@ -138,6 +177,7 @@ int nsm_monitor(const struct nlm_host *host)
+ struct nsm_handle *nsm = host->h_nsmhandle;
+ struct nsm_res res;
+ int status;
++ struct rpc_clnt *clnt;
+
+ dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
+
+@@ -150,7 +190,15 @@ int nsm_monitor(const struct nlm_host *host)
+ */
+ nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
+
+- status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net);
++ clnt = nsm_client_get(host->net);
++ if (IS_ERR(clnt)) {
++ status = PTR_ERR(clnt);
++ dprintk("lockd: failed to create NSM upcall transport, "
++ "status=%d, net=%p\n", status, host->net);
++ return status;
++ }
++
++ status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, clnt);
+ if (unlikely(res.status != 0))
+ status = -EIO;
+ if (unlikely(status < 0)) {
+@@ -182,9 +230,11 @@ void nsm_unmonitor(const struct nlm_host *host)
+
+ if (atomic_read(&nsm->sm_count) == 1
+ && nsm->sm_monitored && !nsm->sm_sticky) {
++ struct lockd_net *ln = net_generic(host->net, lockd_net_id);
++
+ dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
+
+- status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net);
++ status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, ln->nsm_clnt);
+ if (res.status != 0)
+ status = -EIO;
+ if (status < 0)
+@@ -192,6 +242,8 @@ void nsm_unmonitor(const struct nlm_host *host)
+ nsm->sm_name);
+ else
+ nsm->sm_monitored = 0;
++
++ nsm_client_put(host->net);
+ }
+ }
+
+@@ -430,7 +482,7 @@ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+ {
+ __be32 *p;
+
+- encode_nsm_string(xdr, utsname()->nodename);
++ encode_nsm_string(xdr, argp->nodename);
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(argp->prog);
+ *p++ = cpu_to_be32(argp->vers);
+diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
+index 4eee248..5010b55 100644
+--- a/fs/lockd/netns.h
++++ b/fs/lockd/netns.h
+@@ -12,6 +12,10 @@ struct lockd_net {
+ struct delayed_work grace_period_end;
+ struct lock_manager lockd_manager;
+ struct list_head grace_list;
++
++ spinlock_t nsm_clnt_lock;
++ unsigned int nsm_users;
++ struct rpc_clnt *nsm_clnt;
+ };
+
+ extern int lockd_net_id;
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 31a63f8..7e35587 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -596,6 +596,7 @@ static int lockd_init_net(struct net *net)
+
+ INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
+ INIT_LIST_HEAD(&ln->grace_list);
++ spin_lock_init(&ln->nsm_clnt_lock);
+ return 0;
+ }
+
+diff --git a/fs/namei.c b/fs/namei.c
+index dd1ed1b..81bd546 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -692,9 +692,9 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
+ if (parent->i_uid == inode->i_uid)
+ return 0;
+
++ audit_log_link_denied("follow_link", link);
+ path_put_conditional(link, nd);
+ path_put(&nd->path);
+- audit_log_link_denied("follow_link", link);
+ return -EACCES;
+ }
+
+diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
+index dd392ed..f3d16ad 100644
+--- a/fs/nfs/blocklayout/blocklayout.c
++++ b/fs/nfs/blocklayout/blocklayout.c
+@@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ return bio;
+ }
+
+-static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+- struct parallel_io *par)
++ struct parallel_io *par,
++ unsigned int offset, int len)
+ {
++ isect = isect + (offset >> SECTOR_SHIFT);
++ dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
++ npg, rw, (unsigned long long)isect, offset, len);
+ retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+- if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
++ if (bio_add_page(bio, page, len, offset) < len) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+ }
+
++static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
++ sector_t isect, struct page *page,
++ struct pnfs_block_extent *be,
++ void (*end_io)(struct bio *, int err),
++ struct parallel_io *par)
++{
++ return do_add_page_to_bio(bio, npg, rw, isect, page, be,
++ end_io, par, 0, PAGE_CACHE_SIZE);
++}
++
+ /* This is basically copied from mpage_end_io_read */
+ static void bl_end_io_read(struct bio *bio, int err)
+ {
+@@ -461,6 +475,106 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+ return;
+ }
+
++static void
++bl_read_single_end_io(struct bio *bio, int error)
++{
++ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
++ struct page *page = bvec->bv_page;
++
++ /* Only one page in bvec */
++ unlock_page(page);
++}
++
++static int
++bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int offset, unsigned int len)
++{
++ struct bio *bio;
++ struct page *shadow_page;
++ sector_t isect;
++ char *kaddr, *kshadow_addr;
++ int ret = 0;
++
++ dprintk("%s: offset %u len %u\n", __func__, offset, len);
++
++ shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (shadow_page == NULL)
++ return -ENOMEM;
++
++ bio = bio_alloc(GFP_NOIO, 1);
++ if (bio == NULL)
++ return -ENOMEM;
++
++ isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
++ (offset / SECTOR_SIZE);
++
++ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
++ bio->bi_bdev = be->be_mdev;
++ bio->bi_end_io = bl_read_single_end_io;
++
++ lock_page(shadow_page);
++ if (bio_add_page(bio, shadow_page,
++ SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
++ unlock_page(shadow_page);
++ bio_put(bio);
++ return -EIO;
++ }
++
++ submit_bio(READ, bio);
++ wait_on_page_locked(shadow_page);
++ if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
++ ret = -EIO;
++ } else {
++ kaddr = kmap_atomic(page);
++ kshadow_addr = kmap_atomic(shadow_page);
++ memcpy(kaddr + offset, kshadow_addr + offset, len);
++ kunmap_atomic(kshadow_addr);
++ kunmap_atomic(kaddr);
++ }
++ __free_page(shadow_page);
++ bio_put(bio);
++
++ return ret;
++}
++
++static int
++bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
++ unsigned int dirty_offset, unsigned int dirty_len,
++ bool full_page)
++{
++ int ret = 0;
++ unsigned int start, end;
++
++ if (full_page) {
++ start = 0;
++ end = PAGE_CACHE_SIZE;
++ } else {
++ start = round_down(dirty_offset, SECTOR_SIZE);
++ end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
++ }
++
++ dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
++ if (!be) {
++ zero_user_segments(page, start, dirty_offset,
++ dirty_offset + dirty_len, end);
++ if (start == 0 && end == PAGE_CACHE_SIZE &&
++ trylock_page(page)) {
++ SetPageUptodate(page);
++ unlock_page(page);
++ }
++ return ret;
++ }
++
++ if (start != dirty_offset)
++ ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);
++
++ if (!ret && (dirty_offset + dirty_len < end))
++ ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
++ end - dirty_offset - dirty_len);
++
++ return ret;
++}
++
+ /* Given an unmapped page, zero it or read in page for COW, page is locked
+ * by caller.
+ */
+@@ -494,7 +608,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+ SetPageUptodate(page);
+
+ cleanup:
+- bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+@@ -566,6 +679,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+ struct parallel_io *par = NULL;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
++ unsigned int pg_offset, pg_len, saved_len;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+@@ -674,10 +788,11 @@ next_page:
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
+- isect, NULL);
++ isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ header->pnfs_error = -EINVAL;
+ goto out;
+@@ -694,7 +809,26 @@ next_page:
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+- if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
++
++ dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
++ pg_offset = offset & ~PAGE_CACHE_MASK;
++ if (pg_offset + count > PAGE_CACHE_SIZE)
++ pg_len = PAGE_CACHE_SIZE - pg_offset;
++ else
++ pg_len = count;
++
++ saved_len = pg_len;
++ if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
++ !bl_is_sector_init(be->be_inval, isect)) {
++ ret = bl_read_partial_page_sync(pages[i], cow_read,
++ pg_offset, pg_len, true);
++ if (ret) {
++ dprintk("%s bl_read_partial_page_sync fail %d\n",
++ __func__, ret);
++ header->pnfs_error = ret;
++ goto out;
++ }
++
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS);
+ if (unlikely(ret)) {
+@@ -703,15 +837,35 @@ next_page:
+ header->pnfs_error = ret;
+ goto out;
+ }
++
++ /* Expand to full page write */
++ pg_offset = 0;
++ pg_len = PAGE_CACHE_SIZE;
++ } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
++ (pg_len & (SECTOR_SIZE - 1))){
++ /* ahh, nasty case. We have to do sync full sector
++ * read-modify-write cycles.
++ */
++ unsigned int saved_offset = pg_offset;
++ ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
++ pg_len, false);
++ pg_offset = round_down(pg_offset, SECTOR_SIZE);
++ pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
++ - pg_offset;
+ }
+- bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
++
++
++ bio = do_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
+ isect, pages[i], be,
+- bl_end_io_write, par);
++ bl_end_io_write, par,
++ pg_offset, pg_len);
+ if (IS_ERR(bio)) {
+ header->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
++ offset += saved_len;
++ count -= saved_len;
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+@@ -729,17 +883,16 @@ next_page:
+ }
+
+ write_done:
+- wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+- if (count < wdata->res.count) {
+- wdata->res.count = count;
+- }
++ wdata->res.count = wdata->args.count;
+ out:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+ out_mds:
+ bl_put_extent(be);
++ bl_put_extent(cow_read);
+ kfree(par);
+ return PNFS_NOT_ATTEMPTED;
+ }
+diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
+index 0335069..39bb51a 100644
+--- a/fs/nfs/blocklayout/blocklayout.h
++++ b/fs/nfs/blocklayout/blocklayout.h
+@@ -41,6 +41,7 @@
+
+ #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+ #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
++#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+
+ struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 9969444..0e7cd89 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -855,7 +855,6 @@ static void nfs_server_set_fsinfo(struct nfs_server *server,
+ if (server->wsize > NFS_MAX_FILE_IO_SIZE)
+ server->wsize = NFS_MAX_FILE_IO_SIZE;
+ server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+- server->pnfs_blksize = fsinfo->blksize;
+
+ server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 1e50326..d5a0cf1 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1774,7 +1774,11 @@ static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
+ * informs us the stateid is unrecognized. */
+ if (status != -NFS4ERR_BAD_STATEID)
+ nfs41_free_stateid(server, stateid);
++ nfs_remove_bad_delegation(state->inode);
+
++ write_seqlock(&state->seqlock);
++ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
++ write_sequnlock(&state->seqlock);
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ }
+ }
+@@ -3362,8 +3366,11 @@ static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, s
+
+ nfs_fattr_init(fsinfo->fattr);
+ error = nfs4_do_fsinfo(server, fhandle, fsinfo);
+- if (error == 0)
++ if (error == 0) {
++ /* block layout checks this! */
++ server->pnfs_blksize = fsinfo->blksize;
+ set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
++ }
+
+ return error;
+ }
+diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
+index fdc91a6..ccfe0d0 100644
+--- a/fs/nfsd/nfs4idmap.c
++++ b/fs/nfsd/nfs4idmap.c
+@@ -598,7 +598,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
+ /* Just to make sure it's null-terminated: */
+ memcpy(buf, name, namelen);
+ buf[namelen] = '\0';
+- ret = kstrtouint(name, 10, id);
++ ret = kstrtouint(buf, 10, id);
+ return ret == 0;
+ }
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index cc894ed..5b3224c 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1223,10 +1223,26 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
+ return true;
+ }
+
++/*
++ * RFC 3530 language requires clid_inuse be returned when the
++ * "principal" associated with a request differs from that previously
++ * used. We use uid, gids, and gss principal string as our best
++ * approximation. We also don't want to allow non-gss use of a client
++ * established using gss: in theory cr_principal should catch that
++ * change, but in practice cr_principal can be null even in the gss case
++ * since gssd doesn't always pass down a principal string.
++ */
++static bool is_gss_cred(struct svc_cred *cr)
++{
++ /* Is cr_flavor one of the gss "pseudoflavors"?: */
++ return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
++}
++
++
+ static bool
+ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
+ {
+- if ((cr1->cr_flavor != cr2->cr_flavor)
++ if ((is_gss_cred(cr1) != is_gss_cred(cr2))
+ || (cr1->cr_uid != cr2->cr_uid)
+ || (cr1->cr_gid != cr2->cr_gid)
+ || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
+@@ -3766,6 +3782,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+
+ nfsd4_close_open_stateid(stp);
++ release_last_closed_stateid(oo);
+ oo->oo_last_closed_stid = stp;
+
+ if (list_empty(&oo->oo_owner.so_stateids)) {
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index 855da58..63ce6be 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1573,8 +1573,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ reiserfs_warning(sb, "reiserfs-13077",
+ "nfsd/reiserfs, fhtype=%d, len=%d - odd",
+ fh_type, fh_len);
+- fh_type = 5;
++ fh_type = fh_len;
+ }
++ if (fh_len < 2)
++ return NULL;
+
+ return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1],
+ (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0);
+@@ -1583,6 +1585,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+ {
++ if (fh_type > fh_len)
++ fh_type = fh_len;
+ if (fh_type < 4)
+ return NULL;
+
+diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
+index 4267922..8c6d1d7 100644
+--- a/fs/xfs/xfs_export.c
++++ b/fs/xfs/xfs_export.c
+@@ -189,6 +189,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid,
+ struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid;
+ struct inode *inode = NULL;
+
++ if (fh_len < xfs_fileid_length(fileid_type))
++ return NULL;
++
+ switch (fileid_type) {
+ case FILEID_INO32_GEN_PARENT:
+ inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino,
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 57977c6..e5cf2c8 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -212,9 +212,6 @@ typedef enum {
+ #define NAND_SUBPAGE_READ(chip) ((chip->ecc.mode == NAND_ECC_SOFT) \
+ && (chip->page_shift > 9))
+
+-/* Mask to zero out the chip options, which come from the id table */
+-#define NAND_CHIPOPTIONS_MSK 0x0000ffff
+-
+ /* Non chip related options */
+ /* This option skips the bbt scan during initialization. */
+ #define NAND_SKIP_BBTSCAN 0x00010000
+diff --git a/kernel/audit.c b/kernel/audit.c
+index ea3b7b6..a8c84be 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1466,6 +1466,8 @@ void audit_log_link_denied(const char *operation, struct path *link)
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_ANOM_LINK);
++ if (!ab)
++ return;
+ audit_log_format(ab, "op=%s action=denied", operation);
+ audit_log_format(ab, " pid=%d comm=", current->pid);
+ audit_log_untrustedstring(ab, current->comm);
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index 0a69d2a..14ff484 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -552,6 +552,7 @@ int vkdb_printf(const char *fmt, va_list ap)
+ {
+ int diag;
+ int linecount;
++ int colcount;
+ int logging, saved_loglevel = 0;
+ int saved_trap_printk;
+ int got_printf_lock = 0;
+@@ -584,6 +585,10 @@ int vkdb_printf(const char *fmt, va_list ap)
+ if (diag || linecount <= 1)
+ linecount = 24;
+
++ diag = kdbgetintenv("COLUMNS", &colcount);
++ if (diag || colcount <= 1)
++ colcount = 80;
++
+ diag = kdbgetintenv("LOGGING", &logging);
+ if (diag)
+ logging = 0;
+@@ -690,7 +695,7 @@ kdb_printit:
+ gdbstub_msg_write(kdb_buffer, retlen);
+ } else {
+ if (dbg_io_ops && !dbg_io_ops->is_console) {
+- len = strlen(kdb_buffer);
++ len = retlen;
+ cp = kdb_buffer;
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+@@ -709,11 +714,29 @@ kdb_printit:
+ printk(KERN_INFO "%s", kdb_buffer);
+ }
+
+- if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+- kdb_nextline++;
++ if (KDB_STATE(PAGER)) {
++ /*
++ * Check printed string to decide how to bump the
++ * kdb_nextline to control when the more prompt should
++ * show up.
++ */
++ int got = 0;
++ len = retlen;
++ while (len--) {
++ if (kdb_buffer[len] == '\n') {
++ kdb_nextline++;
++ got = 0;
++ } else if (kdb_buffer[len] == '\r') {
++ got = 0;
++ } else {
++ got++;
++ }
++ }
++ kdb_nextline += got / (colcount + 1);
++ }
+
+ /* check for having reached the LINES number of printed lines */
+- if (kdb_nextline == linecount) {
++ if (kdb_nextline >= linecount) {
+ char buf1[16] = "";
+
+ /* Watch out for recursion here. Any routine that calls
+@@ -765,7 +788,7 @@ kdb_printit:
+ kdb_grepping_flag = 0;
+ kdb_printf("\n");
+ } else if (buf1[0] == ' ') {
+- kdb_printf("\n");
++ kdb_printf("\r");
+ suspend_grep = 1; /* for this recursion */
+ } else if (buf1[0] == '\n') {
+ kdb_nextline = linecount - 1;
+diff --git a/kernel/module.c b/kernel/module.c
+index 4edbd9c..9ad9ee9 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2730,6 +2730,10 @@ static int check_module_license_and_versions(struct module *mod)
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+
++ /* lve claims to be GPL but upstream won't provide source */
++ if (strcmp(mod->name, "lve") == 0)
++ add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
++
+ #ifdef CONFIG_MODVERSIONS
+ if ((mod->num_syms && !mod->crcs)
+ || (mod->num_gpl_syms && !mod->gpl_crcs)
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index 3a9e5d5..e430b97 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -835,7 +835,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
+ */
+ if (ts->tick_stopped) {
+ touch_softlockup_watchdog();
+- if (idle_cpu(cpu))
++ if (is_idle_task(current))
+ ts->idle_jiffies++;
+ }
+ update_process_times(user_mode(regs));
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d3b91e7..f791637 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1111,7 +1111,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+ accumulate_nsecs_to_secs(tk);
+
+ /* Accumulate raw time */
+- raw_nsecs = tk->raw_interval << shift;
++ raw_nsecs = (u64)tk->raw_interval << shift;
+ raw_nsecs += tk->raw_time.tv_nsec;
+ if (raw_nsecs >= NSEC_PER_SEC) {
+ u64 raw_secs = raw_nsecs;
+diff --git a/kernel/timer.c b/kernel/timer.c
+index 8c5e7b9..46ef2b1 100644
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64);
+ #define TVR_SIZE (1 << TVR_BITS)
+ #define TVN_MASK (TVN_SIZE - 1)
+ #define TVR_MASK (TVR_SIZE - 1)
++#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
+
+ struct tvec {
+ struct list_head vec[TVN_SIZE];
+@@ -358,11 +359,12 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+ vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+ } else {
+ int i;
+- /* If the timeout is larger than 0xffffffff on 64-bit
+- * architectures then we use the maximum timeout:
++ /* If the timeout is larger than MAX_TVAL (on 64-bit
++ * architectures or with CONFIG_BASE_SMALL=1) then we
++ * use the maximum timeout.
+ */
+- if (idx > 0xffffffffUL) {
+- idx = 0xffffffffUL;
++ if (idx > MAX_TVAL) {
++ idx = MAX_TVAL;
+ expires = idx + base->timer_jiffies;
+ }
+ i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index d4e184e..d2eeca1 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2366,12 +2366,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
+ {
+ struct inode *inode;
+ struct dentry *dentry = NULL;
+- u64 inum = fid->raw[2];
+- inum = (inum << 32) | fid->raw[1];
++ u64 inum;
+
+ if (fh_len < 3)
+ return NULL;
+
++ inum = fid->raw[2];
++ inum = (inum << 32) | fid->raw[1];
++
+ inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
+ shmem_match, fid->raw);
+ if (inode) {
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 148e73d..e356b8d 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2927,7 +2927,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
+ sizeof(struct ipv6hdr) - sizeof(struct udphdr) -
+ pkt_dev->pkt_overhead;
+
+- if (datalen < sizeof(struct pktgen_hdr)) {
++ if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) {
+ datalen = sizeof(struct pktgen_hdr);
+ net_info_ratelimited("increased datalen to %d\n", datalen);
+ }
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 8cd7291..118329a 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -34,7 +34,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
+ skb_queue_len(&local->skb_queue_unreliable);
+ while (tmp > IEEE80211_IRQSAFE_QUEUE_LIMIT &&
+ (skb = skb_dequeue(&local->skb_queue_unreliable))) {
+- dev_kfree_skb_irq(skb);
++ ieee80211_free_txskb(hw, skb);
+ tmp--;
+ I802_DEBUG_INC(local->tx_status_drop);
+ }
+@@ -159,7 +159,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
+ "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ }
+
+ static void ieee80211_check_pending_bar(struct sta_info *sta, u8 *addr, u8 tid)
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index c5e8c9c..362c418 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -354,7 +354,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
+ total += skb_queue_len(&sta->ps_tx_buf[ac]);
+ if (skb) {
+ purged++;
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ break;
+ }
+ }
+@@ -466,7 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
+ ps_dbg(tx->sdata,
+ "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ sta->sta.addr, ac);
+- dev_kfree_skb(old);
++ ieee80211_free_txskb(&local->hw, old);
+ } else
+ tx->local->total_ps_buffered++;
+
+@@ -1103,7 +1103,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
+ spin_unlock(&tx->sta->lock);
+
+ if (purge_skb)
+- dev_kfree_skb(purge_skb);
++ ieee80211_free_txskb(&tx->local->hw, purge_skb);
+ }
+
+ /* reset session timer */
+@@ -1214,7 +1214,7 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
+ #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (WARN_ON_ONCE(q >= local->hw.queues)) {
+ __skb_unlink(skb, skbs);
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ continue;
+ }
+ #endif
+@@ -1356,7 +1356,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
+ if (unlikely(res == TX_DROP)) {
+ I802_DEBUG_INC(tx->local->tx_handlers_drop);
+ if (tx->skb)
+- dev_kfree_skb(tx->skb);
++ ieee80211_free_txskb(&tx->local->hw, tx->skb);
+ else
+ __skb_queue_purge(&tx->skbs);
+ return -1;
+@@ -1393,7 +1393,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+ res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
+
+ if (unlikely(res_prepare == TX_DROP)) {
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ goto out;
+ } else if (unlikely(res_prepare == TX_QUEUED)) {
+ goto out;
+@@ -1466,7 +1466,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+ headroom = max_t(int, 0, headroom);
+
+ if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
+- dev_kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ rcu_read_unlock();
+ return;
+ }
+@@ -2060,8 +2060,10 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ head_need += IEEE80211_ENCRYPT_HEADROOM;
+ head_need += local->tx_headroom;
+ head_need = max_t(int, 0, head_need);
+- if (ieee80211_skb_resize(sdata, skb, head_need, true))
+- goto fail;
++ if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
++ ieee80211_free_txskb(&local->hw, skb);
++ return NETDEV_TX_OK;
++ }
+ }
+
+ if (encaps_data) {
+@@ -2196,7 +2198,7 @@ void ieee80211_tx_pending(unsigned long data)
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (WARN_ON(!info->control.vif)) {
+- kfree_skb(skb);
++ ieee80211_free_txskb(&local->hw, skb);
+ continue;
+ }
+
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index a35b8e5..d1988cf 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1025,6 +1025,16 @@ static void xs_udp_data_ready(struct sock *sk, int len)
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+
++/*
++ * Helper function to force a TCP close if the server is sending
++ * junk and/or it has put us in CLOSE_WAIT
++ */
++static void xs_tcp_force_close(struct rpc_xprt *xprt)
++{
++ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
++ xprt_force_disconnect(xprt);
++}
++
+ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+ {
+ struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+@@ -1051,7 +1061,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
+ /* Sanity check of the record length */
+ if (unlikely(transport->tcp_reclen < 8)) {
+ dprintk("RPC: invalid TCP record fragment length\n");
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ return;
+ }
+ dprintk("RPC: reading TCP record fragment of length %d\n",
+@@ -1132,7 +1142,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ break;
+ default:
+ dprintk("RPC: invalid request message type\n");
+- xprt_force_disconnect(&transport->xprt);
++ xs_tcp_force_close(&transport->xprt);
+ }
+ xs_tcp_check_fraghdr(transport);
+ }
+@@ -1455,6 +1465,8 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
+ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+ {
+ smp_mb__before_clear_bit();
++ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
++ clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ clear_bit(XPRT_CLOSING, &xprt->state);
+ smp_mb__after_clear_bit();
+@@ -1512,8 +1524,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ break;
+ case TCP_CLOSE_WAIT:
+ /* The server initiated a shutdown of the socket */
+- xprt_force_disconnect(xprt);
+ xprt->connect_cookie++;
++ xs_tcp_force_close(xprt);
+ case TCP_CLOSING:
+ /*
+ * If the server closed down the connection, make sure that
+@@ -2199,8 +2211,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+- set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+- xprt_force_disconnect(xprt);
++ xs_tcp_force_close(xprt);
+ break;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
+index c3f69ae..4d908d1 100644
+--- a/scripts/Makefile.fwinst
++++ b/scripts/Makefile.fwinst
+@@ -27,7 +27,7 @@ endif
+ installed-mod-fw := $(addprefix $(INSTALL_FW_PATH)/,$(mod-fw))
+
+ installed-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-all))
+-installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/.
++installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/./
+
+ # Workaround for make < 3.81, where .SECONDEXPANSION doesn't work.
+ PHONY += $(INSTALL_FW_PATH)/$$(%) install-all-dirs
+@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
+ $(installed-fw-dirs):
+ $(call cmd,mkdir)
+
+-$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
++$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %)
+ $(call cmd,install)
+
+ PHONY += __fw_install __fw_modinst FORCE
+diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
+index 9473fca..8b0f996 100644
+--- a/sound/pci/ac97/ac97_codec.c
++++ b/sound/pci/ac97/ac97_codec.c
+@@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne
+ tmp.index = ac97->num;
+ kctl = snd_ctl_new1(&tmp, ac97);
+ }
++ if (!kctl)
++ return -ENOMEM;
+ if (reg >= AC97_PHONE && reg <= AC97_PCM)
+ set_tlv_db_scale(kctl, db_scale_5bit_12db_max);
+ else
+diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
+index 7549240..a78fdf4 100644
+--- a/sound/pci/emu10k1/emu10k1_main.c
++++ b/sound/pci/emu10k1/emu10k1_main.c
+@@ -1416,6 +1416,15 @@ static struct snd_emu_chip_details emu_chip_details[] = {
+ .ca0108_chip = 1,
+ .spk71 = 1,
+ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */
++ /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */
++ /* This is MAEM8986, 0202 is MAEM8980 */
++ {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102,
++ .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]",
++ .id = "EMU1010",
++ .emu10k2_chip = 1,
++ .ca0108_chip = 1,
++ .spk71 = 1,
++ .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */
+ /* Tested by James@superbug.co.uk 8th July 2005. */
+ /* This is MAEM8810, 0202 is MAEM8820 */
+ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102,
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 12a9432..a5dc746 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -487,6 +487,7 @@ struct azx {
+
+ /* VGA-switcheroo setup */
+ unsigned int use_vga_switcheroo:1;
++ unsigned int vga_switcheroo_registered:1;
+ unsigned int init_failed:1; /* delayed init failed */
+ unsigned int disabled:1; /* disabled by VGA-switcher */
+
+@@ -2135,9 +2136,12 @@ static unsigned int azx_get_position(struct azx *chip,
+ if (delay < 0)
+ delay += azx_dev->bufsize;
+ if (delay >= azx_dev->period_bytes) {
+- snd_printdd("delay %d > period_bytes %d\n",
+- delay, azx_dev->period_bytes);
+- delay = 0; /* something is wrong */
++ snd_printk(KERN_WARNING SFX
++ "Unstable LPIB (%d >= %d); "
++ "disabling LPIB delay counting\n",
++ delay, azx_dev->period_bytes);
++ delay = 0;
++ chip->driver_caps &= ~AZX_DCAPS_COUNT_LPIB_DELAY;
+ }
+ azx_dev->substream->runtime->delay =
+ bytes_to_frames(azx_dev->substream->runtime, delay);
+@@ -2556,7 +2560,9 @@ static void azx_vs_set_state(struct pci_dev *pci,
+ if (disabled) {
+ azx_suspend(&pci->dev);
+ chip->disabled = true;
+- snd_hda_lock_devices(chip->bus);
++ if (snd_hda_lock_devices(chip->bus))
++ snd_printk(KERN_WARNING SFX
++ "Cannot lock devices!\n");
+ } else {
+ snd_hda_unlock_devices(chip->bus);
+ chip->disabled = false;
+@@ -2599,14 +2605,20 @@ static const struct vga_switcheroo_client_ops azx_vs_ops = {
+
+ static int __devinit register_vga_switcheroo(struct azx *chip)
+ {
++ int err;
++
+ if (!chip->use_vga_switcheroo)
+ return 0;
+ /* FIXME: currently only handling DIS controller
+ * is there any machine with two switchable HDMI audio controllers?
+ */
+- return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
++ err = vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
+ VGA_SWITCHEROO_DIS,
+ chip->bus != NULL);
++ if (err < 0)
++ return err;
++ chip->vga_switcheroo_registered = 1;
++ return 0;
+ }
+ #else
+ #define init_vga_switcheroo(chip) /* NOP */
+@@ -2626,7 +2638,8 @@ static int azx_free(struct azx *chip)
+ if (use_vga_switcheroo(chip)) {
+ if (chip->disabled && chip->bus)
+ snd_hda_unlock_devices(chip->bus);
+- vga_switcheroo_unregister_client(chip->pci);
++ if (chip->vga_switcheroo_registered)
++ vga_switcheroo_unregister_client(chip->pci);
+ }
+
+ if (chip->initialized) {
+@@ -2974,14 +2987,6 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
+ }
+
+ ok:
+- err = register_vga_switcheroo(chip);
+- if (err < 0) {
+- snd_printk(KERN_ERR SFX
+- "Error registering VGA-switcheroo client\n");
+- azx_free(chip);
+- return err;
+- }
+-
+ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
+ if (err < 0) {
+ snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
+@@ -3208,6 +3213,13 @@ static int __devinit azx_probe(struct pci_dev *pci,
+
+ pci_set_drvdata(pci, card);
+
++ err = register_vga_switcheroo(chip);
++ if (err < 0) {
++ snd_printk(KERN_ERR SFX
++ "Error registering VGA-switcheroo client\n");
++ goto out_free;
++ }
++
+ dev++;
+ return 0;
+
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 0c4c1a6..cc31346 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -1417,7 +1417,7 @@ static int patch_cs420x(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+@@ -1974,7 +1974,7 @@ static int patch_cs4210(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+@@ -1999,7 +1999,7 @@ static int patch_cs4213(struct hda_codec *codec)
+ return 0;
+
+ error:
+- kfree(codec->spec);
++ cs_free(codec);
+ codec->spec = NULL;
+ return err;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 56a3eef..155cbd2 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -611,6 +611,8 @@ static void alc_line_automute(struct hda_codec *codec)
+ {
+ struct alc_spec *spec = codec->spec;
+
++ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
++ return;
+ /* check LO jack only when it's different from HP */
+ if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0])
+ return;
+@@ -2627,8 +2629,10 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
+ return "PCM";
+ break;
+ }
+- if (snd_BUG_ON(ch >= ARRAY_SIZE(channel_name)))
++ if (ch >= ARRAY_SIZE(channel_name)) {
++ snd_BUG();
+ return "PCM";
++ }
+
+ return channel_name[ch];
+ }
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 4b4072f..4c404a0 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -118,6 +118,8 @@ enum {
+ };
+
+ struct via_spec {
++ struct hda_gen_spec gen;
++
+ /* codec parameterization */
+ const struct snd_kcontrol_new *mixers[6];
+ unsigned int num_mixers;
+@@ -246,6 +248,7 @@ static struct via_spec * via_new_spec(struct hda_codec *codec)
+ /* VT1708BCE & VT1708S are almost same */
+ if (spec->codec_type == VT1708BCE)
+ spec->codec_type = VT1708S;
++ snd_hda_gen_init(&spec->gen);
+ return spec;
+ }
+
+@@ -1628,6 +1631,7 @@ static void via_free(struct hda_codec *codec)
+ vt1708_stop_hp_work(spec);
+ kfree(spec->bind_cap_vol);
+ kfree(spec->bind_cap_sw);
++ snd_hda_gen_free(&spec->gen);
+ kfree(spec);
+ }
+
+diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
+index 32682c1..c8bff6d 100644
+--- a/sound/soc/codecs/wm2200.c
++++ b/sound/soc/codecs/wm2200.c
+@@ -1028,7 +1028,7 @@ SOC_DOUBLE_R_TLV("OUT2 Digital Volume", WM2200_DAC_DIGITAL_VOLUME_2L,
+ WM2200_DAC_DIGITAL_VOLUME_2R, WM2200_OUT2L_VOL_SHIFT, 0x9f, 0,
+ digital_tlv),
+ SOC_DOUBLE("OUT2 Switch", WM2200_PDM_1, WM2200_SPK1L_MUTE_SHIFT,
+- WM2200_SPK1R_MUTE_SHIFT, 1, 0),
++ WM2200_SPK1R_MUTE_SHIFT, 1, 1),
+ };
+
+ WM2200_MIXER_ENUMS(OUT1L, WM2200_OUT1LMIX_INPUT_1_SOURCE);
+@@ -2091,6 +2091,7 @@ static __devinit int wm2200_i2c_probe(struct i2c_client *i2c,
+
+ switch (wm2200->rev) {
+ case 0:
++ case 1:
+ ret = regmap_register_patch(wm2200->regmap, wm2200_reva_patch,
+ ARRAY_SIZE(wm2200_reva_patch));
+ if (ret != 0) {
+diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
+index 9d93793..f8fba57 100644
+--- a/sound/soc/omap/omap-abe-twl6040.c
++++ b/sound/soc/omap/omap-abe-twl6040.c
+@@ -190,7 +190,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
+ twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk");
+ twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk");
+ twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out");
+- twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vinrator");
++ twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator");
+ twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
+ twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic");
+ twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic");
+diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
+index 0540408..1bb0d58c 100644
+--- a/sound/soc/sh/fsi.c
++++ b/sound/soc/sh/fsi.c
+@@ -20,6 +20,7 @@
+ #include <linux/sh_dma.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/workqueue.h>
+ #include <sound/soc.h>
+ #include <sound/sh_fsi.h>
+
+@@ -223,7 +224,7 @@ struct fsi_stream {
+ */
+ struct dma_chan *chan;
+ struct sh_dmae_slave slave; /* see fsi_handler_init() */
+- struct tasklet_struct tasklet;
++ struct work_struct work;
+ dma_addr_t dma;
+ };
+
+@@ -1085,9 +1086,9 @@ static void fsi_dma_complete(void *data)
+ snd_pcm_period_elapsed(io->substream);
+ }
+
+-static void fsi_dma_do_tasklet(unsigned long data)
++static void fsi_dma_do_work(struct work_struct *work)
+ {
+- struct fsi_stream *io = (struct fsi_stream *)data;
++ struct fsi_stream *io = container_of(work, struct fsi_stream, work);
+ struct fsi_priv *fsi = fsi_stream_to_priv(io);
+ struct snd_soc_dai *dai;
+ struct dma_async_tx_descriptor *desc;
+@@ -1129,7 +1130,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
+ * FIXME
+ *
+ * In DMAEngine case, codec and FSI cannot be started simultaneously
+- * since FSI is using tasklet.
++ * since FSI is using the scheduler work queue.
+ * Therefore, in capture case, probably FSI FIFO will have got
+ * overflow error in this point.
+ * in that case, DMA cannot start transfer until error was cleared.
+@@ -1153,7 +1154,7 @@ static bool fsi_dma_filter(struct dma_chan *chan, void *param)
+
+ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
+ {
+- tasklet_schedule(&io->tasklet);
++ schedule_work(&io->work);
+
+ return 0;
+ }
+@@ -1195,14 +1196,14 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct dev
+ return fsi_stream_probe(fsi, dev);
+ }
+
+- tasklet_init(&io->tasklet, fsi_dma_do_tasklet, (unsigned long)io);
++ INIT_WORK(&io->work, fsi_dma_do_work);
+
+ return 0;
+ }
+
+ static int fsi_dma_remove(struct fsi_priv *fsi, struct fsi_stream *io)
+ {
+- tasklet_kill(&io->tasklet);
++ cancel_work_sync(&io->work);
+
+ fsi_stream_stop(fsi, io);
+
diff --git a/patches.rpmify/x86-kbuild-archscripts-depends-on-scripts_basic b/patches.rpmify/x86-kbuild-archscripts-depends-on-scripts_basic
deleted file mode 100644
index 84e4d8699c..0000000000
--- a/patches.rpmify/x86-kbuild-archscripts-depends-on-scripts_basic
+++ /dev/null
@@ -1,43 +0,0 @@
-From: Jeff Mahoney <jeffm@suse.com>
-Subject: x86/kbuild: archscripts depends on scripts_basic
-Patch-mainline: Submitted 20 Sep 2012
-
-While building the SUSE kernel packages, which build the scripts,
-make clean, and then build everything, we have been running into spurious
-build failures. We tracked them down to a simple dependency issue:
-
-$ make mrproper
- CLEAN arch/x86/tools
- CLEAN scripts/basic
-$ cp patches/config/x86_64/desktop .config
-$ make archscripts
- HOSTCC arch/x86/tools/relocs
-/bin/sh: scripts/basic/fixdep: No such file or directory
-make[3]: *** [arch/x86/tools/relocs] Error 1
-make[2]: *** [archscripts] Error 2
-make[1]: *** [sub-make] Error 2
-make: *** [all] Error 2
-
-This was introduced by commit
-6520fe55 (x86, realmode: 16-bit real-mode code support for relocs),
-which added the archscripts dependency to archprepare.
-
-This patch adds the scripts_basic dependency to the x86 archscripts.
-
-Signed-off-by: Jeff Mahoney <jeffm@suse.com>
----
- arch/x86/Makefile | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/x86/Makefile
-+++ b/arch/x86/Makefile
-@@ -142,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-a
- KBUILD_CFLAGS += $(mflags-y)
- KBUILD_AFLAGS += $(mflags-y)
-
--archscripts:
-+archscripts: scripts_basic
- $(Q)$(MAKE) $(build)=arch/x86/tools relocs
-
- ###
-
diff --git a/patches.xen/xen3-fixup-xen b/patches.xen/xen3-fixup-xen
index 182056fb9f..865911080a 100644
--- a/patches.xen/xen3-fixup-xen
+++ b/patches.xen/xen3-fixup-xen
@@ -2657,12 +2657,13 @@ Acked-by: jbeulich@suse.com
+EXPORT_SYMBOL_GPL(xenbus_for_each_backend);
--- head.orig/drivers/xen/xenbus/xenbus_xs.c 2012-10-04 11:30:42.000000000 +0200
+++ head/drivers/xen/xenbus/xenbus_xs.c 2012-01-20 14:18:49.000000000 +0100
-@@ -45,9 +45,16 @@
+@@ -45,10 +45,17 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
-#include <xen/xen.h>
#include "xenbus_comms.h"
+ #include <asm/xen/hypervisor.h>
+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
+#include <xen/platform-compat.h>
@@ -2760,8 +2761,8 @@ Acked-by: jbeulich@suse.com
return ret;
}
-@@ -620,19 +629,19 @@ static struct xenbus_watch *find_watch(c
-
+@@ -620,22 +629,22 @@ static struct xenbus_watch *find_watch(c
+ }
static void xs_reset_watches(void)
{
+#ifndef CONFIG_XEN
@@ -2770,6 +2771,9 @@ Acked-by: jbeulich@suse.com
- if (!xen_hvm_domain())
- return;
-
+ if (xen_strict_xenbus_quirk())
+ return;
+
err = xenbus_scanf(XBT_NIL, "control",
- "platform-feature-xs_reset_watches", "%d", &supported);
+ "platform-feature-xs_reset_watches", "%d",
diff --git a/patches.xen/xen3-patch-2.6.23 b/patches.xen/xen3-patch-2.6.23
index ba8d2979d1..0ac75a61de 100644
--- a/patches.xen/xen3-patch-2.6.23
+++ b/patches.xen/xen3-patch-2.6.23
@@ -4039,14 +4039,14 @@ Acked-by: jbeulich@novell.com
--- head.orig/drivers/xen/xenbus/xenbus_xs.c 2011-06-30 16:06:45.000000000 +0200
+++ head/drivers/xen/xenbus/xenbus_xs.c 2012-01-20 14:47:34.000000000 +0100
@@ -626,7 +626,7 @@ static struct xenbus_watch *find_watch(c
-
+ }
static void xs_reset_watches(void)
{
-#ifndef CONFIG_XEN
+#ifdef MODULE
int err, supported = 0;
- err = xenbus_scanf(XBT_NIL, "control",
+ if (xen_strict_xenbus_quirk())
@@ -677,7 +677,9 @@ void unregister_xenbus_watch(struct xenb
char token[sizeof(watch) * 2 + 1];
int err;
diff --git a/patches.xen/xen3-patch-3.2 b/patches.xen/xen3-patch-3.2
index 84ee728e42..b3fb857321 100644
--- a/patches.xen/xen3-patch-3.2
+++ b/patches.xen/xen3-patch-3.2
@@ -4011,8 +4011,8 @@ Acked-by: jbeulich@suse.com
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include "xenbus_comms.h"
+ #include <asm/xen/hypervisor.h>
- #ifdef HAVE_XEN_PLATFORM_COMPAT_H
--- head.orig/include/xen/balloon.h 2012-02-03 13:34:56.000000000 +0100
+++ head/include/xen/balloon.h 2012-02-03 13:44:44.000000000 +0100
@@ -82,8 +82,9 @@ extern struct balloon_stats balloon_stat
diff --git a/patches.xen/xen3-patch-3.6 b/patches.xen/xen3-patch-3.6
index 40f20f4f97..6675467471 100644
--- a/patches.xen/xen3-patch-3.6
+++ b/patches.xen/xen3-patch-3.6
@@ -1649,7 +1649,7 @@ Acked-by: jbeulich@suse.com
--- head.orig/drivers/xen/xenbus/xenbus_xs.c 2012-03-12 16:18:49.000000000 +0100
+++ head/drivers/xen/xenbus/xenbus_xs.c 2012-08-20 14:37:06.000000000 +0200
@@ -631,9 +631,14 @@ static struct xenbus_watch *find_watch(c
-
+ }
static void xs_reset_watches(void)
{
-#ifdef MODULE
@@ -1661,9 +1661,9 @@ Acked-by: jbeulich@suse.com
+ return;
+#endif
+
- err = xenbus_scanf(XBT_NIL, "control",
- "platform-feature-xs_reset_watches", "%d",
- &supported);
+ if (xen_strict_xenbus_quirk())
+ return;
+
--- head.orig/include/xen/interface/platform.h 2012-10-04 13:09:12.000000000 +0200
+++ head/include/xen/interface/platform.h 2012-08-20 14:37:06.000000000 +0200
@@ -482,6 +482,7 @@ DEFINE_XEN_GUEST_HANDLE(xenpf_pcpu_versi
diff --git a/series.conf b/series.conf
index 260cc85b1e..c71c816659 100644
--- a/series.conf
+++ b/series.conf
@@ -29,6 +29,7 @@
########################################################
patches.kernel.org/patch-3.6.1
patches.kernel.org/patch-3.6.1-2
+ patches.kernel.org/patch-3.6.2-3
########################################################
# Build fixes that apply to the vanilla kernel too.
@@ -41,7 +42,6 @@
patches.rpmify/geode-depends-on-x86_32
patches.rpmify/ubifs-remove-DATE-TIME.patch
patches.rpmify/chipidea-clean-up-dependencies
- patches.rpmify/x86-kbuild-archscripts-depends-on-scripts_basic
########################################################
# kABI consistency patches
@@ -263,7 +263,6 @@
patches.suse/msft-hv-0402-hyperv-Report-actual-status-in-receive-completion-pa.patch
patches.suse/msft-hv-0403-hyperv-Add-buffer-for-extended-info-after-the-RNDIS-.patch
patches.suse/suse-hv-identify-virtual-pc-in-ata_piix.patch
- patches.fixes/hv-storvsc-reset-wait.patch
########################################################
# Networking, IPv6