author    Olaf Hering <ohering@suse.de>  2019-05-14 23:26:16 +0200
committer Olaf Hering <ohering@suse.de>  2019-05-14 23:26:16 +0200
commit    6686fbd5b417ca251db2daa3563a6e74862bb3c0 (patch)
tree      c3de2de6c96b18a5415d0be530ae22d269808f61
parent    ee22ab83ba291d6366b410e01d5d85c8a02a291b (diff)
parent    80d3d27d4fd41e74500b4809b067d969be9a4419 (diff)
Merge remote-tracking branch 'kerncvs/SLE15' into SLE15-AZURE
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-system-cpu | 1
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet.txt | 2
-rw-r--r--  Documentation/index.rst | 1
-rw-r--r--  Documentation/x86/conf.py | 10
-rw-r--r--  Documentation/x86/index.rst | 8
-rw-r--r--  Documentation/x86/mds.rst | 224
-rw-r--r--  arch/x86/entry/common.c | 3
-rw-r--r--  arch/x86/events/intel/core.c | 18
-rw-r--r--  arch/x86/events/intel/cstate.c | 4
-rw-r--r--  arch/x86/events/msr.c | 4
-rw-r--r--  arch/x86/include/asm/cpufeatures.h | 3
-rw-r--r--  arch/x86/include/asm/intel-family.h | 30
-rw-r--r--  arch/x86/include/asm/irqflags.h | 4
-rw-r--r--  arch/x86/include/asm/msr-index.h | 39
-rw-r--r--  arch/x86/include/asm/mwait.h | 6
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 50
-rw-r--r--  arch/x86/include/asm/processor.h | 6
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 132
-rw-r--r--  arch/x86/kernel/cpu/common.c | 134
-rw-r--r--  arch/x86/kernel/nmi.c | 4
-rw-r--r--  arch/x86/kernel/traps.c | 8
-rw-r--r--  arch/x86/kernel/tsc.c | 2
-rw-r--r--  arch/x86/kvm/cpuid.c | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 3
-rw-r--r--  arch/x86/platform/atom/punit_atom_debug.c | 4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_bt.c | 2
-rw-r--r--  block/genhd.c | 50
-rw-r--r--  drivers/acpi/acpi_lpss.c | 2
-rw-r--r--  drivers/acpi/x86/utils.c | 2
-rw-r--r--  drivers/base/cpu.c | 8
-rw-r--r--  drivers/block/DAC960.c | 1
-rw-r--r--  drivers/block/amiflop.c | 1
-rw-r--r--  drivers/block/ataflop.c | 1
-rw-r--r--  drivers/block/floppy.c | 1
-rw-r--r--  drivers/block/paride/pcd.c | 1
-rw-r--r--  drivers/block/paride/pd.c | 1
-rw-r--r--  drivers/block/paride/pf.c | 1
-rw-r--r--  drivers/block/swim.c | 1
-rw-r--r--  drivers/block/swim3.c | 1
-rw-r--r--  drivers/block/virtio_blk.c | 2
-rw-r--r--  drivers/block/xsysace.c | 1
-rw-r--r--  drivers/cdrom/gdrom.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3288.c | 4
-rw-r--r--  drivers/clk/rockchip/clk-rk3328.c | 18
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 4
-rw-r--r--  drivers/dma/dma-axi-dmac.c | 2
-rw-r--r--  drivers/dma/tegra210-adma.c | 1
-rw-r--r--  drivers/edac/pnd2_edac.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 14
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 2
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r--  drivers/ide/ide-cd.c | 1
-rw-r--r--  drivers/ide/ide-cd_ioctl.c | 5
-rw-r--r--  drivers/ide/ide-gd.c | 6
-rw-r--r--  drivers/idle/intel_idle.c | 18
-rw-r--r--  drivers/iommu/intel-iommu.c | 18
-rw-r--r--  drivers/media/i2c/ov2659.c | 2
-rw-r--r--  drivers/media/pci/cx18/cx18-fileops.c | 2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-dvb.c | 5
-rw-r--r--  drivers/media/pci/ivtv/ivtv-fileops.c | 2
-rw-r--r--  drivers/media/pci/tw5864/tw5864-video.c | 4
-rw-r--r--  drivers/media/platform/davinci/isif.c | 9
-rw-r--r--  drivers/media/platform/vivid/vivid-vid-cap.c | 2
-rw-r--r--  drivers/media/radio/wl128x/fmdrv_common.c | 12
-rw-r--r--  drivers/media/rc/serial_ir.c | 9
-rw-r--r--  drivers/media/usb/pvrusb2/pvrusb2-hdw.c | 2
-rw-r--r--  drivers/media/usb/pvrusb2/pvrusb2-hdw.h | 1
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 2
-rw-r--r--  drivers/mtd/devices/docg3.c | 7
-rw-r--r--  drivers/mtd/mtdpart.c | 2
-rw-r--r--  drivers/mtd/spi-nor/intel-spi.c | 8
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 62
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r--  drivers/pci/pci-mid.c | 4
-rw-r--r--  drivers/phy/allwinner/phy-sun4i-usb.c | 4
-rw-r--r--  drivers/platform/x86/alienware-wmi.c | 2
-rw-r--r--  drivers/platform/x86/dell-rbtn.c | 2
-rw-r--r--  drivers/platform/x86/intel_int0002_vgpio.c | 2
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c | 4
-rw-r--r--  drivers/platform/x86/intel_pmc_ipc.c | 6
-rw-r--r--  drivers/platform/x86/intel_punit_ipc.c | 8
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 8
-rw-r--r--  drivers/powercap/intel_rapl.c | 10
-rw-r--r--  drivers/pwm/core.c | 10
-rw-r--r--  drivers/pwm/pwm-meson.c | 34
-rw-r--r--  drivers/pwm/pwm-tiehrpwm.c | 2
-rw-r--r--  drivers/pwm/sysfs.c | 14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 3
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/sr.c | 3
-rw-r--r--  drivers/spi/spi-armada-3700.c | 5
-rw-r--r--  drivers/ssb/bridge_pcmcia_80211.c | 9
-rw-r--r--  drivers/thermal/intel_soc_dts_thermal.c | 2
-rw-r--r--  drivers/tty/pty.c | 7
-rw-r--r--  drivers/tty/serial/sc16is7xx.c | 14
-rw-r--r--  drivers/tty/tty_io.c | 3
-rw-r--r--  drivers/usb/dwc2/gadget.c | 1
-rw-r--r--  drivers/usb/storage/scsiglue.c | 26
-rw-r--r--  drivers/vfio/mdev/mdev_core.c | 11
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 23
-rw-r--r--  drivers/vhost/vsock.c | 22
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c | 4
-rw-r--r--  drivers/virtio/virtio_pci_common.c | 8
-rw-r--r--  drivers/virtio/virtio_ring.c | 2
-rw-r--r--  fs/btrfs/backref.c | 9
-rw-r--r--  fs/btrfs/delayed-ref.c | 475
-rw-r--r--  fs/btrfs/delayed-ref.h | 163
-rw-r--r--  fs/btrfs/disk-io.c | 22
-rw-r--r--  fs/btrfs/extent-tree.c | 410
-rw-r--r--  fs/proc/base.c | 6
-rw-r--r--  fs/proc/kcore.c | 23
-rw-r--r--  include/linux/bitops.h | 21
-rw-r--r--  include/linux/bits.h | 26
-rw-r--r--  include/linux/cpu.h | 2
-rw-r--r--  include/linux/genhd.h | 7
-rw-r--r--  include/linux/kernel.h | 4
-rw-r--r--  include/linux/platform_data/elm.h | 2
-rw-r--r--  include/linux/pwm.h | 5
-rw-r--r--  include/linux/virtio_ring.h | 2
-rw-r--r--  include/net/bluetooth/hci_core.h | 3
-rw-r--r--  include/sound/core.h | 16
-rw-r--r--  include/sound/seq_kernel.h | 5
-rw-r--r--  include/trace/events/btrfs.h | 13
-rw-r--r--  include/uapi/drm/i915_drm.h | 2
-rw-r--r--  kernel/sysctl.c | 16
-rw-r--r--  net/bluetooth/hci_conn.c | 8
-rw-r--r--  net/bluetooth/hidp/sock.c | 1
-rw-r--r--  net/rds/tcp.c | 2
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 29
-rw-r--r--  net/vmw_vsock/virtio_transport_common.c | 22
-rw-r--r--  sound/core/init.c | 22
-rw-r--r--  sound/core/oss/mixer_oss.c | 16
-rw-r--r--  sound/core/seq/oss/seq_oss_device.h | 10
-rw-r--r--  sound/core/seq/oss/seq_oss_rw.c | 11
-rw-r--r--  sound/core/seq/oss/seq_oss_writeq.c | 2
-rw-r--r--  sound/core/seq/seq_clientmgr.c | 99
-rw-r--r--  sound/core/seq/seq_clientmgr.h | 8
-rw-r--r--  sound/core/sound.c | 5
-rw-r--r--  sound/last.c | 10
-rw-r--r--  sound/pci/emu10k1/emu10k1_main.c | 16
-rw-r--r--  sound/soc/codecs/cs4270.c | 1
-rw-r--r--  sound/soc/codecs/hdmi-codec.c | 118
-rw-r--r--  sound/soc/codecs/nau8810.c | 4
-rw-r--r--  sound/soc/codecs/nau8824.c | 46
-rw-r--r--  sound/soc/codecs/tlv320aic32x4.c | 2
-rw-r--r--  sound/soc/codecs/wm_adsp.c | 11
-rw-r--r--  sound/soc/intel/common/sst-firmware.c | 8
-rw-r--r--  sound/soc/samsung/odroid.c | 4
-rw-r--r--  sound/soc/soc-pcm.c | 11
-rw-r--r--  tools/power/x86/turbostat/Makefile | 2
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 46
156 files changed, 1963 insertions(+), 1106 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 6cae60929cb6..1afba2acce34 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -380,6 +380,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/l1tf
+ /sys/devices/system/cpu/vulnerabilities/mds
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
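The new mds entry can be read like any other attribute in that directory. A minimal user-space sketch (illustration only, not part of this patch):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mds", "r");

            if (!f) {
                    perror("fopen");    /* older kernel, or file not exposed */
                    return 1;
            }
            if (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* e.g. "Mitigation: Clear CPU buffers; SMT vulnerable" */
            fclose(f);
            return 0;
    }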
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b557af2832a8..9fb9ea189121 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2209,6 +2209,30 @@
Format: <first>,<last>
Specifies range of consoles to be captured by the MDA.
+ mds= [X86,INTEL]
+ Control mitigation for the Micro-architectural Data
+ Sampling (MDS) vulnerability.
+
+ Certain CPUs are vulnerable to an exploit against CPU
+ internal buffers which can forward information to a
+ disclosure gadget under certain conditions.
+
+ In vulnerable processors, the speculatively
+ forwarded data can be used in a cache side channel
+ attack, to access data to which the attacker does
+ not have direct access.
+
+ This parameter controls the MDS mitigation. The
+ options are:
+
+ full - Enable MDS mitigation on vulnerable CPUs
+ full,nosmt - Enable MDS mitigation and disable
+ SMT on vulnerable CPUs
+ off - Unconditionally disable MDS mitigation
+
+ Not specifying this option is equivalent to
+ mds=full.
+
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
@@ -2374,6 +2398,7 @@
spectre_v2_user=off [X86]
spec_store_bypass_disable=off [X86,PPC]
l1tf=off [X86]
+ mds=off [X86]
auto (default)
Mitigate all CPU vulnerabilities, but leave SMT
@@ -2388,6 +2413,7 @@
if needed. This is for users who always want to
be fully mitigated, even if it means losing SMT.
Equivalent to: l1tf=flush,nosmt [X86]
+ mds=full,nosmt [X86]
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
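For reference, the mitigations= aliases above are resolved through two helpers that the new MDS code later in this diff consults; their upstream form in kernel/cpu.c is essentially the following (condensed sketch; that file is not part of this merge as shown here):

    enum cpu_mitigations {
            CPU_MITIGATIONS_OFF,
            CPU_MITIGATIONS_AUTO,
            CPU_MITIGATIONS_AUTO_NOSMT,
    };

    static enum cpu_mitigations cpu_mitigations __ro_after_init =
            CPU_MITIGATIONS_AUTO;

    /* true when mitigations=off was given on the command line */
    bool cpu_mitigations_off(void)
    {
            return cpu_mitigations == CPU_MITIGATIONS_OFF;
    }

    /* true when mitigations=auto,nosmt was given on the command line */
    bool cpu_mitigations_auto_nosmt(void)
    {
            return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
    }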
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index d4abe9a98109..905c87565d24 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -29,7 +29,7 @@ The following properties are common to the Ethernet controllers:
* "smii"
* "xgmii"
* "trgmii"
- * "2000base-x",
+ * "1000base-x",
* "2500base-x",
* "rxaui"
* "xaui"
diff --git a/Documentation/index.rst b/Documentation/index.rst
index bc67dbf76eb0..102bb6987a9e 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -34,6 +34,7 @@ the kernel interface as seen by application developers.
:maxdepth: 2
userspace-api/index
+ x86/index
Introduction to kernel development
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
new file mode 100644
index 000000000000..33c5c3142e20
--- /dev/null
+++ b/Documentation/x86/conf.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8; mode: python -*-
+
+project = "X86 architecture specific documentation"
+
+tags.add("subproject")
+
+latex_documents = [
+ ('index', 'x86.tex', project,
+ 'The kernel development community', 'manual'),
+]
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
new file mode 100644
index 000000000000..ef389dcf1b1d
--- /dev/null
+++ b/Documentation/x86/index.rst
@@ -0,0 +1,8 @@
+==========================
+x86 architecture specifics
+==========================
+
+.. toctree::
+ :maxdepth: 1
+
+ mds
diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
new file mode 100644
index 000000000000..7e5b1d2fc29c
--- /dev/null
+++ b/Documentation/x86/mds.rst
@@ -0,0 +1,224 @@
+Microarchitectural Data Sampling (MDS) mitigation
+=================================================
+
+.. _mds:
+
+Overview
+--------
+
+Microarchitectural Data Sampling (MDS) is a family of side channel attacks
+on internal buffers in Intel CPUs. The variants are:
+
+ - Microarchitectural Store Buffer Data Sampling (MSBDS) (CVE-2018-12126)
+ - Microarchitectural Fill Buffer Data Sampling (MFBDS) (CVE-2018-12130)
+ - Microarchitectural Load Port Data Sampling (MLPDS) (CVE-2018-12127)
+
+MSBDS leaks Store Buffer Entries which can be speculatively forwarded to a
+dependent load (store-to-load forwarding) as an optimization. The forward
+can also happen to a faulting or assisting load operation for a different
+memory address, which can be exploited under certain conditions. Store
+buffers are partitioned between Hyper-Threads so cross thread forwarding is
+not possible. But if a thread enters or exits a sleep state the store
+buffer is repartitioned which can expose data from one thread to the other.
+
+MFBDS leaks Fill Buffer Entries. Fill buffers are used internally to manage
+L1 miss situations and to hold data which is returned or sent in response
+to a memory or I/O operation. Fill buffers can forward data to a load
+operation and also write data to the cache. When the fill buffer is
+deallocated it can retain the stale data of the preceding operations which
+can then be forwarded to a faulting or assisting load operation, which can
+be exploited under certain conditions. Fill buffers are shared between
+Hyper-Threads so cross thread leakage is possible.
+
+MLPDS leaks Load Port Data. Load ports are used to perform load operations
+from memory or I/O. The received data is then forwarded to the register
+file or a subsequent operation. In some implementations the Load Port can
+contain stale data from a previous operation which can be forwarded to
+faulting or assisting loads under certain conditions, which again can be
+exploited eventually. Load ports are shared between Hyper-Threads so cross
+thread leakage is possible.
+
+
+Exposure assumptions
+--------------------
+
+It is assumed that attack code resides in user space or in a guest, with one
+exception. The rationale behind this assumption is that the code construct
+needed for exploiting MDS requires:
+
+ - to control the load to trigger a fault or assist
+
+ - to have a disclosure gadget which exposes the speculatively accessed
+ data for consumption through a side channel.
+
+ - to control the pointer through which the disclosure gadget exposes the
+ data
+
+The existence of such a construct in the kernel cannot be excluded with
+100% certainty, but the complexity involved makes it extremely unlikely.
+
+There is one exception, which is untrusted BPF. The functionality of
+untrusted BPF is limited, but it needs to be thoroughly investigated
+whether it can be used to create such a construct.
+
+
+Mitigation strategy
+-------------------
+
+All variants have the same mitigation strategy at least for the single CPU
+thread case (SMT off): Force the CPU to clear the affected buffers.
+
+This is achieved by using the otherwise unused and obsolete VERW
+instruction in combination with a microcode update. The microcode clears
+the affected CPU buffers when the VERW instruction is executed.
+
+For virtualization there are two ways to achieve CPU buffer clearing:
+either the modified VERW instruction or the L1D Flush command. The
+latter is issued when L1TF mitigation is enabled, so the extra VERW can
+be avoided. If the CPU is not affected by L1TF then VERW needs to be
+issued.
+
+If the VERW instruction with the supplied segment selector argument is
+executed on a CPU without the microcode update there is no side effect
+other than a small number of pointlessly wasted CPU cycles.
+
+This does not protect against cross Hyper-Thread attacks except for MSBDS
+which is only exploitable cross Hyper-thread when one of the Hyper-Threads
+enters a C-state.
+
+The kernel provides a function to invoke the buffer clearing:
+
+ mds_clear_cpu_buffers()
+
+The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
+(idle) transitions.
+
+As a special quirk to address virtualization scenarios where the host has
+the microcode updated, but the hypervisor does not (yet) expose the
+MD_CLEAR CPUID bit to guests, the kernel issues the VERW instruction in the
+hope that it might actually clear the buffers. The state is reflected
+accordingly.
+
+According to current knowledge, additional mitigations inside the kernel
+itself are not required because the necessary gadgets to expose the leaked
+data cannot be controlled in a way which allows exploitation from malicious
+user space or VM guests.
+
+Kernel internal mitigation modes
+--------------------------------
+
+ ======= ============================================================
+ off Mitigation is disabled. Either the CPU is not affected or
+ mds=off is supplied on the kernel command line
+
+ full Mitigation is enabled. CPU is affected and MD_CLEAR is
+ advertised in CPUID.
+
+ vmwerv Mitigation is enabled. CPU is affected and MD_CLEAR is not
+ advertised in CPUID. That is mainly for virtualization
+ scenarios where the host has the updated microcode but the
+ hypervisor does not expose MD_CLEAR in CPUID. It's a best
+ effort approach without guarantee.
+
+ full,nosmt The same as mds=full, with SMT disabled on vulnerable
+ CPUs. This is the complete mitigation.
+
+ ======= ============================================================
+
+If the CPU is affected and mds=off is not supplied on the kernel command
+line then the kernel selects the appropriate mitigation mode depending on
+the availability of the MD_CLEAR CPUID bit.
+
+Mitigation points
+-----------------
+
+1. Return to user space
+^^^^^^^^^^^^^^^^^^^^^^^
+
+ When transitioning from kernel to user space the CPU buffers are flushed
+ on affected CPUs when the mitigation is not disabled on the kernel
+ command line. The mitigation is enabled through the static key
+ mds_user_clear.
+
+ The mitigation is invoked in prepare_exit_to_usermode(), which covers
+ most of the kernel to user space transitions. There are a few
+ exceptions which do not invoke prepare_exit_to_usermode() on return to
+ user space. These exceptions use the paranoid exit code.
+
+ - Non Maskable Interrupt (NMI):
+
+ Access to sensitive data such as keys or credentials in the NMI
+ context is mostly theoretical: the CPU can do prefetching or execute a
+ misspeculated code path and thereby fetch data which might end up
+ leaking through a buffer.
+
+ But for mounting other attacks the kernel stack address of the task is
+ already valuable information. So in full mitigation mode, the NMI is
+ mitigated on the return from do_nmi() to provide almost complete
+ coverage.
+
+ - Double fault (#DF):
+
+ A double fault is usually fatal, but the ESPFIX workaround, which can
+ be triggered from user space through modify_ldt(2), is a recoverable
+ double fault. #DF uses the paranoid exit path, so explicit mitigation
+ in the double fault handler is required.
+
+ - Machine Check Exception (#MC):
+
+ Another corner case is a #MC which hits between the CPU buffer clear
+ invocation and the actual return to user. As this still is in kernel
+ space it takes the paranoid exit path which does not clear the CPU
+ buffers. So the #MC handler repopulates the buffers to some
+ extent. Machine checks are not reliably controllable and the window is
+ extremely small so mitigation would just tick a checkbox that this
+ theoretical corner case is covered. To keep the amount of special
+ cases small, ignore #MC.
+
+ - Debug Exception (#DB):
+
+ This takes the paranoid exit path only when the INT1 breakpoint is in
+ kernel space. #DB on a user space address takes the regular exit path,
+ so no extra mitigation required.
+
+
+2. C-State transition
+^^^^^^^^^^^^^^^^^^^^^
+
+ When a CPU goes idle and enters a C-State the CPU buffers need to be
+ cleared on affected CPUs when SMT is active. This addresses the
+ repartitioning of the store buffer when one of the Hyper-Threads enters
+ a C-State.
+
+ When SMT is inactive, i.e. either the CPU does not support it or all
+ sibling threads are offline, CPU buffer clearing is not required.
+
+ The idle clearing is enabled on CPUs which are only affected by MSBDS
+ and not by any other MDS variant. The other MDS variants cannot be
+ protected against cross Hyper-Thread attacks because the Fill Buffer and
+ the Load Ports are shared. So on CPUs affected by other variants, the
+ idle clearing would be a window dressing exercise and is therefore not
+ activated.
+
+ The invocation is controlled by the static key mds_idle_clear which is
+ switched depending on the chosen mitigation mode and the SMT state of
+ the system.
+
+ The buffer clear is only invoked before entering the C-State to
+ prevent stale data from the idling CPU from spilling to the
+ Hyper-Thread sibling after the store buffer is repartitioned and all
+ entries become available to the non-idle sibling.
+
+ When coming out of idle the store buffer is partitioned again so each
+ sibling has half of it available. The CPU returning from idle could
+ then be speculatively exposed to contents of the sibling. The buffers
+ are flushed either on exit to user space or on VMENTER so malicious
+ code in user space or the guest cannot speculatively access them.
+
+ The mitigation is hooked into all variants of halt()/mwait(), but does
+ not cover the legacy ACPI IO-Port mechanism because the ACPI idle driver
+ has been superseded by the intel_idle driver around 2010 and is
+ preferred on all affected CPUs which are expected to gain the MD_CLEAR
+ functionality in microcode. Aside from that, the IO-Port mechanism is a
+ legacy interface which is only used on older systems which are either
+ not affected or do not receive microcode updates anymore.
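VERW is unprivileged, so the idiom this document describes can be sketched even outside the kernel. A freestanding C illustration; the selector value is an assumption (0x2b is __USER_DS on x86-64 Linux, and per the text any valid writable data segment works):

    #include <stdint.h>

    /* sketch of the buffer-clear idiom; on CPUs without the MD_CLEAR
     * microcode VERW degenerates to a harmless descriptor check */
    static inline void clear_cpu_buffers(void)
    {
            static const uint16_t sel = 0x2b;   /* assumed selector */

            /* memory-operand form is required for the flush side effect;
             * "cc" because VERW modifies ZF */
            __asm__ __volatile__("verw %[sel]" : : [sel] "m" (sel) : "cc");
    }

The kernel's own version, mds_clear_cpu_buffers(), appears verbatim in the nospec-branch.h hunk later in this diff.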
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index b84b94ebdab4..60920243d703 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -30,6 +30,7 @@
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
+#include <asm/nospec-branch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
@@ -208,6 +209,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
#endif
user_enter_irqoff();
+
+ mds_user_clear_cpu_buffers();
}
#define SYSCALL_EXIT_WORK_FLAGS \
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index e597853ee884..9b7b777176bf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3871,11 +3871,11 @@ __init int intel_pmu_init(void)
pr_cont("Nehalem events, ");
break;
- case INTEL_FAM6_ATOM_PINEVIEW:
- case INTEL_FAM6_ATOM_LINCROFT:
- case INTEL_FAM6_ATOM_PENWELL:
- case INTEL_FAM6_ATOM_CLOVERVIEW:
- case INTEL_FAM6_ATOM_CEDARVIEW:
+ case INTEL_FAM6_ATOM_BONNELL:
+ case INTEL_FAM6_ATOM_BONNELL_MID:
+ case INTEL_FAM6_ATOM_SALTWELL:
+ case INTEL_FAM6_ATOM_SALTWELL_MID:
+ case INTEL_FAM6_ATOM_SALTWELL_TABLET:
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -3887,9 +3887,11 @@ __init int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
+ case INTEL_FAM6_ATOM_SILVERMONT_MID:
case INTEL_FAM6_ATOM_AIRMONT:
+ case INTEL_FAM6_ATOM_AIRMONT_MID:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3906,7 +3908,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 53da7f3aa6e0..37b48ec31c79 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -531,8 +531,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
- X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT, slm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_X, slm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT, slm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE, snb_cstates),
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index be0b1968d60a..68144a341903 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -61,8 +61,8 @@ static bool test_intel(int idx)
case INTEL_FAM6_BROADWELL_GT3E:
case INTEL_FAM6_BROADWELL_X:
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
case INTEL_FAM6_ATOM_AIRMONT:
if (idx == PERF_MSR_SMI)
return true;
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 995d1b8e36e6..e283be0d79bd 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -342,6 +342,7 @@
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
@@ -379,4 +380,6 @@
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index bd00c57a3b2c..2d3649de0fac 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -50,19 +50,23 @@
/* "Small Core" Processors (Atom) */
-#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
-#define INTEL_FAM6_ATOM_LINCROFT 0x26
-#define INTEL_FAM6_ATOM_PENWELL 0x27
-#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
-#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
-#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
-#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
-#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
-#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
-#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
-#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
+#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+#define INTEL_FAM6_ATOM_BONNELL_MID 0x26 /* Silverthorne, Lincroft */
+
+#define INTEL_FAM6_ATOM_SALTWELL 0x36 /* Cedarview */
+#define INTEL_FAM6_ATOM_SALTWELL_MID 0x27 /* Penwell */
+#define INTEL_FAM6_ATOM_SALTWELL_TABLET 0x35 /* Cloverview */
+
+#define INTEL_FAM6_ATOM_SILVERMONT 0x37 /* Bay Trail, Valleyview */
+#define INTEL_FAM6_ATOM_SILVERMONT_X 0x4D /* Avaton, Rangely */
+#define INTEL_FAM6_ATOM_SILVERMONT_MID 0x4A /* Merrifield */
+
+#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* Cherry Trail, Braswell */
+#define INTEL_FAM6_ATOM_AIRMONT_MID 0x5A /* Moorefield */
+
+#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
+#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
+#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index d937781e1047..f6e182f34501 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -5,6 +5,8 @@
#ifndef __ASSEMBLY__
+#include <asm/nospec-branch.h>
+
/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
@@ -50,11 +52,13 @@ static inline void native_irq_enable(void)
static inline __cpuidle void native_safe_halt(void)
{
+ mds_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}
static inline __cpuidle void native_halt(void)
{
+ mds_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
}
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 812fef2839e8..207dc68a9434 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_MSR_INDEX_H
#define _ASM_X86_MSR_INDEX_H
+#include <linux/bits.h>
+
/*
* CPU model specific register (MSR) numbers.
*
@@ -39,14 +41,14 @@
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
-#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
-#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
+#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
-#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -68,20 +70,25 @@
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
-#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
-#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
-#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
-#define ARCH_CAP_SSB_NO (1 << 4) /*
- * Not susceptible to Speculative Store Bypass
- * attack, so no Speculative Store Bypass
- * control required.
- */
+#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
+#define ARCH_CAP_SSB_NO BIT(4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
+#define ARCH_CAP_MDS_NO BIT(5) /*
+ * Not susceptible to
+ * Microarchitectural Data
+ * Sampling (MDS) vulnerabilities.
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
-#define L1D_FLUSH (1 << 0) /*
- * Writeback and invalidate the
- * L1 data cache.
- */
+#define L1D_FLUSH BIT(0) /*
+ * Writeback and invalidate the
+ * L1 data cache.
+ */
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
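The BIT() macro used above comes from include/linux/bits.h, which this merge adds but whose hunk is not shown in this section; its core, assumed from the upstream split of bitops.h, is essentially:

    #define BIT(nr)         (1UL << (nr))
    #define BIT_ULL(nr)     (1ULL << (nr))

The conversion is purely cosmetic: the macro expands to the same constants the open-coded shifts produced.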
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index e2cea24bfec9..634e144c0f75 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -40,6 +40,8 @@ static inline void __monitorx(const void *eax, unsigned long ecx,
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
+ mds_idle_clear_cpu_buffers();
+
/* "mwait %eax, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
@@ -74,6 +76,8 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx)
{
+ /* No MDS buffer clear as this is AMD/HYGON only */
+
/* "mwaitx %eax, %ebx, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xfb;"
:: "a" (eax), "b" (ebx), "c" (ecx));
@@ -81,6 +85,8 @@ static inline void __mwaitx(unsigned long eax, unsigned long ebx,
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
+ mds_idle_clear_cpu_buffers();
+
trace_hardirqs_on();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 849acf1cdd5f..279c4e44b112 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -352,6 +352,56 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+DECLARE_STATIC_KEY_FALSE(mds_user_clear);
+DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+
+#include <asm/segment.h>
+
+/**
+ * mds_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * This uses the otherwise unused and obsolete VERW instruction in
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+static inline void mds_clear_cpu_buffers(void)
+{
+ static const u16 ds = __KERNEL_DS;
+
+ /*
+ * Has to be the memory-operand variant because only that
+ * guarantees the CPU buffer flush functionality according to
+ * documentation. The register-operand variant does not.
+ * Works with any segment selector, but a valid writable
+ * data segment is the fastest variant.
+ *
+ * "cc" clobber is required because VERW modifies ZF.
+ */
+ asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
+}
+
+/**
+ * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_user_clear_cpu_buffers(void)
+{
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+}
+
+/**
+ * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+static inline void mds_idle_clear_cpu_buffers(void)
+{
+ if (static_branch_likely(&mds_idle_clear))
+ mds_clear_cpu_buffers();
+}
+
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index b7a43f614d1e..44a5f5ae801a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -984,4 +984,10 @@ enum l1tf_mitigations {
extern enum l1tf_mitigations l1tf_mitigation;
+enum mds_mitigations {
+ MDS_MITIGATION_OFF,
+ MDS_MITIGATION_FULL,
+ MDS_MITIGATION_VMWERV,
+};
+
#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 8a080b942074..c1023744d228 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -34,6 +34,7 @@
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
+static void __init mds_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
@@ -60,6 +61,13 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+/* Control MDS CPU buffer clear before returning to user space */
+DEFINE_STATIC_KEY_FALSE(mds_user_clear);
+EXPORT_SYMBOL_GPL(mds_user_clear);
+/* Control MDS CPU buffer clear before idling (halt, mwait) */
+DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
+EXPORT_SYMBOL_GPL(mds_idle_clear);
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -98,6 +106,10 @@ void __init check_bugs(void)
l1tf_select_mitigation();
+ mds_select_mitigation();
+
+ arch_smt_update();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -328,6 +340,61 @@ early_param("l1tf", l1tf_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "MDS: " fmt
+
+/* Default mitigation for MDS-affected CPUs */
+static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
+static bool mds_nosmt __ro_after_init = false;
+
+static const char * const mds_strings[] = {
+ [MDS_MITIGATION_OFF] = "Vulnerable",
+ [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
+ [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
+};
+
+static void __init mds_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+ mds_mitigation = MDS_MITIGATION_OFF;
+ return;
+ }
+
+ if (mds_mitigation == MDS_MITIGATION_FULL) {
+ if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VMWERV;
+
+ static_branch_enable(&mds_user_clear);
+
+ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+ }
+
+ pr_info("%s\n", mds_strings[mds_mitigation]);
+}
+
+static int __init mds_cmdline(char *str)
+{
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
+ return 0;
+
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "off"))
+ mds_mitigation = MDS_MITIGATION_OFF;
+ else if (!strcmp(str, "full"))
+ mds_mitigation = MDS_MITIGATION_FULL;
+ else if (!strcmp(str, "full,nosmt")) {
+ mds_mitigation = MDS_MITIGATION_FULL;
+ mds_nosmt = true;
+ }
+
+ return 0;
+}
+early_param("mds", mds_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -694,9 +761,6 @@ specv2_set_mode:
/* Set up IBPB and STIBP depending on the general spectre V2 command */
spectre_v2_user_select_mitigation(cmd);
-
- /* Enable STIBP if appropriate */
- arch_smt_update();
}
static void update_stibp_msr(void * __unused)
@@ -730,6 +794,31 @@ static void update_indir_branch_cond(void)
static_branch_disable(&switch_to_cond_stibp);
}
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+/* Update the static key controlling the MDS CPU buffer clear in idle */
+static void update_mds_branch_idle(void)
+{
+ /*
+ * Enable the idle clearing if SMT is active on CPUs which are
+ * affected only by MSBDS and not any other MDS variant.
+ *
+ * The other variants cannot be mitigated when SMT is enabled, so
+ * clearing the buffers on idle just to prevent the Store Buffer
+ * repartitioning leak would be a window dressing exercise.
+ */
+ if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
+ return;
+
+ if (sched_smt_active())
+ static_branch_enable(&mds_idle_clear);
+ else
+ static_branch_disable(&mds_idle_clear);
+}
+
+#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
+
void arch_smt_update(void)
{
/* Enhanced IBRS implies STIBP. No update required. */
@@ -750,6 +839,17 @@ void arch_smt_update(void)
break;
}
+ switch (mds_mitigation) {
+ case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_VMWERV:
+ if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
+ pr_warn_once(MDS_MSG_SMT);
+ update_mds_branch_idle();
+ break;
+ case MDS_MITIGATION_OFF:
+ break;
+ }
+
mutex_unlock(&spec_ctrl_mutex);
}
@@ -1093,6 +1193,23 @@ static ssize_t l1tf_show_state(char *buf)
}
#endif
+static ssize_t mds_show_state(char *buf)
+{
+ if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+ return sprintf(buf, "%s; SMT Host state unknown\n",
+ mds_strings[mds_mitigation]);
+ }
+
+ if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+ (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
+ sched_smt_active() ? "mitigated" : "disabled"));
+ }
+
+ return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
+ sched_smt_active() ? "vulnerable" : "disabled");
+}
+
static char *stibp_state(void)
{
if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
@@ -1159,6 +1276,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return l1tf_show_state(buf);
break;
+
+ case X86_BUG_MDS:
+ return mds_show_state(buf);
+
default:
break;
}
@@ -1190,4 +1311,9 @@ ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *b
{
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
+
+ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
+}
#endif
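mds_user_clear and mds_idle_clear follow the standard jump-label pattern: the condition is patched directly into the instruction stream (a nop or jmp) rather than loaded from memory, so the disabled case is essentially free on every kernel exit. A condensed sketch of that pattern; everything except the mds_* helpers is hypothetical:

    #include <linux/jump_label.h>
    #include <asm/nospec-branch.h>

    DEFINE_STATIC_KEY_FALSE(example_clear_key);

    static inline void example_clear(void)
    {
            /* patched at runtime; no variable load on the hot path */
            if (static_branch_likely(&example_clear_key))
                    mds_clear_cpu_buffers();
    }

    /* toggled from sleepable context, as arch_smt_update() does above */
    static void example_smt_update(bool smt_on)
    {
            if (smt_on)
                    static_branch_enable(&example_clear_key);
            else
                    static_branch_disable(&example_clear_key);
    }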
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d6fe81bfa28a..1b53caf30805 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -898,85 +898,95 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
-static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
- { X86_VENDOR_CENTAUR, 5 },
- { X86_VENDOR_INTEL, 5 },
- { X86_VENDOR_NSC, 5 },
- { X86_VENDOR_ANY, 4 },
+#define NO_SPECULATION BIT(0)
+#define NO_MELTDOWN BIT(1)
+#define NO_SSB BIT(2)
+#define NO_L1TF BIT(3)
+#define NO_MDS BIT(4)
+#define MSBDS_ONLY BIT(5)
+
+#define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+
+#define VULNWL_INTEL(model, whitelist) \
+ VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+
+#define VULNWL_AMD(family, whitelist) \
+ VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
+
+static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
+ /* Intel Family 6 */
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
+ VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
+
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(CORE_YONAH, NO_SSB),
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
+
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
+
+ /* AMD Family 0xf - 0x12 */
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
{}
};
-static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
- { X86_VENDOR_AMD },
- {}
-};
-
-static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
- { X86_VENDOR_CENTAUR, 5, },
- { X86_VENDOR_INTEL, 5, },
- { X86_VENDOR_NSC, 5, },
- { X86_VENDOR_AMD, 0x12, },
- { X86_VENDOR_AMD, 0x11, },
- { X86_VENDOR_AMD, 0x10, },
- { X86_VENDOR_AMD, 0xf, },
- { X86_VENDOR_ANY, 4, },
- {}
-};
+static bool __init cpu_matches(unsigned long which)
+{
+ const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);
-static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
- /* in addition to cpu_no_speculation */
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
- {}
-};
+ return m && !!(m->driver_data & which);
+}
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = 0;
+ if (cpu_matches(NO_SPECULATION))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
- if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
- !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
- if (x86_match_cpu(cpu_no_speculation))
- return;
-
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
-
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
- if (x86_match_cpu(cpu_no_meltdown))
+ if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ setup_force_cpu_bug(X86_BUG_MDS);
+ if (cpu_matches(MSBDS_ONLY))
+ setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
+ }
+
+ if (cpu_matches(NO_MELTDOWN))
return;
/* Rogue Data Cache Load? No! */
@@ -985,7 +995,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
- if (x86_match_cpu(cpu_no_l1tf))
+ if (cpu_matches(NO_L1TF))
return;
setup_force_cpu_bug(X86_BUG_L1TF);
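The table-driven whitelist also makes future parts a one-line change. A hypothetical entry (the model name is invented for illustration) would go inside cpu_vuln_whitelist[] above, before the AMD rows:

    /* hypothetical: a future Atom affected only by MSBDS */
    VULNWL_INTEL(ATOM_NEWCORE, NO_SSB | NO_L1TF | MSBDS_ONLY),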
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 18bc9b51ac9b..086cf1d1d71d 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -34,6 +34,7 @@
#include <asm/x86_init.h>
#include <asm/reboot.h>
#include <asm/cache.h>
+#include <asm/nospec-branch.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -533,6 +534,9 @@ nmi_restart:
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
goto nmi_restart;
+
+ if (user_mode(regs))
+ mds_user_clear_cpu_buffers();
}
NOKPROBE_SYMBOL(do_nmi);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 2501816f5d8b..68f65420ad0c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -59,6 +59,7 @@
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
+#include <asm/nospec-branch.h>
#include <asm/mpx.h>
#include <asm/vm86.h>
@@ -393,6 +394,13 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
regs->ip = (unsigned long)general_protection;
regs->sp = (unsigned long)&gpregs->orig_ax;
+ /*
+ * This situation can be triggered by userspace via
+ * modify_ldt(2) and the return does not take the regular
+ * user space exit, so a CPU buffer clear is required when
+ * MDS mitigation is enabled.
+ */
+ mds_user_clear_cpu_buffers();
return;
}
#endif
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index a3633285a2e5..692c1cf24faf 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -620,7 +620,7 @@ unsigned long native_calibrate_tsc(void)
case INTEL_FAM6_KABYLAKE_DESKTOP:
crystal_khz = 24000; /* 24.0 MHz */
break;
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 873baf0b7799..087c5d40795a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -367,7 +367,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | F(AMD_SSB_NO);
+ F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+ F(AMD_SSB_NO) | F(AMD_STIBP);
/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -395,7 +396,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
+ F(MD_CLEAR);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2e37c1158312..4601ed5816f8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9720,8 +9720,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->__launched = vmx->loaded_vmcs->launched;
+ /* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
+ else if (static_branch_unlikely(&mds_user_clear))
+ mds_clear_cpu_buffers();
asm(
/* Store host registers */
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
index d49d3be81953..ecb5866aaf84 100644
--- a/arch/x86/platform/atom/punit_atom_debug.c
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -154,8 +154,8 @@ static void punit_dbgfs_unregister(void)
(kernel_ulong_t)&drv_data }
static const struct x86_cpu_id intel_punit_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, punit_device_tng),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, punit_device_byt),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, punit_device_tng),
ICPU(INTEL_FAM6_ATOM_AIRMONT, punit_device_cht),
{}
};
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
index 5a0483e7bf66..31dce781364c 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c
@@ -68,7 +68,7 @@ static struct bt_sfi_data tng_bt_sfi_data __initdata = {
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, tng_bt_sfi_data),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data),
{}
};
diff --git a/block/genhd.c b/block/genhd.c
index 9ceeb9e16f98..dff0f28feb54 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1577,7 +1577,8 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
*/
if (ev->poll_msecs >= 0)
intv_msecs = ev->poll_msecs;
- else if (disk->events & ~disk->async_events)
+ else if (disk->events & DISK_EVENT_FLAG_POLL
+ && disk->events & ~disk->async_events)
intv_msecs = disk_events_dfl_poll_msecs;
return msecs_to_jiffies(intv_msecs);
@@ -1787,11 +1788,13 @@ static void disk_check_events(struct disk_events *ev,
/*
* Tell userland about new events. Only the events listed in
- * @disk->events are reported. Unlisted events are processed the
- * same internally but never get reported to userland.
+ * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
+ * is set. Otherwise, events are processed internally but never
+ * get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & disk->events & (1 << i))
+ if (events & disk->events & (1 << i) &&
+ disk->events & DISK_EVENT_FLAG_UEVENT)
envp[nr_events++] = disk_uevents[i];
if (nr_events)
@@ -1828,7 +1831,10 @@ static ssize_t disk_events_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
- return __disk_events_show(disk->events, buf);
+ if (!(disk->events & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+
+ return __disk_events_show(disk->events & DISK_EVENT_TYPES_MASK, buf);
}
static ssize_t disk_events_async_show(struct device *dev,
@@ -1836,7 +1842,11 @@ static ssize_t disk_events_async_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
- return __disk_events_show(disk->async_events, buf);
+ if (!(disk->events & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+
+ return __disk_events_show(disk->async_events & DISK_EVENT_TYPES_MASK,
+ buf);
}
static ssize_t disk_events_poll_msecs_show(struct device *dev,
@@ -1845,6 +1855,9 @@ static ssize_t disk_events_poll_msecs_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!disk->ev)
+ return sprintf(buf, "-1\n");
+
return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}
@@ -1861,6 +1874,9 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
if (intv < 0 && intv != -1)
return -EINVAL;
+ if (!disk->ev)
+ return -ENODEV;
+
disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1925,7 +1941,8 @@ static void disk_alloc_events(struct gendisk *disk)
{
struct disk_events *ev;
- if (!disk->fops->check_events)
+ if (!disk->fops->check_events ||
+ !(disk->events & DISK_EVENT_TYPES_MASK))
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -1947,14 +1964,14 @@ static void disk_alloc_events(struct gendisk *disk)
static void disk_add_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
/* FIXME: error handling */
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
pr_warn("%s: failed to create sysfs files for events\n",
disk->disk_name);
+ if (!disk->ev)
+ return;
+
mutex_lock(&disk_events_mutex);
list_add_tail(&disk->ev->node, &disk_events);
mutex_unlock(&disk_events_mutex);
@@ -1968,14 +1985,13 @@ static void disk_add_events(struct gendisk *disk)
static void disk_del_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
+ if (disk->ev) {
+ disk_block_events(disk);
- disk_block_events(disk);
-
- mutex_lock(&disk_events_mutex);
- list_del_init(&disk->ev->node);
- mutex_unlock(&disk_events_mutex);
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+ }
sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
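With events now gated on explicit flags, a driver only gets polling and uevents if it asks for them. A sketch of a probe-time opt-in, assuming the DISK_EVENT_FLAG_* names from include/linux/genhd.h as changed by this merge (that hunk is not shown in this section):

    /* removable-media driver probe sketch: detect media changes, allow
     * kernel-internal polling even without an explicit events_poll_msecs,
     * and forward the resulting events to udev */
    disk->events = DISK_EVENT_MEDIA_CHANGE |
                   DISK_EVENT_FLAG_POLL |
                   DISK_EVENT_FLAG_UEVENT;

The drivers/block hunks that follow set only DISK_EVENT_MEDIA_CHANGE, i.e. their events are processed internally but neither polled by default nor reported to user space.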
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index b85dadb4501b..0a06872f1719 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -291,7 +291,7 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1), /* Valleyview, Bay Trail */
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT), /* Valleyview, Bay Trail */
ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
};
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 95444f5f00a0..9997eac8fdec 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -54,7 +54,7 @@ static const struct always_present_id always_present_ids[] = {
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
* but Linux uses a separate PWM driver, harmless if not used.
*/
- ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}),
+ ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
/*
* The INT0002 device is necessary to clear wakeup interrupt sources
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 93758b528d8f..32b52e6bd13b 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -533,11 +533,18 @@ ssize_t __weak cpu_show_l1tf(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -545,6 +552,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spectre_v2.attr,
&dev_attr_spec_store_bypass.attr,
&dev_attr_l1tf.attr,
+ &dev_attr_mds.attr,
NULL
};
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 245a879b036e..9d0bf8b8b902 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2548,6 +2548,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
disk->major = MajorNumber;
disk->first_minor = n << DAC960_MaxPartitionsBits;
disk->fops = &DAC960_BlockDeviceOperations;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
}
/*
Indicate the Block Device Registration completed successfully,
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 49908c74bfcb..12631bd8495d 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1735,6 +1735,7 @@ static int __init fd_probe_drives(void)
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880*2);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 1dacc42e2dcf..9ba088707d32 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1970,6 +1970,7 @@ static int __init atari_floppy_init (void)
unit[i].disk->first_minor = i;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
+ unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
unit[i].disk->private_data = &unit[i];
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 3cfa5d578d12..d8ade9ca32e8 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4212,6 +4212,7 @@ static int __init do_floppy_init(void)
disks[drive]->major = FLOPPY_MAJOR;
disks[drive]->first_minor = TOMINOR(drive);
disks[drive]->fops = &floppy_fops;
+ disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[drive]->disk_name, "fd%d", drive);
setup_timer(&motor_off_timer[drive], motor_off_callback, drive);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a026211afb51..e94d2816194c 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -329,6 +329,7 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 27a44b97393a..e5ede556f5c9 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -854,6 +854,7 @@ static void pd_probe_drive(struct pd_unit *disk)
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
+ p->events = DISK_EVENT_MEDIA_CHANGE;
disk->gd = p;
p->private_data = disk;
p->queue = blk_init_queue(do_pd_request, &pd_lock);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index eef7a91f667d..97b0dbae9369 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -304,6 +304,7 @@ static void __init pf_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 0e902ad1c27b..3099226a55e5 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -875,6 +875,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->first_minor = drive;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
add_disk(swd->unit[drive].disk);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 25a601507446..19c4666535d2 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1242,6 +1242,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->first_minor = index;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[index];
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 98d3e8f37cc0..a51044c8dfca 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -433,6 +433,8 @@ static int init_vq(struct virtio_blk *vblk)
if (err)
num_vqs = 1;
+ num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
return -ENOMEM;
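
The added min_t() caps the requested virtqueue count at nr_cpu_ids, so the driver never sets up more hardware queues than there are possible CPUs to map them to. A minimal userspace demonstration of the clamp (the real kernel min_t uses temporaries to avoid double evaluation; this stand-in is simplified):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's min_t(): compare after
	 * casting both sides to the named type.
	 */
	#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

	int main(void)
	{
		unsigned int nr_cpu_ids = 4;	/* assumed guest CPU count */
		unsigned int num_vqs = 8;	/* queues offered by the device */

		num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
		printf("using %u virtqueues\n", num_vqs);	/* prints 4 */
		return 0;
	}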
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 14459d66ef0c..76aa86af96bf 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1005,6 +1005,7 @@ static int ace_setup(struct ace_device *ace)
ace->gd->major = ace_major;
ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
ace->gd->fops = &ace_fops;
+ ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
ace->gd->queue = ace->queue;
ace->gd->private_data = ace;
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 72cd96a8eb19..eff92053a3bf 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -807,6 +807,7 @@ static int probe_gdrom(struct platform_device *devptr)
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
+ gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)
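
The long run of one-line hunks above (DAC960 through gdrom, and later ide-cd, ide-gd, sd and sr) all make media-change reporting an explicit per-driver opt-in by setting DISK_EVENT_MEDIA_CHANGE on each removable-media gendisk before it is registered; the sd/sr hunks further down additionally set DISK_EVENT_FLAG_POLL and DISK_EVENT_FLAG_UEVENT where polling and uevents are wanted. A hedged sketch of the probe-time pattern (mydrv_major and mydrv_fops are placeholders, and a real driver would also set up a request queue):

	static int mydrv_attach(void)
	{
		struct gendisk *disk = alloc_disk(1);	/* one minor */

		if (!disk)
			return -ENOMEM;
		disk->major = mydrv_major;		/* placeholder */
		disk->first_minor = 0;
		disk->fops = &mydrv_fops;		/* placeholder */
		disk->flags |= GENHD_FL_REMOVABLE;
		/* Opt in to media-change reporting; without this the
		 * block layer treats the disk as having no events.
		 */
		disk->events = DISK_EVENT_MEDIA_CHANGE;
		add_disk(disk);
		return 0;
	}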
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 68ba7d4105e7..04a963d9392a 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -198,7 +198,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
-PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
+PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
"sclk_otgphy0_480m" };
PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
@@ -399,7 +399,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
RK3288_CLKGATE_CON(3), 11, GFLAGS),
- MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
+ MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
RK3288_CLKGATE_CON(9), 0, GFLAGS),
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 559abf76891e..33d1cf4e6d80 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -458,7 +458,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKSEL_CON(35), 15, 1, MFLAGS, 8, 7, DFLAGS,
RK3328_CLKGATE_CON(2), 12, GFLAGS),
COMPOSITE(SCLK_CRYPTO, "clk_crypto", mux_2plls_p, 0,
- RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 7, DFLAGS,
+ RK3328_CLKSEL_CON(20), 7, 1, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_NOMUX(SCLK_TSADC, "clk_tsadc", "clk_24m", 0,
RK3328_CLKSEL_CON(22), 0, 10, DFLAGS,
@@ -550,15 +550,15 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(0, "hclk_rkvenc_niu", "hclk_rkvenc", CLK_IGNORE_UNUSED,
RK3328_CLKGATE_CON(25), 1, GFLAGS),
GATE(ACLK_H265, "aclk_h265", "aclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 2, GFLAGS),
GATE(PCLK_H265, "pclk_h265", "hclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ RK3328_CLKGATE_CON(25), 3, GFLAGS),
GATE(ACLK_H264, "aclk_h264", "aclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 4, GFLAGS),
GATE(HCLK_H264, "hclk_h264", "hclk_rkvenc", 0,
- RK3328_CLKGATE_CON(25), 1, GFLAGS),
+ RK3328_CLKGATE_CON(25), 5, GFLAGS),
GATE(ACLK_AXISRAM, "aclk_axisram", "aclk_rkvenc", CLK_IGNORE_UNUSED,
- RK3328_CLKGATE_CON(25), 0, GFLAGS),
+ RK3328_CLKGATE_CON(25), 6, GFLAGS),
COMPOSITE(SCLK_VENC_CORE, "sclk_venc_core", mux_4plls_p, 0,
RK3328_CLKSEL_CON(51), 14, 2, MFLAGS, 8, 5, DFLAGS,
@@ -663,7 +663,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_GMAC */
COMPOSITE(ACLK_GMAC, "aclk_gmac", mux_2plls_hdmiphy_p, 0,
- RK3328_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3328_CLKSEL_CON(25), 6, 2, MFLAGS, 0, 5, DFLAGS,
RK3328_CLKGATE_CON(3), 2, GFLAGS),
COMPOSITE_NOMUX(PCLK_GMAC, "pclk_gmac", "aclk_gmac", 0,
RK3328_CLKSEL_CON(25), 8, 3, DFLAGS,
@@ -733,7 +733,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
/* PD_PERI */
GATE(0, "aclk_peri_noc", "aclk_peri", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(19), 11, GFLAGS),
- GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 4, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri", 0, RK3328_CLKGATE_CON(19), 14, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 0, GFLAGS),
GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK3328_CLKGATE_CON(19), 1, GFLAGS),
@@ -894,7 +894,7 @@ static void __init rk3328_clk_init(struct device_node *np)
&rk3328_cpuclk_data, rk3328_cpuclk_rates,
ARRAY_SIZE(rk3328_cpuclk_rates));
- rockchip_register_softrst(np, 11, reg_base + RK3328_SOFTRST_CON(0),
+ rockchip_register_softrst(np, 12, reg_base + RK3328_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
rockchip_register_restart_notifier(ctx, RK3328_GLB_SRST_FST, NULL);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b97c7afe970e..6f8f913f096e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2052,7 +2052,7 @@ static const struct pstate_funcs knl_funcs = {
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, silvermont_funcs),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs),
ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_HASWELL_CORE, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_CORE, core_funcs),
@@ -2069,7 +2069,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
{}
};
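
intel_pstate, pnd2_edac, intel_idle, intel_rapl and the other tables in this merge are all touched by the same rename of the INTEL_FAM6_ATOM_* macros (SILVERMONT1 becomes SILVERMONT, GEMINI_LAKE becomes GOLDMONT_PLUS, DENVERTON becomes GOLDMONT_X, and so on); only the identifiers change, not the model numbers behind them. These tables are consumed through x86_match_cpu(). A hedged usage sketch:

	#include <asm/cpu_device_id.h>
	#include <asm/intel-family.h>

	static const struct x86_cpu_id my_cpu_ids[] = {
		{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT,
		  X86_FEATURE_ANY, },
		{}	/* terminator */
	};

	static int __init my_init(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(my_cpu_ids);

		if (!id)
			return -ENODEV;	/* not a listed model */
		/* id->driver_data, when set, selects per-model parameters */
		return 0;
	}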
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 7f0b9aa15867..9887f2a14aa9 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -451,7 +451,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
if (chan->hw_2d) {
if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
- !axi_dmac_check_len(chan, xt->numf))
+ xt->numf == 0)
return NULL;
if (xt->sgl[0].size + dst_icg > chan->max_length ||
xt->sgl[0].size + src_icg > chan->max_length)
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b26256f23d67..08b10274284a 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -786,6 +786,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
struct tegra_adma *tdma = platform_get_drvdata(pdev);
int i;
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&tdma->dma_dev);
for (i = 0; i < tdma->nr_channels; ++i)
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index df28b65358d2..903a4f1fadcc 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -1541,7 +1541,7 @@ static struct dunit_ops dnv_ops = {
static const struct x86_cpu_id pnd2_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X, 0, (kernel_ulong_t)&dnv_ops },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 3830c5e54574..64a3b137d3c0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,20 +607,19 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_mode_set *modeset;
int i, j;
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ modeset = &fb_helper->crtc_info[i].mode_set;
- if (!crtc->enabled)
+ if (!modeset->crtc->enabled)
continue;
- /* Walk the connectors & encoders on this fb turning them on/off */
- drm_fb_helper_for_each_connector(fb_helper, j) {
- connector = fb_helper->connector_info[j]->connector;
+ for (j = 0; j < modeset->num_connectors; j++) {
+ connector = modeset->connectors[j];
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c563457de000..3d2d36df3603 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -79,7 +79,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};
@@ -130,7 +130,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index bcfe885c1453..61fb3d4f974b 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1509,6 +1509,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
of_node_put(remote);
hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ of_node_put(i2c_np);
if (!hdmi->ddc_adpt) {
dev_err(dev, "Failed to get ddc i2c adapter by node\n");
return -EINVAL;
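
of_find_i2c_adapter_by_node() only needs i2c_np for the lookup, so the reference taken when the node was parsed must be dropped whether or not the adapter is found; that is what the added of_node_put() does. A hedged sketch of the get/put discipline ("ddc-i2c-bus" and the error policy are illustrative assumptions):

	static int hdmi_get_ddc(struct device *dev, struct i2c_adapter **out)
	{
		/* "ddc-i2c-bus" is an illustrative property name */
		struct device_node *np = of_parse_phandle(dev->of_node,
							  "ddc-i2c-bus", 0);
		struct i2c_adapter *adap = NULL;

		if (np) {
			adap = of_find_i2c_adapter_by_node(np);
			of_node_put(np);	/* drop the ref either way */
		}
		if (!adap)
			return -EPROBE_DEFER;	/* illustrative policy */
		*out = adap;
		return 0;
	}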
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 6c2b31595dce..246c950a4f4f 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -93,6 +93,18 @@ static irqreturn_t meson_irq(int irq, void *arg)
return IRQ_HANDLED;
}
+static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /*
+ * We need 64bytes aligned stride, and PAGE aligned size
+ */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
@@ -115,7 +127,7 @@ static struct drm_driver meson_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
/* GEM Ops */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = meson_dumb_create,
.dumb_destroy = drm_gem_dumb_destroy,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.gem_free_object_unlocked = drm_gem_cma_free_object,
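
The custom meson_dumb_create() exists only to tighten buffer geometry before handing off to the generic CMA helper: the stride is rounded up to 64 bytes and the total size to a page. Worked numbers for a hypothetical 1366x768 XRGB8888 (32 bpp) buffer, shown as a small userspace check assuming 4 KiB pages:

	#include <stdio.h>

	#define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
	#define PAGE_ALIGN(x)      ALIGN(x, 4096UL)	/* assumed page size */

	int main(void)
	{
		unsigned int width = 1366, height = 768, bpp = 32;
		unsigned int pitch = ALIGN(DIV_ROUND_UP(width * bpp, 8), 64);
		unsigned long size = PAGE_ALIGN((unsigned long)pitch * height);

		/* 1366*32/8 = 5464, aligned to 64 -> 5504;
		 * 5504*768 = 4227072, already page aligned.
		 */
		printf("pitch=%u size=%lu\n", pitch, size);
		return 0;
	}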
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 76d63de5921d..e7dda3089cc0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -427,6 +427,14 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -436,6 +444,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
+ .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 622dab6c4347..90a56c6724b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -284,8 +284,6 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
struct ttm_buffer_object *bo =
(struct ttm_buffer_object *)vma->vm_private_data;
- WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
-
(void)ttm_bo_reference(bo);
}
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index ef699477d94a..eb8444faa14e 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -173,6 +173,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Comet Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index d1609a507354..0794df0116db 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1796,6 +1796,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g);
return 0;
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c
index 9d26c9737e21..18e07adb02d4 100644
--- a/drivers/ide/ide-cd_ioctl.c
+++ b/drivers/ide/ide-cd_ioctl.c
@@ -81,8 +81,9 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
/*
* ide-cd always generates media changed event if media is missing, which
- * makes it impossible to use for proper event reporting, so disk->events
- * is cleared to 0 and the following function is used only to trigger
+ * makes it impossible to use for proper event reporting, so
+ * DISK_EVENT_FLAG_UEVENT is cleared in disk->events
+ * and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index e823394ed543..b6d80b2bbda2 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -299,8 +299,9 @@ static unsigned int ide_gd_check_events(struct gendisk *disk,
/*
* The following is used to force revalidation on the first open on
* removeable devices, and never gets reported to userland as
- * genhd->events is 0. This is intended as removeable ide disk
- * can't really detect MEDIA_CHANGE events.
+ * DISK_EVENT_FLAG_UEVENT isn't set in genhd->events.
+ * This is intended as removeable ide disk can't really detect
+ * MEDIA_CHANGE events.
*/
ret = drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
@@ -416,6 +417,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
+ g->events = DISK_EVENT_MEDIA_CHANGE;
device_add_disk(&drive->gendev, g);
return 0;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 008eb4d58a86..ec95c0d56fa7 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1069,14 +1069,14 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_WESTMERE, idle_cpu_nehalem),
ICPU(INTEL_FAM6_WESTMERE_EP, idle_cpu_nehalem),
ICPU(INTEL_FAM6_NEHALEM_EX, idle_cpu_nehalem),
- ICPU(INTEL_FAM6_ATOM_PINEVIEW, idle_cpu_atom),
- ICPU(INTEL_FAM6_ATOM_LINCROFT, idle_cpu_lincroft),
+ ICPU(INTEL_FAM6_ATOM_BONNELL, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_BONNELL_MID, idle_cpu_lincroft),
ICPU(INTEL_FAM6_WESTMERE_EX, idle_cpu_nehalem),
ICPU(INTEL_FAM6_SANDYBRIDGE, idle_cpu_snb),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, idle_cpu_snb),
- ICPU(INTEL_FAM6_ATOM_CEDARVIEW, idle_cpu_atom),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT1, idle_cpu_byt),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, idle_cpu_tangier),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL, idle_cpu_atom),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT, idle_cpu_byt),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, idle_cpu_tangier),
ICPU(INTEL_FAM6_ATOM_AIRMONT, idle_cpu_cht),
ICPU(INTEL_FAM6_IVYBRIDGE, idle_cpu_ivb),
ICPU(INTEL_FAM6_IVYBRIDGE_X, idle_cpu_ivt),
@@ -1084,7 +1084,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_HASWELL_X, idle_cpu_hsw),
ICPU(INTEL_FAM6_HASWELL_ULT, idle_cpu_hsw),
ICPU(INTEL_FAM6_HASWELL_GT3E, idle_cpu_hsw),
- ICPU(INTEL_FAM6_ATOM_SILVERMONT2, idle_cpu_avn),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_X, idle_cpu_avn),
ICPU(INTEL_FAM6_BROADWELL_CORE, idle_cpu_bdw),
ICPU(INTEL_FAM6_BROADWELL_GT3E, idle_cpu_bdw),
ICPU(INTEL_FAM6_BROADWELL_X, idle_cpu_bdw),
@@ -1097,8 +1097,8 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
ICPU(INTEL_FAM6_XEON_PHI_KNL, idle_cpu_knl),
ICPU(INTEL_FAM6_XEON_PHI_KNM, idle_cpu_knl),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, idle_cpu_bxt),
- ICPU(INTEL_FAM6_ATOM_DENVERTON, idle_cpu_dnv),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
+ ICPU(INTEL_FAM6_ATOM_GOLDMONT_X, idle_cpu_dnv),
ICPU(INTEL_FAM6_ATOM_TREMONT_X, idle_cpu_dnv),
{}
};
@@ -1311,7 +1311,7 @@ static void intel_idle_state_table_update(void)
ivt_idle_state_table_update();
break;
case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
bxt_idle_state_table_update();
break;
case INTEL_FAM6_SKYLAKE_DESKTOP:
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0478b681ba31..f4cecf5d6422 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3351,9 +3351,12 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
+ dmar_map_gfx = 0;
#endif
+ if (!dmar_map_gfx)
+ iommu_identity_mapping |= IDENTMAP_GFX;
+
check_tylersburg_isoch();
if (iommu_identity_mapping) {
@@ -3435,7 +3438,13 @@ domains_done:
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
+ /*
+ * Call dmar_alloc_hwirq() with dmar_global_lock held,
+ * could cause possible lock race condition.
+ */
+ up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
+ down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -4129,9 +4138,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
- if (dmar_map_gfx) {
- intel_iommu_gfx_mapped = 1;
- } else {
+ if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
@@ -4931,6 +4938,9 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
+ if (dmar_map_gfx)
+ intel_iommu_gfx_mapped = 1;
+
init_no_remapping_devices();
ret = init_dmars();
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 6e6367214d40..a2f35ab73d89 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1130,7 +1130,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
*mf = fmt->format;
#else
- return -ENOTTY;
+ ret = -ENOTTY;
#endif
} else {
s64 val;
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index 98467b2089fa..099d59b992c1 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -484,7 +484,7 @@ static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf,
CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
- pos += rc;
+ *pos += rc;
return rc;
}
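
The one-character fix above matters more than it looks: pos is a pointer parameter, so the old "pos += rc" advanced the local pointer instead of the file offset it points to, leaving the caller's f_pos stuck and repeated reads returning the same data. The identical bug is fixed in ivtv below. A minimal reproduction:

	#include <stdio.h>

	/* Buggy: pointer arithmetic on the parameter; caller sees nothing. */
	static void read_buggy(long long *pos, long long rc)
	{
		pos += rc;
	}

	/* Fixed: dereference, so the caller's offset actually advances. */
	static void read_fixed(long long *pos, long long rc)
	{
		*pos += rc;
	}

	int main(void)
	{
		long long off = 0;

		read_buggy(&off, 100);
		printf("after buggy: %lld\n", off);	/* still 0 */
		read_fixed(&off, 100);
		printf("after fixed: %lld\n", off);	/* 100 */
		return 0;
	}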
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 979b66627f60..cc9d43cd5aa1 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1460,8 +1460,9 @@ static int dvb_register(struct cx23885_tsport *port)
if (fe0->dvb.frontend != NULL) {
struct i2c_adapter *tun_i2c;
- fe0->dvb.frontend->sec_priv = kmalloc(sizeof(dib7000p_ops), GFP_KERNEL);
- memcpy(fe0->dvb.frontend->sec_priv, &dib7000p_ops, sizeof(dib7000p_ops));
+ fe0->dvb.frontend->sec_priv = kmemdup(&dib7000p_ops, sizeof(dib7000p_ops), GFP_KERNEL);
+ if (!fe0->dvb.frontend->sec_priv)
+ return -ENOMEM;
tun_i2c = dib7000p_ops.get_i2c_master(fe0->dvb.frontend, DIBX000_I2C_INTERFACE_TUNER, 1);
if (!dvb_attach(dib0070_attach, fe0->dvb.frontend, tun_i2c, &dib7070p_dib0070_config))
return -ENODEV;
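
kmemdup() collapses the kmalloc()+memcpy() pair, and just as importantly the new code checks the allocation before anything is copied; the old version would have memcpy'd through a NULL pointer on allocation failure. A hedged sketch of the idiom (my_frontend and my_ops are placeholder types):

	#include <linux/slab.h>
	#include <linux/string.h>

	static int dup_ops(struct my_frontend *fe, const struct my_ops *tmpl)
	{
		fe->priv_ops = kmemdup(tmpl, sizeof(*tmpl), GFP_KERNEL);
		if (!fe->priv_ops)
			return -ENOMEM;	/* fail before any use */
		return 0;
	}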
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index c9bd018e53de..e2b19c3eaa87 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -420,7 +420,7 @@ static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t co
IVTV_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
if (rc > 0)
- pos += rc;
+ *pos += rc;
return rc;
}
diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
index 2a044be729da..0684235b5d2b 100644
--- a/drivers/media/pci/tw5864/tw5864-video.c
+++ b/drivers/media/pci/tw5864/tw5864-video.c
@@ -1394,13 +1394,13 @@ static void tw5864_handle_frame(struct tw5864_h264_frame *frame)
input->vb = NULL;
spin_unlock_irqrestore(&input->slock, flags);
- v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
-
if (!vb) { /* Gone because of disabling */
dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n");
return;
}
+ v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
+
/*
* Check for space.
* Mind the overhead of startcode emulation prevention.
diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
index 5813b49391ed..cf282a679f1f 100644
--- a/drivers/media/platform/davinci/isif.c
+++ b/drivers/media/platform/davinci/isif.c
@@ -886,9 +886,7 @@ static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
static int isif_config_ycbcr(void)
{
struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
- struct vpss_pg_frame_size frame_size;
u32 modeset = 0, ccdcfg = 0;
- struct vpss_sync_pol sync;
dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
@@ -976,13 +974,6 @@ static int isif_config_ycbcr(void)
/* two fields are interleaved in memory */
regw(0x00000249, SDOFST);
- /* Setup test pattern if enabled */
- if (isif_cfg.bayer.config_params.test_pat_gen) {
- sync.ccdpg_hdpol = params->hd_pol;
- sync.ccdpg_vdpol = params->vd_pol;
- dm365_vpss_set_sync_pol(sync);
- dm365_vpss_set_pg_frame_size(frame_size);
- }
return 0;
}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index a7a366093524..4ca3d600aa84 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -1007,7 +1007,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
if (dev->bitmap_cap && (compose->width != s->r.width ||
compose->height != s->r.height)) {
- kfree(dev->bitmap_cap);
+ vfree(dev->bitmap_cap);
dev->bitmap_cap = NULL;
}
*compose = s->r;
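
bitmap_cap is allocated with the vmalloc family elsewhere in the driver, so it must be released with vfree(); handing a vmalloc'ed pointer to kfree() corrupts the slab allocator. Allocator and liberator always pair up:

	#include <linux/vmalloc.h>
	#include <linux/slab.h>

	static void pairing_demo(void)
	{
		/* sizes are illustrative */
		void *big = vzalloc(1920 * 1080 / 8);	/* vmalloc family */
		void *small = kzalloc(256, GFP_KERNEL);	/* slab family */

		vfree(big);	/* vzalloc/vmalloc -> vfree */
		kfree(small);	/* kzalloc/kmalloc -> kfree */
	}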
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index ab3428bf63fe..2d20d908e280 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
return -EIO;
}
/* Send response data to caller */
- if (response != NULL && response_len != NULL && evt_hdr->dlen) {
+ if (response != NULL && response_len != NULL && evt_hdr->dlen &&
+ evt_hdr->dlen <= payload_len) {
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
memcpy(response, skb->data, evt_hdr->dlen);
@@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
return;
fm_evt_hdr = (void *)skb->data;
+ if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
+ return;
/* Skip header info and copy only response data */
skb_pull(skb, sizeof(struct fm_event_msg_hdr));
@@ -1268,8 +1271,9 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
switch (action->type) {
case ACTION_SEND_COMMAND: /* Send */
- if (fmc_send_cmd(fmdev, 0, 0, action->data,
- action->size, NULL, NULL))
+ ret = fmc_send_cmd(fmdev, 0, 0, action->data,
+ action->size, NULL, NULL);
+ if (ret)
goto rel_fw;
cmd_cnt++;
@@ -1308,7 +1312,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
u16 payload;
- __be16 asic_id, asic_ver;
+ __be16 asic_id = 0, asic_ver = 0;
int resp_len, ret;
u8 fw_name[50];
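
Both wl128x hunks above are bounds checks on device-supplied lengths: fmc_send_cmd() now copies a response only when the reported dlen fits the caller's buffer, and the flag-get handler rejects a dlen larger than the irq flag field. Lengths coming from hardware or firmware are untrusted and must be validated before memcpy(). A hedged sketch of the rule:

	#include <string.h>
	#include <errno.h>

	/* Copy a device-reported payload only after checking the
	 * device-supplied length against the destination buffer size.
	 */
	static int copy_response(void *dst, size_t dst_len,
				 const void *src, size_t dev_len)
	{
		if (dev_len > dst_len)	/* corrupt or hostile: refuse */
			return -EINVAL;
		memcpy(dst, src, dev_len);
		return 0;
	}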
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 77d5d4cbed0a..ed28f2a3f8ff 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -780,8 +780,6 @@ static void serial_ir_exit(void)
static int __init serial_ir_init_module(void)
{
- int result;
-
switch (type) {
case IR_HOMEBREW:
case IR_IRDEO:
@@ -809,12 +807,7 @@ static int __init serial_ir_init_module(void)
if (sense != -1)
sense = !!sense;
- result = serial_ir_init();
- if (!result)
- return 0;
-
- serial_ir_exit();
- return result;
+ return serial_ir_init();
}
static void __exit serial_ir_exit_module(void)
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 44975061b953..ddededc4ced4 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
{
+ if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
+ return 0;
return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
index 25648add77e5..bd2b7a67b732 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
@@ -50,6 +50,7 @@
#define PVR2_CVAL_INPUT_COMPOSITE 2
#define PVR2_CVAL_INPUT_SVIDEO 3
#define PVR2_CVAL_INPUT_RADIO 4
+#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
enum pvr2_config {
pvr2_config_empty, /* No configuration */
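
ctrl_check_input() builds a mask with "1 << v" from a caller-supplied value; for negative v, or v at least as wide as int, that shift is undefined behaviour. The new PVR2_CVAL_INPUT_MAX bound (the highest defined input, RADIO = 4) rejects out-of-range values before the shift happens. A hedged sketch of the pattern:

	#include <stdbool.h>

	#define INPUT_MAX 4	/* mirrors PVR2_CVAL_INPUT_MAX */

	/* Range-check before shifting: "1 << v" is undefined for v < 0
	 * or v >= the bit width of the promoted type.
	 */
	static bool input_allowed(int v, unsigned int allowed_mask)
	{
		if (v < 0 || v > INPUT_MAX)
			return false;
		return (1u << v) & allowed_mask;
	}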
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b3fb155f50e4..dd3cad2d4d65 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -128,7 +128,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
{}
};
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index b833e6cc684c..ba1313d258dc 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1858,8 +1858,8 @@ static int __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = kasprintf(GFP_KERNEL, "docg3.%d",
- docg3->device_id);
+ mtd->name = devm_kasprintf(docg3->dev, GFP_KERNEL, "docg3.%d",
+ docg3->device_id);
if (!mtd->name)
return -ENOMEM;
docg3->max_block = 2047;
@@ -1965,7 +1965,7 @@ nomem3:
nomem2:
kfree(docg3);
nomem1:
- return ERR_PTR(ret);
+ return ret ? ERR_PTR(ret) : NULL;
}
/**
@@ -1979,7 +1979,6 @@ static void doc_release_device(struct mtd_info *mtd)
mtd_device_unregister(mtd);
kfree(docg3->bbt);
kfree(docg3);
- kfree(mtd->name);
kfree(mtd);
}
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index e930e9e10254..c5e595157c77 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -583,7 +583,7 @@ static ssize_t mtd_partition_offset_show(struct device *dev,
{
struct mtd_info *mtd = dev_get_drvdata(dev);
struct mtd_part *part = mtd_to_part(mtd);
- return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", part->offset);
}
static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
index d655da57587a..445eb08e0299 100644
--- a/drivers/mtd/spi-nor/intel-spi.c
+++ b/drivers/mtd/spi-nor/intel-spi.c
@@ -509,6 +509,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ /* Read cannot cross 4K boundary */
+ block_size = min_t(loff_t, from + block_size,
+ round_up(from + 1, SZ_4K)) - from;
+
writel(from, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
@@ -559,6 +563,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+ /* Write cannot cross 4K boundary */
+ block_size = min_t(loff_t, to + block_size,
+ round_up(to + 1, SZ_4K)) - to;
+
writel(to, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
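
The two identical intel-spi hunks clamp each FIFO transfer so it never crosses a 4 KiB flash boundary: round_up(from + 1, SZ_4K) is the next boundary strictly above from, and subtracting from gives the bytes remaining before it. Worked numbers as a small userspace check:

	#include <stdio.h>

	#define SZ_4K 0x1000UL
	#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))
	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long from = 0x0ff0, block_size = 64;

		/* next boundary above 0x0ff0 is 0x1000, so only 16 bytes
		 * may be transferred before it
		 */
		block_size = MIN(from + block_size,
				 ROUND_UP(from + 1, SZ_4K)) - from;
		printf("block_size=%lu\n", block_size);	/* prints 16 */
		return 0;
	}

The following loop iteration then starts exactly on the boundary.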
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1e60c84da5dc..0d17d5b6e744 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -118,7 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
@@ -852,11 +852,7 @@ static int ibmvnic_login(struct net_device *netdev)
}
} while (retry);
- /* handle pending MAC address changes after successful login */
- if (adapter->mac_change_pending) {
- __ibmvnic_set_mac(netdev, &adapter->desired.mac);
- adapter->mac_change_pending = false;
- }
+ __ibmvnic_set_mac(netdev, adapter->mac_addr);
return 0;
}
@@ -1118,7 +1114,6 @@ static int ibmvnic_open(struct net_device *netdev)
}
rc = __ibmvnic_open(netdev);
- netif_carrier_on(netdev);
return rc;
}
@@ -1689,28 +1684,40 @@ static void ibmvnic_set_multi(struct net_device *netdev)
}
}
-static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
+static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
union ibmvnic_crq crq;
int rc;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ if (!is_valid_ether_addr(dev_addr)) {
+ rc = -EADDRNOTAVAIL;
+ goto err;
+ }
memset(&crq, 0, sizeof(crq));
crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
- ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
+ ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
init_completion(&adapter->fw_done);
rc = ibmvnic_send_crq(adapter, &crq);
- if (rc)
- return rc;
+ if (rc) {
+ rc = -EIO;
+ goto err;
+ }
+
wait_for_completion(&adapter->fw_done);
/* netdev->dev_addr is changed in handle_change_mac_rsp function */
- return adapter->fw_done_rc ? -EIO : 0;
+ if (adapter->fw_done_rc) {
+ rc = -EIO;
+ goto err;
+ }
+
+ return 0;
+err:
+ ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
+ return rc;
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
@@ -1719,13 +1726,10 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
int rc;
- if (adapter->state == VNIC_PROBED) {
- memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
- adapter->mac_change_pending = true;
- return 0;
- }
-
- rc = __ibmvnic_set_mac(netdev, addr);
+ rc = 0;
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
+ if (adapter->state != VNIC_PROBED)
+ rc = __ibmvnic_set_mac(netdev, addr->sa_data);
return rc;
}
@@ -1862,8 +1866,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- netif_carrier_on(netdev);
-
return 0;
}
@@ -1933,8 +1935,6 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
return 0;
}
- netif_carrier_on(netdev);
-
return 0;
}
@@ -3938,8 +3938,8 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
goto out;
}
- memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
- ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr,
+ &crq->change_mac_addr_rsp.mac_addr[0]);
out:
complete(&adapter->fw_done);
return rc;
@@ -4476,6 +4476,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
crq->link_state_indication.phys_link_state;
adapter->logical_link_state =
crq->link_state_indication.logical_link_state;
+ if (adapter->phys_link_state && adapter->logical_link_state)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
break;
case CHANGE_MAC_ADDR_RSP:
netdev_dbg(netdev, "Got MAC address change Response\n");
@@ -4852,8 +4856,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->init_done);
adapter->resetting = false;
- adapter->mac_change_pending = false;
-
do {
rc = init_crq_queue(adapter);
if (rc) {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index d5260a206708..8e328d70ceb0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -967,7 +967,6 @@ struct ibmvnic_tunables {
u64 rx_entries;
u64 tx_entries;
u64 mtu;
- struct sockaddr mac;
};
struct ibmvnic_adapter {
@@ -1089,7 +1088,6 @@ struct ibmvnic_adapter {
bool resetting;
bool napi_enabled, from_passive_init;
- bool mac_change_pending;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index 1c4af7227bca..a8e15e8c474c 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -71,8 +71,8 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = {
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_PENWELL),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
{}
};
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index a8d465a52041..666d0cbda598 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -506,6 +506,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct sun4i_usb_phy_data *data =
container_of(work, struct sun4i_usb_phy_data, detect.work);
struct phy *phy0 = data->phys[0].phy;
+ struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
bool force_session_end, id_notify = false, vbus_notify = false;
int id_det, vbus_det;
@@ -562,6 +563,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
mutex_unlock(&phy0->mutex);
}
+ /* Enable PHY0 passby for host mode only. */
+ sun4i_usb_phy_passby(phy, !id_det);
+
/* Re-route PHY0 if necessary */
if (data->cfg->phy0_dual_route)
sun4i_usb_phy0_reroute(data, id_det);
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index ff45b5651161..b825fb554b7f 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -569,7 +569,7 @@ static ssize_t show_hdmi_source(struct device *dev,
return scnprintf(buf, PAGE_SIZE,
"input [gpu] unknown\n");
}
- pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
+ pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
}
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index dcd9f40a4b18..45d9355d0c57 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -18,6 +18,8 @@
#include <linux/rfkill.h>
#include <linux/input.h>
+#include "dell-rbtn.h"
+
enum rbtn_type {
RBTN_UNKNOWN,
RBTN_TOGGLE,
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 92dc230ef5b2..08107731afe0 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -60,7 +60,7 @@ static const struct x86_cpu_id int0002_cpu_ids[] = {
/*
* Limit ourselves to Cherry Trail for now, until testing shows we
* need to handle the INT0002 device on Baytrail too.
- * ICPU(INTEL_FAM6_ATOM_SILVERMONT1), * Valleyview, Bay Trail *
+ * ICPU(INTEL_FAM6_ATOM_SILVERMONT), * Valleyview, Bay Trail *
*/
ICPU(INTEL_FAM6_ATOM_AIRMONT), /* Braswell, Cherry Trail */
{}
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 871cfa682519..ee45d61127f1 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -125,8 +125,8 @@ static struct mid_pb_ddata mrfld_ddata = {
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
static const struct x86_cpu_id mid_pb_cpu_ids[] = {
- ICPU(INTEL_FAM6_ATOM_PENWELL, mfld_ddata),
- ICPU(INTEL_FAM6_ATOM_MERRIFIELD, mrfld_ddata),
+ ICPU(INTEL_FAM6_ATOM_SALTWELL_MID, mfld_ddata),
+ ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, mrfld_ddata),
{}
};
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index e4d4dfe3e1d1..a9d6c7c2a22a 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -757,13 +757,17 @@ static int ipc_create_pmc_devices(void)
if (ret) {
dev_err(ipcdev.dev, "Failed to add punit platform device\n");
platform_device_unregister(ipcdev.tco_dev);
+ return ret;
}
if (!ipcdev.telem_res_inval) {
ret = ipc_create_telemetry_device();
- if (ret)
+ if (ret) {
dev_warn(ipcdev.dev,
"Failed to add telemetry platform device\n");
+ platform_device_unregister(ipcdev.punit_dev);
+ platform_device_unregister(ipcdev.tco_dev);
+ }
}
return ret;
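
Both intel_pmc_ipc hunks are about unwinding partial setup: when the punit device cannot be added, the already-registered TCO device is torn down and the error is now actually propagated; when the telemetry device fails, both earlier devices are removed so the function never leaves a half-created device set behind. A hedged sketch of the general goto-unwind shape (the helpers are placeholders):

	static int create_devices(void)
	{
		int ret;

		ret = create_tco();		/* placeholder helpers */
		if (ret)
			return ret;

		ret = create_punit();
		if (ret)
			goto err_tco;

		ret = create_telemetry();
		if (ret)
			goto err_punit;

		return 0;

	err_punit:
		destroy_punit();
	err_tco:
		destroy_tco();
		return ret;
	}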
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index b5b890127479..a47a41fc10ad 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_IFACE
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 07666dd1ad60..a4134a0c3e9b 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -4424,14 +4424,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
return AE_OK;
}
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ return AE_OK;
+
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
resource->type);
+ return AE_CTRL_TERMINATE;
- case ACPI_RESOURCE_TYPE_END_TAG:
- return AE_OK;
}
- return AE_CTRL_TERMINATE;
}
static int sony_pic_possible_resources(struct acpi_device *device)
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 9ff30d1cddf6..225b50f01a13 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1159,13 +1159,13 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1, rapl_defaults_byt),
+ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT, rapl_defaults_byt),
RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT, rapl_defaults_cht),
- RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD, rapl_defaults_tng),
- RAPL_CPU(INTEL_FAM6_ATOM_MOOREFIELD, rapl_defaults_ann),
+ RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT_MID,rapl_defaults_tng),
+ RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT_MID, rapl_defaults_ann),
RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_GEMINI_LAKE, rapl_defaults_core),
- RAPL_CPU(INTEL_FAM6_ATOM_DENVERTON, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, rapl_defaults_core),
+ RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT_X, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_ATOM_TREMONT_X, rapl_defaults_core),
RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL, rapl_defaults_hsw_server),
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index a0860b30bd93..f125bfa5c752 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -311,10 +311,12 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (IS_ENABLED(CONFIG_OF))
of_pwmchip_add(chip);
- pwmchip_sysfs_export(chip);
-
out:
mutex_unlock(&pwm_lock);
+
+ if (!ret)
+ pwmchip_sysfs_export(chip);
+
return ret;
}
EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
@@ -348,7 +350,7 @@ int pwmchip_remove(struct pwm_chip *chip)
unsigned int i;
int ret = 0;
- pwmchip_sysfs_unexport_children(chip);
+ pwmchip_sysfs_unexport(chip);
mutex_lock(&pwm_lock);
@@ -368,8 +370,6 @@ int pwmchip_remove(struct pwm_chip *chip)
free_pwms(chip);
- pwmchip_sysfs_unexport(chip);
-
out:
mutex_unlock(&pwm_lock);
return ret;
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 0b0754e11a19..00ce2a4f58e5 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -110,6 +110,10 @@ struct meson_pwm {
const struct meson_pwm_data *data;
void __iomem *base;
u8 inverter_mask;
+ /*
+ * Protects register (write) access to the REG_MISC_AB register
+ * that is shared between the two PWMs.
+ */
spinlock_t lock;
};
@@ -183,7 +187,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
do_div(fin_ps, fin_freq);
/* Calc pre_div with the period */
- for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
+ for (pre_div = 0; pre_div <= MISC_CLK_DIV_MASK; pre_div++) {
cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000,
fin_ps * (pre_div + 1));
dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n",
@@ -192,7 +196,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
break;
}
- if (pre_div == MISC_CLK_DIV_MASK) {
+ if (pre_div > MISC_CLK_DIV_MASK) {
dev_err(meson->chip.dev, "unable to get period pre_div\n");
return -EINVAL;
}
@@ -234,6 +238,7 @@ static void meson_pwm_enable(struct meson_pwm *meson,
{
u32 value, clk_shift, clk_enable, enable;
unsigned int offset;
+ unsigned long flags;
switch (id) {
case 0:
@@ -254,6 +259,8 @@ static void meson_pwm_enable(struct meson_pwm *meson,
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~(MISC_CLK_DIV_MASK << clk_shift);
value |= channel->pre_div << clk_shift;
@@ -266,11 +273,14 @@ static void meson_pwm_enable(struct meson_pwm *meson,
value = readl(meson->base + REG_MISC_AB);
value |= enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
{
u32 value, enable;
+ unsigned long flags;
switch (id) {
case 0:
@@ -285,9 +295,13 @@ static void meson_pwm_disable(struct meson_pwm *meson, unsigned int id)
return;
}
+ spin_lock_irqsave(&meson->lock, flags);
+
value = readl(meson->base + REG_MISC_AB);
value &= ~enable;
writel(value, meson->base + REG_MISC_AB);
+
+ spin_unlock_irqrestore(&meson->lock, flags);
}
static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -295,29 +309,21 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
struct meson_pwm *meson = to_meson_pwm(chip);
- unsigned long flags;
int err = 0;
if (!state)
return -EINVAL;
- spin_lock_irqsave(&meson->lock, flags);
-
if (!state->enabled) {
meson_pwm_disable(meson, pwm->hwpwm);
channel->state.enabled = false;
- goto unlock;
+ return 0;
}
if (state->period != channel->state.period ||
state->duty_cycle != channel->state.duty_cycle ||
state->polarity != channel->state.polarity) {
- if (channel->state.enabled) {
- meson_pwm_disable(meson, pwm->hwpwm);
- channel->state.enabled = false;
- }
-
if (state->polarity != channel->state.polarity) {
if (state->polarity == PWM_POLARITY_NORMAL)
meson->inverter_mask |= BIT(pwm->hwpwm);
@@ -328,7 +334,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
err = meson_pwm_calc(meson, channel, pwm->hwpwm,
state->duty_cycle, state->period);
if (err < 0)
- goto unlock;
+ return err;
channel->state.polarity = state->polarity;
channel->state.period = state->period;
@@ -340,9 +346,7 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
channel->state.enabled = true;
}
-unlock:
- spin_unlock_irqrestore(&meson->lock, flags);
- return err;
+ return 0;
}
static void meson_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
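
The pwm-meson rework above narrows the lock to what it actually protects: the REG_MISC_AB register shared between the two PWM channels. meson_pwm_calc() only computes values and can run unlocked, so meson_pwm_apply() no longer holds the spinlock across the whole operation; instead enable/disable take it just around their read-modify-write of the shared register, which is exactly what the new comment on the lock field documents. A hedged sketch of the pattern (my_pwm, base and REG_SHARED are placeholders):

	static void channel_enable(struct my_pwm *p, u32 enable_bit)
	{
		unsigned long flags;
		u32 val;

		/* Serialize the read-modify-write of the shared register;
		 * irqsave form because callers may run in any context.
		 */
		spin_lock_irqsave(&p->lock, flags);
		val = readl(p->base + REG_SHARED);
		val |= enable_bit;
		writel(val, p->base + REG_SHARED);
		spin_unlock_irqrestore(&p->lock, flags);
	}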
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 261ca74033bc..14d1d1f6554a 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -383,6 +383,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
}
/* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK,
+ AQSFRC_RLDCSF_ZRO);
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index a813239300c3..0850b11dfd83 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -399,19 +399,6 @@ void pwmchip_sysfs_export(struct pwm_chip *chip)
void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
struct device *parent;
-
- parent = class_find_device(&pwm_class, NULL, chip,
- pwmchip_sysfs_match);
- if (parent) {
- /* for class_find_device() */
- put_device(parent);
- device_unregister(parent);
- }
-}
-
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
- struct device *parent;
unsigned int i;
parent = class_find_device(&pwm_class, NULL, chip,
@@ -427,6 +414,7 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
}
put_device(parent);
+ device_unregister(parent);
}
static int __init pwm_sysfs_init(void)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 100a4a5a5b99..d1aab85f5102 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3932,7 +3932,8 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
uint32_t tag;
uint16_t hwq;
- if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
+ if (cmnd && shost_use_blk_mq(cmnd->device->host) &&
+ cmnd->request && cmnd->request->q) {
tag = blk_mq_unique_tag(cmnd->request);
hwq = blk_mq_unique_tag_to_hwq(tag);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d9c03f894f55..10c940cf2cdc 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3314,7 +3314,8 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
- gd->events |= DISK_EVENT_MEDIA_CHANGE;
+ gd->events |= DISK_EVENT_MEDIA_CHANGE |
+ DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
}
blk_pm_runtime_init(sdp->request_queue, dev);
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 20bd0c3ca2ba..1c8ff3562caf 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -712,7 +712,8 @@ static int sr_probe(struct device *dev)
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
+ disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST
+ | DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 3848bde779d3..9eaa803195b2 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -668,6 +668,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
if (xfer->rx_buf) {
+ /* Clear WFIFO, since it's last 2 bytes are shifted out during
+ * a read operation
+ */
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
+
/* Set read data length */
spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
a3700_spi->buf_len);
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index d70568ea02d5..2ff7d90e166a 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
.resume = ssb_host_pcmcia_resume,
};
+static int pcmcia_init_failed;
+
/*
* These are not module init/exit functions!
* The module_pcmcia_driver() helper cannot be used here.
*/
int ssb_host_pcmcia_init(void)
{
- return pcmcia_register_driver(&ssb_host_pcmcia_driver);
+ pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
+
+ return pcmcia_init_failed;
}
void ssb_host_pcmcia_exit(void)
{
- pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+ if (!pcmcia_init_failed)
+ pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
}
diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c
index c27868b2c6af..ce2722edd307 100644
--- a/drivers/thermal/intel_soc_dts_thermal.c
+++ b/drivers/thermal/intel_soc_dts_thermal.c
@@ -43,7 +43,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}
static const struct x86_cpu_id soc_thermal_ids[] = {
- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
BYT_SOC_DTS_APIC_IRQ},
{}
};
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 8446ef43b972..520ddce8baef 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -111,12 +111,6 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
if (tty->stopped)
return 0;
- mutex_lock(&tty_mutex);
- if (to->magic != TTY_MAGIC) {
- mutex_unlock(&tty_mutex);
- return -EIO;
- }
-
if (c > 0) {
spin_lock_irqsave(&to->port->lock, flags);
/* Stuff the data into the input queue of the other end */
@@ -126,7 +120,6 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
tty_flip_buffer_push(to->port);
spin_unlock_irqrestore(&to->port->lock, flags);
}
- mutex_unlock(&tty_mutex);
return c;
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 1e3d2021c89f..8885e999160e 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1486,7 +1486,7 @@ static int __init sc16is7xx_init(void)
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
- return ret;
+ goto err_i2c;
}
#endif
@@ -1494,10 +1494,20 @@ static int __init sc16is7xx_init(void)
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
- return ret;
+ goto err_spi;
}
#endif
return ret;
+
+#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
+err_spi:
+#endif
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+ i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+err_i2c:
+#endif
+ uart_unregister_driver(&sc16is7xx_uart);
+ return ret;
}
module_init(sc16is7xx_init);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index e4ac153db423..e1e30128cbdb 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1376,13 +1376,10 @@ static void release_one_tty(struct work_struct *work)
struct tty_driver *driver = tty->driver;
struct module *owner = driver->owner;
- mutex_lock(&tty_mutex);
if (tty->ops->cleanup)
tty->ops->cleanup(tty);
tty->magic = 0;
- mutex_unlock(&tty_mutex);
-
tty_driver_kref_put(driver);
module_put(owner);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 7921781cd770..2f90437f3cbb 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2276,6 +2276,7 @@ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
if (status & DEV_DMA_STS_MASK)
dev_err(hsotg->dev, "descriptor %d closed with %x\n",
i, status & DEV_DMA_STS_MASK);
+ desc++;
}
return bytes_rem;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 13f2c051dbf2..afb4b0bf47b3 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -81,6 +81,7 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
+ int maxp;
/*
* Set the INQUIRY transfer length to 36. We don't use any of
@@ -90,20 +91,17 @@ static int slave_alloc (struct scsi_device *sdev)
sdev->inquiry_len = 36;
/*
- * USB has unusual DMA-alignment requirements: Although the
- * starting address of each scatter-gather element doesn't matter,
- * the length of each element except the last must be divisible
- * by the Bulk maxpacket value. There's currently no way to
- * express this by block-layer constraints, so we'll cop out
- * and simply require addresses to be aligned at 512-byte
- * boundaries. This is okay since most block I/O involves
- * hardware sectors that are multiples of 512 bytes in length,
- * and since host controllers up through USB 2.0 have maxpacket
- * values no larger than 512.
- *
- * But it doesn't suffice for Wireless USB, where Bulk maxpacket
- * values can be as large as 2048. To make that work properly
- * will require changes to the block layer.
+ * USB has unusual scatter-gather requirements: the length of each
+ * scatterlist element except the last must be divisible by the
+ * Bulk maxpacket value. Fortunately this value is always a
+ * power of 2. Inform the block layer about this requirement.
+ */
+ maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
+ blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
+
+ /*
+ * Some host controllers may have alignment requirements.
+ * We'll play it safe by requiring 512-byte alignment always.
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
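
blk_queue_virt_boundary() takes a power-of-two mask and guarantees that no scatter-gather element crosses a (mask + 1) boundary, which, combined with aligned starting addresses, makes the length of every element except the last a multiple of maxp, exactly the bulk-transfer rule the new comment describes. Reduced to the two queue constraints (assuming slave_alloc context as above):

	unsigned int maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);

	/* maxp is always a power of two, so maxp - 1 is a valid mask;
	 * e.g. maxp = 512 gives the mask 0x1ff. */
	blk_queue_virt_boundary(sdev->request_queue, maxp - 1);

	/* Separate, weaker constraint on buffer start addresses. */
	blk_queue_update_dma_alignment(sdev->request_queue, 512 - 1);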
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index 126991046eb7..4cc8b6281ffa 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -178,10 +178,10 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
static int mdev_device_remove_cb(struct device *dev, void *data)
{
- if (!dev_is_mdev(dev))
- return 0;
+ if (dev_is_mdev(dev))
+ mdev_device_remove(dev, true);
- return mdev_device_remove(dev, data ? *(bool *)data : true);
+ return 0;
}
/*
@@ -210,6 +210,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
/* Check for duplicate */
parent = __find_parent_device(dev);
if (parent) {
+ parent = NULL;
ret = -EEXIST;
goto add_dev_err;
}
@@ -269,7 +270,6 @@ EXPORT_SYMBOL(mdev_register_device);
void mdev_unregister_device(struct device *dev)
{
struct mdev_parent *parent;
- bool force_remove = true;
mutex_lock(&parent_list_lock);
parent = __find_parent_device(dev);
@@ -283,8 +283,7 @@ void mdev_unregister_device(struct device *dev)
list_del(&parent->next);
class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
- device_for_each_child(dev, (void *)&force_remove,
- mdev_device_remove_cb);
+ device_for_each_child(dev, NULL, mdev_device_remove_cb);
parent_remove_sysfs_files(parent);
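
device_for_each_child() stops walking as soon as its callback returns non-zero, which is why the callback above now always returns 0: every child must be visited even when some of them are not mdev devices. The convention in isolation (remove_cb is an illustrative name):

	static int remove_cb(struct device *dev, void *data)
	{
		if (dev_is_mdev(dev))
			mdev_device_remove(dev, true);

		return 0;	/* returning non-zero would abort the walk */
	}

	/* visits every child regardless of individual results */
	device_for_each_child(dev, NULL, remove_cb);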
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index dcac1fffaee3..4041da199a43 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -697,6 +697,7 @@ static long vfio_pci_ioctl(void *device_data,
{
void __iomem *io;
size_t size;
+ u16 orig_cmd;
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.flags = 0;
@@ -712,15 +713,23 @@ static long vfio_pci_ioctl(void *device_data,
break;
}
- /* Is it really there? */
+ /*
+ * Is it really there? Enable memory decode for
+ * implicit access in pci_map_rom().
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
+ pci_write_config_word(pdev, PCI_COMMAND,
+ orig_cmd | PCI_COMMAND_MEMORY);
+
io = pci_map_rom(pdev, &size);
- if (!io || !size) {
+ if (io) {
+ info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_unmap_rom(pdev, io);
+ } else {
info.size = 0;
- break;
}
- pci_unmap_rom(pdev, io);
- info.flags = VFIO_REGION_INFO_FLAG_READ;
+ pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
break;
}
case VFIO_PCI_VGA_REGION_INDEX:
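
pci_map_rom() reaches the ROM through the device's memory space, so memory decode has to be enabled while the probe runs. The hunk uses the classic save/modify/restore pattern on the command register; in isolation it looks like this (pdev is assumed to be a valid struct pci_dev *, error handling trimmed):

	u16 orig_cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
	pci_write_config_word(pdev, PCI_COMMAND, orig_cmd | PCI_COMMAND_MEMORY);

	/* ... accesses that implicitly need memory decode, e.g. pci_map_rom() ... */

	pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);	/* restore as found */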
@@ -1436,11 +1445,11 @@ static void __init vfio_pci_fill_ids(void)
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
- pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+ pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
- pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+ pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 6db66c1e0a86..698b734b8a2e 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
* executing.
*/
- if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown = SHUTDOWN_MASK;
- sk->sk_state = SS_UNCONNECTED;
- sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
- }
+ /* If the peer is still valid, no need to reset connection */
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ return;
+
+ /* If the close timeout is pending, let it expire. This avoids races
+ * with the timeout callback.
+ */
+ if (vsk->close_work_scheduled)
+ return;
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
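
The rewrite trades one nested block for guard clauses: bail out while the peer is still reachable or a close timeout is pending, then reset unconditionally. The control-flow shape, with placeholder predicates (peer_alive/close_pending are not real functions):

	if (peer_alive(sk))
		return;			/* nothing to do */
	if (close_pending(sk))
		return;			/* let the timeout callback win the race */

	/* only one exit path actually performs the reset */
	reset_connection(sk);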
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
index 136d30484d02..cb6acbac9c47 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/omapdss-boot-init.c
@@ -193,8 +193,10 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
- if (dss == NULL || !of_device_is_available(dss))
+ if (dss == NULL || !of_device_is_available(dss)) {
+ of_node_put(dss);
return 0;
+ }
omapdss_walk_device(dss, true);
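
of_find_matching_node() returns its result with an elevated reference count, and of_node_put(NULL) is defined to be a no-op, so the error path may drop the reference unconditionally. The idiom on its own (omapdss_of_match comes from the hunk above):

	struct device_node *dss;

	dss = of_find_matching_node(NULL, omapdss_of_match);
	if (!dss || !of_device_is_available(dss)) {
		of_node_put(dss);	/* safe even when dss is NULL */
		return 0;
	}
	/* ... use dss; drop the reference with of_node_put() when done ... */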
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 1c4797e53f68..80a3704939cd 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -254,9 +254,11 @@ void vp_del_vqs(struct virtio_device *vdev)
for (i = 0; i < vp_dev->msix_used_vectors; ++i)
free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
- for (i = 0; i < vp_dev->msix_vectors; i++)
- if (vp_dev->msix_affinity_masks[i])
- free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ if (vp_dev->msix_affinity_masks) {
+ for (i = 0; i < vp_dev->msix_vectors; i++)
+ if (vp_dev->msix_affinity_masks[i])
+ free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+ }
if (vp_dev->msix_enabled) {
/* Disable the vector used for configuration */
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 30bc03f2bf12..151dee26eaf4 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1097,6 +1097,8 @@ struct virtqueue *vring_create_virtqueue(
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
+ if (!may_reduce_num)
+ return NULL;
}
if (!num)
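
The surrounding loop retries the ring allocation at progressively smaller sizes; the new check makes the first failure final when the caller forbade shrinking. Abstracted, with alloc_ring() standing in for the real page allocation:

	for (; num; num /= 2) {
		queue = alloc_ring(num);	/* placeholder for the real alloc */
		if (queue)
			break;			/* got a ring of size num */
		if (!may_reduce_num)
			return NULL;		/* caller wanted exactly num */
	}
	if (!num)
		return NULL;			/* shrank all the way to zero */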
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 5a5df0b29776..deaeae612f3a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -770,6 +770,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct btrfs_key key;
struct btrfs_key tmp_op_key;
struct btrfs_key *op_key = NULL;
+ struct rb_node *n;
int count;
int ret = 0;
@@ -779,7 +780,9 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
}
spin_lock(&head->lock);
- list_for_each_entry(node, &head->ref_list, list) {
+ for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
+ node = rb_entry(n, struct btrfs_delayed_ref_node,
+ ref_node);
if (node->seq > seq)
continue;
@@ -1175,7 +1178,7 @@ again:
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -1186,7 +1189,7 @@ again:
*/
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
goto again;
}
spin_unlock(&delayed_refs->lock);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 120a6d9fa064..39b397cb38c8 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -124,15 +124,43 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
u64 bytenr;
ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
- bytenr = ins->node.bytenr;
+ bytenr = ins->bytenr;
while (*p) {
parent_node = *p;
entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
href_node);
- if (bytenr < entry->node.bytenr)
+ if (bytenr < entry->bytenr)
p = &(*p)->rb_left;
- else if (bytenr > entry->node.bytenr)
+ else if (bytenr > entry->bytenr)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+
+ rb_link_node(node, parent_node, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+ struct btrfs_delayed_ref_node *ins)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *node = &ins->ref_node;
+ struct rb_node *parent_node = NULL;
+ struct btrfs_delayed_ref_node *entry;
+
+ while (*p) {
+ int comp;
+
+ parent_node = *p;
+ entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
+ ref_node);
+ comp = comp_refs(ins, entry, true);
+ if (comp < 0)
+ p = &(*p)->rb_left;
+ else if (comp > 0)
p = &(*p)->rb_right;
else
return entry;
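
Both htree_insert() and the new tree_insert() are instances of the canonical rbtree insert-or-find idiom: descend the tree comparing keys, then either link the new node into the empty slot or return the colliding entry so the caller can merge into it. In generic form (struct item and key_of() are illustrative):

	static struct item *item_tree_insert(struct rb_root *root, struct item *ins)
	{
		struct rb_node **p = &root->rb_node;
		struct rb_node *parent = NULL;
		struct item *entry;

		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct item, node);
			if (key_of(ins) < key_of(entry))
				p = &(*p)->rb_left;
			else if (key_of(ins) > key_of(entry))
				p = &(*p)->rb_right;
			else
				return entry;	/* duplicate: caller merges */
		}

		rb_link_node(&ins->node, parent, p);	/* splice in at the leaf */
		rb_insert_color(&ins->node, root);	/* rebalance */
		return NULL;				/* inserted */
	}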
@@ -161,15 +189,15 @@ find_ref_head(struct rb_root *root, u64 bytenr,
while (n) {
entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
- if (bytenr < entry->node.bytenr)
+ if (bytenr < entry->bytenr)
n = n->rb_left;
- else if (bytenr > entry->node.bytenr)
+ else if (bytenr > entry->bytenr)
n = n->rb_right;
else
return entry;
}
if (entry && return_bigger) {
- if (bytenr > entry->node.bytenr) {
+ if (bytenr > entry->bytenr) {
n = rb_next(&entry->href_node);
if (!n)
n = rb_first(root);
@@ -192,17 +220,17 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
if (mutex_trylock(&head->mutex))
return 0;
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
spin_lock(&delayed_refs->lock);
- if (!head->node.in_tree) {
+ if (RB_EMPTY_NODE(&head->href_node)) {
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return -EAGAIN;
}
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return 0;
}
@@ -211,15 +239,11 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_ref_node *ref)
{
- if (btrfs_delayed_ref_is_head(ref)) {
- head = btrfs_delayed_node_to_head(ref);
- rb_erase(&head->href_node, &delayed_refs->href_root);
- } else {
- assert_spin_locked(&head->lock);
- list_del(&ref->list);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
- }
+ assert_spin_locked(&head->lock);
+ rb_erase(&ref->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
@@ -234,24 +258,18 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
u64 seq)
{
struct btrfs_delayed_ref_node *next;
+ struct rb_node *node = rb_next(&ref->ref_node);
bool done = false;
- next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
- list);
- while (!done && &next->list != &head->ref_list) {
+ while (!done && node) {
int mod;
- struct btrfs_delayed_ref_node *next2;
-
- next2 = list_next_entry(next, list);
-
- if (next == ref)
- goto next;
+ next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ node = rb_next(node);
if (seq && next->seq >= seq)
- goto next;
-
+ break;
if (comp_refs(ref, next, false))
- goto next;
+ break;
if (ref->action == next->action) {
mod = next->ref_mod;
@@ -275,8 +293,6 @@ static bool merge_ref(struct btrfs_trans_handle *trans,
WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
}
-next:
- next = next2;
}
return done;
@@ -288,11 +304,12 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
+ struct rb_node *node;
u64 seq = 0;
assert_spin_locked(&head->lock);
- if (list_empty(&head->ref_list))
+ if (RB_EMPTY_ROOT(&head->ref_tree))
return;
/* We don't have too many refs to merge for data. */
@@ -309,22 +326,13 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
}
spin_unlock(&fs_info->tree_mod_seq_lock);
- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
- list);
- while (&ref->list != &head->ref_list) {
+again:
+ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
if (seq && ref->seq >= seq)
- goto next;
-
- if (merge_ref(trans, delayed_refs, head, ref, seq)) {
- if (list_empty(&head->ref_list))
- break;
- ref = list_first_entry(&head->ref_list,
- struct btrfs_delayed_ref_node,
- list);
continue;
- }
-next:
- ref = list_next_entry(ref, list);
+ if (merge_ref(trans, delayed_refs, head, ref, seq))
+ goto again;
}
}
@@ -396,8 +404,8 @@ again:
head->processing = 1;
WARN_ON(delayed_refs->num_heads_ready == 0);
delayed_refs->num_heads_ready--;
- delayed_refs->run_delayed_start = head->node.bytenr +
- head->node.num_bytes;
+ delayed_refs->run_delayed_start = head->bytenr +
+ head->num_bytes;
return head;
}
@@ -407,25 +415,19 @@ again:
* Return 0 for insert.
* Return >0 for merge.
*/
-static int
-add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_root *root,
- struct btrfs_delayed_ref_head *href,
- struct btrfs_delayed_ref_node *ref)
+static int insert_delayed_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_root *root,
+ struct btrfs_delayed_ref_head *href,
+ struct btrfs_delayed_ref_node *ref)
{
struct btrfs_delayed_ref_node *exist;
int mod;
int ret = 0;
spin_lock(&href->lock);
- /* Check whether we can merge the tail node with ref */
- if (list_empty(&href->ref_list))
- goto add_tail;
- exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
- list);
- /* No need to compare bytenr nor is_head */
- if (comp_refs(exist, ref, true))
- goto add_tail;
+ exist = tree_insert(&href->ref_tree, ref);
+ if (!exist)
+ goto inserted;
/* Now we are sure we can merge */
ret = 1;
@@ -456,9 +458,7 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
drop_delayed_ref(trans, root, href, exist);
spin_unlock(&href->lock);
return ret;
-
-add_tail:
- list_add_tail(&ref->list, &href->ref_list);
+inserted:
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
@@ -473,20 +473,16 @@ add_tail:
*/
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_delayed_ref_node *existing,
- struct btrfs_delayed_ref_node *update,
+ struct btrfs_delayed_ref_head *existing,
+ struct btrfs_delayed_ref_head *update,
int *old_ref_mod_ret)
{
- struct btrfs_delayed_ref_head *existing_ref;
- struct btrfs_delayed_ref_head *ref;
int old_ref_mod;
- existing_ref = btrfs_delayed_node_to_head(existing);
- ref = btrfs_delayed_node_to_head(update);
- BUG_ON(existing_ref->is_data != ref->is_data);
+ BUG_ON(existing->is_data != update->is_data);
- spin_lock(&existing_ref->lock);
- if (ref->must_insert_reserved) {
+ spin_lock(&existing->lock);
+ if (update->must_insert_reserved) {
/* if the extent was freed and then
* reallocated before the delayed ref
* entries were processed, we can end up
@@ -494,7 +490,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
* the must_insert_reserved flag set.
* Set it again here
*/
- existing_ref->must_insert_reserved = ref->must_insert_reserved;
+ existing->must_insert_reserved = update->must_insert_reserved;
/*
* update the num_bytes so we make sure the accounting
@@ -504,22 +500,22 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
}
- if (ref->extent_op) {
- if (!existing_ref->extent_op) {
- existing_ref->extent_op = ref->extent_op;
+ if (update->extent_op) {
+ if (!existing->extent_op) {
+ existing->extent_op = update->extent_op;
} else {
- if (ref->extent_op->update_key) {
- memcpy(&existing_ref->extent_op->key,
- &ref->extent_op->key,
- sizeof(ref->extent_op->key));
- existing_ref->extent_op->update_key = true;
+ if (update->extent_op->update_key) {
+ memcpy(&existing->extent_op->key,
+ &update->extent_op->key,
+ sizeof(update->extent_op->key));
+ existing->extent_op->update_key = true;
}
- if (ref->extent_op->update_flags) {
- existing_ref->extent_op->flags_to_set |=
- ref->extent_op->flags_to_set;
- existing_ref->extent_op->update_flags = true;
+ if (update->extent_op->update_flags) {
+ existing->extent_op->flags_to_set |=
+ update->extent_op->flags_to_set;
+ existing->extent_op->update_flags = true;
}
- btrfs_free_delayed_extent_op(ref->extent_op);
+ btrfs_free_delayed_extent_op(update->extent_op);
}
}
/*
@@ -527,43 +523,31 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
* only need the lock for this case cause we could be processing it
* currently, for refs we just added we know we're a-ok.
*/
- old_ref_mod = existing_ref->total_ref_mod;
+ old_ref_mod = existing->total_ref_mod;
if (old_ref_mod_ret)
*old_ref_mod_ret = old_ref_mod;
existing->ref_mod += update->ref_mod;
- existing_ref->total_ref_mod += update->ref_mod;
+ existing->total_ref_mod += update->ref_mod;
/*
* If we are going from a positive ref mod to a negative or vice
* versa we need to make sure to adjust pending_csums accordingly.
*/
- if (existing_ref->is_data) {
- if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
+ if (existing->is_data) {
+ if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
delayed_refs->pending_csums -= existing->num_bytes;
- if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
+ if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
delayed_refs->pending_csums += existing->num_bytes;
}
- spin_unlock(&existing_ref->lock);
+ spin_unlock(&existing->lock);
}
-/*
- * helper function to actually insert a head node into the rbtree.
- * this does all the dirty work in terms of maintaining the correct
- * overall modification count.
- */
-static noinline struct btrfs_delayed_ref_head *
-add_delayed_ref_head(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_node *ref,
- struct btrfs_qgroup_extent_record *qrecord,
- u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
- int action, int is_data, int is_system,
- int *old_ref_mod, int *new_ref_mod)
-
+static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+ u64 bytenr, u64 num_bytes, u64 ref_root,
+ u64 reserved, int action, bool is_data,
+ bool is_system)
{
- struct btrfs_delayed_ref_head *existing;
- struct btrfs_delayed_ref_head *head_ref = NULL;
- struct btrfs_delayed_ref_root *delayed_refs;
int count_mod = 1;
int must_insert_reserved = 0;
@@ -571,7 +555,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
BUG_ON(!is_data && reserved);
/*
- * the head node stores the sum of all the mods, so dropping a ref
+ * The head node stores the sum of all the mods, so dropping a ref
* should drop the sum in the head node by one.
*/
if (action == BTRFS_UPDATE_DELAYED_HEAD)
@@ -580,12 +564,11 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
count_mod = -1;
/*
- * BTRFS_ADD_DELAYED_EXTENT means that we need to update
- * the reserved accounting when the extent is finally added, or
- * if a later modification deletes the delayed ref without ever
- * inserting the extent into the extent allocation tree.
- * ref->must_insert_reserved is the flag used to record
- * that accounting mods are required.
+ * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
+ * accounting when the extent is finally added, or if a later
+ * modification deletes the delayed ref without ever inserting the
+ * extent into the extent allocation tree. ref->must_insert_reserved
+ * is the flag used to record that accounting mods are required.
*
* Once we record must_insert_reserved, switch the action to
* BTRFS_ADD_DELAYED_REF because other special casing is not required.
@@ -595,29 +578,21 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
else
must_insert_reserved = 0;
- delayed_refs = &trans->transaction->delayed_refs;
-
- /* first set the basic ref node struct up */
- refcount_set(&ref->refs, 1);
- ref->bytenr = bytenr;
- ref->num_bytes = num_bytes;
- ref->ref_mod = count_mod;
- ref->type = 0;
- ref->action = 0;
- ref->is_head = 1;
- ref->in_tree = 1;
- ref->seq = 0;
-
- head_ref = btrfs_delayed_node_to_head(ref);
+ refcount_set(&head_ref->refs, 1);
+ head_ref->bytenr = bytenr;
+ head_ref->num_bytes = num_bytes;
+ head_ref->ref_mod = count_mod;
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
head_ref->is_system = is_system;
- INIT_LIST_HEAD(&head_ref->ref_list);
+ head_ref->ref_tree = RB_ROOT;
INIT_LIST_HEAD(&head_ref->ref_add_list);
+ RB_CLEAR_NODE(&head_ref->href_node);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
+ spin_lock_init(&head_ref->lock);
+ mutex_init(&head_ref->mutex);
- /* Record qgroup extent info if provided */
if (qrecord) {
if (ref_root && reserved) {
qrecord->data_rsv = reserved;
@@ -627,21 +602,40 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
qrecord->bytenr = bytenr;
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
+ }
+}
+
+/*
+ * helper function to actually insert a head node into the rbtree.
+ * this does all the dirty work in terms of maintaining the correct
+ * overall modification count.
+ */
+static noinline struct btrfs_delayed_ref_head *
+add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head_ref,
+ struct btrfs_qgroup_extent_record *qrecord,
+ int action, int *old_ref_mod, int *new_ref_mod)
+
+{
+ struct btrfs_delayed_ref_head *existing;
+ struct btrfs_delayed_ref_root *delayed_refs;
- if(btrfs_qgroup_trace_extent_nolock(fs_info,
+ delayed_refs = &trans->transaction->delayed_refs;
+
+ /* Record qgroup extent info if provided */
+ if (qrecord) {
+ if (btrfs_qgroup_trace_extent_nolock(fs_info,
delayed_refs, qrecord))
kfree(qrecord);
}
- spin_lock_init(&head_ref->lock);
- mutex_init(&head_ref->mutex);
-
- trace_add_delayed_ref_head(fs_info, ref, head_ref, action);
+ trace_add_delayed_ref_head(fs_info, head_ref, action);
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
if (existing) {
- update_existing_head_ref(delayed_refs, &existing->node, ref,
+ update_existing_head_ref(delayed_refs, existing, head_ref,
old_ref_mod);
/*
* we've updated the existing ref, free the newly
@@ -652,8 +646,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
} else {
if (old_ref_mod)
*old_ref_mod = 0;
- if (is_data && count_mod < 0)
- delayed_refs->pending_csums += num_bytes;
+ if (head_ref->is_data && head_ref->ref_mod < 0)
+ delayed_refs->pending_csums += head_ref->num_bytes;
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
@@ -661,90 +655,48 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
}
if (new_ref_mod)
*new_ref_mod = head_ref->total_ref_mod;
- return head_ref;
-}
-/*
- * helper to insert a delayed tree ref into the rbtree.
- */
-static noinline void
-add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head_ref,
- struct btrfs_delayed_ref_node *ref, u64 bytenr,
- u64 num_bytes, u64 parent, u64 ref_root, int level,
- int action)
-{
- struct btrfs_delayed_tree_ref *full_ref;
- struct btrfs_delayed_ref_root *delayed_refs;
- u64 seq = 0;
- int ret;
-
- if (action == BTRFS_ADD_DELAYED_EXTENT)
- action = BTRFS_ADD_DELAYED_REF;
-
- if (is_fstree(ref_root))
- seq = atomic64_read(&fs_info->tree_mod_seq);
- delayed_refs = &trans->transaction->delayed_refs;
-
- /* first set the basic ref node struct up */
- refcount_set(&ref->refs, 1);
- ref->bytenr = bytenr;
- ref->num_bytes = num_bytes;
- ref->ref_mod = 1;
- ref->action = action;
- ref->is_head = 0;
- ref->in_tree = 1;
- ref->seq = seq;
- INIT_LIST_HEAD(&ref->list);
- INIT_LIST_HEAD(&ref->add_list);
-
- full_ref = btrfs_delayed_node_to_tree_ref(ref);
- full_ref->parent = parent;
- full_ref->root = ref_root;
- if (parent)
- ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
- else
- ref->type = BTRFS_TREE_BLOCK_REF_KEY;
- full_ref->level = level;
-
- trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
-
- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
-
- /*
- * XXX: memory should be freed at the same level allocated.
- * But bad practice is anywhere... Follow it now. Need cleanup.
- */
- if (ret > 0)
- kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
+ return head_ref;
}
/*
- * helper to insert a delayed data ref into the rbtree.
+ * init_delayed_ref_common - Initialize the structure which represents a
+ * modification to an extent.
+ *
+ * @fs_info: The fs_info structure of the mounted filesystem.
+ *
+ * @ref: The structure which is going to be initialized.
+ *
+ * @bytenr: The logical address of the extent for which a modification is
+ * going to be recorded.
+ *
+ * @num_bytes: Size of the extent whose modification is being recorded.
+ *
+ * @ref_root: The id of the root where this modification has originated, this
+ * can be either one of the well-known metadata trees or the
+ * subvolume id which references this extent.
+ *
+ * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
+ * BTRFS_ADD_DELAYED_EXTENT
+ *
+ * @ref_type: Holds the type of the extent which is being recorded, can be
+ * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
+ * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
+ * BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
*/
-static noinline void
-add_delayed_data_ref(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head_ref,
- struct btrfs_delayed_ref_node *ref, u64 bytenr,
- u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
- u64 offset, int action)
+static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_node *ref,
+ u64 bytenr, u64 num_bytes, u64 ref_root,
+ int action, u8 ref_type)
{
- struct btrfs_delayed_data_ref *full_ref;
- struct btrfs_delayed_ref_root *delayed_refs;
u64 seq = 0;
- int ret;
if (action == BTRFS_ADD_DELAYED_EXTENT)
action = BTRFS_ADD_DELAYED_REF;
- delayed_refs = &trans->transaction->delayed_refs;
-
if (is_fstree(ref_root))
seq = atomic64_read(&fs_info->tree_mod_seq);
- /* first set the basic ref node struct up */
refcount_set(&ref->refs, 1);
ref->bytenr = bytenr;
ref->num_bytes = num_bytes;
@@ -753,26 +705,9 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
- INIT_LIST_HEAD(&ref->list);
+ ref->type = ref_type;
+ RB_CLEAR_NODE(&ref->ref_node);
INIT_LIST_HEAD(&ref->add_list);
-
- full_ref = btrfs_delayed_node_to_data_ref(ref);
- full_ref->parent = parent;
- full_ref->root = ref_root;
- if (parent)
- ref->type = BTRFS_SHARED_DATA_REF_KEY;
- else
- ref->type = BTRFS_EXTENT_DATA_REF_KEY;
-
- full_ref->objectid = owner;
- full_ref->offset = offset;
-
- trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
-
- ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
-
- if (ret > 0)
- kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
/*
@@ -791,13 +726,25 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
+ int ret;
+ u8 ref_type;
BUG_ON(extent_op && extent_op->is_data);
ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
+ if (parent)
+ ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
+ else
+ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
+ ref_root, action, ref_type);
+ ref->root = ref_root;
+ ref->parent = parent;
+ ref->level = level;
+
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
goto free_ref;
@@ -809,6 +756,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
goto free_head_ref;
}
+ init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
+ ref_root, 0, action, false, is_system);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
@@ -818,14 +767,19 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
- bytenr, num_bytes, 0, 0, action, 0,
- is_system, old_ref_mod, new_ref_mod);
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ action, old_ref_mod, new_ref_mod);
- add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
- num_bytes, parent, ref_root, level, action);
+
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
+ trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
+ action == BTRFS_ADD_DELAYED_EXTENT ?
+ BTRFS_ADD_DELAYED_REF : action);
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+
return 0;
free_head_ref:
@@ -850,11 +804,25 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
+ int ret;
+ u8 ref_type;
ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
+ if (parent)
+ ref_type = BTRFS_SHARED_DATA_REF_KEY;
+ else
+ ref_type = BTRFS_EXTENT_DATA_REF_KEY;
+ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
+ ref_root, action, ref_type);
+ ref->root = ref_root;
+ ref->parent = parent;
+ ref->objectid = owner;
+ ref->offset = offset;
+
head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
@@ -872,6 +840,8 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
}
}
+ init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
+ reserved, action, true, false);
head_ref->extent_op = NULL;
delayed_refs = &trans->transaction->delayed_refs;
@@ -881,15 +851,18 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* insert both the head node and the new ref without dropping
* the spin lock
*/
- head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
- bytenr, num_bytes, ref_root, reserved,
- action, 1, 0, old_ref_mod, new_ref_mod);
+ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
+ action, old_ref_mod, new_ref_mod);
- add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
- num_bytes, parent, ref_root, owner, offset,
- action);
+ ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
+ trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
+ action == BTRFS_ADD_DELAYED_EXTENT ?
+ BTRFS_ADD_DELAYED_REF : action);
+ if (ret > 0)
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+
return 0;
}
@@ -905,19 +878,17 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
if (!head_ref)
return -ENOMEM;
+ init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
+ BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
+ false);
head_ref->extent_op = extent_op;
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- /*
- * extent_ops just modify the flags of an extent and they don't result
- * in ref count changes, hence it's safe to pass false/0 for is_system
- * argument
- */
- add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
- num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
- extent_op->is_data, 0, NULL, NULL);
+ add_delayed_ref_head(fs_info, trans, head_ref, NULL,
+ BTRFS_UPDATE_DELAYED_HEAD,
+ NULL, NULL);
spin_unlock(&delayed_refs->lock);
return 0;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 0a74a2d3e970..19e553863539 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -26,18 +26,8 @@
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
#define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
-/*
- * XXX: Qu: I really hate the design that ref_head and tree/data ref shares the
- * same ref_node structure.
- * Ref_head is in a higher logic level than tree/data ref, and duplicated
- * bytenr/num_bytes in ref_node is really a waste or memory, they should be
- * referred from ref_head.
- * This gets more disgusting after we use list to store tree/data ref in
- * ref_head. Must clean this mess up later.
- */
struct btrfs_delayed_ref_node {
- /*data/tree ref use list, stored in ref_head->ref_list. */
- struct list_head list;
+ struct rb_node ref_node;
/*
* If action is BTRFS_ADD_DELAYED_REF, also link this node to
* ref_head->ref_add_list, then we do not need to iterate the
@@ -91,8 +81,9 @@ struct btrfs_delayed_extent_op {
* reference count modifications we've queued up.
*/
struct btrfs_delayed_ref_head {
- struct btrfs_delayed_ref_node node;
-
+ u64 bytenr;
+ u64 num_bytes;
+ refcount_t refs;
/*
* the mutex is held while running the refs, and it is also
* held when checking the sum of reference modifications.
@@ -100,7 +91,7 @@ struct btrfs_delayed_ref_head {
struct mutex mutex;
spinlock_t lock;
- struct list_head ref_list;
+ struct rb_root ref_tree;
/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
struct list_head ref_add_list;
@@ -116,6 +107,14 @@ struct btrfs_delayed_ref_head {
int total_ref_mod;
/*
+ * This is the current number of outstanding mod references for this bytenr. This
+ * is used with lookup_extent_info to get an accurate reference count
+ * for a bytenr, so it is adjusted as delayed refs are run so that any
+ * on disk reference count + ref_mod is accurate.
+ */
+ int ref_mod;
+
+ /*
* when a new extent is allocated, it is just reserved in memory
* The actual extent isn't inserted into the extent allocation tree
* until the delayed ref is processed. must_insert_reserved is
@@ -189,6 +188,83 @@ struct btrfs_delayed_ref_root {
u64 qgroup_to_skip;
};
+enum btrfs_ref_type {
+ BTRFS_REF_NOT_SET,
+ BTRFS_REF_DATA,
+ BTRFS_REF_METADATA,
+ BTRFS_REF_LAST,
+};
+
+struct btrfs_data_ref {
+ /* For EXTENT_DATA_REF */
+
+ /* Root which refers to this data extent */
+ u64 ref_root;
+
+ /* Inode which refers to this data extent */
+ u64 ino;
+
+ /*
+ * file_offset - extent_offset
+ *
+ * file_offset is the key.offset of the EXTENT_DATA key.
+ * extent_offset is btrfs_file_extent_offset() of the EXTENT_DATA data.
+ */
+ u64 offset;
+};
+
+struct btrfs_tree_ref {
+ /*
+ * Level of this tree block
+ *
+ * Shared for skinny (TREE_BLOCK_REF) and normal tree ref.
+ */
+ int level;
+
+ /*
+ * Root which refers to this tree block.
+ *
+ * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
+ */
+ u64 root;
+
+ /* For non-skinny metadata, no special member needed */
+};
+
+struct btrfs_ref {
+ enum btrfs_ref_type type;
+ int action;
+
+ /*
+ * Whether this extent should go through qgroup record.
+ *
+ * Normally false, but for certain cases like delayed subtree scan,
+ * setting this flag can hugely reduce qgroup overhead.
+ */
+ bool skip_qgroup;
+
+ /*
+ * Optional. The root this modification is for.
+ * Mostly used for qgroup optimization.
+ *
+ * When unset, data/tree ref init code will populate it.
+ * In certain cases, we're modifying reference for a different root.
+ * E.g. COW fs tree blocks for balance.
+ * In that case, tree_ref::root will be fs tree, but we're doing this
+ * for reloc tree, then we should set @real_root to reloc tree.
+ */
+ u64 real_root;
+ u64 bytenr;
+ u64 len;
+
+ /* Bytenr of the parent tree block */
+ u64 parent;
+ union {
+ struct btrfs_data_ref data_ref;
+ struct btrfs_tree_ref tree_ref;
+ };
+};
+
extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
@@ -197,6 +273,38 @@ extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
int btrfs_delayed_ref_init(void);
void btrfs_delayed_ref_exit(void);
+static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
+ int action, u64 bytenr, u64 len, u64 parent)
+{
+ generic_ref->action = action;
+ generic_ref->bytenr = bytenr;
+ generic_ref->len = len;
+ generic_ref->parent = parent;
+}
+
+static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
+ int level, u64 root)
+{
+ /* If @real_root not set, use @root as fallback */
+ if (!generic_ref->real_root)
+ generic_ref->real_root = root;
+ generic_ref->tree_ref.level = level;
+ generic_ref->tree_ref.root = root;
+ generic_ref->type = BTRFS_REF_METADATA;
+}
+
+static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
+ u64 ref_root, u64 ino, u64 offset)
+{
+ /* If @real_root not set, use @root as fallback */
+ if (!generic_ref->real_root)
+ generic_ref->real_root = ref_root;
+ generic_ref->data_ref.ref_root = ref_root;
+ generic_ref->data_ref.ino = ino;
+ generic_ref->data_ref.offset = offset;
+ generic_ref->type = BTRFS_REF_DATA;
+}
+
static inline struct btrfs_delayed_extent_op *
btrfs_alloc_delayed_extent_op(void)
{
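
The new inline helpers are meant to be chained: zero a struct btrfs_ref on the stack, fill the generic fields, then specialize it as a tree or data ref. A hedged usage sketch (the variable values are placeholders; callers in this tree still go through the btrfs_add_delayed_* entry points):

	struct btrfs_ref ref = { 0 };	/* real_root unset, helpers fall back */

	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr, num_bytes,
			       parent);
	/* sets type = BTRFS_REF_METADATA and real_root = root_objectid */
	btrfs_init_tree_ref(&ref, level, root_objectid);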
@@ -224,15 +332,18 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
case BTRFS_SHARED_DATA_REF_KEY:
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
break;
- case 0:
- kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
- break;
default:
BUG();
}
}
}
+static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)
+{
+ if (refcount_dec_and_test(&head->refs))
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head);
+}
+
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 parent,
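
btrfs_put_delayed_ref_head() is the standard refcount_t put pattern: only the thread that drops the final reference frees the object, and every refcount_inc(&head->refs) at the trylock call sites above pairs with exactly one such put. Generic form (struct obj and obj_cachep are illustrative):

	static inline void obj_put(struct obj *o)
	{
		/* refcount_dec_and_test() is true only for the last reference */
		if (refcount_dec_and_test(&o->refs))
			kmem_cache_free(obj_cachep, o);
	}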
@@ -273,35 +384,17 @@ int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
u64 seq);
/*
- * a node might live in a head or a regular ref, this lets you
- * test for the proper type to use.
- */
-static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
-{
- return node->is_head;
-}
-
-/*
* helper functions to cast a node into its container
*/
static inline struct btrfs_delayed_tree_ref *
btrfs_delayed_node_to_tree_ref(struct btrfs_delayed_ref_node *node)
{
- WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_tree_ref, node);
}
static inline struct btrfs_delayed_data_ref *
btrfs_delayed_node_to_data_ref(struct btrfs_delayed_ref_node *node)
{
- WARN_ON(btrfs_delayed_ref_is_head(node));
return container_of(node, struct btrfs_delayed_data_ref, node);
}
-
-static inline struct btrfs_delayed_ref_head *
-btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
-{
- WARN_ON(!btrfs_delayed_ref_is_head(node));
- return container_of(node, struct btrfs_delayed_ref_head, node);
-}
#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d64f92cbade2..a82243503743 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4269,26 +4269,28 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
- struct btrfs_delayed_ref_node *tmp;
+ struct rb_node *n;
bool pin_bytes = false;
head = rb_entry(node, struct btrfs_delayed_ref_head,
href_node);
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
spin_lock(&delayed_refs->lock);
continue;
}
spin_lock(&head->lock);
- list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
- list) {
+ while ((n = rb_first(&head->ref_tree)) != NULL) {
+ ref = rb_entry(n, struct btrfs_delayed_ref_node,
+ ref_node);
ref->in_tree = 0;
- list_del(&ref->list);
+ rb_erase(&ref->ref_node, &head->ref_tree);
+ RB_CLEAR_NODE(&ref->ref_node);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
atomic_dec(&delayed_refs->num_entries);
@@ -4301,16 +4303,16 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
if (head->processing == 0)
delayed_refs->num_heads_ready--;
atomic_dec(&delayed_refs->num_entries);
- head->node.in_tree = 0;
rb_erase(&head->href_node, &delayed_refs->href_root);
+ RB_CLEAR_NODE(&head->href_node);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
mutex_unlock(&head->mutex);
if (pin_bytes)
- btrfs_pin_extent(fs_info, head->node.bytenr,
- head->node.num_bytes, 1);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_pin_extent(fs_info, head->bytenr,
+ head->num_bytes, 1);
+ btrfs_put_delayed_ref_head(head);
cond_resched();
spin_lock(&delayed_refs->lock);
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 64336bbeb998..e7498a453092 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -925,7 +925,7 @@ search_again:
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (head) {
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -936,7 +936,7 @@ search_again:
*/
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
goto search_again;
}
spin_lock(&head->lock);
@@ -945,7 +945,7 @@ search_again:
else
BUG_ON(num_refs == 0);
- num_refs += head->node.ref_mod;
+ num_refs += head->ref_mod;
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
}
@@ -2266,7 +2266,7 @@ static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
- struct btrfs_delayed_ref_node *node,
+ struct btrfs_delayed_ref_head *head,
struct btrfs_delayed_extent_op *extent_op)
{
struct btrfs_key key;
@@ -2288,14 +2288,14 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- key.objectid = node->bytenr;
+ key.objectid = head->bytenr;
if (metadata) {
key.type = BTRFS_METADATA_ITEM_KEY;
key.offset = extent_op->level;
} else {
key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = node->num_bytes;
+ key.offset = head->num_bytes;
}
again:
@@ -2312,17 +2312,17 @@ again:
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
path->slots[0]);
- if (key.objectid == node->bytenr &&
+ if (key.objectid == head->bytenr &&
key.type == BTRFS_EXTENT_ITEM_KEY &&
- key.offset == node->num_bytes)
+ key.offset == head->num_bytes)
ret = 0;
}
if (ret > 0) {
btrfs_release_path(path);
metadata = 0;
- key.objectid = node->bytenr;
- key.offset = node->num_bytes;
+ key.objectid = head->bytenr;
+ key.offset = head->num_bytes;
key.type = BTRFS_EXTENT_ITEM_KEY;
goto again;
}
@@ -2429,47 +2429,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
return 0;
}
- if (btrfs_delayed_ref_is_head(node)) {
- struct btrfs_delayed_ref_head *head;
- /*
- * we've hit the end of the chain and we were supposed
- * to insert this extent into the tree. But, it got
- * deleted before we ever needed to insert it, so all
- * we have to do is clean up the accounting
- */
- BUG_ON(extent_op);
- head = btrfs_delayed_node_to_head(node);
- trace_run_delayed_ref_head(fs_info, node, head, node->action);
-
- if (head->total_ref_mod < 0) {
- struct btrfs_space_info *space_info;
- u64 flags;
-
- if (head->is_data)
- flags = BTRFS_BLOCK_GROUP_DATA;
- else if (head->is_system)
- flags = BTRFS_BLOCK_GROUP_SYSTEM;
- else
- flags = BTRFS_BLOCK_GROUP_METADATA;
- space_info = __find_space_info(fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned,
- -node->num_bytes);
- }
-
- if (insert_reserved) {
- btrfs_pin_extent(fs_info, node->bytenr,
- node->num_bytes, 1);
- if (head->is_data) {
- ret = btrfs_del_csums(trans, fs_info,
- node->bytenr,
- node->num_bytes);
- }
- }
-
- return ret;
- }
-
if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
node->type == BTRFS_SHARED_BLOCK_REF_KEY)
ret = run_delayed_tree_ref(trans, fs_info, node, extent_op,
@@ -2488,7 +2447,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_ref_node *ref;
- if (list_empty(&head->ref_list))
+ if (RB_EMPTY_ROOT(&head->ref_tree))
return NULL;
/*
@@ -2501,12 +2460,116 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
return list_first_entry(&head->ref_add_list,
struct btrfs_delayed_ref_node, add_list);
- ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
- list);
+ ref = rb_entry(rb_first(&head->ref_tree),
+ struct btrfs_delayed_ref_node, ref_node);
ASSERT(list_empty(&ref->add_list));
return ref;
}
+static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ spin_lock(&delayed_refs->lock);
+ head->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(head);
+}
+
+static int cleanup_extent_op(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+ int ret;
+
+ if (!extent_op)
+ return 0;
+ head->extent_op = NULL;
+ if (head->must_insert_reserved) {
+ btrfs_free_delayed_extent_op(extent_op);
+ return 0;
+ }
+ spin_unlock(&head->lock);
+ ret = run_delayed_extent_op(trans, fs_info, head, extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
+ return ret ? ret : 1;
+}
+
+static int cleanup_ref_head(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_ref_root *delayed_refs;
+ int ret;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+
+ ret = cleanup_extent_op(trans, fs_info, head);
+ if (ret < 0) {
+ unselect_delayed_ref_head(delayed_refs, head);
+ btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
+ return ret;
+ } else if (ret) {
+ return ret;
+ }
+
+ /*
+ * Need to drop our head ref lock and re-acquire the delayed ref lock
+ * and then re-check to make sure nobody got added.
+ */
+ spin_unlock(&head->lock);
+ spin_lock(&delayed_refs->lock);
+ spin_lock(&head->lock);
+ if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+ spin_unlock(&head->lock);
+ spin_unlock(&delayed_refs->lock);
+ return 1;
+ }
+ delayed_refs->num_heads--;
+ rb_erase(&head->href_node, &delayed_refs->href_root);
+ RB_CLEAR_NODE(&head->href_node);
+ spin_unlock(&delayed_refs->lock);
+ spin_unlock(&head->lock);
+ atomic_dec(&delayed_refs->num_entries);
+
+ trace_run_delayed_ref_head(fs_info, head, 0);
+
+ if (head->total_ref_mod < 0) {
+ struct btrfs_space_info *space_info;
+ u64 flags;
+
+ if (head->is_data)
+ flags = BTRFS_BLOCK_GROUP_DATA;
+ else if (head->is_system)
+ flags = BTRFS_BLOCK_GROUP_SYSTEM;
+ else
+ flags = BTRFS_BLOCK_GROUP_METADATA;
+ space_info = __find_space_info(fs_info, flags);
+ ASSERT(space_info);
+ percpu_counter_add(&space_info->total_bytes_pinned,
+ -head->num_bytes);
+ if (head->is_data) {
+ spin_lock(&delayed_refs->lock);
+ delayed_refs->pending_csums -= head->num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ }
+ }
+
+ if (head->must_insert_reserved) {
+ btrfs_pin_extent(fs_info, head->bytenr,
+ head->num_bytes, 1);
+ if (head->is_data) {
+ ret = btrfs_del_csums(trans, fs_info, head->bytenr,
+ head->num_bytes);
+ }
+ }
+
+ btrfs_delayed_ref_unlock(head);
+ btrfs_put_delayed_ref_head(head);
+ return 0;
+}
+
/*
* Returns 0 on success or if called with an already aborted transaction.
* Returns -ENOMEM or -EIO on failure and will abort the transaction.
@@ -2580,11 +2643,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
- spin_lock(&delayed_refs->lock);
- locked_ref->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_delayed_ref_unlock(locked_ref);
+ unselect_delayed_ref_head(delayed_refs, locked_ref);
locked_ref = NULL;
cond_resched();
count++;
@@ -2592,102 +2651,55 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
/*
- * record the must insert reserved flag before we
- * drop the spin lock.
+ * We're done processing refs in this ref_head; clean everything
+ * up and move on to the next ref_head.
*/
- must_insert_reserved = locked_ref->must_insert_reserved;
- locked_ref->must_insert_reserved = 0;
-
- extent_op = locked_ref->extent_op;
- locked_ref->extent_op = NULL;
-
if (!ref) {
-
-
- /* All delayed refs have been processed, Go ahead
- * and send the head node to run_one_delayed_ref,
- * so that any accounting fixes can happen
- */
- ref = &locked_ref->node;
-
- if (extent_op && must_insert_reserved) {
- btrfs_free_delayed_extent_op(extent_op);
- extent_op = NULL;
- }
-
- if (extent_op) {
- spin_unlock(&locked_ref->lock);
- ret = run_delayed_extent_op(trans, fs_info,
- ref, extent_op);
- btrfs_free_delayed_extent_op(extent_op);
-
- if (ret) {
- /*
- * Need to reset must_insert_reserved if
- * there was an error so the abort stuff
- * can cleanup the reserved space
- * properly.
- */
- if (must_insert_reserved)
- locked_ref->must_insert_reserved = 1;
- spin_lock(&delayed_refs->lock);
- locked_ref->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_debug(fs_info,
- "run_delayed_extent_op returned %d",
- ret);
- btrfs_delayed_ref_unlock(locked_ref);
- return ret;
- }
+ ret = cleanup_ref_head(trans, fs_info, locked_ref);
+ if (ret > 0) {
+ /* We dropped our lock, we need to loop. */
+ ret = 0;
continue;
+ } else if (ret) {
+ return ret;
}
+ locked_ref = NULL;
+ count++;
+ continue;
+ }
- /*
- * Need to drop our head ref lock and re-acquire the
- * delayed ref lock and then re-check to make sure
- * nobody got added.
- */
- spin_unlock(&locked_ref->lock);
- spin_lock(&delayed_refs->lock);
- spin_lock(&locked_ref->lock);
- if (!list_empty(&locked_ref->ref_list) ||
- locked_ref->extent_op) {
- spin_unlock(&locked_ref->lock);
- spin_unlock(&delayed_refs->lock);
- continue;
- }
- ref->in_tree = 0;
- delayed_refs->num_heads--;
- rb_erase(&locked_ref->href_node,
- &delayed_refs->href_root);
- spin_unlock(&delayed_refs->lock);
- } else {
- actual_count++;
- ref->in_tree = 0;
- list_del(&ref->list);
- if (!list_empty(&ref->add_list))
- list_del(&ref->add_list);
+ actual_count++;
+ ref->in_tree = 0;
+ rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+ RB_CLEAR_NODE(&ref->ref_node);
+ if (!list_empty(&ref->add_list))
+ list_del(&ref->add_list);
+ /*
+ * When we play the delayed ref, also correct the ref_mod on
+ * head
+ */
+ switch (ref->action) {
+ case BTRFS_ADD_DELAYED_REF:
+ case BTRFS_ADD_DELAYED_EXTENT:
+ locked_ref->ref_mod -= ref->ref_mod;
+ break;
+ case BTRFS_DROP_DELAYED_REF:
+ locked_ref->ref_mod += ref->ref_mod;
+ break;
+ default:
+ WARN_ON(1);
}
atomic_dec(&delayed_refs->num_entries);
- if (!btrfs_delayed_ref_is_head(ref)) {
- /*
- * when we play the delayed ref, also correct the
- * ref_mod on head
- */
- switch (ref->action) {
- case BTRFS_ADD_DELAYED_REF:
- case BTRFS_ADD_DELAYED_EXTENT:
- locked_ref->node.ref_mod -= ref->ref_mod;
- break;
- case BTRFS_DROP_DELAYED_REF:
- locked_ref->node.ref_mod += ref->ref_mod;
- break;
- default:
- WARN_ON(1);
- }
- }
+ /*
+ * Record the must_insert_reserved flag before we drop the spin
+ * lock.
+ */
+ must_insert_reserved = locked_ref->must_insert_reserved;
+ locked_ref->must_insert_reserved = 0;
+
+ extent_op = locked_ref->extent_op;
+ locked_ref->extent_op = NULL;
spin_unlock(&locked_ref->lock);
ret = run_one_delayed_ref(trans, fs_info, ref, extent_op,
@@ -2695,33 +2707,13 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- spin_lock(&delayed_refs->lock);
- locked_ref->processing = 0;
- delayed_refs->num_heads_ready++;
- spin_unlock(&delayed_refs->lock);
- btrfs_delayed_ref_unlock(locked_ref);
+ unselect_delayed_ref_head(delayed_refs, locked_ref);
btrfs_put_delayed_ref(ref);
btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
ret);
return ret;
}
- /*
- * If this node is a head, that means all the refs in this head
- * have been dealt with, and we will pick the next head to deal
- * with, so we must unlock the head and drop it from the cluster
- * list before we release it.
- */
- if (btrfs_delayed_ref_is_head(ref)) {
- if (locked_ref->is_data &&
- locked_ref->total_ref_mod < 0) {
- spin_lock(&delayed_refs->lock);
- delayed_refs->pending_csums -= ref->num_bytes;
- spin_unlock(&delayed_refs->lock);
- }
- btrfs_delayed_ref_unlock(locked_ref);
- locked_ref = NULL;
- }
btrfs_put_delayed_ref(ref);
count++;
cond_resched();
@@ -3023,33 +3015,16 @@ again:
spin_unlock(&delayed_refs->lock);
goto out;
}
+ head = rb_entry(node, struct btrfs_delayed_ref_head,
+ href_node);
+ refcount_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
- while (node) {
- head = rb_entry(node, struct btrfs_delayed_ref_head,
- href_node);
- if (btrfs_delayed_ref_is_head(&head->node)) {
- struct btrfs_delayed_ref_node *ref;
-
- ref = &head->node;
- refcount_inc(&ref->refs);
-
- spin_unlock(&delayed_refs->lock);
- /*
- * Mutex was contended, block until it's
- * released and try again
- */
- mutex_lock(&head->mutex);
- mutex_unlock(&head->mutex);
+ /* Mutex was contended, block until it's released and retry. */
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(ref);
- cond_resched();
- goto again;
- } else {
- WARN_ON(1);
- }
- node = rb_next(node);
- }
- spin_unlock(&delayed_refs->lock);
+ btrfs_put_delayed_ref_head(head);
cond_resched();
goto again;
}
@@ -3091,6 +3066,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
struct btrfs_delayed_data_ref *data_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_transaction *cur_trans;
+ struct rb_node *node;
int ret = 0;
spin_lock(&root->fs_info->trans_lock);
@@ -3111,7 +3087,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
}
if (!mutex_trylock(&head->mutex)) {
- refcount_inc(&head->node.refs);
+ refcount_inc(&head->refs);
spin_unlock(&delayed_refs->lock);
btrfs_release_path(path);
@@ -3122,14 +3098,19 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
*/
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
btrfs_put_transaction(cur_trans);
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
spin_lock(&head->lock);
- list_for_each_entry(ref, &head->ref_list, list) {
+ /*
+ * XXX: We should replace this with a proper search function in the
+ * future.
+ */
+ for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
/* If it's a shared ref we know a cross reference exists */
if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
ret = 1;
@@ -3277,10 +3258,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
int i;
int level;
int ret = 0;
- int (*process_func)(struct btrfs_trans_handle *,
- struct btrfs_fs_info *,
- u64, u64, u64, u64, u64, u64);
-
if (btrfs_is_testing(fs_info))
return 0;
@@ -3292,11 +3269,6 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
return 0;
- if (inc)
- process_func = btrfs_inc_extent_ref;
- else
- process_func = btrfs_free_extent;
-
if (full_backref)
parent = buf->start;
else
@@ -3318,16 +3290,27 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
key.offset -= btrfs_file_extent_offset(buf, fi);
- ret = process_func(trans, fs_info, bytenr, num_bytes,
- parent, ref_root, key.objectid,
- key.offset);
+ if (inc)
+ ret = btrfs_inc_extent_ref(trans, fs_info, bytenr,
+ num_bytes, parent, ref_root,
+ key.objectid, key.offset);
+ else
+ ret = btrfs_free_extent(trans, fs_info, bytenr,
+ num_bytes, parent, ref_root,
+ key.objectid, key.offset);
if (ret)
goto fail;
} else {
bytenr = btrfs_node_blockptr(buf, i);
num_bytes = fs_info->nodesize;
- ret = process_func(trans, fs_info, bytenr, num_bytes,
- parent, ref_root, level - 1, 0);
+ if (inc)
+ ret = btrfs_inc_extent_ref(trans, fs_info, bytenr,
+ num_bytes, parent, ref_root,
+ level - 1, 0);
+ else
+ ret = btrfs_free_extent(trans, fs_info, bytenr,
+ num_bytes, parent, ref_root,
+ level - 1, 0);
if (ret)
goto fail;
}
@@ -7087,7 +7070,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
goto out_delayed_unlock;
spin_lock(&head->lock);
- if (!list_empty(&head->ref_list))
+ if (!RB_EMPTY_ROOT(&head->ref_tree))
goto out;
if (head->extent_op) {
@@ -7108,9 +7091,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
* at this point we have a head with no other entries. Go
* ahead and process it.
*/
- head->node.in_tree = 0;
rb_erase(&head->href_node, &delayed_refs->href_root);
-
+ RB_CLEAR_NODE(&head->href_node);
atomic_dec(&delayed_refs->num_entries);
/*
@@ -7129,7 +7111,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
ret = 1;
mutex_unlock(&head->mutex);
- btrfs_put_delayed_ref(&head->node);
+ btrfs_put_delayed_ref_head(head);
return ret;
out:
spin_unlock(&head->lock);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2b4fcb1570a2..4d4bbc95eda0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1661,6 +1661,12 @@ void task_dump_owner(struct task_struct *task, mode_t mode,
kuid_t uid;
kgid_t gid;
+ if (unlikely(task->flags & PF_KTHREAD)) {
+ *ruid = GLOBAL_ROOT_UID;
+ *rgid = GLOBAL_ROOT_GID;
+ return;
+ }
+
/* Default to the tasks effective ownership */
rcu_read_lock();
cred = __task_cred(task);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 82e6787d976e..08bf08a52e80 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -208,25 +208,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
struct list_head *head = (struct list_head *)arg;
struct kcore_list *ent;
+ struct page *p;
+
+ if (!pfn_valid(pfn))
+ return 1;
+
+ p = pfn_to_page(pfn);
+ if (!memmap_valid_within(pfn, p, page_zone(p)))
+ return 1;
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
return -ENOMEM;
- ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
+ ent->addr = (unsigned long)page_to_virt(p);
ent->size = nr_pages << PAGE_SHIFT;
- /* Sanity check: Can happen in 32bit arch...maybe */
- if (ent->addr < (unsigned long) __va(0))
+ if (!virt_addr_valid(ent->addr))
goto free_out;
/* cut not-mapped area. ....from ppc-32 code. */
if (ULONG_MAX - ent->addr < ent->size)
ent->size = ULONG_MAX - ent->addr;
- /* cut when vmalloc() area is higher than direct-map area */
- if (VMALLOC_START > (unsigned long)__va(0)) {
- if (ent->addr > VMALLOC_START)
- goto free_out;
+ /*
+ * We've already checked virt_addr_valid, so we know this address
+ * is a valid pointer; therefore we can check against it to determine
+ * whether we need to trim.
+ */
+ if (VMALLOC_START > ent->addr) {
if (VMALLOC_START - ent->addr < ent->size)
ent->size = VMALLOC_START - ent->addr;
}
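Both trims above use the same overflow-safe clamping idiom: never compute addr + size, which can wrap, and compare the remaining distance to the limit instead. A stand-alone sketch (function name hypothetical):

#include <limits.h>
#include <stdio.h>

static unsigned long clamp_below(unsigned long addr, unsigned long size,
				 unsigned long limit)
{
	/* addr + size could overflow, so compare distances instead */
	if (limit > addr && limit - addr < size)
		size = limit - addr;
	return size;
}

int main(void)
{
	printf("%lu\n", clamp_below(100, ULONG_MAX, 160));	/* prints 60 */
	return 0;
}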
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 229eef0da61e..c4d2daa2e2da 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -1,28 +1,9 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example,
+ * GENMASK_ULL(39, 21) gives us the 64-bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
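The subtraction form ((~0UL) - (1UL << (l)) + 1) equals all-ones with the low l bits cleared, i.e. the same high-side mask the old shift form produced. A minimal user-space check of that (assuming a 64-bit unsigned long; not part of the patch):

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG 64
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* left factor sets all bits >= l; right factor clears bits > h */
	assert(GENMASK(6, 2) == 0x7cUL);
	assert(GENMASK(39, 21) == 0x000000ffffe00000UL);
	printf("GENMASK(39, 21) = %#lx\n", GENMASK(39, 21));
	return 0;
}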
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index cf5b0d9c4483..9bd30749d04f 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -56,6 +56,8 @@ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 234545fb4e66..7cd8ae671c06 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -145,8 +145,15 @@ struct hd_struct {
enum {
DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 16,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 17,
};
+#define DISK_EVENT_TYPES_MASK \
+ (DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST)
+
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 939b6bb5c0a7..e01ac6e19ab8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -60,8 +60,8 @@
#define u64_to_user_ptr(x) ( \
{ \
- typecheck(u64, x); \
- (void __user *)(uintptr_t)x; \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
} \
)
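The added parentheses are standard macro hygiene: a cast binds tighter than binary operators, so an unparenthesized argument can be split apart by the expansion. A stand-alone illustration with a hypothetical macro (deliberately not the kernel one):

#include <stdint.h>
#include <stdio.h>

#define LOW32_BAD(x)	((uint32_t)x)	/* cast grabs only the first operand */
#define LOW32_GOOD(x)	((uint32_t)(x))	/* cast applies to the whole argument */

int main(void)
{
	uint64_t a = 1, b = 0x100000000ULL;

	/* expands to ((uint32_t)a + b): a is truncated, b keeps its high bits */
	printf("%llu\n", (unsigned long long)LOW32_BAD(a + b));	/* 4294967297 */
	/* expands to ((uint32_t)(a + b)): the sum itself is truncated */
	printf("%llu\n", (unsigned long long)LOW32_GOOD(a + b));	/* 1 */
	return 0;
}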
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index b8686c00f15f..fef4b081b736 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -60,6 +60,6 @@ static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
{
return -ENOSYS;
}
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
#endif /* __ELM_H */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 08fad7c6a471..5c2e26a354bf 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -638,7 +638,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -647,10 +646,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
-
-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
-}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 270cfa81830e..d502e15e4e8e 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -62,7 +62,7 @@ struct virtqueue;
/*
* Creates a virtqueue and allocates the descriptor ring. If
* may_reduce_num is set, then this may allocate a smaller ring than
- * expected. The caller should query virtqueue_get_ring_size to learn
+ * expected. The caller should query virtqueue_get_vring_size to learn
* the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 95ccc1eef558..c9a60089b7e5 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -178,6 +178,9 @@ struct adv_info {
#define HCI_MAX_SHORT_NAME_LENGTH 10
+/* Min encryption key size to match SMP */
+#define HCI_MIN_ENC_KEY_SIZE 7
+
/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
diff --git a/include/sound/core.h b/include/sound/core.h
index f7d8c10c4c45..9c2e4b514c64 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -241,7 +241,6 @@ int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size
/* init.c */
-extern struct snd_card *snd_cards[SNDRV_CARDS];
int snd_card_locked(int card);
#if defined(CONFIG_SND_MIXER_OSS) || defined(CONFIG_SND_MIXER_OSS_MODULE)
#define SND_MIXER_OSS_NOTIFY_REGISTER 0
@@ -265,7 +264,20 @@ int snd_card_add_dev_attr(struct snd_card *card,
int snd_component_add(struct snd_card *card, const char *component);
int snd_card_file_add(struct snd_card *card, struct file *file);
int snd_card_file_remove(struct snd_card *card, struct file *file);
-#define snd_card_unref(card) put_device(&(card)->card_dev)
+
+struct snd_card *snd_card_ref(int card);
+
+/**
+ * snd_card_unref - Unreference the card object
+ * @card: the card object to unreference
+ *
+ * Call this function for the card object that was obtained via snd_card_ref()
+ * or snd_lookup_minor_data().
+ */
+static inline void snd_card_unref(struct snd_card *card)
+{
+ put_device(&card->card_dev);
+}
#define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 4b9ee3009aa0..e43d04a1d663 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -73,7 +73,10 @@ __printf(3, 4)
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
const char *name_fmt, ...);
int snd_seq_delete_kernel_client(int client);
-int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev, int atomic, int hop);
+/* XXX kABI compatibility for SLE15 XXX */
+#define snd_seq_kernel_client_enqueue __snd_seq_kernel_client_enqueue
+int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ struct file *file, bool blocking);
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event *ev, int atomic, int hop);
int snd_seq_kernel_client_ctl(int client, unsigned int cmd, void *arg);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index aad5303e8d58..1387a243794d 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -797,11 +797,10 @@ DEFINE_EVENT(btrfs_delayed_data_ref, run_delayed_data_ref,
DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action),
+ TP_ARGS(fs_info, head_ref, action),
TP_STRUCT__entry_btrfs(
__field( u64, bytenr )
@@ -811,8 +810,8 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
),
TP_fast_assign_btrfs(fs_info,
- __entry->bytenr = ref->bytenr;
- __entry->num_bytes = ref->num_bytes;
+ __entry->bytenr = head_ref->bytenr;
+ __entry->num_bytes = head_ref->num_bytes;
__entry->action = action;
__entry->is_data = head_ref->is_data;
),
@@ -827,21 +826,19 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
DEFINE_EVENT(btrfs_delayed_ref_head, add_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action)
+ TP_ARGS(fs_info, head_ref, action)
);
DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_delayed_ref_node *ref,
const struct btrfs_delayed_ref_head *head_ref,
int action),
- TP_ARGS(fs_info, ref, head_ref, action)
+ TP_ARGS(fs_info, head_ref, action)
);
#define show_chunk_type(type) \
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 316d2f3596a1..34d2b08233eb 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -900,7 +900,7 @@ struct drm_i915_gem_execbuffer2 {
* struct drm_i915_gem_exec_fence *fences.
*/
__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index a9ae498993c3..d0c04b0a03ae 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -125,6 +125,7 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused four = 4;
+static unsigned long zero_ul;
static unsigned long one_ul = 1;
static unsigned long long_max = LONG_MAX;
static int one_hundred = 100;
@@ -1707,7 +1708,7 @@ static struct ctl_table fs_table[] = {
.maxlen = sizeof(files_stat.max_files),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
- .extra1 = &zero,
+ .extra1 = &zero_ul,
.extra2 = &long_max,
},
{
@@ -2567,7 +2568,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
{
struct do_proc_dointvec_minmax_conv_param *param = data;
if (write) {
- int val = *negp ? -*lvalp : *lvalp;
+ int val;
+ if (*negp) {
+ if (*lvalp > (unsigned long) INT_MAX + 1)
+ return -EINVAL;
+ val = -*lvalp;
+ } else {
+ if (*lvalp > (unsigned long) INT_MAX)
+ return -EINVAL;
+ val = *lvalp;
+ }
if ((param->min && *param->min > val) ||
(param->max && *param->max < val))
return -EINVAL;
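The new bounds track the asymmetric int range: for a negative write the magnitude may be as large as (unsigned long)INT_MAX + 1, i.e. -INT_MIN = 2147483648, while a positive write is capped at INT_MAX; previously the value was converted to int with no range check at all. A small sketch of the same check (function name hypothetical; assumes the usual two's-complement conversion):

#include <limits.h>
#include <stdio.h>

static int parse_int(int negative, unsigned long lval, int *out)
{
	if (negative) {
		if (lval > (unsigned long)INT_MAX + 1)
			return -1;	/* below INT_MIN */
		*out = -lval;		/* 2147483648 wraps to INT_MIN */
	} else {
		if (lval > (unsigned long)INT_MAX)
			return -1;	/* above INT_MAX */
		*out = lval;
	}
	return 0;
}

int main(void)
{
	int v;

	printf("%d\n", parse_int(1, 2147483648UL, &v));	/* 0, v == INT_MIN */
	printf("%d\n", parse_int(0, 2147483648UL, &v));	/* -1, rejected */
	return 0;
}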
@@ -2745,6 +2755,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
bool neg;
left -= proc_skip_spaces(&p);
+ if (!left)
+ break;
err = proc_get_long(&p, &left, &val, &neg,
proc_wspace_sep,
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index dc59eae54717..c02864a9ef24 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1152,6 +1152,14 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
return 0;
+ /* The minimum encryption key size needs to be enforced by the
+ * host stack before establishing any L2CAP connections. The
+ * specification in theory allows a minimum of 1, but to align
+ * BR/EDR and LE transports, a minimum of 7 is chosen.
+ */
+ if (conn->enc_key_size < HCI_MIN_ENC_KEY_SIZE)
+ return 0;
+
return 1;
}
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 008ba439bd62..cc80c76177b6 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -76,6 +76,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
sockfd_put(csock);
return err;
}
+ ca.name[sizeof(ca.name)-1] = 0;
err = hidp_connection_add(&ca, csock, isock);
if (!err && copy_to_user(argp, &ca, sizeof(ca)))
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index b598c81aef69..9bee2f9ca11e 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -509,7 +509,7 @@ static void rds_tcp_kill_sock(struct net *net)
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
- if (net != c_net || !tc->t_sock)
+ if (net != c_net)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index fdb294441682..2ff751eba037 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
{
struct virtio_vsock *vsock = virtio_vsock_get();
+ if (!vsock)
+ return VMADDR_CID_ANY;
+
return vsock->guest_cid;
}
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
virtio_vsock_update_guest_cid(vsock);
- ret = vsock_core_init(&virtio_transport.transport);
- if (ret < 0)
- goto out_vqs;
-
vsock->rx_buf_nr = 0;
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
-out_vqs:
- vsock->vdev->config->del_vqs(vsock->vdev);
out:
kfree(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
flush_work(&vsock->event_work);
flush_work(&vsock->send_pkt_work);
+ /* Reset all connected sockets when the device disappears */
+ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
mutex_lock(&the_virtio_vsock_mutex);
the_virtio_vsock = NULL;
- vsock_core_exit();
mutex_unlock(&the_virtio_vsock_mutex);
vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
if (!virtio_vsock_workqueue)
return -ENOMEM;
+
ret = register_virtio_driver(&virtio_vsock_driver);
if (ret)
- destroy_workqueue(virtio_vsock_workqueue);
+ goto out_wq;
+
+ ret = vsock_core_init(&virtio_transport.transport);
+ if (ret)
+ goto out_vdr;
+
+ return 0;
+
+out_vdr:
+ unregister_virtio_driver(&virtio_vsock_driver);
+out_wq:
+ destroy_workqueue(virtio_vsock_workqueue);
return ret;
+
}
static void __exit virtio_vsock_exit(void)
{
+ vsock_core_exit();
unregister_virtio_driver(&virtio_vsock_driver);
destroy_workqueue(virtio_vsock_workqueue);
}
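The reworked init path registers the virtio driver first and the vsock core second, with error labels unwinding in reverse order of setup, and the exit path tears the two down in the opposite order of init. A schematic of that ladder (steps hypothetical):

#include <stdio.h>

static int step_a(void)  { puts("a up");     return 0; }
static void undo_a(void) { puts("a down"); }
static int step_b(void)  { puts("b failed"); return -1; }	/* simulated failure */

static int init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto out_a;	/* unwind in reverse order of setup */
	return 0;

out_a:
	undo_a();
out:
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;	/* prints "a up", "b failed", "a down" */
}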
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 7d6ee03f2762..8e5b557521cb 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -663,6 +663,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
*/
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
{
+ const struct virtio_transport *t;
+ struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
.type = le16_to_cpu(pkt->hdr.type),
@@ -673,15 +675,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
return 0;
- pkt = virtio_transport_alloc_pkt(&info, 0,
- le64_to_cpu(pkt->hdr.dst_cid),
- le32_to_cpu(pkt->hdr.dst_port),
- le64_to_cpu(pkt->hdr.src_cid),
- le32_to_cpu(pkt->hdr.src_port));
- if (!pkt)
+ reply = virtio_transport_alloc_pkt(&info, 0,
+ le64_to_cpu(pkt->hdr.dst_cid),
+ le32_to_cpu(pkt->hdr.dst_port),
+ le64_to_cpu(pkt->hdr.src_cid),
+ le32_to_cpu(pkt->hdr.src_port));
+ if (!reply)
return -ENOMEM;
- return virtio_transport_get_ops()->send_pkt(pkt);
+ t = virtio_transport_get_ops();
+ if (!t) {
+ virtio_transport_free_pkt(reply);
+ return -ENOTCONN;
+ }
+
+ return t->send_pkt(reply);
}
static void virtio_transport_wait_close(struct sock *sk, long timeout)
diff --git a/sound/core/init.c b/sound/core/init.c
index 430efc8424a5..2298f1c30e47 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -50,7 +50,7 @@ static const struct file_operations snd_shutdown_f_ops;
/* locked for registering/using */
static DECLARE_BITMAP(snd_cards_lock, SNDRV_CARDS);
struct snd_card *snd_cards[SNDRV_CARDS];
-EXPORT_SYMBOL(snd_cards);
+EXPORT_SYMBOL(snd_cards); /* XXX exported only for SLE15 kABI compatibility */
static DEFINE_MUTEX(snd_card_mutex);
@@ -294,6 +294,26 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
}
EXPORT_SYMBOL(snd_card_new);
+/**
+ * snd_card_ref - Get the card object from the index
+ * @idx: the card index
+ *
+ * Returns a card object corresponding to the given index or NULL if not found.
+ * Release the object via snd_card_unref().
+ */
+struct snd_card *snd_card_ref(int idx)
+{
+ struct snd_card *card;
+
+ mutex_lock(&snd_card_mutex);
+ card = snd_cards[idx];
+ if (card)
+ get_device(&card->card_dev);
+ mutex_unlock(&snd_card_mutex);
+ return card;
+}
+EXPORT_SYMBOL_GPL(snd_card_ref);
+
/* return non-zero if a card is already locked */
int snd_card_locked(int card)
{
diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c
index 2ff9c12d664a..ec9409d69c47 100644
--- a/sound/core/oss/mixer_oss.c
+++ b/sound/core/oss/mixer_oss.c
@@ -1402,24 +1402,32 @@ static int snd_mixer_oss_notify_handler(struct snd_card *card, int cmd)
static int __init alsa_mixer_oss_init(void)
{
+ struct snd_card *card;
int idx;
snd_mixer_oss_notify_callback = snd_mixer_oss_notify_handler;
for (idx = 0; idx < SNDRV_CARDS; idx++) {
- if (snd_cards[idx])
- snd_mixer_oss_notify_handler(snd_cards[idx], SND_MIXER_OSS_NOTIFY_REGISTER);
+ card = snd_card_ref(idx);
+ if (card) {
+ snd_mixer_oss_notify_handler(card, SND_MIXER_OSS_NOTIFY_REGISTER);
+ snd_card_unref(card);
+ }
}
return 0;
}
static void __exit alsa_mixer_oss_exit(void)
{
+ struct snd_card *card;
int idx;
snd_mixer_oss_notify_callback = NULL;
for (idx = 0; idx < SNDRV_CARDS; idx++) {
- if (snd_cards[idx])
- snd_mixer_oss_notify_handler(snd_cards[idx], SND_MIXER_OSS_NOTIFY_FREE);
+ card = snd_card_ref(idx);
+ if (card) {
+ snd_mixer_oss_notify_handler(card, SND_MIXER_OSS_NOTIFY_FREE);
+ snd_card_unref(card);
+ }
}
}
diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
index afa007c0cc2d..0afc812fa3c0 100644
--- a/sound/core/seq/oss/seq_oss_device.h
+++ b/sound/core/seq/oss/seq_oss_device.h
@@ -30,6 +30,7 @@
#include <sound/rawmidi.h>
#include <sound/seq_kernel.h>
#include <sound/info.h>
+#include "../seq_clientmgr.h"
/* max. applications */
#define SNDRV_SEQ_OSS_MAX_CLIENTS 16
@@ -150,11 +151,16 @@ snd_seq_oss_dispatch(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, int a
return snd_seq_kernel_client_dispatch(dp->cseq, ev, atomic, hop);
}
-/* ioctl */
+/* ioctl for writeq */
static inline int
snd_seq_oss_control(struct seq_oss_devinfo *dp, unsigned int type, void *arg)
{
- return snd_seq_kernel_client_ctl(dp->cseq, type, arg);
+ int err;
+
+ snd_seq_client_ioctl_lock(dp->cseq);
+ err = snd_seq_kernel_client_ctl(dp->cseq, type, arg);
+ snd_seq_client_ioctl_unlock(dp->cseq);
+ return err;
}
/* fill the addresses in header */
diff --git a/sound/core/seq/oss/seq_oss_rw.c b/sound/core/seq/oss/seq_oss_rw.c
index 6a7b6aceeca9..098d4c969dd5 100644
--- a/sound/core/seq/oss/seq_oss_rw.c
+++ b/sound/core/seq/oss/seq_oss_rw.c
@@ -180,14 +180,11 @@ insert_queue(struct seq_oss_devinfo *dp, union evrec *rec, struct file *opt)
return 0; /* invalid event - no need to insert queue */
event.time.tick = snd_seq_oss_timer_cur_tick(dp->timer);
- if (dp->timer->realtime || !dp->timer->running) {
+ if (dp->timer->realtime || !dp->timer->running)
snd_seq_oss_dispatch(dp, &event, 0, 0);
- } else {
- if (is_nonblock_mode(dp->file_mode))
- rc = snd_seq_kernel_client_enqueue(dp->cseq, &event, 0, 0);
- else
- rc = snd_seq_kernel_client_enqueue_blocking(dp->cseq, &event, opt, 0, 0);
- }
+ else
+ rc = snd_seq_kernel_client_enqueue(dp->cseq, &event, opt,
+ !is_nonblock_mode(dp->file_mode));
return rc;
}
diff --git a/sound/core/seq/oss/seq_oss_writeq.c b/sound/core/seq/oss/seq_oss_writeq.c
index 5e04f4df10e4..b2f69617591f 100644
--- a/sound/core/seq/oss/seq_oss_writeq.c
+++ b/sound/core/seq/oss/seq_oss_writeq.c
@@ -116,7 +116,7 @@ snd_seq_oss_writeq_sync(struct seq_oss_writeq *q)
rec->t.code = SEQ_SYNCTIMER;
rec->t.time = time;
q->sync_event_put = 1;
- snd_seq_kernel_client_enqueue_blocking(dp->cseq, &ev, NULL, 0, 0);
+ snd_seq_kernel_client_enqueue(dp->cseq, &ev, NULL, true);
}
wait_event_interruptible_timeout(q->sync_sleep, ! q->sync_event_put, HZ);
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 57547a1e3d5c..9665f87919e9 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -179,6 +179,41 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
return client;
}
+/* Take a refcount and lock ioctl_mutex on the given client;
+ * used only for OSS sequencer
+ * Unlock via snd_seq_client_ioctl_unlock() below
+ */
+bool snd_seq_client_ioctl_lock(int clientid)
+{
+ struct snd_seq_client *client;
+
+ client = snd_seq_client_use_ptr(clientid);
+ if (!client)
+ return false;
+ mutex_lock(&client->ioctl_mutex);
+ /* The client isn't unrefed here; see snd_seq_client_ioctl_unlock() */
+ return true;
+}
+EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_lock);
+
+/* Unlock and unref the given client; for OSS sequencer use only */
+void snd_seq_client_ioctl_unlock(int clientid)
+{
+ struct snd_seq_client *client;
+
+ client = snd_seq_client_use_ptr(clientid);
+ if (WARN_ON(!client))
+ return;
+ mutex_unlock(&client->ioctl_mutex);
+ /* The two unrefs below are intentional; the first releases the leftover
+ * from snd_seq_client_ioctl_lock() above, and the second releases the
+ * reference taken by snd_seq_client_use_ptr() in this function
+ */
+ snd_seq_client_unlock(client);
+ snd_seq_client_unlock(client);
+}
+EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_unlock);
+
static void usage_alloc(struct snd_seq_usage *res, int num)
{
res->cur += num;
@@ -2225,12 +2260,13 @@ int snd_seq_delete_kernel_client(int client)
EXPORT_SYMBOL(snd_seq_delete_kernel_client);
-/* skeleton to enqueue event, called from snd_seq_kernel_client_enqueue
- * and snd_seq_kernel_client_enqueue_blocking
+/*
+ * exported, called by kernel clients to enqueue events (optionally blocking)
+ *
+ * RETURN VALUE: zero on success, negative on error
*/
-static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
- struct file *file, int blocking,
- int atomic, int hop)
+int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ struct file *file, bool blocking)
{
struct snd_seq_client *cptr;
int result;
@@ -2253,43 +2289,21 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
if (cptr == NULL)
return -EINVAL;
- if (! cptr->accept_output)
+ if (!cptr->accept_output) {
result = -EPERM;
- else /* send it */
+ } else { /* send it */
+ mutex_lock(&cptr->ioctl_mutex);
result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
- atomic, hop, NULL);
+ false, 0,
+ &cptr->ioctl_mutex);
+ mutex_unlock(&cptr->ioctl_mutex);
+ }
snd_seq_client_unlock(cptr);
return result;
}
-
-/*
- * exported, called by kernel clients to enqueue events (w/o blocking)
- *
- * RETURN VALUE: zero if succeed, negative if error
- */
-int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event * ev,
- int atomic, int hop)
-{
- return kernel_client_enqueue(client, ev, NULL, 0, atomic, hop);
-}
-
EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
-/*
- * exported, called by kernel clients to enqueue events (with blocking)
- *
- * RETURN VALUE: zero if succeed, negative if error
- */
-int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
- struct file *file,
- int atomic, int hop)
-{
- return kernel_client_enqueue(client, ev, file, 1, atomic, hop);
-}
-
-EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);
-
/*
* exported, called by kernel clients to dispatch events directly to other
* clients, bypassing the queues. Event time-stamp will be updated.
@@ -2543,3 +2557,20 @@ void __exit snd_sequencer_device_done(void)
snd_unregister_device(&seq_dev);
put_device(&seq_dev);
}
+
+/* XXX kABI compatibility for SLE15 XXX */
+#undef snd_seq_kernel_client_enqueue
+int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ int atomic, int hop)
+{
+ return __snd_seq_kernel_client_enqueue(client, ev, NULL, false);
+}
+EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
+
+int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event *ev,
+ struct file *file, int atomic,
+ int hop)
+{
+ return __snd_seq_kernel_client_enqueue(client, ev, file, true);
+}
+EXPORT_SYMBOL(snd_seq_kernel_client_enqueue_blocking);
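The two SLE15 kABI hunks above implement one shim pattern: the header #define transparently routes in-tree callers of the old name to the new-signature implementation, while the old symbol is re-created after an #undef with its original signature, so modules built against the old kABI keep linking. A minimal stand-alone sketch of the pattern (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* "header" part: redirect the old name to the new implementation */
#define do_work __do_work_v2
int do_work(int arg, bool flag);		/* really declares __do_work_v2 */

int __do_work_v2(int arg, bool flag)
{
	return flag ? arg * 2 : arg;
}

/* "source" part: re-create the old symbol with the old signature */
#undef do_work
int do_work(int arg)
{
	return __do_work_v2(arg, false);
}

int main(void)
{
	printf("%d %d\n", do_work(21), __do_work_v2(21, true));	/* 21 42 */
	return 0;
}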
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
index 0611e1e0ed5b..28a51dcc0190 100644
--- a/sound/core/seq/seq_clientmgr.h
+++ b/sound/core/seq/seq_clientmgr.h
@@ -93,14 +93,14 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid);
/* dispatch event to client(s) */
int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop);
-/* exported to other modules */
-int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev, int atomic, int hop);
-int snd_seq_kernel_client_enqueue_blocking(int client, struct snd_seq_event * ev,
- struct file *file, int atomic, int hop);
int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait);
int snd_seq_client_notify_subscription(int client, int port,
struct snd_seq_port_subscribe *info, int evtype);
+/* only for OSS sequencer */
+bool snd_seq_client_ioctl_lock(int clientid);
+void snd_seq_client_ioctl_unlock(int clientid);
+
extern int seq_client_load[15];
#endif
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 175f9e4e01c8..ca060056a4f1 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -136,8 +136,11 @@ static struct snd_minor *autoload_device(unsigned int minor)
if (dev == SNDRV_MINOR_CONTROL) {
/* /dev/aloadC? */
int card = SNDRV_MINOR_CARD(minor);
- if (snd_cards[card] == NULL)
+ struct snd_card *ref = snd_card_ref(card);
+ if (!ref)
snd_request_card(card);
+ else
+ snd_card_unref(ref);
} else if (dev == SNDRV_MINOR_GLOBAL) {
/* /dev/aloadSEQ */
snd_request_other(minor);
diff --git a/sound/last.c b/sound/last.c
index 43f222825038..4f5a624ab438 100644
--- a/sound/last.c
+++ b/sound/last.c
@@ -24,14 +24,18 @@
static int __init alsa_sound_last_init(void)
{
+ struct snd_card *card;
int idx, ok = 0;
printk(KERN_INFO "ALSA device list:\n");
- for (idx = 0; idx < SNDRV_CARDS; idx++)
- if (snd_cards[idx] != NULL) {
- printk(KERN_INFO " #%i: %s\n", idx, snd_cards[idx]->longname);
+ for (idx = 0; idx < SNDRV_CARDS; idx++) {
+ card = snd_card_ref(idx);
+ if (card) {
+ printk(KERN_INFO " #%i: %s\n", idx, card->longname);
+ snd_card_unref(card);
ok++;
}
+ }
if (ok == 0)
printk(KERN_INFO " No soundcards found.\n");
return 0;
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 18267de3a269..de2ebaae1355 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1882,22 +1882,8 @@ int snd_emu10k1_create(struct snd_card *card,
c->name, pci->vendor, pci->device,
emu->serial);
- if (!*card->id && c->id) {
- int i, n = 0;
+ if (!*card->id && c->id)
strlcpy(card->id, c->id, sizeof(card->id));
- for (;;) {
- for (i = 0; i < snd_ecards_limit; i++) {
- if (snd_cards[i] && !strcmp(snd_cards[i]->id, card->id))
- break;
- }
- if (i >= snd_ecards_limit)
- break;
- n++;
- if (n >= SNDRV_CARDS)
- break;
- snprintf(card->id, sizeof(card->id), "%s_%d", c->id, n);
- }
- }
is_audigy = emu->audigy = c->emu10k2_chip;
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 84f86745c30e..828bc615a190 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -643,6 +643,7 @@ static const struct regmap_config cs4270_regmap = {
.reg_defaults = cs4270_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(cs4270_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+ .write_flag_mask = CS4270_I2C_INCR,
.readable_reg = cs4270_reg_is_readable,
.volatile_reg = cs4270_reg_is_volatile,
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 2f6e5f1d4734..169cc8857efb 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -549,73 +549,71 @@ static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
{
struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
struct hdmi_codec_daifmt cf = { 0 };
- int ret = 0;
dev_dbg(dai->dev, "%s()\n", __func__);
- if (dai->id == DAI_ID_SPDIF) {
- cf.fmt = HDMI_SPDIF;
- } else {
- switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
- case SND_SOC_DAIFMT_CBM_CFM:
- cf.bit_clk_master = 1;
- cf.frame_clk_master = 1;
- break;
- case SND_SOC_DAIFMT_CBS_CFM:
- cf.frame_clk_master = 1;
- break;
- case SND_SOC_DAIFMT_CBM_CFS:
- cf.bit_clk_master = 1;
- break;
- case SND_SOC_DAIFMT_CBS_CFS:
- break;
- default:
- return -EINVAL;
- }
+ if (dai->id == DAI_ID_SPDIF)
+ return 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ cf.bit_clk_master = 1;
+ cf.frame_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ cf.frame_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ cf.bit_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
- switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_NB_NF:
- break;
- case SND_SOC_DAIFMT_NB_IF:
- cf.frame_clk_inv = 1;
- break;
- case SND_SOC_DAIFMT_IB_NF:
- cf.bit_clk_inv = 1;
- break;
- case SND_SOC_DAIFMT_IB_IF:
- cf.frame_clk_inv = 1;
- cf.bit_clk_inv = 1;
- break;
- }
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ cf.frame_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ cf.bit_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ cf.frame_clk_inv = 1;
+ cf.bit_clk_inv = 1;
+ break;
+ }
- switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
- case SND_SOC_DAIFMT_I2S:
- cf.fmt = HDMI_I2S;
- break;
- case SND_SOC_DAIFMT_DSP_A:
- cf.fmt = HDMI_DSP_A;
- break;
- case SND_SOC_DAIFMT_DSP_B:
- cf.fmt = HDMI_DSP_B;
- break;
- case SND_SOC_DAIFMT_RIGHT_J:
- cf.fmt = HDMI_RIGHT_J;
- break;
- case SND_SOC_DAIFMT_LEFT_J:
- cf.fmt = HDMI_LEFT_J;
- break;
- case SND_SOC_DAIFMT_AC97:
- cf.fmt = HDMI_AC97;
- break;
- default:
- dev_err(dai->dev, "Invalid DAI interface format\n");
- return -EINVAL;
- }
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ cf.fmt = HDMI_I2S;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ cf.fmt = HDMI_DSP_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ cf.fmt = HDMI_DSP_B;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ cf.fmt = HDMI_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ cf.fmt = HDMI_LEFT_J;
+ break;
+ case SND_SOC_DAIFMT_AC97:
+ cf.fmt = HDMI_AC97;
+ break;
+ default:
+ dev_err(dai->dev, "Invalid DAI interface format\n");
+ return -EINVAL;
}
hcp->daifmt[dai->id] = cf;
- return ret;
+ return 0;
}
static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
@@ -771,8 +769,10 @@ static int hdmi_codec_probe(struct platform_device *pdev)
i++;
}
- if (hcd->spdif)
+ if (hcd->spdif) {
hcp->daidrv[i] = hdmi_spdif_dai;
+ hcp->daifmt[DAI_ID_SPDIF].fmt = HDMI_SPDIF;
+ }
ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
dai_count);
diff --git a/sound/soc/codecs/nau8810.c b/sound/soc/codecs/nau8810.c
index e45518629968..2234d0c04165 100644
--- a/sound/soc/codecs/nau8810.c
+++ b/sound/soc/codecs/nau8810.c
@@ -414,9 +414,9 @@ static const struct snd_soc_dapm_widget nau8810_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Mono Mixer", NAU8810_REG_POWER3,
NAU8810_MOUTMX_EN_SFT, 0, &nau8810_mono_mixer_controls[0],
ARRAY_SIZE(nau8810_mono_mixer_controls)),
- SND_SOC_DAPM_DAC("DAC", "HiFi Playback", NAU8810_REG_POWER3,
+ SND_SOC_DAPM_DAC("DAC", "Playback", NAU8810_REG_POWER3,
NAU8810_DAC_EN_SFT, 0),
- SND_SOC_DAPM_ADC("ADC", "HiFi Capture", NAU8810_REG_POWER2,
+ SND_SOC_DAPM_ADC("ADC", "Capture", NAU8810_REG_POWER2,
NAU8810_ADC_EN_SFT, 0),
SND_SOC_DAPM_PGA("SpkN Out", NAU8810_REG_POWER3,
NAU8810_NSPK_EN_SFT, 0, NULL, 0),
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index cca974d26136..7b977e601d59 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -634,8 +634,8 @@ static const struct snd_soc_dapm_widget nau8824_dapm_widgets[] = {
SND_SOC_DAPM_ADC("ADCR", NULL, NAU8824_REG_ANALOG_ADC_2,
NAU8824_ADCR_EN_SFT, 0),
- SND_SOC_DAPM_AIF_OUT("AIFTX", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_AIF_IN("AIFRX", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIFTX", "Capture", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIFRX", "Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DACL", NULL, NAU8824_REG_RDAC,
NAU8824_DACL_EN_SFT, 0),
@@ -784,6 +784,36 @@ static void nau8824_int_status_clear_all(struct regmap *regmap)
}
}
+static void nau8824_dapm_disable_pin(struct nau8824 *nau8824, const char *pin)
+{
+ struct snd_soc_dapm_context *dapm = nau8824->dapm;
+ const char *prefix = dapm->component->name_prefix;
+ char prefixed_pin[80];
+
+ if (prefix) {
+ snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+ prefix, pin);
+ snd_soc_dapm_disable_pin(dapm, prefixed_pin);
+ } else {
+ snd_soc_dapm_disable_pin(dapm, pin);
+ }
+}
+
+static void nau8824_dapm_enable_pin(struct nau8824 *nau8824, const char *pin)
+{
+ struct snd_soc_dapm_context *dapm = nau8824->dapm;
+ const char *prefix = dapm->component->name_prefix;
+ char prefixed_pin[80];
+
+ if (prefix) {
+ snprintf(prefixed_pin, sizeof(prefixed_pin), "%s %s",
+ prefix, pin);
+ snd_soc_dapm_force_enable_pin(dapm, prefixed_pin);
+ } else {
+ snd_soc_dapm_force_enable_pin(dapm, pin);
+ }
+}
+
static void nau8824_eject_jack(struct nau8824 *nau8824)
{
struct snd_soc_dapm_context *dapm = nau8824->dapm;
@@ -792,8 +822,8 @@ static void nau8824_eject_jack(struct nau8824 *nau8824)
/* Clear all interruption status */
nau8824_int_status_clear_all(regmap);
- snd_soc_dapm_disable_pin(dapm, "SAR");
- snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+ nau8824_dapm_disable_pin(nau8824, "SAR");
+ nau8824_dapm_disable_pin(nau8824, "MICBIAS");
snd_soc_dapm_sync(dapm);
/* Enable the insertion interruption, disable the ejection
@@ -822,8 +852,8 @@ static void nau8824_jdet_work(struct work_struct *work)
struct regmap *regmap = nau8824->regmap;
int adc_value, event = 0, event_mask = 0;
- snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
- snd_soc_dapm_force_enable_pin(dapm, "SAR");
+ nau8824_dapm_enable_pin(nau8824, "MICBIAS");
+ nau8824_dapm_enable_pin(nau8824, "SAR");
snd_soc_dapm_sync(dapm);
msleep(100);
@@ -834,8 +864,8 @@ static void nau8824_jdet_work(struct work_struct *work)
if (adc_value < HEADSET_SARADC_THD) {
event |= SND_JACK_HEADPHONE;
- snd_soc_dapm_disable_pin(dapm, "SAR");
- snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+ nau8824_dapm_disable_pin(nau8824, "SAR");
+ nau8824_dapm_disable_pin(nau8824, "MICBIAS");
snd_soc_dapm_sync(dapm);
} else {
event |= SND_JACK_HEADSET;
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 28fdfc5ec544..c27e3476848a 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -316,6 +316,8 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("IN2_R"),
SND_SOC_DAPM_INPUT("IN3_L"),
SND_SOC_DAPM_INPUT("IN3_R"),
+ SND_SOC_DAPM_INPUT("CM_L"),
+ SND_SOC_DAPM_INPUT("CM_R"),
};
static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index b8d48d5516ae..1a4a6ee186d9 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -3713,11 +3713,13 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
struct regmap *regmap = dsp->regmap;
int ret = 0;
+ mutex_lock(&dsp->pwr_lock);
+
ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
if (ret) {
adsp_err(dsp,
"Failed to read Region Lock Ctrl register: %d\n", ret);
- return IRQ_HANDLED;
+ goto error;
}
if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
@@ -3736,7 +3738,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
adsp_err(dsp,
"Failed to read Bus Err Addr register: %d\n",
ret);
- return IRQ_HANDLED;
+ goto error;
}
adsp_err(dsp, "bus error address = 0x%x\n",
@@ -3749,7 +3751,7 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
adsp_err(dsp,
"Failed to read Pmem Xmem Err Addr register: %d\n",
ret);
- return IRQ_HANDLED;
+ goto error;
}
adsp_err(dsp, "xmem error address = 0x%x\n",
@@ -3762,6 +3764,9 @@ irqreturn_t wm_adsp2_bus_error(struct wm_adsp *dsp)
regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
+error:
+ mutex_unlock(&dsp->pwr_lock);
+
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wm_adsp2_bus_error);
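The fix replaces each early return with goto error so that the newly taken pwr_lock is dropped on every path out of the handler. A schematic of the single-exit unlock pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int handle(int fail_step)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (fail_step == 1) {
		ret = -1;
		goto error;	/* early failure: no bare return here */
	}
	if (fail_step == 2) {
		ret = -2;
		goto error;
	}
error:
	pthread_mutex_unlock(&lock);	/* runs on every exit path */
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", handle(0), handle(1), handle(2));	/* 0 -1 -2 */
	return 0;
}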
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 657afc02f1c4..7a6a64599def 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -1253,11 +1253,15 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
goto irq_err;
err = sst_dma_new(sst);
- if (err)
- dev_warn(dev, "sst_dma_new failed %d\n", err);
+ if (err) {
+ dev_err(dev, "sst_dma_new failed %d\n", err);
+ goto dma_err;
+ }
return sst;
+dma_err:
+ free_irq(sst->irq, sst);
irq_err:
if (sst->ops->free)
sst->ops->free(sst);
diff --git a/sound/soc/samsung/odroid.c b/sound/soc/samsung/odroid.c
index 06a31a9585a0..32c9e197ca95 100644
--- a/sound/soc/samsung/odroid.c
+++ b/sound/soc/samsung/odroid.c
@@ -66,11 +66,11 @@ static int odroid_card_hw_params(struct snd_pcm_substream *substream,
return ret;
/*
- * We add 1 to the rclk_freq value in order to avoid too low clock
+ * We add 2 to the rclk_freq value in order to avoid too low clock
* frequency values due to the EPLL output frequency not being an exact
* multiple of the audio sampling rate.
*/
- rclk_freq = params_rate(params) * rfs + 1;
+ rclk_freq = params_rate(params) * rfs + 2;
ret = clk_set_rate(priv->sclk_i2s, rclk_freq);
if (ret < 0)
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 914cb99a6ed2..10e6b9877ac6 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -48,8 +48,8 @@ static bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream)
else
codec_stream = &dai->driver->capture;
- /* If the codec specifies any rate at all, it supports the stream. */
- return codec_stream->rates;
+ /* If the codec specifies any channels at all, it supports the stream */
+ return codec_stream->channels_min;
}
/**
@@ -894,10 +894,13 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
codec_params = *params;
/* fixup params based on TDM slot masks */
- if (codec_dai->tx_mask)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ codec_dai->tx_mask)
soc_pcm_codec_params_fixup(&codec_params,
codec_dai->tx_mask);
- if (codec_dai->rx_mask)
+
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
+ codec_dai->rx_mask)
soc_pcm_codec_params_fixup(&codec_params,
codec_dai->rx_mask);
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index 8792ad8dbf83..7aea57e88bf4 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -8,7 +8,7 @@ ifeq ("$(origin O)", "command line")
endif
turbostat : turbostat.c
-CFLAGS += -Wall
+override CFLAGS += -Wall -I../../../include
CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
CFLAGS += -DINTEL_FAMILY_HEADER='"../../../../arch/x86/include/asm/intel-family.h"'
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d19c18bf5f3d..bbafe59c014d 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1839,7 +1839,7 @@ int has_turbo_ratio_group_limits(int family, int model)
switch (model) {
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
return 1;
}
return 0;
@@ -2701,9 +2701,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
pkg_cstate_limits = skx_pkg_cstate_limits;
has_misc_feature_control = 1;
break;
- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
+ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
no_MSR_MISC_PWR_MGMT = 1;
- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
+ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
pkg_cstate_limits = slv_pkg_cstate_limits;
break;
case INTEL_FAM6_ATOM_AIRMONT: /* AMT */
@@ -2715,8 +2715,8 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
pkg_cstate_limits = phi_pkg_cstate_limits;
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
pkg_cstate_limits = bxt_pkg_cstate_limits;
break;
default:
@@ -2745,9 +2745,9 @@ int has_slv_msrs(unsigned int family, unsigned int model)
return 0;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_MERRIFIELD:
- case INTEL_FAM6_ATOM_MOOREFIELD:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_MID:
+ case INTEL_FAM6_ATOM_AIRMONT_MID:
return 1;
}
return 0;
@@ -2759,7 +2759,7 @@ int is_dnv(unsigned int family, unsigned int model)
return 0;
switch (model) {
- case INTEL_FAM6_ATOM_DENVERTON:
+ case INTEL_FAM6_ATOM_GOLDMONT_X:
return 1;
}
return 0;
@@ -3275,8 +3275,8 @@ double get_tdp(unsigned int model)
return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT1:
- case INTEL_FAM6_ATOM_SILVERMONT2:
+ case INTEL_FAM6_ATOM_SILVERMONT:
+ case INTEL_FAM6_ATOM_SILVERMONT_X:
return 30.0;
default:
return 135.0;
@@ -3342,7 +3342,7 @@ void rapl_probe(unsigned int family, unsigned int model)
}
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO;
if (rapl_joules)
BIC_PRESENT(BIC_Pkg_J);
@@ -3400,8 +3400,8 @@ void rapl_probe(unsigned int family, unsigned int model)
BIC_PRESENT(BIC_RAMWatt);
}
break;
- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
+ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
+ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
do_rapl = RAPL_PKG | RAPL_CORES;
if (rapl_joules) {
BIC_PRESENT(BIC_Pkg_J);
@@ -3411,7 +3411,7 @@ void rapl_probe(unsigned int family, unsigned int model)
BIC_PRESENT(BIC_CorWatt);
}
break;
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO | RAPL_CORES_ENERGY_STATUS;
BIC_PRESENT(BIC_PKG__);
BIC_PRESENT(BIC_RAM__);
@@ -3434,7 +3434,7 @@ void rapl_probe(unsigned int family, unsigned int model)
return;
rapl_power_units = 1.0 / (1 << (msr & 0xF));
- if (model == INTEL_FAM6_ATOM_SILVERMONT1)
+ if (model == INTEL_FAM6_ATOM_SILVERMONT)
rapl_energy_units = 1.0 * (1 << (msr >> 8 & 0x1F)) / 1000000;
else
rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
@@ -3684,8 +3684,8 @@ int has_snb_msrs(unsigned int family, unsigned int model)
case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
return 1;
}
return 0;
@@ -3716,7 +3716,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
return 1;
}
return 0;
@@ -3750,8 +3750,8 @@ int is_slm(unsigned int family, unsigned int model)
if (!genuine_intel)
return 0;
switch (model) {
- case INTEL_FAM6_ATOM_SILVERMONT1: /* BYT */
- case INTEL_FAM6_ATOM_SILVERMONT2: /* AVN */
+ case INTEL_FAM6_ATOM_SILVERMONT: /* BYT */
+ case INTEL_FAM6_ATOM_SILVERMONT_X: /* AVN */
return 1;
}
return 0;
@@ -4106,11 +4106,11 @@ void process_cpuid()
crystal_hz = 24000000; /* 24.0 MHz */
break;
case INTEL_FAM6_SKYLAKE_X: /* SKX */
- case INTEL_FAM6_ATOM_DENVERTON: /* DNV */
+ case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
crystal_hz = 25000000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
- case INTEL_FAM6_ATOM_GEMINI_LAKE:
+ case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
crystal_hz = 19200000; /* 19.2 MHz */
break;
default: