author	Olaf Hering <ohering@suse.de>	2019-06-11 09:02:34 +0200
committer	Olaf Hering <ohering@suse.de>	2019-06-11 09:02:34 +0200
commit	1dcb86a289c00c4b0a4d735f6fd5169858e586ac (patch)
tree	86b0f76f3471aa0fb37de0960bf2791856e5774d
parent	8acd99e93b27546245dde31dcf02f5b1451413a1 (diff)
parent	9b92ab5bf705f6ece2ec754b976f4c5d305d78f9 (diff)
Merge remote-tracking branch 'kerncvs/SLE12-SP4' into SLE12-SP4-AZURE
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-system-cpu | 3
-rw-r--r-- Documentation/admin-guide/hw-vuln/mds.rst | 307
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/net/ti,wilink-st.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/rtc/sun6i-rtc.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/cx2072x.txt | 36
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 1
-rw-r--r-- arch/arm/mach-iop13xx/setup.c | 8
-rw-r--r-- arch/arm/mach-iop13xx/tpmi.c | 10
-rw-r--r-- arch/arm/plat-iop/adma.c | 6
-rw-r--r-- arch/arm/plat-orion/common.c | 4
-rw-r--r-- arch/arm64/Kconfig | 1
-rw-r--r-- arch/arm64/include/asm/acpi.h | 26
-rw-r--r-- arch/arm64/include/asm/memory.h | 11
-rw-r--r-- arch/arm64/kernel/acpi.c | 11
-rw-r--r-- arch/ia64/Kconfig | 1
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 6
-rw-r--r-- arch/powerpc/perf/power8-pmu.c | 3
-rw-r--r-- arch/powerpc/perf/power9-pmu.c | 3
-rw-r--r-- arch/s390/kernel/smp.c | 2
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 6
-rw-r--r-- arch/s390/kvm/vsie.c | 2
-rw-r--r-- arch/s390/mm/gup.c | 9
-rw-r--r-- arch/x86/Kconfig | 1
-rw-r--r-- arch/x86/entry/entry_64.S | 18
-rw-r--r-- arch/x86/include/asm/text-patching.h | 28
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r-- arch/x86/kernel/ftrace.c | 32
-rw-r--r-- arch/x86/mm/gup.c | 14
-rw-r--r-- drivers/acpi/Kconfig | 8
-rw-r--r-- drivers/crypto/vmx/aes.c | 7
-rw-r--r-- drivers/crypto/vmx/aes_cbc.c | 7
-rw-r--r-- drivers/crypto/vmx/aes_ctr.c | 5
-rw-r--r-- drivers/crypto/vmx/aes_xts.c | 9
-rw-r--r-- drivers/crypto/vmx/ghash.c | 213
-rw-r--r-- drivers/firmware/efi/arm-init.c | 10
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 16
-rw-r--r-- drivers/firmware/efi/efi.c | 121
-rw-r--r-- drivers/firmware/efi/libstub/arm-stub.c | 27
-rw-r--r-- drivers/firmware/efi/memmap.c | 3
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_dump.c | 5
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 13
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r-- drivers/infiniband/sw/rxe/rxe_net.c | 3
-rw-r--r-- drivers/input/touchscreen/elants_i2c.c | 2
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 20
-rw-r--r-- drivers/mmc/core/block.c | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 6
-rw-r--r-- drivers/net/dsa/bcm_sf2_cfp.c | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 22
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 24
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 8
-rw-r--r-- drivers/net/phy/marvell.c | 6
-rw-r--r-- drivers/net/team/team.c | 6
-rw-r--r-- drivers/net/wimax/i2400m/sysfs.c | 3
-rw-r--r-- drivers/net/wireless/intersil/p54/p54pci.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/ie.c | 45
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 19
-rw-r--r-- drivers/nvme/host/core.c | 10
-rw-r--r-- drivers/nvme/host/rdma.c | 24
-rw-r--r-- drivers/pci/endpoint/pci-epc-core.c | 4
-rw-r--r-- drivers/pci/endpoint/pci-epf-core.c | 4
-rw-r--r-- drivers/pci/pcie/aspm.c | 49
-rw-r--r-- drivers/pci/quirks.c | 17
-rw-r--r-- drivers/pci/switch/switchtec.c | 3
-rw-r--r-- drivers/rtc/rtc-da9063.c | 7
-rw-r--r-- drivers/rtc/rtc-sh.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 3
-rw-r--r-- drivers/scsi/qedf/qedf_els.c | 12
-rw-r--r-- drivers/scsi/qedf/qedf_main.c | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 78
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 42
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 15
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 29
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 23
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.c | 49
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.c | 21
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx2.h | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 140
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 39
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 12
-rw-r--r-- drivers/thermal/thermal_sysfs.c | 2
-rw-r--r-- drivers/vhost/vhost.c | 6
-rw-r--r-- drivers/video/fbdev/core/fbcmap.c | 2
-rw-r--r-- drivers/video/fbdev/core/modedb.c | 3
-rw-r--r-- drivers/xen/xenbus/xenbus_dev_frontend.c | 2
-rw-r--r-- fs/btrfs/extent-tree.c | 44
-rw-r--r-- fs/btrfs/file.c | 16
-rw-r--r-- fs/btrfs/inode.c | 14
-rw-r--r-- fs/btrfs/qgroup.c | 8
-rw-r--r-- fs/btrfs/relocation.c | 27
-rw-r--r-- fs/btrfs/tree-checker.c | 49
-rw-r--r-- fs/btrfs/tree-log.c | 20
-rw-r--r-- fs/configfs/dir.c | 14
-rw-r--r-- fs/ext4/file.c | 7
-rw-r--r-- fs/ext4/inode.c | 25
-rw-r--r-- fs/ext4/mballoc.c | 2
-rw-r--r-- fs/ext4/super.c | 2
-rw-r--r-- fs/fs-writeback.c | 11
-rw-r--r-- fs/fuse/dev.c | 12
-rw-r--r-- fs/jbd2/journal.c | 4
-rw-r--r-- fs/nfs/client.c | 1
-rw-r--r-- fs/ocfs2/export.c | 30
-rw-r--r-- fs/pipe.c | 9
-rw-r--r-- fs/splice.c | 12
-rw-r--r-- fs/sync.c | 15
-rw-r--r-- fs/xfs/xfs_file.c | 27
-rw-r--r-- include/dt-bindings/clock/r8a7795-cpg-mssr.h | 2
-rw-r--r-- include/dt-bindings/clock/r8a7796-cpg-mssr.h | 2
-rw-r--r-- include/linux/efi.h | 18
-rw-r--r-- include/linux/indirect_call_wrapper.h | 51
-rw-r--r-- include/linux/livepatch.h | 3
-rw-r--r-- include/linux/memblock.h | 3
-rw-r--r-- include/linux/mm.h | 15
-rw-r--r-- include/linux/of.h | 4
-rw-r--r-- include/linux/pci.h | 4
-rw-r--r-- include/linux/pipe_fs_i.h | 17
-rw-r--r-- include/linux/sched/signal.h | 18
-rw-r--r-- include/linux/sched/user.h | 7
-rw-r--r-- include/net/arp.h | 8
-rw-r--r-- include/net/inet_common.h | 9
-rw-r--r-- include/net/sctp/command.h | 1
-rw-r--r-- include/uapi/linux/fs.h | 3
-rw-r--r-- kernel/bpf/hashtab.c | 22
-rw-r--r-- kernel/bpf/hashtab.h | 16
-rw-r--r-- kernel/bpf/syscall.c | 7
-rw-r--r-- kernel/fork.c | 31
-rw-r--r-- kernel/livepatch/core.c | 91
-rw-r--r-- kernel/ptrace.c | 15
-rw-r--r-- kernel/signal.c | 63
-rw-r--r-- kernel/sys.c | 2
-rw-r--r-- kernel/trace/trace.c | 6
-rw-r--r-- kernel/trace/trace_events.c | 3
-rw-r--r-- mm/gup.c | 55
-rw-r--r-- mm/hugetlb.c | 13
-rw-r--r-- mm/list_lru.c | 6
-rw-r--r-- mm/memblock.c | 11
-rw-r--r-- mm/memory-failure.c | 14
-rw-r--r-- mm/memory.c | 1
-rw-r--r-- mm/mempolicy.c | 32
-rw-r--r-- net/atm/lec.c | 6
-rw-r--r-- net/core/dev.c | 15
-rw-r--r-- net/core/neighbour.c | 9
-rw-r--r-- net/ipv4/af_inet.c | 13
-rw-r--r-- net/ipv4/fou.c | 4
-rw-r--r-- net/ipv4/ip_output.c | 1
-rw-r--r-- net/ipv4/route.c | 30
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 5
-rw-r--r-- net/ipv4/tcp_input.c | 10
-rw-r--r-- net/ipv4/tcp_offload.c | 6
-rw-r--r-- net/ipv4/udp_offload.c | 15
-rw-r--r-- net/ipv6/ip6_offload.c | 35
-rw-r--r-- net/ipv6/tcpv6_offload.c | 7
-rw-r--r-- net/ipv6/udp_offload.c | 7
-rw-r--r-- net/rds/ib_fmr.c | 11
-rw-r--r-- net/rds/ib_rdma.c | 3
-rw-r--r-- net/rose/rose_loopback.c | 26
-rw-r--r-- net/sctp/sm_sideeffect.c | 29
-rw-r--r-- net/sctp/sm_statefuns.c | 35
-rw-r--r-- net/tipc/name_table.c | 3
-rw-r--r-- net/tipc/socket.c | 4
-rw-r--r-- security/keys/process_keys.c | 31
-rw-r--r-- security/keys/request_key.c | 5
-rw-r--r-- sound/pci/hda/patch_realtek.c | 16
-rw-r--r-- sound/soc/codecs/cx2072x.c | 12
-rw-r--r-- tools/objtool/check.c | 3
180 files changed, 2157 insertions, 937 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 1afba2acce34..d6f2f75fa8e5 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -393,8 +393,7 @@ Description: Information about CPU vulnerabilities
"Vulnerable" CPU is affected and no mitigation in effect
"Mitigation: $M" CPU is affected and mitigation $M is in effect
- Details about the l1tf file can be found in
- Documentation/admin-guide/l1tf.rst
+ See also: Documentation/admin-guide/hw-vuln/index.rst
What: /sys/devices/system/cpu/smt
/sys/devices/system/cpu/smt/active
diff --git a/Documentation/admin-guide/hw-vuln/mds.rst b/Documentation/admin-guide/hw-vuln/mds.rst
new file mode 100644
index 000000000000..1de29d28903d
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/mds.rst
@@ -0,0 +1,307 @@
+MDS - Microarchitectural Data Sampling
+======================================
+
+Microarchitectural Data Sampling is a hardware vulnerability which allows
+unprivileged speculative access to data which is available in various CPU
+internal buffers.
+
+Affected processors
+-------------------
+
+This vulnerability affects a wide range of Intel processors. The
+vulnerability is not present on:
+
+ - Processors from AMD, Centaur and other non-Intel vendors
+
+ - Older processor models, where the CPU family is < 6
+
+ - Some Atoms (Bonnell, Saltwell, Goldmont, GoldmontPlus)
+
+ - Intel processors which have the ARCH_CAP_MDS_NO bit set in the
+ IA32_ARCH_CAPABILITIES MSR.
+
+Whether a processor is affected or not can be read out from the MDS
+vulnerability file in sysfs. See :ref:`mds_sys_info`.
+
+Not all processors are affected by all variants of MDS, but the mitigation
+is identical for all of them so the kernel treats them as a single
+vulnerability.
+
+Related CVEs
+------------
+
+The following CVE entries are related to the MDS vulnerability:
+
+ ============== ===== ==============================================
+ CVE-2018-12126 MSBDS Microarchitectural Store Buffer Data Sampling
+ CVE-2018-12130 MFBDS Microarchitectural Fill Buffer Data Sampling
+ CVE-2018-12127 MLPDS Microarchitectural Load Port Data Sampling
+ ============== ===== ==============================================
+
+Problem
+-------
+
+When performing store, load or L1 refill operations, processors write data
+into temporary microarchitectural structures (buffers). The data in the
+buffer can be forwarded to load operations as an optimization.
+
+Under certain conditions, usually a fault/assist caused by a load
+operation, data unrelated to the load memory address can be speculatively
+forwarded from the buffers. Because the load operation causes a fault or
+assist and its result will be discarded, the forwarded data will not cause
+incorrect program execution or state changes. But a malicious operation
+may be able to forward this speculative data to a disclosure gadget,
+which in turn allows inferring the value via a cache side channel attack.
+
+Because the buffers are potentially shared between Hyper-Threads,
+cross-Hyper-Thread attacks are possible.
+
+Deeper technical information is available in the MDS specific x86
+architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+
+
+Attack scenarios
+----------------
+
+Attacks against the MDS vulnerabilities can be mounted from malicious,
+non-privileged user space applications running on hosts or guests.
+Malicious guest OSes can obviously mount attacks as well.
+
+Contrary to other speculation based vulnerabilities the MDS vulnerability
+does not allow the attacker to control the memory target address. As a
+consequence the attacks are purely sampling based but, as demonstrated
+with the TLBleed attack, samples can be post-processed successfully.
+
+Web browsers
+^^^^^^^^^^^^
+
+ It's unclear whether attacks through web browsers are possible at
+ all. Exploitation through JavaScript is considered very unlikely,
+ but other widely used web technologies like WebAssembly could
+ possibly be abused.
+
+
+.. _mds_sys_info:
+
+MDS system information
+-----------------------
+
+The Linux kernel provides a sysfs interface to enumerate the current MDS
+status of the system: whether the system is vulnerable, and which
+mitigations are active. The relevant sysfs file is:
+
+/sys/devices/system/cpu/vulnerabilities/mds
+
+The possible values in this file are:
+
+ ========================================= =================================
+ 'Not affected' The processor is not vulnerable
+
+ 'Vulnerable' The processor is vulnerable,
+ but no mitigation enabled
+
+ 'Vulnerable: Clear CPU buffers attempted' The processor is vulnerable but
+ microcode is not updated.
+ The mitigation is enabled on a
+ best effort basis.
+ See :ref:`vmwerv`
+
+ 'Mitigation: CPU buffer clear' The processor is vulnerable and the
+ CPU buffer clearing mitigation is
+ enabled.
+ ========================================= =================================
+
+If the processor is vulnerable then the following information is appended
+to the output above:
+
+ ======================== ============================================
+ 'SMT vulnerable' SMT is enabled
+ 'SMT mitigated' SMT is enabled and mitigated
+ 'SMT disabled' SMT is disabled
+ 'SMT Host state unknown' Kernel runs in a VM, Host SMT state unknown
+ ======================== ============================================
+
+.. _vmwerv:
+
+Best effort mitigation mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ If the processor is vulnerable, but the availability of the microcode-based
+ mitigation mechanism is not advertised via CPUID, the kernel selects a best
+ effort mitigation mode. This mode invokes the mitigation instructions
+ without a guarantee that they clear the CPU buffers.
+
+ This is done to address virtualization scenarios where the host has the
+ microcode update applied, but the hypervisor is not yet updated to expose
+ the CPUID to the guest. If the host has updated microcode, the protection
+ takes effect; otherwise a few CPU cycles are wasted pointlessly.
+
+ The state in the mds sysfs file reflects this situation accordingly.
+
+
+Mitigation mechanism
+--------------------
+
+The kernel detects the affected CPUs and the presence of the required
+microcode.
+
+If a CPU is affected and the microcode is available, then the kernel
+enables the mitigation by default. The mitigation can be controlled at boot
+time via a kernel command line option. See
+:ref:`mds_mitigation_control_command_line`.
+
+.. _cpu_buffer_clear:
+
+CPU buffer clearing
+^^^^^^^^^^^^^^^^^^^
+
+ The mitigation for MDS clears the affected CPU buffers on return to user
+ space and when entering a guest.
+
+ If SMT is enabled it also clears the buffers on idle entry when the CPU
+ is only affected by MSBDS and not any other MDS variant, because the
+ other variants cannot be protected against cross Hyper-Thread attacks.
+
+ For CPUs which are only affected by MSBDS the user space, guest and idle
+ transition mitigations are sufficient and SMT is not affected.
+
+.. _virt_mechanism:
+
+Virtualization mitigation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection for host to guest transition depends on the L1TF
+ vulnerability of the CPU:
+
+ - CPU is affected by L1TF:
+
+ If the L1D flush mitigation is enabled and up to date microcode is
+ available, the L1D flush mitigation is automatically protecting the
+ guest transition.
+
+ If the L1D flush mitigation is disabled then the MDS mitigation is
+ invoked explicitly when the host MDS mitigation is enabled.
+
+ For details on L1TF and virtualization see:
+ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <mitigation_control_kvm>`.
+
+ - CPU is not affected by L1TF:
+
+ CPU buffers are flushed before entering the guest when the host MDS
+ mitigation is enabled.
+
+ The resulting MDS protection matrix for the host to guest transition:
+
+ ============ ===== ============= ============ =================
+ L1TF MDS VMX-L1FLUSH Host MDS MDS-State
+
+ Don't care No Don't care N/A Not affected
+
+ Yes Yes Disabled Off Vulnerable
+
+ Yes Yes Disabled Full Mitigated
+
+ Yes Yes Enabled Don't care Mitigated
+
+ No Yes N/A Off Vulnerable
+
+ No Yes N/A Full Mitigated
+ ============ ===== ============= ============ =================
+
+ This only covers the host to guest transition, i.e. prevents leakage from
+ host to guest, but does not protect the guest internally. Guests need to
+ have their own protections.
+
+.. _xeon_phi:
+
+XEON PHI specific considerations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The XEON PHI processor family is affected by MSBDS which can be exploited
+ cross Hyper-Threads when entering idle states. Some XEON PHI variants allow
+ the use of MWAIT in user space (Ring 3), which opens a potential attack vector
+ for malicious user space. The exposure can be disabled on the kernel
+ command line with the 'ring3mwait=disable' command line option.
+
+ XEON PHI is not affected by the other MDS variants and MSBDS is mitigated
+ before the CPU enters an idle state. As XEON PHI is not affected by L1TF
+ either, disabling SMT is not required for full protection.
+
+.. _mds_smt_control:
+
+SMT control
+^^^^^^^^^^^
+
+ All MDS variants except MSBDS can be attacked cross Hyper-Threads. That
+ means on CPUs which are affected by MFBDS or MLPDS it is necessary to
+ disable SMT for full protection. These are most of the affected CPUs; the
+ exception is XEON PHI, see :ref:`xeon_phi`.
+
+ Disabling SMT can have a significant performance impact, but the impact
+ depends on the type of workload.
+
+ See the relevant chapter in the L1TF mitigation documentation for details:
+ :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <smt_control>`.
+
+
+.. _mds_mitigation_control_command_line:
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The kernel command line allows controlling the MDS mitigations at boot
+time with the option "mds=". The valid arguments for this option are:
+
+ ============ =============================================================
+ full If the CPU is vulnerable, enable all available mitigations
+ for the MDS vulnerability, CPU buffer clearing on exit to
+ userspace and when entering a VM. Idle transitions are
+ protected as well if SMT is enabled.
+
+ It does not automatically disable SMT.
+
+ off Disables MDS mitigations completely.
+
+ ============ =============================================================
+
+Not specifying this option is equivalent to "mds=full".
+
+
+Mitigation selection guide
+--------------------------
+
+1. Trusted userspace
+^^^^^^^^^^^^^^^^^^^^
+
+ If all userspace applications are from a trusted source and do not
+ execute untrusted code which is supplied externally, then the mitigation
+ can be disabled.
+
+
+2. Virtualization with trusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The same considerations as above for trusted user space apply.
+
+3. Virtualization with untrusted guests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+ The protection depends on the state of the L1TF mitigations.
+ See :ref:`virt_mechanism`.
+
+ If the MDS mitigation is enabled and SMT is disabled, guest to host and
+ guest to guest attacks are prevented.
+
+.. _mds_default_mitigations:
+
+Default mitigations
+-------------------
+
+ The kernel default mitigations for vulnerable processors are:
+
+ - Enable CPU buffer clearing
+
+ The kernel does not by default enforce the disabling of SMT, which leaves
+ SMT systems vulnerable when running untrusted code. The same rationale as
+ for L1TF applies.
+ See :ref:`Documentation/admin-guide/hw-vuln/l1tf.rst <default_mitigations>`.
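The sysfs file documented in mds.rst above is world-readable, so its state can be checked from unprivileged user space. A minimal sketch in C (illustrative only, not part of this patch; error handling kept deliberately simple):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/vulnerabilities/mds";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);	/* absent on kernels without the MDS patches */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		/* e.g. "Mitigation: CPU buffer clear; SMT vulnerable" */
		printf("MDS status: %s", line);
	fclose(f);
	return 0;
}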
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9fb9ea189121..3202915b1b17 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2233,6 +2233,8 @@
Not specifying this option is equivalent to
mds=full.
+ For details see: Documentation/admin-guide/hw-vuln/mds.rst
+
mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory
Amount of memory to be used when the kernel is not able
to see the whole system memory or for test.
diff --git a/Documentation/devicetree/bindings/net/ti,wilink-st.txt b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
index cbad73a84ac4..3404a4a2dd86 100644
--- a/Documentation/devicetree/bindings/net/ti,wilink-st.txt
+++ b/Documentation/devicetree/bindings/net/ti,wilink-st.txt
@@ -22,6 +22,10 @@ Optional properties:
- enable-gpios : GPIO signal controlling enabling of BT. Active high.
- vio-supply : Vio input supply (1.8V)
- vbat-supply : Vbat input supply (2.9-4.8V)
+ - clocks : Must contain an entry, for each entry in clock-names.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names : Must include the following entry:
+ "ext_clock" (External clock provided to the TI combo chip).
Example:
@@ -31,5 +35,7 @@ Example:
bluetooth {
compatible = "ti,wl1835-st";
enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+ clocks = <&clk32k_wl18xx>;
+ clock-names = "ext_clock";
};
};
diff --git a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
index 945934918b71..abd92d62d81c 100644
--- a/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/sun6i-rtc.txt
@@ -19,7 +19,7 @@ Example:
rtc: rtc@01f00000 {
compatible = "allwinner,sun6i-a31-rtc";
- reg = <0x01f00000 0x54>;
+ reg = <0x01f00000 0x400>;
interrupts = <0 40 4>, <0 41 4>;
clock-output-names = "osc32k";
clocks = <&ext_osc32k>;
diff --git a/Documentation/devicetree/bindings/sound/cx2072x.txt b/Documentation/devicetree/bindings/sound/cx2072x.txt
deleted file mode 100644
index 05ddf7c0320e..000000000000
--- a/Documentation/devicetree/bindings/sound/cx2072x.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Conexant CX20721/CX20723/CX7601 audio CODEC
-
-The devices support I2C only.
-
-Required properties:
-
- - compatible : One of "cnxt,cx20721", "cnxt,cx20723", "cnxt,cx7601".
-
- - reg : the I2C address of the device for I2C, it should be <0x33>
-
-Optional properties:
-
- - clocks : phandle and clock specifier for codec MCLK.
- - clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-
-CODEC output pins:
- "PORTA" - Headphone
- "PORTG" - Class-D output
- "PORTE" - Line out
-
-CODEC output pins for Conexant DSP chip:
- "AEC REF" - AEC reference signal
-
-CODEC input pins:
- "PORTB" - Analog mic
- "PORTC" - Digital mic
- "PORTD" - Headset mic
-
-Example:
-
-codec: cx20721@33 {
- compatible = "cnxt,cx20721";
- reg = <0x33>;
- clocks = <&sco>;
- clock-names = "mclk";
-};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6594e03e9973..be73a0285300 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -402,6 +402,7 @@ tcp_min_rtt_wlen - INTEGER
minimum RTT when it is moved to a longer path (e.g., due to traffic
engineering). A longer window makes the filter more resistant to RTT
inflations such as transient congestion. The unit is seconds.
+ Possible values: 0 - 86400 (1 day)
Default: 300
tcp_moderate_rcvbuf - BOOLEAN
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index 53c316f7301e..fe4932fda01d 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};
diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
index db511ec2b1df..116feb6b261e 100644
--- a/arch/arm/mach-iop13xx/tpmi.c
+++ b/arch/arm/mach-iop13xx/tpmi.c
@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index a4d1f8de3b5b..d9612221e484 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_aau_data,
},
};
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index a2399fd66e97..1e970873439c 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
.resource = orion_xor0_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor0_pdata,
},
};
@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
.resource = orion_xor1_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(64),
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor1_pdata,
},
};
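All of the DMA_BIT_MASK(64) -> DMA_BIT_MASK(32) changes above share one rationale: these 32-bit platforms can never honor a 64-bit coherent mask, and advertising one lets the DMA API hand out addresses the bus cannot reach. For comparison, a sketch of the idiomatic alternative, negotiating the mask at probe time instead of hard-coding it in platform data (example_probe() is a hypothetical driver hook, not code from this patch):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Request 32-bit streaming and coherent masks in one call;
	 * this fails cleanly instead of silently promising a wider
	 * mask than the bus supports. */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* ... resource setup continues here ... */
	return 0;
}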
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8de86d7095ed..8cabb248a9a0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1151,6 +1151,7 @@ config EFI_STUB
config EFI
bool "UEFI runtime support"
depends on OF && !CPU_BIG_ENDIAN
+ select ARCH_SUPPORTS_ACPI
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 8fc6b0948059..e75257ce3cec 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,10 +12,15 @@
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
+#ifndef __GENKSYMS__
+#include <linux/efi.h>
+#endif
+
#include <linux/memblock.h>
#include <linux/psci.h>
#include <asm/cputype.h>
+#include <asm/io.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -29,18 +34,22 @@
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
+
/* ACPI table mapping after acpi_permanent_mmap is set */
static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
acpi_size size)
{
+ /* For normal memory we already have a cacheable mapping. */
+ if (memblock_is_map_memory(phys))
+ return (void __iomem *)__phys_to_virt(phys);
+
/*
- * EFI's reserve_regions() call adds memory with the WB attribute
- * to memblock via early_init_dt_add_memory_arch().
+ * We should still honor the memory's attribute here because
+ * the crash dump kernel possibly excludes some ACPI (reclaim)
+ * regions from the memblock list.
*/
- if (!memblock_is_memory(phys))
- return ioremap(phys, size);
-
- return ioremap_cache(phys, size);
+ return __ioremap(phys, size, __acpi_get_mem_attribute(phys));
}
#define acpi_os_ioremap acpi_os_ioremap
@@ -129,7 +138,10 @@ static inline const char *acpi_get_enable_method(int cpu)
* for compatibility.
*/
#define acpi_disable_cmcff 1
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr);
+static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+{
+ return __acpi_get_mem_attribute(addr);
+}
/*
* Despite its name, this function must still broadcast the TLB
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a56c176ab4bc..f1fbd7a8cde7 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -300,6 +300,17 @@ static inline void *phys_to_virt(phys_addr_t x)
#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
_virt_addr_valid(kaddr))
+/*
+ * Given that the GIC architecture permits ITS implementations that can only be
+ * configured with an LPI table address once, GICv3 systems with many CPUs may
+ * end up reserving a lot of different regions after a kexec for their LPI
+ * tables (one per CPU), as we are forced to reuse the same memory after kexec
+ * (and thus reserve it persistently with EFI beforehand)
+ */
+#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
+# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)
+#endif
+
#include <asm-generic/memory_model.h>
#endif
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index e25c11e727fe..255bce35699f 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -18,6 +18,7 @@
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
+#include <linux/efi.h>
#include <linux/efi-bgrt.h>
#include <linux/init.h>
#include <linux/irq.h>
@@ -29,13 +30,9 @@
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
+#include <asm/pgtable.h>
#include <asm/smp_plat.h>
-#ifdef CONFIG_ACPI_APEI
-# include <linux/efi.h>
-# include <asm/pgtable.h>
-#endif
-
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);
@@ -239,8 +236,7 @@ done:
}
}
-#ifdef CONFIG_ACPI_APEI
-pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
/*
* According to "Table 8 Map: EFI memory types to AArch64 memory
@@ -261,4 +257,3 @@ pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_NORMAL_NC);
return __pgprot(PROT_DEVICE_nGnRnE);
}
-#endif
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6a15083cc366..2815e5f174af 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -15,6 +15,7 @@ config IA64
select ARCH_MIGHT_HAVE_PC_SERIO
select PCI if (!IA64_HP_SIM)
select ACPI if (!IA64_HP_SIM)
+ select ARCH_SUPPORTS_ACPI if (!IA64_HP_SIM)
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 9d78938f7cc3..0c2b55438c2d 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1861,6 +1861,7 @@ static int power_pmu_event_init(struct perf_event *event)
int n;
int err;
struct cpu_hw_events *cpuhw;
+ u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1966,13 +1967,14 @@ static int power_pmu_event_init(struct perf_event *event)
err = power_check_constraints(cpuhw, events, cflags, n + 1);
if (has_branch_stack(event)) {
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
- if (cpuhw->bhrb_filter == -1) {
+ if (bhrb_filter == -1) {
put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
}
+ cpuhw->bhrb_filter = bhrb_filter;
}
put_cpu_var(cpu_hw_events);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index d12a2db26353..d10feef93b6b 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -29,6 +29,7 @@ enum {
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/*
* Raw event encoding for PowerISA v2.07 (Power8):
@@ -243,6 +244,8 @@ static u64 power8_bhrb_filter_map(u64 branch_sample_type)
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 319bc0c93814..293ee063cf5b 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -100,6 +100,7 @@ enum {
#define POWER9_MMCRA_IFM1 0x0000000040000000UL
#define POWER9_MMCRA_IFM2 0x0000000080000000UL
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
@@ -317,6 +318,8 @@ static u64 power9_bhrb_filter_map(u64 branch_sample_type)
static void power9_config_bhrb(u64 pmu_bhrb_filter)
{
+ pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
+
/* Enable BHRB filter in PMU */
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f9d3163294a6..07383496f1d3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1175,7 +1175,7 @@ static ssize_t __ref rescan_store(struct device *dev,
unlock_device_hotplug();
return rc ? rc : count;
}
-static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
+static DEVICE_ATTR_WO(rescan);
#endif /* CONFIG_HOTPLUG_CPU */
static int __init s390_smp_init(void)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 530e647f6d7f..a8f125aa751f 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -86,6 +86,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+ { "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
{ "instruction_gs", VCPU_STAT(instruction_gs) },
@@ -635,7 +636,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
case KVM_CAP_S390_GS:
r = -EINVAL;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus)) {
+ if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(133)) {
set_kvm_facility(kvm->arch.model.fac_mask, 133);
@@ -1155,7 +1156,7 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
return -EINVAL;
mutex_lock(&kvm->lock);
- if (!atomic_read(&kvm->online_vcpus)) {
+ if (!kvm->created_vcpus) {
bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
KVM_S390_VM_CPU_FEAT_NR_BITS);
ret = 0;
@@ -2146,6 +2147,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
/* we still need the basic sca for the ipte control */
vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
+ return;
}
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 5fef491564ec..2b89505bf4ae 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -568,7 +568,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
gpa = scb_o->itdba & ~0xffUL;
if (gpa && (scb_s->ecb & ECB_TE)) {
- if (!(gpa & ~0x1fffU)) {
+ if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
}
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index da2f7e26cfa3..7419baf6997d 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -38,7 +38,8 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_get_speculative(head)))
return 0;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
@@ -76,7 +77,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_add_speculative(head, refs))) {
*nr -= refs;
return 0;
}
@@ -150,7 +152,8 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
+ || !page_cache_add_speculative(head, refs))) {
*nr -= refs;
return 0;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a5a72edc9ed9..28241e8cdc83 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -64,6 +64,7 @@ config X86
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
+ select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 0e3e5116fe0f..3b5371915d01 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -878,7 +878,7 @@ ENTRY(switch_to_thread_stack)
ret
END(switch_to_thread_stack)
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 create_gap=0
ENTRY(\sym)
UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -898,6 +898,20 @@ ENTRY(\sym)
jnz .Lfrom_usermode_switch_stack_\@
.endif
+ .if \create_gap == 1
+ /*
+ * If coming from kernel space, create a 6-word gap to allow the
+ * int3 handler to emulate a call instruction.
+ */
+ testb $3, CS-ORIG_RAX(%rsp)
+ jnz .Lfrom_usermode_no_gap_\@
+ .rept 6
+ pushq 5*8(%rsp)
+ .endr
+ UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
+ .endif
+
.if \paranoid
call paranoid_entry
.else
@@ -1135,7 +1149,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
#endif /* CONFIG_HYPERV */
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0
+idtentry int3 do_int3 has_error_code=0 create_gap=1
idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 90395063383c..ed9ee6defd10 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -37,4 +37,32 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
extern int poke_int3_handler(struct pt_regs *regs);
extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+ /*
+ * The int3 handler in entry_64.S adds a gap between the
+ * stack where the breakpoint happened and the saved
+ * pt_regs. We can extend the original stack because of
+ * this gap. See the idtentry macro's create_gap option.
+ */
+ regs->sp -= sizeof(unsigned long);
+ *(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+ int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+ int3_emulate_jmp(regs, func);
+}
+#endif
+
#endif /* _ASM_X86_TEXT_PATCHING_H */
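The int3_emulate_*() helpers above never execute a real call or jump; they only edit the saved register state so that returning from the #BP trap lands where the emulated instruction would have gone. A user-space toy model of the same arithmetic (fake_regs, the software stack and the 0x1000/0x2000 addresses are all invented for illustration):

#include <stdio.h>

#define INT3_INSN_SIZE 1
#define CALL_INSN_SIZE 5

struct fake_regs { unsigned long ip, sp; };
static unsigned long stack[16];		/* stands in for the kernel stack */

static void emulate_jmp(struct fake_regs *r, unsigned long ip)
{
	r->ip = ip;			/* resume execution at the target */
}

static void emulate_push(struct fake_regs *r, unsigned long val)
{
	stack[--r->sp] = val;		/* the entry_64.S gap made room for this slot */
}

static void emulate_call(struct fake_regs *r, unsigned long func)
{
	/* r->ip points one byte past the int3 that overwrote the call,
	 * so the return address is call-site start + call length. */
	emulate_push(r, r->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	emulate_jmp(r, func);
}

int main(void)
{
	struct fake_regs r = { .ip = 0x1001, .sp = 16 };  /* int3 hit at 0x1000 */

	emulate_call(&r, 0x2000);
	/* prints: ip=0x2000 ret=0x1005 */
	printf("ip=0x%lx ret=0x%lx\n", r.ip, stack[r.sp]);
	return 0;
}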
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 0d04a844553b..39cc501fa547 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -645,7 +645,7 @@ static ssize_t pf_show(struct device *dev,
return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}
-static DEVICE_ATTR(reload, 0200, NULL, reload_store);
+static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
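The DEVICE_ATTR_WO() conversion here (and in arch/s390/kernel/smp.c above) is mechanical: the macro derives both the store callback name and the 0200 mode from the attribute name, so the two spellings declare the same sysfs attribute:

/* Equivalent declarations: DEVICE_ATTR_WO(reload) expands to a
 * struct device_attribute with .store = reload_store and mode 0200. */
static DEVICE_ATTR(reload, 0200, NULL, reload_store);	/* spelled out */
static DEVICE_ATTR_WO(reload);				/* shorthand */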
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 9bef1bbeba63..c98f757395a2 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -28,6 +28,7 @@
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
+#include <asm/text-patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -227,6 +228,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
static int update_ftrace_func(unsigned long ip, void *new)
{
@@ -255,6 +257,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned char *new;
int ret;
+ ftrace_update_func_call = (unsigned long)func;
+
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -290,13 +294,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
if (WARN_ON_ONCE(!regs))
return 0;
- ip = regs->ip - 1;
- if (!ftrace_location(ip) && !is_ftrace_caller(ip))
- return 0;
+ ip = regs->ip - INT3_INSN_SIZE;
- regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+ if (ftrace_location(ip)) {
+ int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+ return 1;
+ } else if (is_ftrace_caller(ip)) {
+ if (!ftrace_update_func_call) {
+ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+ return 1;
+ }
+ int3_emulate_call(regs, ftrace_update_func_call);
+ return 1;
+ }
+#else
+ if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+ int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+ return 1;
+ }
+#endif
- return 1;
+ return 0;
}
static int ftrace_write(unsigned long ip, const char *val, int size)
@@ -867,6 +886,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
func = ftrace_ops_get_func(ops);
+ ftrace_update_func_call = (unsigned long)func;
+
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
@@ -963,6 +984,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
{
unsigned char *new;
+ ftrace_update_func_call = 0UL;
new = ftrace_jmp_replace(ip, (unsigned long)func);
return update_ftrace_func(ip, new);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 456dfdfd2249..7eeffee21ace 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -137,7 +137,10 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ put_dev_pagemap(pgmap);
+ break;
+ }
put_dev_pagemap(pgmap);
SetPageReferenced(page);
pages[*nr] = page;
@@ -173,9 +176,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
undo_dev_pagemap(nr, nr_start, pages);
return 0;
}
+ if (unlikely(!try_get_page(page))) {
+ put_dev_pagemap(pgmap);
+ return 0;
+ }
SetPageReferenced(page);
pages[*nr] = page;
- get_page(page);
put_dev_pagemap(pgmap);
(*nr)++;
pfn++;
@@ -219,6 +225,8 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
refs = 0;
head = pmd_page(pmd);
+ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
+ return 0;
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON_PAGE(compound_head(page) != head, page);
@@ -282,6 +290,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
refs = 0;
head = pud_page(pud);
+ if (WARN_ON_ONCE(page_ref_count(head) <= 0))
+ return 0;
page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON_PAGE(compound_head(page) != head, page);
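The s390 and x86 gup changes above close the same hole: once a page's reference count has been driven negative by an overflow, blindly incrementing it again would make the count look valid. The semantics being relied on, as a simplified sketch (try_get_page_sketch() is illustrative; the in-tree try_get_page() differs in detail):

#include <linux/mm.h>

/* Refuse to take a reference once the refcount is zero or has
 * overflowed to negative, instead of incrementing unconditionally
 * the way get_page() does. */
static inline bool try_get_page_sketch(struct page *page)
{
	if (page_ref_count(page) <= 0)
		return false;
	page_ref_inc(page);
	return true;
}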
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 176fae699891..ea07f88fa45b 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -2,13 +2,15 @@
# ACPI Configuration
#
+config ARCH_SUPPORTS_ACPI
+ bool
+
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
- depends on !IA64_HP_SIM
- depends on IA64 || X86 || ARM64
+ depends on ARCH_SUPPORTS_ACPI
depends on PCI
select PNP
- default y if (IA64 || X86)
+ default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
Linux requires an ACPI-compliant platform (hardware/firmware),
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index b0cd5aff3822..5e85dfca8242 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -83,13 +83,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
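The ret += to ret |= change repeated across the vmx crypto files fixes error aggregation: summing status codes produces a meaningless value when more than one step fails (two -1 results add up to -2, which a caller would read as an unrelated errno), whereas OR-ing only preserves the fact that something failed, which the function then maps to a single well-defined -EINVAL. In miniature (user-space sketch; step_a()/step_b() are made up):

#include <stdio.h>

static int step_a(void) { return -1; }	/* failure */
static int step_b(void) { return -1; }	/* failure */

int main(void)
{
	int sum = 0, any = 0;

	sum += step_a();
	sum += step_b();	/* -2: reads as a different errno */

	any |= step_a();
	any |= step_b();	/* nonzero: "at least one step failed" */

	printf("sum=%d any=%d -> return %d\n",
	       sum, any, any ? -22 /* -EINVAL */ : 0);
	return 0;
}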
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 668e285f1a64..bb01e62700af 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -86,13 +86,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 386943e65a20..a9bac01ba2fb 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -88,8 +88,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index 16f6c0cef4ac..f9c224192802 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -91,14 +91,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_disable();
enable_kernel_vsx();
ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
- return ret;
+ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
+
+ return ret ? -EINVAL : 0;
}
static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 1c4b5b889fba..1bfe867c0b7b 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -1,22 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
/**
* GHASH routines supporting VMX instructions on the Power 8
*
- * Copyright (C) 2015 International Business Machines Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 only.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Copyright (C) 2015, 2019 International Business Machines Inc.
*
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
+ *
+ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
+ * mechanism. The new approach is based on arm64 code, which is:
+ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*/
#include <linux/types.h>
@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
const u8 *in, size_t len);
struct p8_ghash_ctx {
+ /* key used by vector asm */
u128 htable[16];
- struct crypto_shash *fallback;
+ /* key used by software fallback */
+ be128 key;
};
struct p8_ghash_desc_ctx {
u64 shash[2];
u8 buffer[GHASH_DIGEST_SIZE];
int bytes;
- struct shash_desc fallback_desc;
};
-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
-{
- const char *alg = "ghash-generic";
- struct crypto_shash *fallback;
- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(fallback)) {
- printk(KERN_ERR
- "Failed to allocate transformation for '%s': %ld\n",
- alg, PTR_ERR(fallback));
- return PTR_ERR(fallback);
- }
-
- crypto_shash_set_flags(fallback,
- crypto_shash_get_flags((struct crypto_shash
- *) tfm));
-
- /* Check if the descsize defined in the algorithm is still enough. */
- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
- + crypto_shash_descsize(fallback)) {
- printk(KERN_ERR
- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
- alg,
- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
- crypto_shash_descsize(fallback));
- return -EINVAL;
- }
- ctx->fallback = fallback;
-
- return 0;
-}
-
-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- if (ctx->fallback) {
- crypto_free_shash(ctx->fallback);
- ctx->fallback = NULL;
- }
-}
-
static int p8_ghash_init(struct shash_desc *desc)
{
- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
dctx->bytes = 0;
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
- dctx->fallback_desc.tfm = ctx->fallback;
- dctx->fallback_desc.flags = desc->flags;
- return crypto_shash_init(&dctx->fallback_desc);
+ return 0;
}
static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
- return crypto_shash_setkey(ctx->fallback, key, keylen);
+
+ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
+
+ return 0;
+}
+
+static inline void __ghash_block(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx)
+{
+ if (!IN_INTERRUPT) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ dctx->buffer, GHASH_DIGEST_SIZE);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ }
+}
+
+static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
+ struct p8_ghash_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen)
+{
+ if (!IN_INTERRUPT) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ gcm_ghash_p8(dctx->shash, ctx->htable,
+ src, srclen);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+ } else {
+ while (srclen >= GHASH_BLOCK_SIZE) {
+ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
+ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
+ srclen -= GHASH_BLOCK_SIZE;
+ src += GHASH_BLOCK_SIZE;
+ }
+ }
}
static int p8_ghash_update(struct shash_desc *desc,
@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc,
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
- return crypto_shash_update(&dctx->fallback_desc, src,
- srclen);
- } else {
- if (dctx->bytes) {
- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
- memcpy(dctx->buffer + dctx->bytes, src,
- srclen);
- dctx->bytes += srclen;
- return 0;
- }
+ if (dctx->bytes) {
+ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
memcpy(dctx->buffer + dctx->bytes, src,
- GHASH_DIGEST_SIZE - dctx->bytes);
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- src += GHASH_DIGEST_SIZE - dctx->bytes;
- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
- dctx->bytes = 0;
- }
- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
- if (len) {
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- src += len;
- srclen -= len;
- }
- if (srclen) {
- memcpy(dctx->buffer, src, srclen);
- dctx->bytes = srclen;
+ srclen);
+ dctx->bytes += srclen;
+ return 0;
}
- return 0;
+ memcpy(dctx->buffer + dctx->bytes, src,
+ GHASH_DIGEST_SIZE - dctx->bytes);
+
+ __ghash_block(ctx, dctx);
+
+ src += GHASH_DIGEST_SIZE - dctx->bytes;
+ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+ dctx->bytes = 0;
+ }
+ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+ if (len) {
+ __ghash_blocks(ctx, dctx, src, len);
+ src += len;
+ srclen -= len;
}
+ if (srclen) {
+ memcpy(dctx->buffer, src, srclen);
+ dctx->bytes = srclen;
+ }
+ return 0;
}
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
- if (IN_INTERRUPT) {
- return crypto_shash_final(&dctx->fallback_desc, out);
- } else {
- if (dctx->bytes) {
- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
- dctx->buffer[i] = 0;
- preempt_disable();
- pagefault_disable();
- enable_kernel_vsx();
- gcm_ghash_p8(dctx->shash, ctx->htable,
- dctx->buffer, GHASH_DIGEST_SIZE);
- disable_kernel_vsx();
- pagefault_enable();
- preempt_enable();
- dctx->bytes = 0;
- }
- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
- return 0;
+ if (dctx->bytes) {
+ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+ dctx->buffer[i] = 0;
+ __ghash_block(ctx, dctx);
+ dctx->bytes = 0;
}
+ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+ return 0;
}
struct shash_alg p8_ghash_alg = {
@@ -215,11 +178,9 @@ struct shash_alg p8_ghash_alg = {
.cra_name = "ghash",
.cra_driver_name = "p8_ghash",
.cra_priority = 1000,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
.cra_module = THIS_MODULE,
- .cra_init = p8_ghash_init_tfm,
- .cra_exit = p8_ghash_exit_tfm,
},
};
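The rewritten p8_ghash_update() above is the standard partial-block pattern: top up a previously buffered partial block first, feed all whole blocks to the block function in one call, then stash the tail for the next update. Reduced to its skeleton (user-space sketch; process_blocks() stands in for either the gcm_ghash_p8() VSX path or the gf128mul_lle() software fallback the patch introduces):

#include <stddef.h>
#include <string.h>

#define BLOCK 16			/* GHASH_DIGEST_SIZE above */

struct dctx { unsigned char buf[BLOCK]; int bytes; };

static void process_blocks(struct dctx *d, const unsigned char *src,
			   size_t len)
{
	/* GHASH multiply for each 16-byte block would happen here. */
	(void)d; (void)src; (void)len;
}

static void update(struct dctx *d, const unsigned char *src, size_t len)
{
	size_t whole;

	if (d->bytes) {			/* finish a buffered partial block */
		size_t fill = BLOCK - d->bytes;

		if (len < fill) {	/* still not a full block: keep buffering */
			memcpy(d->buf + d->bytes, src, len);
			d->bytes += len;
			return;
		}
		memcpy(d->buf + d->bytes, src, fill);
		process_blocks(d, d->buf, BLOCK);
		src += fill;
		len -= fill;
		d->bytes = 0;
	}
	whole = len & ~(size_t)(BLOCK - 1);
	if (whole) {			/* all complete blocks in one call */
		process_blocks(d, src, whole);
		src += whole;
		len -= whole;
	}
	if (len) {			/* keep the tail for the next update */
		memcpy(d->buf, src, len);
		d->bytes = (int)len;
	}
}

Keeping the fallback behind the same entry points is what lets the patch delete the separate crypto_shash fallback and its per-descriptor bookkeeping.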
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 1027d7b44358..ad923ca5c14f 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -159,6 +159,7 @@ static __init int is_usable_memory(efi_memory_desc_t *md)
switch (md->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
@@ -211,6 +212,10 @@ static __init void reserve_regions(void)
if (!is_usable_memory(md))
memblock_mark_nomap(paddr, size);
+
+ /* keep ACPI reclaim memory intact for kexec etc. */
+ if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+ memblock_reserve(paddr, size);
}
}
}
@@ -251,13 +256,16 @@ void __init efi_init(void)
reserve_regions();
efi_esrt_init();
- efi_memmap_unmap();
memblock_reserve(params.mmap & PAGE_MASK,
PAGE_ALIGN(params.mmap_size +
(params.mmap & ~PAGE_MASK)));
init_screen_info();
+
+ /* ARM does not permit early mappings to persist across paging_init() */
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_memmap_unmap();
}
static int __init register_gop_device(void)
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 974c5a31a005..d8bf4a693ce6 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -123,6 +123,15 @@ static int __init arm_enable_runtime_services(void)
return 0;
}
+ efi_memmap_unmap();
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return 0;
+ }
+
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
return 0;
@@ -135,13 +144,6 @@ static int __init arm_enable_runtime_services(void)
pr_info("Remapping and enabling EFI services.\n");
- mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
-
- if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
-
if (!efi_virtmap_init()) {
pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
return -ENOMEM;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a631539538ba..f00c87a40e86 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -55,6 +55,11 @@ struct efi __read_mostly efi = {
};
EXPORT_SYMBOL(efi);
+ /* Linux EFI memreserve table.
+ * Declared as a global variable to avoid breaking kABI.
+ */
+unsigned long mem_reserve = EFI_INVALID_TABLE_ADDR;
+
static unsigned long *efi_tables[] = {
&efi.mps,
&efi.acpi,
@@ -466,6 +471,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
+ {LINUX_EFI_MEMRESERVE_TABLE_GUID, "MEMRESERVE", &mem_reserve},
{NULL_GUID, NULL, NULL},
};
@@ -570,6 +576,41 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
early_memunmap(tbl, sizeof(*tbl));
}
+ if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
+ unsigned long prsv = mem_reserve;
+
+ while (prsv) {
+ struct linux_efi_memreserve *rsv;
+ u8 *p;
+ int i;
+
+ /*
+ * Just map a full page: that is what we will get
+ * anyway, and it permits us to map the entire entry
+ * before knowing its size.
+ */
+ p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+ PAGE_SIZE);
+ if (p == NULL) {
+ pr_err("Could not map UEFI memreserve entry!\n");
+ return -ENOMEM;
+ }
+
+ rsv = (void *)(p + prsv % PAGE_SIZE);
+
+ /* reserve the entry itself */
+ memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+
+ for (i = 0; i < atomic_read(&rsv->count); i++) {
+ memblock_reserve(rsv->entry[i].base,
+ rsv->entry[i].size);
+ }
+
+ prsv = rsv->next;
+ early_memunmap(p, PAGE_SIZE);
+ }
+ }
+
return 0;
}
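Editor's note: the memreserve table parsed above is a singly linked list of pages. Each page carries a capacity, an in-use count, a {base, size} entry array, and the physical address of the next page (0 terminates the list). A simplified userspace sketch of the walk follows; the struct layout and map_phys() are illustrative stand-ins for the real linux_efi_memreserve and early_memremap(), not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

struct memreserve_entry { uint64_t base, size; };

struct memreserve_page {
	uint64_t next;	/* physical address of next page; 0 ends the list */
	int size;	/* capacity of entry[] */
	int count;	/* entries currently in use */
	struct memreserve_entry entry[];
};

/* Stand-in for early_memremap(); here physical == virtual. */
static struct memreserve_page *map_phys(uint64_t pa)
{
	return (struct memreserve_page *)(uintptr_t)pa;
}

static void walk_memreserve(uint64_t head_pa)
{
	uint64_t pa;

	for (pa = head_pa; pa; ) {
		struct memreserve_page *p = map_phys(pa);
		int i;

		for (i = 0; i < p->count; i++)
			printf("reserve %#llx+%#llx\n",
			       (unsigned long long)p->entry[i].base,
			       (unsigned long long)p->entry[i].size);
		pa = p->next;	/* read the link before moving on */
	}
}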
@@ -891,6 +932,86 @@ bool efi_is_table_address(unsigned long phys_addr)
return false;
}
+static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
+
+static int __init efi_memreserve_map_root(void)
+{
+ if (mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ efi_memreserve_root = memremap(mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (WARN_ON_ONCE(!efi_memreserve_root))
+ return -ENOMEM;
+ return 0;
+}
+
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+ struct linux_efi_memreserve *rsv;
+ unsigned long prsv;
+ int rc, index;
+
+ if (efi_memreserve_root == (void *)ULONG_MAX)
+ return -ENODEV;
+
+ if (!efi_memreserve_root) {
+ rc = efi_memreserve_map_root();
+ if (rc)
+ return rc;
+ }
+
+ /* first try to find a slot in an existing linked list entry */
+ for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+ rsv = __va(prsv);
+ /* Open-code the equivalent of
+ * index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ */
+ index = atomic_read(&rsv->count);
+
+ do {
+ if (unlikely(index == rsv->size))
+ break;
+ } while (!atomic_try_cmpxchg(&rsv->count, &index, index + 1));
+
+ if (index < rsv->size) {
+ rsv->entry[index].base = addr;
+ rsv->entry[index].size = size;
+
+ return 0;
+ }
+ }
+
+ /* no slot found - allocate a new linked list entry */
+ rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
+ if (!rsv)
+ return -ENOMEM;
+
+ rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+ atomic_set(&rsv->count, 1);
+ rsv->entry[0].base = addr;
+ rsv->entry[0].size = size;
+
+ spin_lock(&efi_mem_reserve_persistent_lock);
+ rsv->next = efi_memreserve_root->next;
+ efi_memreserve_root->next = __pa(rsv);
+ spin_unlock(&efi_mem_reserve_persistent_lock);
+
+ return 0;
+}
+
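Editor's note: the slot-claim loop in efi_mem_reserve_persistent() above open-codes atomic_fetch_add_unless(), which this kernel lacks: read the counter, then CAS it to index + 1 unless it already equals the capacity. The same idea in standalone C11; names are illustrative.

#include <stdatomic.h>

/* Claim one slot in a fixed-capacity array. Returns the claimed index,
 * or -1 if the array is already full; the counter is never bumped past
 * the capacity. */
static int claim_slot(atomic_int *count, int capacity)
{
	int index = atomic_load(count);

	do {
		if (index == capacity)
			return -1;	/* full: leave the counter alone */
	} while (!atomic_compare_exchange_weak(count, &index, index + 1));

	return index;	/* this slot now belongs to the caller */
}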
+static int __init efi_memreserve_root_init(void)
+{
+ if (efi_memreserve_root)
+ return 0;
+ if (efi_memreserve_map_root())
+ efi_memreserve_root = (void *)ULONG_MAX;
+ return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
#ifdef CONFIG_KEXEC
static int update_efi_random_seed(struct notifier_block *nb,
unsigned long code, void *unused)
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 8181ac179d14..88facc5c5839 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -94,6 +94,31 @@ static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
return si;
}
+void install_memreserve_table(efi_system_table_t *sys_table_arg)
+{
+ struct linux_efi_memreserve *rsv;
+ efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
+ efi_status_t status;
+
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
+ (void **)&rsv);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to allocate memreserve entry!\n");
+ return;
+ }
+
+ rsv->next = 0;
+ rsv->size = 0;
+ atomic_set(&rsv->count, 0);
+
+ status = efi_call_early(install_configuration_table,
+ &memreserve_table_guid,
+ rsv);
+ if (status != EFI_SUCCESS)
+ pr_efi_err(sys_table_arg, "Failed to install memreserve config table!\n");
+}
+
+
/*
* This function handles the architecture-specific differences between arm and
* arm64 regarding where the kernel image must be loaded and any memory that
@@ -255,6 +280,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
}
}
+ install_memreserve_table(sys_table);
+
new_fdt_addr = fdt_addr;
status = allocate_new_fdt_and_exit_boot(sys_table, handle,
&new_fdt_addr, efi_get_max_fdt_addr(dram_base),
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 78686443cb37..3fd2b450c649 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -117,6 +117,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
void __init efi_memmap_unmap(void)
{
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
if (!efi.memmap.late) {
unsigned long size;
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 1c0495acf341..06656acea420 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -274,6 +274,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
* the last one found as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+ if (!fwstr)
+ return ERR_PTR(-ENOMEM);
edidstr = fwstr;
while ((edidname = strsep(&edidstr, ","))) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index e154e6fb64da..25e2cf587bcf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -125,6 +125,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ mutex_lock(&gpu->mmu->lock);
+
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -164,6 +166,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
PAGE_KERNEL);
if (!iter.start) {
+ mutex_unlock(&gpu->mmu->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
return;
}
@@ -226,6 +229,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
obj->base.size);
}
+ mutex_unlock(&gpu->mmu->lock);
+
etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 3d2d36df3603..c740751e99f0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -119,12 +119,13 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
+ {RCS, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
+ {RCS, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
+ {RCS, TRNULLDETCT, 0, true}, /* 0x4de8 */
+ {RCS, TRINVTILEDETCT, 0, true}, /* 0x4dec */
+ {RCS, TRVADR, 0, true}, /* 0x4df0 */
+ {RCS, TRTTE, 0, true}, /* 0x4df4 */
+ {RCS, _MMIO(0x4dfc), 0, true},
{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 31da4f288c94..40c1e89ed361 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2733,6 +2733,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (view_type == vmw_view_max)
return -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
+ if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+ DRM_ERROR("Invalid surface id.\n");
+ return -EINVAL;
+ }
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->sid, &srf_node);
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index d9841d30fd60..24b76dd7588e 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -558,8 +558,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
return NULL;
}
- skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+ /* FIXME: hold a reference to this netdev for the lifetime of this skb. */
skb->dev = ndev;
if (av->network_type == RDMA_NETWORK_IPV4)
skb->protocol = htons(ETH_P_IP);
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 872750eeca93..ec375cc9b9f4 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -999,7 +999,7 @@ static ssize_t show_iap_mode(struct device *dev,
"Normal" : "Recovery");
}
-static DEVICE_ATTR(calibrate, S_IWUSR, NULL, calibrate_store);
+static DEVICE_ATTR_WO(calibrate);
static DEVICE_ATTR(iap_mode, S_IRUGO, show_iap_mode, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, write_update_fw);
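Editor's note: this and the similar conversions later in the series (ibmvnic, wimax, lpfc) lean on the sysfs helper macros' naming convention. A sketch of the equivalence, paraphrased from the pattern in include/linux/device.h rather than quoted verbatim:

/* DEVICE_ATTR_WO(calibrate) is shorthand for
 *	DEVICE_ATTR(calibrate, 0200, NULL, calibrate_store);
 * i.e. a write-only (S_IWUSR) attribute whose store callback must be
 * named <attr>_store -- which is why the callbacks touched by this
 * series already carry that suffix.
 */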
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 63150ff60981..1e46d1c2c5d7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
@@ -2187,8 +2188,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
reg &= ~clr;
reg |= set;
writel_relaxed(reg | GBPA_UPDATE, gbpa);
- return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
- 1, ARM_SMMU_POLL_TIMEOUT_US);
+ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
+ 1, ARM_SMMU_POLL_TIMEOUT_US);
+
+ if (ret)
+ dev_err(smmu->dev, "GBPA not responding to update\n");
+ return ret;
}
static void arm_smmu_free_msis(void *data)
@@ -2366,8 +2371,11 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
/* Clear CR0 and sync (disables SMMU and queue processing) */
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
- if (reg & CR0_SMMUEN)
+ if (reg & CR0_SMMUEN) {
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
+ WARN_ON(is_kdump_kernel() && !disable_bypass);
+ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
+ }
ret = arm_smmu_device_disable(smmu);
if (ret)
@@ -2461,16 +2469,16 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
return ret;
}
+ if (is_kdump_kernel())
+ enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
/* Enable the SMMU interface, or ensure bypass */
if (!bypass || disable_bypass) {
enables |= CR0_SMMUEN;
} else {
ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
- if (ret) {
- dev_err(smmu->dev, "GBPA not responding to update\n");
+ if (ret)
return ret;
- }
}
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
ARM_SMMU_CR0ACK);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index aab62f949d6f..72d774a40215 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2124,7 +2124,6 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
* from being accepted.
*/
card = md->queue.card;
- mmc_cleanup_queue(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2134,6 +2133,7 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
del_gendisk(md->disk);
}
+ mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
}
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c54fcae79712..5735e1d7403c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3161,8 +3161,12 @@ static int bond_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
+ int ret;
+
netdev_dbg(event_dev, "IFF_MASTER\n");
- return bond_master_netdev_event(event, event_dev);
+ ret = bond_master_netdev_event(event, event_dev);
+ if (ret != NOTIFY_DONE)
+ return ret;
}
if (event_dev->flags & IFF_SLAVE) {
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 8a1da7e67707..7f8d269dd75a 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -130,6 +130,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
(fs->m_ext.vlan_etype || fs->m_ext.data[1]))
return -EINVAL;
+ if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
+ return -EINVAL;
+
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
@@ -330,6 +333,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
int ret;
u32 reg;
+ if (loc >= CFP_NUM_RULES)
+ return -EINVAL;
+
/* Refuse deletion of unused rules, and the default reserved rule */
if (!test_bit(loc, priv->cfp.used) || loc == 0)
return -EINVAL;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 67824faf2cd7..f114da4346ef 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -854,7 +854,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
if (err)
return UINT64_MAX;
- high = reg;
+ low |= ((u32)reg) << 16;
}
break;
case STATS_TYPE_BANK1:
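Editor's note: the one-line fix above matters because the hardware exposes a 32-bit counter as two 16-bit reads: the second read is the upper half of the *low* 32 bits, so it must be OR-ed in at bit 16, not stored as the high word of the 64-bit result. A hedged sketch, with port_read16() standing in for mv88e6xxx_port_read():

#include <stdint.h>

/* Stub register read; reg holds bits 0..15, reg + 1 holds bits 16..31. */
static uint16_t port_read16(int reg)
{
	(void)reg;
	return 0;
}

static uint64_t read_stat32(int reg)
{
	uint32_t low = port_read16(reg);		/* bits  0..15 */

	low |= (uint32_t)port_read16(reg + 1) << 16;	/* bits 16..31 */
	return low;	/* the old code wrongly used this as bits 32..63 */
}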
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9c6e7fd24104..04b5159502e5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7246,8 +7246,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ if (rc && vnic->mc_list_count) {
+ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
+ rc);
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+ vnic->mc_list_count = 0;
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
+ }
if (rc)
- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
+ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 7d9e149dce2e..9873665e8648 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
+/* NOTE: Packets bigger than 1530 bytes are split across multiple pages and
+ * XDP needs the buffer to be contiguous. Allow XDP to be set up only if we
+ * don't exceed this value, keeping headroom for the 14-byte Ethernet header
+ * and two VLAN tags (for QinQ).
+ */
+#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1539,6 +1546,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
struct nicvf *nic = netdev_priv(netdev);
int orig_mtu = netdev->mtu;
+ /* For now, support only the usual MTU-sized frames, plus some
+ * headroom for VLAN and QinQ tags.
+ */
+ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+
netdev->mtu = new_mtu;
if (!netif_running(netdev))
@@ -1789,8 +1805,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool bpf_attached = false;
int ret = 0;
- /* For now just support only the usual MTU sized frames */
- if (prog && (dev->mtu > 1500)) {
+ /* For now, support only the usual MTU-sized frames, plus some
+ * headroom for VLAN and QinQ tags.
+ */
+ if (prog && dev->mtu > MAX_XDP_MTU) {
netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
dev->mtu);
return -EOPNOTSUPP;
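Editor's note: with ETH_HLEN = 14 and VLAN_HLEN = 4, the new limit works out to MAX_XDP_MTU = 1530 - 14 - 2 * 4 = 1508 bytes, so the previously hard-coded 1500-byte check becomes both slightly less strict and self-documenting, and the same bound is now enforced at MTU-change time as well as at program attach.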
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0d17d5b6e744..3dcd9c3d8781 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -774,11 +774,8 @@ static void release_napi(struct ibmvnic_adapter *adapter)
return;
for (i = 0; i < adapter->num_active_rx_napi; i++) {
- if (&adapter->napi[i]) {
- netdev_dbg(adapter->netdev,
- "Releasing napi[%d]\n", i);
- netif_napi_del(&adapter->napi[i]);
- }
+ netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
+ netif_napi_del(&adapter->napi[i]);
}
kfree(adapter->napi);
@@ -1972,13 +1969,11 @@ static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
- struct net_device *netdev;
bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
- netdev = adapter->netdev;
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
@@ -2924,8 +2919,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
goto req_tx_irq_failed;
}
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
+ adapter->vdev->unit_address, i);
rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", scrq);
+ 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
@@ -2945,8 +2942,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
}
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
+ adapter->vdev->unit_address, i);
rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", scrq);
+ 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
scrq->irq, rc);
@@ -4676,8 +4675,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
(unsigned long)adapter);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
- rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
- adapter);
+ snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
+ adapter->vdev->unit_address);
+ rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
if (rc) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
vdev->irq, rc);
@@ -4976,7 +4976,7 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(failover, 0200, NULL, failover_store);
+static DEVICE_ATTR_WO(failover);
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 8e328d70ceb0..dcf2eb6d9290 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -855,6 +855,7 @@ struct ibmvnic_crq_queue {
dma_addr_t msg_token;
spinlock_t lock;
bool active;
+ char name[32];
};
union sub_crq {
@@ -881,6 +882,7 @@ struct ibmvnic_sub_crq_queue {
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
atomic_t used;
+ char name[32];
};
struct ibmvnic_long_term_buff {
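Editor's note: the name[32] fields added above exist because request_irq() keeps the name pointer for as long as the handler is registered, so a per-queue buffer inside the queue structure gives each interrupt a stable, unique label in /proc/interrupts. A minimal sketch, with the format string taken from the patch and unit_address/idx as example inputs:

#include <stdio.h>

struct queue {
	char name[32];	/* must outlive the IRQ registration */
};

static void set_tx_irq_name(struct queue *q, unsigned int unit_address,
			    int idx)
{
	snprintf(q->name, sizeof(q->name), "ibmvnic-%x-tx%d",
		 unit_address, idx);
}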
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 6024338d0914..90ac9a9fa28e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1633,7 +1633,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
break;
case MLX5_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
break;
default:
netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ccb6287aeeb7..1d2bb7fa68b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -392,10 +392,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
i2c_addr = MLX5_I2C_ADDR_LOW;
- if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
- i2c_addr = MLX5_I2C_ADDR_HIGH;
- offset -= MLX5_EEPROM_PAGE_LENGTH;
- }
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 295bf3ffa43e..2e86b41ff327 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2289,11 +2289,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
if (err)
return err;
+ mlxsw_sp_port->link.autoneg = autoneg;
+
if (!netif_running(dev))
return 0;
- mlxsw_sp_port->link.autoneg = autoneg;
-
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 872d56167000..3fbaebde9f8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2597,8 +2597,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- stmmac_check_ether_addr(priv);
-
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -4178,6 +4176,8 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
+ stmmac_check_ether_addr(priv);
+
/* Configure real RX and TX queues */
netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 5c399d2891cf..90a38250f274 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -160,15 +160,19 @@ static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
.func = 6,
.phy_addr = 1,
},
+ /*
* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
* The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
* has only one PCI network device, while other asset tags are
* for the IOT2040, which has two.
+ */
{
.name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-1YA2",
.func = 6,
.phy_addr = 1,
},
{
.name = "SIMATIC IOT2000",
- .asset_tag = "6ES7647-0AA00-1YA2",
.func = 7,
.phy_addr = 1,
},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1311f53e8127..e4fd2976c52d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1482,9 +1482,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
+ for (i = 0; i < count; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
@@ -1523,9 +1524,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
+ int count = marvell_get_sset_count(phydev);
int i;
- for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
+ for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index aa5b23be8cb3..ade4726ea59a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1157,6 +1157,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EINVAL;
}
+ if (netdev_has_upper_dev(dev, port_dev)) {
+ netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+ portname);
+ return -EBUSY;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
diff --git a/drivers/net/wimax/i2400m/sysfs.c b/drivers/net/wimax/i2400m/sysfs.c
index 1237109f251a..8c67df11105c 100644
--- a/drivers/net/wimax/i2400m/sysfs.c
+++ b/drivers/net/wimax/i2400m/sysfs.c
@@ -65,8 +65,7 @@ error_bad_value:
}
static
-DEVICE_ATTR(i2400m_idle_timeout, S_IWUSR,
- NULL, i2400m_idle_timeout_store);
+DEVICE_ATTR_WO(i2400m_idle_timeout);
static
struct attribute *i2400m_dev_attrs[] = {
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 27a49068d32d..57ad56435dda 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
- return err;
+ goto err_put;
}
mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
+err_put:
pci_dev_put(pdev);
return err;
}
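Editor's note: the fix completes the driver's goto-unwind error handling: every failure after a resource acquisition must jump to the label that releases everything acquired so far, in reverse order; returning directly, as the old code did when pci_enable_device() failed, leaks the pci_dev reference. A generic userspace sketch of the pattern, with malloc()/free() standing in for the PCI calls:

#include <stdlib.h>

static int probe(void)
{
	void *dev_ref, *enabled;

	dev_ref = malloc(16);		/* e.g. pci_dev_get() */
	if (!dev_ref)
		return -1;

	enabled = malloc(16);		/* e.g. pci_enable_device() */
	if (!enabled)
		goto err_put;		/* must undo the first step too */

	/* success: the "device" keeps both resources */
	return 0;

err_put:
	free(dev_ref);
	return -1;
}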
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 922e3d69fd84..32853496fe8c 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -329,6 +329,8 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
struct ieee80211_vendor_ie *vendorhdr;
u16 gen_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
int left_len, parsed_len = 0;
+ unsigned int token_len;
+ int err = 0;
if (!info->tail || !info->tail_len)
return 0;
@@ -344,6 +346,12 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
*/
while (left_len > sizeof(struct ieee_types_header)) {
hdr = (void *)(info->tail + parsed_len);
+ token_len = hdr->len + sizeof(struct ieee_types_header);
+ if (token_len > left_len) {
+ err = -EINVAL;
+ goto out;
+ }
+
switch (hdr->element_id) {
case WLAN_EID_SSID:
case WLAN_EID_SUPP_RATES:
@@ -357,13 +365,16 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
case WLAN_EID_VENDOR_SPECIFIC:
break;
default:
- memcpy(gen_ie->ie_buffer + ie_len, hdr,
- hdr->len + sizeof(struct ieee_types_header));
- ie_len += hdr->len + sizeof(struct ieee_types_header);
+ if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
+ err = -EINVAL;
+ goto out;
+ }
+ memcpy(gen_ie->ie_buffer + ie_len, hdr, token_len);
+ ie_len += token_len;
break;
}
- left_len -= hdr->len + sizeof(struct ieee_types_header);
- parsed_len += hdr->len + sizeof(struct ieee_types_header);
+ left_len -= token_len;
+ parsed_len += token_len;
}
/* parse only WPA vendor IE from tail, WMM IE is configured by
@@ -373,15 +384,17 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
WLAN_OUI_TYPE_MICROSOFT_WPA,
info->tail, info->tail_len);
if (vendorhdr) {
- memcpy(gen_ie->ie_buffer + ie_len, vendorhdr,
- vendorhdr->len + sizeof(struct ieee_types_header));
- ie_len += vendorhdr->len + sizeof(struct ieee_types_header);
+ token_len = vendorhdr->len + sizeof(struct ieee_types_header);
+ if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
+ err = -EINVAL;
+ goto out;
+ }
+ memcpy(gen_ie->ie_buffer + ie_len, vendorhdr, token_len);
+ ie_len += token_len;
}
- if (!ie_len) {
- kfree(gen_ie);
- return 0;
- }
+ if (!ie_len)
+ goto out;
gen_ie->ie_index = cpu_to_le16(gen_idx);
gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
@@ -391,13 +404,15 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
if (mwifiex_update_uap_custom_ie(priv, gen_ie, &gen_idx, NULL, NULL,
NULL, NULL)) {
- kfree(gen_ie);
- return -1;
+ err = -EINVAL;
+ goto out;
}
priv->gen_idx = gen_idx;
+
+ out:
kfree(gen_ie);
- return 0;
+ return err;
}
/* This function parses different IEs-head & tail IEs, beacon IEs,
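Editor's note: the token_len checks added above are the standard defensive TLV walk: validate that each element fits inside the remaining input before trusting its length field, and that the copy fits the output buffer. A self-contained sketch of the same pattern; the struct and function names are illustrative, not mwifiex's API.

#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint8_t id;
	uint8_t len;	/* payload length, excluding this header */
};

/* Copy well-formed TLVs from in[] to out[]; returns bytes written,
 * or -1 if an element overruns the input or the output. */
static int copy_tlvs(const uint8_t *in, size_t in_len,
		     uint8_t *out, size_t out_cap)
{
	size_t parsed = 0, out_len = 0;

	while (in_len - parsed > sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *hdr = (const void *)(in + parsed);
		size_t token_len = sizeof(*hdr) + hdr->len;

		if (token_len > in_len - parsed)
			return -1;	/* element overruns the input */
		if (out_len + token_len > out_cap)
			return -1;	/* element overruns the output */

		memcpy(out + out_len, hdr, token_len);
		out_len += token_len;
		parsed += token_len;
	}
	return (int)out_len;
}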
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index abd156db08fb..04730ae2386b 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1244,6 +1244,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
}
switch (element_id) {
case WLAN_EID_SSID:
+ if (element_len > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
bss_entry->ssid.ssid_len = element_len;
memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
element_len);
@@ -1253,6 +1255,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_SUPP_RATES:
+ if (element_len > MWIFIEX_SUPPORTED_RATES)
+ return -EINVAL;
memcpy(bss_entry->data_rates, current_ptr + 2,
element_len);
memcpy(bss_entry->supported_rates, current_ptr + 2,
@@ -1262,6 +1266,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_FH_PARAMS:
+ if (element_len + 2 < sizeof(*fh_param_set))
+ return -EINVAL;
fh_param_set =
(struct ieee_types_fh_param_set *) current_ptr;
memcpy(&bss_entry->phy_param_set.fh_param_set,
@@ -1270,6 +1276,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_DS_PARAMS:
+ if (element_len + 2 < sizeof(*ds_param_set))
+ return -EINVAL;
ds_param_set =
(struct ieee_types_ds_param_set *) current_ptr;
@@ -1281,6 +1289,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_CF_PARAMS:
+ if (element_len + 2 < sizeof(*cf_param_set))
+ return -EINVAL;
cf_param_set =
(struct ieee_types_cf_param_set *) current_ptr;
memcpy(&bss_entry->ss_param_set.cf_param_set,
@@ -1289,6 +1299,8 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_IBSS_PARAMS:
+ if (element_len + 2 < sizeof(*ibss_param_set))
+ return -EINVAL;
ibss_param_set =
(struct ieee_types_ibss_param_set *)
current_ptr;
@@ -1298,10 +1310,14 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_ERP_INFO:
+ if (!element_len)
+ return -EINVAL;
bss_entry->erp_flags = *(current_ptr + 2);
break;
case WLAN_EID_PWR_CONSTRAINT:
+ if (!element_len)
+ return -EINVAL;
bss_entry->local_constraint = *(current_ptr + 2);
bss_entry->sensed_11h = true;
break;
@@ -1341,6 +1357,9 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
break;
case WLAN_EID_VENDOR_SPECIFIC:
+ if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
+ return -EINVAL;
+
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fd3bf6ff1ead..aeabd8eaa215 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1530,14 +1530,14 @@ static int nvme_revalidate_disk(struct gendisk *disk)
struct nvme_ns_ids ids;
int ret = 0;
- if (ctrl->state != NVME_CTRL_LIVE)
- return 0;
-
if (test_bit(NVME_NS_DEAD, &ns->flags)) {
set_capacity(disk, 0);
return -ENODEV;
}
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return 0;
+
id = nvme_identify_ns(ctrl, ns->head->ns_id);
if (!id)
return -ENODEV;
@@ -3217,6 +3217,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
return -ENOMEM;
for (i = 0; i < num_lists; i++) {
+ if (ctrl->state != NVME_CTRL_LIVE)
+ goto free;
ret = nvme_identify_ns_list(ctrl, prev, ns_list);
if (ret)
goto free;
@@ -3295,6 +3297,8 @@ static void nvme_scan_work(struct work_struct *work)
if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ return;
}
if (nvme_identify_ctrl(ctrl, &id))
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f3b7929ea4d3..2ebd5f0ef250 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- if (nvme_rdma_queue_idx(queue) == 0) {
- nvme_rdma_free_qe(queue->device->dev,
- &queue->ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
- }
-
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@@ -738,6 +732,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_cleanup_queue(ctrl->ctrl.admin_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
nvme_rdma_free_queue(&ctrl->queues[0]);
}
@@ -754,11 +750,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ if (error)
+ goto out_free_queue;
+
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
if (IS_ERR(ctrl->ctrl.admin_tagset)) {
error = PTR_ERR(ctrl->ctrl.admin_tagset);
- goto out_free_queue;
+ goto out_free_async_qe;
}
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -794,12 +795,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_cleanup_queue;
- error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
- &ctrl->async_event_sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- if (error)
- goto out_cleanup_queue;
-
return 0;
out_cleanup_queue:
@@ -808,6 +803,9 @@ out_cleanup_queue:
out_free_tagset:
if (new)
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index caa7be10e473..c490b018b279 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -381,8 +381,6 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf)
return -EINVAL;
epf->epc = epc;
- dma_set_coherent_mask(&epf->dev, epc->dev.coherent_dma_mask);
- epf->dev.dma_mask = epc->dev.dma_mask;
spin_lock_irqsave(&epc->lock, flags);
list_add_tail(&epf->list, &epc->pci_epf);
@@ -497,9 +495,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
INIT_LIST_HEAD(&epc->pci_epf);
device_initialize(&epc->dev);
- dma_set_coherent_mask(&epc->dev, dev->coherent_dma_mask);
epc->dev.class = pci_epc_class;
- epc->dev.dma_mask = dev->dma_mask;
epc->ops = ops;
ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index debb99ddfd53..1176c8271615 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind);
*/
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar)
{
- struct device *dev = &epf->dev;
+ struct device *dev = epf->epc->dev.parent;
if (!addr)
return;
@@ -122,7 +122,7 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space);
void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
{
void *space;
- struct device *dev = &epf->dev;
+ struct device *dev = epf->epc->dev.parent;
dma_addr_t phys_addr;
if (size < 128)
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 33bca405ae87..ba42cc7882fb 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -198,6 +198,38 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
+static bool pcie_retrain_link(struct pcie_link_state *link)
+{
+ struct pci_dev *parent = link->pdev;
+ unsigned long start_jiffies;
+ u16 reg16;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ if (parent->clear_retrain_link) {
+ /*
+ * Due to an erratum in some devices the Retrain Link bit
+ * needs to be cleared again manually to allow the link
+ * training to succeed.
+ */
+ reg16 &= ~PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Wait for link training to end. Break out after the timeout. */
+ start_jiffies = jiffies;
+ for (;;) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+ break;
+ msleep(1);
+ }
+ return !(reg16 & PCI_EXP_LNKSTA_LT);
+}
+
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -207,7 +239,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
int same_clock = 1;
u16 reg16, parent_reg, child_reg[8];
- unsigned long start_jiffies;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
/*
@@ -247,21 +278,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
reg16 &= ~PCI_EXP_LNKCTL_CCC;
pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
- /* Retrain link */
- reg16 |= PCI_EXP_LNKCTL_RL;
- pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
-
- /* Wait for link training end. Break out after waiting for timeout */
- start_jiffies = jiffies;
- for (;;) {
- pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
- break;
- if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
- break;
- msleep(1);
- }
- if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ if (pcie_retrain_link(link))
return;
/* Training failed. Restore common clock configurations */
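Editor's note: pcie_retrain_link() above follows the usual poll-with-timeout shape: kick the operation, then re-read the status register until the busy bit clears or a deadline passes, sleeping about 1 ms between polls. A userspace sketch under those assumptions; read_status() is a hypothetical register read and LT mirrors PCI_EXP_LNKSTA_LT (0x0800).

#include <stdbool.h>
#include <time.h>

#define LT 0x0800	/* "link training in progress" status bit */

/* Stub for the LNKSTA register read. */
static unsigned int read_status(void)
{
	return 0;
}

static bool wait_link_trained(long timeout_ms)
{
	struct timespec start, now, pause = { .tv_nsec = 1000000 };

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (!(read_status() & LT))
			return true;	/* training finished */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return false;	/* give up after the deadline */
		nanosleep(&pause, NULL);	/* ~1 ms between polls */
	}
}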
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5e40d91b8a20..dcefae71d7fa 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2085,6 +2085,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+/*
+ * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
+ * Link bit cleared after starting the link retrain process to allow this
+ * process to finish.
+ *
+ * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the
+ * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf.
+ */
+static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
+{
+ dev->clear_retrain_link = 1;
+ dev_info(&dev->dev, "Enable PCIe Retrain Link quirk\n");
+}
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
+
static void fixup_rev1_53c810(struct pci_dev *dev)
{
u32 class = dev->class;
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index b738561c971c..698309ee7b5d 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1314,6 +1314,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
return 0;
+ if (eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
+ return 0;
+
dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
iowrite32(hdr, hdr_reg);
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index f85cae240f12..7e92e491c2e7 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
+ /*
+ * TODO: some models have alarms on a minute boundary but still support
+ * real hardware interrupts. Add this once the core supports it.
+ */
+ if (config->rtc_data_start != RTC_SEC)
+ rtc->rtc_dev->uie_unsupported = 1;
+
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 6c2d3989f967..9b6a927149a4 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -462,7 +462,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
- int value = 0xff; /* return 0xff for ignored values */
+ int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 7727b79b9a34..e593787fa82b 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2640,8 +2640,7 @@ lpfc_soft_wwn_enable_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR(lpfc_soft_wwn_enable, S_IWUSR, NULL,
- lpfc_soft_wwn_enable_store);
+static DEVICE_ATTR_WO(lpfc_soft_wwn_enable);
/**
* lpfc_soft_wwpn_show - Return the cfg soft ww port name of the adapter
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 04f0c4d2e256..c62ccfbfcf6a 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -335,20 +335,24 @@ void qedf_restart_rport(struct qedf_rport *fcport)
struct fc_lport *lport;
struct fc_rport_priv *rdata;
u32 port_id;
+ unsigned long flags;
if (!fcport)
return;
+ spin_lock_irqsave(&fcport->rport_lock, flags);
if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) ||
!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
QEDF_ERR(&(fcport->qedf->dbg_ctx), "fcport %p already in reset or not offloaded.\n",
fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
return;
}
/* Set that we are now in reset */
set_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
rdata = fcport->rdata;
if (rdata) {
@@ -357,10 +361,16 @@ void qedf_restart_rport(struct qedf_rport *fcport)
QEDF_ERR(&(fcport->qedf->dbg_ctx),
"LOGO port_id=%x.\n", port_id);
fc_rport_logoff(rdata);
+ mutex_lock(&lport->disc.disc_mutex);
/* Recreate the rport and log back in */
rdata = fc_rport_create(lport, port_id);
- if (rdata)
+ if (rdata) {
+ mutex_unlock(&lport->disc.disc_mutex);
fc_rport_login(rdata);
+ fcport->rdata = rdata;
+ } else {
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
}
clear_bit(QEDF_RPORT_IN_RESET, &fcport->flags);
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 4114090daea6..e1a661bfda05 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -908,8 +908,10 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
"Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
kfree_skb(skb);
rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
- if (rdata)
+ if (rdata) {
rdata->retries = lport->max_rport_retry_count;
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
return -EINVAL;
}
/* End NPIV filtering */
@@ -1369,9 +1371,11 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
*/
fcport = (struct qedf_rport *)&rp[1];
+ spin_lock_irqsave(&fcport->rport_lock, flags);
/* Only free this fcport if it is offloaded already */
if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
qedf_cleanup_fcport(qedf, fcport);
/*
@@ -1385,8 +1389,9 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
&fcport->flags);
atomic_dec(&qedf->num_offloads);
+ } else {
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
-
break;
case RPORT_EV_NONE:
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 35420a2054b2..1ef08e8f9f13 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -218,7 +218,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
mutex_lock(&ha->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
- mutex_unlock(&vha->hw->optrom_mutex);
+ mutex_unlock(&ha->optrom_mutex);
return -EAGAIN;
}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62e151dddec1..c7533fa7f46e 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -14,7 +14,7 @@
* | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
- * | Mailbox commands | 0x1205 | 0x11a2-0x11ff |
+ * | Mailbox commands | 0x1206 | 0x11a2-0x11ff |
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
@@ -717,7 +717,7 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
/**
* qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
- * @ha: HA context
+ * @vha: HA context
* @hardware_locked: Called with the hardware_lock
*/
void
@@ -887,7 +887,7 @@ qla2300_fw_dump_failed:
/**
* qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
- * @ha: HA context
+ * @vha: HA context
* @hardware_locked: Called with the hardware_lock
*/
void
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 436d5c5e8b4a..3aae78a78917 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -21,11 +21,10 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *);
/**
* qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
- * @ha: HA context
- * @req_size: request size in bytes
- * @rsp_size: response size in bytes
+ * @vha: HA context
+ * @arg: CT arguments
*
- * Returns a pointer to the @ha's ms_iocb.
+ * Returns a pointer to the @vha's ms_iocb.
*/
void *
qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
@@ -61,9 +60,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
/**
* qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
- * @ha: HA context
- * @req_size: request size in bytes
- * @rsp_size: response size in bytes
+ * @vha: HA context
+ * @arg: CT arguments
*
* Returns a pointer to the @vha's ms_iocb.
*/
@@ -101,7 +99,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
/**
* qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
- * @ct_req: CT request buffer
+ * @p: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
*
@@ -196,7 +194,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
/**
* qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
- * @ha: HA context
+ * @vha: HA context
* @fcport: fcport entry to updated
*
* Returns 0 on success.
@@ -283,7 +281,7 @@ qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
/**
* qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* NOTE: Non-Nx_Ports are not requested.
@@ -371,7 +369,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
/**
* qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
@@ -441,7 +439,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/**
* qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
@@ -583,7 +581,7 @@ err2:
/**
* qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -677,7 +675,8 @@ done:
/**
* qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
- * @ha: HA context
+ * @vha: HA context
+ * @type: not used
*
* Returns 0 on success.
*/
@@ -773,7 +772,7 @@ done:
/**
* qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -880,7 +879,7 @@ qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
/**
* qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -978,7 +977,7 @@ done:
/**
* qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
- * @ha: HA context
+ * @vha: HA context
* @cmd: GS command
* @scmd_len: Subcommand length
* @data_size: response size in bytes
@@ -1011,7 +1010,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
/**
* qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
- * @ha: HA context
+ * @vha: HA context
* @fcport: fcport entry to updated
*
* This command uses the old Execute SNS Command mailbox routine.
@@ -1075,7 +1074,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
/**
* qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* This command uses the old Execute SNS Command mailbox routine.
@@ -1148,7 +1147,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
/**
* qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* This command uses the old Execute SNS Command mailbox routine.
@@ -1204,7 +1203,7 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
/**
* qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* This command uses the old Execute SNS Command mailbox routine.
@@ -1267,7 +1266,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
/**
* qla2x00_snd_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
- * @ha: HA context
+ * @vha: HA context
*
* This command uses the old Execute SNS Command mailbox routine.
*
@@ -1316,8 +1315,7 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
/**
* qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
- * HBA.
- * @ha: HA context
+ * @vha: HA context
*
* This command uses the old Execute SNS Command mailbox routine.
*
@@ -1373,7 +1371,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
/**
* qla2x00_mgmt_svr_login() - Login to fabric Management Service.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -1409,7 +1407,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
/**
* qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
- * @ha: HA context
+ * @vha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
@@ -1447,7 +1445,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
/**
* qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
- * @ha: HA context
+ * @vha: HA context
* @req_size: request size in bytes
* @rsp_size: response size in bytes
*
@@ -1504,7 +1502,7 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
/**
* qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
- * @ct_req: CT request buffer
+ * @p: CT request buffer
* @cmd: GS command
* @rsp_size: response size in bytes
*
@@ -1526,8 +1524,8 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
}
/**
- * qla2x00_fdmi_rhba() -
- * @ha: HA context
+ * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -1736,8 +1734,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
}
/**
- * qla2x00_fdmi_rpa() -
- * @ha: HA context
+ * qla2x00_fdmi_rpa() - perform RPA registration
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -1948,8 +1946,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
}
/**
- * qla2x00_fdmiv2_rhba() -
- * @ha: HA context
+ * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2262,7 +2260,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
/**
* qla2x00_fdmi_dhba() -
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2310,7 +2308,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
/**
* qla2x00_fdmiv2_rpa() -
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2640,7 +2638,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
/**
* qla2x00_fdmi_register() -
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2698,7 +2696,7 @@ out:
/**
* qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
@@ -2783,7 +2781,7 @@ qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
/**
* qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
* Returns 0 on success.
@@ -2897,7 +2895,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
/**
* qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
*
- * @ha: HA context
+ * @vha: HA context
* @list: switch info entries to populate
*
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a9076fb93b8f..d22376566f53 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -423,7 +423,7 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__qla24xx_handle_gpdb_event(vha, ea);
}
-int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -1551,7 +1551,8 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
}
-void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+ struct event_arg *ea)
{
ql_dbg(ql_dbg_disc, vha, 0x2118,
"%s %d %8phC post PRLI\n",
@@ -2298,7 +2299,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
/**
* qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2329,7 +2330,7 @@ qla2100_pci_config(scsi_qla_host_t *vha)
/**
* qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2411,7 +2412,7 @@ qla2300_pci_config(scsi_qla_host_t *vha)
/**
* qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2455,7 +2456,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
/**
* qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2486,7 +2487,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
/**
* qla2x00_isp_firmware() - Choose firmware image.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2522,7 +2523,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha)
/**
* qla2x00_reset_chip() - Reset ISP chip.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2666,6 +2667,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
/**
* qla81xx_reset_mpi() - Resets MPI FW via Write MPI Register MBC.
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2682,7 +2684,7 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
/**
* qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2897,7 +2899,7 @@ acquired:
/**
* qla24xx_reset_chip() - Reset ISP24xx chip.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2921,7 +2923,7 @@ qla24xx_reset_chip(scsi_qla_host_t *vha)
/**
* qla2x00_chip_diag() - Test chip for proper operation.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2940,8 +2942,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
/* Assume a failed state */
rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_init, vha, 0x007b,
- "Testing device at %lx.\n", (u_long)&reg->flash_address);
+ ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
+ &reg->flash_address);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -3045,7 +3047,7 @@ chip_diag_failed:
/**
* qla24xx_chip_diag() - Test ISP24xx for proper operation.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -3513,7 +3515,7 @@ out:
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -3673,7 +3675,7 @@ failed:
/**
* qla2x00_init_response_q_entries() - Initializes response queue entries.
- * @ha: HA context
+ * @rsp: response queue
*
* Beginning of request ring has initialization control block already built
* by nvram config routine.
@@ -3698,7 +3700,7 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
/**
* qla2x00_update_fw_options() - Read and process firmware options.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -3961,7 +3963,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
/**
* qla2x00_init_rings() - Initializes firmware.
- * @ha: HA context
+ * @vha: HA context
*
* Beginning of request ring has initialization control block already built
* by nvram config routine.
@@ -4070,7 +4072,7 @@ next_check:
/**
* qla2x00_fw_ready() - Waits for firmware ready.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -4739,7 +4741,7 @@ qla2x00_rport_del(void *data)
/**
* qla2x00_alloc_fcport() - Allocate a generic fcport.
- * @ha: HA context
+ * @vha: HA context
* @flags: allocation flags
*
* Returns a pointer to the allocated fcport, or NULL, if none available.
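Besides the doc fixes, the qla2x00_chip_diag() hunk above stops casting a register address to u_long for %lx and prints the pointer with %p instead, the idiomatic form (in newer kernels %p output is also obfuscated). A userspace analogue of the same rule -- printf's %p likewise wants a void * argument:

#include <stdio.h>

int main(void)
{
        int flash_address;

        /* Print the pointer itself with %p instead of casting it to
         * a long and using %lx; %p expects a void * argument. */
        printf("Testing device at %p.\n", (void *)&flash_address);
        return 0;
}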
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0854a8cfc60b..e396dd14ab5f 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -10,6 +10,7 @@
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
*
+ * @vha: HA context
* @dsds: number of data segment descriptors needed
*
* Returns the number of IOCB entries needed to store @dsds.
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 38061573bff9..a8eb42f993c4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -14,7 +14,7 @@
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
- * @cmd: SCSI command
+ * @sp: SCSI command
*
* Returns the proper CF_* direction based on CDB.
*/
@@ -86,7 +86,7 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
/**
* qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
- * @ha: HA context
+ * @vha: HA context
*
* Returns a pointer to the Continuation Type 0 IOCB packet.
*/
@@ -114,7 +114,8 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
/**
* qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
- * @ha: HA context
+ * @vha: HA context
+ * @req: request queue
*
* Returns a pointer to the continuation type 1 IOCB packet.
*/
@@ -445,6 +446,8 @@ queuing_error:
/**
* qla2x00_start_iocbs() - Execute the IOCB command
+ * @vha: HA context
+ * @req: request queue
*/
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
@@ -486,7 +489,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
- * @ha: HA context
+ * @vha: HA context
+ * @req: request queue
+ * @rsp: response queue
* @loop_id: loop ID
* @lun: LUN
* @type: marker modifier
@@ -1190,6 +1195,8 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
* @sp: SRB command to process
* @cmd_pkt: Command type 3 IOCB
* @tot_dsds: Total number of segments to transfer
+ * @tot_prot_dsds: Total number of segments with protection information
+ * @fw_prot_opts: Protection options to be passed to firmware
*/
inline int
qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7ca59386ab60..7902ed2a4864 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -25,7 +25,7 @@ static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
@@ -144,7 +144,7 @@ qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
/**
* qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
@@ -259,7 +259,7 @@ qla2300_intr_handler(int irq, void *dev_id)
/**
* qla2x00_mbx_completion() - Process mailbox command completions.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
* @mb0: Mailbox0 register
*/
static void
@@ -613,7 +613,8 @@ qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
/**
* qla2x00_async_event() - Process asynchronous events.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @mb: Mailbox registers (0 - 3)
*/
void
@@ -1257,7 +1258,8 @@ global_port_update:
/**
* qla2x00_process_completed_request() - Process a Fast Post response.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @req: request queue
* @index: SRB index
*/
void
@@ -1959,7 +1961,7 @@ static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
/**
* qla2x00_process_response_queue() - Process response queue entries.
- * @ha: SCSI driver HA context
+ * @rsp: response queue
*/
void
qla2x00_process_response_queue(struct rsp_que *rsp)
@@ -2363,7 +2365,8 @@ done:
/**
* qla2x00_status_entry() - Process a Status IOCB entry.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*/
static void
@@ -2745,7 +2748,7 @@ out:
/**
* qla2x00_status_cont_entry() - Process a Status Continuations entry.
- * @ha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*
* Extended sense data.
@@ -2803,7 +2806,8 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/**
* qla2x00_error_entry() - Process an error entry.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
* return : 1=allow further error analysis. 0=no additional error analysis.
*/
@@ -2862,7 +2866,7 @@ fatal:
/**
* qla24xx_mbx_completion() - Process mailbox command completions.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
* @mb0: Mailbox0 register
*/
static void
@@ -2931,7 +2935,8 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
/**
* qla24xx_process_response_queue() - Process response queue entries.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
*/
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct rsp_que *rsp)
@@ -3104,7 +3109,7 @@ done:
/**
* qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 67deeee6f8c8..f753aa495753 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -579,11 +579,19 @@ mbx_done:
}
pr_warn(" cmd=%x ****\n", command);
}
- ql_dbg(ql_dbg_mbx, vha, 0x1198,
- "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
- RD_REG_DWORD(&reg->isp24.host_status),
- RD_REG_DWORD(&reg->isp24.ictrl),
- RD_REG_DWORD(&reg->isp24.istatus));
+ if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1198,
+ "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
+ RD_REG_DWORD(&reg->isp24.host_status),
+ RD_REG_DWORD(&reg->isp24.ictrl),
+ RD_REG_DWORD(&reg->isp24.istatus));
+ } else {
+ ql_dbg(ql_dbg_mbx, vha, 0x1206,
+ "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
+ RD_REG_WORD(&reg->isp.ctrl_status),
+ RD_REG_WORD(&reg->isp.ictrl),
+ RD_REG_WORD(&reg->isp.istatus));
+ }
} else {
ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
}
@@ -3471,7 +3479,10 @@ qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
/**
* qla2x00_set_serdes_params() -
- * @ha: HA context
+ * @vha: HA context
+ * @sw_em_1g: serial link options
+ * @sw_em_2g: serial link options
+ * @sw_em_4g: serial link options
*
* Returns
*/
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 134f2b8a49fe..60f964c53c01 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -490,7 +490,7 @@ qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
/**
* qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -519,9 +519,9 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
/**
* qlafx00_soc_cpu_reset() - Perform warm reset of iSA (CPUs being reset on SOC).
- * @ha: HA context
+ * @vha: HA context
*
- */
+ */
static inline void
qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
{
@@ -625,7 +625,7 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
/**
* qlafx00_soft_reset() - Soft Reset ISPFx00.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -644,7 +644,7 @@ qlafx00_soft_reset(scsi_qla_host_t *vha)
/**
* qlafx00_chip_diag() - Test ISPFx00 for proper operation.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -1408,7 +1408,7 @@ qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
/**
* qlafx00_init_response_q_entries() - Initializes response queue entries.
- * @ha: HA context
+ * @rsp: response queue
*
* Beginning of request ring has initialization control block already built
* by nvram config routine.
@@ -2212,7 +2212,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct bsg_job *bsg_job;
struct fc_bsg_reply *bsg_reply;
struct srb_iocb *iocb_job;
- int res;
+ int res = 0;
struct qla_mt_iocb_rsp_fx00 fstatus;
uint8_t *fw_sts_ptr;
@@ -2270,7 +2270,8 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
/**
* qlafx00_status_entry() - Process a Status IOCB entry.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*/
static void
@@ -2543,7 +2544,7 @@ check_scsi_status:
/**
* qlafx00_status_cont_entry() - Process a Status Continuations entry.
- * @ha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*
* Extended sense data.
@@ -2621,7 +2622,9 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/**
* qlafx00_multistatus_entry() - Process Multi response queue entries.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
+ * @pkt: received packet
*/
static void
qlafx00_multistatus_entry(struct scsi_qla_host *vha,
@@ -2675,12 +2678,13 @@ qlafx00_multistatus_entry(struct scsi_qla_host *vha,
/**
* qlafx00_error_entry() - Process an error entry.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
* @pkt: Entry pointer
*/
static void
qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
- struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+ struct sts_entry_fx00 *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -2689,9 +2693,6 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
struct req_que *req = NULL;
int res = DID_ERROR << 16;
- ql_dbg(ql_dbg_async, vha, 0x507f,
- "type of error status in response: 0x%x\n", estatus);
-
req = ha->req_q_map[que];
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
@@ -2706,7 +2707,8 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
/**
* qlafx00_process_response_queue() - Process response queue entries.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
+ * @rsp: response queue
*/
static void
qlafx00_process_response_queue(struct scsi_qla_host *vha,
@@ -2738,9 +2740,11 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0 &&
pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+ ql_dbg(ql_dbg_async, vha, 0x507f,
+ "type of error status in response: 0x%x\n",
+ pkt->entry_status);
qlafx00_error_entry(vha, rsp,
- (struct sts_entry_fx00 *)pkt, pkt->entry_status,
- pkt->entry_type);
+ (struct sts_entry_fx00 *)pkt);
continue;
}
@@ -2782,7 +2786,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
/**
* qlafx00_async_event() - Process asynchronous events.
- * @ha: SCSI driver HA context
+ * @vha: SCSI driver HA context
*/
static void
qlafx00_async_event(scsi_qla_host_t *vha)
@@ -2858,10 +2862,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)
}
/**
- *
* qlafx00_mbx_completion() - Process mailbox command completions.
- * @ha: SCSI driver HA context
- * @mb16: Mailbox16 register
+ * @vha: SCSI driver HA context
+ * @mb0: value to be written into mailbox register 0
*/
static void
qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
@@ -2887,7 +2890,7 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
/**
* qlafx00_intr_handler() - Process interrupts for the ISPFX00.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 8d6cb46b898f..4d0aaae12253 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1732,7 +1732,7 @@ iospace_error_exit:
/**
* qla82xx_pci_config() - Setup ISP82xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -1753,7 +1753,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
/**
* qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers.
- * @ha: HA context
+ * @vha: HA context
*
* Returns 0 on success.
*/
@@ -2008,11 +2008,10 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
"MBX pointer ERROR.\n");
}
-/*
+/**
* qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
- * @regs:
*
* Called by system whenever the host adapter generates an interrupt.
*
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 525ac35a757b..fe856b602e03 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -280,9 +280,8 @@ qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
}
/**
- *
* qla8044_lock_recovery - Recovers the idc_lock.
- * @ha : Pointer to adapter structure
+ * @vha : Pointer to adapter structure
*
* Lock Recovery Register
* 5-2 Lock recovery owner: Function ID of driver doing lock recovery,
@@ -1639,10 +1638,10 @@ qla8044_set_rst_ready(struct scsi_qla_host *vha)
/**
* qla8044_need_reset_handler - Code to start reset sequence
- * @ha: pointer to adapter structure
+ * @vha: pointer to adapter structure
*
* Note: IDC lock must be held upon entry
- **/
+ */
static void
qla8044_need_reset_handler(struct scsi_qla_host *vha)
{
@@ -1859,8 +1858,8 @@ exit_update_idc_reg:
/**
* qla8044_need_qsnt_handler - Code to start qsnt
- * @ha: pointer to adapter structure
- **/
+ * @vha: pointer to adapter structure
+ */
static void
qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
{
@@ -2031,10 +2030,10 @@ exit_error:
/**
* qla8044_check_temp - Check the ISP82XX temperature.
- * @ha: adapter block pointer.
+ * @vha: adapter block pointer.
*
* Note: The caller should not hold the idc lock.
- **/
+ */
static int
qla8044_check_temp(struct scsi_qla_host *vha)
{
@@ -2071,10 +2070,10 @@ int qla8044_read_temperature(scsi_qla_host_t *vha)
/**
* qla8044_check_fw_alive - Check firmware health
- * @ha: Pointer to host adapter structure.
+ * @vha: Pointer to host adapter structure.
*
* Context: Interrupt
- **/
+ */
int
qla8044_check_fw_alive(struct scsi_qla_host *vha)
{
@@ -3879,7 +3878,7 @@ out:
#define PF_BITS_MASK (0xF << 16)
/**
* qla8044_intr_handler() - Process interrupts for the ISP8044
- * @irq:
+ * @irq: interrupt number
* @dev_id: SCSI driver HA context
*
* Called by system whenever the host adapter generates an interrupt.
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 83c1b7e17c80..8ba7c1db07c3 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -23,10 +23,6 @@
#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
-#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
-#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
-#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
-#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e0ac46db69c7..d1194b7a4b51 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -492,7 +492,7 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
{
if (IS_QLAFX00(ha)) {
- if (rsp && rsp->ring)
+ if (rsp && rsp->ring_fx00)
dma_free_coherent(&ha->pdev->dev,
(rsp->length_fx00 + 1) * sizeof(request_t),
rsp->ring_fx00, rsp->dma_fx00);
@@ -1744,10 +1744,45 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
return QLA_SUCCESS;
}
+static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
+ unsigned long *flags)
+ __releases(qp->qp_lock_ptr)
+ __acquires(qp->qp_lock_ptr)
+{
+ scsi_qla_host_t *vha = qp->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS) {
+ if (!sp_get(sp)) {
+ /* got sp */
+ spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+ qla_nvme_abort(ha, sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+ }
+ } else if (GET_CMD_SP(sp) && !ha->flags.eeh_busy &&
+ !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
+ !qla2x00_isp_reg_stat(ha) && sp->type == SRB_SCSI_CMD) {
+ /*
+ * Don't abort commands in adapter during EEH recovery as it's
+ * not accessible/responding.
+ *
+ * Get a reference to the sp and drop the lock. The reference
+ * ensures this sp->done() call and not the call in
+ * qla2xxx_eh_abort() ends the SCSI cmd (with result 'res').
+ */
+ if (!sp_get(sp)) {
+ spin_unlock_irqrestore(qp->qp_lock_ptr, *flags);
+ qla2xxx_eh_abort(GET_CMD_SP(sp));
+ spin_lock_irqsave(qp->qp_lock_ptr, *flags);
+ }
+ }
+ sp->done(sp, res);
+}
+
static void
__qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
{
- int cnt, status;
+ int cnt;
unsigned long flags;
srb_t *sp;
scsi_qla_host_t *vha = qp->vha;
@@ -1766,50 +1801,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
req->outstanding_cmds[cnt] = NULL;
switch (sp->cmd_type) {
case TYPE_SRB:
- if (sp->type == SRB_NVME_CMD ||
- sp->type == SRB_NVME_LS) {
- if (!sp_get(sp)) {
- /* got sp */
- spin_unlock_irqrestore
- (qp->qp_lock_ptr,
- flags);
- qla_nvme_abort(ha, sp, res);
- spin_lock_irqsave
- (qp->qp_lock_ptr, flags);
- }
- } else if (GET_CMD_SP(sp) &&
- !ha->flags.eeh_busy &&
- (!test_bit(ABORT_ISP_ACTIVE,
- &vha->dpc_flags)) &&
- !qla2x00_isp_reg_stat(ha) &&
- (sp->type == SRB_SCSI_CMD)) {
- /*
- * Don't abort commands in adapter
- * during EEH recovery as it's not
- * accessible/responding.
- *
- * Get a reference to the sp and drop
- * the lock. The reference ensures this
- * sp->done() call and not the call in
- * qla2xxx_eh_abort() ends the SCSI cmd
- * (with result 'res').
- */
- if (!sp_get(sp)) {
- spin_unlock_irqrestore
- (qp->qp_lock_ptr, flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave
- (qp->qp_lock_ptr, flags);
- /*
- * Get rid of extra reference caused
- * by early exit from qla2xxx_eh_abort
- */
- if (status == FAST_IO_FAIL)
- atomic_dec(&sp->ref_count);
- }
- }
- sp->done(sp, res);
+ qla2x00_abort_srb(qp, sp, res, &flags);
break;
case TYPE_TGT_CMD:
if (!vha->hw->tgt.tgt_ops || !tgt ||
@@ -3679,6 +3671,23 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ qla2x00_wait_for_sess_deletion(base_vha);
+
+ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(base_vha);
+ } else if (!IS_QLAFX00(ha)) {
+ if (IS_QLA8031(ha)) {
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
+ "Clearing fcoe driver presence.\n");
+ if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
+ "Error while clearing DRV-Presence.\n");
+ }
+
+ qla2x00_try_to_stop_firmware(base_vha);
+ }
+
/*
* if UNLOAD flag is already set, then continue unload,
* where it was set first.
@@ -4173,12 +4182,10 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
- if (ha->ctx_mempool)
- mempool_destroy(ha->ctx_mempool);
+ mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
- if (ha->srb_mempool)
- mempool_destroy(ha->srb_mempool);
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4480,8 +4487,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
dma_free_coherent(&ha->pdev->dev, MCTP_DUMP_SIZE, ha->mctp_dump,
ha->mctp_dump_dma);
- if (ha->srb_mempool)
- mempool_destroy(ha->srb_mempool);
+ mempool_destroy(ha->srb_mempool);
if (ha->dcbx_tlv)
dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
@@ -4513,8 +4519,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->async_pd)
dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
- if (ha->s_dma_pool)
- dma_pool_destroy(ha->s_dma_pool);
+ dma_pool_destroy(ha->s_dma_pool);
if (ha->gid_list)
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
@@ -4535,14 +4540,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
}
}
- if (ha->dl_dma_pool)
- dma_pool_destroy(ha->dl_dma_pool);
+ dma_pool_destroy(ha->dl_dma_pool);
- if (ha->fcp_cmnd_dma_pool)
- dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+ dma_pool_destroy(ha->fcp_cmnd_dma_pool);
- if (ha->ctx_mempool)
- mempool_destroy(ha->ctx_mempool);
+ mempool_destroy(ha->ctx_mempool);
qlt_mem_free(ha);
@@ -5885,21 +5887,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
return;
}
- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
- if (ha->flags.fw_started)
- qla2x00_abort_isp_cleanup(base_vha);
- } else if (!IS_QLAFX00(ha)) {
- if (IS_QLA8031(ha)) {
- ql_dbg(ql_dbg_p3p, base_vha, 0xb07e,
- "Clearing fcoe driver presence.\n");
- if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS)
- ql_dbg(ql_dbg_p3p, base_vha, 0xb079,
- "Error while clearing DRV-Presence.\n");
- }
-
- qla2x00_try_to_stop_firmware(base_vha);
- }
-
qla2x00_wait_for_sess_deletion(base_vha);
set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -7103,8 +7090,7 @@ qla2x00_module_exit(void)
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
qlt_exit();
- if (ctx_cachep)
- kmem_cache_destroy(ctx_cachep);
+ kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
}
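Two patterns recur in the qla_os.c changes above. The removed `if (x)` guards around mempool_destroy(), dma_pool_destroy() and kmem_cache_destroy() were dead weight because those destructors, like free(), accept NULL. And the new qla2x00_abort_srb() helper drops the queue-pair lock around its blocking abort calls, declaring the deliberate imbalance with __releases()/__acquires() so sparse does not warn. A hypothetical userspace sketch of that annotation pattern (build with -pthread; the macros expand to nothing outside a sparse/__CHECKER__ run):

#include <pthread.h>
#include <stdio.h>

#ifdef __CHECKER__
#define __releases(x)   __attribute__((context(x, 1, 0)))
#define __acquires(x)   __attribute__((context(x, 0, 1)))
#else
#define __releases(x)
#define __acquires(x)
#endif

static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Entered with qp_lock held; drops it around the blocking call and
 * re-takes it before returning, exactly as the annotations promise. */
static void abort_one_cmd(void)
        __releases(qp_lock)
        __acquires(qp_lock)
{
        pthread_mutex_unlock(&qp_lock);
        puts("blocking abort runs without the lock held");
        pthread_mutex_lock(&qp_lock);
}

int main(void)
{
        pthread_mutex_lock(&qp_lock);
        abort_one_cmd();
        pthread_mutex_unlock(&qp_lock);
        return 0;
}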
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index d9649b3afc51..2a3055c799fb 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2229,6 +2229,7 @@ qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
/**
* qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
+ * @ha: host adapter
* @man_id: Flash manufacturer ID
* @flash_id: Flash ID
*/
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 1339a647805a..79cbeb97ad57 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2193,7 +2193,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
- "qla_target(%d): task abort for non-existant session\n",
+ "qla_target(%d): task abort for non-existent session\n",
vha->vp_idx);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2379,20 +2379,20 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
}
if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
- if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
- ELS_LOGO ||
- mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
- ELS_PRLO ||
- mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
- ELS_TPRLO) {
+ switch (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode) {
+ case ELS_LOGO:
+ case ELS_PRLO:
+ case ELS_TPRLO:
ql_dbg(ql_dbg_disc, vha, 0x2106,
"TM response logo %phC status %#x state %#x",
mcmd->sess->port_name, mcmd->fc_tm_rsp,
mcmd->flags);
qlt_schedule_sess_for_deletion(mcmd->sess);
- } else {
+ break;
+ default:
qlt_send_notify_ack(vha->hw->base_qpair,
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
+ break;
}
} else {
if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
@@ -2425,7 +2425,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
- prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+ prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2452,7 +2452,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
- prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+ prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2487,12 +2487,12 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
qpair = cmd->qpair;
- pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
- pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
@@ -2660,9 +2660,9 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm)
cnt < QLA_TGT_DATASEGS_PER_CONT_24XX && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32
+ cpu_to_le32(lower_32_bits
(sg_dma_address(prm->sg)));
- *dword_ptr++ = cpu_to_le32(pci_dma_hi32
+ *dword_ptr++ = cpu_to_le32(upper_32_bits
(sg_dma_address(prm->sg)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
@@ -2704,9 +2704,9 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm)
(cnt < QLA_TGT_DATASEGS_PER_CMD_24XX) && prm->seg_cnt;
cnt++, prm->seg_cnt--) {
*dword_ptr++ =
- cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+ cpu_to_le32(lower_32_bits(sg_dma_address(prm->sg)));
- *dword_ptr++ = cpu_to_le32(pci_dma_hi32(
+ *dword_ptr++ = cpu_to_le32(upper_32_bits(
sg_dma_address(prm->sg)));
*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
@@ -6599,10 +6599,11 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
/**
* qla_tgt_lport_register - register lport with external module
*
- * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
- * @wwpn: Passwd FC target WWPN
- * @callback: lport initialization callback for tcm_qla2xxx code
* @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ * @phys_wwpn: physical port WWPN
+ * @npiv_wwpn: NPIV WWPN
+ * @npiv_wwnn: NPIV WWNN
+ * @callback: lport initialization callback for tcm_qla2xxx code
*/
int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
u64 npiv_wwpn, u64 npiv_wwnn,
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 91403269b204..085782db911c 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -771,14 +771,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define FC_TM_REJECT 4
#define FC_TM_FAILED 5
-#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
-#define pci_dma_lo32(a) (a & 0xffffffff)
-#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
-#else
-#define pci_dma_lo32(a) (a & 0xffffffff)
-#define pci_dma_hi32(a) 0
-#endif
-
#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
(((const uint8_t *)(sense))[0] & 0x70) == 0x70)
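The qla_target changes retire two driver-private layers: pci_map_sg()/pci_unmap_sg() were legacy wrappers around dma_map_sg()/dma_unmap_sg(), and the pci_dma_lo32()/pci_dma_hi32() macros re-implemented what lower_32_bits()/upper_32_bits() already provide. A standalone sketch of the 32-bit split (the definitions approximate the kernel helpers; the double 16-bit shift avoids undefined behaviour if the argument happens to be only 32 bits wide):

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
        uint64_t dma_addr = 0x0000004512345678ULL;

        printf("lo=0x%08x hi=0x%08x\n",
               lower_32_bits(dma_addr), upper_32_bits(dma_addr));
        return 0;
}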
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e24dc60feb78..44997d74e5a4 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -48,7 +48,6 @@
#include "tcm_qla2xxx.h"
static struct workqueue_struct *tcm_qla2xxx_free_wq;
-static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
/*
* Parse WWN.
@@ -425,7 +424,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
se_cmd->pi_err = 0;
/*
- * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
*/
return qlt_rdy_to_xfer(cmd);
@@ -2035,16 +2034,8 @@ static int tcm_qla2xxx_register_configfs(void)
goto out_fabric_npiv;
}
- tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
- if (!tcm_qla2xxx_cmd_wq) {
- ret = -ENOMEM;
- goto out_free_wq;
- }
-
return 0;
-out_free_wq:
- destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
target_unregister_template(&tcm_qla2xxx_npiv_ops);
out_fabric:
@@ -2054,7 +2045,6 @@ out_fabric:
static void tcm_qla2xxx_deregister_configfs(void)
{
- destroy_workqueue(tcm_qla2xxx_cmd_wq);
destroy_workqueue(tcm_qla2xxx_free_wq);
target_unregister_template(&tcm_qla2xxx_ops);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index a694de907a26..094993409cf2 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -317,7 +317,7 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
return ret ? ret : count;
}
-static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store);
+static DEVICE_ATTR_WO(emul_temp);
#endif
static ssize_t
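DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store) is exactly what DEVICE_ATTR_WO(emul_temp) expands to: the shorthand supplies the write-only mode itself and pastes _store onto the attribute name, so the swap works only because the callback already follows the <name>_store convention. A toy analogue of that token-pasting macro (all names here are illustrative, not the kernel's):

#include <stdio.h>

struct toy_attr {
        const char *name;
        int (*show)(char *buf);
        int (*store)(const char *buf);
};

/* Pastes "_store" onto the name, the way DEVICE_ATTR_WO() does. */
#define TOY_ATTR_WO(_name) \
        struct toy_attr _name##_attr = { #_name, NULL, _name##_store }

static int emul_temp_store(const char *buf)
{
        return printf("storing %s\n", buf) < 0 ? -1 : 0;
}

static TOY_ATTR_WO(emul_temp);

int main(void)
{
        return emul_temp_attr.store("45000");
}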
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 8316a9e129e6..b8c6a4889d8c 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -918,8 +918,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
{
- struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ struct vhost_umem_node *tmp, *node;
+ if (!size)
+ return -EFAULT;
+
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 68a113594808..2811c4afde01 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
int size = len * sizeof(u16);
int ret = -ENOMEM;
+ flags |= __GFP_NOWARN;
+
if (cmap->len != len) {
fb_dealloc_cmap(cmap);
if (!len)
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index de119f11b78f..455a15f70172 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -933,6 +933,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
if (var->vmode & FB_VMODE_DOUBLE)
vtotal *= 2;
+ if (!htotal || !vtotal)
+ return;
+
hfreq = pixclock/htotal;
mode->refresh = hfreq/vtotal;
}
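The fb_var_to_videomode() hunk above guards the refresh computation against a degenerate mode where htotal or vtotal is zero. A standalone sketch of the same check (the sample numbers model a 1080p mode and are illustrative):

#include <stdio.h>

/* Bail out before dividing when the mode geometry is degenerate,
 * instead of crashing on a division by zero. */
static int mode_refresh(unsigned int pixclock, unsigned int htotal,
                        unsigned int vtotal)
{
        if (!htotal || !vtotal)
                return 0;       /* leave refresh unset, as the driver does */
        return pixclock / htotal / vtotal;
}

int main(void)
{
        printf("%d\n", mode_refresh(148500000, 2200, 1125)); /* ~60 Hz */
        printf("%d\n", mode_refresh(148500000, 0, 1125));    /* guarded */
        return 0;
}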
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 83243af22d51..54a9ab285ab9 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -457,7 +457,6 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
struct watch_adapter *watch;
char *path, *token;
int err, rc;
- LIST_HEAD(staging_q);
path = u->u.buffer + sizeof(u->u.msg);
token = memchr(path, 0, u->u.msg.len);
@@ -515,7 +514,6 @@ static ssize_t xenbus_file_write(struct file *filp,
uint32_t msg_type;
int rc = len;
int ret;
- LIST_HEAD(staging_q);
/*
* We're expecting usermode to be writing properly formed
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 7b914c07c972..d8a7c023aa21 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -769,37 +769,27 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
return NULL;
}
-static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
-{
- if (ref->type == BTRFS_REF_METADATA) {
- if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
- return BTRFS_BLOCK_GROUP_SYSTEM;
- else
- return BTRFS_BLOCK_GROUP_METADATA;
- }
- return BTRFS_BLOCK_GROUP_DATA;
-}
-
static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *ref)
+ struct btrfs_ref *ref, int sign)
{
struct btrfs_space_info *space_info;
- u64 flags = generic_ref_to_space_flags(ref);
-
- space_info = __find_space_info(fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned, ref->len);
-}
+ s64 num_bytes;
+ u64 flags;
-static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
- struct btrfs_ref *ref)
-{
- struct btrfs_space_info *space_info;
- u64 flags = generic_ref_to_space_flags(ref);
+ ASSERT(sign == 1 || sign == -1);
+ num_bytes = sign * ref->len;
+ if (ref->type == BTRFS_REF_METADATA) {
+ if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
+ flags = BTRFS_BLOCK_GROUP_SYSTEM;
+ else
+ flags = BTRFS_BLOCK_GROUP_METADATA;
+ } else {
+ flags = BTRFS_BLOCK_GROUP_DATA;
+ }
space_info = __find_space_info(fs_info, flags);
ASSERT(space_info);
- percpu_counter_add(&space_info->total_bytes_pinned, -ref->len);
+ percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}
/*
@@ -2143,7 +2133,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
&old_ref_mod, &new_ref_mod);
if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
- sub_pinned_bytes(fs_info, generic_ref);
+ add_pinned_bytes(fs_info, generic_ref, -1);
return ret;
}
@@ -7194,7 +7184,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
}
out:
if (pin)
- add_pinned_bytes(fs_info, &generic_ref);
+ add_pinned_bytes(fs_info, &generic_ref, 1);
if (last_ref) {
/*
@@ -7237,7 +7227,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
}
if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
- add_pinned_bytes(fs_info, ref);
+ add_pinned_bytes(fs_info, ref, 1);
return ret;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 3ca7189c3902..e4137008e12b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2060,6 +2060,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
u64 len;
/*
+ * If the inode needs a full sync, make sure we use a full range to
+ * avoid log tree corruption, due to hole detection racing with ordered
+ * extent completion for adjacent ranges, and assertion failures during
+ * hole detection.
+ */
+ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags)) {
+ start = 0;
+ end = LLONG_MAX;
+ }
+
+ /*
* The range length can be represented by u64, we have to do the typecasts
* to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
*/
@@ -2616,10 +2628,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
&cached_state);
- if (ret) {
- inode_unlock(inode);
+ if (ret)
goto out_only_mutex;
- }
path = btrfs_alloc_path();
if (!path) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b3e279e68eaa..5f35f47ac107 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6231,8 +6231,18 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
name_len * 2);
inode_inc_iversion(&parent_inode->vfs_inode);
- parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime =
- current_time(&parent_inode->vfs_inode);
+ /*
+ * If we are replaying a log tree, we do not want to update the mtime
+ * and ctime of the parent directory with the current time, since the
+ * log replay procedure is responsible for setting them to their correct
+ * values (the ones it had when the fsync was done).
+ */
+ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) {
+ struct timespec now = current_time(&parent_inode->vfs_inode);
+
+ parent_inode->vfs_inode.i_mtime = now;
+ parent_inode->vfs_inode.i_ctime = now;
+ }
ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
if (ret)
btrfs_abort_transaction(trans, ret);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 56a507325508..9471e0376104 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -3872,7 +3872,13 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
subvol_slot);
block->last_snapshot = last_snapshot;
block->level = level;
- if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
+
+ /*
+ * If we have bg == NULL, we're called from btrfs_recover_relocation(),
+ * no one else can modify tree blocks, thus the qgroup accounting will
+ * not change regardless of the value of trace_leaf.
+ */
+ if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
block->trace_leaf = true;
else
block->trace_leaf = false;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 170a5aa63288..361f808e795b 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2211,22 +2211,30 @@ static int clean_dirty_subvols(struct reloc_control *rc)
struct btrfs_root *root;
struct btrfs_root *next;
int ret = 0;
+ int ret2;
list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
reloc_dirty_list) {
- struct btrfs_root *reloc_root = root->reloc_root;
+ if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
+ /* Merged subvolume, cleanup its reloc root */
+ struct btrfs_root *reloc_root = root->reloc_root;
- clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
- list_del_init(&root->reloc_dirty_list);
- root->reloc_root = NULL;
- if (reloc_root) {
- int ret2;
+ clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
+ list_del_init(&root->reloc_dirty_list);
+ root->reloc_root = NULL;
+ if (reloc_root) {
- ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
+ ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
+ if (ret2 < 0 && !ret)
+ ret = ret2;
+ }
+ btrfs_put_fs_root(root);
+ } else {
+ /* Orphan reloc tree, just clean it up */
+ ret2 = btrfs_drop_snapshot(root, NULL, 0, 1);
if (ret2 < 0 && !ret)
ret = ret2;
}
- btrfs_put_fs_root(root);
}
return ret;
}
@@ -2515,6 +2523,9 @@ again:
}
} else {
list_del_init(&reloc_root->root_list);
+ /* Don't forget to queue this reloc root for cleanup */
+ list_add_tail(&reloc_root->reloc_dirty_list,
+ &rc->dirty_subvol_roots);
}
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 60a70af16b06..b6caebc636c3 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -114,9 +114,27 @@ static void file_extent_err(const struct btrfs_fs_info *fs_info,
(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \
})
+static u64 file_extent_end(struct extent_buffer *leaf,
+ struct btrfs_key *key,
+ struct btrfs_file_extent_item *extent)
+{
+ u64 end;
+ u64 len;
+
+ if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
+ len = btrfs_file_extent_ram_bytes(leaf, extent);
+ end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
+ } else {
+ len = btrfs_file_extent_num_bytes(leaf, extent);
+ end = key->offset + len;
+ }
+ return end;
+}
+
static int check_extent_data_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
{
struct btrfs_file_extent_item *fi;
u32 sectorsize = fs_info->sectorsize;
@@ -195,6 +213,28 @@ static int check_extent_data_item(struct btrfs_fs_info *fs_info,
CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, offset, sectorsize) ||
CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, num_bytes, sectorsize))
return -EUCLEAN;
+
+ /*
+ * Check that no two consecutive file extent items, in the same leaf,
+ * present ranges that overlap each other.
+ */
+ if (slot > 0 &&
+ prev_key->objectid == key->objectid &&
+ prev_key->type == BTRFS_EXTENT_DATA_KEY) {
+ struct btrfs_file_extent_item *prev_fi;
+ u64 prev_end;
+
+ prev_fi = btrfs_item_ptr(leaf, slot - 1,
+ struct btrfs_file_extent_item);
+ prev_end = file_extent_end(leaf, prev_key, prev_fi);
+ if (prev_end > key->offset) {
+ file_extent_err(fs_info, leaf, slot - 1,
+"file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
+ prev_end, key->offset);
+ return -EUCLEAN;
+ }
+ }
+
return 0;
}
@@ -461,13 +501,14 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
*/
static int check_leaf_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
- struct btrfs_key *key, int slot)
+ struct btrfs_key *key, int slot,
+ struct btrfs_key *prev_key)
{
int ret = 0;
switch (key->type) {
case BTRFS_EXTENT_DATA_KEY:
- ret = check_extent_data_item(fs_info, leaf, key, slot);
+ ret = check_extent_data_item(fs_info, leaf, key, slot, prev_key);
break;
case BTRFS_EXTENT_CSUM_KEY:
ret = check_csum_item(fs_info, leaf, key, slot);
@@ -621,7 +662,7 @@ static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
* Check if the item size and content meet other
* criteria
*/
- ret = check_leaf_item(fs_info, leaf, &key, slot);
+ ret = check_leaf_item(fs_info, leaf, &key, slot, &prev_key);
if (ret < 0)
return ret;
}
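The tree-checker extension enforces that consecutive EXTENT_DATA items in a leaf never overlap: leaf items are key-sorted, so comparing each extent's start offset against the end of its predecessor (sector-aligned for inline extents) suffices. A standalone sketch of that invariant over sorted ranges:

#include <stdint.h>
#include <stdio.h>

struct range {
        uint64_t start;
        uint64_t len;
};

/* Returns 0 if no two consecutive sorted ranges overlap, -1 otherwise --
 * the invariant the tree-checker enforces for file extent items. */
static int check_ranges(const struct range *r, int n)
{
        for (int i = 1; i < n; i++) {
                uint64_t prev_end = r[i - 1].start + r[i - 1].len;

                if (prev_end > r[i].start) {
                        fprintf(stderr,
                                "range end (%llu) goes beyond start (%llu)\n",
                                (unsigned long long)prev_end,
                                (unsigned long long)r[i].start);
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        struct range ok[]  = { { 0, 4096 }, { 4096, 4096 } };
        struct range bad[] = { { 0, 8192 }, { 4096, 4096 } };

        printf("ok:  %d\n", check_ranges(ok, 2));
        printf("bad: %d\n", check_ranges(bad, 2));
        return 0;
}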
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 5c0fd8b848ec..46ba55a225ff 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3138,6 +3138,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
log->log_transid = root->log_transid;
root->log_start_pid = 0;
/*
+ * Update or create log root item under the root's log_mutex to prevent
+ * races with concurrent log syncs that can lead to failure to update
+ * log root item because it was not created yet.
+ */
+ ret = update_log_root(trans, log);
+ /*
* IO has been started, blocks of the log tree have WRITTEN flag set
* in their headers. new modifications of the log will be written to
* new positions. so it's safe to allow log writers to go in.
@@ -3156,8 +3162,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex);
- ret = update_log_root(trans, log);
-
mutex_lock(&log_root_tree->log_mutex);
if (atomic_dec_and_test(&log_root_tree->log_writers)) {
/*
@@ -5653,7 +5657,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
{
int ret = 0;
struct dentry *old_parent = NULL;
- struct btrfs_inode *orig_inode = inode;
/*
* for regular files, if its inode is already on disk, we don't
@@ -5673,16 +5676,6 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
}
while (1) {
- /*
- * If we are logging a directory then we start with our inode,
- * not our parent's inode, so we need to skip setting the
- * logged_trans so that further down in the log code we don't
- * think this inode has already been logged.
- */
- if (inode != orig_inode)
- inode->logged_trans = trans->transid;
- smp_mb();
-
if (btrfs_must_commit_transaction(trans, inode)) {
ret = 1;
break;
@@ -6560,7 +6553,6 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
* if this directory was already logged any new
* names for this file/dir will get recorded
*/
- smp_mb();
if (dir->logged_trans == trans->transid)
return;
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index d7955dc56737..a1985a9ad2d6 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
if (sd) {
/* Coordinate with configfs_readdir */
spin_lock(&configfs_dirent_lock);
- /* Coordinate with configfs_attach_attr where will increase
- * sd->s_count and update sd->s_dentry to new allocated one.
- * Only set sd->dentry to null when this dentry is the only
- * sd owner.
- * If not do so, configfs_d_iput may run just after
- * configfs_attach_attr and set sd->s_dentry to null
- * even it's still in use.
+ /*
+ * Set sd->s_dentry to null only when this dentry is the one
+ * that is going to be killed. Otherwise configfs_d_iput may
+ * run just after configfs_attach_attr and set sd->s_dentry to
+ * NULL even though it's still in use.
*/
- if (atomic_read(&sd->s_count) <= 2)
+ if (sd->s_dentry == dentry)
sd->s_dentry = NULL;
spin_unlock(&configfs_dirent_lock);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 89ea4dde9b99..644b6b65f0be 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -260,6 +260,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
ret = __generic_file_write_iter(iocb, from);
+ /*
+ * Unaligned direct AIO must be the only IO in flight. Otherwise
+ * overlapping aligned IO after unaligned might result in data
+ * corruption.
+ */
+ if (ret == -EIOCBQUEUED && unaligned_aio)
+ ext4_unwritten_wait(inode);
inode_unlock(inode);
if (ret > 0)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e21d7247d0e0..6b15a3a2015c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5476,22 +5476,19 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
goto err_out;
}
}
- if (!shrink)
+ if (!shrink) {
pagecache_isize_extended(inode, oldsize, inode->i_size);
-
- /*
- * Blocks are going to be removed from the inode. Wait
- * for dio in flight. Temporarily disable
- * dioread_nolock to prevent livelock.
- */
- if (orphan) {
- if (!ext4_should_journal_data(inode)) {
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
- ext4_inode_resume_unlocked_dio(inode);
- } else
- ext4_wait_for_tail_page_commit(inode);
+ } else {
+ /*
+ * Blocks are going to be removed from the inode. Wait
+ * for dio in flight.
+ */
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ ext4_inode_resume_unlocked_dio(inode);
}
+ if (orphan && ext4_should_journal_data(inode))
+ ext4_wait_for_tail_page_commit(inode);
down_write(&EXT4_I(inode)->i_mmap_sem);
rc = ext4_break_layouts(inode);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 94fd2d1775f3..82cefdb38221 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1557,7 +1557,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
ex->fe_len += 1 << order;
}
- if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
+ if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
/* Should never happen! (but apparently sometimes does?!?) */
WARN_ON(1);
ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5b59acc9a887..fe53e4588bc0 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -651,7 +651,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line);
}
- if (test_opt(sb, ERRORS_PANIC)) {
+ if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
if (EXT4_SB(sb)->s_journal &&
!(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
return;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index aa7b43e60336..da7596f1ad88 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -502,8 +502,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
isw->inode = inode;
- atomic_inc(&isw_nr_in_flight);
-
/*
* In addition to synchronizing among switchers, I_WB_SWITCH tells
* the RCU protected stat update paths to grab the mapping's
@@ -511,6 +509,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+
+ atomic_inc(&isw_nr_in_flight);
+
return;
out_free:
@@ -877,7 +878,11 @@ restart:
void cgroup_writeback_umount(void)
{
if (atomic_read(&isw_nr_in_flight)) {
- synchronize_rcu();
+ /*
+ * Use rcu_barrier() to wait for all pending callbacks to
+ * ensure that all in-flight wb switches are in the workqueue.
+ */
+ rcu_barrier();
flush_workqueue(isw_wq);
}
}
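The writeback fix above hinges on a subtle RCU distinction: synchronize_rcu() waits only for a grace period, not for callbacks already queued with call_rcu() to run, so the old code could flush the workqueue before the switch callbacks had even been scheduled onto it; rcu_barrier() waits for all pending callbacks to execute. A minimal in-kernel-style fragment of the idiom (not standalone -- it assumes module context, and the names are hypothetical):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
        struct rcu_head rcu;
        int payload;
};

static void item_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}

static void item_queue_free(struct item *it)
{
        call_rcu(&it->rcu, item_free_rcu);      /* deferred, asynchronous */
}

static void item_teardown(void)
{
        /*
         * synchronize_rcu() would only wait for a grace period;
         * rcu_barrier() also waits until every callback queued via
         * call_rcu() has actually executed.
         */
        rcu_barrier();
}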
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 08145191f45f..42ec8c9afbf3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1979,10 +1979,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
ret = -EINVAL;
- if (rem < len) {
- pipe_unlock(pipe);
- goto out;
- }
+ if (rem < len)
+ goto out_free;
rem = len;
while (rem) {
@@ -2000,7 +1998,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
pipe->nrbufs--;
} else {
- pipe_buf_get(pipe, ibuf);
+ if (!pipe_buf_get(pipe, ibuf))
+ goto out_free;
+
*obuf = *ibuf;
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
obuf->len = rem;
@@ -2023,11 +2023,11 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
pipe_lock(pipe);
+out_free:
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
pipe_unlock(pipe);
-out:
kfree(bufs);
return ret;
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 7507635d7725..40f214f0798f 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1366,6 +1366,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
journal_superblock_t *sb = journal->j_superblock;
int ret;
+ /* Buffer got discarded which means block device got invalidated */
+ if (!buffer_mapped(bh))
+ return -EIO;
+
trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index aab825143032..35f3b8cc50a6 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -845,6 +845,7 @@ void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *sour
target->caps = source->caps;
target->options = source->options;
target->auth_info = source->auth_info;
+ target->port = source->port;
}
EXPORT_SYMBOL_GPL(nfs_server_copy_userdata);
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 9f88188060db..5c2ecba997a2 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
u64 blkno;
struct dentry *parent;
struct inode *dir = d_inode(child);
+ int set;
trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
(unsigned long long)OCFS2_I(dir)->ip_blkno);
+ status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
+ if (status < 0) {
+ mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
+ parent = ERR_PTR(status);
+ goto bail;
+ }
+
status = ocfs2_inode_lock(dir, NULL, 0);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
parent = ERR_PTR(status);
- goto bail;
+ goto unlock_nfs_sync;
}
status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
@@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
goto bail_unlock;
}
+ status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
+ if (status < 0) {
+ if (status == -EINVAL) {
+ status = -ESTALE;
+ } else
+ mlog(ML_ERROR, "test inode bit failed %d\n", status);
+ parent = ERR_PTR(status);
+ goto bail_unlock;
+ }
+
+ trace_ocfs2_get_dentry_test_bit(status, set);
+ if (!set) {
+ status = -ESTALE;
+ parent = ERR_PTR(status);
+ goto bail_unlock;
+ }
+
parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
bail_unlock:
ocfs2_inode_unlock(dir, 0);
+unlock_nfs_sync:
+ ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
+
bail:
trace_ocfs2_get_parent_end(parent);
diff --git a/fs/pipe.c b/fs/pipe.c
index 51bcae9fbbf4..f809602e978f 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -193,9 +193,14 @@ EXPORT_SYMBOL(generic_pipe_buf_steal);
* in the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
-void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
+#ifndef __GENKSYMS__
+bool
+#else
+void
+#endif
+generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
- get_page(buf->page);
+ return try_get_page(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
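The #ifndef __GENKSYMS__ wrapper above is a kABI-preservation idiom used in enterprise kernel branches: genksyms computes exported-symbol CRCs from the declaration it parses with __GENKSYMS__ defined, so the checksum is still derived from the old void prototype while the compiler proper sees the new bool return type. In-tree callers are rebuilt against the new signature; existing binary modules that ignored the return value keep loading. A declaration-only sketch of the pattern (hypothetical name):

#include <stdbool.h>

struct pipe_buffer;     /* opaque here */

#ifndef __GENKSYMS__
bool                    /* what the compiler sees */
#else
void                    /* what the CRC generator sees */
#endif
generic_pipe_buf_get_sketch(struct pipe_buffer *buf);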
diff --git a/fs/splice.c b/fs/splice.c
index 2c9ae49d83e5..01a20a6bb49b 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1592,7 +1592,11 @@ retry:
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- pipe_buf_get(ipipe, ibuf);
+ if (!pipe_buf_get(ipipe, ibuf)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
*obuf = *ibuf;
/*
@@ -1666,7 +1670,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
* Get a reference to this pipe buffer,
* so we can copy the contents over.
*/
- pipe_buf_get(ipipe, ibuf);
+ if (!pipe_buf_get(ipipe, ibuf)) {
+ if (ret == 0)
+ ret = -EFAULT;
+ break;
+ }
obuf = opipe->bufs + nbuf;
*obuf = *ibuf;
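With generic_pipe_buf_get() now able to fail, every buffer-duplication site -- fuse_dev_splice_write() earlier and the two splice paths above -- must check the result and bail out with -EFAULT instead of aliasing a page whose reference could not be taken; the underlying try_get_page() refuses a reference once the page refcount nears overflow. A userspace analogue of a saturating reference take (the threshold is illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct buf {
        int refcount;
};

/* Analogue of try_get_page(): refuse a reference once the counter
 * approaches saturation instead of letting it overflow. */
static bool buf_try_get(struct buf *b)
{
        if (b->refcount >= INT_MAX / 2)
                return false;
        b->refcount++;
        return true;
}

int main(void)
{
        struct buf b = { .refcount = 1 };

        if (!buf_try_get(&b)) {
                fprintf(stderr, "refcount saturated\n");
                return 1;       /* the splice paths map this to -EFAULT */
        }
        printf("refcount now %d\n", b.refcount);
        return 0;
}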
diff --git a/fs/sync.c b/fs/sync.c
index 07c1e3105a83..51beb4743ed2 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -264,10 +264,13 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
* earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
* for that operation to complete and to return the result.
*
- * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
+ * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER
+ * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT):
* a traditional sync() operation. This is a write-for-data-integrity operation
* which will ensure that all pages in the range which were dirty on entry to
- * sys_sync_file_range() are committed to disk.
+ * sys_sync_file_range() are written to disk. It should be noted that disk
+ * caches are not flushed by this call, so there are no guarantees here that the
+ * data will be available on disk after a crash.
*
*
* SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
@@ -348,8 +351,14 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
}
if (flags & SYNC_FILE_RANGE_WRITE) {
+ int sync_mode = WB_SYNC_NONE;
+
+ if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) ==
+ SYNC_FILE_RANGE_WRITE_AND_WAIT)
+ sync_mode = WB_SYNC_ALL;
+
ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
- WB_SYNC_NONE);
+ sync_mode);
if (ret < 0)
goto out_put;
}
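The sync.c change makes the combined flag set behave as the comment now documents: when WAIT_BEFORE, WRITE and WAIT_AFTER are all given, writeback runs in WB_SYNC_ALL mode so no dirty page in the range can be skipped. A hypothetical userspace caller (the file name is illustrative; note the documented caveat that the disk write cache is not flushed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef SYNC_FILE_RANGE_WRITE_AND_WAIT
#define SYNC_FILE_RANGE_WRITE_AND_WAIT \
        (SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | \
         SYNC_FILE_RANGE_WAIT_AFTER)
#endif

int main(void)
{
        int fd = open("data.bin", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Write-for-data-integrity on the first 1 MiB of the file; the
         * disk write cache is NOT flushed, unlike with fdatasync(). */
        if (sync_file_range(fd, 0, 1 << 20,
                            SYNC_FILE_RANGE_WRITE_AND_WAIT) != 0)
                perror("sync_file_range");
        close(fd);
        return 0;
}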
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 8239d52b0a9c..fc53c2e9d597 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -527,18 +527,17 @@ xfs_file_dio_aio_write(
count = iov_iter_count(from);
/*
- * If we are doing unaligned IO, wait for all other IO to drain,
- * otherwise demote the lock if we had to take the exclusive lock
- * for other reasons in xfs_file_aio_write_checks.
+ * If we are doing unaligned IO, we can't allow any other overlapping IO
+ * in-flight at the same time or we risk data corruption. Wait for all
+ * other IO to drain before we submit. If the IO is aligned, demote the
+ * iolock if we had to take the exclusive lock in
+ * xfs_file_aio_write_checks() for other reasons.
*/
if (unaligned_io) {
- /* If we are going to wait for other DIO to finish, bail */
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (atomic_read(&inode->i_dio_count))
- return -EAGAIN;
- } else {
- inode_dio_wait(inode);
- }
+ /* unaligned dio always waits, bail */
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EAGAIN;
+ inode_dio_wait(inode);
} else if (iolock == XFS_IOLOCK_EXCL) {
xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
iolock = XFS_IOLOCK_SHARED;
@@ -546,6 +545,14 @@ xfs_file_dio_aio_write(
trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+ /*
+ * If unaligned, this is the only IO in-flight. If it has not yet
+ * completed, wait on it before we release the iolock to prevent
+ * subsequent overlapping IO.
+ */
+ if (ret == -EIOCBQUEUED && unaligned_io)
+ inode_dio_wait(inode);
out:
xfs_iunlock(ip, iolock);
diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
index f047eaf261f3..55d5e119cedd 100644
--- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
@@ -54,7 +54,7 @@
#define R8A7795_CLK_CANFD 39
#define R8A7795_CLK_HDMI 40
#define R8A7795_CLK_CSI0 41
-#define R8A7795_CLK_CSIREF 42
+/* CLK_CSIREF was removed */
#define R8A7795_CLK_CP 43
#define R8A7795_CLK_CPEX 44
#define R8A7795_CLK_R 45
diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
index 1e5942695f0d..ad99588426fd 100644
--- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
@@ -60,7 +60,7 @@
#define R8A7796_CLK_CANFD 45
#define R8A7796_CLK_HDMI 46
#define R8A7796_CLK_CSI0 47
-#define R8A7796_CLK_CSIREF 48
+/* CLK_CSIREF was removed */
#define R8A7796_CLK_CP 49
#define R8A7796_CLK_CPEX 50
#define R8A7796_CLK_R 51
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ca7def997611..54a0d1674d6f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -626,6 +626,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
+#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
typedef struct {
efi_guid_t guid;
@@ -1005,6 +1006,7 @@ extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
extern void efi_mem_reserve(phys_addr_t addr, u64 size);
+extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_reserve_boot_services(void);
@@ -1614,6 +1616,22 @@ struct linux_efi_random_seed {
u8 bits[];
};
+struct linux_efi_memreserve {
+ int size; // allocated size of the array
+ atomic_t count; // number of entries used
+ phys_addr_t next; // pa of next struct instance
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[0];
+};
+
+#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
+ (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
+#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
+ / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
#define EFI_STATUS_STR(_status) \
case EFI_##_status: \
return "EFI_" __stringify(_status); \
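
The two helper macros are exact inverses over the flexible entry[] array. A worked example for a 64-bit build (figures are illustrative: the header packs to 16 bytes and each {base, size} pair is 16 bytes):

    /* sizeof(struct linux_efi_memreserve) == 16 on 64-bit (illustrative) */
    /* entries that fit in one 4 KiB page:                                */
    /*     EFI_MEMRESERVE_COUNT(4096) == (4096 - 16) / 16 == 255          */
    /* and back again, bytes needed for 255 entries:                      */
    /*     EFI_MEMRESERVE_SIZE(255)   == 16 + 255 * 16   == 4096          */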
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
new file mode 100644
index 000000000000..00d7e8e919c6
--- /dev/null
+++ b/include/linux/indirect_call_wrapper.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
+#define _LINUX_INDIRECT_CALL_WRAPPER_H
+
+#ifdef CONFIG_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins
+ * @f: function pointer
+ * @f$NR: builtin function names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoid retpoline overhead for known builtins by checking @f against each of
+ * them and, on a match, invoking the builtin function directly. The functions
+ * are checked in the given order; otherwise fall back to the indirect call.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ })
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+ likely(f == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ })
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#endif
+
+/*
+ * We can use INDIRECT_CALL_$NR for ipv6-related functions only if ipv6 is
+ * builtin; this macro simplifies dealing with indirect calls that have only
+ * ipv4/ipv6 alternatives
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#elif IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
+
+#endif
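
A hedged usage sketch (handle_v4/handle_v6/dispatch are illustrative names, not from this series): under CONFIG_RETPOLINE the macro turns into pointer compares plus direct calls, and collapses to a plain indirect call otherwise.

    static int handle_v4(void *ctx) { return 4; }   /* known builtin #1 */
    static int handle_v6(void *ctx) { return 6; }   /* known builtin #2 */

    static int dispatch(int (*cb)(void *), void *ctx)
    {
            /* Compares cb against handle_v6 first, then handle_v4,
             * and only then pays for the retpolined indirect call.
             */
            return INDIRECT_CALL_2(cb, handle_v6, handle_v4, ctx);
    }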
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 70f8c5506208..cbe60bda2d6b 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -89,7 +89,6 @@ struct klp_func {
struct list_head node;
struct list_head stack_node;
unsigned long old_size, new_size;
- bool kobj_added;
bool nop;
bool patched;
bool transition;
@@ -144,7 +143,6 @@ struct klp_object {
struct list_head func_list;
struct list_head node;
struct module *mod;
- bool kobj_added;
bool dynamic;
bool patched;
};
@@ -173,7 +171,6 @@ struct klp_patch {
struct list_head list;
struct kobject kobj;
struct list_head obj_list;
- bool kobj_added;
bool enabled;
bool forced;
struct work_struct free_work;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ff1de89d2a21..97ef38a30758 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -17,9 +17,6 @@
#include <linux/init.h>
#include <linux/mm.h>
-#define INIT_MEMBLOCK_REGIONS 128
-#define INIT_PHYSMEM_REGIONS 4
-
/* Definition of memblock flags. */
enum {
MEMBLOCK_NONE = 0x0, /* No special request */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 01e74f259e31..32d7c919a0e7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -869,6 +869,10 @@ static inline bool is_device_public_page(const struct page *page)
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+ ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
static inline void get_page(struct page *page)
{
page = compound_head(page);
@@ -876,8 +880,17 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_refcount.
*/
- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+ page_ref_inc(page);
+}
+
+static inline __must_check bool try_get_page(struct page *page)
+{
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
page_ref_inc(page);
+ return true;
}
static inline void put_page(struct page *page)
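
The overflow test is plain unsigned wraparound arithmetic: (unsigned)count + 127 <= 127 holds exactly when count, viewed as a signed int, lies in [-127, 0]. A sketch of the expected caller pattern for the new fallible helper (the caller is hypothetical):

    /* count ==  0 : 0 + 127 == 127 <= 127, trips (refcount already dead) */
    /* count ==  1 : 1 + 127 == 128  > 127, fine                          */
    /* count == -5 : wraps to 122   <= 127, trips (overflow territory)    */

    if (!try_get_page(page))            /* takes compound_head() itself */
            return ERR_PTR(-ENOMEM);    /* zero or saturated refcount   */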
diff --git a/include/linux/of.h b/include/linux/of.h
index 2b52b32b0a81..fb4b192c684b 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -226,8 +226,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
static inline u64 of_read_number(const __be32 *cell, int size)
{
u64 r = 0;
- while (size--)
- r = (r << 32) | be32_to_cpu(*(cell++));
+ for (; size--; cell++)
+ r = (r << 32) | be32_to_cpu(*cell);
return r;
}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index bad08407c92f..13f43b375f57 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -328,6 +328,10 @@ struct pci_dev {
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
controlled exclusively by
user sysfs */
+#ifndef __GENKSYMS__
+ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
+ bit manually */
+#endif
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 4d4d87ba51a7..1f56e1076daf 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -107,18 +107,24 @@ struct pipe_buf_operations {
/*
* Get a reference to the pipe buffer.
*/
+#ifndef __GENKSYMS__
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+#else
void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+#endif
};
/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
*/
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- buf->ops->get(pipe, buf);
+ return buf->ops->get(pipe, buf);
}
/**
@@ -178,7 +184,12 @@ struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+#ifndef __GENKSYMS__
+bool
+#else
+void
+#endif
+generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
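
The paired #ifndef __GENKSYMS__ blocks here and in fs/pipe.c above are a kABI-preservation idiom on this enterprise branch: genksyms computes the exported-symbol CRC from the legacy void prototype, while the compiler builds the new bool one, so checksums of existing third-party modules keep matching. The general shape of the trick, with a hypothetical symbol:

    /* genksyms runs the preprocessor with __GENKSYMS__ defined */
    #ifndef __GENKSYMS__
    bool                    /* type the kernel is really built with    */
    #else
    void                    /* legacy type, seen only for CRC purposes */
    #endif
    some_exported_helper(struct foo *arg);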
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 55a91fbbd9a1..96aeb449db51 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -377,10 +377,20 @@ static inline void set_restore_sigmask(void)
set_thread_flag(TIF_RESTORE_SIGMASK);
WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
static inline void clear_restore_sigmask(void)
{
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
static inline bool test_restore_sigmask(void)
{
return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -398,6 +408,10 @@ static inline void set_restore_sigmask(void)
current->restore_sigmask = true;
WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ tsk->restore_sigmask = false;
+}
static inline void clear_restore_sigmask(void)
{
current->restore_sigmask = false;
@@ -406,6 +420,10 @@ static inline bool test_restore_sigmask(void)
{
return current->restore_sigmask;
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ return tsk->restore_sigmask;
+}
static inline bool test_and_clear_restore_sigmask(void)
{
if (!current->restore_sigmask)
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 5d5415e129d4..936e9ca58151 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -28,6 +28,13 @@ struct user_struct {
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
+ /*
+ * These pointers can only change from NULL to a non-NULL value once.
+ * Writes are protected by key_user_keyring_mutex.
+ * Unlocked readers should use READ_ONCE() unless they know that
+ * install_user_keyrings() has been called successfully (which sets
+ * these members to non-NULL values, preventing further modifications).
+ */
struct key *uid_keyring; /* UID specific keyring */
struct key *session_keyring; /* UID's default session keyring */
#endif
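
A sketch of the unlocked-reader pattern the new comment prescribes (the surrounding caller and the -ENOKEY choice are illustrative):

    /* The pointer goes NULL -> valid exactly once, so one READ_ONCE()
     * snapshot is safe to test and then dereference.
     */
    struct key *kr = READ_ONCE(user->uid_keyring);

    if (!kr)
            return -ENOKEY;     /* install_user_keyrings() has not run yet */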
diff --git a/include/net/arp.h b/include/net/arp.h
index 40522874d7b8..738df12c2c5e 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -17,6 +17,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
return val * hash_rnd[0];
}
+#ifdef CONFIG_INET
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
@@ -24,6 +25,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
+#else
+static inline
+struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+ return NULL;
+}
+#endif
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
{
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index f39ae697347f..915eff156c74 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -1,6 +1,8 @@
#ifndef _INET_COMMON_H
#define _INET_COMMON_H
+#include <linux/indirect_call_wrapper.h>
+
extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;
@@ -51,4 +53,11 @@ static inline void inet_ctl_sock_destroy(struct sock *sk)
sock_release(sk->sk_socket);
}
+#define indirect_call_gro_receive(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_2(cb, f2, f1, head, skb); \
+})
+
#endif
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index d4a20d00461c..52a4efb9a30d 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -104,7 +104,6 @@ typedef enum {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
- SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 27d8c36c04af..d7eaace0c981 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -355,6 +355,9 @@ struct fscrypt_key {
#define SYNC_FILE_RANGE_WAIT_BEFORE 1
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
+#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \
+ SYNC_FILE_RANGE_WAIT_BEFORE | \
+ SYNC_FILE_RANGE_WAIT_AFTER)
/* flags for preadv2/pwritev2: */
#define RWF_HIPRI 0x00000001 /* high priority request, poll if possible */
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 762bc8ce40e6..034a05a6b104 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -493,18 +493,30 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
return insn - insn_buf;
}
-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+ void *key, const bool mark)
{
struct htab_elem *l = __htab_map_lookup_elem(map, key);
if (l) {
- bpf_lru_node_set_ref(&l->lru_node);
+ if (mark)
+ bpf_lru_node_set_ref(&l->lru_node);
return l->key + round_up(map->key_size, 8);
}
return NULL;
}
+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+ return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+void *suse_htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+ return __htab_lru_map_lookup_elem(map, key, false);
+}
+
static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn_buf)
{
@@ -1184,7 +1196,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
{
- struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
struct htab_elem *l;
void __percpu *pptr;
int ret = -ENOENT;
@@ -1200,8 +1211,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
l = __htab_map_lookup_elem(map, key);
if (!l)
goto out;
- if (htab_is_lru(htab))
- bpf_lru_node_set_ref(&l->lru_node);
+ /* We do not mark LRU map element here in order to not mess up
+ * eviction heuristics when user space does a map walk.
+ */
pptr = htab_elem_get_ptr(l, map->key_size);
for_each_possible_cpu(cpu) {
bpf_long_memcpy(value + off,
diff --git a/kernel/bpf/hashtab.h b/kernel/bpf/hashtab.h
new file mode 100644
index 000000000000..be3e179ed4d0
--- /dev/null
+++ b/kernel/bpf/hashtab.h
@@ -0,0 +1,16 @@
+/* Copyright (c) 2019 SUSE, https://www.suse.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/bpf.h>
+
+/* Declared here so we can avoid adding an extra member to bpf_map_ops */
+void *suse_htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 94c7885261b1..6d2513c58800 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -24,6 +24,8 @@
#include <linux/kernel.h>
#include <linux/idr.h>
+#include "hashtab.h"
+
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -488,7 +490,10 @@ static int map_lookup_elem(union bpf_attr *attr)
err = bpf_fd_htab_map_lookup_elem(map, key, value);
} else {
rcu_read_lock();
- ptr = map->ops->map_lookup_elem(map, key);
+ if (map->map_type == BPF_MAP_TYPE_LRU_HASH)
+ ptr = suse_htab_lru_map_lookup_elem_sys(map, key);
+ else
+ ptr = map->ops->map_lookup_elem(map, key);
if (ptr)
memcpy(value, ptr, value_size);
rcu_read_unlock();
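
The user-visible effect of the SUSE-private hook: a map walk from user space no longer refreshes LRU state, so walked-over elements stay eligible for eviction. A raw bpf(2) sketch of one such lookup step (no libbpf, error handling elided):

    #include <linux/bpf.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* One lookup of a BPF_MAP_TYPE_LRU_HASH map during a user-space walk;
     * in this kernel it lands in suse_htab_lru_map_lookup_elem_sys() and
     * therefore does not call bpf_lru_node_set_ref().
     */
    static int map_lookup(int map_fd, const void *key, void *value)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.map_fd = map_fd;
            attr.key    = (__u64)(unsigned long)key;
            attr.value  = (__u64)(unsigned long)value;

            return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
    }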
diff --git a/kernel/fork.c b/kernel/fork.c
index 2f8b7c0ff278..fe02658a06f8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -821,6 +821,15 @@ static void mm_init_aio(struct mm_struct *mm)
#endif
}
+static __always_inline void mm_clear_owner(struct mm_struct *mm,
+ struct task_struct *p)
+{
+#ifdef CONFIG_MEMCG
+ if (mm->owner == p)
+ WRITE_ONCE(mm->owner, NULL);
+#endif
+}
+
static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
#ifdef CONFIG_MEMCG
@@ -1225,6 +1234,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
free_pt:
/* don't put binfmt in mmput, we haven't got module yet */
mm->binfmt = NULL;
+ mm_init_owner(mm, NULL);
mmput(mm);
fail_nomem:
@@ -1542,6 +1552,21 @@ static inline void rcu_copy_process(struct task_struct *p)
#endif /* #ifdef CONFIG_TASKS_RCU */
}
+static void __delayed_free_task(struct rcu_head *rhp)
+{
+ struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
+ free_task(tsk);
+}
+
+static __always_inline void delayed_free_task(struct task_struct *tsk)
+{
+ if (IS_ENABLED(CONFIG_MEMCG))
+ call_rcu(&tsk->rcu, __delayed_free_task);
+ else
+ free_task(tsk);
+}
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
@@ -1977,8 +2002,10 @@ bad_fork_cleanup_io:
bad_fork_cleanup_namespaces:
exit_task_namespaces(p);
bad_fork_cleanup_mm:
- if (p->mm)
+ if (p->mm) {
+ mm_clear_owner(p->mm, p);
mmput(p->mm);
+ }
bad_fork_cleanup_signal:
if (!(clone_flags & CLONE_THREAD))
free_signal_struct(p->signal);
@@ -2008,7 +2035,7 @@ bad_fork_cleanup_count:
bad_fork_free:
p->state = TASK_DEAD;
put_task_stack(p);
- free_task(p);
+ delayed_free_task(p);
fork_out:
return ERR_PTR(retval);
}
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 4641973fadae..584183ec7a63 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -426,7 +426,13 @@ static void klp_free_object_dynamic(struct klp_object *obj)
kfree(obj);
}
-static struct klp_object *klp_alloc_object_dynamic(const char *name)
+static void klp_init_func_early(struct klp_object *obj,
+ struct klp_func *func);
+static void klp_init_object_early(struct klp_patch *patch,
+ struct klp_object *obj);
+
+static struct klp_object *klp_alloc_object_dynamic(const char *name,
+ struct klp_patch *patch)
{
struct klp_object *obj;
@@ -442,7 +448,7 @@ static struct klp_object *klp_alloc_object_dynamic(const char *name)
}
}
- INIT_LIST_HEAD(&obj->func_list);
+ klp_init_object_early(patch, obj);
obj->dynamic = true;
return obj;
@@ -471,6 +477,7 @@ static struct klp_func *klp_alloc_func_nop(struct klp_func *old_func,
}
}
+ klp_init_func_early(obj, func);
/*
* func->new_func is same as func->old_func. These addresses are
* set when the object is loaded, see klp_init_object_loaded().
@@ -490,11 +497,9 @@ static int klp_add_object_nops(struct klp_patch *patch,
obj = klp_find_object(patch, old_obj);
if (!obj) {
- obj = klp_alloc_object_dynamic(old_obj->name);
+ obj = klp_alloc_object_dynamic(old_obj->name, patch);
if (!obj)
return -ENOMEM;
-
- list_add_tail(&obj->node, &patch->obj_list);
}
klp_for_each_func(old_obj, old_func) {
@@ -505,8 +510,6 @@ static int klp_add_object_nops(struct klp_patch *patch,
func = klp_alloc_func_nop(old_func, obj);
if (!func)
return -ENOMEM;
-
- list_add_tail(&func->node, &obj->func_list);
}
return 0;
@@ -588,13 +591,7 @@ static void __klp_free_funcs(struct klp_object *obj, bool nops_only)
continue;
list_del(&func->node);
-
- /* Might be called from klp_init_patch() error path. */
- if (func->kobj_added) {
- kobject_put(&func->kobj);
- } else if (func->nop) {
- klp_free_func_nop(func);
- }
+ kobject_put(&func->kobj);
}
}
@@ -624,13 +621,7 @@ static void __klp_free_objects(struct klp_patch *patch, bool nops_only)
continue;
list_del(&obj->node);
-
- /* Might be called from klp_init_patch() error path. */
- if (obj->kobj_added) {
- kobject_put(&obj->kobj);
- } else if (obj->dynamic) {
- klp_free_object_dynamic(obj);
- }
+ kobject_put(&obj->kobj);
}
}
@@ -675,10 +666,8 @@ static void klp_free_patch_finish(struct klp_patch *patch)
* this is called when the patch gets disabled and it
* cannot get enabled again.
*/
- if (patch->kobj_added) {
- kobject_put(&patch->kobj);
- wait_for_completion(&patch->finish);
- }
+ kobject_put(&patch->kobj);
+ wait_for_completion(&patch->finish);
/* Put the module after the last access to struct klp_patch. */
if (!patch->forced)
@@ -700,8 +689,6 @@ static void klp_free_patch_work_fn(struct work_struct *work)
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
- int ret;
-
if (!func->old_name)
return -EINVAL;
@@ -724,13 +711,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
* object. If the user selects 0 for old_sympos, then 1 will be used
* since a unique symbol will be the first occurrence.
*/
- ret = kobject_init_and_add(&func->kobj, &klp_ktype_func,
- &obj->kobj, "%s,%lu", func->old_name,
- func->old_sympos ? func->old_sympos : 1);
- if (!ret)
- func->kobj_added = true;
-
- return ret;
+ return kobject_add(&func->kobj, &obj->kobj, "%s,%lu",
+ func->old_name,
+ func->old_sympos ? func->old_sympos : 1);
}
/* Arches may override this to finish any remaining arch-specific tasks */
@@ -801,11 +784,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
klp_find_object_module(obj);
name = klp_is_module(obj) ? obj->name : "vmlinux";
- ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
- &patch->kobj, "%s", name);
+ ret = kobject_add(&obj->kobj, &patch->kobj, "%s", name);
if (ret)
return ret;
- obj->kobj_added = true;
klp_for_each_func(obj, func) {
ret = klp_init_func(obj, func);
@@ -819,6 +800,21 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
return ret;
}
+static void klp_init_func_early(struct klp_object *obj,
+ struct klp_func *func)
+{
+ kobject_init(&func->kobj, &klp_ktype_func);
+ list_add_tail(&func->node, &obj->func_list);
+}
+
+static void klp_init_object_early(struct klp_patch *patch,
+ struct klp_object *obj)
+{
+ INIT_LIST_HEAD(&obj->func_list);
+ kobject_init(&obj->kobj, &klp_ktype_object);
+ list_add_tail(&obj->node, &patch->obj_list);
+}
+
static int klp_init_patch_early(struct klp_patch *patch)
{
struct klp_object *obj;
@@ -829,7 +825,7 @@ static int klp_init_patch_early(struct klp_patch *patch)
INIT_LIST_HEAD(&patch->list);
INIT_LIST_HEAD(&patch->obj_list);
- patch->kobj_added = false;
+ kobject_init(&patch->kobj, &klp_ktype_patch);
patch->enabled = false;
patch->forced = false;
INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
@@ -839,13 +835,10 @@ static int klp_init_patch_early(struct klp_patch *patch)
if (!obj->funcs)
return -EINVAL;
- INIT_LIST_HEAD(&obj->func_list);
- obj->kobj_added = false;
- list_add_tail(&obj->node, &patch->obj_list);
+ klp_init_object_early(patch, obj);
klp_for_each_func_static(obj, func) {
- func->kobj_added = false;
- list_add_tail(&func->node, &obj->func_list);
+ klp_init_func_early(obj, func);
}
}
@@ -860,11 +853,9 @@ static int klp_init_patch(struct klp_patch *patch)
struct klp_object *obj;
int ret;
- ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
- klp_root_kobj, "%s", patch->mod->name);
+ ret = kobject_add(&patch->kobj, klp_root_kobj, "%s", patch->mod->name);
if (ret)
return ret;
- patch->kobj_added = true;
if (patch->replace) {
ret = klp_add_nops(patch);
@@ -937,9 +928,6 @@ static int __klp_enable_patch(struct klp_patch *patch)
if (WARN_ON(patch->enabled))
return -EINVAL;
- if (!patch->kobj_added)
- return -EINVAL;
-
pr_notice("enabling patch '%s'\n", patch->mod->name);
klp_init_transition(patch, KLP_PATCHED);
@@ -1014,11 +1002,10 @@ int klp_enable_patch(struct klp_patch *patch)
return -ENODEV;
if (!klp_have_reliable_stack()) {
- pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
- return -EOPNOTSUPP;
+ pr_warn("This architecture doesn't have support for the livepatch consistency model.\n");
+ pr_warn("The livepatch transition may never complete.\n");
}
-
mutex_lock(&klp_mutex);
ret = klp_init_patch_early(patch);
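
The restructuring above is the standard two-step kobject lifecycle, which is what lets every kobj_added flag go away: after kobject_init() a kobject_put() is valid on any error path, and kobject_add() only publishes the node to sysfs. In outline (a sketch, not the patch's literal code):

    /* early, cannot fail: refcount becomes valid, put() is now safe */
    kobject_init(&obj->kobj, &obj_ktype);

    /* late, can fail: creates the sysfs entry under parent */
    ret = kobject_add(&obj->kobj, parent, "%s", name);
    if (ret)
            kobject_put(&obj->kobj);    /* released via ktype->release() */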
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 60f356d91060..861cf2f7ace6 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -29,6 +29,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/cn_proc.h>
#include <linux/compat.h>
+#include <linux/sched/signal.h>
/*
* Access another process' address space via ptrace.
@@ -927,18 +928,26 @@ int ptrace_request(struct task_struct *child, long request,
ret = ptrace_setsiginfo(child, &siginfo);
break;
- case PTRACE_GETSIGMASK:
+ case PTRACE_GETSIGMASK: {
+ sigset_t *mask;
+
if (addr != sizeof(sigset_t)) {
ret = -EINVAL;
break;
}
- if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+ if (test_tsk_restore_sigmask(child))
+ mask = &child->saved_sigmask;
+ else
+ mask = &child->blocked;
+
+ if (copy_to_user(datavp, mask, sizeof(sigset_t)))
ret = -EFAULT;
else
ret = 0;
break;
+ }
case PTRACE_SETSIGMASK: {
sigset_t new_set;
@@ -964,6 +973,8 @@ int ptrace_request(struct task_struct *child, long request,
child->blocked = new_set;
spin_unlock_irq(&child->sighand->siglock);
+ clear_tsk_restore_sigmask(child);
+
ret = 0;
break;
}
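
From the tracer's side, the fixed request now returns the mask the tracee will actually resume with, even when it is parked in sigsuspend()/ppoll(). A userspace sketch; note the kernel compares addr against its own sigset size (8 bytes on x86-64), not glibc's 128-byte sigset_t:

    #include <sys/ptrace.h>
    #include <sys/types.h>

    #ifndef PTRACE_GETSIGMASK
    #define PTRACE_GETSIGMASK 0x4202        /* from linux/ptrace.h */
    #endif
    #define KERNEL_SIGSET_SIZE 8            /* x86-64; arch-dependent */

    /* Sketch: fetch the 64-bit kernel sigset of a stopped tracee. */
    static long get_sigmask(pid_t pid, unsigned long *mask)
    {
            return ptrace(PTRACE_GETSIGMASK, pid,
                          (void *)KERNEL_SIGSET_SIZE, mask);
    }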
diff --git a/kernel/signal.c b/kernel/signal.c
index c23b073f0a39..26f239db8239 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -648,6 +648,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
return signr;
}
+static int dequeue_synchronous_signal(siginfo_t *info)
+{
+ struct task_struct *tsk = current;
+ struct sigpending *pending = &tsk->pending;
+ struct sigqueue *q, *sync = NULL;
+
+ /*
+ * Might a synchronous signal be in the queue?
+ */
+ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+ return 0;
+
+ /*
+ * Return the first synchronous signal in the queue.
+ */
+ list_for_each_entry(q, &pending->list, list) {
+ /* Synchronous signals have a positive si_code */
+ if ((q->info.si_code > SI_USER) &&
+ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+ sync = q;
+ goto next;
+ }
+ }
+ return 0;
+next:
+ /*
+ * Check if there is another siginfo for the same signal.
+ */
+ list_for_each_entry_continue(q, &pending->list, list) {
+ if (q->info.si_signo == sync->info.si_signo)
+ goto still_pending;
+ }
+
+ sigdelset(&pending->signal, sync->info.si_signo);
+ recalc_sigpending();
+still_pending:
+ list_del_init(&sync->list);
+ copy_siginfo(info, &sync->info);
+ __sigqueue_free(sync);
+ return info->si_signo;
+}
+
/*
* Tell a process that it has a new active signal..
*
@@ -2211,6 +2253,16 @@ relock:
goto relock;
}
+ /* Has this task already been marked for death? */
+ if (signal_group_exit(signal)) {
+ ksig->info.si_signo = signr = SIGKILL;
+ sigdelset(&current->pending.signal, SIGKILL);
+ trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+ &sighand->action[SIGKILL - 1]);
+ recalc_sigpending();
+ goto fatal;
+ }
+
for (;;) {
struct k_sigaction *ka;
@@ -2224,7 +2276,15 @@ relock:
goto relock;
}
- signr = dequeue_signal(current, &current->blocked, &ksig->info);
+ /*
+ * Signals generated by the execution of an instruction
+ * need to be delivered before any other pending signals
+ * so that the instruction pointer in the signal stack
+ * frame points to the faulting instruction.
+ */
+ signr = dequeue_synchronous_signal(&ksig->info);
+ if (!signr)
+ signr = dequeue_signal(current, &current->blocked, &ksig->info);
if (!signr)
break; /* will return 0 */
@@ -2306,6 +2366,7 @@ relock:
continue;
}
+ fatal:
spin_unlock_irq(&sighand->siglock);
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index aeac1f2f72af..cd0df92a40af 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1768,7 +1768,7 @@ static int validate_prctl_map(struct prctl_mm_map *prctl_map)
((unsigned long)prctl_map->__m1 __op \
(unsigned long)prctl_map->__m2) ? 0 : -EINVAL
error = __prctl_check_order(start_code, <, end_code);
- error |= __prctl_check_order(start_data, <, end_data);
+ error |= __prctl_check_order(start_data, <=, end_data);
error |= __prctl_check_order(start_brk, <=, brk);
error |= __prctl_check_order(arg_start, <=, arg_end);
error |= __prctl_check_order(env_start, <=, env_end);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f07424238431..eeef671c9944 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6536,12 +6536,16 @@ static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
buf->private = 0;
}
-static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
+static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
+ if (refcount_read(&ref->refcount) > INT_MAX/2)
+ return false;
+
refcount_inc(&ref->refcount);
+ return true;
}
/* Pipe buffer operations for a buffer. */
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 0adc3872a20d..823b212009ed 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1279,9 +1279,6 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
char buf[32];
int len;
- if (*ppos)
- return 0;
-
if (unlikely(!id))
return -ENODEV;
diff --git a/mm/gup.c b/mm/gup.c
index 2fc424f8b63c..eb396dc771c1 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -153,7 +153,11 @@ retry:
}
if (flags & FOLL_GET) {
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ page = ERR_PTR(-ENOMEM);
+ put_dev_pagemap(pgmap);
+ goto out;
+ }
/* drop the pgmap reference now that we hold the page */
if (pgmap) {
@@ -306,7 +310,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
if (pmd_trans_unstable(pmd))
ret = -EBUSY;
} else {
- get_page(page);
+ if (unlikely(!try_get_page(page))) {
+ spin_unlock(ptl);
+ return ERR_PTR(-ENOMEM);
+ }
spin_unlock(ptl);
lock_page(page);
ret = split_huge_page(page);
@@ -372,7 +379,10 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
if (is_device_public_page(*page))
goto unmap;
}
- get_page(*page);
+ if (unlikely(!try_get_page(*page))) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
out:
ret = 0;
unmap:
@@ -1275,6 +1285,20 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
}
}
+/*
+ * Return the compound head page with ref appropriately incremented,
+ * or NULL if that failed.
+ */
+static inline struct page *try_get_compound_head(struct page *page, int refs)
+{
+ struct page *head = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(head) < 0))
+ return NULL;
+ if (unlikely(!page_cache_add_speculative(head, refs)))
+ return NULL;
+ return head;
+}
+
#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
@@ -1309,9 +1333,9 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
- head = compound_head(page);
- if (!page_cache_get_speculative(head))
+ head = try_get_compound_head(page, 1);
+ if (!head)
goto pte_unmap;
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
@@ -1442,17 +1466,16 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
refs = 0;
- head = pmd_page(orig);
- page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pmd_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
@@ -1481,17 +1504,16 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
refs = 0;
- head = pud_page(orig);
- page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+ page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pud_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
@@ -1519,17 +1541,16 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
BUILD_BUG_ON(pgd_devmap(orig));
refs = 0;
- head = pgd_page(orig);
- page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
+ page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
- if (!page_cache_add_speculative(head, refs)) {
+ head = try_get_compound_head(pgd_page(orig), refs);
+ if (!head) {
*nr -= refs;
return 0;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6b4792e7b6a6..6e21d2ff98f4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4271,6 +4271,19 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));
+
+ /*
+ * Instead of doing 'try_get_page()' below in the same_page
+ * loop, just check the count once here.
+ */
+ if (unlikely(page_count(page) <= 0)) {
+ if (pages) {
+ spin_unlock(ptl);
+ remainder = 0;
+ err = -ENOMEM;
+ break;
+ }
+ }
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7a40fa2be858..fbac5b368e56 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -42,11 +42,7 @@ static void list_lru_unregister(struct list_lru *lru)
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
- /*
- * This needs node 0 to be always present, even
- * in the systems supporting sparse numa ids.
- */
- return !!lru->node[0].memcg_lrus;
+ return !!lru->node[first_online_node].memcg_lrus;
}
static inline struct list_lru_one *
diff --git a/mm/memblock.c b/mm/memblock.c
index 8dae5a83611d..7ae1eb2bcba7 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -25,8 +25,15 @@
#include "internal.h"
+#define INIT_MEMBLOCK_REGIONS 128
+#define INIT_PHYSMEM_REGIONS 4
+
+#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
+# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
+#endif
+
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
-static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
+static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif
@@ -39,7 +46,7 @@ struct memblock memblock __initdata_memblock = {
.reserved.regions = memblock_reserved_init_regions,
.reserved.cnt = 1, /* empty dummy entry */
- .reserved.max = INIT_MEMBLOCK_REGIONS,
+ .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
.reserved.name = "reserved",
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
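
Moving the defaults out of the header lets an architecture enlarge only the reserved-region array. A hedged sketch of such an override, placed in an arch header included ahead of mm/memblock.c (arm64 in this merge sizes it for per-CPU EFI memreserve entries; the exact formula below is illustrative):

    #define INIT_MEMBLOCK_RESERVED_REGIONS  (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1)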
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9540f2589455..c919c9e263a9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1826,19 +1826,17 @@ static int soft_offline_in_use_page(struct page *page, int flags)
struct page *hpage = compound_head(page);
if (!PageHuge(page) && PageTransHuge(hpage)) {
- lock_page(hpage);
- if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
- unlock_page(hpage);
- if (!PageAnon(hpage))
+ lock_page(page);
+ if (!PageAnon(page) || unlikely(split_huge_page(page))) {
+ unlock_page(page);
+ if (!PageAnon(page))
pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
else
pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
- put_hwpoison_page(hpage);
+ put_hwpoison_page(page);
return -EBUSY;
}
- unlock_page(hpage);
- get_hwpoison_page(page);
- put_hwpoison_page(hpage);
+ unlock_page(page);
}
if (PageHuge(page))
diff --git a/mm/memory.c b/mm/memory.c
index 0161bbb75cae..a62006774511 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1803,6 +1803,7 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, addr, pte, entry, 1))
update_mmu_cache(vma, addr, pte);
+ retval = 0;
}
goto out_unlock;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index c3a7514e4005..988879708295 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2009,8 +2009,36 @@ retry_cpuset:
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, order);
+ /*
+ * We cannot invoke reclaim if __GFP_THISNODE
+ * is set. Invoking reclaim with
+ * __GFP_THISNODE set would cause THP
+ * allocations to trigger heavy swapping
+ * even though there may be tons of free memory
+ * (including potentially plenty of THP
+ * already available in the buddy) on all the
+ * other NUMA nodes.
+ *
+ * At most we could invoke compaction when
+ * __GFP_THISNODE is set (but we would need to
+ * refrain from invoking reclaim even if
+ * compaction returned COMPACT_SKIPPED because
+ * there wasn't enough memory for
+ * compaction to succeed). For now just avoid
+ * __GFP_THISNODE instead of limiting the
+ * allocation path to a strict and single
+ * compaction invocation.
+ *
+ * Supposedly if direct reclaim was enabled by
+ * the caller, the app prefers THP regardless
+ * of the node it comes from so this would be
+ * more desirable behavior than only
+ * providing THP originating from the local
+ * node in such case.
+ */
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ gfp |= __GFP_THISNODE;
+ page = __alloc_pages_node(hpage_node, gfp, order);
goto out;
}
}
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 8c5f68cf61a3..41e0695af299 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
- if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+ if (arg < 0 || arg >= MAX_LEC_ITF)
+ return -EINVAL;
+ arg = array_index_nospec(arg, MAX_LEC_ITF);
+ if (!dev_lec[arg])
return -EINVAL;
vcc->proto_data = dev_lec[arg];
return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = arg;
if (arg >= MAX_LEC_ITF)
return -EINVAL;
+ i = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[i]) {
int size;
diff --git a/net/core/dev.c b/net/core/dev.c
index 24b4ee021a86..40d980f5da78 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -144,6 +144,7 @@
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
+#include <linux/indirect_call_wrapper.h>
#include "net-sysfs.h"
@@ -4617,6 +4618,8 @@ static void flush_all_backlogs(void)
put_online_cpus();
}
+INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct sk_buff *skb)
{
struct packet_offload *ptype;
@@ -4636,7 +4639,9 @@ static int napi_gro_complete(struct sk_buff *skb)
if (ptype->type != type || !ptype->callbacks.gro_complete)
continue;
- err = ptype->callbacks.gro_complete(skb, 0);
+ err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
+ ipv6_gro_complete, inet_gro_complete,
+ skb, 0);
break;
}
rcu_read_unlock();
@@ -4750,6 +4755,10 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
}
}
+INDIRECT_CALLABLE_DECLARE(struct sk_buff **inet_gro_receive(struct sk_buff **,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff **ipv6_gro_receive(struct sk_buff **,
+ struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff **pp = NULL;
@@ -4797,7 +4806,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->csum_valid = 0;
}
- pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
+ pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+ ipv6_gro_receive, inet_gro_receive,
+ &napi->gro_list, skb);
break;
}
rcu_read_unlock();
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7394254d7e7c..897d20d7ba38 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -30,6 +30,7 @@
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
+#include <net/arp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
@@ -2529,7 +2530,13 @@ int neigh_xmit(int index, struct net_device *dev,
if (!tbl)
goto out;
rcu_read_lock_bh();
- neigh = __neigh_lookup_noref(tbl, addr, dev);
+ if (index == NEIGH_ARP_TABLE) {
+ u32 key = *((u32 *)addr);
+
+ neigh = __ipv4_neigh_lookup_noref(dev, key);
+ } else {
+ neigh = __neigh_lookup_noref(tbl, addr, dev);
+ }
if (!neigh)
neigh = __neigh_create(tbl, addr, dev, false);
err = PTR_ERR(neigh);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c621d64914cc..68d8bfa8b268 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1313,6 +1313,10 @@ out:
}
EXPORT_SYMBOL(inet_gso_segment);
+INDIRECT_CALLABLE_DECLARE(struct sk_buff **tcp4_gro_receive(struct sk_buff **,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff **udp4_gro_receive(struct sk_buff **,
+ struct sk_buff *));
struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
const struct net_offload *ops;
@@ -1422,7 +1426,8 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
+ ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -1484,6 +1489,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
return -EINVAL;
}
+INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
__be16 newlen = htons(skb->len - nhoff);
@@ -1509,7 +1516,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
* because any hdr with option will have been flushed in
* inet_gro_receive().
*/
- err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
+ err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
+ tcp4_gro_complete, udp4_gro_complete,
+ skb, nhoff + sizeof(*iph));
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index f4096839e8e2..2091fd6fca9a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -119,6 +119,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
struct guehdr *guehdr;
void *data;
u16 doffset = 0;
+ u8 proto_ctype;
if (!fou)
return 1;
@@ -210,13 +211,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
if (unlikely(guehdr->control))
return gue_control_message(skb, guehdr);
+ proto_ctype = guehdr->proto_ctype;
__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
skb_reset_transport_header(skb);
if (iptunnel_pull_offloads(skb))
goto drop;
- return -guehdr->proto_ctype;
+ return -proto_ctype;
drop:
kfree_skb(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c81916930652..fd426651c3d4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -518,6 +518,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
+ to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 61f548d27762..61d94e5a1231 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1186,11 +1186,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
+{
+ struct ip_options opt;
+ int res;
+
+ /* Recompile ip options since IPCB may not be valid anymore.
+ * Also check we have a reasonable ipv4 header.
+ */
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+ ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
+ return;
+
+ memset(&opt, 0, sizeof(opt));
+ if (ip_hdr(skb)->ihl > 5) {
+ if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+ return;
+ opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+ rcu_read_lock();
+ res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
+ return;
+ }
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index a2cc128df455..d52ba5b907c2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -44,6 +44,7 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int one_day_secs = 24 * 3600;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -553,7 +554,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_min_rtt_wlen,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one_day_secs
},
{
.procname = "tcp_low_latency",
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 649931071092..3654b330ce00 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -393,11 +393,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ int room;
+
+ room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
/* Check #1 */
- if (tp->rcv_ssthresh < tp->window_clamp &&
- (int)tp->rcv_ssthresh < tcp_space(sk) &&
- !tcp_under_memory_pressure(sk)) {
+ if (room > 0 && !tcp_under_memory_pressure(sk)) {
int incr;
/* Check #2. Increase window, if skb with such overhead
@@ -410,8 +411,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
if (incr) {
incr = max_t(int, incr, 2 * skb->len);
- tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
- tp->window_clamp);
+ tp->rcv_ssthresh += min(room, incr);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
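
The rewrite folds the two upper bounds into a single headroom value, room = min(window_clamp, tcp_space(sk)) - rcv_ssthresh, so the increment can never overshoot either limit. A worked example with illustrative numbers:

    /* window_clamp = 65535, tcp_space(sk) = 49152, rcv_ssthresh = 45056 */
    /* room = min(65535, 49152) - 45056 = 4096                           */
    /* incr = 2920 : rcv_ssthresh += min(4096, 2920)   (normal growth)   */
    /* incr = 8760 : rcv_ssthresh += min(4096, 8760) = 4096, landing     */
    /*               exactly on the tcp_space() bound                    */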
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 366b1becff9d..2861d178775a 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -10,6 +10,7 @@
* TCPv4 GSO/GRO support
*/
+#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
@@ -296,7 +297,8 @@ int tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
-static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
@@ -309,7 +311,7 @@ static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *
return tcp_gro_receive(head, skb);
}
-static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
+INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index cde2719fcb89..dbcc40813a25 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
+#include <net/inet_common.h>
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
@@ -244,6 +245,8 @@ out:
return segs;
}
+INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport));
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
struct udphdr *uh, udp_lookup_t lookup)
{
@@ -263,7 +266,8 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
NAPI_GRO_CB(skb)->encap_mark = 1;
rcu_read_lock();
- sk = (*lookup)(skb, uh->source, uh->dest);
+ sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+ udp4_lib_lookup_skb, skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_receive)
goto unflush;
@@ -300,8 +304,8 @@ out:
}
EXPORT_SYMBOL(udp_gro_receive);
-static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff **udp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
struct udphdr *uh = udp_gro_udphdr(skb);
@@ -343,7 +347,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
skb->encapsulation = 1;
rcu_read_lock();
- sk = (*lookup)(skb, uh->source, uh->dest);
+ sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+ udp4_lib_lookup_skb, skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_complete)
err = udp_sk(sk)->gro_complete(sk, skb,
nhoff + sizeof(struct udphdr));
@@ -356,7 +361,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
}
EXPORT_SYMBOL(udp_gro_complete);
-static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct iphdr *iph = ip_hdr(skb);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e3698b6d8231..f9abeae31db8 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -20,6 +20,23 @@
#include "ip6_offload.h"
+/* All GRO functions are always builtin, except UDP over IPv6, which lives in
+ * the ipv6 module; as it depends on the UDPv6 lookup function, we need special
+ * care when ipv6 is built as a module.
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
+#endif
+
+#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
+})
+
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
const struct net_offload *ops = NULL;
@@ -162,8 +179,12 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
return len;
}
-static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_DECLARE(struct sk_buff **tcp6_gro_receive(struct sk_buff **,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff **udp6_gro_receive(struct sk_buff **,
+ struct sk_buff *));
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff **ipv6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
const struct net_offload *ops;
struct sk_buff **pp = NULL;
@@ -251,7 +272,8 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
+ ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -292,7 +314,9 @@ static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head,
return inet_gro_receive(head, skb);
}
-static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct net_offload *ops;
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
@@ -311,7 +335,8 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out_unlock;
- err = ops->callbacks.gro_complete(skb, nhoff);
+ err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
+ udp6_gro_complete, skb, nhoff);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 278e49cd67d4..df0a6de64cf0 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -9,14 +9,15 @@
*
* TCPv6 GSO/GRO support
*/
+#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff **tcp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
@@ -29,7 +30,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
return tcp_gro_receive(head, skb);
}
-static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
+INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 2a04dc9c781b..77cf1fde8352 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -11,6 +11,7 @@
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/udp.h>
@@ -111,8 +112,8 @@ out:
return segs;
}
-static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff **udp6_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
struct udphdr *uh = udp_gro_udphdr(skb);
@@ -139,7 +140,7 @@ flush:
return NULL;
}
-static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
index 86ef907067bb..353b59d3bd44 100644
--- a/net/rds/ib_fmr.c
+++ b/net/rds/ib_fmr.c
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
else
pool = rds_ibdev->mr_1m_pool;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
+
+ /* Switch pools if one of the pools is reaching its upper limit */
+ if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
+ if (pool->pool_type == RDS_IB_MR_8K_POOL)
+ pool = rds_ibdev->mr_1m_pool;
+ else
+ pool = rds_ibdev->mr_8k_pool;
+ }
+
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;
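The rds_ib_alloc_fmr() change above does two things: it triggers the pool flusher as soon as 10% of the chosen pool's entries are dirty (the old trigger, based on max_items_soft, is removed from rds_ib_try_reuse_ibmr() in the next hunk), and it falls over to the sibling pool (8K <-> 1M) once the preferred one is 90% dirty. A compact sketch of just that threshold logic, with made-up pool values:

#include <stdio.h>

/* Stand-in for rds_ib_mr_pool, keeping only the fields the hunk reads. */
struct pool {
	const char *name;
	int dirty_count;
	int max_items;
};

static struct pool *pick_pool(struct pool *want, struct pool *other)
{
	/* 10% dirty: kick the flush worker early (queue_delayed_work). */
	if (want->dirty_count >= want->max_items / 10)
		printf("queue flush for %s pool\n", want->name);

	/* 90% dirty: borrow from the sibling pool instead. */
	if (want->dirty_count >= want->max_items * 9 / 10)
		return other;

	return want;
}

int main(void)
{
	struct pool p8k = { "8k", 95, 100 };
	struct pool p1m = { "1m",  5, 100 };

	printf("allocating from %s pool\n", pick_pool(&p8k, &p1m)->name);
	return 0;
}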
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 977f69886c00..91b53d462fc0 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -442,9 +442,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
- if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
- queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
-
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 344456206b70..9b4608c1bd95 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
+#define ROSE_LOOPBACK_LIMIT 1000
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
@@ -34,17 +35,19 @@ static int rose_loopback_running(void)
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
- struct sk_buff *skbn;
+ struct sk_buff *skbn = NULL;
- skbn = skb_clone(skb, GFP_ATOMIC);
+ if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
+ skbn = skb_clone(skb, GFP_ATOMIC);
- kfree_skb(skb);
-
- if (skbn != NULL) {
+ if (skbn) {
+ consume_skb(skb);
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
+ } else {
+ kfree_skb(skb);
}
return 1;
@@ -54,13 +57,10 @@ static void rose_loopback_timer(unsigned long);
static void rose_set_loopback_timer(void)
{
- del_timer(&loopback_timer);
-
loopback_timer.data = 0;
loopback_timer.function = &rose_loopback_timer;
- loopback_timer.expires = jiffies + 10;
- add_timer(&loopback_timer);
+ mod_timer(&loopback_timer, jiffies + 10);
}
static void rose_loopback_timer(unsigned long param)
@@ -71,8 +71,12 @@ static void rose_loopback_timer(unsigned long param)
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
+ int count;
- while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
+ for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
+ skb = skb_dequeue(&loopback_queue);
+ if (!skb)
+ return;
if (skb->len < ROSE_MIN_LEN) {
kfree_skb(skb);
continue;
@@ -109,6 +113,8 @@ static void rose_loopback_timer(unsigned long param)
kfree_skb(skb);
}
}
+ if (!skb_queue_empty(&loopback_queue))
+ mod_timer(&loopback_timer, jiffies + 1);
}
void __exit rose_loopback_clear(void)
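Taken together, the rose_loopback.c hunks bound both sides of the loopback path: the queue never grows past ROSE_LOOPBACK_LIMIT frames, and each timer run drains at most that many before rearming itself, so a flood can no longer pin the CPU inside the timer handler. A sketch of the bounded-drain half, with a plain counter standing in for the skb queue:

#include <stdio.h>

#define ROSE_LOOPBACK_LIMIT 1000

static int backlog;	/* stand-in for skb_queue_len(&loopback_queue) */
static int rearmed;

static void timer_run(void)
{
	int count;

	/* Process at most ROSE_LOOPBACK_LIMIT frames per run. */
	for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
		if (!backlog)
			return;
		backlog--;	/* skb_dequeue() + deliver in the kernel */
	}

	/* Work left over: rearm instead of looping until empty. */
	if (backlog)
		rearmed = 1;	/* mod_timer(&loopback_timer, jiffies + 1) */
}

int main(void)
{
	backlog = 2500;
	timer_run();
	printf("left=%d rearmed=%d\n", backlog, rearmed);	/* left=1500 rearmed=1 */
	return 0;
}

The enqueue side mirrors this: skb_clone() happens only while the queue is below the limit, and the original skb is released exactly once on every path (consume_skb() on success, kfree_skb() on drop).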
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b054ff1c1856..eb3dd7213bd7 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1066,32 +1066,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
}
-/* Send the next ASCONF packet currently stored in the association.
- * This happens after the ASCONF_ACK was successfully processed.
- */
-static void sctp_cmd_send_asconf(struct sctp_association *asoc)
-{
- struct net *net = sock_net(asoc->base.sk);
-
- /* Send the next asconf chunk from the addip chunk
- * queue.
- */
- if (!list_empty(&asoc->addip_chunk_list)) {
- struct list_head *entry = asoc->addip_chunk_list.next;
- struct sctp_chunk *asconf = list_entry(entry,
- struct sctp_chunk, list);
- list_del_init(entry);
-
- /* Hold the chunk until an ASCONF_ACK is received. */
- sctp_chunk_hold(asconf);
- if (sctp_primitive_ASCONF(net, asoc, asconf))
- sctp_chunk_free(asconf);
- else
- asoc->addip_last_asconf = asconf;
- }
-}
-
-
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1740,9 +1714,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
}
sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
- case SCTP_CMD_SEND_NEXT_ASCONF:
- sctp_cmd_send_asconf(asoc);
- break;
case SCTP_CMD_PURGE_ASCONF_QUEUE:
sctp_asconf_queue_teardown(asoc);
break;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 6d26e22e1cdf..01c81b671deb 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3727,6 +3727,29 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+static sctp_disposition_t sctp_send_next_asconf(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *asconf;
+ struct list_head *entry;
+
+ if (list_empty(&asoc->addip_chunk_list))
+ return SCTP_DISPOSITION_CONSUME;
+
+ entry = asoc->addip_chunk_list.next;
+ asconf = list_entry(entry, struct sctp_chunk, list);
+
+ list_del_init(entry);
+ sctp_chunk_hold(asconf);
+ asoc->addip_last_asconf = asconf;
+
+ return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
+}
+
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
@@ -3815,14 +3838,10 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
- asconf_ack)) {
- /* Successfully processed ASCONF_ACK. We can
- * release the next asconf if we have one.
- */
- sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
- SCTP_NULL());
- return SCTP_DISPOSITION_CONSUME;
- }
+ asconf_ack))
+ return sctp_send_next_asconf(net, ep,
+ (struct sctp_association *)asoc,
+ type, commands);
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
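The two SCTP changes are one refactor: the old SCTP_CMD_SEND_NEXT_ASCONF side effect (deleted from sm_sideeffect.c) called sctp_primitive_ASCONF(), which re-entered sctp_do_sm() from inside the command interpreter; the new sctp_send_next_asconf() instead pops the next queued chunk and tail-calls sctp_sf_do_prm_asconf() from the current state function, so its commands land on the same command sequence without recursing into the state machine. The shape of that "process inline instead of reposting" pattern, reduced to a toy queue:

#include <stdio.h>
#include <stddef.h>

struct chunk {
	struct chunk *next;
	int id;
};

/*
 * Pop the next queued chunk and handle it in the current call frame,
 * the way sctp_send_next_asconf() feeds sctp_sf_do_prm_asconf(),
 * rather than posting a command that re-enters the dispatcher.
 */
static int send_next(struct chunk **head)
{
	struct chunk *c = *head;

	if (!c)
		return 0;	/* nothing queued: plain CONSUME */
	*head = c->next;	/* list_del_init() in the kernel */
	printf("chunk %d sent inline\n", c->id);
	return 1;
}

int main(void)
{
	struct chunk b = { NULL, 2 };
	struct chunk a = { &b, 1 };
	struct chunk *q = &a;

	while (send_next(&q))
		;
	return 0;
}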
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bd0aac87b41a..c8ddd35e439c 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -989,7 +989,8 @@ static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
for (; i < TIPC_NAMETBL_SIZE; i++) {
seq_head = &tn->nametbl->seq_hlist[i];
- if (*last_type) {
+ if (*last_type ||
+ (!i && *last_publ && (*last_lower == *last_publ))) {
seq = nametbl_find_seq(net, *last_type);
if (!seq)
return -EPIPE;
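The name_table.c fix addresses a resumable-dump pitfall: the resume guard keyed only on *last_type being non-zero, but the added condition also resumes when the walk was interrupted inside the first hash bucket with a publication cursor saved (*last_publ set), where the saved type can legitimately be 0; without it, such a dump restarted from scratch and could skip or repeat entries. The ambiguity in miniature:

#include <stdio.h>

/*
 * A dump cursor where 0 is a legal key, so "key == 0" cannot mean
 * "no cursor"; a separate marker is needed (the hunk uses *last_publ).
 */
struct cursor {
	int key;	/* analogous to *last_type, may legitimately be 0 */
	int saved;	/* analogous to the *last_publ marker */
};

static int should_resume(const struct cursor *c)
{
	return c->key || c->saved;	/* not: c->key alone */
}

int main(void)
{
	struct cursor none = { 0, 0 };
	struct cursor at_zero = { 0, 1 };	/* interrupted at key 0 */

	printf("%d %d\n", should_resume(&none), should_resume(&at_zero)); /* 0 1 */
	return 0;
}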
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 6fee0d7ac112..41411487fd9d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -711,11 +711,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
switch (sk->sk_state) {
case TIPC_ESTABLISHED:
- case TIPC_CONNECTING:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
mask |= POLLOUT;
/* fall thru' */
case TIPC_LISTEN:
+ case TIPC_CONNECTING:
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= (POLLIN | POLLRDNORM);
break;
@@ -1597,7 +1597,7 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
return true;
/* If empty 'ACK-' message, wake up sleeping connect() */
- sk->sk_data_ready(sk);
+ sk->sk_state_change(sk);
/* 'ACK-' message is neither accepted nor rejected: */
msg_set_dest_droppable(hdr, 1);
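Both tipc/socket.c hunks attack the same hang: a socket in TIPC_CONNECTING no longer advertises POLLOUT (it cannot transmit yet, only become readable), and the empty 'ACK-' message now wakes waiters through sk->sk_state_change(), an unfiltered wakeup, whereas sk->sk_data_ready() issues an EPOLLIN-keyed wakeup that a poller waiting only for EPOLLOUT never sees. The pitfall in miniature, as a userspace analogy: signal the condition the waiter actually blocks on.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t state_change = PTHREAD_COND_INITIALIZER;
static int established;

static void *connecter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!established)
		pthread_cond_wait(&state_change, &lock);	/* the sk_state_change side */
	pthread_mutex_unlock(&lock);
	printf("connected\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, connecter, NULL);
	pthread_mutex_lock(&lock);
	established = 1;
	/*
	 * Signalling some other condition variable here (the data_ready
	 * analog) would leave the waiter asleep forever; the fix signals
	 * the one the waiter blocks on.
	 */
	pthread_cond_signal(&state_change);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}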
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 740affd65ee9..8e0dce5d0e57 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -58,7 +58,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring && user->session_keyring) {
+ if (READ_ONCE(user->uid_keyring) && READ_ONCE(user->session_keyring)) {
kleave(" = 0 [exist]");
return 0;
}
@@ -111,8 +111,10 @@ int install_user_keyrings(void)
}
/* install the keyrings */
- user->uid_keyring = uid_keyring;
- user->session_keyring = session_keyring;
+ /* paired with READ_ONCE() */
+ smp_store_release(&user->uid_keyring, uid_keyring);
+ /* paired with READ_ONCE() */
+ smp_store_release(&user->session_keyring, session_keyring);
}
mutex_unlock(&key_user_keyring_mutex);
@@ -339,6 +341,7 @@ void key_fsgid_changed(struct task_struct *tsk)
key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
+ const struct cred *cred = ctx->cred;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
@@ -352,9 +355,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
- if (ctx->cred->thread_keyring) {
+ if (cred->thread_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(ctx->cred->thread_keyring, 1), ctx);
+ make_key_ref(cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -370,9 +373,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
/* search the process keyring second */
- if (ctx->cred->process_keyring) {
+ if (cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(ctx->cred->process_keyring, 1), ctx);
+ make_key_ref(cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -390,10 +393,10 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
/* search the session keyring */
- if (ctx->cred->session_keyring) {
+ if (cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
+ make_key_ref(rcu_dereference(cred->session_keyring), 1),
ctx);
rcu_read_unlock();
@@ -413,9 +416,9 @@ key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
}
}
/* or search the user-session keyring */
- else if (ctx->cred->user->session_keyring) {
+ else if (READ_ONCE(cred->user->session_keyring)) {
key_ref = keyring_search_aux(
- make_key_ref(ctx->cred->user->session_keyring, 1),
+ make_key_ref(READ_ONCE(cred->user->session_keyring), 1),
ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -601,7 +604,7 @@ try_again:
goto error;
goto reget_creds;
} else if (ctx.cred->session_keyring ==
- ctx.cred->user->session_keyring &&
+ READ_ONCE(ctx.cred->user->session_keyring) &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -617,7 +620,7 @@ try_again:
break;
case KEY_SPEC_USER_KEYRING:
- if (!ctx.cred->user->uid_keyring) {
+ if (!READ_ONCE(ctx.cred->user->uid_keyring)) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
@@ -629,7 +632,7 @@ try_again:
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!ctx.cred->user->session_keyring) {
+ if (!READ_ONCE(ctx.cred->user->session_keyring)) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 4213dfcbc596..bbdbb6eab788 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -308,11 +308,12 @@ static int construct_get_dest_keyring(struct key **_dest_keyring)
case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
dest_keyring =
- key_get(cred->user->session_keyring);
+ key_get(READ_ONCE(cred->user->session_keyring));
break;
case KEY_REQKEY_DEFL_USER_KEYRING:
- dest_keyring = key_get(cred->user->uid_keyring);
+ dest_keyring =
+ key_get(READ_ONCE(cred->user->uid_keyring));
break;
case KEY_REQKEY_DEFL_GROUP_KEYRING:
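All of the keyring hunks, in process_keys.c and request_key.c alike, implement one lockless-publication pattern: install_user_keyrings() publishes the fully built keyrings with smp_store_release(), and every reader that may run without key_user_keyring_mutex fetches the pointer with READ_ONCE(), so a reader can never observe the pointer before the keyring it names is initialized. A userspace analog using C11 atomics (a release store paired with an acquire load; the kernel pairing with READ_ONCE() gets away with the cheaper dependency ordering, since readers immediately dereference the loaded pointer):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct keyring { int serial; };

static struct keyring kr;
static struct keyring *_Atomic slot;	/* analogous to user->uid_keyring */

static void *publisher(void *arg)
{
	(void)arg;
	kr.serial = 42;		/* fully initialize first... */
	/* ...then publish; smp_store_release() in the hunk. */
	atomic_store_explicit(&slot, &kr, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	for (;;) {
		struct keyring *k =
			atomic_load_explicit(&slot, memory_order_acquire);
		if (k) {	/* if the pointer is visible, serial is 42 */
			printf("serial %d\n", k->serial);
			return NULL;
		}
	}
}

int main(void)
{
	pthread_t p, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&p, NULL, publisher, NULL);
	pthread_join(p, NULL);
	pthread_join(r, NULL);
	return 0;
}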
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e59483316018..b4205e7f1003 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6186,13 +6186,15 @@ static const struct hda_fixup alc269_fixups[] = {
.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
},
[ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
- .type = HDA_FIXUP_PINS,
- .v.pins = (const struct hda_pintbl[]) {
- { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
- { }
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Enable the Mic */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
+ {}
},
.chained = true,
- .chain_id = ALC255_FIXUP_HEADSET_MODE
+ .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
},
[ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -7112,6 +7114,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x19, 0x0181303F},
{0x21, 0x0221102f}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170120},
+ {0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
{0x12, 0x90a601c0},
{0x14, 0x90171120},
{0x21, 0x02211030}),
diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c
index d6616bccfd27..878b0c9d164b 100644
--- a/sound/soc/codecs/cx2072x.c
+++ b/sound/soc/codecs/cx2072x.c
@@ -17,13 +17,10 @@
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
-#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/firmware.h>
#include <linux/regmap.h>
-#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
@@ -2232,14 +2229,6 @@ static const struct i2c_device_id cx2072x_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, cx2072x_i2c_id);
-static const struct of_device_id cx2072x_of_match[] = {
- { .compatible = "cnxt,cx20721", },
- { .compatible = "cnxt,cx20723", },
- { .compatible = "cnxt,cx7601", },
- {}
-};
-MODULE_DEVICE_TABLE(of, cx2072x_of_match);
-
#ifdef CONFIG_ACPI
static struct acpi_device_id cx2072x_acpi_match[] = {
{ "14F10720", 0 },
@@ -2254,7 +2243,6 @@ static struct i2c_driver cx2072x_i2c_driver = {
.id_table = cx2072x_i2c_id,
.driver = {
.name = "cx2072x",
- .of_match_table = cx2072x_of_match,
.acpi_match_table = ACPI_PTR(cx2072x_acpi_match),
},
};
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 6acf221b0cc3..45d5087a752b 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1790,7 +1790,8 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
return 1;
}
- func = insn->func ? insn->func->pfunc : NULL;
+ if (insn->func)
+ func = insn->func->pfunc;
if (func && insn->ignore) {
WARN_FUNC("BUG: why am I validating an ignored function?",