author    Michal Marek <mmarek@suse.com>	2017-03-03 14:39:33 +0100
committer Michal Marek <mmarek@suse.com>	2017-03-03 14:39:33 +0100
commit    f599ef31dc67d96237d1cf11a40273dcdcef09d1 (patch)
tree      38decd058935a39364b52c996f58b50f1d4c258b
parent    c34d17deae04d561aa4549f881a4f072cb243f32 (diff)
parent    601f5403f9c237da89523e039518e5944c1f9c07 (diff)

Merge branch 'users/mbrugger/SLE12-SP3/for-next' into SLE12-SP3 (tag: rpm-4.4.52-1)

Pull Qualcomm Centriq enablement from Matthias Brugger (fate#320512).

suse-commit: 56e022453a9793ec089a82d5640ef2d926161f7f
Diffstat:
 Documentation/ABI/testing/sysfs-platform-hidma | 9
 Documentation/ABI/testing/sysfs-platform-hidma-mgmt | 97
 Documentation/DMA-API-HOWTO.txt | 10
 Documentation/DMA-API.txt | 22
 Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt | 89
 Documentation/features/io/dma_map_attrs/arch-support.txt | 40
 arch/Kconfig | 6
 arch/alpha/Kconfig | 1
 arch/alpha/include/asm/dma-mapping.h | 2
 arch/arc/include/asm/dma-mapping.h | 187
 arch/arc/mm/dma.c | 152
 arch/arm/Kconfig | 20
 arch/arm/boot/compressed/Makefile | 4
 arch/arm/boot/compressed/efi-header.S | 130
 arch/arm/boot/compressed/head.S | 54
 arch/arm/boot/compressed/vmlinux.lds.S | 7
 arch/arm/include/asm/dma-mapping.h | 7
 arch/arm/include/asm/efi.h | 84
 arch/arm/include/asm/mmu_context.h | 2
 arch/arm/kernel/Makefile | 1
 arch/arm/kernel/efi.c | 79
 arch/arm/kernel/setup.c | 3
 arch/arm/mm/dma-mapping.c | 63
 arch/arm64/Kconfig | 1
 arch/arm64/include/asm/dma-mapping.h | 2
 arch/arm64/include/asm/efi.h | 11
 arch/arm64/kernel/efi.c | 423
 arch/arm64/kernel/vmlinux.lds.S | 1
 arch/avr32/include/asm/dma-mapping.h | 342
 arch/avr32/mm/dma-coherent.c | 115
 arch/blackfin/include/asm/dma-mapping.h | 127
 arch/blackfin/kernel/dma-mapping.c | 52
 arch/c6x/Kconfig | 1
 arch/c6x/include/asm/dma-mapping.h | 98
 arch/c6x/kernel/dma.c | 95
 arch/c6x/mm/dma-coherent.c | 10
 arch/cris/arch-v32/drivers/pci/dma.c | 54
 arch/cris/include/asm/dma-mapping.h | 161
 arch/frv/Kconfig | 1
 arch/frv/include/asm/dma-mapping.h | 132
 arch/frv/mb93090-mb00/pci-dma-nommu.c | 72
 arch/frv/mb93090-mb00/pci-dma.c | 74
 arch/h8300/Kconfig | 1
 arch/h8300/include/asm/dma-mapping.h | 2
 arch/hexagon/Kconfig | 1
 arch/hexagon/include/asm/dma-mapping.h | 2
 arch/ia64/Kconfig | 1
 arch/ia64/include/asm/dma-mapping.h | 2
 arch/m68k/include/asm/dma-mapping.h | 112
 arch/m68k/kernel/dma.c | 61
 arch/metag/include/asm/dma-mapping.h | 179
 arch/metag/kernel/dma.c | 146
 arch/microblaze/Kconfig | 1
 arch/microblaze/include/asm/dma-mapping.h | 2
 arch/mips/Kconfig | 1
 arch/mips/include/asm/dma-mapping.h | 2
 arch/mn10300/Kconfig | 1
 arch/mn10300/include/asm/dma-mapping.h | 161
 arch/mn10300/mm/dma-alloc.c | 67
 arch/nios2/include/asm/dma-mapping.h | 123
 arch/nios2/mm/dma-mapping.c | 149
 arch/openrisc/Kconfig | 3
 arch/openrisc/include/asm/dma-mapping.h | 2
 arch/parisc/Kconfig | 1
 arch/parisc/include/asm/dma-mapping.h | 189
 arch/parisc/kernel/drivers.c | 2
 arch/parisc/kernel/pci-dma.c | 92
 arch/powerpc/Kconfig | 1
 arch/powerpc/include/asm/dma-mapping.h | 2
 arch/s390/Kconfig | 1
 arch/s390/include/asm/dma-mapping.h | 2
 arch/sh/Kconfig | 1
 arch/sh/include/asm/dma-mapping.h | 2
 arch/sparc/Kconfig | 1
 arch/sparc/include/asm/dma-mapping.h | 17
 arch/tile/Kconfig | 1
 arch/tile/include/asm/dma-mapping.h | 32
 arch/tile/kernel/pci-dma.c | 29
 arch/unicore32/Kconfig | 1
 arch/unicore32/include/asm/dma-mapping.h | 2
 arch/x86/Kconfig | 1
 arch/x86/Makefile | 3
 arch/x86/boot/compressed/eboot.c | 134
 arch/x86/include/asm/bios_ebda.h | 2
 arch/x86/include/asm/dma-mapping.h | 2
 arch/x86/include/asm/efi.h | 27
 arch/x86/include/asm/paravirt.h | 6
 arch/x86/include/asm/paravirt_types.h | 5
 arch/x86/include/asm/processor.h | 1
 arch/x86/include/asm/x86_init.h | 24
 arch/x86/kernel/Makefile | 6
 arch/x86/kernel/ebda.c | 109
 arch/x86/kernel/head.c | 70
 arch/x86/kernel/head32.c | 4
 arch/x86/kernel/head64.c | 3
 arch/x86/kernel/platform-quirks.c | 25
 arch/x86/kernel/rtc.c | 7
 arch/x86/kernel/setup.c | 16
 arch/x86/lguest/boot.c | 1
 arch/x86/platform/efi/efi-bgrt.c | 40
 arch/x86/platform/efi/efi.c | 375
 arch/x86/platform/efi/efi_32.c | 5
 arch/x86/platform/efi/efi_64.c | 125
 arch/x86/platform/efi/efi_stub_64.S | 43
 arch/x86/platform/efi/quirks.c | 212
 arch/x86/xen/enlighten.c | 10
 arch/xtensa/Kconfig | 1
 arch/xtensa/include/asm/dma-mapping.h | 4
 crypto/Kconfig | 10
 crypto/Makefile | 3
 crypto/acompress.c | 169
 crypto/crypto_user.c | 19
 crypto/scompress.c | 356
 crypto/testmgr.c | 187
 drivers/base/dma-mapping.c | 7
 drivers/base/platform-msi.c | 2
 drivers/dma/Kconfig | 11
 drivers/dma/Makefile | 2
 drivers/dma/dmaengine.h | 84
 drivers/dma/dmatest.c | 17
 drivers/dma/qcom/Kconfig | 29
 drivers/dma/qcom/Makefile | 5
 drivers/dma/qcom/bam_dma.c (renamed from drivers/dma/qcom_bam_dma.c) | 4
 drivers/dma/qcom/hidma.c | 919
 drivers/dma/qcom/hidma.h | 165
 drivers/dma/qcom/hidma_dbg.c | 217
 drivers/dma/qcom/hidma_ll.c | 859
 drivers/dma/qcom/hidma_mgmt.c | 413
 drivers/dma/qcom/hidma_mgmt.h | 39
 drivers/dma/qcom/hidma_mgmt_sys.c | 295
 drivers/dma/sh/rcar-dmac.c | 116
 drivers/firmware/dmi_scan.c | 4
 drivers/firmware/efi/Makefile | 7
 drivers/firmware/efi/arm-init.c | 235
 drivers/firmware/efi/arm-runtime.c | 144
 drivers/firmware/efi/capsule-loader.c | 8
 drivers/firmware/efi/capsule.c | 6
 drivers/firmware/efi/efi.c | 98
 drivers/firmware/efi/esrt.c | 23
 drivers/firmware/efi/fake_mem.c | 135
 drivers/firmware/efi/libstub/Makefile | 9
 drivers/firmware/efi/libstub/arm-stub.c | 8
 drivers/firmware/efi/libstub/arm32-stub.c | 102
 drivers/firmware/efi/libstub/arm64-stub.c | 34
 drivers/firmware/efi/libstub/efi-stub-helper.c | 175
 drivers/firmware/efi/libstub/efistub.h | 12
 drivers/firmware/efi/libstub/fdt.c | 54
 drivers/firmware/efi/libstub/random.c | 12
 drivers/firmware/efi/memattr.c | 182
 drivers/firmware/efi/memmap.c | 341
 drivers/firmware/efi/runtime-map.c | 35
 drivers/gpu/drm/Kconfig | 4
 drivers/gpu/drm/imx/Kconfig | 2
 drivers/gpu/drm/rcar-du/Kconfig | 2
 drivers/gpu/drm/shmobile/Kconfig | 2
 drivers/gpu/drm/sti/Kconfig | 2
 drivers/gpu/drm/tilcdc/Kconfig | 2
 drivers/gpu/drm/vc4/Kconfig | 2
 drivers/media/platform/Kconfig | 1
 drivers/of/fdt.c | 13
 drivers/of/irq.c | 1
 drivers/parisc/ccio-dma.c | 57
 drivers/parisc/sba_iommu.c | 52
 drivers/pci/pcie/aer/aerdrv_errprint.c | 6
 drivers/pci/setup-res.c | 29
 include/asm-generic/dma-coherent.h | 32
 include/asm-generic/dma-mapping-broken.h | 95
 include/asm-generic/dma-mapping-common.h | 358
 include/crypto/acompress.h | 269
 include/crypto/internal/acompress.h | 81
 include/crypto/internal/scompress.h | 136
 include/linux/aer.h | 2
 include/linux/crypto.h | 3
 include/linux/dma-attrs.h | 10
 include/linux/dma-debug.h | 19
 include/linux/dma-mapping.h | 453
 include/linux/dmaengine.h | 16
 include/linux/efi.h | 102
 include/linux/of_fdt.h | 2
 include/linux/pci.h | 8
 include/uapi/linux/cryptouser.h | 5
 lib/dma-debug.c | 52
 182 files changed, 8705 insertions(+), 4162 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma b/Documentation/ABI/testing/sysfs-platform-hidma
new file mode 100644
index 000000000000..d36441538660
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma
@@ -0,0 +1,9 @@
+What: /sys/devices/platform/hidma-*/chid
+ /sys/devices/platform/QCOM8061:*/chid
+Date: Dec 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains the ID of the channel within the HIDMA instance.
+ It is used to associate a given HIDMA channel with the
+ priority and weight calls in the management interface.
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
new file mode 100644
index 000000000000..c2fb5d033f0e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
@@ -0,0 +1,97 @@
+What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/priority
+ /sys/devices/platform/QCOM8060:*/chanops/chan*/priority
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains either 0 or 1 and indicates if the DMA channel is a
+ low priority (0) or high priority (1) channel.
+
+What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/weight
+ /sys/devices/platform/QCOM8060:*/chanops/chan*/weight
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains 0..15 and indicates the weight of the channel among
+ equal priority channels during round robin scheduling.
+
+What: /sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles
+ /sys/devices/platform/QCOM8060:*/chreset_timeout_cycles
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains the platform specific cycle value to wait after a
+ reset command is issued. If the value is chosen too short,
+ then the HW will issue a reset failure interrupt. The value
+ is platform specific and should not be changed without
+ consultation.
+
+What: /sys/devices/platform/hidma-mgmt*/dma_channels
+ /sys/devices/platform/QCOM8060:*/dma_channels
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains the number of dma channels supported by one instance
+ of HIDMA hardware. The value may change from chip to chip.
+
+What: /sys/devices/platform/hidma-mgmt*/hw_version_major
+ /sys/devices/platform/QCOM8060:*/hw_version_major
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Version number major for the hardware.
+
+What: /sys/devices/platform/hidma-mgmt*/hw_version_minor
+ /sys/devices/platform/QCOM8060:*/hw_version_minor
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Version number minor for the hardware.
+
+What: /sys/devices/platform/hidma-mgmt*/max_rd_xactions
+ /sys/devices/platform/QCOM8060:*/max_rd_xactions
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains a value between 0 and 31. Maximum number of
+ read transactions that can be issued back to back.
+ Choosing a higher number gives better performance but
+ can also cause performance reduction to other peripherals
+ sharing the same bus.
+
+What: /sys/devices/platform/hidma-mgmt*/max_read_request
+ /sys/devices/platform/QCOM8060:*/max_read_request
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Size of each read request. The value needs to be a power
+ of two and can be between 128 and 1024.
+
+What: /sys/devices/platform/hidma-mgmt*/max_wr_xactions
+ /sys/devices/platform/QCOM8060:*/max_wr_xactions
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Contains a value between 0 and 31. Maximum number of
+ write transactions that can be issued back to back.
+ Choosing a higher number gives better performance but
+ can also cause performance reduction to other peripherals
+ sharing the same bus.
+
+
+What: /sys/devices/platform/hidma-mgmt*/max_write_request
+ /sys/devices/platform/QCOM8060:*/max_write_request
+Date: Nov 2015
+KernelVersion: 4.4
+Contact: "Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+ Size of each write request. The value needs to be a power
+ of two and can be between 128 and 1024.
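
For illustration, a minimal user-space sketch of driving the channel attributes documented above; the instance name "hidma-mgmt0" and channel "chan0" are hypothetical stand-ins for the hidma-mgmt*/chan* patterns:

	#include <stdio.h>

	static int write_attr(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		const char *base = "/sys/devices/platform/hidma-mgmt0/chanops/chan0";
		char path[256];

		/* Mark channel 0 high priority (1), then give it the maximum
		 * round-robin weight (15), per the ranges documented above. */
		snprintf(path, sizeof(path), "%s/priority", base);
		if (write_attr(path, "1\n"))
			return 1;
		snprintf(path, sizeof(path), "%s/weight", base);
		return write_attr(path, "15\n") ? 1 : 0;
	}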
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index d69b3fc64e14..781024ef9050 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -951,16 +951,6 @@ to "Closing".
alignment constraints (e.g. the alignment constraints about 64-bit
objects).
-3) Supporting multiple types of IOMMUs
-
- If your architecture needs to support multiple types of IOMMUs, you
- can use include/linux/asm-generic/dma-mapping-common.h. It's a
- library to support the DMA API with multiple types of IOMMUs. Lots
- of architectures (x86, powerpc, sh, alpha, ia64, microblaze and
- sparc) use it. Choose one to see how it can be used. If you need to
- support multiple types of IOMMUs in a single system, the example of
- x86 or powerpc helps.
-
Closing
This document, and the API itself, would not be in its current
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 1e98a7e6bccc..5d974ffed8cd 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -277,14 +277,26 @@ and <size> parameters are provided to do partial page mapping, it is
recommended that you never use these unless you really know what the
cache width is.
+dma_addr_t
+dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+
+void
+dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+
+API for mapping and unmapping MMIO resources. All the notes and
+warnings for the other mapping APIs apply here. The API should only be
+used to map device MMIO resources; mapping of RAM is not permitted.
+
int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-In some circumstances dma_map_single() and dma_map_page() will fail to create
-a mapping. A driver can check for these errors by testing the returned
-DMA address with dma_mapping_error(). A non-zero return value means the mapping
-could not be created and the driver should take appropriate action (e.g.
-reduce current DMA mapping usage or delay and try again later).
+In some circumstances dma_map_single(), dma_map_page() and dma_map_resource()
+will fail to create a mapping. A driver can check for these errors by testing
+the returned DMA address with dma_mapping_error(). A non-zero return value
+means the mapping could not be created and the driver should take appropriate
+action (e.g. reduce current DMA mapping usage or delay and try again later).
int
dma_map_sg(struct device *dev, struct scatterlist *sg,
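
As a quick illustration of the resource-mapping API documented above, a hedged driver-side sketch; dev, fifo_phys and the 4 KiB window size are assumptions, not taken from this patch:

	dma_addr_t dma;

	/* Map a device FIFO (MMIO, not RAM) for DMA, and check the result
	 * with dma_mapping_error() before handing it to the DMA engine. */
	dma = dma_map_resource(dev, fifo_phys, SZ_4K, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the DMA engine with 'dma' ... */

	dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);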
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
new file mode 100644
index 000000000000..fd5618bd8fbc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -0,0 +1,89 @@
+Qualcomm Technologies HIDMA Management interface
+
+Qualcomm Technologies HIDMA is a high speed DMA device. It only supports
+memcpy and memset capabilities. It has been designed for virtualized
+environments.
+
+Each HIDMA HW instance consists of multiple DMA channels. These channels
+share the same bandwidth. The bandwidth utilization can be partitioned
+among channels based on the priority and weight assignments.
+
+There are only two priority levels and 15 weight assignments possible.
+
+Other parameters here determine how much of the system bus this HIDMA
+instance can use, such as the maximum read/write request size and the number
+of bytes to read/write in a single burst.
+
+Main node required properties:
+- compatible: "qcom,hidma-mgmt-1.0";
+- reg: Address range for DMA device
+- dma-channels: Number of channels supported by this DMA controller.
+- max-write-burst-bytes: Maximum write burst in bytes that HIDMA can
+ occupy the bus for in a single transaction. A memcpy request is
+ fragmented to multiples of this amount. This parameter is used while
+ writing into destination memory. Setting this value incorrectly can
+ starve other peripherals in the system.
+- max-read-burst-bytes: Maximum read burst in bytes that HIDMA can
+ occupy the bus for in a single transaction. A memcpy request is
+ fragmented to multiples of this amount. This parameter is used while
+ reading the source memory. Setting this value incorrectly can starve
+ other peripherals in the system.
+- max-write-transactions: This value is how many times a write burst is
+ applied back to back while writing to the destination before yielding
+ the bus.
+- max-read-transactions: This value is how many times a read burst is
+ applied back to back while reading the source before yielding the bus.
+- channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC.
+ Once a reset is applied to the HW, HW starts a timer for reset operation
+ to confirm. If reset is not completed within this time, HW reports reset
+ failure.
+
+Sub-nodes:
+
+HIDMA has one or more DMA channels that are used to move data from one
+memory location to another.
+
+When the OS is not in control of the management interface (i.e. it's a guest),
+the channel nodes appear on their own, not under a management node.
+
+Required properties:
+- compatible: must contain "qcom,hidma-1.0"
+- reg: Addresses for the transfer and event channel
+- interrupts: Should contain the event interrupt
+- desc-count: Number of asynchronous requests this channel can handle
+- iommus: required; a reference (phandle) to the IOMMU node
+
+Example:
+
+Hypervisor OS configuration:
+
+ hidma-mgmt@f9984000 {
+ compatible = "qcom,hidma-mgmt-1.0";
+ reg = <0xf9984000 0x15000>;
+ dma-channels = <6>;
+ max-write-burst-bytes = <1024>;
+ max-read-burst-bytes = <1024>;
+ max-write-transactions = <31>;
+ max-read-transactions = <31>;
+ channel-reset-timeout-cycles = <0x500>;
+
+ hidma_24: dma-controller@0x5c050000 {
+ compatible = "qcom,hidma-1.0";
+ reg = <0 0x5c050000 0x0 0x1000>,
+ <0 0x5c0b0000 0x0 0x1000>;
+ interrupts = <0 389 0>;
+ desc-count = <10>;
+ iommus = <&system_mmu>;
+ };
+ };
+
+Guest OS configuration:
+
+ hidma_24: dma-controller@0x5c050000 {
+ compatible = "qcom,hidma-1.0";
+ reg = <0 0x5c050000 0x0 0x1000>,
+ <0 0x5c0b0000 0x0 0x1000>;
+ interrupts = <0 389 0>;
+ desc-count = <10>;
+ iommus = <&system_mmu>;
+ };
diff --git a/Documentation/features/io/dma_map_attrs/arch-support.txt b/Documentation/features/io/dma_map_attrs/arch-support.txt
deleted file mode 100644
index 51d0f1c02a3e..000000000000
--- a/Documentation/features/io/dma_map_attrs/arch-support.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-#
-# Feature name: dma_map_attrs
-# Kconfig: HAVE_DMA_ATTRS
-# description: arch provides dma_*map*_attrs() APIs
-#
- -----------------------
- | arch |status|
- -----------------------
- | alpha: | ok |
- | arc: | TODO |
- | arm: | ok |
- | arm64: | ok |
- | avr32: | TODO |
- | blackfin: | TODO |
- | c6x: | TODO |
- | cris: | TODO |
- | frv: | TODO |
- | h8300: | ok |
- | hexagon: | ok |
- | ia64: | ok |
- | m32r: | TODO |
- | m68k: | TODO |
- | metag: | TODO |
- | microblaze: | ok |
- | mips: | ok |
- | mn10300: | TODO |
- | nios2: | TODO |
- | openrisc: | ok |
- | parisc: | TODO |
- | powerpc: | ok |
- | s390: | ok |
- | score: | TODO |
- | sh: | ok |
- | sparc: | ok |
- | tile: | ok |
- | um: | TODO |
- | unicore32: | ok |
- | x86: | ok |
- | xtensa: | TODO |
- -----------------------
diff --git a/arch/Kconfig b/arch/Kconfig
index 82b887d21863..dcd0eb08a8be 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG
config HAVE_ARCH_TRACEHOOK
bool
-config HAVE_DMA_ATTRS
- bool
-
config HAVE_DMA_CONTIGUOUS
bool
@@ -569,4 +566,7 @@ config OLD_SIGACTION
config COMPAT_OLD_SIGACTION
bool
+config ARCH_NO_COHERENT_DMA_MMAP
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index f515a4dbf7a0..9d8a85801ed1 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -9,7 +9,6 @@ config ALPHA
select HAVE_OPROFILE
select HAVE_PCSPKR_PLATFORM
select HAVE_PERF_EVENTS
- select HAVE_DMA_ATTRS
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select AUTO_IRQ_AFFINITY if SMP
diff --git a/arch/alpha/include/asm/dma-mapping.h b/arch/alpha/include/asm/dma-mapping.h
index 72a8ca7796d9..3c3451f58ff4 100644
--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dma_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
#define dma_cache_sync(dev, va, size, dir) ((void)0)
#endif /* _ALPHA_DMA_MAPPING_H */
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index 2d28ba939d8e..660205414f1d 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -11,192 +11,11 @@
#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H
-#include <asm-generic/dma-coherent.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops arc_dma_ops;
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
- dma_addr_t dma_handle);
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-/*
- * streaming DMA Mapping API...
- * CPU accesses page via normal paddr, thus needs to explicitly made
- * consistent before each use
- */
-
-static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir)
-{
- switch (dir) {
- case DMA_FROM_DEVICE:
- dma_cache_inv(paddr, size);
- break;
- case DMA_TO_DEVICE:
- dma_cache_wback(paddr, size);
- break;
- case DMA_BIDIRECTIONAL:
- dma_cache_wback_inv(paddr, size);
- break;
- default:
- pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
- }
-}
-
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir);
-
-#define _dma_cache_sync(addr, sz, dir) \
-do { \
- if (__builtin_constant_p(dir)) \
- __inline_dma_cache_sync(addr, sz, dir); \
- else \
- __arc_dma_cache_sync(addr, sz, dir); \
-} \
-while (0);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_cache_sync((unsigned long)cpu_addr, size, dir);
- return (dma_addr_t)cpu_addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- unsigned long paddr = page_to_phys(page) + offset;
- return dma_map_single(dev, (void *)paddr, size, dir);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
- s->length, dir);
-
- return nents;
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- struct scatterlist *s;
- int i;
-
- for_each_sg(sg, s, nents, i)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction dir)
-{
- _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
-}
-
-static inline int dma_supported(struct device *dev, u64 dma_mask)
-{
- /* Support 32 bit DMA mask exclusively */
- return dma_mask == DMA_BIT_MASK(32);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
+ return &arc_dma_ops;
}
#endif
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 29a46bb198cc..01eaf88bf821 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -17,18 +17,14 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/dma-debug.h>
-#include <linux/export.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
-/*
- * Helpers for Coherent DMA API.
- */
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+
+static void *arc_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
- void *paddr;
+ void *paddr, *kvaddr;
/* This is linear addr (0x8000_0000 based) */
paddr = alloc_pages_exact(size, gfp);
@@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
/* This is bus address, platform dependent */
*dma_handle = (dma_addr_t)paddr;
- return paddr;
-}
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
-{
- free_pages_exact((void *)dma_handle, size);
-}
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- void *paddr, *kvaddr;
-
/*
* IOC relies on all data (even coherent DMA data) being in cache
* Thus allocate normal cached memory
@@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
* -For coherent data, Read/Write to buffers terminate early in cache
* (vs. always going to memory - thus are faster)
*/
- if (is_isa_arcv2() && ioc_exists)
- return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
-
- /* This is linear addr (0x8000_0000 based) */
- paddr = alloc_pages_exact(size, gfp);
- if (!paddr)
- return NULL;
+ if ((is_isa_arcv2() && ioc_exists) ||
+ dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ return paddr;
/* This is kernel Virtual address (0x7000_0000 based) */
kvaddr = ioremap_nocache((unsigned long)paddr, size);
if (kvaddr == NULL)
return NULL;
- /* This is bus address, platform dependent */
- *dma_handle = (dma_addr_t)paddr;
-
/*
* Evict any existing L1 and/or L2 lines for the backing page
* in case it was used earlier as a normal "cached" page.
@@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return kvaddr;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
- dma_addr_t dma_handle)
+static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- if (is_isa_arcv2() && ioc_exists)
- return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
-
- iounmap((void __force __iomem *)kvaddr);
+ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
+ !(is_isa_arcv2() && ioc_exists))
+ iounmap((void __force __iomem *)vaddr);
free_pages_exact((void *)dma_handle, size);
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
- * Helper for streaming DMA...
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to be explicitly made
+ * consistent before each use
*/
-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
- enum dma_data_direction dir)
+static void _dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ _dma_cache_sync(paddr, size, dir);
+ return (dma_addr_t)paddr;
+}
+
+static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static void arc_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+}
+
+static void arc_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
- __inline_dma_cache_sync(paddr, size, dir);
+ _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
}
-EXPORT_SYMBOL(__arc_dma_cache_sync);
+
+static void arc_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static void arc_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static int arc_dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+struct dma_map_ops arc_dma_ops = {
+ .alloc = arc_dma_alloc,
+ .free = arc_dma_free,
+ .map_page = arc_dma_map_page,
+ .map_sg = arc_dma_map_sg,
+ .sync_single_for_device = arc_dma_sync_single_for_device,
+ .sync_single_for_cpu = arc_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = arc_dma_sync_sg_for_device,
+ .dma_supported = arc_dma_supported,
+};
+EXPORT_SYMBOL(arc_dma_ops);
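
The point of the conversion above is that the long list of per-arch inline wrappers is replaced by a single dispatch through get_dma_ops(). A simplified sketch of the common wrapper that now drives arc_dma_ops (modeled on include/linux/dma-mapping.h, with debug hooks trimmed):

	static inline dma_addr_t dma_map_single_attrs(struct device *dev,
			void *ptr, size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		/* On ARC this resolves to arc_dma_map_page() above. */
		return ops->map_page(dev, virt_to_page(ptr),
				     offset_in_page(ptr), size, dir, attrs);
	}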
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b0c3d0bcb1f5..a129c0ade98c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -45,7 +45,6 @@ config ARM
select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32
select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
@@ -2044,6 +2043,25 @@ config AUTO_ZRELADDR
0xf8000000. This assumes the zImage being placed in the first 128MB
from start of memory.
+config EFI_STUB
+ bool
+
+config EFI
+ bool "UEFI runtime support"
+ depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ select EFI_STUB
+ select EFI_ARMSTUB
+ select EFI_RUNTIME_WRAPPERS
+ ---help---
+ This option provides support for runtime services provided
+ by UEFI firmware (such as non-volatile variables, realtime
+ clock, and platform reset). A UEFI stub is also provided to
+ allow the kernel to be booted as an EFI application. This
+ is only useful for kernels that may run on systems that have
+ UEFI firmware.
+
endmenu
menu "CPU Power Management"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3f9a9ebc77c3..4c23a68a0917 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -167,9 +167,11 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
false; \
fi
+efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a
+
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
- $(bswapsdi2) FORCE
+ $(bswapsdi2) $(efi-obj-y) FORCE
@$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
new file mode 100644
index 000000000000..9d5dc4fda3c1
--- /dev/null
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2013-2015 Linaro Ltd
+ * Authors: Roy Franz <roy.franz@linaro.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ .macro __nop
+#ifdef CONFIG_EFI_STUB
+ @ This is almost but not quite a NOP, since it does clobber the
+ @ condition flags. But it is the best we can do for EFI, since
+ @ PE/COFF expects the magic string "MZ" at offset 0, while the
+ @ ARM/Linux boot protocol expects an executable instruction
+ @ there.
+ .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
+#else
+ mov r0, r0
+#endif
+ .endm
+
+ .macro __EFI_HEADER
+#ifdef CONFIG_EFI_STUB
+ b __efi_start
+
+ .set start_offset, __efi_start - start
+ .org start + 0x3c
+ @
+ @ The PE header can be anywhere in the file, but for
+ @ simplicity we keep it together with the MSDOS header
+ @ The offset to the PE/COFF header needs to be at offset
+ @ 0x3C in the MSDOS header.
+ @ The only 2 fields of the MSDOS header that are used are this
+ @ PE/COFF offset, and the "MZ" bytes at offset 0x0.
+ @
+ .long pe_header - start @ Offset to the PE header.
+
+pe_header:
+ .ascii "PE\0\0"
+
+coff_header:
+ .short 0x01c2 @ ARM or Thumb
+ .short 2 @ nr_sections
+ .long 0 @ TimeDateStamp
+ .long 0 @ PointerToSymbolTable
+ .long 1 @ NumberOfSymbols
+ .short section_table - optional_header
+ @ SizeOfOptionalHeader
+ .short 0x306 @ Characteristics.
+ @ IMAGE_FILE_32BIT_MACHINE |
+ @ IMAGE_FILE_DEBUG_STRIPPED |
+ @ IMAGE_FILE_EXECUTABLE_IMAGE |
+ @ IMAGE_FILE_LINE_NUMS_STRIPPED
+
+optional_header:
+ .short 0x10b @ PE32 format
+ .byte 0x02 @ MajorLinkerVersion
+ .byte 0x14 @ MinorLinkerVersion
+ .long _end - __efi_start @ SizeOfCode
+ .long 0 @ SizeOfInitializedData
+ .long 0 @ SizeOfUninitializedData
+ .long efi_stub_entry - start @ AddressOfEntryPoint
+ .long start_offset @ BaseOfCode
+ .long 0 @ data
+
+extra_header_fields:
+ .long 0 @ ImageBase
+ .long 0x200 @ SectionAlignment
+ .long 0x200 @ FileAlignment
+ .short 0 @ MajorOperatingSystemVersion
+ .short 0 @ MinorOperatingSystemVersion
+ .short 0 @ MajorImageVersion
+ .short 0 @ MinorImageVersion
+ .short 0 @ MajorSubsystemVersion
+ .short 0 @ MinorSubsystemVersion
+ .long 0 @ Win32VersionValue
+
+ .long _end - start @ SizeOfImage
+ .long start_offset @ SizeOfHeaders
+ .long 0 @ CheckSum
+ .short 0xa @ Subsystem (EFI application)
+ .short 0 @ DllCharacteristics
+ .long 0 @ SizeOfStackReserve
+ .long 0 @ SizeOfStackCommit
+ .long 0 @ SizeOfHeapReserve
+ .long 0 @ SizeOfHeapCommit
+ .long 0 @ LoaderFlags
+ .long 0x6 @ NumberOfRvaAndSizes
+
+ .quad 0 @ ExportTable
+ .quad 0 @ ImportTable
+ .quad 0 @ ResourceTable
+ .quad 0 @ ExceptionTable
+ .quad 0 @ CertificationTable
+ .quad 0 @ BaseRelocationTable
+
+section_table:
+ @
+ @ The EFI application loader requires a relocation section
+ @ because EFI applications must be relocatable. This is a
+ @ dummy section as far as we are concerned.
+ @
+ .ascii ".reloc\0\0"
+ .long 0 @ VirtualSize
+ .long 0 @ VirtualAddress
+ .long 0 @ SizeOfRawData
+ .long 0 @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long 0x42100040 @ Characteristics
+
+ .ascii ".text\0\0\0"
+ .long _end - __efi_start @ VirtualSize
+ .long __efi_start @ VirtualAddress
+ .long _edata - __efi_start @ SizeOfRawData
+ .long __efi_start @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long 0xe0500020 @ Characteristics
+
+ .align 9
+__efi_start:
+#endif
+ .endm
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 856913705169..fc6d541549a2 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -12,6 +12,8 @@
#include <asm/assembler.h>
#include <asm/v7m.h>
+#include "efi-header.S"
+
AR_CLASS( .arch armv7-a )
M_CLASS( .arch armv7-m )
@@ -126,7 +128,7 @@
start:
.type start,#function
.rept 7
- mov r0, r0
+ __nop
.endr
ARM( mov r0, r0 )
ARM( b 1f )
@@ -139,7 +141,8 @@ start:
.word 0x04030201 @ endianness flag
THUMB( .thumb )
-1:
+1: __EFI_HEADER
+
ARM_BE8( setend be ) @ go BE8 if compiled for BE8
AR_CLASS( mrs r9, cpsr )
#ifdef CONFIG_ARM_VIRT_EXT
@@ -1353,6 +1356,53 @@ __enter_kernel:
reloc_code_end:
+#ifdef CONFIG_EFI_STUB
+ .align 2
+_start: .long start - .
+
+ENTRY(efi_stub_entry)
+ @ allocate space on stack for passing current zImage address
+ @ and for the EFI stub to return the new entry point of
+ @ zImage, as EFI stub may copy the kernel. Pointer address
+ @ is passed in r2. r0 and r1 are passed through from the
+ @ EFI firmware to efi_entry
+ adr ip, _start
+ ldr r3, [ip]
+ add r3, r3, ip
+ stmfd sp!, {r3, lr}
+ mov r2, sp @ pass zImage address in r2
+ bl efi_entry
+
+ @ Check for error return from EFI stub. r0 has FDT address
+ @ or error code.
+ cmn r0, #1
+ beq efi_load_fail
+
+ @ Preserve return value of efi_entry() in r4
+ mov r4, r0
+ bl cache_clean_flush
+ bl cache_off
+
+ @ Set parameters for booting zImage according to boot protocol
+ @ put FDT address in r2, it was returned by efi_entry()
+ @ r1 is the machine type, and r0 needs to be 0
+ mov r0, #0
+ mov r1, #0xFFFFFFFF
+ mov r2, r4
+
+ @ Branch to (possibly) relocated zImage that is in [sp]
+ ldr lr, [sp]
+ ldr ip, =start_offset
+ add lr, lr, ip
+ mov pc, lr @ no mode switch
+
+efi_load_fail:
+ @ Return EFI_LOAD_ERROR to EFI firmware on error.
+ ldr r0, =0x80000001
+ ldmfd sp!, {ip, pc}
+ENDPROC(efi_stub_entry)
+#endif
+
.align
.section ".stack", "aw", %nobits
.L_user_stack: .space 4096
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index 2b60b843ac5e..81c493156ce8 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -48,6 +48,13 @@ SECTIONS
*(.rodata)
*(.rodata.*)
}
+ .data : {
+ /*
+ * The EFI stub always executes from RAM, and runs strictly before the
+ * decompressor, so we can make an exception for its r/w data, and keep it
+ */
+ *(.data.efistub)
+ }
.piggydata : {
*(.piggydata)
}
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 36ae8dff6089..550d1d1fda03 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *dev, u64 mask);
-/*
- * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
- * implementations, we don't provide a dma_cache_sync function so drivers using
- * this API are highlighted with build warnings.
- */
-#include <asm-generic/dma-mapping-common.h>
-
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
new file mode 100644
index 000000000000..b0c341d7ceee
--- /dev/null
+++ b/arch/arm/include/asm/efi.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_EFI_H
+#define __ASM_ARM_EFI_H
+
+#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
+#include <asm/early_ioremap.h>
+#include <asm/fixmap.h>
+#include <asm/highmem.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_EFI
+void efi_init(void);
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+
+#define efi_call_virt(f, ...) \
+({ \
+ efi_##f##_t *__f; \
+ efi_status_t __s; \
+ \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
+ __s = __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
+ __s; \
+})
+
+#define __efi_call_virt(f, ...) \
+({ \
+ efi_##f##_t *__f; \
+ \
+ efi_virtmap_load(); \
+ __f = efi.systab->runtime->f; \
+ __f(__VA_ARGS__); \
+ efi_virtmap_unload(); \
+})
+
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+ check_and_switch_context(mm, NULL);
+}
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
+#else
+#define efi_init()
+#endif /* CONFIG_EFI */
+
+/* arch specific definitions used by the stub code */
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
+/*
+ * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
+ * so we will reserve that amount of memory. We have no easy way to tell what
+ * the actual size of code + data of the uncompressed kernel will be.
+ * If this is insufficient, the decompressor will relocate itself out of the
+ * way before performing the decompression.
+ */
+#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
+
+/*
+ * The kernel zImage should preferably be located between 32 MB and 128 MB
+ * from the base of DRAM. The min address leaves space for a maximal size
+ * uncompressed image, and the max address is due to how the zImage decompressor
+ * picks a destination address.
+ */
+#define ZIMAGE_OFFSET_LIMIT SZ_128M
+#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE
+#define MAX_FDT_OFFSET ZIMAGE_OFFSET_LIMIT
+
+#endif /* __ASM_ARM_EFI_H */
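
For context, a hedged sketch of how a runtime-services wrapper would use the efi_call_virt() macro defined above; get_time follows the efi_runtime_services_t member naming, and the wrapper function itself is illustrative:

	static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
	{
		/* efi_virtmap_load()/unload() inside the macro switch to and
		 * from the EFI page tables around the firmware call. */
		return efi_call_virt(get_time, tm, tc);
	}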
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 9b32f76bb0dd..432ce8176498 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -26,7 +26,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 80856def2465..24d25ff34333 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -77,6 +77,7 @@ CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
+obj-$(CONFIG_EFI) += efi.o
ifneq ($(CONFIG_ARCH_EBSA110),y)
obj-y += io.o
diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c
new file mode 100644
index 000000000000..9f43ba012d10
--- /dev/null
+++ b/arch/arm/kernel/efi.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/efi.h>
+#include <asm/efi.h>
+#include <asm/mach/map.h>
+#include <asm/mmu_context.h>
+
+static int __init set_permissions(pte_t *ptep, pgtable_t token,
+ unsigned long addr, void *data)
+{
+ efi_memory_desc_t *md = data;
+ pte_t pte = *ptep;
+
+ if (md->attribute & EFI_MEMORY_RO)
+ pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+ if (md->attribute & EFI_MEMORY_XP)
+ pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
+ set_pte_ext(ptep, pte, PTE_EXT_NG);
+ return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+ efi_memory_desc_t *md)
+{
+ unsigned long base, size;
+
+ base = md->virt_addr;
+ size = md->num_pages << EFI_PAGE_SHIFT;
+
+ /*
+ * We can only use apply_to_page_range() if we can guarantee that the
+ * entire region was mapped using pages. This should be the case if the
+ * region does not cover any naturally aligned SECTION_SIZE sized
+ * blocks.
+ */
+ if (round_down(base + size, SECTION_SIZE) <
+ round_up(base, SECTION_SIZE) + SECTION_SIZE)
+ return apply_to_page_range(mm, base, size, set_permissions, md);
+
+ return 0;
+}
+
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+ struct map_desc desc = {
+ .virtual = md->virt_addr,
+ .pfn = __phys_to_pfn(md->phys_addr),
+ .length = md->num_pages * EFI_PAGE_SIZE,
+ };
+
+ /*
+ * Order is important here: memory regions may have all of the
+ * bits below set (and usually do), so we check them in order of
+ * preference.
+ */
+ if (md->attribute & EFI_MEMORY_WB)
+ desc.type = MT_MEMORY_RWX;
+ else if (md->attribute & EFI_MEMORY_WT)
+ desc.type = MT_MEMORY_RWX_NONCACHED;
+ else if (md->attribute & EFI_MEMORY_WC)
+ desc.type = MT_DEVICE_WC;
+ else
+ desc.type = MT_DEVICE;
+
+ create_mapping_late(mm, &desc, true);
+
+ /*
+ * If stricter permissions were specified, apply them now.
+ */
+ if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
+ return efi_set_mapping_permissions(mm, md);
+ return 0;
+}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 20edd349d379..1521049e6374 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -7,6 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
@@ -37,6 +38,7 @@
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
+#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
@@ -965,6 +967,7 @@ void __init setup_arch(char **cmdline_p)
early_paging_init(mdesc);
#endif
setup_dma_zone(mdesc);
+ efi_init();
sanity_check_meminfo();
arm_memblock_init(mdesc);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cafa9f432bde..aced38450018 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1821,6 +1821,63 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
__free_iova(mapping, iova, len);
}
+/**
+ * arm_iommu_map_resource - map a device resource for DMA
+ * @dev: valid struct device pointer
+ * @phys_addr: physical address of resource
+ * @size: size of resource to map
+ * @dir: DMA transfer direction
+ */
+static dma_addr_t arm_iommu_map_resource(struct device *dev,
+ phys_addr_t phys_addr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
+{
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+ dma_addr_t dma_addr;
+ int ret, prot;
+ phys_addr_t addr = phys_addr & PAGE_MASK;
+ unsigned int offset = phys_addr & ~PAGE_MASK;
+ size_t len = PAGE_ALIGN(size + offset);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ prot = __dma_direction_to_prot(dir) | IOMMU_MMIO;
+
+ ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_unmap_resource - unmap a device DMA resource
+ * @dev: valid struct device pointer
+ * @dma_handle: DMA address to resource
+ * @size: size of resource to map
+ * @dir: DMA transfer direction
+ */
+static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+ dma_addr_t iova = dma_handle & PAGE_MASK;
+ unsigned int offset = dma_handle & ~PAGE_MASK;
+ size_t len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
static void arm_iommu_sync_single_for_cpu(struct device *dev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
@@ -1866,6 +1923,9 @@ struct dma_map_ops iommu_ops = {
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
.set_dma_mask = arm_dma_set_mask,
+
+ .map_resource = arm_iommu_map_resource,
+ .unmap_resource = arm_iommu_unmap_resource,
};
struct dma_map_ops iommu_coherent_ops = {
@@ -1881,6 +1941,9 @@ struct dma_map_ops iommu_coherent_ops = {
.unmap_sg = arm_coherent_iommu_unmap_sg,
.set_dma_mask = arm_dma_set_mask,
+
+ .map_resource = arm_iommu_map_resource,
+ .unmap_resource = arm_iommu_unmap_resource,
};
/**
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 404f8ffcecc6..f4363ec3e451 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -68,7 +68,6 @@ config ARM64
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_EFFICIENT_UNALIGNED_ACCESS
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 2ae2bbd430cc..ccea82c2b089 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -64,8 +64,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
return dev->archdata.dma_coherent;
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
dma_addr_t dev_addr = (dma_addr_t)paddr;
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 311d90ce83ba..5196ccb0850f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -2,7 +2,9 @@
#define _ASM_EFI_H
#include <asm/io.h>
+#include <asm/mmu_context.h>
#include <asm/neon.h>
+#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#ifdef CONFIG_EFI
@@ -11,6 +13,10 @@ extern void efi_init(void);
#define efi_init()
#endif
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
+#define efi_set_mapping_permissions efi_create_mapping
+
#define arch_efi_call_virt_setup() \
({ \
kernel_neon_begin(); \
@@ -68,6 +74,11 @@ static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
* Services are enabled and the EFI_RUNTIME_SERVICES bit set.
*/
+static inline void efi_set_pgd(struct mm_struct *mm)
+{
+ switch_mm(NULL, mm, NULL);
+}
+
void efi_virtmap_load(void);
void efi_virtmap_unload(void);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 63a2474563fc..5c01626ae2da 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,403 +11,63 @@
*
*/
-#include <linux/atomic.h>
#include <linux/dmi.h>
#include <linux/efi.h>
-#include <linux/export.h>
-#include <linux/memblock.h>
-#include <linux/mm_types.h>
-#include <linux/bootmem.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/preempt.h>
-#include <linux/rbtree.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
+#include <linux/init.h>
-#include <asm/cacheflush.h>
#include <asm/efi.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(.data);
-struct efi_memory_map memmap;
-
-static u64 efi_system_table;
-
-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
-
-static struct mm_struct efi_mm = {
- .mm_rb = RB_ROOT,
- .pgd = efi_pgd,
- .mm_users = ATOMIC_INIT(2),
- .mm_count = ATOMIC_INIT(1),
- .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
- .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
- .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
-};
-
-static int __init is_normal_ram(efi_memory_desc_t *md)
-{
- if (md->attribute & EFI_MEMORY_WB)
- return 1;
- return 0;
-}
-
/*
- * Translate a EFI virtual address into a physical address: this is necessary,
- * as some data members of the EFI system table are virtually remapped after
- * SetVirtualAddressMap() has been called.
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable, everything else can be mapped with the XN bits
+ * set. Also take the new (optional) RO/XP bits into account.
*/
-static phys_addr_t efi_to_phys(unsigned long addr)
-{
- efi_memory_desc_t *md;
-
- for_each_efi_memory_desc(&memmap, md) {
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
- continue;
- if (md->virt_addr == 0)
- /* no virtual mapping has been installed by the stub */
- break;
- if (md->virt_addr <= addr &&
- (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
- return md->phys_addr + addr - md->virt_addr;
- }
- return addr;
-}
-
-static int __init uefi_init(void)
+static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
- efi_char16_t *c16;
- void *config_tables;
- u64 table_size;
- char vendor[100] = "unknown";
- int i, retval;
-
- efi.systab = early_memremap(efi_system_table,
- sizeof(efi_system_table_t));
- if (efi.systab == NULL) {
- pr_warn("Unable to map EFI system table.\n");
- return -ENOMEM;
- }
+ u64 attr = md->attribute;
+ u32 type = md->type;
- set_bit(EFI_BOOT, &efi.flags);
- set_bit(EFI_64BIT, &efi.flags);
-
- /*
- * Verify the EFI Table
- */
- if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
- pr_err("System table signature incorrect\n");
- retval = -EINVAL;
- goto out;
- }
- if ((efi.systab->hdr.revision >> 16) < 2)
- pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
- efi.systab->hdr.revision >> 16,
- efi.systab->hdr.revision & 0xffff);
-
- /* Show what we know for posterity */
- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
- sizeof(vendor) * sizeof(efi_char16_t));
- if (c16) {
- for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
- vendor[i] = c16[i];
- vendor[i] = '\0';
- early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
- }
-
- pr_info("EFI v%u.%.02u by %s\n",
- efi.systab->hdr.revision >> 16,
- efi.systab->hdr.revision & 0xffff, vendor);
-
- table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
- config_tables = early_memremap(efi_to_phys(efi.systab->tables),
- table_size);
- if (config_tables == NULL) {
- pr_warn("Unable to map EFI config table array.\n");
- retval = -ENOMEM;
- goto out;
- }
- retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
- sizeof(efi_config_table_64_t), NULL);
-
- early_memunmap(config_tables, table_size);
-out:
- early_memunmap(efi.systab, sizeof(efi_system_table_t));
- return retval;
-}
-
-/*
- * Return true for RAM regions we want to permanently reserve.
- */
-static __init int is_reserve_region(efi_memory_desc_t *md)
-{
- switch (md->type) {
- case EFI_LOADER_CODE:
- case EFI_LOADER_DATA:
- case EFI_BOOT_SERVICES_CODE:
- case EFI_BOOT_SERVICES_DATA:
- case EFI_CONVENTIONAL_MEMORY:
- case EFI_PERSISTENT_MEMORY:
- return 0;
- default:
- break;
- }
- return is_normal_ram(md);
-}
-
-static __init void reserve_regions(void)
-{
- efi_memory_desc_t *md;
- u64 paddr, npages, size;
-
- if (efi_enabled(EFI_DBG))
- pr_info("Processing EFI memory map:\n");
-
- /*
- * Discard memblocks discovered so far: if there are any at this
- * point, they originate from memory nodes in the DT, and UEFI
- * uses its own memory map instead.
- */
- memblock_dump_all();
- memblock_remove(0, (phys_addr_t)ULLONG_MAX);
-
- for_each_efi_memory_desc(&memmap, md) {
- paddr = md->phys_addr;
- npages = md->num_pages;
-
- if (efi_enabled(EFI_DBG)) {
- char buf[64];
-
- pr_info(" 0x%012llx-0x%012llx %s",
- paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
- efi_md_typeattr_format(buf, sizeof(buf), md));
- }
-
- memrange_efi_to_native(&paddr, &npages);
- size = npages << PAGE_SHIFT;
-
- if (is_normal_ram(md))
- early_init_dt_add_memory_arch(paddr, size);
-
- if (is_reserve_region(md)) {
- memblock_mark_nomap(paddr, size);
- if (efi_enabled(EFI_DBG))
- pr_cont("*");
- }
-
- if (efi_enabled(EFI_DBG))
- pr_cont("\n");
- }
-
- set_bit(EFI_MEMMAP, &efi.flags);
-}
-
-#ifdef CONFIG_PCI
-static bool efi_pci_overlaps_efifb(struct pci_bar_update_info *update_info)
-{
- u64 lfb_base = screen_info.lfb_base;
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- lfb_base |= (u64)screen_info.ext_lfb_base << 32;
-
- /* is the screen_info frame buffer inside the pci BAR? */
- if (lfb_base >= update_info->old_start &&
- (lfb_base + screen_info.lfb_size) <=
- (update_info->old_start + update_info->size))
- return true;
-
- return false;
-}
-
-static int efi_pci_notifier(struct notifier_block *self,
- unsigned long cmd, void *v)
-{
- struct pci_bar_update_info *update_info = v;
+ if (type == EFI_MEMORY_MAPPED_IO)
+ return PROT_DEVICE_nGnRE;
- /*
- * When we reallocate a BAR that contains our frame buffer, set the
- * screen_info base to where it belongs
- */
- if (efi_pci_overlaps_efifb(update_info)) {
- u64 diff = (update_info->new_start - update_info->old_start);
- u32 ext_lfb_base = screen_info.ext_lfb_base;
- u64 new_base = screen_info.lfb_base;
-
- if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
- new_base += (u64)ext_lfb_base << 32;
-
- new_base += diff;
-
- screen_info.lfb_base = new_base;
-
- ext_lfb_base = new_base >> 32;
- if (ext_lfb_base) {
- screen_info.capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- screen_info.ext_lfb_base = ext_lfb_base;
- }
- }
-
- return NOTIFY_OK;
-}
-static struct notifier_block efi_pci_notifier_block = {
- .notifier_call = efi_pci_notifier,
-};
-#else
-#define pci_notify_on_update_resource(a)
-#endif
-
-void __init efi_init(void)
-{
- struct efi_fdt_params params;
-
- /* Grab UEFI information placed in FDT by stub */
- if (!efi_get_fdt_params(&params))
- return;
-
- efi_system_table = params.system_table;
-
- memmap.phys_map = params.mmap;
- memmap.map = early_memremap(params.mmap, params.mmap_size);
- if (memmap.map == NULL) {
+ if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
+ "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
/*
- * If we are booting via UEFI, the UEFI memory map is the only
- * description of memory we have, so there is little point in
- * proceeding if we cannot access it.
- */
- panic("Unable to map EFI memory map.\n");
- }
- memmap.map_end = memmap.map + params.mmap_size;
- memmap.desc_size = params.desc_size;
- memmap.desc_version = params.desc_ver;
-
- if (uefi_init() < 0)
- return;
-
- reserve_regions();
- early_memunmap(memmap.map, params.mmap_size);
- memblock_reserve(params.mmap & PAGE_MASK,
- PAGE_ALIGN(params.mmap_size +
- (params.mmap & ~PAGE_MASK)));
-
- if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
- pci_notify_on_update_resource(&efi_pci_notifier_block);
- memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
- }
-}
-
-static int __init register_gop_device(void)
-{
- void *pd;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return 0;
-
- /* the efifb driver accesses screen_info directly, no need to pass it */
- pd = platform_device_register_simple("efi-framebuffer", 0, NULL, 0);
- return PTR_ERR_OR_ZERO(pd);
-}
-subsys_initcall(register_gop_device);
-
-static bool __init efi_virtmap_init(void)
-{
- efi_memory_desc_t *md;
-
- init_new_context(NULL, &efi_mm);
-
- for_each_efi_memory_desc(&memmap, md) {
- pgprot_t prot;
+ * If the region is not aligned to the page size of the OS, we
+	 * cannot use strict permissions, since that would also affect
+ * the mapping attributes of the adjacent regions.
+ */
+ return pgprot_val(PAGE_KERNEL_EXEC);
- if (!(md->attribute & EFI_MEMORY_RUNTIME))
- continue;
- if (md->virt_addr == 0)
- return false;
+ /* R-- */
+ if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
+ (EFI_MEMORY_XP | EFI_MEMORY_RO))
+ return pgprot_val(PAGE_KERNEL_RO);
- pr_info(" EFI remap 0x%016llx => %p\n",
- md->phys_addr, (void *)md->virt_addr);
+ /* R-X */
+ if (attr & EFI_MEMORY_RO)
+ return pgprot_val(PAGE_KERNEL_ROX);
- /*
- * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
- * executable, everything else can be mapped with the XN bits
- * set.
- */
- if (!is_normal_ram(md))
- prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
- !PAGE_ALIGNED(md->phys_addr))
- prot = PAGE_KERNEL_EXEC;
- else
- prot = PAGE_KERNEL;
+ /* RW- */
+ if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
+ return pgprot_val(PAGE_KERNEL);
- create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
- md->num_pages << EFI_PAGE_SHIFT,
- __pgprot(pgprot_val(prot) | PTE_NG));
- }
- return true;
+ /* RWX */
+ return pgprot_val(PAGE_KERNEL_EXEC);
}
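
For reference, the protection policy above collapses to a small decision table (same names as the hunk; MMIO and unaligned regions are special-cased before it):

    EFI_MEMORY_XP | EFI_MEMORY_RO  ->  PAGE_KERNEL_RO    /* R-- */
    EFI_MEMORY_RO only             ->  PAGE_KERNEL_ROX   /* R-X */
    EFI_MEMORY_XP, or type is not
      EFI_RUNTIME_SERVICES_CODE    ->  PAGE_KERNEL       /* RW- */
    anything else                  ->  PAGE_KERNEL_EXEC  /* RWX */
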
-/*
- * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
- * non-early mapping of the UEFI system table and virtual mappings for all
- * EFI_MEMORY_RUNTIME regions.
- */
-static int __init arm64_enable_runtime_services(void)
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
- u64 mapsize;
-
- if (!efi_enabled(EFI_BOOT)) {
- pr_info("EFI services will not be available.\n");
- return 0;
- }
-
- if (efi_runtime_disabled()) {
- pr_info("EFI runtime services will be disabled.\n");
- return 0;
- }
-
- pr_info("Remapping and enabling EFI services.\n");
-
- mapsize = memmap.map_end - memmap.map;
- memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
- mapsize);
- if (!memmap.map) {
- pr_err("Failed to remap EFI memory map\n");
- return -ENOMEM;
- }
- memmap.map_end = memmap.map + mapsize;
- efi.memmap = &memmap;
-
- efi.systab = (__force void *)ioremap_cache(efi_system_table,
- sizeof(efi_system_table_t));
- if (!efi.systab) {
- pr_err("Failed to remap EFI System Table\n");
- return -ENOMEM;
- }
- set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
- if (!efi_virtmap_init()) {
- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
- return -ENOMEM;
- }
-
- /* Set up runtime services function pointers */
- efi_native_runtime_setup();
- set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
-
- efi.runtime_version = efi.systab->hdr.revision;
+ pteval_t prot_val = create_mapping_protection(md);
+ create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
+ md->num_pages << EFI_PAGE_SHIFT,
+ __pgprot(prot_val | PTE_NG));
return 0;
}
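
efi_create_mapping() is the per-arch hook invoked by the shared ARM/arm64 EFI code once per EFI_MEMORY_RUNTIME descriptor; a minimal sketch of that caller (assuming an efi_virtmap_init()-style loop in the generic code, which is not part of this hunk):

    efi_memory_desc_t *md;

    for_each_efi_memory_desc(&memmap, md) {
        if (!(md->attribute & EFI_MEMORY_RUNTIME))
            continue;       /* only runtime regions need a virtual mapping */
        if (md->virt_addr == 0)
            return false;   /* the stub installed no virtual mapping */
        if (efi_create_mapping(&efi_mm, md))
            return false;
    }
    return true;
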
-early_initcall(arm64_enable_runtime_services);
static int __init arm64_dmi_init(void)
{
@@ -423,23 +83,6 @@ static int __init arm64_dmi_init(void)
}
core_initcall(arm64_dmi_init);
-static void efi_set_pgd(struct mm_struct *mm)
-{
- switch_mm(NULL, mm, NULL);
-}
-
-void efi_virtmap_load(void)
-{
- preempt_disable();
- efi_set_pgd(&efi_mm);
-}
-
-void efi_virtmap_unload(void)
-{
- efi_set_pgd(current->active_mm);
- preempt_enable();
-}
-
/*
 * UpdateCapsule() depends on the system being shut down via
* ResetSystem().
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 126fa53f7ae8..ac52fa9fa913 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -136,6 +136,7 @@ SECTIONS
CON_INITCALL
SECURITY_INITCALL
INIT_RAM_FS
+ *(.init.rodata.* .init.bss) /* from the EFI stub */
}
.exit.data : {
ARM_EXIT_KEEP(EXIT_DATA)
diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
index ae7ac9205d20..1115f2a645d1 100644
--- a/arch/avr32/include/asm/dma-mapping.h
+++ b/arch/avr32/include/asm/dma-mapping.h
@@ -1,350 +1,14 @@
#ifndef __ASM_AVR32_DMA_MAPPING_H
#define __ASM_AVR32_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/scatterlist.h>
-#include <asm/processor.h>
-#include <asm/cacheflush.h>
-#include <asm/io.h>
-
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
int direction);
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- /* Fix when needed. I really don't know of any limitations */
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
- return 0;
-}
+extern struct dma_map_ops avr32_dma_ops;
-/*
- * dma_map_single can't fail as it is implemented now.
- */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- return 0;
+ return &avr32_dma_ops;
}
-/**
- * dma_alloc_coherent - allocate consistent memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp);
-
-/**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_coherent
- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
- * @handle: device-view address returned from dma_alloc_coherent
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * while this call is executing, and after it returns, are illegal.
- */
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle);
-
-/**
- * dma_alloc_writecombine - allocate write-combining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA. This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp);
-
-/**
- * dma_free_writecombine - free memory allocated by dma_alloc_writecombine
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: size of memory originally requested in dma_alloc_writecombine
- * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
- * @handle: device-view address returned from dma_alloc_writecombine
- *
- * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_writecombine().
- *
- * References to memory and mappings associated with cpu_addr/handle
- * while this call is executing, and after it returns, are illegal.
- */
-extern void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle);
-
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_single() or dma_sync_single().
- */
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
- enum dma_data_direction direction)
-{
- dma_cache_sync(dev, cpu_addr, size, direction);
- return virt_to_bus(cpu_addr);
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
-
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed. The CPU
- * can regain ownership by calling dma_unmap_page() or dma_sync_single().
- */
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- return dma_map_single(dev, page_address(page) + offset,
- size, direction);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Unmap a single streaming mode DMA translation. The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- dma_unmap_single(dev, dma_address, size, direction);
-}
-
-/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i) {
- char *virt;
-
- sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
- virt = sg_virt(sg);
- dma_cache_sync(dev, virt, sg->length, direction);
- }
-
- return nents;
-}
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
-
-}
-
-/**
- * dma_sync_single_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the DMA mapping,
- * you must call this function before doing so. At the next point you
- * give the DMA address back to the card, you must first perform a
- * dma_sync_single_for_device, and then the device again owns the
- * buffer.
- */
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- /*
- * No need to do anything since the CPU isn't supposed to
- * touch this memory after we flushed it at mapping- or
- * sync-for-device time.
- */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything, that's all the pci API can do */
- dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything, that's all the pci API can do */
- dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
-}
-
-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
- */
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- /*
- * No need to do anything since the CPU isn't supposed to
- * touch this memory after we flushed it at mapping- or
- * sync-for-device time.
- */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nents, i)
- dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* __ASM_AVR32_DMA_MAPPING_H */
diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
index 50cdb5b10f0f..92cf1fb2b3e6 100644
--- a/arch/avr32/mm/dma-coherent.c
+++ b/arch/avr32/mm/dma-coherent.c
@@ -9,9 +9,14 @@
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
-#include <asm/addrspace.h>
+#include <asm/processor.h>
#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/addrspace.h>
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
@@ -93,60 +98,100 @@ static void __dma_free(struct device *dev, size_t size,
__free_page(page++);
}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+static void *avr32_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
struct page *page;
- void *ret = NULL;
+ dma_addr_t phys;
page = __dma_alloc(dev, size, handle, gfp);
- if (page)
- ret = phys_to_uncached(page_to_phys(page));
+ if (!page)
+ return NULL;
+ phys = page_to_phys(page);
- return ret;
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ /* Now, map the page into P3 with write-combining turned on */
+ *handle = phys;
+ return __ioremap(phys, size, _PAGE_BUFFER);
+ } else {
+ return phys_to_uncached(phys);
+ }
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_free(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
{
- void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
struct page *page;
- pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
- cpu_addr, (unsigned long)handle, (unsigned)size);
- BUG_ON(!virt_addr_valid(addr));
- page = virt_to_page(addr);
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ iounmap(cpu_addr);
+
+ page = phys_to_page(handle);
+ } else {
+ void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+
+ pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
+ cpu_addr, (unsigned long)handle, (unsigned)size);
+
+ BUG_ON(!virt_addr_valid(addr));
+ page = virt_to_page(addr);
+ }
+
__dma_free(dev, size, page, handle);
}
-EXPORT_SYMBOL(dma_free_coherent);
-void *dma_alloc_writecombine(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
- struct page *page;
- dma_addr_t phys;
+ void *cpu_addr = page_address(page) + offset;
- page = __dma_alloc(dev, size, handle, gfp);
- if (!page)
- return NULL;
+ dma_cache_sync(dev, cpu_addr, size, direction);
+ return virt_to_bus(cpu_addr);
+}
- phys = page_to_phys(page);
- *handle = phys;
+static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nents, i) {
+ char *virt;
- /* Now, map the page into P3 with write-combining turned on */
- return __ioremap(phys, size, _PAGE_BUFFER);
+ sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+ virt = sg_virt(sg);
+ dma_cache_sync(dev, virt, sg->length, direction);
+ }
+
+ return nents;
}
-EXPORT_SYMBOL(dma_alloc_writecombine);
-void dma_free_writecombine(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- struct page *page;
+ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
- iounmap(cpu_addr);
+static void avr32_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct scatterlist *sg;
- page = phys_to_page(handle);
- __dma_free(dev, size, page, handle);
+ for_each_sg(sglist, sg, nents, i)
+ dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}
-EXPORT_SYMBOL(dma_free_writecombine);
+
+struct dma_map_ops avr32_dma_ops = {
+ .alloc = avr32_dma_alloc,
+ .free = avr32_dma_free,
+ .map_page = avr32_dma_map_page,
+ .map_sg = avr32_dma_map_sg,
+ .sync_single_for_device = avr32_dma_sync_single_for_device,
+ .sync_sg_for_device = avr32_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(avr32_dma_ops);
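
With dma_alloc_writecombine()/dma_free_writecombine() gone, callers reach the write-combine path through the generic attrs interface of this kernel generation (struct dma_attrs); a minimal usage sketch, with dev, size and handle assumed to be set up by the caller:

    DEFINE_DMA_ATTRS(attrs);

    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
    /* dispatches to avr32_dma_alloc(), which takes the __ioremap() path */
    buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
    ...
    dma_free_attrs(dev, size, buf, handle, &attrs);
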
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index 054d9ec57d9d..3490570aaa82 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -8,36 +8,6 @@
#define _BLACKFIN_DMA_MAPPING_H
#include <asm/cacheflush.h>
-struct scatterlist;
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle);
-
-/*
- * Now for the API extensions over the pci_ one
- */
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_supported(d, m) (1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
@@ -66,102 +36,11 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
__dma_sync(addr, size, dir);
}
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_sync((dma_addr_t)ptr, size, dir);
- return (dma_addr_t) ptr;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- return dma_map_single(dev, page_address(page) + offset, size, dir);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir)
-{
- dma_unmap_single(dev, dma_addr, size, dir);
-}
+extern struct dma_map_ops bfin_dma_ops;
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir);
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG_ON(!valid_dma_direction(dir));
+ return &bfin_dma_ops;
}
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
-{
- _dma_sync(handle + offset, size, dir);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir)
-{
- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
-{
- BUG_ON(!valid_dma_direction(dir));
-}
-
-extern void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir);
-
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
-{
- _dma_sync((dma_addr_t)vaddr, size, dir);
-}
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* _BLACKFIN_DMA_MAPPING_H */
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index df437e52d9df..771afe6e4264 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -78,8 +78,8 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
spin_unlock_irqrestore(&dma_page_lock, flags);
}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *bfin_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -92,15 +92,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void
-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
* Streaming DMA mappings
@@ -112,9 +109,9 @@ void __dma_sync(dma_addr_t addr, size_t size,
}
EXPORT_SYMBOL(__dma_sync);
-int
-dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
- enum dma_data_direction direction)
+static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
@@ -126,10 +123,10 @@ dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
- int nelems, enum dma_data_direction direction)
+static void bfin_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg_list, int nelems,
+ enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;
@@ -139,4 +136,31 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
+
+ _dma_sync(handle, size, dir);
+ return handle;
+}
+
+static inline void bfin_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ _dma_sync(handle, size, dir);
+}
+
+struct dma_map_ops bfin_dma_ops = {
+ .alloc = bfin_dma_alloc,
+ .free = bfin_dma_free,
+
+ .map_page = bfin_dma_map_page,
+ .map_sg = bfin_dma_map_sg,
+
+ .sync_single_for_device = bfin_dma_sync_single_for_device,
+ .sync_sg_for_device = bfin_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(bfin_dma_ops);
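
The header now only provides get_dma_ops(); dma_map_single() and friends come from the common dma-mapping code, which dispatches through the ops table. A paraphrased sketch of that dispatch (modelled on asm-generic/dma-mapping-common.h of this era, debug hooks omitted):

    static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                    size_t size, enum dma_data_direction dir,
                    struct dma_attrs *attrs)
    {
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* map_page is the one mandatory streaming callback */
        return ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size, dir, attrs);
    }
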
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 77ea09b8bce1..79049d432d3c 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -17,6 +17,7 @@ config C6X
select OF_EARLY_FLATTREE
select GENERIC_CLOCKEVENTS
select MODULES_USE_ELF_RELA
+ select ARCH_NO_COHERENT_DMA_MMAP
config MMU
def_bool n
diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
index bbd7774e4d4e..6b5cd7b0cf32 100644
--- a/arch/c6x/include/asm/dma-mapping.h
+++ b/arch/c6x/include/asm/dma-mapping.h
@@ -12,104 +12,22 @@
#ifndef _ASM_C6X_DMA_MAPPING_H
#define _ASM_C6X_DMA_MAPPING_H
-#include <linux/dma-debug.h>
-#include <asm-generic/dma-coherent.h>
-
-#define dma_supported(d, m) 1
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
- return dma_addr == ~0;
-}
-
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
- size_t size, enum dma_data_direction dir);
+#define DMA_ERROR_CODE ~0
-extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir);
+extern struct dma_map_ops c6x_dma_ops;
-extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction);
-
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction);
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- dma_addr_t handle;
-
- handle = dma_map_single(dev, page_address(page) + offset, size, dir);
-
- debug_dma_map_page(dev, page, offset, size, dir, handle, false);
-
- return handle;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
- dma_unmap_single(dev, handle, size, dir);
-
- debug_dma_unmap_page(dev, handle, size, dir, false);
+ return &c6x_dma_ops;
}
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir);
-
-extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
- size_t size,
- enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir);
-
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir);
-
extern void coherent_mem_init(u32 start, u32 size);
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs);
#endif /* _ASM_C6X_DMA_MAPPING_H */
diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
index ab7b12de144d..8a80f3a250c0 100644
--- a/arch/c6x/kernel/dma.c
+++ b/arch/c6x/kernel/dma.c
@@ -36,110 +36,101 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size,
}
}
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction dir)
+static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
- dma_addr_t addr = virt_to_phys(ptr);
+ dma_addr_t handle = virt_to_phys(page_address(page) + offset);
- c6x_dma_sync(addr, size, dir);
-
- debug_dma_map_page(dev, virt_to_page(ptr),
- (unsigned long)ptr & ~PAGE_MASK, size,
- dir, addr, true);
- return addr;
+ c6x_dma_sync(handle, size, dir);
+ return handle;
}
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
{
c6x_dma_sync(handle, size, dir);
-
- debug_dma_unmap_page(dev, handle, size, dir, true);
}
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
- for_each_sg(sglist, sg, nents, i)
- sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
- dir);
-
- debug_dma_map_sg(dev, sglist, nents, nents, dir);
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ c6x_dma_sync(sg->dma_address, sg->length, dir);
+ }
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sglist, sg, nents, i)
- dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
+ c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
- debug_dma_unmap_sg(dev, sglist, nents, dir);
}
-EXPORT_SYMBOL(dma_unmap_sg);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir)
{
c6x_dma_sync(handle, size, dir);
- debug_dma_sync_single_for_cpu(dev, handle, size, dir);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void c6x_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
c6x_dma_sync(handle, size, dir);
- debug_dma_sync_single_for_device(dev, handle, size, dir);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sglist, sg, nents, i)
- dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
sg->length, dir);
- debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void c6x_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sglist, sg, nents, i)
- dma_sync_single_for_device(dev, sg_dma_address(sg),
+ c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
sg->length, dir);
- debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+struct dma_map_ops c6x_dma_ops = {
+ .alloc = c6x_dma_alloc,
+ .free = c6x_dma_free,
+ .map_page = c6x_dma_map_page,
+ .unmap_page = c6x_dma_unmap_page,
+ .map_sg = c6x_dma_map_sg,
+ .unmap_sg = c6x_dma_unmap_sg,
+ .sync_single_for_device = c6x_dma_sync_single_for_device,
+ .sync_single_for_cpu = c6x_dma_sync_single_for_cpu,
+ .sync_sg_for_device = c6x_dma_sync_sg_for_device,
+ .sync_sg_for_cpu = c6x_dma_sync_sg_for_cpu,
+};
+EXPORT_SYMBOL(c6x_dma_ops);
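
Because DMA_ERROR_CODE is defined as all-ones, drivers keep using the generic error check, which compares against it when the ops table has no .mapping_error callback; a minimal usage sketch:

    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, handle))   /* true when handle == ~0 */
        return -ENOMEM;
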
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
index 4187e5180373..f7ee63af2541 100644
--- a/arch/c6x/mm/dma-coherent.c
+++ b/arch/c6x/mm/dma-coherent.c
@@ -73,8 +73,8 @@ static void __free_dma_pages(u32 addr, int order)
* Allocate DMA coherent memory space and return both the kernel
* virtual and DMA address for that space.
*/
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
u32 paddr;
int order;
@@ -94,13 +94,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return phys_to_virt(paddr);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
* Free DMA coherent memory as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order;
@@ -111,7 +110,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
__free_dma_pages(virt_to_phys(vaddr), order);
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
* Initialise the coherent DMA memory allocator using the given uncached region.
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
index ee55578d9834..8d5efa58cce1 100644
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -16,21 +16,18 @@
#include <linux/gfp.h>
#include <asm/io.h>
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *v32_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
- int order = get_order(size);
+
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
- if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
- return ret;
-
if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, order);
+ ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static inline dma_addr_t v32_dma_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- int order = get_order(size);
+ return page_to_phys(page) + offset;
+}
- if (!dma_release_from_coherent(dev, order, vaddr))
- free_pages((unsigned long)vaddr, order);
+static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ printk("Map sg\n");
+ return nents;
+}
+
+static inline int v32_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
}
+struct dma_map_ops v32_dma_ops = {
+ .alloc = v32_dma_alloc,
+ .free = v32_dma_free,
+ .map_page = v32_dma_map_page,
+ .map_sg = v32_dma_map_sg,
+ .dma_supported = v32_dma_supported,
+};
+EXPORT_SYMBOL(v32_dma_ops);
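
v32_dma_ops deliberately leaves the unmap and sync callbacks NULL: the old inline versions were empty, and the common code skips absent callbacks. A paraphrased sketch of that behaviour (modelled on the common unmap helper of this era):

    static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                    size_t size, enum dma_data_direction dir,
                    struct dma_attrs *attrs)
    {
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->unmap_page)    /* NULL for cris-v32, so a no-op */
            ops->unmap_page(dev, addr, size, dir, attrs);
    }
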
diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
index 57f794ee6039..5a370178a0e9 100644
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
@@ -1,156 +1,20 @@
-/* DMA mapping. Nothing tricky here, just virt_to_phys */
-
#ifndef _ASM_CRIS_DMA_MAPPING_H
#define _ASM_CRIS_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
#ifdef CONFIG_PCI
-#include <asm-generic/dma-coherent.h>
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+extern struct dma_map_ops v32_dma_ops;
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-#else
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG();
- return NULL;
+ return &v32_dma_ops;
}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+#else
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- BUG();
+ BUG();
+ return NULL;
}
#endif
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- printk("Map sg\n");
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA.
- */
- if(mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -158,15 +22,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
-
#endif
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 34aa19352dc1..4211be88e082 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -14,6 +14,7 @@ config FRV
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_NO_COHERENT_DMA_MMAP
config ZONE_DMA
bool
diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
index 2840adcd6d92..9a82bfa4303b 100644
--- a/arch/frv/include/asm/dma-mapping.h
+++ b/arch/frv/include/asm/dma-mapping.h
@@ -1,128 +1,17 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/device.h>
-#include <linux/scatterlist.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
-#include <asm/io.h>
-
-/*
- * See Documentation/DMA-API.txt for the description of how the
- * following DMA API should work.
- */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
extern unsigned long __nongprelbss dma_coherent_mem_start;
extern unsigned long __nongprelbss dma_coherent_mem_end;
-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
-
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
-
-static inline
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-
-static inline
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-extern
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction);
-
-static inline
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- flush_write_buffers();
-}
-
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline
-int dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < 0x00ffffff)
- return 0;
-
- return 1;
-}
+extern struct dma_map_ops frv_dma_ops;
-static inline
-int dma_set_mask(struct device *dev, u64 mask)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
+ return &frv_dma_ops;
}
static inline
@@ -132,19 +21,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
flush_write_buffers();
}
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
-
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
index 8eeea0d77aad..082be49b5df0 100644
--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
+++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
@@ -34,7 +34,8 @@ struct dma_alloc_record {
static DEFINE_SPINLOCK(dma_alloc_lock);
static LIST_HEAD(dma_alloc_list);
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
@@ -84,9 +85,8 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
return NULL;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct dma_alloc_record *rec;
unsigned long flags;
@@ -105,22 +105,9 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_
BUG();
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
-
- return virt_to_bus(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -135,14 +122,49 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
+static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(dma_map_page);
+static void frv_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static void frv_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+
+static int frv_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
+}
+
+struct dma_map_ops frv_dma_ops = {
+ .alloc = frv_dma_alloc,
+ .free = frv_dma_free,
+ .map_page = frv_dma_map_page,
+ .map_sg = frv_dma_map_sg,
+ .sync_single_for_device = frv_dma_sync_single_for_device,
+ .sync_sg_for_device = frv_dma_sync_sg_for_device,
+ .dma_supported = frv_dma_supported,
+};
+EXPORT_SYMBOL(frv_dma_ops);
diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
index 4d1f01dc46e5..316b7b65348d 100644
--- a/arch/frv/mb93090-mb00/pci-dma.c
+++ b/arch/frv/mb93090-mb00/pci-dma.c
@@ -18,7 +18,9 @@
#include <linux/scatterlist.h>
#include <asm/io.h>
-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+static void *frv_dma_alloc(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
void *ret;
@@ -29,29 +31,15 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
consistent_free(vaddr);
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-
- frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
-
- return virt_to_bus(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
unsigned long dampr2;
void *vaddr;
@@ -79,14 +67,48 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
+static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
- BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(dma_map_page);
+static void frv_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+static void frv_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ flush_write_buffers();
+}
+
+
+static int frv_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
+}
+
+struct dma_map_ops frv_dma_ops = {
+ .alloc = frv_dma_alloc,
+ .free = frv_dma_free,
+ .map_page = frv_dma_map_page,
+ .map_sg = frv_dma_map_sg,
+ .sync_single_for_device = frv_dma_sync_single_for_device,
+ .sync_sg_for_device = frv_dma_sync_sg_for_device,
+ .dma_supported = frv_dma_supported,
+};
+EXPORT_SYMBOL(frv_dma_ops);
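
frv_dma_supported() rejects masks below 0x00ffffff because coherent allocations fall back to GFP_DMA, and nothing tighter than that 24-bit zone can be guaranteed. A sketch of the numbers involved; DMA_BIT_MASK() matches the real helper in linux/dma-mapping.h, while the probe function is purely illustrative:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static int example_probe(struct device *dev)	/* hypothetical driver */
{
	/* DMA_BIT_MASK(24) == 0x00ffffff, the smallest mask frv accepts;
	 * dma_set_mask() fails with -EIO once ->dma_supported() says no. */
	return dma_set_mask(dev, DMA_BIT_MASK(24));
}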
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index dd3ac75776ad..c642ea036048 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -15,7 +15,6 @@ config H8300
select OF_IRQ
select OF_EARLY_FLATTREE
select HAVE_MEMBLOCK
- select HAVE_DMA_ATTRS
select CLKSRC_OF
config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/h8300/include/asm/dma-mapping.h b/arch/h8300/include/asm/dma-mapping.h
index d9b5b806afe6..7ac7fadffed0 100644
--- a/arch/h8300/include/asm/dma-mapping.h
+++ b/arch/h8300/include/asm/dma-mapping.h
@@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &h8300_dma_map_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
#endif
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 4dc89d1f9c48..57298e7b4867 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -27,7 +27,6 @@ config HEXAGON
select GENERIC_CLOCKEVENTS_BROADCAST
select MODULES_USE_ELF_RELA
select GENERIC_CPU_DEVICES
- select HAVE_DMA_ATTRS
---help---
Qualcomm Hexagon is a processor architecture designed for high
performance and low power across a wide variety of applications.
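These "select HAVE_DMA_ATTRS" deletions (h8300 and hexagon above, more below) track the retirement of a fallback in the common header: before this series, linux/dma-mapping.h mapped the *_attrs calls back to the attr-less ones on architectures that had not opted in. Condensed from memory of the pre-4.5 header, so treat it as a sketch:

#ifndef CONFIG_HAVE_DMA_ATTRS
struct dma_attrs;

#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
	dma_map_single(dev, cpu_addr, size, dir)
#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
	dma_unmap_single(dev, dma_addr, size, dir)
/* ...likewise for dma_map_sg_attrs()/dma_unmap_sg_attrs()... */
#endif
/* Once every arch implements dma_map_ops, attrs are available
 * unconditionally and the Kconfig symbol has no remaining users. */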
diff --git a/arch/hexagon/include/asm/dma-mapping.h b/arch/hexagon/include/asm/dma-mapping.h
index 268fde8a4575..aa6203464520 100644
--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -49,8 +49,6 @@ extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9efd0e1e5e57..92284c894d54 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,7 +26,6 @@ config IA64
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
- select HAVE_DMA_ATTRS
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 9beccf8010bd..d472805edfa9 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
#define get_dma_ops(dev) platform_dma_get_ops(dev)
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
index 05aa53594d49..96c536194287 100644
--- a/arch/m68k/include/asm/dma-mapping.h
+++ b/arch/m68k/include/asm/dma-mapping.h
@@ -1,123 +1,17 @@
#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H
-#include <asm/cache.h>
+extern struct dma_map_ops m68k_dma_ops;
-struct scatterlist;
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- return 0;
-}
-
-extern void *dma_alloc_coherent(struct device *, size_t,
- dma_addr_t *, gfp_t);
-extern void dma_free_coherent(struct device *, size_t,
- void *, dma_addr_t);
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- /* attrs is not supported and ignored */
- return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- /* attrs is not supported and ignored */
- dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ return &m68k_dma_ops;
}
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t flag)
-{
- return dma_alloc_coherent(dev, size, handle, flag);
-}
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *addr, dma_addr_t handle)
-{
- dma_free_coherent(dev, size, addr, handle);
-}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
/* we use coherent allocation, so not much to do here. */
}
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
- enum dma_data_direction);
-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-extern dma_addr_t dma_map_page(struct device *, struct page *,
- unsigned long, size_t size,
- enum dma_data_direction);
-static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction dir)
-{
-}
-
-extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
- enum dma_data_direction);
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir)
-{
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- /* just sync everything for now */
- dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
-{
- return 0;
-}
-
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* _M68K_DMA_MAPPING_H */
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 564665f9af30..cbc78b4117b5 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -18,8 +18,8 @@
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t flag)
+static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t flag, struct dma_attrs *attrs)
{
struct page *page, **map;
pgprot_t pgprot;
@@ -61,8 +61,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return addr;
}
-void dma_free_coherent(struct device *dev, size_t size,
- void *addr, dma_addr_t handle)
+static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
vfree(addr);
@@ -72,8 +72,8 @@ void dma_free_coherent(struct device *dev, size_t size,
#include <asm/cacheflush.h>
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *m68k_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
/* ignore region specifiers */
@@ -90,19 +90,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
-EXPORT_SYMBOL(dma_alloc_coherent);
-EXPORT_SYMBOL(dma_free_coherent);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir)
+static void m68k_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_BIDIRECTIONAL:
@@ -118,10 +115,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
break;
}
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir)
+static void m68k_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nents, enum dma_data_direction dir)
{
int i;
struct scatterlist *sg;
@@ -131,31 +127,19 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
dir);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
- enum dma_data_direction dir)
-{
- dma_addr_t handle = virt_to_bus(addr);
-
- dma_sync_single_for_device(dev, handle, size, dir);
- return handle;
-}
-EXPORT_SYMBOL(dma_map_single);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir)
+static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
{
dma_addr_t handle = page_to_phys(page) + offset;
dma_sync_single_for_device(dev, handle, size, dir);
return handle;
}
-EXPORT_SYMBOL(dma_map_page);
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction dir)
+static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -167,4 +151,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
}
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
+
+struct dma_map_ops m68k_dma_ops = {
+ .alloc = m68k_dma_alloc,
+ .free = m68k_dma_free,
+ .map_page = m68k_dma_map_page,
+ .map_sg = m68k_dma_map_sg,
+ .sync_single_for_device = m68k_dma_sync_single_for_device,
+ .sync_sg_for_device = m68k_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(m68k_dma_ops);
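
m68k_dma_map_sg() syncs each entry for the device, preserving what the removed dma_map_sg() export did. For context, a sketch of the driver-side scatter/gather pattern being served; the function name and hw_write_desc() are illustrative, the DMA-API calls are real:

static int example_start_tx(struct device *dev, struct scatterlist *sgl,
			    int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		/* one hardware descriptor per mapped segment, e.g.:
		 * hw_write_desc(i, sg_dma_address(sg), sg_dma_len(sg)); */
	}
	return 0;
}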
diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
index eb5cdec94be0..27af5d479ce6 100644
--- a/arch/metag/include/asm/dma-mapping.h
+++ b/arch/metag/include/asm/dma-mapping.h
@@ -1,177 +1,11 @@
#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H
-#include <linux/mm.h>
+extern struct dma_map_ops metag_dma_ops;
-#include <asm/cache.h>
-#include <asm/io.h>
-#include <linux/scatterlist.h>
-#include <asm/bug.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
-
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(size == 0);
- dma_sync_for_device(ptr, size, direction);
- return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nents == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- dma_sync_for_device(sg_virt(sg), sg->length, direction);
- }
-
- return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
- direction);
- return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nhwentries == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nhwentries, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
- }
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
- direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
- direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i)
- dma_sync_for_device(sg_virt(sg), sg->length, direction);
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- return 0;
-}
-
-#define dma_supported(dev, mask) (1)
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
+ return &metag_dma_ops;
}
/*
@@ -184,11 +18,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
-/* drivers/base/dma-mapping.c */
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif
diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
index c700d625067a..e12368d02155 100644
--- a/arch/metag/kernel/dma.c
+++ b/arch/metag/kernel/dma.c
@@ -171,8 +171,8 @@ out:
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp)
+static void *metag_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
struct page *page;
struct metag_vm_region *c;
@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
no_page:
return NULL;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
*/
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
struct metag_vm_region *c;
unsigned long flags, addr;
@@ -329,16 +328,19 @@ no_area:
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(dma_free_coherent);
-
-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
- int ret = -ENXIO;
-
unsigned long flags, user_size, kern_size;
struct metag_vm_region *c;
+ int ret = -ENXIO;
+
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_coherent);
-
-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-EXPORT_SYMBOL(dma_mmap_writecombine);
-
-
-
-
/*
* Initialise the consistent memory allocation.
*/
@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init);
/*
* make an area consistent to devices.
*/
-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
{
/*
* Ensure any writes get through the write combiner. This is necessary
@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
wmb();
}
-EXPORT_SYMBOL(dma_sync_for_device);
/*
* make an area consistent to the core.
*/
-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
{
/*
* Hardware L2 cache prefetch doesn't occur across 4K physical
@@ -497,4 +479,100 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
rmb();
}
-EXPORT_SYMBOL(dma_sync_for_cpu);
+
+static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+ dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+ direction);
+ return page_to_phys(page) + offset;
+}
+
+static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+}
+
+static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ dma_sync_for_device(sg_virt(sg), sg->length, direction);
+ }
+
+ return nents;
+}
+
+
+static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nhwentries, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nhwentries, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+ }
+}
+
+static void metag_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+}
+
+static void metag_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+}
+
+static void metag_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i)
+ dma_sync_for_device(sg_virt(sg), sg->length, direction);
+}
+
+struct dma_map_ops metag_dma_ops = {
+ .alloc = metag_dma_alloc,
+ .free = metag_dma_free,
+	.map_page = metag_dma_map_page,
+	.unmap_page = metag_dma_unmap_page,
+	.map_sg = metag_dma_map_sg,
+	.unmap_sg = metag_dma_unmap_sg,
+	.sync_single_for_device = metag_dma_sync_single_for_device,
+	.sync_single_for_cpu = metag_dma_sync_single_for_cpu,
+	.sync_sg_for_cpu = metag_dma_sync_sg_for_cpu,
+	.sync_sg_for_device = metag_dma_sync_sg_for_device,
+	.mmap = metag_dma_mmap,
+};
+EXPORT_SYMBOL(metag_dma_ops);
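
metag_dma_mmap() above folds the removed dma_mmap_writecombine() into the generic ->mmap hook, keyed on DMA_ATTR_WRITE_COMBINE. A sketch of the caller side under the 4.5-era attrs API; init_dma_attrs(), dma_set_attr() and dma_mmap_attrs() are the real helpers of that period, the driver structure is hypothetical:

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *ed = file->private_data;	/* hypothetical */
	struct dma_attrs attrs;

	init_dma_attrs(&attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

	/* Lands in metag_dma_mmap(), which picks pgprot_writecombine(). */
	return dma_mmap_attrs(ed->dev, vma, ed->vaddr, ed->dma_handle,
			      vma->vm_end - vma->vm_start, &attrs);
}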
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 0bce820428fc..d1f589101438 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -19,7 +19,6 @@ config MICROBLAZE
select HAVE_ARCH_KGDB
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/microblaze/include/asm/dma-mapping.h b/arch/microblaze/include/asm/dma-mapping.h
index 24b12970c9cf..1884783d15c0 100644
--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &dma_direct_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline void __dma_sync(unsigned long paddr,
size_t size, enum dma_data_direction direction)
{
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index db459612de44..bdf89566ea72 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -31,7 +31,6 @@ config MIPS
select RTC_LIB if !MACH_LOONGSON64
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS
select HAVE_DMA_API_DEBUG
select GENERIC_IRQ_PROBE
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index e604f760c4a0..12fa79e2f1b4 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -29,8 +29,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
static inline void dma_mark_clean(void *addr, size_t size) {}
-#include <asm-generic/dma-mapping-common.h>
-
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index e5f4a197a7ea..6bc9389efa67 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -15,6 +15,7 @@ config MN10300
select OLD_SIGSUSPEND3
select OLD_SIGACTION
select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_NO_COHERENT_DMA_MMAP
config AM33_2
def_bool n
diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
index a18abfc558eb..1dcd44757f32 100644
--- a/arch/mn10300/include/asm/dma-mapping.h
+++ b/arch/mn10300/include/asm/dma-mapping.h
@@ -11,154 +11,14 @@
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
#include <asm/cache.h>
#include <asm/io.h>
-/*
- * See Documentation/DMA-API.txt for the description of how the
- * following DMA API should work.
- */
-
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
-
-extern void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
-
-static inline
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- mn10300_dcache_flush_inv();
- return virt_to_bus(ptr);
-}
-
-static inline
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
-{
- struct scatterlist *sg;
- int i;
-
- BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nents == 0 || sglist[0].length == 0);
-
- for_each_sg(sglist, sg, nents, i) {
- BUG_ON(!sg_page(sg));
-
- sg->dma_address = sg_phys(sg);
- }
+extern struct dma_map_ops mn10300_dma_ops;
- mn10300_dcache_flush_inv();
- return nents;
-}
-
-static inline
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-}
-
-static inline
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
- return page_to_bus(page) + offset;
-}
-
-static inline
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(direction == DMA_NONE);
-}
-
-static inline
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
-}
-
-static inline
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
-static inline
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
-
-static inline
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
-{
-}
-
-static inline
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
-{
- mn10300_dcache_flush_inv();
-}
-
-static inline
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
-static inline
-int dma_supported(struct device *dev, u64 mask)
-{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
- * guarantee allocations that must be within a tighter range than
- * GFP_DMA
- */
- if (mask < 0x00ffffff)
- return 0;
- return 1;
-}
-
-static inline
-int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
- return 0;
+ return &mn10300_dma_ops;
}
static inline
@@ -168,19 +28,4 @@ void dma_cache_sync(void *vaddr, size_t size,
mn10300_dcache_flush_inv();
}
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
-
#endif
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index e244ebe637e1..8842394cb49a 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -20,8 +20,8 @@
static unsigned long pci_sram_allocated = 0xbc000000;
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int gfp)
+static void *mn10300_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
unsigned long addr;
void *ret;
@@ -61,10 +61,9 @@ done:
printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
@@ -73,4 +72,60 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages(addr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
+
+static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg->dma_address = sg_phys(sg);
+ }
+
+ mn10300_dcache_flush_inv();
+ return nents;
+}
+
+static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+ return page_to_bus(page) + offset;
+}
+
+static void mn10300_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction)
+{
+ mn10300_dcache_flush_inv();
+}
+
+static void mn10300_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction direction)
+{
+ mn10300_dcache_flush_inv();
+}
+
+static int mn10300_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
+ * guarantee allocations that must be within a tighter range than
+ * GFP_DMA
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+ return 1;
+}
+
+struct dma_map_ops mn10300_dma_ops = {
+ .alloc = mn10300_dma_alloc,
+ .free = mn10300_dma_free,
+ .map_page = mn10300_dma_map_page,
+ .map_sg = mn10300_dma_map_sg,
+ .sync_single_for_device = mn10300_dma_sync_single_for_device,
+	.sync_sg_for_device = mn10300_dma_sync_sg_for_device,
+	.dma_supported = mn10300_dma_supported,
+};
+EXPORT_SYMBOL(mn10300_dma_ops);
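
mn10300 selects ARCH_NO_COHERENT_DMA_MMAP in the Kconfig hunk above, replacing its inline dma_mmap_coherent() stub that returned -EINVAL: the refusal now lives once in the common code. Roughly how the 4.5-era dma_common_mmap() honours it (condensed sketch, remap details elided):

int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
	/* ...pgprot_noncached() + remap_pfn_range() of the buffer... */
#endif
	/* With the option selected, user mappings simply fail. */
	return ret;
}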
diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
index b5567233f7f1..bec8ac8e6ad2 100644
--- a/arch/nios2/include/asm/dma-mapping.h
+++ b/arch/nios2/include/asm/dma-mapping.h
@@ -10,131 +10,20 @@
#ifndef _ASM_NIOS2_DMA_MAPPING_H
#define _ASM_NIOS2_DMA_MAPPING_H
-#include <linux/scatterlist.h>
-#include <linux/cache.h>
-#include <asm/cacheflush.h>
+extern struct dma_map_ops nios2_dma_ops;
-static inline void __dma_sync_for_device(void *vaddr, size_t size,
- enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- switch (direction) {
- case DMA_FROM_DEVICE:
- invalidate_dcache_range((unsigned long)vaddr,
- (unsigned long)(vaddr + size));
- break;
- case DMA_TO_DEVICE:
- /*
- * We just need to flush the caches here , but Nios2 flush
- * instruction will do both writeback and invalidate.
- */
- case DMA_BIDIRECTIONAL: /* flush and invalidate */
- flush_dcache_range((unsigned long)vaddr,
- (unsigned long)(vaddr + size));
- break;
- default:
- BUG();
- }
-}
-
-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
- enum dma_data_direction direction)
-{
- switch (direction) {
- case DMA_BIDIRECTIONAL:
- case DMA_FROM_DEVICE:
- invalidate_dcache_range((unsigned long)vaddr,
- (unsigned long)(vaddr + size));
- break;
- case DMA_TO_DEVICE:
- break;
- default:
- BUG();
- }
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- __dma_sync_for_device(ptr, size, direction);
- return virt_to_phys(ptr);
-}
-
-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction)
-{
-}
-
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction direction);
-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_for_device(struct device *dev,
- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
-extern void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle, unsigned long offset, size_t size,
- enum dma_data_direction direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction);
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
+ return &nios2_dma_ops;
}
/*
-* dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
-* do any flushing here.
-*/
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+ */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
}
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
#endif /* _ASM_NIOS2_DMA_MAPPING_H */
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
index ac5da7594f0b..90422c367ed3 100644
--- a/arch/nios2/mm/dma-mapping.c
+++ b/arch/nios2/mm/dma-mapping.c
@@ -20,9 +20,46 @@
#include <linux/cache.h>
#include <asm/cacheflush.h>
+static inline void __dma_sync_for_device(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ case DMA_TO_DEVICE:
+ /*
+	 * We just need to flush the caches here, but the Nios2 flush
+ * instruction will do both writeback and invalidate.
+ */
+ case DMA_BIDIRECTIONAL: /* flush and invalidate */
+ flush_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ default:
+ BUG();
+ }
+}
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_BIDIRECTIONAL:
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range((unsigned long)vaddr,
+ (unsigned long)(vaddr + size));
+ break;
+ case DMA_TO_DEVICE:
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void *nios2_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -45,24 +82,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
return ret;
}
-EXPORT_SYMBOL(dma_alloc_coherent);
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
free_pages(addr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
+static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
- BUG_ON(!valid_dma_direction(direction));
-
for_each_sg(sg, sg, nents, i) {
void *addr;
@@ -75,40 +109,32 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction)
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- void *addr;
-
- BUG_ON(!valid_dma_direction(direction));
+ void *addr = page_address(page) + offset;
- addr = page_address(page) + offset;
__dma_sync_for_device(addr, size, direction);
-
return page_to_phys(page) + offset;
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- BUG_ON(!valid_dma_direction(direction));
-
__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
void *addr;
int i;
- BUG_ON(!valid_dma_direction(direction));
-
if (direction == DMA_TO_DEVICE)
return;
@@ -118,69 +144,54 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
__dma_sync_for_cpu(addr, sg->length, direction);
}
}
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
- __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- BUG_ON(!valid_dma_direction(direction));
-
- __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
-}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static void nios2_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- BUG_ON(!valid_dma_direction(direction));
-
__dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static void nios2_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
- BUG_ON(!valid_dma_direction(direction));
-
__dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void nios2_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
int i;
- BUG_ON(!valid_dma_direction(direction));
-
/* Make sure that gcc doesn't leave the empty loop body. */
for_each_sg(sg, sg, nelems, i)
__dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction direction)
+static void nios2_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
{
int i;
- BUG_ON(!valid_dma_direction(direction));
-
/* Make sure that gcc doesn't leave the empty loop body. */
for_each_sg(sg, sg, nelems, i)
__dma_sync_for_device(sg_virt(sg), sg->length, direction);
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+struct dma_map_ops nios2_dma_ops = {
+ .alloc = nios2_dma_alloc,
+ .free = nios2_dma_free,
+ .map_page = nios2_dma_map_page,
+ .unmap_page = nios2_dma_unmap_page,
+ .map_sg = nios2_dma_map_sg,
+ .unmap_sg = nios2_dma_unmap_sg,
+ .sync_single_for_device = nios2_dma_sync_single_for_device,
+ .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = nios2_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(nios2_dma_ops);
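
nios2's sync hooks invalidate on the way to the CPU and write back on the way to the device, which is exactly the buffer-ownership handshake of the streaming API. A sketch of the receive-side pattern they exist for; the function and names are illustrative, the two sync calls are the real API:

static void example_poll_rx(struct device *dev, dma_addr_t handle,
			    void *rx_buf, size_t len)
{
	/* Device owned the buffer while DMA was in flight; take it back.
	 * On nios2 this invalidates the stale cache lines. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ...CPU may now read rx_buf safely... */

	/* Return ownership to the device for the next transfer. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}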
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 443f44de1020..e118c02cc79a 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -29,9 +29,6 @@ config OPENRISC
config MMU
def_bool y
-config HAVE_DMA_ATTRS
- def_bool y
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 413bfcf86384..1f260bccb368 100644
--- a/arch/openrisc/include/asm/dma-mapping.h
+++ b/arch/openrisc/include/asm/dma-mapping.h
@@ -42,6 +42,4 @@ static inline int dma_supported(struct device *dev, u64 dma_mask)
return dma_mask == DMA_BIT_MASK(32);
}
-#include <asm-generic/dma-mapping-common.h>
-
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 729f89163bc3..63d08a7aeb93 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -29,6 +29,7 @@ config PARISC
select TTY # Needed for pdc_cons.c
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_NO_COHERENT_DMA_MMAP
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a57183f..16e024602737 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -1,30 +1,11 @@
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
-/* See Documentation/DMA-API-HOWTO.txt */
-struct hppa_dma_ops {
- int (*dma_supported)(struct device *dev, u64 mask);
- void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
- void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
- void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
- dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
- void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
- void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
- void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
- void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
- void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
- void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
-};
-
/*
-** We could live without the hppa_dma_ops indirection if we didn't want
-** to support 4 different coherent dma models with one binary (they will
-** someday be loadable modules):
+** We need to support 4 different coherent dma models with one binary:
+**
** I/O MMU consistent method dma_sync behavior
** ============= ====================== =======================
** a) PA-7x00LC uncachable host memory flush/purge
@@ -40,158 +21,22 @@ struct hppa_dma_ops {
*/
#ifdef CONFIG_PA11
-extern struct hppa_dma_ops pcxl_dma_ops;
-extern struct hppa_dma_ops pcx_dma_ops;
+extern struct dma_map_ops pcxl_dma_ops;
+extern struct dma_map_ops pcx_dma_ops;
#endif
-extern struct hppa_dma_ops *hppa_dma_ops;
-
-#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
-#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
-
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
-}
-
-static inline void *
-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag)
-{
- return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
-}
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline void
-dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
-{
- hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
-}
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
-{
- return hppa_dma_ops->map_single(dev, ptr, size, direction);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
-{
- hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction)
-{
- return hppa_dma_ops->map_sg(dev, sg, nents, direction);
-}
+extern struct dma_map_ops *hppa_dma_ops;
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
-{
- hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction)
-{
- return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
- dma_unmap_single(dev, dma_address, size, direction);
-}
-
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_cpu)
- hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_device)
- hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_cpu)
- hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_single_for_device)
- hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_sg_for_cpu)
- hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
-{
- if(hppa_dma_ops->dma_sync_sg_for_device)
- hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
- return hppa_dma_ops->dma_supported(dev, mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- if(!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
+ return hppa_dma_ops;
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{
- if(hppa_dma_ops->dma_sync_single_for_cpu)
+ if (hppa_dma_ops->sync_single_for_cpu)
flush_kernel_dcache_range((unsigned long)vaddr, size);
}
@@ -238,22 +83,4 @@ struct parisc_device;
void * sba_get_iommu(struct parisc_device *dev);
#endif
-/* At the moment, we panic on error for IOMMU resource exaustion */
-#define dma_mapping_error(dev, x) 0
-
-/* This API cannot be supported on PA-RISC */
-static inline int dma_mmap_coherent(struct device *dev,
- struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size)
-{
- return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size)
-{
- return -EINVAL;
-}
-
#endif
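
The dma_mapping_error() macro removed above always reported success; the common wrapper behaves identically for ops tables, like parisc's, that set no ->mapping_error hook. A sketch, condensed from the 4.5-era generic header:

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;	/* same answer parisc's old macro gave */
}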
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index dba508fe1683..f8150669b8c6 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -40,7 +40,7 @@
#include <asm/parisc-device.h>
/* See comments in include/asm-parisc/pci.h */
-struct hppa_dma_ops *hppa_dma_ops __read_mostly;
+struct dma_map_ops *hppa_dma_ops __read_mostly;
EXPORT_SYMBOL(hppa_dma_ops);
static struct device root = {
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index af0d7fae7aa7..6430a3d35f59 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -413,7 +413,8 @@ pcxl_dma_init(void)
__initcall(pcxl_dma_init);
-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+static void *pa11_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
unsigned long vaddr;
unsigned long paddr;
@@ -439,7 +440,8 @@ static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_ad
return (void *)vaddr;
}
-static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
+static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order;
@@ -450,15 +452,20 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad
free_pages((unsigned long)__va(dma_handle), order);
}
-static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
+static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
+ void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
}
-static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
BUG_ON(direction == DMA_NONE);
@@ -475,7 +482,9 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz
return;
}
-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -492,7 +501,9 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
return nents;
}
-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
int i;
struct scatterlist *sg;
@@ -509,18 +520,24 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
return;
}
-static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+ size);
}
-static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
+static void pa11_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
+ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
+ size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
@@ -545,32 +562,28 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
-struct hppa_dma_ops pcxl_dma_ops = {
+struct dma_map_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
- .alloc_consistent = pa11_dma_alloc_consistent,
- .alloc_noncoherent = pa11_dma_alloc_consistent,
- .free_consistent = pa11_dma_free_consistent,
- .map_single = pa11_dma_map_single,
- .unmap_single = pa11_dma_unmap_single,
+ .alloc = pa11_dma_alloc,
+ .free = pa11_dma_free,
+ .map_page = pa11_dma_map_page,
+ .unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
- .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
- .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .sync_single_for_device = pa11_dma_sync_single_for_device,
+ .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
-static void *fail_alloc_consistent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return NULL;
-}
-
-static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+static void *pcx_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
void *addr;
+ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ return NULL;
+
addr = (void *)__get_free_pages(flag, get_order(size));
if (addr)
*dma_handle = (dma_addr_t)virt_to_phys(addr);
@@ -578,24 +591,23 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
return addr;
}
-static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t iova)
+static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t iova, struct dma_attrs *attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
return;
}
-struct hppa_dma_ops pcx_dma_ops = {
+struct dma_map_ops pcx_dma_ops = {
.dma_supported = pa11_dma_supported,
- .alloc_consistent = fail_alloc_consistent,
- .alloc_noncoherent = pa11_dma_alloc_noncoherent,
- .free_consistent = pa11_dma_free_noncoherent,
- .map_single = pa11_dma_map_single,
- .unmap_single = pa11_dma_unmap_single,
+ .alloc = pcx_dma_alloc,
+ .free = pcx_dma_free,
+ .map_page = pa11_dma_map_page,
+ .unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
- .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
- .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
- .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
- .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+ .sync_single_for_device = pa11_dma_sync_single_for_device,
+ .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
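Note how pcx_dma_alloc() only succeeds when the caller explicitly asks for non-consistent memory, since PCX machines cannot provide coherent mappings. A hedged sketch of the calling convention in this struct dma_attrs era, with placeholder dev/size names:

	/* Illustrative only: request non-consistent DMA memory on PCX. */
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t handle;
	void *vaddr;

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	vaddr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
	if (!vaddr)
		return -ENOMEM;	/* a plain coherent request would fail here */

	/* the caller must bracket CPU accesses with dma_cache_sync() */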
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 7edba2f3b5e3..44009f3e4925 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -111,7 +111,6 @@ config PPC
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_OPROFILE
select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 7f522c021dc3..77816acd4fd9 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -125,8 +125,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);
-#include <asm-generic/dma-mapping-common.h>
-
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bb46511dbf6d..e9d1f5ab117c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -591,7 +591,6 @@ config QDIO
menuconfig PCI
bool "PCI support"
- select HAVE_DMA_ATTRS
select PCI_MSI
select IOMMU_SUPPORT
help
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index b3fd54d93dd2..e64bfcb9702f 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0bc9282d37d2..95bb78664cd6 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -11,7 +11,6 @@ config SUPERH
select HAVE_GENERIC_DMA_COHERENT
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_PERF_EVENTS
select HAVE_DEBUG_BUGVERBOSE
select ARCH_HAVE_CUSTOM_GPIO_H
diff --git a/arch/sh/include/asm/dma-mapping.h b/arch/sh/include/asm/dma-mapping.h
index a3745a3fe029..e11cf0c8206b 100644
--- a/arch/sh/include/asm/dma-mapping.h
+++ b/arch/sh/include/asm/dma-mapping.h
@@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#define DMA_ERROR_CODE 0
-#include <asm-generic/dma-mapping-common.h>
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index a0aea4637fd0..fff9299563f1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -27,7 +27,6 @@ config SPARC
select RTC_CLASS
select RTC_DRV_M48T59
select RTC_SYSTOHC
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index a21da597b0b5..1180ae254154 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -37,21 +37,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dma_ops;
}
-#define HAVE_ARCH_DMA_SET_MASK 1
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type) {
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EINVAL;
- *dev->dma_mask = mask;
- return 0;
- }
-#endif
- return -EINVAL;
-}
-
-#include <asm-generic/dma-mapping-common.h>
-
#endif
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 09d4656f1492..47155cb3abf3 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -6,7 +6,6 @@ config TILE
select HAVE_EXIT_THREAD
select HAVE_PERF_EVENTS
select USE_PMC if PERF_EVENTS
- select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index 96ac6cce4a32..01ceb4a895b0 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -73,37 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
}
#define HAVE_ARCH_DMA_SET_MASK 1
-
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
- struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
- /*
- * For PCI devices with 64-bit DMA addressing capability, promote
- * the dma_ops to hybrid, with the consistent memory DMA space limited
- * to 32-bit. For 32-bit capable devices, limit the streaming DMA
- * address range to max_direct_dma_addr.
- */
- if (dma_ops == gx_pci_dma_map_ops ||
- dma_ops == gx_hybrid_pci_dma_map_ops ||
- dma_ops == gx_legacy_pci_dma_map_ops) {
- if (mask == DMA_BIT_MASK(64) &&
- dma_ops == gx_legacy_pci_dma_map_ops)
- set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
- else if (mask > dev->archdata.max_direct_dma_addr)
- mask = dev->archdata.max_direct_dma_addr;
- }
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
-
- *dev->dma_mask = mask;
-
- return 0;
-}
+int dma_set_mask(struct device *dev, u64 mask);
/*
* dma_alloc_noncoherent() is #defined to return coherent memory,
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index 09b58703ac26..b6bc0547a4f6 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
+int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ /*
+ * For PCI devices with 64-bit DMA addressing capability, promote
+ * the dma_ops to hybrid, with the consistent memory DMA space limited
+ * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+ * address range to max_direct_dma_addr.
+ */
+ if (dma_ops == gx_pci_dma_map_ops ||
+ dma_ops == gx_hybrid_pci_dma_map_ops ||
+ dma_ops == gx_legacy_pci_dma_map_ops) {
+ if (mask == DMA_BIT_MASK(64) &&
+ dma_ops == gx_legacy_pci_dma_map_ops)
+ set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+ else if (mask > dev->archdata.max_direct_dma_addr)
+ mask = dev->archdata.max_direct_dma_addr;
+ }
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
+
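Moving dma_set_mask() out of line leaves the driver-facing contract unchanged; a typical probe-time negotiation still reads as follows (sketch; pdev is a placeholder PCI device):

	/* Illustrative: fall back from 64-bit to 32-bit DMA addressing. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;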
#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index c9faddc61100..4a7dc1a65d6a 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -4,7 +4,6 @@ config UNICORE32
select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_MEMBLOCK
select HAVE_GENERIC_DMA_COHERENT
- select HAVE_DMA_ATTRS
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select GENERIC_ATOMIC64
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
index 8140e053ccd3..4749854afd03 100644
--- a/arch/unicore32/include/asm/dma-mapping.h
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &swiotlb_dma_map_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (dev && dev->dma_mask)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dcaeb0f7cc05..883a2e536721 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -96,7 +96,6 @@ config X86
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index bfabe41e9d39..2c4bc9d54e50 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -211,7 +211,8 @@ endif
head-y := arch/x86/kernel/head_$(BITS).o
head-y += arch/x86/kernel/head$(BITS).o
-head-y += arch/x86/kernel/head.o
+head-y += arch/x86/kernel/ebda.o
+head-y += arch/x86/kernel/platform-quirks.o
libs-y += arch/x86/lib/
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 937d72c4d484..9e2130d9a5f9 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1043,79 +1043,87 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status;
}
-static efi_status_t exit_boot(struct boot_params *boot_params,
- void *handle, bool is64)
-{
- struct efi_info *efi = &boot_params->efi_info;
- unsigned long map_sz, key, desc_size;
- efi_memory_desc_t *mem_map;
+struct exit_boot_struct {
+ struct boot_params *boot_params;
+ struct efi_info *efi;
struct setup_data *e820ext;
- const char *signature;
__u32 e820ext_size;
- __u32 nr_desc, prev_nr_desc;
- efi_status_t status;
- __u32 desc_version;
- bool called_exit = false;
- u8 nr_entries;
- int i;
-
- nr_desc = 0;
- e820ext = NULL;
- e820ext_size = 0;
-
-get_map:
- status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
- &desc_version, &key);
-
- if (status != EFI_SUCCESS)
- return status;
-
- prev_nr_desc = nr_desc;
- nr_desc = map_sz / desc_size;
- if (nr_desc > prev_nr_desc &&
- nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
- u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
-
- status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
- if (status != EFI_SUCCESS)
- goto free_mem_map;
+ bool is64;
+};
- efi_call_early(free_pool, mem_map);
- goto get_map; /* Allocated memory, get map again */
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv)
+{
+ static bool first = true;
+ const char *signature;
+ __u32 nr_desc;
+ efi_status_t status;
+ struct exit_boot_struct *p = priv;
+
+ if (first) {
+ nr_desc = *map->buff_size / *map->desc_size;
+ if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
+ u32 nr_e820ext = nr_desc -
+ ARRAY_SIZE(p->boot_params->e820_map);
+
+ status = alloc_e820ext(nr_e820ext, &p->e820ext,
+ &p->e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+ first = false;
}
- signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
- memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+ signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
- efi->efi_systab = (unsigned long)sys_table;
- efi->efi_memdesc_size = desc_size;
- efi->efi_memdesc_version = desc_version;
- efi->efi_memmap = (unsigned long)mem_map;
- efi->efi_memmap_size = map_sz;
+ p->efi->efi_systab = (unsigned long)sys_table_arg;
+ p->efi->efi_memdesc_size = *map->desc_size;
+ p->efi->efi_memdesc_version = *map->desc_ver;
+ p->efi->efi_memmap = (unsigned long)*map->map;
+ p->efi->efi_memmap_size = *map->map_size;
#ifdef CONFIG_X86_64
- efi->efi_systab_hi = (unsigned long)sys_table >> 32;
- efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
+ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
#endif
+ return EFI_SUCCESS;
+}
+
+static efi_status_t exit_boot(struct boot_params *boot_params,
+ void *handle, bool is64)
+{
+ unsigned long map_sz, key, desc_size, buff_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext;
+ __u32 e820ext_size;
+ efi_status_t status;
+ __u32 desc_version;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &mem_map;
+ map.map_size = &map_sz;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_version;
+ map.key_ptr = &key;
+ map.buff_size = &buff_size;
+ priv.boot_params = boot_params;
+ priv.efi = &boot_params->efi_info;
+ priv.e820ext = NULL;
+ priv.e820ext_size = 0;
+ priv.is64 = is64;
+
/* Might as well exit boot services now */
- status = efi_call_early(exit_boot_services, handle, key);
- if (status != EFI_SUCCESS) {
- /*
- * ExitBootServices() will fail if any of the event
- * handlers change the memory map. In which case, we
- * must be prepared to retry, but only once so that
- * we're guaranteed to exit on repeated failures instead
- * of spinning forever.
- */
- if (called_exit)
- goto free_mem_map;
-
- called_exit = true;
- efi_call_early(free_pool, mem_map);
- goto get_map;
- }
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+ e820ext = priv.e820ext;
+ e820ext_size = priv.e820ext_size;
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;
@@ -1124,10 +1132,6 @@ get_map:
return status;
return EFI_SUCCESS;
-
-free_mem_map:
- efi_call_early(free_pool, mem_map);
- return status;
}
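The rewritten exit path delegates the ExitBootServices() retry dance to a common helper: the caller wires its locals into an efi_boot_memmap, hands over private state, and efi_exit_boot_services() re-invokes the callback with a fresh memory map on each attempt. The shape of the pattern, sketched with hypothetical names (my_priv and my_exit_func are not from this patch):

/* Hypothetical callback: runs against each memory-map snapshot. */
static efi_status_t my_exit_func(efi_system_table_t *sys_table_arg,
				 struct efi_boot_memmap *map, void *priv)
{
	struct my_priv *p = priv;

	/* inspect *map->map, *map->desc_size, ...; stash results in p */
	return EFI_SUCCESS;
}

	/* ... the caller fills 'map' with pointers to its locals, then: */
	status = efi_exit_boot_services(sys_table, handle, &map, &priv,
					my_exit_func);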
#ifdef CONFIG_HIBERNATE_VERIFICATION
diff --git a/arch/x86/include/asm/bios_ebda.h b/arch/x86/include/asm/bios_ebda.h
index aa6a3170ab5a..72b2c06e1c72 100644
--- a/arch/x86/include/asm/bios_ebda.h
+++ b/arch/x86/include/asm/bios_ebda.h
@@ -38,7 +38,7 @@ static inline unsigned int get_bios_ebda_length(void)
return length;
}
-void reserve_ebda_region(void);
+void reserve_bios_regions(void);
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
/*
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 953b7263f844..3a27b93e6261 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define HAVE_ARCH_DMA_SUPPORTED 1
extern int dma_supported(struct device *hwdev, u64 mask);
-#include <asm-generic/dma-mapping-common.h>
-
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs);
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 8ea0fd541afe..345d6d5854cc 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -4,6 +4,7 @@
#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
+#include <asm/tlb.h>
/*
* We map the EFI regions needed for runtime services non-contiguously,
@@ -56,11 +57,29 @@ extern u64 asmlinkage efi_call(void *fp, ...);
#define efi_call_phys(f, args...) efi_call((f), args)
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+ u64 r15;
+ u64 prev_cr3;
+ pgd_t *efi_pgt;
+ bool use_pgd;
+ u64 phys_stack;
+} __packed;
+
#define arch_efi_call_virt_setup() \
({ \
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
+ \
+ if (efi_scratch.use_pgd) { \
+ efi_scratch.prev_cr3 = read_cr3(); \
+ write_cr3((unsigned long)efi_scratch.efi_pgt); \
+ __flush_tlb_all(); \
+ } \
+ \
})
#define arch_efi_call_virt(f, args...) \
@@ -68,6 +87,12 @@ extern u64 asmlinkage efi_call(void *fp, ...);
#define arch_efi_call_virt_teardown() \
({ \
+ \
+ if (efi_scratch.use_pgd) { \
+ write_cr3(efi_scratch.prev_cr3); \
+ __flush_tlb_all(); \
+ } \
+ \
__kernel_fpu_end(); \
preempt_enable(); \
})
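Folding the CR3 switch into these macros means every virtual-mode runtime service call is bracketed identically; conceptually (a simplified view, not the literal efi_call_virt() expansion):

	/* Simplified sketch of a wrapped runtime service invocation. */
	arch_efi_call_virt_setup();	/* FPU begin + optional CR3 switch */
	status = arch_efi_call_virt(get_time, tm, tc);
	arch_efi_call_virt_teardown();	/* CR3 restore + FPU end */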
@@ -95,11 +120,11 @@ extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
-extern void __init efi_unmap_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
+extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index c759b3cca663..10d0596433f8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -19,12 +19,6 @@ static inline int paravirt_enabled(void)
return pv_info.paravirt_enabled;
}
-static inline int paravirt_has_feature(unsigned int feature)
-{
- WARN_ON_ONCE(!pv_info.paravirt_enabled);
- return (pv_info.features & feature);
-}
-
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 9b240398d980..9f2b694f6a1e 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -70,14 +70,9 @@ struct pv_info {
#endif
int paravirt_enabled;
- unsigned int features; /* valid only if paravirt_enabled is set */
const char *name;
};
-#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)
-/* Supported features */
-#define PV_SUPPORTED_RTC (1<<0)
-
struct pv_init_ops {
/*
* Patch may replace one of the defined code sequences with
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4abe2eeeff9c..092a038156d6 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -472,7 +472,6 @@ static inline unsigned long current_top_of_stack(void)
#else
#define __cpuid native_cpuid
#define paravirt_enabled() 0
-#define paravirt_has(x) 0
static inline void load_sp0(struct tss_struct *tss,
struct thread_struct *thread)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index cd0fc0cc78bc..1f92459af2f4 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -144,6 +144,18 @@ struct x86_cpuinit_ops {
struct timespec;
/**
+ * struct x86_legacy_features - legacy x86 features
+ *
+ * @rtc: this device has a CMOS real-time clock present
+ * @reserve_bios_regions: it's safe to search for the EBDA signature in the hardware's
+ * low RAM
+ */
+struct x86_legacy_features {
+ int rtc;
+ int reserve_bios_regions;
+};
+
+/**
* struct x86_platform_ops - platform specific runtime functions
* @calibrate_tsc: calibrate TSC
* @get_wallclock: get time from HW clock like RTC etc.
@@ -154,6 +166,14 @@ struct timespec;
* @save_sched_clock_state: save state for sched_clock() on suspend
* @restore_sched_clock_state: restore state for sched_clock() on resume
* @apic_post_init: adjust apic if needed
+ * @legacy: legacy features
+ * @set_legacy_features: override legacy features. Use of this callback
+ * is highly discouraged. You should only need
+ * this if your hardware platform requires further
+ * custom fine tuning far beyond what may be
+ * possible in x86_early_init_platform_quirks() by
+ * only using the current x86_hardware_subarch
+ * semantics.
*/
struct x86_platform_ops {
unsigned long (*calibrate_tsc)(void);
@@ -167,6 +187,8 @@ struct x86_platform_ops {
void (*save_sched_clock_state)(void);
void (*restore_sched_clock_state)(void);
void (*apic_post_init)(void);
+ struct x86_legacy_features legacy;
+ void (*set_legacy_features)(void);
};
struct pci_dev;
@@ -188,6 +210,8 @@ extern struct x86_cpuinit_ops x86_cpuinit;
extern struct x86_platform_ops x86_platform;
extern struct x86_msi_ops x86_msi;
extern struct x86_io_apic_ops x86_io_apic_ops;
+
+extern void x86_early_init_platform_quirks(void);
extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b1b78ffe01d0..4b040be2c7e2 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -2,7 +2,11 @@
# Makefile for the linux kernel.
#
-extra-y := head_$(BITS).o head$(BITS).o head.o vmlinux.lds
+extra-y := head_$(BITS).o
+extra-y += head$(BITS).o
+extra-y += ebda.o
+extra-y += platform-quirks.o
+extra-y += vmlinux.lds
CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
diff --git a/arch/x86/kernel/ebda.c b/arch/x86/kernel/ebda.c
new file mode 100644
index 000000000000..6219eef20e2e
--- /dev/null
+++ b/arch/x86/kernel/ebda.c
@@ -0,0 +1,109 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+
+#include <asm/setup.h>
+#include <asm/bios_ebda.h>
+
+/*
+ * This function reserves all conventional PC system BIOS related
+ * firmware memory areas (some of which are data, some of which
+ * are code) that must not be used by the kernel as available
+ * RAM.
+ *
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+ * conventional memory (int 0x12) too.
+ *
+ * This means that as a first approximation on most systems we can
+ * guess the reserved BIOS area by looking at the low BIOS RAM size
+ * value and assume that everything above that value (up to 1MB) is
+ * reserved.
+ *
+ * But life in firmware country is not that simple:
+ *
+ * - This code also contains a quirk for Dell systems that neglect
+ * to reserve the EBDA area in the 'RAM size' value ...
+ *
+ * - The same quirk also avoids a problem with the AMD768MPX
+ * chipset: reserve a page before VGA to prevent PCI prefetch
+ * into it (errata #56). (Usually the page is reserved anyways,
+ * unless you have no PS/2 mouse plugged in.)
+ *
+ * - Plus paravirt systems don't provide a reliable value in the
+ * 'BIOS RAM size' pointer, so we must quirk them too.
+ *
+ * Due to those various problems this function is deliberately
+ * very conservative and tries to err on the side of reserving
+ * too much, to not risk reserving too little.
+ *
+ * Losing a small amount of memory in the bottom megabyte is
+ * rarely a problem, as long as we have enough memory to install
+ * the SMP bootup trampoline which *must* be in this area.
+ *
+ * Using memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem to the kernel,
+ * obviously.
+ */
+
+#define BIOS_RAM_SIZE_KB_PTR 0x413
+
+#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
+#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
+
+void __init reserve_bios_regions(void)
+{
+ unsigned int bios_start, ebda_start;
+
+ /*
+ * NOTE: In a paravirtual environment the BIOS reserved
+ * area is absent. We'll just have to assume that the
+ * paravirt case can handle memory setup correctly,
+ * without our help.
+ */
+ if (!x86_platform.legacy.reserve_bios_regions)
+ return;
+
+ /* Get the start address of the EBDA page: */
+ ebda_start = get_bios_ebda();
+
+ /*
+ * Quirk: some old Dells seem to have a 4k EBDA without
+ * reporting it in their BIOS RAM size value, so just
+ * consider the memory above 640K to be off limits
+ * (bugzilla 2990).
+ *
+ * We detect this case by filtering for nonsensical EBDA
+ * addresses below 128K, where we can assume that they
+ * are bogus and bump it up to a fixed 640K value:
+ */
+ if (ebda_start < BIOS_START_MIN)
+ ebda_start = BIOS_START_MAX;
+
+ /*
+ * BIOS RAM size is encoded in kilobytes, convert it
+ * to bytes to get a first guess at where the BIOS
+ * firmware area starts:
+ */
+ bios_start = *(unsigned short *)__va(BIOS_RAM_SIZE_KB_PTR);
+ bios_start <<= 10;
+
+ /*
+ * If bios_start is less than 128K, assume it is bogus
+ * and bump it up to 640K:
+ */
+ if (bios_start < BIOS_START_MIN)
+ bios_start = BIOS_START_MAX;
+
+ /*
+ * Use the lower of the bios_start and ebda_start
+ * as the starting point, but don't allow it to
+ * go beyond 640K:
+ */
+ bios_start = min(bios_start, ebda_start);
+ bios_start = min(bios_start, BIOS_START_MAX);
+
+ /* Reserve all memory between bios_start and the 1MB mark: */
+ memblock_reserve(bios_start, 0x100000 - bios_start);
+}
diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
index 992f442ca155..8b137891791f 100644
--- a/arch/x86/kernel/head.c
+++ b/arch/x86/kernel/head.c
@@ -1,71 +1 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <asm/setup.h>
-#include <asm/bios_ebda.h>
-
-/*
- * The BIOS places the EBDA/XBDA at the top of conventional
- * memory, and usually decreases the reported amount of
- * conventional memory (int 0x12) too. This also contains a
- * workaround for Dell systems that neglect to reserve EBDA.
- * The same workaround also avoids a problem with the AMD768MPX
- * chipset: reserve a page before VGA to prevent PCI prefetch
- * into it (errata #56). Usually the page is reserved anyways,
- * unless you have no PS/2 mouse plugged in.
- *
- * This functions is deliberately very conservative. Losing
- * memory in the bottom megabyte is rarely a problem, as long
- * as we have enough memory to install the trampoline. Using
- * memory that is in use by the BIOS or by some DMA device
- * the BIOS didn't shut down *is* a big problem.
- */
-
-#define BIOS_LOWMEM_KILOBYTES 0x413
-#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
-#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
-
-void __init reserve_ebda_region(void)
-{
- unsigned int lowmem, ebda_addr;
-
- /*
- * To determine the position of the EBDA and the
- * end of conventional memory, we need to look at
- * the BIOS data area. In a paravirtual environment
- * that area is absent. We'll just have to assume
- * that the paravirt case can handle memory setup
- * correctly, without our help.
- */
- if (paravirt_enabled())
- return;
-
- /* end of low (conventional) memory */
- lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
- lowmem <<= 10;
-
- /* start of EBDA area */
- ebda_addr = get_bios_ebda();
-
- /*
- * Note: some old Dells seem to need 4k EBDA without
- * reporting so, so just consider the memory above 0x9f000
- * to be off limits (bugzilla 2990).
- */
-
- /* If the EBDA address is below 128K, assume it is bogus */
- if (ebda_addr < INSANE_CUTOFF)
- ebda_addr = LOWMEM_CAP;
-
- /* If lowmem is less than 128K, assume it is bogus */
- if (lowmem < INSANE_CUTOFF)
- lowmem = LOWMEM_CAP;
-
- /* Use the lower of the lowmem and EBDA markers as the cutoff */
- lowmem = min(lowmem, ebda_addr);
- lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
-
- /* reserve all memory between lowmem and the 1MB mark */
- memblock_reserve(lowmem, 0x100000 - lowmem);
-}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 2911ef3a9f1c..2dda0bc4576e 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -26,7 +26,7 @@ static void __init i386_default_early_setup(void)
x86_init.resources.reserve_resources = i386_reserve_resources;
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
- reserve_ebda_region();
+ reserve_bios_regions();
}
asmlinkage __visible void __init i386_start_kernel(void)
@@ -34,6 +34,8 @@ asmlinkage __visible void __init i386_start_kernel(void)
cr4_init_shadow();
sanitize_boot_params(&boot_params);
+ x86_early_init_platform_quirks();
+
/* Call the subarch specific early setup function */
switch (boot_params.hdr.hardware_subarch) {
case X86_SUBARCH_INTEL_MID:
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index f129a9af6357..c2a1a64cbbac 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -190,7 +190,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
if (!boot_params.hdr.version)
copy_bootdata(__va(real_mode_data));
- reserve_ebda_region();
+ x86_early_init_platform_quirks();
+ reserve_bios_regions();
start_kernel();
}
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
new file mode 100644
index 000000000000..bd4d9f0d2864
--- /dev/null
+++ b/arch/x86/kernel/platform-quirks.c
@@ -0,0 +1,25 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/bios_ebda.h>
+
+void __init x86_early_init_platform_quirks(void)
+{
+ x86_platform.legacy.rtc = 1;
+ x86_platform.legacy.reserve_bios_regions = 0;
+
+ switch (boot_params.hdr.hardware_subarch) {
+ case X86_SUBARCH_PC:
+ x86_platform.legacy.reserve_bios_regions = 1;
+ break;
+ case X86_SUBARCH_XEN:
+ case X86_SUBARCH_LGUEST:
+ case X86_SUBARCH_INTEL_MID:
+ x86_platform.legacy.rtc = 0;
+ break;
+ }
+
+ if (x86_platform.set_legacy_features)
+ x86_platform.set_legacy_features();
+}
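A platform that needs different defaults installs set_legacy_features before this function runs; a hypothetical guest platform (names invented for illustration) might do:

/* Hypothetical hook: this guest exposes no CMOS RTC. */
static void __init my_guest_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 0;
}

	/* in very early platform setup, before the quirks run: */
	x86_platform.set_legacy_features = my_guest_set_legacy_features;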
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 4af8d063fb36..62c48da3889d 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -14,6 +14,7 @@
#include <asm/time.h>
#include <asm/intel-mid.h>
#include <asm/rtc.h>
+#include <asm/setup.h>
#ifdef CONFIG_X86_32
/*
@@ -188,10 +189,6 @@ static __init int add_rtc_cmos(void)
if (of_have_populated_dt())
return 0;
- /* Intel MID platforms don't have ioport rtc */
- if (intel_mid_identify_cpu())
- return -ENODEV;
-
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
/* This warning can likely go away again in a year or two. */
@@ -200,7 +197,7 @@ static __init int add_rtc_cmos(void)
}
#endif
- if (paravirt_enabled() && !paravirt_has(RTC))
+ if (!x86_platform.legacy.rtc)
return -ENODEV;
platform_device_register(&rtc_device);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index abb057a45e91..baff97240100 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1106,17 +1106,19 @@ void __init setup_arch(char **cmdline_p)
memblock_set_current_limit(ISA_END_ADDRESS);
memblock_x86_fill();
- if (efi_enabled(EFI_BOOT)) {
+ reserve_bios_regions();
+
+ if (efi_enabled(EFI_MEMMAP)) {
efi_fake_memmap();
efi_find_mirror();
- }
+ efi_esrt_init();
- /*
- * The EFI specification says that boot service code won't be called
- * after ExitBootServices(). This is, in fact, a lie.
- */
- if (efi_enabled(EFI_MEMMAP))
+ /*
+ * The EFI specification says that boot service code won't be
+ * called after ExitBootServices(). This is, in fact, a lie.
+ */
efi_reserve_boot_services();
+ }
/* preallocate 4k for mptable mpc */
early_reserve_e820_mpc_new();
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index a43b2eafc466..a0d09f6c6533 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -1414,7 +1414,6 @@ __init void lguest_init(void)
pv_info.kernel_rpl = 1;
/* Everyone except Xen runs with this set. */
pv_info.shared_kernel_pmd = 1;
- pv_info.features = 0;
/*
* We set up all the lguest overrides for sensitive operations. These
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index ea48449b2e63..358f38245b6c 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -28,8 +28,7 @@ struct bmp_header {
void __init efi_bgrt_init(void)
{
acpi_status status;
- void __iomem *image;
- bool ioremapped = false;
+ void *image;
struct bmp_header bmp_header;
if (acpi_disabled)
@@ -70,41 +69,22 @@ void __init efi_bgrt_init(void)
return;
}
- image = efi_lookup_mapped_addr(bgrt_tab->image_address);
+ image = early_memremap(bgrt_tab->image_address, sizeof(bmp_header));
if (!image) {
- image = early_ioremap(bgrt_tab->image_address,
- sizeof(bmp_header));
- ioremapped = true;
- if (!image) {
- pr_err("Ignoring BGRT: failed to map image header memory\n");
- return;
- }
+ pr_err("Ignoring BGRT: failed to map image header memory\n");
+ return;
}
- memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
- if (ioremapped)
- early_iounmap(image, sizeof(bmp_header));
+ memcpy(&bmp_header, image, sizeof(bmp_header));
+ early_memunmap(image, sizeof(bmp_header));
bgrt_image_size = bmp_header.size;
- bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
+ bgrt_image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
if (!bgrt_image) {
- pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
- bgrt_image_size);
+ pr_err("Ignoring BGRT: failed to map image memory\n");
+ bgrt_image = NULL;
return;
}
- if (ioremapped) {
- image = early_ioremap(bgrt_tab->image_address,
- bmp_header.size);
- if (!image) {
- pr_err("Ignoring BGRT: failed to map image memory\n");
- kfree(bgrt_image);
- bgrt_image = NULL;
- return;
- }
- }
-
- memcpy_fromio(bgrt_image, image, bgrt_image_size);
- if (ioremapped)
- early_iounmap(image, bmp_header.size);
+ efi_mem_reserve(bgrt_tab->image_address, bgrt_image_size);
}
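The new flow trades the early_ioremap()/kmalloc()/memcpy_fromio() juggling for a direct memremap() of the firmware image plus an efi_mem_reserve() call so the pages are not handed back to the allocator. The general pattern, sketched with placeholder phys/size values:

	/* Illustrative memremap pattern for firmware-owned RAM. */
	void *img;

	img = memremap(phys, size, MEMREMAP_WB);
	if (!img)
		return;			/* treat as "no image present" */

	/* img stays mapped for later use; also keep the region reserved */
	efi_mem_reserve(phys, size);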
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index ad285404ea7f..f3e0d5f0f0ab 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -56,8 +56,6 @@
#define EFI_DEBUG
-struct efi_memory_map memmap;
-
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
@@ -119,11 +117,10 @@ void efi_get_time(struct timespec *now)
void __init efi_find_mirror(void)
{
- void *p;
+ efi_memory_desc_t *md;
u64 mirror_size = 0, total_size = 0;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- efi_memory_desc_t *md = p;
+ for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
@@ -146,10 +143,9 @@ void __init efi_find_mirror(void)
static void __init do_add_efi_memmap(void)
{
- void *p;
+ efi_memory_desc_t *md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- efi_memory_desc_t *md = p;
+ for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
int e820_type;
@@ -194,7 +190,9 @@ static void __init do_add_efi_memmap(void)
int __init efi_memblock_x86_reserve_range(void)
{
struct efi_info *e = &boot_params.efi_info;
+ struct efi_memory_map_data data;
phys_addr_t pmap;
+ int rv;
if (efi_enabled(EFI_PARAVIRT))
return 0;
@@ -209,34 +207,98 @@ int __init efi_memblock_x86_reserve_range(void)
#else
pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif
- memmap.phys_map = pmap;
- memmap.nr_map = e->efi_memmap_size /
- e->efi_memdesc_size;
- memmap.desc_size = e->efi_memdesc_size;
- memmap.desc_version = e->efi_memdesc_version;
+ data.phys_map = pmap;
+ data.size = e->efi_memmap_size;
+ data.desc_size = e->efi_memdesc_size;
+ data.desc_version = e->efi_memdesc_version;
- memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
+ rv = efi_memmap_init_early(&data);
+ if (rv)
+ return rv;
- efi.memmap = &memmap;
+ if (add_efi_memmap)
+ do_add_efi_memmap();
+
+ memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
return 0;
}
+#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+ u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+ u64 end_hi = 0;
+ char buf[64];
+
+ if (md->num_pages == 0) {
+ end = 0;
+ } else if (md->num_pages > EFI_PAGES_MAX ||
+ EFI_PAGES_MAX - md->num_pages <
+ (md->phys_addr >> EFI_PAGE_SHIFT)) {
+ end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+ >> OVERFLOW_ADDR_SHIFT;
+
+ if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+ end_hi += 1;
+ } else {
+ return true;
+ }
+
+ pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+ if (end_hi) {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end_hi, end);
+ } else {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end);
+ }
+ return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+ efi_memory_desc_t *out = efi.memmap.map;
+ const efi_memory_desc_t *in = out;
+ const efi_memory_desc_t *end = efi.memmap.map_end;
+ int i, n_removal;
+
+ for (i = n_removal = 0; in < end; i++) {
+ if (efi_memmap_entry_valid(in, i)) {
+ if (out != in)
+ memcpy(out, in, efi.memmap.desc_size);
+ out = (void *)out + efi.memmap.desc_size;
+ } else {
+ n_removal++;
+ }
+ in = (void *)in + efi.memmap.desc_size;
+ }
+
+ if (n_removal > 0) {
+ u64 size = efi.memmap.nr_map - n_removal;
+
+ pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+ efi_memmap_install(efi.memmap.phys_map, size);
+ }
+}
+
void __init efi_print_memmap(void)
{
#ifdef EFI_DEBUG
efi_memory_desc_t *md;
- void *p;
- int i;
+ int i = 0;
- for (p = memmap.map, i = 0;
- p < memmap.map_end;
- p += memmap.desc_size, i++) {
+ for_each_efi_memory_desc(md) {
char buf[64];
- md = p;
pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
- i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ i++, efi_md_typeattr_format(buf, sizeof(buf), md),
md->phys_addr,
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
@@ -244,15 +306,6 @@ void __init efi_print_memmap(void)
#endif /* EFI_DEBUG */
}
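All of the open-coded 'for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size)' walks in this file collapse into the for_each_efi_memory_desc() iterator, which reads efi.memmap directly. The resulting idiom (sketch):

	/* Iterate every EFI memory descriptor via the new helper. */
	efi_memory_desc_t *md;

	for_each_efi_memory_desc(md) {
		if (md->attribute & EFI_MEMORY_RUNTIME)
			pr_info("runtime region at 0x%llx\n", md->phys_addr);
	}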
-void __init efi_unmap_memmap(void)
-{
- clear_bit(EFI_MEMMAP, &efi.flags);
- if (memmap.map) {
- early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
- memmap.map = NULL;
- }
-}
-
static int __init efi_systab_init(void *phys)
{
if (efi_enabled(EFI_64BIT)) {
@@ -438,28 +491,6 @@ static int __init efi_runtime_init(void)
return 0;
}
-static int __init efi_memmap_init(void)
-{
- if (efi_enabled(EFI_PARAVIRT))
- return 0;
-
- /* Map the EFI memory map */
- memmap.map = early_memremap((unsigned long)memmap.phys_map,
- memmap.nr_map * memmap.desc_size);
- if (memmap.map == NULL) {
- pr_err("Could not map the memory map!\n");
- return -ENOMEM;
- }
- memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
-
- if (add_efi_memmap)
- do_add_efi_memmap();
-
- set_bit(EFI_MEMMAP, &efi.flags);
-
- return 0;
-}
-
void __init efi_init(void)
{
efi_char16_t *c16;
@@ -517,16 +548,16 @@ void __init efi_init(void)
if (!efi_runtime_supported())
pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
else {
- if (efi_runtime_disabled() || efi_runtime_init())
+ if (efi_runtime_disabled() || efi_runtime_init()) {
+ efi_memmap_unmap();
return;
+ }
}
- if (efi_memmap_init())
- return;
+
+ efi_clean_memmap();
if (efi_enabled(EFI_DBG))
efi_print_memmap();
-
- efi_esrt_init();
}
void __init efi_late_init(void)
@@ -552,12 +583,9 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
- void *p;
/* Make EFI runtime service code area executable */
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
-
+ for_each_efi_memory_desc(md) {
if (md->type != EFI_RUNTIME_SERVICES_CODE)
continue;
@@ -604,12 +632,10 @@ void __init old_map_region(efi_memory_desc_t *md)
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
- void *p;
efi_memory_desc_t *md, *prev_md = NULL;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ for_each_efi_memory_desc(md) {
u64 prev_size;
- md = p;
if (!prev_md) {
prev_md = md;
@@ -648,41 +674,6 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
}
}
-static void __init save_runtime_map(void)
-{
-#ifdef CONFIG_KEXEC_CORE
- efi_memory_desc_t *md;
- void *tmp, *p, *q = NULL;
- int count = 0;
-
- if (efi_enabled(EFI_OLD_MEMMAP))
- return;
-
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
-
- if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
- (md->type == EFI_BOOT_SERVICES_CODE) ||
- (md->type == EFI_BOOT_SERVICES_DATA))
- continue;
- tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
- if (!tmp)
- goto out;
- q = tmp;
-
- memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
- count++;
- }
-
- efi_runtime_map_setup(q, count, memmap.desc_size);
- return;
-
-out:
- kfree(q);
- pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
-#endif
-}
-
static void *realloc_pages(void *old_memmap, int old_shift)
{
void *ret;
@@ -714,10 +705,10 @@ static inline void *efi_map_next_entry_reverse(void *entry)
{
/* Initial call */
if (!entry)
- return memmap.map_end - memmap.desc_size;
+ return efi.memmap.map_end - efi.memmap.desc_size;
- entry -= memmap.desc_size;
- if (entry < memmap.map)
+ entry -= efi.memmap.desc_size;
+ if (entry < efi.memmap.map)
return NULL;
return entry;
@@ -759,15 +750,55 @@ static void *efi_map_next_entry(void *entry)
/* Initial call */
if (!entry)
- return memmap.map;
+ return efi.memmap.map;
- entry += memmap.desc_size;
- if (entry >= memmap.map_end)
+ entry += efi.memmap.desc_size;
+ if (entry >= efi.memmap.map_end)
return NULL;
return entry;
}
+static bool should_map_region(efi_memory_desc_t *md)
+{
+ /*
+ * Runtime regions always require runtime mappings (obviously).
+ */
+ if (md->attribute & EFI_MEMORY_RUNTIME)
+ return true;
+
+ /*
+ * 32-bit EFI doesn't suffer from the bug that requires us to
+ * reserve boot services regions, and mixed mode support
+ * doesn't exist for 32-bit kernels.
+ */
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
+
+ /*
+ * Map all of RAM so that we can access arguments in the 1:1
+ * mapping when making EFI runtime calls.
+ */
+ if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) {
+ if (md->type == EFI_CONVENTIONAL_MEMORY ||
+ md->type == EFI_LOADER_DATA ||
+ md->type == EFI_LOADER_CODE)
+ return true;
+ }
+
+ /*
+ * Map boot services regions as a workaround for buggy
+ * firmware that accesses them even when they shouldn't.
+ *
+ * See efi_{reserve,free}_boot_services().
+ */
+ if (md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA)
+ return true;
+
+ return false;
+}
+
/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
@@ -776,23 +807,22 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
{
void *p, *new_memmap = NULL;
unsigned long left = 0;
+ unsigned long desc_size;
efi_memory_desc_t *md;
+ desc_size = efi.memmap.desc_size;
+
p = NULL;
while ((p = efi_map_next_entry(p))) {
md = p;
- if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
-#ifdef CONFIG_X86_64
- if (md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
-#endif
- continue;
- }
+
+ if (!should_map_region(md))
+ continue;
efi_map_region(md);
get_systab_virt_addr(md);
- if (left < memmap.desc_size) {
+ if (left < desc_size) {
new_memmap = realloc_pages(new_memmap, *pg_shift);
if (!new_memmap)
return NULL;
@@ -801,10 +831,9 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
(*pg_shift)++;
}
- memcpy(new_memmap + (*count * memmap.desc_size), md,
- memmap.desc_size);
+ memcpy(new_memmap + (*count * desc_size), md, desc_size);
- left -= memmap.desc_size;
+ left -= desc_size;
(*count)++;
}
@@ -815,7 +844,7 @@ static void __init kexec_enter_virtual_mode(void)
{
#ifdef CONFIG_KEXEC_CORE
efi_memory_desc_t *md;
- void *p;
+ unsigned int num_pages;
efi.systab = NULL;
@@ -824,7 +853,13 @@ static void __init kexec_enter_virtual_mode(void)
* non-native EFI
*/
if (!efi_is_native()) {
- efi_unmap_memmap();
+ efi_memmap_unmap();
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
+ if (efi_alloc_page_tables()) {
+ pr_err("Failed to allocate EFI page tables\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
}
@@ -833,16 +868,34 @@ static void __init kexec_enter_virtual_mode(void)
* Map efi regions which were passed via setup_data. The virt_addr is a
* fixed addr which was used in first kernel of a kexec boot.
*/
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
+ for_each_efi_memory_desc(md) {
efi_map_region_fixed(md); /* FIXME: add error handling */
get_systab_virt_addr(md);
}
- save_runtime_map();
+ /*
+ * Unregister the early EFI memmap from efi_init() and install
+ * the new EFI memory map.
+ */
+ efi_memmap_unmap();
+
+ if (efi_memmap_init_late(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map)) {
+ pr_err("Failed to remap late EFI memory map\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
BUG_ON(!efi.systab);
+ num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
+ num_pages >>= PAGE_SHIFT;
+
+ if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
efi_sync_low_kernel_mappings();
/*
@@ -869,7 +922,7 @@ static void __init kexec_enter_virtual_mode(void)
* This function will switch the EFI runtime services to virtual mode.
* Essentially, we look through the EFI memmap and map every region that
* has the runtime attribute bit set in its memory descriptor into the
- * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ * efi_pgd page table.
*
* The old method which used to update that memory descriptor with the
* virtual address obtained from ioremap() is still supported when the
@@ -879,8 +932,8 @@ static void __init kexec_enter_virtual_mode(void)
*
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
- * function. For function arguments passing we do copy the PGDs of the
- * kernel page table into ->trampoline_pgd prior to each call.
+ * function. For function arguments passing we do copy the PUDs of the
+ * kernel page table into efi_pgd prior to each call.
*
* Specially for kexec boot, efi runtime maps in previous kernel should
* be passed in via setup_data. In that case runtime ranges will be mapped
@@ -892,9 +945,16 @@ static void __init __efi_enter_virtual_mode(void)
int count = 0, pg_shift = 0;
void *new_memmap = NULL;
efi_status_t status;
+ phys_addr_t pa;
efi.systab = NULL;
+ if (efi_alloc_page_tables()) {
+ pr_err("Failed to allocate EFI page tables\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
efi_merge_regions();
new_memmap = efi_map_regions(&count, &pg_shift);
if (!new_memmap) {
@@ -903,11 +963,24 @@ static void __init __efi_enter_virtual_mode(void)
return;
}
- save_runtime_map();
+ pa = __pa(new_memmap);
+
+ /*
+ * Unregister the early EFI memmap from efi_init() and install
+ * the new EFI memory map that we are about to pass to the
+ * firmware via SetVirtualAddressMap().
+ */
+ efi_memmap_unmap();
+
+ if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
+ pr_err("Failed to remap late EFI memory map\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
BUG_ON(!efi.systab);
- if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
+ if (efi_setup_page_tables(pa, 1 << pg_shift)) {
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
}
@@ -917,17 +990,17 @@ static void __init __efi_enter_virtual_mode(void)
if (efi_is_native()) {
status = phys_efi_set_virtual_address_map(
- memmap.desc_size * count,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)__pa(new_memmap));
+ efi.memmap.desc_size * count,
+ efi.memmap.desc_size,
+ efi.memmap.desc_version,
+ (efi_memory_desc_t *)pa);
} else {
status = efi_thunk_set_virtual_address_map(
efi_phys.set_virtual_address_map,
- memmap.desc_size * count,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)__pa(new_memmap));
+ efi.memmap.desc_size * count,
+ efi.memmap.desc_size,
+ efi.memmap.desc_version,
+ (efi_memory_desc_t *)pa);
}
if (status != EFI_SUCCESS) {
@@ -953,34 +1026,6 @@ static void __init __efi_enter_virtual_mode(void)
efi_runtime_mkexec();
- /*
- * We mapped the descriptor array into the EFI pagetable above but we're
- * not unmapping it here. Here's why:
- *
- * We're copying select PGDs from the kernel page table to the EFI page
- * table and when we do so and make changes to those PGDs like unmapping
- * stuff from them, those changes appear in the kernel page table and we
- * go boom.
- *
- * From setup_real_mode():
- *
- * ...
- * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
- *
- * In this particular case, our allocation is in PGD 0 of the EFI page
- * table but we've copied that PGD from PGD[272] of the EFI page table:
- *
- * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
- *
- * where the direct memory mapping in kernel space is.
- *
- * new_memmap's VA comes from that direct mapping and thus clearing it,
- * it would get cleared in the kernel page table too.
- *
- * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
- */
- free_pages((unsigned long)new_memmap, pg_shift);
-
/* clean DUMMY object */
efi_delete_dummy_variable();
}
@@ -1002,13 +1047,11 @@ void __init efi_enter_virtual_mode(void)
u32 efi_mem_type(unsigned long phys_addr)
{
efi_memory_desc_t *md;
- void *p;
if (!efi_enabled(EFI_MEMMAP))
return 0;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
+ for_each_efi_memory_desc(md) {
if ((md->phys_addr <= phys_addr) &&
(phys_addr < (md->phys_addr +
(md->num_pages << EFI_PAGE_SHIFT))))
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index ed5b67338294..58d669bc8250 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -38,6 +38,11 @@
* say 0 - 3G.
*/
+int __init efi_alloc_page_tables(void)
+{
+ return 0;
+}
+
void efi_sync_low_kernel_mappings(void) {}
void __init efi_dump_pagetable(void) {}
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index a0ac0f9c307f..8d6e7d481dba 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -40,6 +40,7 @@
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
+#include <asm/pgalloc.h>
/*
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
@@ -47,28 +48,17 @@
*/
static u64 efi_va = EFI_VA_START;
-/*
- * Scratch space used for switching the pagetable in the EFI stub
- */
-struct efi_scratch {
- u64 r15;
- u64 prev_cr3;
- pgd_t *efi_pgt;
- bool use_pgd;
- u64 phys_stack;
-} __packed;
+struct efi_scratch efi_scratch;
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
- void *p;
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI service code area executable */
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- md = p;
+ for_each_efi_memory_desc(md) {
if (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
@@ -83,8 +73,11 @@ pgd_t * __init efi_call_phys_prolog(void)
int pgd;
int n_pgds;
- if (!efi_enabled(EFI_OLD_MEMMAP))
- return NULL;
+ if (!efi_enabled(EFI_OLD_MEMMAP)) {
+ save_pgd = (pgd_t *)read_cr3();
+ write_cr3((unsigned long)efi_scratch.efi_pgt);
+ goto out;
+ }
early_code_mapping_set_exec(1);
@@ -96,6 +89,7 @@ pgd_t * __init efi_call_phys_prolog(void)
vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
}
+out:
__flush_tlb_all();
return save_pgd;
@@ -109,8 +103,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
int pgd_idx;
int nr_pgds;
- if (!save_pgd)
+ if (!efi_enabled(EFI_OLD_MEMMAP)) {
+ write_cr3((unsigned long)save_pgd);
+ __flush_tlb_all();
return;
+ }
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
@@ -123,22 +120,92 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
early_code_mapping_set_exec(0);
}
+static pgd_t *efi_pgd;
+
+/*
+ * We need our own copy of the higher levels of the page tables
+ * because we want to avoid inserting EFI region mappings (EFI_VA_END
+ * to EFI_VA_START) into the standard kernel page tables. Everything
+ * else can be shared, see efi_sync_low_kernel_mappings().
+ */
+int __init efi_alloc_page_tables(void)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ gfp_t gfp_mask;
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return 0;
+
+ gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+ efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+ if (!efi_pgd)
+ return -ENOMEM;
+
+ pgd = efi_pgd + pgd_index(EFI_VA_END);
+
+ pud = pud_alloc_one(NULL, 0);
+ if (!pud) {
+ free_page((unsigned long)efi_pgd);
+ return -ENOMEM;
+ }
+
+ pgd_populate(NULL, pgd, pud);
+
+ return 0;
+}
+
/*
* Add low kernel mappings for passing arguments to EFI functions.
*/
void efi_sync_low_kernel_mappings(void)
{
- unsigned num_pgds;
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+ unsigned num_entries;
+ pgd_t *pgd_k, *pgd_efi;
+ pud_t *pud_k, *pud_efi;
if (efi_enabled(EFI_OLD_MEMMAP))
return;
- num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+ /*
+ * We can share all PGD entries apart from the one entry that
+ * covers the EFI runtime mapping space.
+ *
+ * Make sure the EFI runtime region mappings are guaranteed to
+ * only span a single PGD entry and that the entry also maps
+ * other important kernel regions.
+ */
+ BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
+ BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
+ (EFI_VA_END & PGDIR_MASK));
+
+ pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
+ pgd_k = pgd_offset_k(PAGE_OFFSET);
- memcpy(pgd + pgd_index(PAGE_OFFSET),
- init_mm.pgd + pgd_index(PAGE_OFFSET),
- sizeof(pgd_t) * num_pgds);
+ num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
+ memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+
+ /*
+ * We share all the PUD entries apart from those that map the
+ * EFI regions. Copy around them.
+ */
+ BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
+ BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
+
+ pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
+ pud_efi = pud_offset(pgd_efi, 0);
+
+ pgd_k = pgd_offset_k(EFI_VA_END);
+ pud_k = pud_offset(pgd_k, 0);
+
+ num_entries = pud_index(EFI_VA_END);
+ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+
+ pud_efi = pud_offset(pgd_efi, EFI_VA_START);
+ pud_k = pud_offset(pgd_k, EFI_VA_START);
+
+ num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
+ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
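To make the index arithmetic above concrete: with 4-level x86-64 paging, pgd_index() selects one of 512 top-level slots covering 512 GiB each. A minimal user-space sketch of the same bookkeeping (the constants are illustrative stand-ins for the real PAGE_OFFSET and EFI_VA_* values, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT  39                     /* 4-level x86-64 paging */
#define PTRS_PER_PGD 512

static unsigned pgd_index(uint64_t va)
{
	return (unsigned)((va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
}

int main(void)
{
	/* Illustrative stand-ins; the real constants live in
	 * asm/page_64_types.h and asm/efi.h. */
	uint64_t page_offset  = 0xffff880000000000ULL; /* direct-map base */
	uint64_t efi_va_end   = 0xffffffef00000000ULL; /* -68 GiB */
	uint64_t efi_va_start = 0xffffffff00000000ULL; /*  -4 GiB */

	/* Entries shared verbatim between efi_pgd and the kernel PGD: */
	printf("PGD entries copied: %u\n",
	       pgd_index(efi_va_end) - pgd_index(page_offset));

	/* The whole EFI runtime window must sit in one PGD slot, which
	 * is what the BUILD_BUG_ON()s above assert: */
	printf("single-slot window: %s\n",
	       pgd_index(efi_va_start) == pgd_index(efi_va_end)
	       ? "yes" : "no");
	return 0;
}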
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
@@ -151,8 +218,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
if (efi_enabled(EFI_OLD_MEMMAP))
return 0;
- efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
- pgd = __va(efi_scratch.efi_pgt);
+ efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+ pgd = efi_pgd;
/*
* It can happen that the physical address of new_memmap lands in memory
@@ -196,15 +263,13 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
- kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
+ kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
}
static void __init __map_region(efi_memory_desc_t *md, u64 va)
{
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
unsigned long pf = 0;
+ pgd_t *pgd = efi_pgd;
if (!(md->attribute & EFI_MEMORY_WB))
pf |= _PAGE_PCD;
@@ -312,9 +377,7 @@ void __init efi_runtime_mkexec(void)
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
- ptdump_walk_pgd_level(NULL, pgd);
+ ptdump_walk_pgd_level(NULL, efi_pgd);
#endif
}
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 86d0f9e08dd9..32020cb8bb08 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -38,41 +38,6 @@
mov %rsi, %cr0; \
mov (%rsp), %rsp
- /* stolen from gcc */
- .macro FLUSH_TLB_ALL
- movq %r15, efi_scratch(%rip)
- movq %r14, efi_scratch+8(%rip)
- movq %cr4, %r15
- movq %r15, %r14
- andb $0x7f, %r14b
- movq %r14, %cr4
- movq %r15, %cr4
- movq efi_scratch+8(%rip), %r14
- movq efi_scratch(%rip), %r15
- .endm
-
- .macro SWITCH_PGT
- cmpb $0, efi_scratch+24(%rip)
- je 1f
- movq %r15, efi_scratch(%rip) # r15
- # save previous CR3
- movq %cr3, %r15
- movq %r15, efi_scratch+8(%rip) # prev_cr3
- movq efi_scratch+16(%rip), %r15 # EFI pgt
- movq %r15, %cr3
- 1:
- .endm
-
- .macro RESTORE_PGT
- cmpb $0, efi_scratch+24(%rip)
- je 2f
- movq efi_scratch+8(%rip), %r15
- movq %r15, %cr3
- movq efi_scratch(%rip), %r15
- FLUSH_TLB_ALL
- 2:
- .endm
-
ENTRY(efi_call)
SAVE_XMM
mov (%rsp), %rax
@@ -83,16 +48,8 @@ ENTRY(efi_call)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
- SWITCH_PGT
call *%rdi
- RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
ENDPROC(efi_call)
-
- .data
-ENTRY(efi_scratch)
- .fill 3,8,0
- .byte 0
- .quad 0
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 1c7380da65ff..7708acb498f4 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -130,66 +130,210 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
EXPORT_SYMBOL_GPL(efi_query_variable_store);
/*
- * The UEFI specification makes it clear that the operating system is free to do
- * whatever it wants with boot services code after ExitBootServices() has been
- * called. Ignoring this recommendation a significant bunch of EFI implementations
- * continue calling into boot services code (SetVirtualAddressMap). In order to
- * work around such buggy implementations we reserve boot services region during
- * EFI init and make sure it stays executable. Then, after SetVirtualAddressMap(), it
-* is discarded.
-*/
+ * Helper function for efi_reserve_boot_services() to figure out if we
+ * can free regions in efi_free_boot_services().
+ *
+ * Use this function to ensure we do not free regions owned by somebody
+ * else. We must only reserve (and then free) regions:
+ *
+ * - Not within any part of the kernel
+ * - Not the BIOS reserved area (E820_RESERVED, E820_NVS, etc)
+ */
+static bool can_free_region(u64 start, u64 size)
+{
+ if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
+ return false;
+
+ if (!e820_all_mapped(start, start+size, E820_RAM))
+ return false;
+
+ return true;
+}
+
+/*
+ * The UEFI specification makes it clear that the operating system is
+ * free to do whatever it wants with boot services code after
+ * ExitBootServices() has been called. Ignoring this recommendation a
+ * significant bunch of EFI implementations continue calling into boot
+ * services code (SetVirtualAddressMap). In order to work around such
+ * buggy implementations we reserve boot services region during EFI
+ * init and make sure it stays executable. Then, after
+ * SetVirtualAddressMap(), it is discarded.
+ *
+ * However, some boot services regions contain data that is required
+ * by drivers, so we need to track which memory ranges can never be
+ * freed. This is done by tagging those regions with the
+ * EFI_MEMORY_RUNTIME attribute.
+ *
+ * Any driver that wants to mark a region as reserved must use
+ * efi_mem_reserve() which will insert a new EFI memory descriptor
+ * into efi.memmap (splitting existing regions if necessary) and tag
+ * it with EFI_MEMORY_RUNTIME.
+ */
+void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
+{
+ phys_addr_t new_phys, new_size;
+ struct efi_mem_range mr;
+ efi_memory_desc_t md;
+ int num_entries;
+ void *new;
+
+ if (efi_mem_desc_lookup(addr, &md)) {
+ pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
+ return;
+ }
+
+ if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) {
+ pr_err("Region spans EFI memory descriptors, %pa\n", &addr);
+ return;
+ }
+
+ size += addr % EFI_PAGE_SIZE;
+ size = round_up(size, EFI_PAGE_SIZE);
+ addr = round_down(addr, EFI_PAGE_SIZE);
+
+ mr.range.start = addr;
+ mr.range.end = addr + size - 1;
+ mr.attribute = md.attribute | EFI_MEMORY_RUNTIME;
+
+ num_entries = efi_memmap_split_count(&md, &mr.range);
+ num_entries += efi.memmap.nr_map;
+
+ new_size = efi.memmap.desc_size * num_entries;
+
+ new_phys = efi_memmap_alloc(num_entries);
+ if (!new_phys) {
+ pr_err("Could not allocate boot services memmap\n");
+ return;
+ }
+
+ new = early_memremap(new_phys, new_size);
+ if (!new) {
+ pr_err("Failed to map new boot services memmap\n");
+ return;
+ }
+
+ efi_memmap_insert(&efi.memmap, new, &mr);
+ early_memunmap(new, new_size);
+
+ efi_memmap_install(new_phys, num_entries);
+}
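The rounding in efi_arch_mem_reserve() widens an arbitrary byte range to whole EFI pages before the descriptor is split. A minimal user-space sketch of the same arithmetic (round_up()/round_down() are reimplemented here for illustration; the kernel provides them in linux/kernel.h):

#include <stdio.h>
#include <stdint.h>

#define EFI_PAGE_SIZE 4096ULL
#define round_down(x, a) (((x) / (a)) * (a))
#define round_up(x, a)   (round_down((x) + (a) - 1, (a)))

int main(void)
{
	uint64_t addr = 0x12345, size = 100;    /* arbitrary unaligned range */

	size += addr % EFI_PAGE_SIZE;           /* cover the unaligned head */
	size  = round_up(size, EFI_PAGE_SIZE);  /* ...and the tail */
	addr  = round_down(addr, EFI_PAGE_SIZE);

	/* Prints [0x12000, 0x13000): the whole page containing the request */
	printf("reserve [%#llx, %#llx)\n",
	       (unsigned long long)addr,
	       (unsigned long long)(addr + size));
	return 0;
}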
+
void __init efi_reserve_boot_services(void)
{
- void *p;
+ efi_memory_desc_t *md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- efi_memory_desc_t *md = p;
+ for_each_efi_memory_desc(md) {
u64 start = md->phys_addr;
u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ bool already_reserved;
if (md->type != EFI_BOOT_SERVICES_CODE &&
md->type != EFI_BOOT_SERVICES_DATA)
continue;
- /* Only reserve where possible:
- * - Not within any already allocated areas
- * - Not over any memory area (really needed, if above?)
- * - Not within any part of the kernel
- * - Not the bios reserved area
- */
- if ((start + size > __pa_symbol(_text)
- && start <= __pa_symbol(_end)) ||
- !e820_all_mapped(start, start+size, E820_RAM) ||
- memblock_is_region_reserved(start, size)) {
- /* Could not reserve, skip it */
- md->num_pages = 0;
- memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
- start, start+size-1);
- } else
+
+ already_reserved = memblock_is_region_reserved(start, size);
+
+ /*
+ * Because the following memblock_reserve() is paired
+ * with free_bootmem_late() for this region in
+ * efi_free_boot_services(), we must be extremely
+ * careful not to reserve, and subsequently free,
+ * critical regions of memory (like the kernel image) or
+ * those regions that somebody else has already
+ * reserved.
+ *
+ * A good example of a critical region that must not be
+ * freed is page zero (first 4KB of memory), which may

+ * contain boot services code/data but is marked
+ * E820_RESERVED by trim_bios_range().
+ */
+ if (!already_reserved) {
memblock_reserve(start, size);
+
+ /*
+ * If we are the first to reserve the region, no
+ * one else cares about it. We own it and can
+ * free it later.
+ */
+ if (can_free_region(start, size))
+ continue;
+ }
+
+ /*
+ * We don't own the region. We must not free it.
+ *
+ * Setting this bit for a boot services region really
+ * doesn't make sense as far as the firmware is
+ * concerned, but it does provide us with a way to tag
+ * those regions that must not be paired with
+ * free_bootmem_late().
+ */
+ md->attribute |= EFI_MEMORY_RUNTIME;
}
}
void __init efi_free_boot_services(void)
{
- void *p;
+ phys_addr_t new_phys, new_size;
+ efi_memory_desc_t *md;
+ int num_entries = 0;
+ void *new, *new_md;
- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
- efi_memory_desc_t *md = p;
+ for_each_efi_memory_desc(md) {
unsigned long long start = md->phys_addr;
unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
if (md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
+ md->type != EFI_BOOT_SERVICES_DATA) {
+ num_entries++;
continue;
+ }
- /* Could not reserve boot area */
- if (!size)
+ /* Do not free, someone else owns it: */
+ if (md->attribute & EFI_MEMORY_RUNTIME) {
+ num_entries++;
continue;
+ }
free_bootmem_late(start, size);
}
- efi_unmap_memmap();
+ new_size = efi.memmap.desc_size * num_entries;
+ new_phys = efi_memmap_alloc(num_entries);
+ if (!new_phys) {
+ pr_err("Failed to allocate new EFI memmap\n");
+ return;
+ }
+
+ new = memremap(new_phys, new_size, MEMREMAP_WB);
+ if (!new) {
+ pr_err("Failed to map new EFI memmap\n");
+ return;
+ }
+
+ /*
+ * Build a new EFI memmap that excludes any boot services
+ * regions that are not tagged EFI_MEMORY_RUNTIME, since those
+ * regions have now been freed.
+ */
+ new_md = new;
+ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ (md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA))
+ continue;
+
+ memcpy(new_md, md, efi.memmap.desc_size);
+ new_md += efi.memmap.desc_size;
+ }
+
+ memunmap(new);
+
+ if (efi_memmap_install(new_phys, num_entries)) {
+ pr_err("Could not install new EFI memmap\n");
+ return;
+ }
}
/*
@@ -257,7 +401,7 @@ void __init efi_apply_memmap_quirks(void)
*/
if (!efi_runtime_supported()) {
pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
- efi_unmap_memmap();
+ efi_memmap_unmap();
}
/*
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ffa41591bff9..60e81c5ea4f3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1191,7 +1191,6 @@ static const struct pv_info xen_info __initconst = {
#ifdef CONFIG_X86_64
.extra_user_64bit_cs = FLAT_USER_CS64,
#endif
- .features = 0,
.name = "Xen",
};
@@ -1513,6 +1512,11 @@ static void __init xen_pvh_early_guest_init(void)
}
#endif /* CONFIG_XEN_PVH */
+static void __init xen_dom0_set_legacy_features(void)
+{
+ x86_platform.legacy.rtc = 1;
+}
+
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
@@ -1533,8 +1537,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Install Xen paravirt ops */
pv_info = xen_info;
- if (xen_initial_domain())
- pv_info.features |= PV_SUPPORTED_RTC;
pv_init_ops = xen_init_ops;
pv_apic_ops = xen_apic_ops;
if (!xen_pvh_domain()) {
@@ -1687,6 +1689,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
};
+ x86_platform.set_legacy_features =
+ xen_dom0_set_legacy_features;
xen_init_vga(info, xen_start_info->console.dom0.info_size);
xen_start_info->console.domU.mfn = 0;
xen_start_info->console.domU.evtchn = 0;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index bdad9be4a729..6b014a4a3e63 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -15,7 +15,6 @@ config XTENSA
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select HAVE_DMA_API_DEBUG
- select HAVE_DMA_ATTRS
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 66c9ba261e30..3fc1170a6488 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -13,8 +13,6 @@
#include <asm/cache.h>
#include <asm/io.h>
-#include <asm-generic/dma-coherent.h>
-
#include <linux/mm.h>
#include <linux/scatterlist.h>
@@ -30,8 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return &xtensa_dma_map_ops;
}
-#include <asm-generic/dma-mapping-common.h>
-
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e8c0d2927e9a..4ab73b04b167 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -102,6 +102,15 @@ config CRYPTO_KPP
select CRYPTO_ALGAPI
select CRYPTO_KPP2
+config CRYPTO_ACOMP2
+ tristate
+ select CRYPTO_ALGAPI2
+
+config CRYPTO_ACOMP
+ tristate
+ select CRYPTO_ALGAPI
+ select CRYPTO_ACOMP2
+
config CRYPTO_RSA
tristate "RSA algorithm"
select CRYPTO_AKCIPHER
@@ -132,6 +141,7 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_AKCIPHER2
select CRYPTO_KPP2
+ select CRYPTO_ACOMP2
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
diff --git a/crypto/Makefile b/crypto/Makefile
index d6d28e6cde9d..5a464824374b 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -48,6 +48,9 @@ rsa_generic-y += rsa_helper.o
rsa_generic-y += rsa-pkcs1pad.o
obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
+obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
+
cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
diff --git a/crypto/acompress.c b/crypto/acompress.c
new file mode 100644
index 000000000000..887783d8e9a9
--- /dev/null
+++ b/crypto/acompress.c
@@ -0,0 +1,169 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_acomp_type;
+
+#ifdef CONFIG_NET
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_acomp racomp;
+
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+ sizeof(struct crypto_report_acomp), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : acomp\n");
+}
+
+static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+ struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+ alg->exit(acomp);
+}
+
+static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
+ struct acomp_alg *alg = crypto_acomp_alg(acomp);
+
+ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+ return crypto_init_scomp_ops_async(tfm);
+
+ acomp->compress = alg->compress;
+ acomp->decompress = alg->decompress;
+ acomp->dst_free = alg->dst_free;
+ acomp->reqsize = alg->reqsize;
+
+ if (alg->exit)
+ acomp->base.exit = crypto_acomp_exit_tfm;
+
+ if (alg->init)
+ return alg->init(acomp);
+
+ return 0;
+}
+
+static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
+{
+ int extsize = crypto_alg_extsize(alg);
+
+ if (alg->cra_type != &crypto_acomp_type)
+ extsize += sizeof(struct crypto_scomp *);
+
+ return extsize;
+}
+
+static const struct crypto_type crypto_acomp_type = {
+ .extsize = crypto_acomp_extsize,
+ .init_tfm = crypto_acomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_acomp_show,
+#endif
+ .report = crypto_acomp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
+ .type = CRYPTO_ALG_TYPE_ACOMPRESS,
+ .tfmsize = offsetof(struct crypto_acomp, base),
+};
+
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
+
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct acomp_req *req;
+
+ req = __acomp_request_alloc(acomp);
+ if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
+ return crypto_acomp_scomp_alloc_ctx(req);
+
+ return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_alloc);
+
+void acomp_request_free(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+
+ if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
+ crypto_acomp_scomp_free_ctx(req);
+
+ if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
+ acomp->dst_free(req->dst);
+ req->dst = NULL;
+ }
+
+ __acomp_request_free(req);
+}
+EXPORT_SYMBOL_GPL(acomp_request_free);
+
+int crypto_register_acomp(struct acomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+
+ base->cra_type = &crypto_acomp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
+
+ return crypto_register_alg(base);
+}
+EXPORT_SYMBOL_GPL(crypto_register_acomp);
+
+int crypto_unregister_acomp(struct acomp_alg *alg)
+{
+ return crypto_unregister_alg(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Asynchronous compression type");
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 8b0208fec103..22c909b310ce 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -112,6 +112,21 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_acomp racomp;
+
+ strncpy(racomp.type, "acomp", sizeof(racomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
+ sizeof(struct crypto_report_acomp), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
@@ -186,7 +201,11 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
+ case CRYPTO_ALG_TYPE_ACOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
case CRYPTO_ALG_TYPE_AKCIPHER:
if (crypto_report_akcipher(skb, alg))
goto nla_put_failure;
diff --git a/crypto/scompress.c b/crypto/scompress.c
new file mode 100644
index 000000000000..35e396d154b7
--- /dev/null
+++ b/crypto/scompress.c
@@ -0,0 +1,356 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/vmalloc.h>
+#include <crypto/algapi.h>
+#include <linux/cryptouser.h>
+#include <net/netlink.h>
+#include <linux/scatterlist.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/acompress.h>
+#include <crypto/internal/scompress.h>
+#include "internal.h"
+
+static const struct crypto_type crypto_scomp_type;
+static void * __percpu *scomp_src_scratches;
+static void * __percpu *scomp_dst_scratches;
+static int scomp_scratch_users;
+static DEFINE_MUTEX(scomp_lock);
+
+#ifdef CONFIG_NET
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_report_comp rscomp;
+
+ strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
+
+ if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(struct crypto_report_comp), &rscomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+#else
+static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+ __attribute__ ((unused));
+
+static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
+{
+ seq_puts(m, "type : scomp\n");
+}
+
+static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
+{
+ return 0;
+}
+
+static void crypto_scomp_free_scratches(void * __percpu *scratches)
+{
+ int i;
+
+ if (!scratches)
+ return;
+
+ for_each_possible_cpu(i)
+ vfree(*per_cpu_ptr(scratches, i));
+
+ free_percpu(scratches);
+}
+
+static void * __percpu *crypto_scomp_alloc_scratches(void)
+{
+ void * __percpu *scratches;
+ int i;
+
+ scratches = alloc_percpu(void *);
+ if (!scratches)
+ return NULL;
+
+ for_each_possible_cpu(i) {
+ void *scratch;
+
+ scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
+ if (!scratch)
+ goto error;
+ *per_cpu_ptr(scratches, i) = scratch;
+ }
+
+ return scratches;
+
+error:
+ crypto_scomp_free_scratches(scratches);
+ return NULL;
+}
+
+static void crypto_scomp_free_all_scratches(void)
+{
+ if (!--scomp_scratch_users) {
+ crypto_scomp_free_scratches(scomp_src_scratches);
+ crypto_scomp_free_scratches(scomp_dst_scratches);
+ scomp_src_scratches = NULL;
+ scomp_dst_scratches = NULL;
+ }
+}
+
+static int crypto_scomp_alloc_all_scratches(void)
+{
+ if (!scomp_scratch_users++) {
+ scomp_src_scratches = crypto_scomp_alloc_scratches();
+ if (!scomp_src_scratches)
+ return -ENOMEM;
+ scomp_dst_scratches = crypto_scomp_alloc_scratches();
+ if (!scomp_dst_scratches)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void crypto_scomp_sg_free(struct scatterlist *sgl)
+{
+ int i, n;
+ struct page *page;
+
+ if (!sgl)
+ return;
+
+ n = sg_nents(sgl);
+ for_each_sg(sgl, sgl, n, i) {
+ page = sg_page(sgl);
+ if (page)
+ __free_page(page);
+ }
+
+ kfree(sgl);
+}
+
+static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
+{
+ struct scatterlist *sgl;
+ struct page *page;
+ int i, n;
+
+ n = ((size - 1) >> PAGE_SHIFT) + 1;
+
+ sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
+ if (!sgl)
+ return NULL;
+
+ sg_init_table(sgl, n);
+
+ for (i = 0; i < n; i++) {
+ page = alloc_page(gfp);
+ if (!page)
+ goto err;
+ sg_set_page(sgl + i, page, PAGE_SIZE, 0);
+ }
+
+ return sgl;
+
+err:
+ sg_mark_end(sgl + i);
+ crypto_scomp_sg_free(sgl);
+ return NULL;
+}
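The page-count expression in crypto_scomp_sg_alloc() above is a shift-based ceiling division. A tiny self-contained check (PAGE_SHIFT hardcoded to the common 4 KiB value for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long sizes[] = { 1, PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE };
	int i;

	for (i = 0; i < 4; i++) {
		/* same formula as above: ceil(size / PAGE_SIZE) */
		unsigned long n = ((sizes[i] - 1) >> PAGE_SHIFT) + 1;

		printf("size %6lu -> %lu page(s)\n", sizes[i], n);
	}
	return 0;
}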
+
+static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+ const int cpu = get_cpu();
+ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
+ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ int ret;
+
+ if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (req->dst && !req->dlen) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
+ req->dlen = SCOMP_SCRATCH_SIZE;
+
+ scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
+ if (dir)
+ ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
+ scratch_dst, &req->dlen, *ctx);
+ else
+ ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
+ scratch_dst, &req->dlen, *ctx);
+ if (!ret) {
+ if (!req->dst) {
+ req->dst = crypto_scomp_sg_alloc(req->dlen,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
+ 1);
+ }
+out:
+ put_cpu();
+ return ret;
+}
+
+static int scomp_acomp_compress(struct acomp_req *req)
+{
+ return scomp_acomp_comp_decomp(req, 1);
+}
+
+static int scomp_acomp_decompress(struct acomp_req *req)
+{
+ return scomp_acomp_comp_decomp(req, 0);
+}
+
+static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
+{
+ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_scomp(*ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *calg = tfm->__crt_alg;
+ struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
+ struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp;
+
+ if (!crypto_mod_get(calg))
+ return -EAGAIN;
+
+ scomp = crypto_create_tfm(calg, &crypto_scomp_type);
+ if (IS_ERR(scomp)) {
+ crypto_mod_put(calg);
+ return PTR_ERR(scomp);
+ }
+
+ *ctx = scomp;
+ tfm->exit = crypto_exit_scomp_ops_async;
+
+ crt->compress = scomp_acomp_compress;
+ crt->decompress = scomp_acomp_decompress;
+ crt->dst_free = crypto_scomp_sg_free;
+ crt->reqsize = sizeof(void *);
+
+ return 0;
+}
+
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void *ctx;
+
+ ctx = crypto_scomp_alloc_ctx(scomp);
+ if (IS_ERR(ctx)) {
+ kfree(req);
+ return NULL;
+ }
+
+ *req->__ctx = ctx;
+
+ return req;
+}
+
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
+ struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void *ctx = *req->__ctx;
+
+ if (ctx)
+ crypto_scomp_free_ctx(scomp, ctx);
+}
+
+static const struct crypto_type crypto_scomp_type = {
+ .extsize = crypto_alg_extsize,
+ .init_tfm = crypto_scomp_init_tfm,
+#ifdef CONFIG_PROC_FS
+ .show = crypto_scomp_show,
+#endif
+ .report = crypto_scomp_report,
+ .maskclear = ~CRYPTO_ALG_TYPE_MASK,
+ .maskset = CRYPTO_ALG_TYPE_MASK,
+ .type = CRYPTO_ALG_TYPE_SCOMPRESS,
+ .tfmsize = offsetof(struct crypto_scomp, base),
+};
+
+int crypto_register_scomp(struct scomp_alg *alg)
+{
+ struct crypto_alg *base = &alg->base;
+ int ret = -ENOMEM;
+
+ mutex_lock(&scomp_lock);
+ if (crypto_scomp_alloc_all_scratches())
+ goto error;
+
+ base->cra_type = &crypto_scomp_type;
+ base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
+ base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
+
+ ret = crypto_register_alg(base);
+ if (ret)
+ goto error;
+
+ mutex_unlock(&scomp_lock);
+ return ret;
+
+error:
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_register_scomp);
+
+int crypto_unregister_scomp(struct scomp_alg *alg)
+{
+ int ret;
+
+ mutex_lock(&scomp_lock);
+ ret = crypto_unregister_alg(&alg->base);
+ crypto_scomp_free_all_scratches();
+ mutex_unlock(&scomp_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Synchronous compression type");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 6c61d24ec6a5..dea891d7e849 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -33,6 +33,7 @@
#include <crypto/drbg.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
+#include <crypto/acompress.h>
#include "internal.h"
@@ -1301,6 +1302,150 @@ out:
return ret;
}
+static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
+ struct comp_testvec *dtemplate, int ctcount, int dtcount)
+{
+ const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
+ unsigned int i;
+ char *output;
+ int ret;
+ struct scatterlist src, dst;
+ struct acomp_req *req;
+ struct tcrypt_result result;
+
+ output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ for (i = 0; i < ctcount; i++) {
+ unsigned int dlen = COMP_BUF_SIZE;
+ int ilen = ctemplate[i].inlen;
+ void *input_vec;
+
+ input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
+ if (!input_vec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(output, 0, dlen);
+ init_completion(&result.completion);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
+
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ kfree(input_vec);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ ret = wait_async_op(&result, crypto_acomp_compress(req));
+ if (ret) {
+ pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (req->dlen != ctemplate[i].outlen) {
+ pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(output, ctemplate[i].output, req->dlen)) {
+ pr_err("alg: acomp: Compression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ kfree(input_vec);
+ acomp_request_free(req);
+ }
+
+ for (i = 0; i < dtcount; i++) {
+ unsigned int dlen = COMP_BUF_SIZE;
+ int ilen = dtemplate[i].inlen;
+ void *input_vec;
+
+ input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
+ if (!input_vec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(output, 0, dlen);
+ init_completion(&result.completion);
+ sg_init_one(&src, input_vec, ilen);
+ sg_init_one(&dst, output, dlen);
+
+ req = acomp_request_alloc(tfm);
+ if (!req) {
+ pr_err("alg: acomp: request alloc failed for %s\n",
+ algo);
+ kfree(input_vec);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acomp_request_set_params(req, &src, &dst, ilen, dlen);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tcrypt_complete, &result);
+
+ ret = wait_async_op(&result, crypto_acomp_decompress(req));
+ if (ret) {
+ pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
+ i + 1, algo, -ret);
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (req->dlen != dtemplate[i].outlen) {
+ pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
+ i + 1, algo, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ if (memcmp(output, dtemplate[i].output, req->dlen)) {
+ pr_err("alg: acomp: Decompression test %d failed for %s\n",
+ i + 1, algo);
+ hexdump(output, req->dlen);
+ ret = -EINVAL;
+ kfree(input_vec);
+ acomp_request_free(req);
+ goto out;
+ }
+
+ kfree(input_vec);
+ acomp_request_free(req);
+ }
+
+ ret = 0;
+
+out:
+ kfree(output);
+ return ret;
+}
+
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
unsigned int tcount)
{
@@ -1452,22 +1597,38 @@ out:
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
- struct crypto_comp *tfm;
+ struct crypto_comp *comp;
+ struct crypto_acomp *acomp;
int err;
+ u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
+
+ if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
+ acomp = crypto_alloc_acomp(driver, type, mask);
+ if (IS_ERR(acomp)) {
+ pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(acomp));
+ return PTR_ERR(acomp);
+ }
+ err = test_acomp(acomp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
+ crypto_free_acomp(acomp);
+ } else {
+ comp = crypto_alloc_comp(driver, type, mask);
+ if (IS_ERR(comp)) {
+ pr_err("alg: comp: Failed to load transform for %s: %ld\n",
+ driver, PTR_ERR(comp));
+ return PTR_ERR(comp);
+ }
- tfm = crypto_alloc_comp(driver, type, mask);
- if (IS_ERR(tfm)) {
- printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
- "%ld\n", driver, PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- err = test_comp(tfm, desc->suite.comp.comp.vecs,
- desc->suite.comp.decomp.vecs,
- desc->suite.comp.comp.count,
- desc->suite.comp.decomp.count);
+ err = test_comp(comp, desc->suite.comp.comp.vecs,
+ desc->suite.comp.decomp.vecs,
+ desc->suite.comp.comp.count,
+ desc->suite.comp.decomp.count);
- crypto_free_comp(tfm);
+ crypto_free_comp(comp);
+ }
return err;
}
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index a00f7b79202b..261420ddfe66 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -12,7 +12,6 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm-generic/dma-coherent.h>
/*
* Managed DMA API
@@ -167,7 +166,7 @@ void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
}
EXPORT_SYMBOL(dmam_free_noncoherent);
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
static void dmam_coherent_decl_release(struct device *dev, void *res)
{
@@ -247,7 +246,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
int ret = -ENXIO;
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
@@ -264,7 +263,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
user_count << PAGE_SHIFT,
vma->vm_page_prot);
}
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
return ret;
}
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 5819d50ab258..be6a599bc0c1 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -283,6 +283,7 @@ out_free_priv_data:
return err;
}
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
/**
* platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -300,6 +301,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
msi_domain_free_irqs(dev->msi_domain, dev);
platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
/**
* platform_msi_get_host_data - Query the private data associated with
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e6cd1a32025a..212346645bc6 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -408,15 +408,6 @@ config PXA_DMA
16 to 32 channels for peripheral to memory or memory to memory
transfers.
-config QCOM_BAM_DMA
- tristate "QCOM BAM DMA support"
- depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- ---help---
- Enable support for the QCOM BAM DMA controller. This controller
- provides DMA capabilities for a variety of on-chip devices.
-
config SIRF_DMA
tristate "CSR SiRFprimaII/SiRFmarco DMA support"
depends on ARCH_SIRF
@@ -527,6 +518,8 @@ config ZX_DMA
# driver files
source "drivers/dma/bestcomm/Kconfig"
+source "drivers/dma/qcom/Kconfig"
+
source "drivers/dma/dw/Kconfig"
source "drivers/dma/hsu/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index ef9c099bd2b6..797846dc4533 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
@@ -66,4 +65,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o
obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+obj-y += qcom/
obj-y += xilinx/
diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h
index 17f983a4e9ba..882ff9448c3b 100644
--- a/drivers/dma/dmaengine.h
+++ b/drivers/dma/dmaengine.h
@@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
state->residue = residue;
}
+struct dmaengine_desc_callback {
+ dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
+ void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_desc_callback *cb)
+{
+ cb->callback = tx->callback;
+ cb->callback_result = tx->callback_result;
+ cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_result dummy_result = {
+ .result = DMA_TRANS_NOERROR,
+ .residue = 0
+ };
+
+ if (cb->callback_result) {
+ if (!result)
+ result = &dummy_result;
+ cb->callback_result(cb->callback_param, result);
+ } else if (cb->callback) {
+ cb->callback(cb->callback_param);
+ }
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ * then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+ const struct dmaengine_result *result)
+{
+ struct dmaengine_desc_callback cb;
+
+ dmaengine_desc_get_callback(tx, &cb);
+ dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+ return (cb->callback) ? true : false;
+}
+
#endif
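The split between dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke() lets a driver snapshot the callback fields under its channel lock and invoke them after dropping it; hidma_process_completed() later in this series follows exactly this shape. A hedged fragment (chan, desc, and flags are driver-local names, not part of the API):

/* Sketch only; not a complete compilable unit. */
struct dmaengine_desc_callback cb;
unsigned long flags;

spin_lock_irqsave(&chan->lock, flags);
dma_cookie_complete(&desc->txd);
dmaengine_desc_get_callback(&desc->txd, &cb);	/* snapshot under lock */
spin_unlock_irqrestore(&chan->lock, flags);

/* Invoke lock-free; a NULL result is reported as DMA_TRANS_NOERROR. */
dmaengine_desc_callback_invoke(&cb, NULL);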
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 4a31905ec1b5..9e88f0eeb79d 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -416,7 +416,9 @@ static int dmatest_func(void *data)
int src_cnt;
int dst_cnt;
int i;
- ktime_t ktime;
+ ktime_t ktime, start, diff;
+ ktime_t filltime = ktime_set(0, 0);
+ ktime_t comparetime = ktime_set(0, 0);
s64 runtime = 0;
unsigned long long total_len = 0;
@@ -519,6 +521,7 @@ static int dmatest_func(void *data)
src_off = 0;
dst_off = 0;
} else {
+ start = ktime_get();
src_off = dmatest_random() % (params->buf_size - len + 1);
dst_off = dmatest_random() % (params->buf_size - len + 1);
@@ -529,6 +532,9 @@ static int dmatest_func(void *data)
params->buf_size);
dmatest_init_dsts(thread->dsts, dst_off, len,
params->buf_size);
+
+ diff = ktime_sub(ktime_get(), start);
+ filltime = ktime_add(filltime, diff);
}
um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt,
@@ -661,6 +667,7 @@ static int dmatest_func(void *data)
continue;
}
+ start = ktime_get();
pr_debug("%s: verifying source buffer...\n", current->comm);
error_count = dmatest_verify(thread->srcs, 0, src_off,
0, PATTERN_SRC, true);
@@ -681,6 +688,9 @@ static int dmatest_func(void *data)
params->buf_size, dst_off + len,
PATTERN_DST, false);
+ diff = ktime_sub(ktime_get(), start);
+ comparetime = ktime_add(comparetime, diff);
+
if (error_count) {
result("data error", total_tests, src_off, dst_off,
len, error_count);
@@ -690,7 +700,10 @@ static int dmatest_func(void *data)
dst_off, len, 0);
}
}
- runtime = ktime_us_delta(ktime_get(), ktime);
+ ktime = ktime_sub(ktime_get(), ktime);
+ ktime = ktime_sub(ktime, comparetime);
+ ktime = ktime_sub(ktime, filltime);
+ runtime = ktime_to_us(ktime);
ret = 0;
err_dstbuf:
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 000000000000..a7761c4025f4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_HIDMA_MGMT
+ tristate "Qualcomm Technologies HIDMA Management support"
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA Management.
+ Each DMA device requires one management interface driver
+ for basic initialization before QCOM_HIDMA channel driver can
+ start managing the channels. In a virtualized environment,
+ the guest OS would run QCOM_HIDMA channel driver and the
+ host would run the QCOM_HIDMA_MGMT management driver.
+
+config QCOM_HIDMA
+ tristate "Qualcomm Technologies HIDMA Channel support"
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA controller.
+ The HIDMA controller supports optimized buffer copies
+ (user to kernel, kernel to kernel, etc.). It only supports
+ the memcpy interface. The core is not intended for general
+ purpose slave DMA.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 000000000000..4bfc38b45220
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
+hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
+obj-$(CONFIG_QCOM_HIDMA) += hdma.o
+hdma-objs := hidma_ll.o hidma.o hidma_dbg.o
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5a250cdc8376..b6f053dcc5d9 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,8 +49,8 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
-#include "dmaengine.h"
-#include "virt-dma.h"
+#include "../dmaengine.h"
+#include "../virt-dma.h"
struct bam_desc_hw {
u32 addr; /* Buffer physical address */
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..3c982c96b4b7
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,919 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine interface
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/* Linux Foundation elects GPLv2 license only. */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_dma.h>
+#include <linux/property.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/atomic.h>
+#include <linux/pm_runtime.h>
+#include <linux/msi.h>
+
+#include "../dmaengine.h"
+#include "hidma.h"
+
+/*
+ * Default idle time is 2 seconds. This parameter can
+ * be overridden by changing the following
+ * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
+ * during kernel boot.
+ */
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_ERR_INFO_SW 0xFF
+#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
+#define HIDMA_NR_DEFAULT_DESC 10
+#define HIDMA_MSI_INTS 11
+
+static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
+{
+ return container_of(dmadev, struct hidma_dev, ddev);
+}
+
+static inline
+struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
+{
+ return container_of(_lldevp, struct hidma_dev, lldev);
+}
+
+static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
+{
+ return container_of(dmach, struct hidma_chan, chan);
+}
+
+static inline
+struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
+{
+ return container_of(t, struct hidma_desc, desc);
+}
+
+static void hidma_free(struct hidma_dev *dmadev)
+{
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+}
+
+static unsigned int nr_desc_prm;
+module_param(nr_desc_prm, uint, 0644);
+MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
+
+
+/* process completed descriptors */
+static void hidma_process_completed(struct hidma_chan *mchan)
+{
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *mdma = to_hidma_dev(ddev);
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t last_cookie;
+ struct hidma_desc *mdesc;
+ struct hidma_desc *next;
+ unsigned long irqflags;
+ struct list_head list;
+
+ INIT_LIST_HEAD(&list);
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry_safe(mdesc, next, &list, node) {
+ enum dma_status llstat;
+ struct dmaengine_desc_callback cb;
+ struct dmaengine_result result;
+
+ desc = &mdesc->desc;
+ last_cookie = desc->cookie;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ dma_cookie_complete(desc);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
+ dmaengine_desc_get_callback(desc, &cb);
+
+ dma_run_dependencies(desc);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_move(&mdesc->node, &mchan->free);
+
+ if (llstat == DMA_COMPLETE) {
+ mchan->last_success = last_cookie;
+ result.result = DMA_TRANS_NOERROR;
+ } else
+ result.result = DMA_TRANS_ABORTED;
+
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ dmaengine_desc_callback_invoke(&cb, &result);
+ }
+}
+
+/*
+ * Called once for each submitted descriptor.
+ * PM is locked once for each descriptor that is currently
+ * in execution.
+ */
+static void hidma_callback(void *data)
+{
+ struct hidma_desc *mdesc = data;
+ struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
+ struct dma_device *ddev = mchan->chan.device;
+ struct hidma_dev *dmadev = to_hidma_dev(ddev);
+ unsigned long irqflags;
+ bool queued = false;
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (mdesc->node.next) {
+ /* Delete from the active list, add to completed list */
+ list_move_tail(&mdesc->node, &mchan->completed);
+ queued = true;
+
+ /* calculate the next running descriptor */
+ mchan->running = list_first_entry(&mchan->active,
+ struct hidma_desc, node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ hidma_process_completed(mchan);
+
+ if (queued) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+}
+
+static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
+{
+ struct hidma_chan *mchan;
+ struct dma_device *ddev;
+
+ mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return -ENOMEM;
+
+ ddev = &dmadev->ddev;
+ mchan->dma_sig = dma_sig;
+ mchan->dmadev = dmadev;
+ mchan->chan.device = ddev;
+ dma_cookie_init(&mchan->chan);
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &ddev->channels);
+ dmadev->ddev.chancnt++;
+ return 0;
+}
+
+static void hidma_issue_task(unsigned long arg)
+{
+ struct hidma_dev *dmadev = (struct hidma_dev *)arg;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ hidma_ll_start(dmadev->lldev);
+}
+
+static void hidma_issue_pending(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!mchan->running) {
+ struct hidma_desc *desc = list_first_entry(&mchan->active,
+ struct hidma_desc,
+ node);
+ mchan->running = desc;
+ }
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* PM will be released in hidma_callback function. */
+ status = pm_runtime_get(dmadev->ddev.dev);
+ if (status < 0)
+ tasklet_schedule(&dmadev->task);
+ else
+ hidma_ll_start(dmadev->lldev);
+}
+
+static inline bool hidma_txn_is_success(dma_cookie_t cookie,
+ dma_cookie_t last_success, dma_cookie_t last_used)
+{
+ if (last_success <= last_used) {
+ if ((cookie <= last_success) || (cookie > last_used))
+ return true;
+ } else {
+ if ((cookie <= last_success) && (cookie > last_used))
+ return true;
+ }
+ return false;
+}
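hidma_txn_is_success() treats the cookie space as a ring: cookies in the half-open window (last_success, last_used] still belong to the hardware or completed with an error, and everything else is a past success. A self-contained user-space rendering of the same test, including the wraparound case:

#include <stdio.h>
#include <stdbool.h>

typedef int dma_cookie_t;	/* s32 in the kernel */

static bool txn_is_success(dma_cookie_t cookie,
			   dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used)
		return cookie <= last_success || cookie > last_used;
	return cookie <= last_success && cookie > last_used;
}

int main(void)
{
	/* Normal case: cookie 5 was the last success, 7 the last issued. */
	printf("%d\n", txn_is_success(3, 5, 7));	/* 1: old success */
	printf("%d\n", txn_is_success(6, 5, 7));	/* 0: in the window */

	/* Wrapped case: cookie assignment wrapped past INT_MAX back to 5. */
	printf("%d\n", txn_is_success(100, 2147483000, 5));	/* 1 */
	printf("%d\n", txn_is_success(3, 2147483000, 5));	/* 0 */
	return 0;
}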
+
+static enum dma_status hidma_tx_status(struct dma_chan *dmach,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dmach, cookie, txstate);
+ if (ret == DMA_COMPLETE) {
+ bool is_success;
+
+ is_success = hidma_txn_is_success(cookie, mchan->last_success,
+ dmach->cookie);
+ return is_success ? ret : DMA_ERROR;
+ }
+
+ if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
+ unsigned long flags;
+ dma_cookie_t runcookie;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (mchan->running)
+ runcookie = mchan->running->desc.cookie;
+ else
+ runcookie = -EINVAL;
+
+ if (runcookie == cookie)
+ ret = DMA_PAUSED;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+
+ return ret;
+}
+
+/*
+ * Submit descriptor to hardware.
+ * Lock the PM for each descriptor we are sending.
+ */
+static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct hidma_chan *mchan = to_hidma_chan(txd->chan);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc;
+ unsigned long irqflags;
+ dma_cookie_t cookie;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (!hidma_ll_isenabled(dmadev->lldev)) {
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return -ENODEV;
+ }
+
+ mdesc = container_of(txd, struct hidma_desc, desc);
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move descriptor to active */
+ list_move_tail(&mdesc->node, &mchan->active);
+
+ /* Update cookie */
+ cookie = dma_cookie_assign(txd);
+
+ hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return cookie;
+}
+
+static int hidma_alloc_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *dmadev = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+ unsigned int i;
+ int rc = 0;
+
+ if (mchan->allocated)
+ return 0;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < dmadev->nr_descriptors; i++) {
+ mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
+ if (!mdesc) {
+ rc = -ENOMEM;
+ break;
+ }
+ dma_async_tx_descriptor_init(&mdesc->desc, dmach);
+ mdesc->desc.tx_submit = hidma_tx_submit;
+
+ rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
+ "DMA engine", hidma_callback, mdesc,
+ &mdesc->tre_ch);
+ if (rc) {
+ dev_err(dmach->device->dev,
+ "channel alloc failed at %u\n", i);
+ kfree(mdesc);
+ break;
+ }
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ if (rc) {
+ /* return the allocated descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
+ kfree(mdesc);
+ }
+ return rc;
+ }
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_splice_tail_init(&descs, &mchan->free);
+ mchan->allocated = true;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+ return 1;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_desc *mdesc = NULL;
+ struct hidma_dev *mdma = mchan->dmadev;
+ unsigned long irqflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ if (!mdesc)
+ return NULL;
+
+ hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+ src, dest, len, flags);
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ return &mdesc->desc;
+}
+
+static int hidma_terminate_channel(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ struct hidma_desc *tmp, *mdesc;
+ unsigned long irqflags;
+ LIST_HEAD(list);
+ int rc;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ /* give completed requests a chance to finish */
+ hidma_process_completed(mchan);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+ mchan->last_success = 0;
+ list_splice_init(&mchan->active, &list);
+ list_splice_init(&mchan->prepared, &list);
+ list_splice_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+ /* this suspends the existing transfer */
+ rc = hidma_ll_disable(dmadev->lldev);
+ if (rc) {
+ dev_err(dmadev->ddev.dev, "channel did not pause\n");
+ goto out;
+ }
+
+ /* return all user requests */
+ list_for_each_entry_safe(mdesc, tmp, &list, node) {
+ struct dma_async_tx_descriptor *txd = &mdesc->desc;
+
+ dma_descriptor_unmap(txd);
+ dmaengine_desc_get_callback_invoke(txd, NULL);
+ dma_run_dependencies(txd);
+
+ /* move myself to free_list */
+ list_move(&mdesc->node, &mchan->free);
+ }
+
+ rc = hidma_ll_enable(dmadev->lldev);
+out:
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static int hidma_terminate_all(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan = to_hidma_chan(chan);
+ struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
+ int rc;
+
+ rc = hidma_terminate_channel(chan);
+ if (rc)
+ return rc;
+
+ /* reinitialize the hardware */
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_setup(dmadev->lldev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return rc;
+}
+
+static void hidma_free_chan_resources(struct dma_chan *dmach)
+{
+ struct hidma_chan *mchan = to_hidma_chan(dmach);
+ struct hidma_dev *mdma = mchan->dmadev;
+ struct hidma_desc *mdesc, *tmp;
+ unsigned long irqflags;
+ LIST_HEAD(descs);
+
+ /* terminate running transactions and free descriptors */
+ hidma_terminate_channel(dmach);
+
+ spin_lock_irqsave(&mchan->lock, irqflags);
+
+ /* Move all free descriptors to a local list */
+ list_splice_tail_init(&mchan->free, &descs);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node) {
+ hidma_ll_free(mdma->lldev, mdesc->tre_ch);
+ list_del(&mdesc->node);
+ kfree(mdesc);
+ }
+
+ mchan->allocated = 0;
+ spin_unlock_irqrestore(&mchan->lock, irqflags);
+}
+
+static int hidma_pause(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (!mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ if (hidma_ll_disable(dmadev->lldev))
+ dev_warn(dmadev->ddev.dev, "channel did not stop\n");
+ mchan->paused = true;
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return 0;
+}
+
+static int hidma_resume(struct dma_chan *chan)
+{
+ struct hidma_chan *mchan;
+ struct hidma_dev *dmadev;
+ int rc = 0;
+
+ mchan = to_hidma_chan(chan);
+ dmadev = to_hidma_dev(mchan->chan.device);
+ if (mchan->paused) {
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ rc = hidma_ll_enable(dmadev->lldev);
+ if (!rc)
+ mchan->paused = false;
+ else
+ dev_err(dmadev->ddev.dev,
+ "failed to resume the channel");
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ }
+ return rc;
+}
+
+static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+
+ /*
+ * All interrupts are request driven.
+ * HW doesn't send an interrupt by itself.
+ */
+ return hidma_ll_inthandler(chirq, lldev);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
+{
+ struct hidma_lldev **lldevp = arg;
+ struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);
+
+ return hidma_ll_inthandler_msi(chirq, *lldevp,
+ 1 << (chirq - dmadev->msi_virqbase));
+}
+#endif
+
+static ssize_t hidma_show_values(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_dev *mdev = platform_get_drvdata(pdev);
+
+ buf[0] = 0;
+
+ if (strcmp(attr->attr.name, "chid") == 0)
+ sprintf(buf, "%d\n", mdev->chidx);
+
+ return strlen(buf);
+}
+
+static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
+{
+ device_remove_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+static struct device_attribute*
+hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+ attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
+ GFP_KERNEL);
+ if (!attrs)
+ return NULL;
+
+ name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return NULL;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->show = hidma_show_values;
+ sysfs_attr_init(&attrs->attr);
+
+ return attrs;
+}
+
+static int hidma_sysfs_init(struct hidma_dev *dev)
+{
+ dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
+ if (!dev->chid_attrs)
+ return -ENOMEM;
+
+ return device_create_file(dev->ddev.dev, dev->chid_attrs);
+}
+
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct hidma_dev *dmadev = dev_get_drvdata(dev);
+
+ if (!desc->platform.msi_index) {
+ writel(msg->address_lo, dmadev->dev_evca + 0x118);
+ writel(msg->address_hi, dmadev->dev_evca + 0x11C);
+ writel(msg->data, dmadev->dev_evca + 0x120);
+ }
+}
+#endif
+
+static void hidma_free_msis(struct hidma_dev *dmadev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct device *dev = dmadev->ddev.dev;
+ struct msi_desc *desc;
+
+ /* free all of the allocated MSI interrupts */
+ for_each_msi_entry(desc, dev)
+ devm_free_irq(dev, desc->irq, &dmadev->lldev);
+
+ platform_msi_domain_free_irqs(dev);
+#endif
+}
+
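+/*
+ * Allocate the platform MSI vectors and attach the MSI handler to
+ * each of them. On failure, the vectors requested so far are freed
+ * again so that the caller can fall back to the wired IRQ.
+ */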
+static int hidma_request_msi(struct hidma_dev *dmadev,
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ int rc;
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+
+ rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
+ hidma_write_msi_msg);
+ if (rc)
+ return rc;
+
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (!desc->platform.msi_index)
+ dmadev->msi_virqbase = desc->irq;
+
+ rc = devm_request_irq(&pdev->dev, desc->irq,
+ hidma_chirq_handler_msi,
+ 0, "qcom-hidma-msi",
+ &dmadev->lldev);
+ if (rc) {
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (rc) {
+ /* free allocated MSI interrupts above */
+ for_each_msi_entry(desc, &pdev->dev) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(&pdev->dev, desc->irq,
+ &dmadev->lldev);
+ }
+ } else {
+ /* MSI allocation succeeded; switch the hardware to MSI mode */
+ hidma_ll_setup_irq(dmadev->lldev, true);
+ }
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
+ return rc;
+#else
+ return -EINVAL;
+#endif
+}
+
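+/*
+ * MSI support is tied to the device identity: the "qcom,hidma-1.1"
+ * compatible (device tree) and the QCOM8062 HID (ACPI) identify the
+ * MSI-capable hardware revision.
+ */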
+static bool hidma_msi_capable(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ const char *of_compat;
+ int ret = -EINVAL;
+
+ if (!adev || acpi_disabled) {
+ ret = device_property_read_string(dev, "compatible",
+ &of_compat);
+ if (ret)
+ return false;
+
+ ret = strcmp(of_compat, "qcom,hidma-1.1");
+ } else {
+#ifdef CONFIG_ACPI
+ ret = strcmp(acpi_device_hid(adev), "QCOM8062");
+#endif
+ }
+ return ret == 0;
+}
+
+static int hidma_probe(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev;
+ struct resource *trca_resource;
+ struct resource *evca_resource;
+ int chirq;
+ void __iomem *evca;
+ void __iomem *trca;
+ int rc;
+ bool msi;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ if (IS_ERR(trca)) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ if (IS_ERR(evca)) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ /*
+ * This driver only handles the channel IRQs.
+ * Common IRQ is handled by the management driver.
+ */
+ chirq = platform_get_irq(pdev, 0);
+ if (chirq < 0) {
+ rc = -ENODEV;
+ goto bailout;
+ }
+
+ dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
+ if (!dmadev) {
+ rc = -ENOMEM;
+ goto bailout;
+ }
+
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+ spin_lock_init(&dmadev->lock);
+ dmadev->ddev.dev = &pdev->dev;
+ pm_runtime_get_sync(dmadev->ddev.dev);
+
+ dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+ if (WARN_ON(!pdev->dev.dma_mask)) {
+ rc = -ENXIO;
+ goto dmafree;
+ }
+
+ dmadev->dev_evca = evca;
+ dmadev->evca_resource = evca_resource;
+ dmadev->dev_trca = trca;
+ dmadev->trca_resource = trca_resource;
+ dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+ dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
+ dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
+ dmadev->ddev.device_tx_status = hidma_tx_status;
+ dmadev->ddev.device_issue_pending = hidma_issue_pending;
+ dmadev->ddev.device_pause = hidma_pause;
+ dmadev->ddev.device_resume = hidma_resume;
+ dmadev->ddev.device_terminate_all = hidma_terminate_all;
+ dmadev->ddev.copy_align = 8;
+
+ /*
+ * Determine the MSI capability of the platform. Old HW doesn't
+ * support MSI.
+ */
+ msi = hidma_msi_capable(&pdev->dev);
+
+ device_property_read_u32(&pdev->dev, "desc-count",
+ &dmadev->nr_descriptors);
+
+ if (!dmadev->nr_descriptors && nr_desc_prm)
+ dmadev->nr_descriptors = nr_desc_prm;
+
+ if (!dmadev->nr_descriptors)
+ dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
+
+ dmadev->chidx = readl(dmadev->dev_trca + 0x28);
+
+ /* Set DMA mask to 64 bits. */
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_warn(&pdev->dev, "unable to set 64-bit DMA mask, trying 32-bit\n");
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ goto dmafree;
+ }
+
+ dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
+ dmadev->nr_descriptors, dmadev->dev_trca,
+ dmadev->dev_evca, dmadev->chidx);
+ if (!dmadev->lldev) {
+ rc = -EPROBE_DEFER;
+ goto dmafree;
+ }
+
+ platform_set_drvdata(pdev, dmadev);
+ if (msi)
+ rc = hidma_request_msi(dmadev, pdev);
+
+ if (!msi || rc) {
+ hidma_ll_setup_irq(dmadev->lldev, false);
+ rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
+ 0, "qcom-hidma", dmadev->lldev);
+ if (rc)
+ goto uninit;
+ }
+
+ INIT_LIST_HEAD(&dmadev->ddev.channels);
+ rc = hidma_chan_init(dmadev, 0);
+ if (rc)
+ goto uninit;
+
+ rc = dma_async_device_register(&dmadev->ddev);
+ if (rc)
+ goto uninit;
+
+ dmadev->irq = chirq;
+ tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
+ hidma_debug_init(dmadev);
+ hidma_sysfs_init(dmadev);
+ dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return 0;
+
+uninit:
+ if (msi)
+ hidma_free_msis(dmadev);
+
+ hidma_debug_uninit(dmadev);
+ hidma_ll_uninit(dmadev->lldev);
+dmafree:
+ if (dmadev)
+ hidma_free(dmadev);
+bailout:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+static int hidma_remove(struct platform_device *pdev)
+{
+ struct hidma_dev *dmadev = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ dma_async_device_unregister(&dmadev->ddev);
+ if (!dmadev->lldev->msi_support)
+ devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
+ else
+ hidma_free_msis(dmadev);
+
+ tasklet_kill(&dmadev->task);
+ hidma_sysfs_uninit(dmadev);
+ hidma_debug_uninit(dmadev);
+ hidma_ll_uninit(dmadev->lldev);
+ hidma_free(dmadev);
+
+ dev_info(&pdev->dev, "HI-DMA engine removed\n");
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_acpi_ids[] = {
+ {"QCOM8061"},
+ {"QCOM8062"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
+#endif
+
+static const struct of_device_id hidma_match[] = {
+ {.compatible = "qcom,hidma-1.0",},
+ {.compatible = "qcom,hidma-1.1",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hidma_match);
+
+static struct platform_driver hidma_driver = {
+ .probe = hidma_probe,
+ .remove = hidma_remove,
+ .driver = {
+ .name = "hidma",
+ .of_match_table = hidma_match,
+ .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
+ },
+};
+
+module_platform_driver(hidma_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 000000000000..c7d014235c32
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,165 @@
+/*
+ * Qualcomm Technologies HIDMA data structures
+ *
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QCOM_HIDMA_H
+#define QCOM_HIDMA_H
+
+#include <linux/kfifo.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+
+#define HIDMA_TRE_SIZE 32 /* each TRE is 32 bytes */
+#define HIDMA_TRE_CFG_IDX 0
+#define HIDMA_TRE_LEN_IDX 1
+#define HIDMA_TRE_SRC_LOW_IDX 2
+#define HIDMA_TRE_SRC_HI_IDX 3
+#define HIDMA_TRE_DEST_LOW_IDX 4
+#define HIDMA_TRE_DEST_HI_IDX 5
+
+struct hidma_tre {
+ atomic_t allocated; /* if this channel is allocated */
+ bool queued; /* flag whether this is pending */
+ u16 status; /* status */
+ u32 idx; /* index of the tre */
+ u32 dma_sig; /* signature of the tre */
+ const char *dev_name; /* name of the device */
+ void (*callback)(void *data); /* requester callback */
+ void *data; /* Data associated with this channel */
+ struct hidma_lldev *lldev; /* lldma device pointer */
+ u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */
+ u32 tre_index; /* the offset where this was written */
+ u32 int_flags; /* interrupt flags */
+ u8 err_info; /* error record in this transfer */
+ u8 err_code; /* completion code */
+};
+
+struct hidma_lldev {
+ bool msi_support; /* flag indicating MSI support */
+ bool initialized; /* initialized flag */
+ u8 trch_state; /* trch_state of the device */
+ u8 evch_state; /* evch_state of the device */
+ u8 chidx; /* channel index in the core */
+ u32 nr_tres; /* max number of configs */
+ spinlock_t lock; /* reentrancy */
+ struct hidma_tre *trepool; /* trepool of user configs */
+ struct device *dev; /* device */
+ void __iomem *trca; /* Transfer Channel address */
+ void __iomem *evca; /* Event Channel address */
+ struct hidma_tre
+ **pending_tre_list; /* Pointers to pending TREs */
+ atomic_t pending_tre_count; /* Number of TREs pending */
+
+ void *tre_ring; /* TRE ring */
+ dma_addr_t tre_dma; /* TRE ring to be shared with HW */
+ u32 tre_ring_size; /* Byte size of the ring */
+ u32 tre_processed_off; /* last processed TRE */
+
+ void *evre_ring; /* EVRE ring */
+ dma_addr_t evre_dma; /* EVRE ring to be shared with HW */
+ u32 evre_ring_size; /* Byte size of the ring */
+ u32 evre_processed_off; /* last processed EVRE */
+
+ u32 tre_write_offset; /* TRE write location */
+ struct tasklet_struct task; /* task delivering notifications */
+ DECLARE_KFIFO_PTR(handoff_fifo,
+ struct hidma_tre *); /* pending TREs FIFO */
+};
+
+struct hidma_desc {
+ struct dma_async_tx_descriptor desc;
+ /* linked-list node for this channel */
+ struct list_head node;
+ u32 tre_ch;
+};
+
+struct hidma_chan {
+ bool paused;
+ bool allocated;
+ char dbg_name[16];
+ u32 dma_sig;
+ dma_cookie_t last_success;
+
+ /*
+ * The active descriptor on this channel. The DMA completion
+ * notification uses it to locate the descriptor that initiated
+ * the transfer.
+ */
+ struct dentry *debugfs;
+ struct dentry *stats;
+ struct hidma_dev *dmadev;
+ struct hidma_desc *running;
+
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head active;
+ struct list_head completed;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
+
+struct hidma_dev {
+ int irq;
+ int chidx;
+ u32 nr_descriptors;
+ int msi_virqbase;
+
+ struct hidma_lldev *lldev;
+ void __iomem *dev_trca;
+ struct resource *trca_resource;
+ void __iomem *dev_evca;
+ struct resource *evca_resource;
+
+ /* used to protect the pending channel list */
+ spinlock_t lock;
+ struct dma_device ddev;
+
+ struct dentry *debugfs;
+ struct dentry *stats;
+
+ /* sysfs entry for the channel id */
+ struct device_attribute *chid_attrs;
+
+ /* Task delivering issue_pending */
+ struct tasklet_struct task;
+};
+
+int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
+ const char *dev_name,
+ void (*callback)(void *data), void *data, u32 *tre_ch);
+
+void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
+enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
+bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
+void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
+void hidma_ll_start(struct hidma_lldev *llhndl);
+int hidma_ll_disable(struct hidma_lldev *lldev);
+int hidma_ll_enable(struct hidma_lldev *llhndl);
+void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
+ dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
+int hidma_ll_setup(struct hidma_lldev *lldev);
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
+ void __iomem *trca, void __iomem *evca,
+ u8 chidx);
+int hidma_ll_uninit(struct hidma_lldev *llhndl);
+irqreturn_t hidma_ll_inthandler(int irq, void *arg);
+irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
+void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
+ u8 err_code);
+int hidma_debug_init(struct hidma_dev *dmadev);
+void hidma_debug_uninit(struct hidma_dev *dmadev);
+#endif
diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c
new file mode 100644
index 000000000000..3bdcb8056a36
--- /dev/null
+++ b/drivers/dma/qcom/hidma_dbg.c
@@ -0,0 +1,217 @@
+/*
+ * Qualcomm Technologies HIDMA debug file
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/pm_runtime.h>
+
+#include "hidma.h"
+
+static void hidma_ll_chstats(struct seq_file *s, void *llhndl, u32 tre_ch)
+{
+ struct hidma_lldev *lldev = llhndl;
+ struct hidma_tre *tre;
+ u32 length;
+ dma_addr_t src_start;
+ dma_addr_t dest_start;
+ u32 *tre_local;
+
+ if (tre_ch >= lldev->nr_tres) {
+ dev_err(lldev->dev, "invalid TRE number in chstats:%d", tre_ch);
+ return;
+ }
+ tre = &lldev->trepool[tre_ch];
+ seq_printf(s, "------Channel %d -----\n", tre_ch);
+ seq_printf(s, "allocated=%d\n", atomic_read(&tre->allocated));
+ seq_printf(s, "queued = 0x%x\n", tre->queued);
+ seq_printf(s, "err_info = 0x%x\n", tre->err_info);
+ seq_printf(s, "err_code = 0x%x\n", tre->err_code);
+ seq_printf(s, "status = 0x%x\n", tre->status);
+ seq_printf(s, "idx = 0x%x\n", tre->idx);
+ seq_printf(s, "dma_sig = 0x%x\n", tre->dma_sig);
+ seq_printf(s, "dev_name=%s\n", tre->dev_name);
+ seq_printf(s, "callback=%p\n", tre->callback);
+ seq_printf(s, "data=%p\n", tre->data);
+ seq_printf(s, "tre_index = 0x%x\n", tre->tre_index);
+
+ tre_local = &tre->tre_local[0];
+ src_start = tre_local[HIDMA_TRE_SRC_LOW_IDX];
+ src_start = ((u64) (tre_local[HIDMA_TRE_SRC_HI_IDX]) << 32) + src_start;
+ dest_start = tre_local[HIDMA_TRE_DEST_LOW_IDX];
+ dest_start += ((u64) (tre_local[HIDMA_TRE_DEST_HI_IDX]) << 32);
+ length = tre_local[HIDMA_TRE_LEN_IDX];
+
+ seq_printf(s, "src=%pap\n", &src_start);
+ seq_printf(s, "dest=%pap\n", &dest_start);
+ seq_printf(s, "length = 0x%x\n", length);
+}
+
+static void hidma_ll_devstats(struct seq_file *s, void *llhndl)
+{
+ struct hidma_lldev *lldev = llhndl;
+
+ seq_puts(s, "------Device -----\n");
+ seq_printf(s, "lldev init = 0x%x\n", lldev->initialized);
+ seq_printf(s, "trch_state = 0x%x\n", lldev->trch_state);
+ seq_printf(s, "evch_state = 0x%x\n", lldev->evch_state);
+ seq_printf(s, "chidx = 0x%x\n", lldev->chidx);
+ seq_printf(s, "nr_tres = 0x%x\n", lldev->nr_tres);
+ seq_printf(s, "trca=%p\n", lldev->trca);
+ seq_printf(s, "tre_ring=%p\n", lldev->tre_ring);
+ seq_printf(s, "tre_ring_handle=%pap\n", &lldev->tre_dma);
+ seq_printf(s, "tre_ring_size = 0x%x\n", lldev->tre_ring_size);
+ seq_printf(s, "tre_processed_off = 0x%x\n", lldev->tre_processed_off);
+ seq_printf(s, "pending_tre_count=%d\n",
+ atomic_read(&lldev->pending_tre_count));
+ seq_printf(s, "evca=%p\n", lldev->evca);
+ seq_printf(s, "evre_ring=%p\n", lldev->evre_ring);
+ seq_printf(s, "evre_ring_handle=%pap\n", &lldev->evre_dma);
+ seq_printf(s, "evre_ring_size = 0x%x\n", lldev->evre_ring_size);
+ seq_printf(s, "evre_processed_off = 0x%x\n", lldev->evre_processed_off);
+ seq_printf(s, "tre_write_offset = 0x%x\n", lldev->tre_write_offset);
+}
+
+/*
+ * hidma_chan_stats: display HIDMA channel statistics
+ *
+ * Display the statistics for the current HIDMA virtual channel device.
+ */
+static int hidma_chan_stats(struct seq_file *s, void *unused)
+{
+ struct hidma_chan *mchan = s->private;
+ struct hidma_desc *mdesc;
+ struct hidma_dev *dmadev = mchan->dmadev;
+
+ pm_runtime_get_sync(dmadev->ddev.dev);
+ seq_printf(s, "paused=%u\n", mchan->paused);
+ seq_printf(s, "dma_sig=%u\n", mchan->dma_sig);
+ seq_puts(s, "prepared\n");
+ list_for_each_entry(mdesc, &mchan->prepared, node)
+ hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+ seq_puts(s, "active\n");
+ list_for_each_entry(mdesc, &mchan->active, node)
+ hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+ seq_puts(s, "completed\n");
+ list_for_each_entry(mdesc, &mchan->completed, node)
+ hidma_ll_chstats(s, mchan->dmadev->lldev, mdesc->tre_ch);
+
+ hidma_ll_devstats(s, mchan->dmadev->lldev);
+ pm_runtime_mark_last_busy(dmadev->ddev.dev);
+ pm_runtime_put_autosuspend(dmadev->ddev.dev);
+ return 0;
+}
+
+/*
+ * hidma_dma_info: display HIDMA device info
+ *
+ * Display the info for the current HIDMA device.
+ */
+static int hidma_dma_info(struct seq_file *s, void *unused)
+{
+ struct hidma_dev *dmadev = s->private;
+ resource_size_t sz;
+
+ seq_printf(s, "nr_descriptors=%d\n", dmadev->nr_descriptors);
+ seq_printf(s, "dev_trca=%p\n", &dmadev->dev_trca);
+ seq_printf(s, "dev_trca_phys=%pa\n", &dmadev->trca_resource->start);
+ sz = resource_size(dmadev->trca_resource);
+ seq_printf(s, "dev_trca_size=%pa\n", &sz);
+ seq_printf(s, "dev_evca=%p\n", &dmadev->dev_evca);
+ seq_printf(s, "dev_evca_phys=%pa\n", &dmadev->evca_resource->start);
+ sz = resource_size(dmadev->evca_resource);
+ seq_printf(s, "dev_evca_size=%pa\n", &sz);
+ return 0;
+}
+
+static int hidma_chan_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hidma_chan_stats, inode->i_private);
+}
+
+static int hidma_dma_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hidma_dma_info, inode->i_private);
+}
+
+static const struct file_operations hidma_chan_fops = {
+ .open = hidma_chan_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations hidma_dma_fops = {
+ .open = hidma_dma_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void hidma_debug_uninit(struct hidma_dev *dmadev)
+{
+ debugfs_remove_recursive(dmadev->debugfs);
+}
+
+int hidma_debug_init(struct hidma_dev *dmadev)
+{
+ int rc = 0;
+ int chidx = 0;
+ struct list_head *position = NULL;
+
+ dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
+ if (!dmadev->debugfs) {
+ rc = -ENODEV;
+ return rc;
+ }
+
+ /* walk through the virtual channel list */
+ list_for_each(position, &dmadev->ddev.channels) {
+ struct hidma_chan *chan;
+
+ chan = list_entry(position, struct hidma_chan,
+ chan.device_node);
+ sprintf(chan->dbg_name, "chan%d", chidx);
+ chan->debugfs = debugfs_create_dir(chan->dbg_name,
+ dmadev->debugfs);
+ if (!chan->debugfs) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ chan->stats = debugfs_create_file("stats", S_IRUGO,
+ chan->debugfs, chan,
+ &hidma_chan_fops);
+ if (!chan->stats) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+ chidx++;
+ }
+
+ dmadev->stats = debugfs_create_file("stats", S_IRUGO,
+ dmadev->debugfs, dmadev,
+ &hidma_dma_fops);
+ if (!dmadev->stats) {
+ rc = -ENOMEM;
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ hidma_debug_uninit(dmadev);
+ return rc;
+}
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
new file mode 100644
index 000000000000..6645bdf0d151
--- /dev/null
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -0,0 +1,859 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine low level code
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/iopoll.h>
+#include <linux/kfifo.h>
+#include <linux/bitops.h>
+
+#include "hidma.h"
+
+#define HIDMA_EVRE_SIZE 16 /* each EVRE is 16 bytes */
+
+#define HIDMA_TRCA_CTRLSTS_REG 0x000
+#define HIDMA_TRCA_RING_LOW_REG 0x008
+#define HIDMA_TRCA_RING_HIGH_REG 0x00C
+#define HIDMA_TRCA_RING_LEN_REG 0x010
+#define HIDMA_TRCA_DOORBELL_REG 0x400
+
+#define HIDMA_EVCA_CTRLSTS_REG 0x000
+#define HIDMA_EVCA_INTCTRL_REG 0x004
+#define HIDMA_EVCA_RING_LOW_REG 0x008
+#define HIDMA_EVCA_RING_HIGH_REG 0x00C
+#define HIDMA_EVCA_RING_LEN_REG 0x010
+#define HIDMA_EVCA_WRITE_PTR_REG 0x020
+#define HIDMA_EVCA_DOORBELL_REG 0x400
+
+#define HIDMA_EVCA_IRQ_STAT_REG 0x100
+#define HIDMA_EVCA_IRQ_CLR_REG 0x108
+#define HIDMA_EVCA_IRQ_EN_REG 0x110
+
+#define HIDMA_EVRE_CFG_IDX 0
+
+#define HIDMA_EVRE_ERRINFO_BIT_POS 24
+#define HIDMA_EVRE_CODE_BIT_POS 28
+
+#define HIDMA_EVRE_ERRINFO_MASK GENMASK(3, 0)
+#define HIDMA_EVRE_CODE_MASK GENMASK(3, 0)
+
+#define HIDMA_CH_CONTROL_MASK GENMASK(7, 0)
+#define HIDMA_CH_STATE_MASK GENMASK(7, 0)
+#define HIDMA_CH_STATE_BIT_POS 0x8
+
+#define HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS 0
+#define HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS 1
+#define HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS 9
+#define HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS 10
+#define HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS 11
+#define HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS 14
+
+#define ENABLE_IRQS (BIT(HIDMA_IRQ_EV_CH_EOB_IRQ_BIT_POS) | \
+ BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS))
+
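+/*
+ * Advance a byte offset through a ring, wrapping around to the
+ * beginning once the end of the ring is reached.
+ */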
+#define HIDMA_INCREMENT_ITERATOR(iter, size, ring_size) \
+do { \
+ iter += size; \
+ if (iter >= ring_size) \
+ iter -= ring_size; \
+} while (0)
+
+#define HIDMA_CH_STATE(val) \
+ ((val >> HIDMA_CH_STATE_BIT_POS) & HIDMA_CH_STATE_MASK)
+
+#define HIDMA_ERR_INT_MASK \
+ (BIT(HIDMA_IRQ_TR_CH_INVALID_TRE_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_TRE_RD_RSP_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_EV_CH_WR_RESP_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_RD_ER_BIT_POS) | \
+ BIT(HIDMA_IRQ_TR_CH_DATA_WR_ER_BIT_POS))
+
+enum ch_command {
+ HIDMA_CH_DISABLE = 0,
+ HIDMA_CH_ENABLE = 1,
+ HIDMA_CH_SUSPEND = 2,
+ HIDMA_CH_RESET = 9,
+};
+
+enum ch_state {
+ HIDMA_CH_DISABLED = 0,
+ HIDMA_CH_ENABLED = 1,
+ HIDMA_CH_RUNNING = 2,
+ HIDMA_CH_SUSPENDED = 3,
+ HIDMA_CH_STOPPED = 4,
+};
+
+enum tre_type {
+ HIDMA_TRE_MEMCPY = 3,
+};
+
+enum err_code {
+ HIDMA_EVRE_STATUS_COMPLETE = 1,
+ HIDMA_EVRE_STATUS_ERROR = 4,
+};
+
+static int hidma_is_chan_enabled(int state)
+{
+ switch (state) {
+ case HIDMA_CH_ENABLED:
+ case HIDMA_CH_RUNNING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void hidma_ll_free(struct hidma_lldev *lldev, u32 tre_ch)
+{
+ struct hidma_tre *tre;
+
+ if (tre_ch >= lldev->nr_tres) {
+ dev_err(lldev->dev, "invalid TRE number in free:%d", tre_ch);
+ return;
+ }
+
+ tre = &lldev->trepool[tre_ch];
+ if (atomic_read(&tre->allocated) != true) {
+ dev_err(lldev->dev, "trying to free an unused TRE:%d", tre_ch);
+ return;
+ }
+
+ atomic_set(&tre->allocated, 0);
+}
+
+int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
+ void (*callback)(void *data), void *data, u32 *tre_ch)
+{
+ unsigned int i;
+ struct hidma_tre *tre;
+ u32 *tre_local;
+
+ if (!tre_ch || !lldev)
+ return -EINVAL;
+
+ /* need to have at least one empty spot in the queue */
+ for (i = 0; i < lldev->nr_tres - 1; i++) {
+ if (atomic_add_unless(&lldev->trepool[i].allocated, 1, 1))
+ break;
+ }
+
+ if (i == (lldev->nr_tres - 1))
+ return -ENOMEM;
+
+ tre = &lldev->trepool[i];
+ tre->dma_sig = sig;
+ tre->dev_name = dev_name;
+ tre->callback = callback;
+ tre->data = data;
+ tre->idx = i;
+ tre->status = 0;
+ tre->queued = 0;
+ tre->err_code = 0;
+ tre->err_info = 0;
+ tre->lldev = lldev;
+ tre_local = &tre->tre_local[0];
+ tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
+ tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+ tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16); /* set IEOB */
+ *tre_ch = i;
+ if (callback)
+ callback(data);
+ return 0;
+}
+
+/*
+ * Tasklet bottom half: drain the handoff FIFO and invoke the user
+ * callback for each completed TRE. Multiple TREs may be queued and
+ * waiting in the pending queue.
+ */
+static void hidma_ll_tre_complete(unsigned long arg)
+{
+ struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
+ struct hidma_tre *tre;
+
+ while (kfifo_out(&lldev->handoff_fifo, &tre, 1)) {
+ /* call the user if it has been read by the hardware */
+ if (tre->callback)
+ tre->callback(tre->data);
+ }
+}
+
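+/*
+ * Retire the oldest pending TRE: unlink it from the pending list,
+ * record the error info and completion code reported by the EVRE,
+ * then hand the TRE to the tasklet that invokes the user callback.
+ */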
+static int hidma_post_completed(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
+{
+ struct hidma_tre *tre;
+ unsigned long flags;
+ u32 tre_iterator;
+
+ spin_lock_irqsave(&lldev->lock, flags);
+
+ tre_iterator = lldev->tre_processed_off;
+ tre = lldev->pending_tre_list[tre_iterator / HIDMA_TRE_SIZE];
+ if (!tre) {
+ spin_unlock_irqrestore(&lldev->lock, flags);
+ dev_warn(lldev->dev, "tre_index [%d] and tre out of sync\n",
+ tre_iterator / HIDMA_TRE_SIZE);
+ return -EINVAL;
+ }
+ lldev->pending_tre_list[tre->tre_index] = NULL;
+
+ /*
+ * Keep track of pending TREs that SW is expecting to receive
+ * from HW. We got one now. Decrement our counter.
+ */
+ if (atomic_dec_return(&lldev->pending_tre_count) < 0) {
+ dev_warn(lldev->dev, "tre count mismatch on completion");
+ atomic_set(&lldev->pending_tre_count, 0);
+ }
+
+ HIDMA_INCREMENT_ITERATOR(tre_iterator, HIDMA_TRE_SIZE,
+ lldev->tre_ring_size);
+ lldev->tre_processed_off = tre_iterator;
+ spin_unlock_irqrestore(&lldev->lock, flags);
+
+ tre->err_info = err_info;
+ tre->err_code = err_code;
+ tre->queued = 0;
+
+ kfifo_put(&lldev->handoff_fifo, tre);
+ tasklet_schedule(&lldev->task);
+
+ return 0;
+}
+
+/*
+ * Called to handle the interrupt for the channel.
+ * Return a positive number if TREs or EVREs were consumed on this run
+ * or if there are still pending TREs or EVREs.
+ * Return 0 if there was nothing to consume and nothing is pending.
+ */
+static int hidma_handle_tre_completion(struct hidma_lldev *lldev)
+{
+ u32 evre_ring_size = lldev->evre_ring_size;
+ u32 err_info, err_code, evre_write_off;
+ u32 evre_iterator;
+ u32 num_completed = 0;
+
+ evre_write_off = readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+ evre_iterator = lldev->evre_processed_off;
+
+ if ((evre_write_off > evre_ring_size) ||
+ (evre_write_off % HIDMA_EVRE_SIZE)) {
+ dev_err(lldev->dev, "HW reports invalid EVRE write offset\n");
+ return 0;
+ }
+
+ /*
+ * By the time control reaches here the number of EVREs and TREs
+ * may not match. Only consume the ones that the hardware has reported.
+ */
+ while ((evre_iterator != evre_write_off)) {
+ u32 *current_evre = lldev->evre_ring + evre_iterator;
+ u32 cfg;
+
+ cfg = current_evre[HIDMA_EVRE_CFG_IDX];
+ err_info = cfg >> HIDMA_EVRE_ERRINFO_BIT_POS;
+ err_info &= HIDMA_EVRE_ERRINFO_MASK;
+ err_code =
+ (cfg >> HIDMA_EVRE_CODE_BIT_POS) & HIDMA_EVRE_CODE_MASK;
+
+ if (hidma_post_completed(lldev, err_info, err_code))
+ break;
+
+ HIDMA_INCREMENT_ITERATOR(evre_iterator, HIDMA_EVRE_SIZE,
+ evre_ring_size);
+
+ /*
+ * Read the new event descriptor written by the HW.
+ * As we are processing the delivered events, other events
+ * get queued to the SW for processing.
+ */
+ evre_write_off =
+ readl_relaxed(lldev->evca + HIDMA_EVCA_WRITE_PTR_REG);
+ num_completed++;
+
+ /*
+ * An error interrupt might have arrived while we are processing
+ * the completed interrupt.
+ */
+ if (!hidma_ll_isenabled(lldev))
+ break;
+ }
+
+ if (num_completed) {
+ u32 evre_read_off = (lldev->evre_processed_off +
+ HIDMA_EVRE_SIZE * num_completed);
+ evre_read_off = evre_read_off % evre_ring_size;
+ writel(evre_read_off, lldev->evca + HIDMA_EVCA_DOORBELL_REG);
+
+ /* record the last processed EVRE offset */
+ lldev->evre_processed_off = evre_read_off;
+ }
+
+ return num_completed;
+}
+
+void hidma_cleanup_pending_tre(struct hidma_lldev *lldev, u8 err_info,
+ u8 err_code)
+{
+ while (atomic_read(&lldev->pending_tre_count)) {
+ if (hidma_post_completed(lldev, err_info, err_code))
+ break;
+ }
+}
+
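+/*
+ * Issue a reset command to the transfer and event channels and wait
+ * for both of them to reach the disabled state.
+ */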
+static int hidma_ll_reset(struct hidma_lldev *lldev)
+{
+ u32 val;
+ int ret;
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_RESET << 16;
+ writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+ /*
+ * Allow the DMA logic to quiesce after reset: poll the channel
+ * state every 1ms, timing out after 10ms.
+ */
+ ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+ 1000, 10000);
+ if (ret) {
+ dev_err(lldev->dev, "transfer channel did not reset\n");
+ return ret;
+ }
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_RESET << 16;
+ writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+ /*
+ * Allow the DMA logic to quiesce after reset: poll the channel
+ * state every 1ms, timing out after 10ms.
+ */
+ ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_DISABLED,
+ 1000, 10000);
+ if (ret)
+ return ret;
+
+ lldev->trch_state = HIDMA_CH_DISABLED;
+ lldev->evch_state = HIDMA_CH_DISABLED;
+ return 0;
+}
+
+/*
+ * The interrupt handler for HIDMA will try to consume as many pending
+ * EVRE from the event queue as possible. Each EVRE has an associated
+ * TRE that holds the user interface parameters. EVRE reports the
+ * result of the transaction. Hardware guarantees ordering between EVREs
+ * and TREs. We use the last processed offset to figure out which TRE is
+ * associated with which EVRE. If two TREs are consumed by HW, the EVREs
+ * are in order in the event ring.
+ *
+ * This handler does one pass of EVRE consumption. Other EVREs may be
+ * delivered while we are working; the handler tries to consume those
+ * incoming EVREs one more time before returning.
+ *
+ * For unprocessed EVREs, hardware will trigger another interrupt until
+ * all the interrupt bits are cleared.
+ *
+ * Hardware guarantees that by the time the interrupt is observed, all data
+ * transactions in flight are delivered to their respective places and
+ * are visible to the CPU.
+ *
+ * On demand paging for IOMMU is only supported for PCIe via PRI
+ * (Page Request Interface), not for HIDMA. All other hardware instances
+ * including HIDMA work on pinned DMA addresses.
+ *
+ * HIDMA is not aware of IOMMU presence since it follows the DMA API. All
+ * IOMMU latency will be built into the data movement time. By the time
+ * the interrupt happens, IOMMU lookups + data movement have already taken place.
+ *
+ * In a typical PCI endpoint ISR, the first read traditionally flushes
+ * all outstanding requests to the destination; that concept does not
+ * apply to this hardware.
+ */
+static void hidma_ll_int_handler_internal(struct hidma_lldev *lldev, int cause)
+{
+ if (cause & HIDMA_ERR_INT_MASK) {
+ dev_err(lldev->dev, "error 0x%x, disabling...\n",
+ cause);
+
+ /* Clear out pending interrupts */
+ writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* No further submissions. */
+ hidma_ll_disable(lldev);
+
+ /* The driver completes the transaction and notifies the client. */
+ hidma_cleanup_pending_tre(lldev, 0xFF,
+ HIDMA_EVRE_STATUS_ERROR);
+
+ return;
+ }
+
+ /*
+ * Fine tuned for this HW...
+ *
+ * This ISR has been designed for this particular hardware. Relaxed
+ * read and write accessors are used for performance reasons due to
+ * interrupt delivery guarantees. Do not copy this code blindly and
+ * expect that to work.
+ *
+ * Try to consume as many EVREs as possible.
+ */
+ hidma_handle_tre_completion(lldev);
+
+ /* We consumed TREs or there are pending TREs or EVREs. */
+ writel_relaxed(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+}
+
+irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
+{
+ struct hidma_lldev *lldev = arg;
+ u32 status;
+ u32 enable;
+ u32 cause;
+
+ status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ cause = status & enable;
+
+ while (cause) {
+ hidma_ll_int_handler_internal(lldev, cause);
+
+ /*
+ * Another interrupt might have arrived while we are
+ * processing this one. Read the new cause.
+ */
+ status = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ enable = readl_relaxed(lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ cause = status & enable;
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t hidma_ll_inthandler_msi(int chirq, void *arg, int cause)
+{
+ struct hidma_lldev *lldev = arg;
+
+ hidma_ll_int_handler_internal(lldev, cause);
+ return IRQ_HANDLED;
+}
+
+int hidma_ll_enable(struct hidma_lldev *lldev)
+{
+ u32 val;
+ int ret;
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_ENABLE << 16;
+ writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+ ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+ hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+ 1000, 10000);
+ if (ret) {
+ dev_err(lldev->dev, "event channel did not get enabled\n");
+ return ret;
+ }
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_ENABLE << 16;
+ writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+ ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+ hidma_is_chan_enabled(HIDMA_CH_STATE(val)),
+ 1000, 10000);
+ if (ret) {
+ dev_err(lldev->dev, "transfer channel did not get enabled\n");
+ return ret;
+ }
+
+ lldev->trch_state = HIDMA_CH_ENABLED;
+ lldev->evch_state = HIDMA_CH_ENABLED;
+
+ return 0;
+}
+
+void hidma_ll_start(struct hidma_lldev *lldev)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&lldev->lock, irqflags);
+ writel(lldev->tre_write_offset, lldev->trca + HIDMA_TRCA_DOORBELL_REG);
+ spin_unlock_irqrestore(&lldev->lock, irqflags);
+}
+
+bool hidma_ll_isenabled(struct hidma_lldev *lldev)
+{
+ u32 val;
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ lldev->trch_state = HIDMA_CH_STATE(val);
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ lldev->evch_state = HIDMA_CH_STATE(val);
+
+ /* the engine is enabled only if both channels are enabled */
+ if (hidma_is_chan_enabled(lldev->trch_state) &&
+ hidma_is_chan_enabled(lldev->evch_state))
+ return true;
+
+ return false;
+}
+
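+/*
+ * Post a configured TRE into the transfer ring. The doorbell is not
+ * rung here; hidma_ll_start() publishes the new write offset to the
+ * hardware.
+ */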
+void hidma_ll_queue_request(struct hidma_lldev *lldev, u32 tre_ch)
+{
+ struct hidma_tre *tre;
+ unsigned long flags;
+
+ tre = &lldev->trepool[tre_ch];
+
+ /* copy the TRE into its location in the TRE ring */
+ spin_lock_irqsave(&lldev->lock, flags);
+ tre->tre_index = lldev->tre_write_offset / HIDMA_TRE_SIZE;
+ lldev->pending_tre_list[tre->tre_index] = tre;
+ memcpy(lldev->tre_ring + lldev->tre_write_offset,
+ &tre->tre_local[0], HIDMA_TRE_SIZE);
+ tre->err_code = 0;
+ tre->err_info = 0;
+ tre->queued = 1;
+ atomic_inc(&lldev->pending_tre_count);
+ lldev->tre_write_offset = (lldev->tre_write_offset + HIDMA_TRE_SIZE)
+ % lldev->tre_ring_size;
+ spin_unlock_irqrestore(&lldev->lock, flags);
+}
+
+/*
+ * Note that even though we stop this channel, a transaction already
+ * in flight will still complete and invoke its callback. Suspending
+ * only prevents further requests from being issued.
+ */
+int hidma_ll_disable(struct hidma_lldev *lldev)
+{
+ u32 val;
+ int ret;
+
+ /* The channel needs to be in working state */
+ if (!hidma_ll_isenabled(lldev))
+ return 0;
+
+ val = readl(lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_SUSPEND << 16;
+ writel(val, lldev->trca + HIDMA_TRCA_CTRLSTS_REG);
+
+ /*
+ * Wait for the transfer channel to report the suspended state:
+ * poll every 1ms, up to a 10ms maximum.
+ */
+ ret = readl_poll_timeout(lldev->trca + HIDMA_TRCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+ 1000, 10000);
+ if (ret)
+ return ret;
+
+ val = readl(lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+ val &= ~(HIDMA_CH_CONTROL_MASK << 16);
+ val |= HIDMA_CH_SUSPEND << 16;
+ writel(val, lldev->evca + HIDMA_EVCA_CTRLSTS_REG);
+
+ /*
+ * Wait for the event channel to report the suspended state:
+ * poll every 1ms, up to a 10ms maximum.
+ */
+ ret = readl_poll_timeout(lldev->evca + HIDMA_EVCA_CTRLSTS_REG, val,
+ HIDMA_CH_STATE(val) == HIDMA_CH_SUSPENDED,
+ 1000, 10000);
+ if (ret)
+ return ret;
+
+ lldev->trch_state = HIDMA_CH_SUSPENDED;
+ lldev->evch_state = HIDMA_CH_SUSPENDED;
+ return 0;
+}
+
+void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
+ dma_addr_t src, dma_addr_t dest, u32 len,
+ u32 flags)
+{
+ struct hidma_tre *tre;
+ u32 *tre_local;
+
+ if (tre_ch >= lldev->nr_tres) {
+ dev_err(lldev->dev, "invalid TRE number in transfer params:%d",
+ tre_ch);
+ return;
+ }
+
+ tre = &lldev->trepool[tre_ch];
+ if (atomic_read(&tre->allocated) != true) {
+ dev_err(lldev->dev, "trying to set params on an unused TRE:%d",
+ tre_ch);
+ return;
+ }
+
+ tre_local = &tre->tre_local[0];
+ tre_local[HIDMA_TRE_LEN_IDX] = len;
+ tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
+ tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
+ tre_local[HIDMA_TRE_DEST_LOW_IDX] = lower_32_bits(dest);
+ tre_local[HIDMA_TRE_DEST_HI_IDX] = upper_32_bits(dest);
+ tre->int_flags = flags;
+}
+
+/*
+ * Called during initialization and after an error condition
+ * to restore hardware state.
+ */
+int hidma_ll_setup(struct hidma_lldev *lldev)
+{
+ int rc;
+ u64 addr;
+ u32 val;
+ u32 nr_tres = lldev->nr_tres;
+
+ atomic_set(&lldev->pending_tre_count, 0);
+ lldev->tre_processed_off = 0;
+ lldev->evre_processed_off = 0;
+ lldev->tre_write_offset = 0;
+
+ /* disable interrupts */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* clear all pending interrupts */
+ val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ rc = hidma_ll_reset(lldev);
+ if (rc)
+ return rc;
+
+ /*
+ * Clear all pending interrupts again.
+ * Otherwise, we observe reset complete interrupts.
+ */
+ val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+
+ /* disable interrupts again after reset */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ addr = lldev->tre_dma;
+ writel(lower_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_LOW_REG);
+ writel(upper_32_bits(addr), lldev->trca + HIDMA_TRCA_RING_HIGH_REG);
+ writel(lldev->tre_ring_size, lldev->trca + HIDMA_TRCA_RING_LEN_REG);
+
+ addr = lldev->evre_dma;
+ writel(lower_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_LOW_REG);
+ writel(upper_32_bits(addr), lldev->evca + HIDMA_EVCA_RING_HIGH_REG);
+ writel(HIDMA_EVRE_SIZE * nr_tres,
+ lldev->evca + HIDMA_EVCA_RING_LEN_REG);
+
+ /* configure interrupts */
+ hidma_ll_setup_irq(lldev, lldev->msi_support);
+
+ return hidma_ll_enable(lldev);
+}
+
+void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi)
+{
+ u32 val;
+
+ lldev->msi_support = msi;
+
+ /* disable all interrupts before reconfiguring */
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+
+ /* fall back to the wired IRQ when MSI is not supported */
+ val = readl(lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+ val &= ~0xF;
+ if (!lldev->msi_support)
+ val = val | 0x1;
+ writel(val, lldev->evca + HIDMA_EVCA_INTCTRL_REG);
+
+ /* clear all pending interrupts and enable them */
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+}
+
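+/*
+ * Allocate the TRE pool, the coherent TRE/EVRE rings (oversized so
+ * they can be realigned to their element size) and the handoff FIFO,
+ * then bring the hardware to a known state with hidma_ll_setup().
+ */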
+struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
+ void __iomem *trca, void __iomem *evca,
+ u8 chidx)
+{
+ u32 required_bytes;
+ struct hidma_lldev *lldev;
+ int rc;
+ size_t sz;
+
+ if (!trca || !evca || !dev || !nr_tres)
+ return NULL;
+
+ /* need at least four TREs */
+ if (nr_tres < 4)
+ return NULL;
+
+ /* allocate one extra TRE so that one slot can always stay empty */
+ nr_tres += 1;
+
+ lldev = devm_kzalloc(dev, sizeof(struct hidma_lldev), GFP_KERNEL);
+ if (!lldev)
+ return NULL;
+
+ lldev->evca = evca;
+ lldev->trca = trca;
+ lldev->dev = dev;
+ sz = sizeof(struct hidma_tre);
+ lldev->trepool = devm_kcalloc(lldev->dev, nr_tres, sz, GFP_KERNEL);
+ if (!lldev->trepool)
+ return NULL;
+
+ required_bytes = sizeof(lldev->pending_tre_list[0]);
+ lldev->pending_tre_list = devm_kcalloc(dev, nr_tres, required_bytes,
+ GFP_KERNEL);
+ if (!lldev->pending_tre_list)
+ return NULL;
+
+ sz = (HIDMA_TRE_SIZE + 1) * nr_tres;
+ lldev->tre_ring = dmam_alloc_coherent(dev, sz, &lldev->tre_dma,
+ GFP_KERNEL);
+ if (!lldev->tre_ring)
+ return NULL;
+
+ memset(lldev->tre_ring, 0, (HIDMA_TRE_SIZE + 1) * nr_tres);
+ lldev->tre_ring_size = HIDMA_TRE_SIZE * nr_tres;
+ lldev->nr_tres = nr_tres;
+
+ /* the TRE ring has to be TRE_SIZE aligned */
+ if (!IS_ALIGNED(lldev->tre_dma, HIDMA_TRE_SIZE)) {
+ u8 tre_ring_shift;
+
+ tre_ring_shift = lldev->tre_dma % HIDMA_TRE_SIZE;
+ tre_ring_shift = HIDMA_TRE_SIZE - tre_ring_shift;
+ lldev->tre_dma += tre_ring_shift;
+ lldev->tre_ring += tre_ring_shift;
+ }
+
+ sz = (HIDMA_EVRE_SIZE + 1) * nr_tres;
+ lldev->evre_ring = dmam_alloc_coherent(dev, sz, &lldev->evre_dma,
+ GFP_KERNEL);
+ if (!lldev->evre_ring)
+ return NULL;
+
+ memset(lldev->evre_ring, 0, (HIDMA_EVRE_SIZE + 1) * nr_tres);
+ lldev->evre_ring_size = HIDMA_EVRE_SIZE * nr_tres;
+
+ /* the EVRE ring has to be EVRE_SIZE aligned */
+ if (!IS_ALIGNED(lldev->evre_dma, HIDMA_EVRE_SIZE)) {
+ u8 evre_ring_shift;
+
+ evre_ring_shift = lldev->evre_dma % HIDMA_EVRE_SIZE;
+ evre_ring_shift = HIDMA_EVRE_SIZE - evre_ring_shift;
+ lldev->evre_dma += evre_ring_shift;
+ lldev->evre_ring += evre_ring_shift;
+ }
+ lldev->nr_tres = nr_tres;
+ lldev->chidx = chidx;
+
+ sz = nr_tres * sizeof(struct hidma_tre *);
+ rc = kfifo_alloc(&lldev->handoff_fifo, sz, GFP_KERNEL);
+ if (rc)
+ return NULL;
+
+ rc = hidma_ll_setup(lldev);
+ if (rc)
+ return NULL;
+
+ spin_lock_init(&lldev->lock);
+ tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
+ lldev->initialized = 1;
+ writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ return lldev;
+}
+
+int hidma_ll_uninit(struct hidma_lldev *lldev)
+{
+ u32 required_bytes;
+ int rc = 0;
+ u32 val;
+
+ if (!lldev)
+ return -ENODEV;
+
+ if (!lldev->initialized)
+ return 0;
+
+ lldev->initialized = 0;
+
+ required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
+ tasklet_kill(&lldev->task);
+ memset(lldev->trepool, 0, required_bytes);
+ lldev->trepool = NULL;
+ atomic_set(&lldev->pending_tre_count, 0);
+ lldev->tre_write_offset = 0;
+
+ rc = hidma_ll_reset(lldev);
+
+ /*
+ * Clear all pending interrupts again.
+ * Otherwise, we observe reset complete interrupts.
+ */
+ val = readl(lldev->evca + HIDMA_EVCA_IRQ_STAT_REG);
+ writel(val, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
+ writel(0, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
+ return rc;
+}
+
+enum dma_status hidma_ll_status(struct hidma_lldev *lldev, u32 tre_ch)
+{
+ enum dma_status ret = DMA_ERROR;
+ struct hidma_tre *tre;
+ unsigned long flags;
+ u8 err_code;
+
+ spin_lock_irqsave(&lldev->lock, flags);
+
+ tre = &lldev->trepool[tre_ch];
+ err_code = tre->err_code;
+
+ if (err_code & HIDMA_EVRE_STATUS_COMPLETE)
+ ret = DMA_COMPLETE;
+ else if (err_code & HIDMA_EVRE_STATUS_ERROR)
+ ret = DMA_ERROR;
+ else
+ ret = DMA_IN_PROGRESS;
+ spin_unlock_irqrestore(&lldev->lock, flags);
+
+ return ret;
+}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
new file mode 100644
index 000000000000..f847d32cc4b5
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -0,0 +1,413 @@
+/*
+ * Qualcomm Technologies HIDMA DMA engine Management interface
+ *
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+
+#include "hidma_mgmt.h"
+
+#define HIDMA_QOS_N_OFFSET 0x300
+#define HIDMA_CFG_OFFSET 0x400
+#define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C
+#define HIDMA_MAX_XACTIONS_OFFSET 0x420
+#define HIDMA_HW_VERSION_OFFSET 0x424
+#define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418
+
+#define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0)
+#define HIDMA_WEIGHT_MASK GENMASK(6, 0)
+#define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0)
+#define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0)
+
+#define HIDMA_MAX_WR_XACTIONS_BIT_POS 16
+#define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16
+#define HIDMA_WRR_BIT_POS 8
+#define HIDMA_PRIORITY_BIT_POS 15
+
+#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
+#define HIDMA_MAX_CHANNEL_WEIGHT 15
+
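+/*
+ * Validate the configuration and program it into the management
+ * registers: bus request lengths, outstanding transaction limits,
+ * per-channel weight and priority, and the channel reset timeout.
+ */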
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
+{
+ unsigned int i;
+ u32 val;
+
+ if (!is_power_of_2(mgmtdev->max_write_request) ||
+ (mgmtdev->max_write_request < 128) ||
+ (mgmtdev->max_write_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
+ mgmtdev->max_write_request);
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(mgmtdev->max_read_request) ||
+ (mgmtdev->max_read_request < 128) ||
+ (mgmtdev->max_read_request > 1024)) {
+ dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
+ mgmtdev->max_read_request);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max_wr_xactions cannot be bigger than %ld\n",
+ HIDMA_MAX_WR_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max_rd_xactions cannot be bigger than %ld\n",
+ HIDMA_MAX_RD_XACTIONS_MASK);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ if (mgmtdev->priority[i] > 1) {
+ dev_err(&mgmtdev->pdev->dev,
+ "priority can be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
+ dev_err(&mgmtdev->pdev->dev,
+ "max value of weight can be %d.\n",
+ HIDMA_MAX_CHANNEL_WEIGHT);
+ return -EINVAL;
+ }
+
+ /* weight needs to be at least one */
+ if (mgmtdev->weight[i] == 0)
+ mgmtdev->weight[i] = 1;
+ }
+
+ pm_runtime_get_sync(&mgmtdev->pdev->dev);
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+ val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
+ val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
+ val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
+ val |= mgmtdev->max_read_request;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
+
+ val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+ val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
+ val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
+ val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
+ val |= mgmtdev->max_rd_xactions;
+ writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
+
+ mgmtdev->hw_version =
+ readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
+ mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
+ mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
+
+ for (i = 0; i < mgmtdev->dma_channels; i++) {
+ u32 weight = mgmtdev->weight[i];
+ u32 priority = mgmtdev->priority[i];
+
+ val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
+ val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
+ val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
+ val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
+ writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
+ }
+
+ val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+ val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
+ val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
+ writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
+
+ pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
+ pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
+
+static int hidma_mgmt_probe(struct platform_device *pdev)
+{
+ struct hidma_mgmt_dev *mgmtdev;
+ struct resource *res;
+ void __iomem *virtaddr;
+ int irq;
+ int rc;
+ u32 val;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(virtaddr)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "irq resources not found\n");
+ rc = irq;
+ goto out;
+ }
+
+ mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
+ if (!mgmtdev) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->pdev = pdev;
+ mgmtdev->addrsize = resource_size(res);
+ mgmtdev->virtaddr = virtaddr;
+
+ rc = device_property_read_u32(&pdev->dev, "dma-channels",
+ &mgmtdev->dma_channels);
+ if (rc) {
+ dev_err(&pdev->dev, "number of channels missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev,
+ "channel-reset-timeout-cycles",
+ &mgmtdev->chreset_timeout_cycles);
+ if (rc) {
+ dev_err(&pdev->dev, "channel reset timeout missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
+ &mgmtdev->max_write_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
+ &mgmtdev->max_read_request);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
+ &mgmtdev->max_wr_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-write-transactions missing\n");
+ goto out;
+ }
+
+ rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
+ &mgmtdev->max_rd_xactions);
+ if (rc) {
+ dev_err(&pdev->dev, "max-read-transactions missing\n");
+ goto out;
+ }
+
+ mgmtdev->priority = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->priority),
+ GFP_KERNEL);
+ if (!mgmtdev->priority) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ mgmtdev->weight = devm_kcalloc(&pdev->dev,
+ mgmtdev->dma_channels,
+ sizeof(*mgmtdev->weight), GFP_KERNEL);
+ if (!mgmtdev->weight) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = hidma_mgmt_setup(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "setup failed\n");
+ goto out;
+ }
+
+ /* start the HW */
+ val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+ val |= 1;
+ writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
+
+ rc = hidma_mgmt_init_sys(mgmtdev);
+ if (rc) {
+ dev_err(&pdev->dev, "sysfs setup failed\n");
+ goto out;
+ }
+
+ dev_info(&pdev->dev,
+ "HW rev: %d.%d @ %pa with %d physical channels\n",
+ mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
+ &res->start, mgmtdev->dma_channels);
+
+ platform_set_drvdata(pdev, mgmtdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+ return 0;
+out:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return rc;
+}
+
+#if IS_ENABLED(CONFIG_ACPI)
+static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
+ {"QCOM8060"},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
+#endif
+
+static const struct of_device_id hidma_mgmt_match[] = {
+ {.compatible = "qcom,hidma-mgmt-1.0",},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
+
+static struct platform_driver hidma_mgmt_driver = {
+ .probe = hidma_mgmt_probe,
+ .driver = {
+ .name = "hidma-mgmt",
+ .of_match_table = hidma_mgmt_match,
+ .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
+ },
+};
+
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+static int object_counter;
+
+static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
+{
+ struct platform_device *pdev_parent = of_find_device_by_node(np);
+ struct platform_device_info pdevinfo;
+ struct of_phandle_args out_irq;
+ struct device_node *child;
+ struct resource *res;
+ const __be32 *cell;
+ int ret = 0, size, i, num;
+ u64 addr, addr_size;
+
+ for_each_available_child_of_node(np, child) {
+ struct resource *res_iter;
+ struct platform_device *new_pdev;
+
+ cell = of_get_property(child, "reg", &size);
+ if (!cell) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ size /= sizeof(*cell);
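+ /* num covers all reg tuples plus one extra slot for the event IRQ */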
+ num = size /
+ (of_n_addr_cells(child) + of_n_size_cells(child)) + 1;
+
+ /* allocate a resource array */
+ res = kcalloc(num, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* read each reg value */
+ i = 0;
+ res_iter = res;
+ while (i < size) {
+ addr = of_read_number(&cell[i],
+ of_n_addr_cells(child));
+ i += of_n_addr_cells(child);
+
+ addr_size = of_read_number(&cell[i],
+ of_n_size_cells(child));
+ i += of_n_size_cells(child);
+
+ res_iter->start = addr;
+ res_iter->end = res_iter->start + addr_size - 1;
+ res_iter->flags = IORESOURCE_MEM;
+ res_iter++;
+ }
+
+ ret = of_irq_parse_one(child, 0, &out_irq);
+ if (ret)
+ goto out;
+
+ res_iter->start = irq_create_of_mapping(&out_irq);
+ res_iter->name = "hidma event irq";
+ res_iter->flags = IORESOURCE_IRQ;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.fwnode = &child->fwnode;
+ pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
+ pdevinfo.name = child->name;
+ pdevinfo.id = object_counter++;
+ pdevinfo.res = res;
+ pdevinfo.num_res = num;
+ pdevinfo.data = NULL;
+ pdevinfo.size_data = 0;
+ pdevinfo.dma_mask = DMA_BIT_MASK(64);
+ new_pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(new_pdev)) {
+ ret = PTR_ERR(new_pdev);
+ goto out;
+ }
+ of_node_get(child);
+ new_pdev->dev.of_node = child;
+ of_dma_configure(&new_pdev->dev, child);
+ /*
+ * It is assumed that calling of_msi_configure is safe on
+ * platforms with or without MSI support.
+ */
+ of_msi_configure(&new_pdev->dev, child);
+ of_node_put(child);
+ kfree(res);
+ res = NULL;
+ }
+out:
+ kfree(res);
+
+ return ret;
+}
+#endif
+
+static int __init hidma_mgmt_init(void)
+{
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
+ struct device_node *child;
+
+ for_each_matching_node(child, hidma_mgmt_match) {
+ /* device-tree based firmware: populate child channel devices */
+ hidma_mgmt_of_populate_channels(child);
+ }
+#endif
+ platform_driver_register(&hidma_mgmt_driver);
+
+ return 0;
+}
+module_init(hidma_mgmt_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 000000000000..f7daf33769f4
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@
+/*
+ * Qualcomm Technologies HIDMA Management common header
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+struct hidma_mgmt_dev {
+ u8 hw_version_major;
+ u8 hw_version_minor;
+
+ u32 max_wr_xactions;
+ u32 max_rd_xactions;
+ u32 max_write_request;
+ u32 max_read_request;
+ u32 dma_channels;
+ u32 chreset_timeout_cycles;
+ u32 hw_version;
+ u32 *priority;
+ u32 *weight;
+
+ /* Hardware device constants */
+ void __iomem *virtaddr;
+ resource_size_t addrsize;
+
+ struct kobject **chroots;
+ struct platform_device *pdev;
+};
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev);
+int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev);
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 000000000000..d61f1068a34b
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,295 @@
+/*
+ * Qualcomm Technologies HIDMA Management SYS interface
+ *
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sysfs.h>
+#include <linux/platform_device.h>
+
+#include "hidma_mgmt.h"
+
+struct hidma_chan_attr {
+ struct hidma_mgmt_dev *mdev;
+ int index;
+ struct kobj_attribute attr;
+};
+
+struct hidma_mgmt_fileinfo {
+ char *name;
+ int mode;
+ int (*get)(struct hidma_mgmt_dev *mdev);
+ int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
+};
+
+#define IMPLEMENT_GETSET(name) \
+static int get_##name(struct hidma_mgmt_dev *mdev) \
+{ \
+ return mdev->name; \
+} \
+static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \
+{ \
+ u64 tmp; \
+ int rc; \
+ \
+ tmp = mdev->name; \
+ mdev->name = val; \
+ rc = hidma_mgmt_setup(mdev); \
+ if (rc) \
+ mdev->name = tmp; \
+ return rc; \
+}
+
+#define DECLARE_ATTRIBUTE(name, mode) \
+ {#name, mode, get_##name, set_##name}
+
+IMPLEMENT_GETSET(hw_version_major)
+IMPLEMENT_GETSET(hw_version_minor)
+IMPLEMENT_GETSET(max_wr_xactions)
+IMPLEMENT_GETSET(max_rd_xactions)
+IMPLEMENT_GETSET(max_write_request)
+IMPLEMENT_GETSET(max_read_request)
+IMPLEMENT_GETSET(dma_channels)
+IMPLEMENT_GETSET(chreset_timeout_cycles)
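Hand-expanded for illustration, IMPLEMENT_GETSET(dma_channels) above produces the following pair of accessors; the set path applies the new value, re-runs hidma_mgmt_setup(), and rolls back on failure:

    static int get_dma_channels(struct hidma_mgmt_dev *mdev)
    {
            return mdev->dma_channels;
    }
    static int set_dma_channels(struct hidma_mgmt_dev *mdev, u64 val)
    {
            u64 tmp;
            int rc;

            tmp = mdev->dma_channels;
            mdev->dma_channels = val;
            rc = hidma_mgmt_setup(mdev);
            if (rc)
                    mdev->dma_channels = tmp;       /* roll back on failure */
            return rc;
    }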
+
+static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->priority[i];
+ mdev->priority[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->priority[i] = tmp;
+ return rc;
+}
+
+static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
+{
+ u64 tmp;
+ int rc;
+
+ if (i >= mdev->dma_channels)
+ return -EINVAL;
+
+ tmp = mdev->weight[i];
+ mdev->weight[i] = val;
+ rc = hidma_mgmt_setup(mdev);
+ if (rc)
+ mdev->weight[i] = tmp;
+ return rc;
+}
+
+static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
+ DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO),
+ DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO),
+ DECLARE_ATTRIBUTE(dma_channels, S_IRUGO),
+ DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_write_request, S_IRUGO),
+ DECLARE_ATTRIBUTE(max_read_request, S_IRUGO),
+};
+
+static ssize_t show_values(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ buf[0] = 0;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
+ break;
+ }
+ }
+ return strlen(buf);
+}
+
+static ssize_t set_values(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
+ unsigned long tmp;
+ unsigned int i;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
+ rc = hidma_mgmt_files[i].set(mdev, tmp);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ }
+ return count;
+}
+
+static ssize_t show_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+
+ buf[0] = 0;
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+ if (strcmp(attr->attr.name, "priority") == 0)
+ sprintf(buf, "%d\n", mdev->priority[chattr->index]);
+ else if (strcmp(attr->attr.name, "weight") == 0)
+ sprintf(buf, "%d\n", mdev->weight[chattr->index]);
+
+ return strlen(buf);
+}
+
+static ssize_t set_values_channel(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct hidma_chan_attr *chattr;
+ struct hidma_mgmt_dev *mdev;
+ unsigned long tmp;
+ int rc;
+
+ chattr = container_of(attr, struct hidma_chan_attr, attr);
+ mdev = chattr->mdev;
+
+ rc = kstrtoul(buf, 0, &tmp);
+ if (rc)
+ return rc;
+
+ if (strcmp(attr->attr.name, "priority") == 0) {
+ rc = set_priority(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ } else if (strcmp(attr->attr.name, "weight") == 0) {
+ rc = set_weight(mdev, chattr->index, tmp);
+ if (rc)
+ return rc;
+ }
+ return count;
+}
+
+static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
+{
+ struct device_attribute *attrs;
+ char *name_copy;
+
+ attrs = devm_kmalloc(&dev->pdev->dev,
+ sizeof(struct device_attribute), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ attrs->attr.name = name_copy;
+ attrs->attr.mode = mode;
+ attrs->show = show_values;
+ attrs->store = set_values;
+ sysfs_attr_init(&attrs->attr);
+
+ return device_create_file(&dev->pdev->dev, attrs);
+}
+
+static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
+ int mode, int index,
+ struct kobject *parent)
+{
+ struct hidma_chan_attr *chattr;
+ char *name_copy;
+
+ chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
+ if (!chattr)
+ return -ENOMEM;
+
+ name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
+ if (!name_copy)
+ return -ENOMEM;
+
+ chattr->mdev = mdev;
+ chattr->index = index;
+ chattr->attr.attr.name = name_copy;
+ chattr->attr.attr.mode = mode;
+ chattr->attr.show = show_values_channel;
+ chattr->attr.store = set_values_channel;
+ sysfs_attr_init(&chattr->attr.attr);
+
+ return sysfs_create_file(parent, &chattr->attr.attr);
+}
+
+int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
+{
+ unsigned int i;
+ int rc;
+ int required;
+ struct kobject *chanops;
+
+ required = sizeof(*mdev->chroots) * mdev->dma_channels;
+ mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL);
+ if (!mdev->chroots)
+ return -ENOMEM;
+
+ chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
+ if (!chanops)
+ return -ENOMEM;
+
+ /* create each channel directory here */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ char name[20];
+
+ snprintf(name, sizeof(name), "chan%d", i);
+ mdev->chroots[i] = kobject_create_and_add(name, chanops);
+ if (!mdev->chroots[i])
+ return -ENOMEM;
+ }
+
+ /* populate common parameters */
+ for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
+ rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
+ hidma_mgmt_files[i].mode);
+ if (rc)
+ return rc;
+ }
+
+ /* populate parameters that are per channel */
+ for (i = 0; i < mdev->dma_channels; i++) {
+ rc = create_sysfs_entry_channel(mdev, "priority",
+ (S_IRUGO | S_IWUGO), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+
+ rc = create_sysfs_entry_channel(mdev, "weight",
+ (S_IRUGO | S_IWUGO), i,
+ mdev->chroots[i]);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys);
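For orientation, with two channels the code above would produce roughly this sysfs layout (the parent path depends on how the platform device is named, so treat it as illustrative):

    /sys/devices/platform/hidma-mgmt.0/
        hw_version_major, hw_version_minor, dma_channels, ...   (read-only)
        chanops/
            chan0/priority, chan0/weight    (read-write)
            chan1/priority, chan1/weight    (read-write)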
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 7820d07e7bee..05cfdc8d1ecf 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -118,14 +118,34 @@ struct rcar_dmac_desc_page {
sizeof(struct rcar_dmac_xfer_chunk))
/*
+ * struct rcar_dmac_chan_slave - Slave configuration
+ * @slave_addr: slave memory address
+ * @xfer_size: size (in bytes) of hardware transfers
+ */
+struct rcar_dmac_chan_slave {
+ phys_addr_t slave_addr;
+ unsigned int xfer_size;
+};
+
+/*
+ * struct rcar_dmac_chan_map - Map of the slave device physical address to a DMA address
+ * @addr: slave dma address
+ * @dir: direction of mapping
+ * @slave: slave configuration that is mapped
+ */
+struct rcar_dmac_chan_map {
+ dma_addr_t addr;
+ enum dma_data_direction dir;
+ struct rcar_dmac_chan_slave slave;
+};
+
+/*
* struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
* @chan: base DMA channel object
* @iomem: channel I/O memory base
* @index: index of this channel in the controller
- * @src_xfer_size: size (in bytes) of hardware transfers on the source side
- * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side
- * @src_slave_addr: slave source memory address
- * @dst_slave_addr: slave destination memory address
+ * @src: slave memory address and size on the source side
+ * @dst: slave memory address and size on the destination side
* @mid_rid: hardware MID/RID for the DMA client using this channel
* @lock: protects the channel CHCR register and the desc members
* @desc.free: list of free descriptors
@@ -142,10 +162,9 @@ struct rcar_dmac_chan {
void __iomem *iomem;
unsigned int index;
- unsigned int src_xfer_size;
- unsigned int dst_xfer_size;
- dma_addr_t src_slave_addr;
- dma_addr_t dst_slave_addr;
+ struct rcar_dmac_chan_slave src;
+ struct rcar_dmac_chan_slave dst;
+ struct rcar_dmac_chan_map map;
int mid_rid;
spinlock_t lock;
@@ -793,13 +812,13 @@ static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
case DMA_DEV_TO_MEM:
chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
| RCAR_DMACHCR_RS_DMARS;
- xfer_size = chan->src_xfer_size;
+ xfer_size = chan->src.xfer_size;
break;
case DMA_MEM_TO_DEV:
chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
| RCAR_DMACHCR_RS_DMARS;
- xfer_size = chan->dst_xfer_size;
+ xfer_size = chan->dst.xfer_size;
break;
case DMA_MEM_TO_MEM:
@@ -1021,13 +1040,65 @@ rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
DMA_MEM_TO_MEM, flags, false);
}
+static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
+ enum dma_transfer_direction dir)
+{
+ struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
+ struct rcar_dmac_chan_map *map = &rchan->map;
+ phys_addr_t dev_addr;
+ size_t dev_size;
+ enum dma_data_direction dev_dir;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = rchan->src.slave_addr;
+ dev_size = rchan->src.xfer_size;
+ dev_dir = DMA_TO_DEVICE;
+ } else {
+ dev_addr = rchan->dst.slave_addr;
+ dev_size = rchan->dst.xfer_size;
+ dev_dir = DMA_FROM_DEVICE;
+ }
+
+ /* Reuse current map if possible. */
+ if (dev_addr == map->slave.slave_addr &&
+ dev_size == map->slave.xfer_size &&
+ dev_dir == map->dir)
+ return 0;
+
+ /* Remove old mapping if present. */
+ if (map->slave.xfer_size)
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+
+ /* Create new slave address map. */
+ map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
+ dev_dir, 0);
+
+ if (dma_mapping_error(chan->device->dev, map->addr)) {
+ dev_err(chan->device->dev,
+ "chan%u: failed to map %zx@%pap", rchan->index,
+ dev_size, &dev_addr);
+ return -EIO;
+ }
+
+ dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
+ rchan->index, dev_size, &dev_addr, &map->addr,
+ dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
+
+ map->slave.slave_addr = dev_addr;
+ map->slave.xfer_size = dev_size;
+ map->dir = dev_dir;
+
+ return 0;
+}
+
static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags, void *context)
{
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
- dma_addr_t dev_addr;
/* Someone calling slave DMA on a generic channel? */
if (rchan->mid_rid < 0 || !sg_len) {
@@ -1037,9 +1108,10 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- dev_addr = dir == DMA_DEV_TO_MEM
- ? rchan->src_slave_addr : rchan->dst_slave_addr;
- return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+ if (rcar_dmac_map_slave_addr(chan, dir))
+ return NULL;
+
+ return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
dir, flags, false);
}
@@ -1053,7 +1125,6 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
struct dma_async_tx_descriptor *desc;
struct scatterlist *sgl;
- dma_addr_t dev_addr;
unsigned int sg_len;
unsigned int i;
@@ -1065,6 +1136,9 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
return NULL;
}
+ if (rcar_dmac_map_slave_addr(chan, dir))
+ return NULL;
+
sg_len = buf_len / period_len;
if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
dev_err(chan->device->dev,
@@ -1092,9 +1166,7 @@ rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
sg_dma_len(&sgl[i]) = period_len;
}
- dev_addr = dir == DMA_DEV_TO_MEM
- ? rchan->src_slave_addr : rchan->dst_slave_addr;
- desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr,
+ desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
dir, flags, true);
kfree(sgl);
@@ -1110,10 +1182,10 @@ static int rcar_dmac_device_config(struct dma_chan *chan,
* We could lock this, but you shouldn't be configuring the
* channel, while using it...
*/
- rchan->src_slave_addr = cfg->src_addr;
- rchan->dst_slave_addr = cfg->dst_addr;
- rchan->src_xfer_size = cfg->src_addr_width;
- rchan->dst_xfer_size = cfg->dst_addr_width;
+ rchan->src.slave_addr = cfg->src_addr;
+ rchan->dst.slave_addr = cfg->dst_addr;
+ rchan->src.xfer_size = cfg->src_addr_width;
+ rchan->dst.xfer_size = cfg->dst_addr_width;
return 0;
}
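For context, the src/dst fields filled in above originate from a dmaengine client; a minimal sketch of the client side (fifo_phys is a hypothetical device FIFO address, not part of this patch):

    struct dma_slave_config cfg = {
            .direction      = DMA_DEV_TO_MEM,
            .src_addr       = fifo_phys,                    /* device register to read from */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,   /* becomes src.xfer_size */
    };
    int ret = dmaengine_slave_config(chan, &cfg);   /* lands in rcar_dmac_device_config() */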
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 0e08e665f715..074d21916127 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -538,7 +538,7 @@ static int __init dmi_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
@@ -566,7 +566,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
- pr_debug("DMI: %s\n", dmi_ids_string);
+ pr_info("DMI: %s\n", dmi_ids_string);
return 0;
}
}
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index f52e822d52f8..260bfb15c74a 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -9,8 +9,8 @@
#
KASAN_SANITIZE_runtime-wrappers.o := n
-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o
-obj-$(CONFIG_EFI) += capsule.o
+obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI) += capsule.o memmap.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
@@ -21,4 +21,7 @@ obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += runtime-wrappers.o
obj-$(CONFIG_EFI_STUB) += libstub/
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
+obj-$(CONFIG_ARM) += $(arm-obj-y)
+obj-$(CONFIG_ARM64) += $(arm-obj-y)
obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
new file mode 100644
index 000000000000..0e1386090816
--- /dev/null
+++ b/drivers/firmware/efi/arm-init.c
@@ -0,0 +1,235 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013 - 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+
+#include <asm/efi.h>
+
+u64 efi_system_table;
+
+static int __init is_normal_ram(efi_memory_desc_t *md)
+{
+ if (md->attribute & EFI_MEMORY_WB)
+ return 1;
+ return 0;
+}
+
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
+{
+ efi_memory_desc_t *md;
+
+ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ /* no virtual mapping has been installed by the stub */
+ break;
+ if (md->virt_addr <= addr &&
+ (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+ return md->phys_addr + addr - md->virt_addr;
+ }
+ return addr;
+}
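As a worked example with illustrative values: for a runtime region with phys_addr 0x80000000, virt_addr 0xf0000000 and 16 pages, efi_to_phys(0xf0000010) returns 0x80000010, while an address outside every runtime region falls through the loop and is returned unchanged.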
+
+static int __init uefi_init(void)
+{
+ efi_char16_t *c16;
+ void *config_tables;
+ size_t table_size;
+ char vendor[100] = "unknown";
+ int i, retval;
+
+ efi.systab = early_memremap_ro(efi_system_table,
+ sizeof(efi_system_table_t));
+ if (efi.systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+ if (IS_ENABLED(CONFIG_64BIT))
+ set_bit(EFI_64BIT, &efi.flags);
+
+ /*
+ * Verify the EFI Table
+ */
+ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+ pr_err("System table signature incorrect\n");
+ retval = -EINVAL;
+ goto out;
+ }
+ if ((efi.systab->hdr.revision >> 16) < 2)
+ pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
+ efi.runtime_version = efi.systab->hdr.revision;
+
+ /* Show what we know for posterity */
+ c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
+ sizeof(vendor) * sizeof(efi_char16_t));
+ if (c16) {
+ for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+ vendor[i] = c16[i];
+ vendor[i] = '\0';
+ early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+ }
+
+ pr_info("EFI v%u.%.02u by %s\n",
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff, vendor);
+
+ table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+ config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables),
+ table_size);
+ if (config_tables == NULL) {
+ pr_warn("Unable to map EFI config table array.\n");
+ retval = -ENOMEM;
+ goto out;
+ }
+ retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+ sizeof(efi_config_table_t), NULL);
+
+ early_memunmap(config_tables, table_size);
+out:
+ early_memunmap(efi.systab, sizeof(efi_system_table_t));
+ return retval;
+}
+
+/*
+ * Return true for RAM regions we want to permanently reserve.
+ */
+static __init int is_reserve_region(efi_memory_desc_t *md)
+{
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+ return 0;
+ default:
+ break;
+ }
+ return is_normal_ram(md);
+}
+
+static __init void reserve_regions(void)
+{
+ efi_memory_desc_t *md;
+ u64 paddr, npages, size;
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI memory map:\n");
+
+ /*
+ * Discard memblocks discovered so far: if there are any at this
+ * point, they originate from memory nodes in the DT, and UEFI
+ * uses its own memory map instead.
+ */
+ memblock_dump_all();
+ memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+
+ for_each_efi_memory_desc(md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+ if (efi_enabled(EFI_DBG)) {
+ char buf[64];
+
+ pr_info(" 0x%012llx-0x%012llx %s",
+ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), md));
+ }
+
+ memrange_efi_to_native(&paddr, &npages);
+ size = npages << PAGE_SHIFT;
+
+ if (is_normal_ram(md))
+ early_init_dt_add_memory_arch(paddr, size);
+
+ if (is_reserve_region(md)) {
+ memblock_mark_nomap(paddr, size);
+ if (efi_enabled(EFI_DBG))
+ pr_cont("*");
+ }
+
+ if (efi_enabled(EFI_DBG))
+ pr_cont("\n");
+ }
+}
+
+void __init efi_init(void)
+{
+ struct efi_memory_map_data data;
+ struct efi_fdt_params params;
+
+ /* Grab UEFI information placed in FDT by stub */
+ if (!efi_get_fdt_params(&params))
+ return;
+
+ efi_system_table = params.system_table;
+
+ data.desc_version = params.desc_ver;
+ data.desc_size = params.desc_size;
+ data.size = params.mmap_size;
+ data.phys_map = params.mmap;
+
+ if (efi_memmap_init_early(&data) < 0) {
+ /*
+ * If we are booting via UEFI, the UEFI memory map is the only
+ * description of memory we have, so there is little point in
+ * proceeding if we cannot access it.
+ */
+ panic("Unable to map EFI memory map.\n");
+ }
+
+ if (uefi_init() < 0)
+ return;
+
+ reserve_regions();
+ efi_memattr_init();
+ efi_esrt_init();
+ efi_memmap_unmap();
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI)
+ memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
+}
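The memblock_reserve() above rounds outward to whole pages; with illustrative values mmap = 0x10000800 and mmap_size = 0x1300, the reservation starts at 0x10000000 and spans PAGE_ALIGN(0x1300 + 0x800) = 0x2000 bytes, covering both pages the map touches.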
+
+static int __init register_gop_device(void)
+{
+ void *pd;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return 0;
+
+ /* the efifb driver accesses screen_info directly, no need to pass it */
+ pd = platform_device_register_simple("efi-framebuffer", 0, NULL, 0);
+ return PTR_ERR_OR_ZERO(pd);
+}
+subsys_initcall(register_gop_device);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
new file mode 100644
index 000000000000..cd21f9646fb1
--- /dev/null
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -0,0 +1,144 @@
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/efi.h>
+#include <asm/mmu.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+extern u64 efi_system_table;
+
+static struct mm_struct efi_mm = {
+ .mm_rb = RB_ROOT,
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+ .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+};
+
+static bool __init efi_virtmap_init(void)
+{
+ efi_memory_desc_t *md;
+ bool systab_found;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ systab_found = false;
+ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
+
+ ret = efi_create_mapping(&efi_mm, md);
+ if (!ret) {
+ pr_info(" EFI remap %pa => %p\n",
+ &phys, (void *)(unsigned long)md->virt_addr);
+ } else {
+ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
+ &phys, ret);
+ return false;
+ }
+ /*
+ * If this entry covers the address of the UEFI system table,
+ * calculate and record its virtual address.
+ */
+ if (efi_system_table >= phys &&
+ efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
+ efi.systab = (void *)(unsigned long)(efi_system_table -
+ phys + md->virt_addr);
+ systab_found = true;
+ }
+ }
+ if (!systab_found) {
+ pr_err("No virtual mapping found for the UEFI System Table\n");
+ return false;
+ }
+
+ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+ return false;
+
+ return true;
+}
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+static int __init arm_enable_runtime_services(void)
+{
+ u64 mapsize;
+
+ if (!efi_enabled(EFI_BOOT)) {
+ pr_info("EFI services will not be available.\n");
+ return 0;
+ }
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return 0;
+ }
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return -ENOMEM;
+ }
+
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ if (!efi_virtmap_init()) {
+ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+ /* Set up runtime services function pointers */
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+ return 0;
+}
+early_initcall(arm_enable_runtime_services);
+
+void efi_virtmap_load(void)
+{
+ preempt_disable();
+ efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+ efi_set_pgd(current->active_mm);
+ preempt_enable();
+}
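These two helpers are meant to bracket every runtime-service invocation so the firmware call runs under the EFI page tables; a sketch of the expected call pattern (the actual dispatch macro is arch-specific and elided here):

    efi_virtmap_load();             /* switch to the efi_mm page tables */
    /* ... invoke the EFI runtime service via the arch call wrapper ... */
    efi_virtmap_unload();           /* restore current->active_mm */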
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index c99c24bc79b0..9ae6c116c474 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/efi.h>
+#include <linux/vmalloc.h>
#define NO_FURTHER_WRITE_ACTION -1
@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
int ret;
void *cap_hdr_temp;
- cap_hdr_temp = kmap(cap_info->pages[0]);
+ cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
+ VM_MAP, PAGE_KERNEL);
if (!cap_hdr_temp) {
- pr_debug("%s: kmap() failed\n", __func__);
+ pr_debug("%s: vmap() failed\n", __func__);
return -EFAULT;
}
ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
- kunmap(cap_info->pages[0]);
+ vunmap(cap_hdr_temp);
if (ret) {
pr_err("%s: efi_capsule_update() failed\n", __func__);
return ret;
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index e530540f368c..7e2212fba390 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -197,9 +197,9 @@ efi_capsule_update_locked(efi_capsule_header_t *capsule,
* map the capsule described by @capsule with its data in @pages and
* send it to the firmware via the UpdateCapsule() runtime service.
*
- * @capsule must be a virtual mapping of the first page in @pages
- * (@pages[0]) in the kernel address space. That is, a
- * capsule_header_t that describes the entire contents of the capsule
+ * @capsule must be a virtual mapping of the complete capsule update in the
+ * kernel address space, as the capsule can be consumed immediately.
+ * A capsule_header_t that describes the entire contents of the capsule
* must be at the start of the first data page.
*
* Even though this function will validate that the firmware supports
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 3b52677f459a..bb98b3e4c3b8 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -24,6 +24,9 @@
#include <linux/of_fdt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
@@ -41,6 +44,7 @@ struct efi __read_mostly efi = {
.config_table = EFI_INVALID_TABLE_ADDR,
.esrt = EFI_INVALID_TABLE_ADDR,
.properties_table = EFI_INVALID_TABLE_ADDR,
+ .mem_attr_table = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -248,56 +252,31 @@ subsys_initcall(efisubsys_init);
/*
* Find the efi memory descriptor for a given physical address. Given a
- * physicall address, determine if it exists within an EFI Memory Map entry,
+ * physical address, determine if it exists within an EFI Memory Map entry,
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
- struct efi_memory_map *map = efi.memmap;
- phys_addr_t p, e;
+ efi_memory_desc_t *md;
if (!efi_enabled(EFI_MEMMAP)) {
pr_err_once("EFI_MEMMAP is not enabled.\n");
return -EINVAL;
}
- if (!map) {
- pr_err_once("efi.memmap is not set.\n");
- return -EINVAL;
- }
if (!out_md) {
pr_err_once("out_md is null.\n");
return -EINVAL;
}
- if (WARN_ON_ONCE(!map->phys_map))
- return -EINVAL;
- if (WARN_ON_ONCE(map->nr_map == 0) || WARN_ON_ONCE(map->desc_size == 0))
- return -EINVAL;
- e = map->phys_map + map->nr_map * map->desc_size;
- for (p = map->phys_map; p < e; p += map->desc_size) {
- efi_memory_desc_t *md;
+ for_each_efi_memory_desc(md) {
u64 size;
u64 end;
- /*
- * If a driver calls this after efi_free_boot_services,
- * ->map will be NULL, and the target may also not be mapped.
- * So just always get our own virtual map on the CPU.
- *
- */
- md = early_memremap(p, sizeof (*md));
- if (!md) {
- pr_err_once("early_memremap(%pa, %zu) failed.\n",
- &p, sizeof (*md));
- return -ENOMEM;
- }
-
if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
md->type != EFI_BOOT_SERVICES_DATA &&
md->type != EFI_RUNTIME_SERVICES_DATA) {
- early_memunmap(md, sizeof (*md));
continue;
}
@@ -305,11 +284,8 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
memcpy(out_md, md, sizeof(*out_md));
- early_memunmap(md, sizeof (*md));
return 0;
}
-
- early_memunmap(md, sizeof (*md));
}
pr_err_once("requested map not found.\n");
return -ENOENT;
@@ -325,36 +301,33 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
return end;
}
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM. So, look it up in the existing EFI memory map instead. Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
+void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
+
+/**
+ * efi_mem_reserve - Reserve an EFI memory region
+ * @addr: Physical address to reserve
+ * @size: Size of reservation
+ *
+ * Mark a region as reserved from general kernel allocation and
+ * prevent it being released by efi_free_boot_services().
+ *
+ * This function should be called by drivers once they've parsed EFI
+ * configuration tables to figure out where their data lives, e.g.
+ * efi_esrt_init().
*/
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
+void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
- struct efi_memory_map *map;
- void *p;
- map = efi.memmap;
- if (!map)
- return NULL;
- if (WARN_ON(!map->map))
- return NULL;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- efi_memory_desc_t *md = p;
- u64 size = md->num_pages << EFI_PAGE_SHIFT;
- u64 end = md->phys_addr + size;
- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
- md->type != EFI_BOOT_SERVICES_CODE &&
- md->type != EFI_BOOT_SERVICES_DATA)
- continue;
- if (!md->virt_addr)
- continue;
- if (phys_addr >= md->phys_addr && phys_addr < end) {
- phys_addr += md->virt_addr - md->phys_addr;
- return (__force void __iomem *)(unsigned long)phys_addr;
- }
- }
- return NULL;
+ if (!memblock_is_region_reserved(addr, size))
+ memblock_reserve(addr, size);
+
+ /*
+ * Some architectures (x86) reserve all boot services ranges
+ * until efi_free_boot_services() because of buggy firmware
+ * implementations. This means the above memblock_reserve() is
+ * superfluous on x86; what this call needs to do instead is
+ * ensure that the @addr, @size range is not freed.
+ */
+ efi_arch_mem_reserve(addr, size);
}
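A minimal sketch of the intended calling pattern, assuming a driver that has already discovered its table address from a config table (table_phys and table_size are hypothetical):

    efi_memory_desc_t md;

    /* confirm the table lives in a known region, then pin it so
     * efi_free_boot_services() will not hand it to the allocator */
    if (efi_mem_desc_lookup(table_phys, &md) == 0)
            efi_mem_reserve(table_phys, table_size);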
static __initdata efi_config_table_type_t common_tables[] = {
@@ -368,6 +341,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
+ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{NULL_GUID, NULL, NULL},
};
@@ -646,16 +620,12 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
*/
u64 __weak efi_mem_attributes(unsigned long phys_addr)
{
- struct efi_memory_map *map;
efi_memory_desc_t *md;
- void *p;
if (!efi_enabled(EFI_MEMMAP))
return 0;
- map = efi.memmap;
- for (p = map->map; p < map->map_end; p += map->desc_size) {
- md = p;
+ for_each_efi_memory_desc(md) {
if ((md->phys_addr <= phys_addr) &&
(phys_addr < (md->phys_addr +
(md->num_pages << EFI_PAGE_SHIFT))))
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 22c5285f7705..9560729255db 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/efi.h>
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -238,7 +239,7 @@ static struct attribute_group esrt_attr_group = {
};
/*
- * remap the table, copy it to kmalloced pages, and unmap it.
+ * remap the table, validate it, mark it reserved and unmap it.
*/
void __init efi_esrt_init(void)
{
@@ -338,7 +339,7 @@ void __init efi_esrt_init(void)
end = esrt_data + size;
pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
- memblock_reserve(esrt_data, esrt_data_size);
+ efi_mem_reserve(esrt_data, esrt_data_size);
pr_debug("esrt-init: loaded.\n");
err_memunmap:
@@ -385,28 +386,18 @@ static void cleanup_entry_list(void)
static int __init esrt_sysfs_init(void)
{
int error;
- struct efi_system_resource_table __iomem *ioesrt;
pr_debug("esrt-sysfs: loading.\n");
if (!esrt_data || !esrt_data_size)
return -ENOSYS;
- ioesrt = ioremap(esrt_data, esrt_data_size);
- if (!ioesrt) {
- pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data,
- esrt_data_size);
- return -ENOMEM;
- }
-
- esrt = kmalloc(esrt_data_size, GFP_KERNEL);
+ esrt = memremap(esrt_data, esrt_data_size, MEMREMAP_WB);
if (!esrt) {
- pr_err("kmalloc failed. (wanted %zu bytes)\n", esrt_data_size);
- iounmap(ioesrt);
+ pr_err("memremap(%pa, %zu) failed.\n", &esrt_data,
+ esrt_data_size);
return -ENOMEM;
}
- memcpy_fromio(esrt, ioesrt, esrt_data_size);
-
esrt_kobj = kobject_create_and_add("esrt", efi_kobj);
if (!esrt_kobj) {
pr_err("Firmware table registration failed.\n");
@@ -432,8 +423,6 @@ static int __init esrt_sysfs_init(void)
if (error)
goto err_cleanup_list;
- memblock_remove(esrt_data, esrt_data_size);
-
pr_debug("esrt-sysfs: loaded.\n");
return 0;
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index ed3a854950cc..6c7d60c239b5 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -35,17 +35,13 @@
#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
-struct fake_mem {
- struct range range;
- u64 attribute;
-};
-static struct fake_mem fake_mems[EFI_MAX_FAKEMEM];
+static struct efi_mem_range fake_mems[EFI_MAX_FAKEMEM];
static int nr_fake_mem;
static int __init cmp_fake_mem(const void *x1, const void *x2)
{
- const struct fake_mem *m1 = x1;
- const struct fake_mem *m2 = x2;
+ const struct efi_mem_range *m1 = x1;
+ const struct efi_mem_range *m2 = x2;
if (m1->range.start < m2->range.start)
return -1;
@@ -56,137 +52,44 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
void __init efi_fake_memmap(void)
{
- u64 start, end, m_start, m_end, m_attr;
- int new_nr_map = memmap.nr_map;
+ int new_nr_map = efi.memmap.nr_map;
efi_memory_desc_t *md;
phys_addr_t new_memmap_phy;
void *new_memmap;
- void *old, *new;
int i;
- if (!nr_fake_mem || !efi_enabled(EFI_MEMMAP))
+ if (!nr_fake_mem)
return;
/* count up the number of EFI memory descriptor */
- for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
- md = old;
- start = md->phys_addr;
- end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
-
- for (i = 0; i < nr_fake_mem; i++) {
- /* modifying range */
- m_start = fake_mems[i].range.start;
- m_end = fake_mems[i].range.end;
-
- if (m_start <= start) {
- /* split into 2 parts */
- if (start < m_end && m_end < end)
- new_nr_map++;
- }
- if (start < m_start && m_start < end) {
- /* split into 3 parts */
- if (m_end < end)
- new_nr_map += 2;
- /* split into 2 parts */
- if (end <= m_end)
- new_nr_map++;
- }
+ for (i = 0; i < nr_fake_mem; i++) {
+ for_each_efi_memory_desc(md) {
+ struct range *r = &fake_mems[i].range;
+
+ new_nr_map += efi_memmap_split_count(md, r);
}
}
/* allocate memory for new EFI memmap */
- new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
- PAGE_SIZE);
+ new_memmap_phy = efi_memmap_alloc(new_nr_map);
if (!new_memmap_phy)
return;
/* create new EFI memmap */
new_memmap = early_memremap(new_memmap_phy,
- memmap.desc_size * new_nr_map);
+ efi.memmap.desc_size * new_nr_map);
if (!new_memmap) {
- memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
+ memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
return;
}
- for (old = memmap.map, new = new_memmap;
- old < memmap.map_end;
- old += memmap.desc_size, new += memmap.desc_size) {
-
- /* copy original EFI memory descriptor */
- memcpy(new, old, memmap.desc_size);
- md = new;
- start = md->phys_addr;
- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
-
- for (i = 0; i < nr_fake_mem; i++) {
- /* modifying range */
- m_start = fake_mems[i].range.start;
- m_end = fake_mems[i].range.end;
- m_attr = fake_mems[i].attribute;
-
- if (m_start <= start && end <= m_end)
- md->attribute |= m_attr;
-
- if (m_start <= start &&
- (start < m_end && m_end < end)) {
- /* first part */
- md->attribute |= m_attr;
- md->num_pages = (m_end - md->phys_addr + 1) >>
- EFI_PAGE_SHIFT;
- /* latter part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
- md = new;
- md->phys_addr = m_end + 1;
- md->num_pages = (end - md->phys_addr + 1) >>
- EFI_PAGE_SHIFT;
- }
-
- if ((start < m_start && m_start < end) && m_end < end) {
- /* first part */
- md->num_pages = (m_start - md->phys_addr) >>
- EFI_PAGE_SHIFT;
- /* middle part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
- md = new;
- md->attribute |= m_attr;
- md->phys_addr = m_start;
- md->num_pages = (m_end - m_start + 1) >>
- EFI_PAGE_SHIFT;
- /* last part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
- md = new;
- md->phys_addr = m_end + 1;
- md->num_pages = (end - m_end) >>
- EFI_PAGE_SHIFT;
- }
-
- if ((start < m_start && m_start < end) &&
- (end <= m_end)) {
- /* first part */
- md->num_pages = (m_start - md->phys_addr) >>
- EFI_PAGE_SHIFT;
- /* latter part */
- new += memmap.desc_size;
- memcpy(new, old, memmap.desc_size);
- md = new;
- md->phys_addr = m_start;
- md->num_pages = (end - md->phys_addr + 1) >>
- EFI_PAGE_SHIFT;
- md->attribute |= m_attr;
- }
- }
- }
+ for (i = 0; i < nr_fake_mem; i++)
+ efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
/* swap into new EFI memmap */
- efi_unmap_memmap();
- memmap.map = new_memmap;
- memmap.phys_map = new_memmap_phy;
- memmap.nr_map = new_nr_map;
- memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
- set_bit(EFI_MEMMAP, &efi.flags);
+ early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+
+ efi_memmap_install(new_memmap_phy, new_nr_map);
/* print new EFI memmap */
efi_print_memmap();
@@ -224,7 +127,7 @@ static int __init setup_fake_mem(char *p)
p++;
}
- sort(fake_mems, nr_fake_mem, sizeof(struct fake_mem),
+ sort(fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
cmp_fake_mem, NULL);
for (i = 0; i < nr_fake_mem; i++)
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index ff040d53515a..8e3d360f214c 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -34,6 +34,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
+lib-$(CONFIG_ARM) += arm32-stub.o
lib-$(CONFIG_ARM64) += arm64-stub.o random.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
@@ -67,3 +68,11 @@ quiet_cmd_stubcopy = STUBCPY $@
$(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \
&& (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
rm -f $@; /bin/false); else /bin/false; fi
+
+#
+# ARM discards the .data section because it disallows r/w data in the
+# decompressor. So move our .data to .data.efistub, which is preserved
+# explicitly by the decompressor linker script.
+#
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 83f05757fc23..cb0a17f36aaa 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -212,6 +212,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
pr_efi(sys_table, "Booting Linux Kernel...\n");
+ status = check_platform_features(sys_table);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
/*
* Get a handle to the loaded image protocol. This is used to get
* information about the running image, such as size and the command
@@ -340,8 +344,10 @@ fail:
* The value chosen is the largest non-zero power of 2 suitable for this purpose
* both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
* be mapped efficiently.
+ * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
+ * map everything below 1 GB.
*/
-#define EFI_RT_VIRTUAL_BASE 0x40000000
+#define EFI_RT_VIRTUAL_BASE SZ_512M
static int cmp_mem_desc(const void *l, const void *r)
{
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
new file mode 100644
index 000000000000..6f42be4d0084
--- /dev/null
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/efi.h>
+#include <asm/efi.h>
+
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+ int block;
+
+ /* non-LPAE kernels can run anywhere */
+ if (!IS_ENABLED(CONFIG_ARM_LPAE))
+ return EFI_SUCCESS;
+
+ /* LPAE kernels need compatible hardware */
+ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
+ if (block < 5) {
+ pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
+{
+ unsigned long nr_pages;
+ efi_status_t status;
+ /* Use alloc_addr to translate between types */
+ efi_physical_addr_t alloc_addr;
+
+ /*
+ * Verify that the DRAM base address is compatible with the ARM
+ * boot protocol, which determines the base of DRAM by masking
+ * off the low 27 bits of the address at which the zImage is
+ * loaded. These assumptions are made by the decompressor,
+ * before any memory map is available.
+ */
+ dram_base = round_up(dram_base, SZ_128M);
+
+ /*
+ * Reserve memory for the uncompressed kernel image. This is
+ * all that prevents any future allocations from conflicting
+ * with the kernel. Since we can't tell from the compressed
+ * image how much DRAM the kernel actually uses (due to BSS
+ * size uncertainty) we allocate the maximum possible size.
+ * Do this very early, as prints can cause memory allocations
+ * that may conflict with this.
+ */
+ alloc_addr = dram_base;
+ *reserve_size = MAX_UNCOMP_KERNEL_SIZE;
+ nr_pages = round_up(*reserve_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
+ status = sys_table->boottime->allocate_pages(EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA,
+ nr_pages, &alloc_addr);
+ if (status != EFI_SUCCESS) {
+ *reserve_size = 0;
+ pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
+ return status;
+ }
+ *reserve_addr = alloc_addr;
+
+ /*
+ * Relocate the zImage, so that it appears in the lowest 128 MB
+ * memory window.
+ */
+ *image_size = image->image_size;
+ status = efi_relocate_kernel(sys_table, image_addr, *image_size,
+ *image_size,
+ dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Failed to relocate kernel.\n");
+ efi_free(sys_table, *reserve_size, *reserve_addr);
+ *reserve_size = 0;
+ return status;
+ }
+
+ /*
+ * Check to see if we were able to allocate memory low enough
+ * in memory. The kernel determines the base of DRAM from the
+ * address at which the zImage is loaded.
+ */
+ if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
+ pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
+ efi_free(sys_table, *reserve_size, *reserve_addr);
+ *reserve_size = 0;
+ efi_free(sys_table, *image_size, *image_addr);
+ *image_size = 0;
+ return EFI_LOAD_ERROR;
+ }
+ return EFI_SUCCESS;
+}
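As a worked example of the masking described above: a zImage loaded at 0x82008000 implies a derived DRAM base of 0x80000000 (0x82008000 with the low 27 bits cleared), which is why the stub rounds dram_base up to a 128 MB boundary before reserving and relocating.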
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index e0e6b74fef8f..a90f6459f5c6 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -12,18 +12,38 @@
#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/sections.h>
+#include <asm/sysreg.h>
#include "efistub.h"
extern bool __nokaslr;
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
- unsigned long *image_size,
- unsigned long *reserve_addr,
- unsigned long *reserve_size,
- unsigned long dram_base,
- efi_loaded_image_t *image)
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
+{
+ u64 tg;
+
+ /* UEFI mandates support for 4 KB granularity, no need to check */
+ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
+ return EFI_SUCCESS;
+
+ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
+ if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
+ pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
+ else
+ pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
+ return EFI_UNSUPPORTED;
+ }
+ return EFI_SUCCESS;
+}
+
+efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image)
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 29ed2f9b218c..aded10662020 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
#endif
+#define EFI_MMAP_NR_SLACK_SLOTS 8
+
struct file_info {
efi_file_handle_t *handle;
u64 size;
@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_table_arg, char *str)
}
}
+static inline bool mmap_has_headroom(unsigned long buff_size,
+ unsigned long map_size,
+ unsigned long desc_size)
+{
+ unsigned long slack = buff_size - map_size;
+
+ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
+}
+
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr)
+ struct efi_boot_memmap *map)
{
efi_memory_desc_t *m = NULL;
efi_status_t status;
unsigned long key;
u32 desc_version;
- *map_size = sizeof(*m) * 32;
+ *map->desc_size = sizeof(*m);
+ *map->map_size = *map->desc_size * 32;
+ *map->buff_size = *map->map_size;
again:
- /*
- * Add an additional efi_memory_desc_t because we're doing an
- * allocation which may be in a new descriptor region.
- */
- *map_size += sizeof(*m);
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
- *map_size, (void **)&m);
+ *map->map_size, (void **)&m);
if (status != EFI_SUCCESS)
goto fail;
- *desc_size = 0;
+ *map->desc_size = 0;
key = 0;
- status = efi_call_early(get_memory_map, map_size, m,
- &key, desc_size, &desc_version);
- if (status == EFI_BUFFER_TOO_SMALL) {
+ status = efi_call_early(get_memory_map, map->map_size, m,
+ &key, map->desc_size, &desc_version);
+ if (status == EFI_BUFFER_TOO_SMALL ||
+ !mmap_has_headroom(*map->buff_size, *map->map_size,
+ *map->desc_size)) {
efi_call_early(free_pool, m);
+ /*
+ * Make sure there are a few entries of headroom so that the
+ * buffer can be reused for a new map after allocations are
+ * no longer permitted. It is unlikely that the map will grow to
+ * exceed this headroom once we are ready to trigger
+ * ExitBootServices().
+ */
+ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
+ *map->buff_size = *map->map_size;
goto again;
}
if (status != EFI_SUCCESS)
efi_call_early(free_pool, m);
- if (key_ptr && status == EFI_SUCCESS)
- *key_ptr = key;
- if (desc_ver && status == EFI_SUCCESS)
- *desc_ver = desc_version;
+ if (map->key_ptr && status == EFI_SUCCESS)
+ *map->key_ptr = key;
+ if (map->desc_ver && status == EFI_SUCCESS)
+ *map->desc_ver = desc_version;
fail:
- *map = m;
+ *map->map = m;
return status;
}
@@ -113,22 +128,31 @@ fail:
unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
{
efi_status_t status;
- unsigned long map_size;
+ unsigned long map_size, buff_size;
unsigned long membase = EFI_ERROR;
struct efi_memory_map map;
efi_memory_desc_t *md;
+ struct efi_boot_memmap boot_map;
- status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
- &map_size, &map.desc_size, NULL, NULL);
+ boot_map.map = (efi_memory_desc_t **)&map.map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &map.desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
return membase;
map.map_end = map.map + map_size;
- for_each_efi_memory_desc(&map, md)
- if (md->attribute & EFI_MEMORY_WB)
+ for_each_efi_memory_desc_in_map(&map, md) {
+ if (md->attribute & EFI_MEMORY_WB) {
if (membase > md->phys_addr)
membase = md->phys_addr;
+ }
+ }
efi_call_early(free_pool, map.map);
@@ -142,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr, unsigned long max)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
u64 max_addr = 0;
int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
- NULL, NULL);
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -228,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
efi_memory_desc_t *map;
efi_status_t status;
unsigned long nr_pages;
int i;
+ struct efi_boot_memmap boot_map;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
- NULL, NULL);
+ status = efi_get_memory_map(sys_table_arg, &boot_map);
if (status != EFI_SUCCESS)
goto fail;
@@ -702,3 +740,76 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
*cmd_line_len = options_bytes;
return (char *)cmdline_addr;
}
+
+/*
+ * Handle calling ExitBootServices according to the requirements set out by the
+ * spec. Obtains the current memory map, and returns that info after calling
+ * ExitBootServices. The client must specify a function to perform any
+ * processing of the memory map data prior to ExitBootServices. A client
+ * specific structure may be passed to the function via priv. The client
+ * function may be called multiple times.
+ */
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func)
+{
+ efi_status_t status;
+
+ status = efi_get_memory_map(sys_table_arg, map);
+
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(sys_table_arg, map, priv);
+ if (status != EFI_SUCCESS)
+ goto free_map;
+
+ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+
+ if (status == EFI_INVALID_PARAMETER) {
+ /*
+ * The memory map changed between efi_get_memory_map() and
+ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
+ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
+ * updated map, and try again. The spec implies one retry
+ * should be sufficient, which is confirmed against the EDK2
+ * implementation. Per the spec, we can only invoke
+ * get_memory_map() and exit_boot_services() - we cannot allocate,
+ * so efi_get_memory_map() cannot be used, and we must reuse
+ * the buffer. For all practical purposes, the headroom in the
+ * buffer should account for any changes in the map so the call
+ * to get_memory_map() is expected to succeed here.
+ */
+ *map->map_size = *map->buff_size;
+ status = efi_call_early(get_memory_map,
+ map->map_size,
+ *map->map,
+ map->key_ptr,
+ map->desc_size,
+ map->desc_ver);
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = priv_func(sys_table_arg, map, priv);
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
+ }
+
+ /* exit_boot_services() was called, thus cannot free */
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+ return EFI_SUCCESS;
+
+free_map:
+ efi_call_early(free_pool, *map->map);
+fail:
+ return status;
+}
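
As a minimal usage sketch, assuming only the declarations above: a caller wires every struct efi_boot_memmap field to a caller-owned local before handing over control, and key_ptr in particular must be non-NULL since *map->key_ptr is dereferenced on the retry path. The function names below are illustrative, not part of the tree.

static efi_status_t noop_func(efi_system_table_t *sys_table_arg,
			      struct efi_boot_memmap *map, void *priv)
{
	/* inspect or snapshot *map->map here, before ExitBootServices() */
	return EFI_SUCCESS;
}

static efi_status_t example_exit(efi_system_table_t *sys_table, void *handle)
{
	unsigned long map_size, desc_size, buff_size, mmap_key;
	efi_memory_desc_t *memory_map;
	u32 desc_ver;
	struct efi_boot_memmap map;

	map.map = &memory_map;
	map.map_size = &map_size;
	map.desc_size = &desc_size;
	map.desc_ver = &desc_ver;
	map.key_ptr = &mmap_key;	/* dereferenced by the retry path */
	map.buff_size = &buff_size;

	return efi_exit_boot_services(sys_table, handle, &map, NULL, noop_func);
}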
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 5ed3d3f38166..ee49cd23ee63 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,6 +5,16 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
+/*
+ * __init annotations should not be used in the EFI stub, since the code is
+ * either included in the decompressor (x86, ARM) where they have no effect,
+ * or the whole stub is __init annotated at the section level (arm64) by
+ * renaming the sections, in which case the __init annotation is redundant
+ * and results in section names like .init.init.text, which our linker
+ * script does not expect.
+ */
+#undef __init
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -50,4 +60,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
unsigned long *addr, unsigned long random_seed);
+efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 9f614ee1b4c5..2e5c2ad90284 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -152,6 +152,27 @@ fdt_set_fail:
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
+struct exit_boot_struct {
+ efi_memory_desc_t *runtime_map;
+ int *runtime_entry_count;
+};
+
+static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv)
+{
+ struct exit_boot_struct *p = priv;
+ /*
+ * Update the memory map with virtual addresses. The function will also
+ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+ * entries so that we can pass it straight to SetVirtualAddressMap()
+ */
+ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
+ p->runtime_map, p->runtime_entry_count);
+
+ return EFI_SUCCESS;
+}
+
/*
* Allocate memory for a new FDT, then add EFI, commandline, and
* initrd related fields to the FDT. This routine increases the
@@ -175,13 +196,22 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
unsigned long fdt_addr,
unsigned long fdt_size)
{
- unsigned long map_size, desc_size;
+ unsigned long map_size, desc_size, buff_size;
u32 desc_ver;
unsigned long mmap_key;
efi_memory_desc_t *memory_map, *runtime_map;
unsigned long new_fdt_size;
efi_status_t status;
int runtime_entry_count = 0;
+ struct efi_boot_memmap map;
+ struct exit_boot_struct priv;
+
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = &desc_ver;
+ map.key_ptr = &mmap_key;
+ map.buff_size = &buff_size;
/*
* Get a copy of the current memory map that we will use to prepare
@@ -189,8 +219,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* subsequent allocations adding entries, since they could not affect
* the number of EFI_MEMORY_RUNTIME regions.
*/
- status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
+ status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS) {
pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
return status;
@@ -199,6 +228,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi(sys_table,
"Exiting boot services and installing virtual address map...\n");
+ map.map = &memory_map;
/*
* Estimate size of new FDT, and allocate memory for it. We
* will allocate a bigger buffer if this ends up being too
@@ -218,8 +248,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
* we can get the memory map key needed for
* exit_boot_services().
*/
- status = efi_get_memory_map(sys_table, &memory_map, &map_size,
- &desc_size, &desc_ver, &mmap_key);
+ status = efi_get_memory_map(sys_table, &map);
if (status != EFI_SUCCESS)
goto fail_free_new_fdt;
@@ -250,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
}
}
- /*
- * Update the memory map with virtual addresses. The function will also
- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
- * entries so that we can pass it straight into SetVirtualAddressMap()
- */
- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
- &runtime_entry_count);
-
- /* Now we are ready to exit_boot_services.*/
- status = sys_table->boottime->exit_boot_services(handle, mmap_key);
+ sys_table->boottime->free_pool(memory_map);
+ priv.runtime_map = runtime_map;
+ priv.runtime_entry_count = &runtime_entry_count;
+ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
+ exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 53f6d3fe6d86..0c9f58c5ba50 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
unsigned long random_seed)
{
unsigned long map_size, desc_size, total_slots = 0, target_slot;
+ unsigned long buff_size;
efi_status_t status;
efi_memory_desc_t *memory_map;
int map_offset;
+ struct efi_boot_memmap map;
- status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
- &desc_size, NULL, NULL);
+ map.map = &memory_map;
+ map.map_size = &map_size;
+ map.desc_size = &desc_size;
+ map.desc_ver = NULL;
+ map.key_ptr = NULL;
+ map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table_arg, &map);
if (status != EFI_SUCCESS)
return status;
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644
index 000000000000..236004b9a50d
--- /dev/null
+++ b/drivers/firmware/efi/memattr.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+ efi_memory_attributes_table_t *tbl;
+
+ if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+ return 0;
+
+ tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi.mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (tbl->version > 1) {
+ pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+ tbl->version);
+ goto unmap;
+ }
+
+ tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+ memblock_reserve(efi.mem_attr_table, tbl_size);
+
+unmap:
+ early_memunmap(tbl, sizeof(*tbl));
+ return 0;
+}
+
+/*
+ * Returns a copy @out of the UEFI memory descriptor @in if it is covered
+ * entirely by a UEFI memory map entry with matching attributes. The virtual
+ * address of @out is set according to the matching entry that was found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+ u64 in_paddr = in->phys_addr;
+ u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+ efi_memory_desc_t *md;
+
+ *out = *in;
+
+ if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+ in->type != EFI_RUNTIME_SERVICES_DATA) {
+ pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+ return false;
+ }
+
+ if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+ pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+ return false;
+ }
+
+ if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ (!PAGE_ALIGNED(in->phys_addr) ||
+ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+ /*
+ * Since arm64 may execute with page sizes of up to 64 KB, the
+ * UEFI spec mandates that RuntimeServices memory regions must
+ * be 64 KB aligned. We need to validate this here since we will
+ * not be able to tighten permissions on such regions without
+ * affecting adjacent regions.
+ */
+ pr_warn("Entry address region misaligned\n");
+ return false;
+ }
+
+ for_each_efi_memory_desc(md) {
+ u64 md_paddr = md->phys_addr;
+ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0) {
+ /* no virtual mapping has been installed by the stub */
+ break;
+ }
+
+ if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+ continue;
+
+ /*
+ * This entry covers the start of @in, check whether
+ * it covers the end as well.
+ */
+ if (md_paddr + md_size < in_paddr + in_size) {
+ pr_warn("Entry covers multiple EFI memory map regions\n");
+ return false;
+ }
+
+ if (md->type != in->type) {
+ pr_warn("Entry type deviates from EFI memory map region type\n");
+ return false;
+ }
+
+ out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+ return true;
+ }
+
+ pr_warn("No matching entry found in the EFI memory map\n");
+ return false;
+}
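
A worked example of the address fix-up above, with purely illustrative values: if the memory map contains a runtime region with md->phys_addr == 0x40000000, md_size == 0x10000 and md->virt_addr == 0xffff000000000000, then an attributes-table entry at in->phys_addr == 0x40004000 that fits inside it yields out->virt_addr = 0x40004000 + (0xffff000000000000 - 0x40000000) = 0xffff000000004000.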
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn)
+{
+ efi_memory_attributes_table_t *tbl;
+ int i, ret;
+
+ if (tbl_size <= sizeof(*tbl))
+ return 0;
+
+ /*
+	 * We need the EFI memory map to be set up so we can use it to
+	 * look up the virtual addresses of all entries in the EFI
+	 * Memory Attributes table. If it isn't available, this
+ * function should not be called.
+ */
+ if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+ return 0;
+
+ tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+ if (!tbl) {
+ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+ efi.mem_attr_table);
+ return -ENOMEM;
+ }
+
+ if (efi_enabled(EFI_DBG))
+ pr_info("Processing EFI Memory Attributes table:\n");
+
+ for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+ efi_memory_desc_t md;
+ unsigned long size;
+ bool valid;
+ char buf[64];
+
+ valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+ &md);
+ size = md.num_pages << EFI_PAGE_SHIFT;
+ if (efi_enabled(EFI_DBG) || !valid)
+ pr_info("%s 0x%012llx-0x%012llx %s\n",
+ valid ? "" : "!", md.phys_addr,
+ md.phys_addr + size - 1,
+ efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+ if (valid)
+ ret = fn(mm, &md);
+ }
+ memunmap(tbl);
+ return ret;
+}
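
The shape of the efi_memattr_perm_setter callback is implied by the call site above, fn(mm, &md), returning 0 on success. A skeletal sketch under that assumption, with an illustrative name and a body that only logs what a real implementation would remap:

static int example_set_permissions(struct mm_struct *mm, efi_memory_desc_t *md)
{
	/* entry_is_valid() guarantees at least one of RO/XP is set */
	bool ro = md->attribute & EFI_MEMORY_RO;
	bool xp = md->attribute & EFI_MEMORY_XP;

	pr_debug("would remap %#llx (%llu pages) ro=%d xp=%d\n",
		 (unsigned long long)md->phys_addr,
		 (unsigned long long)md->num_pages, ro, xp);
	return 0;	/* any non-zero value stops the loop above */
}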
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
new file mode 100644
index 000000000000..78686443cb37
--- /dev/null
+++ b/drivers/firmware/efi/memmap.c
@@ -0,0 +1,341 @@
+/*
+ * Common EFI memory map functions.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/efi.h>
+#include <linux/io.h>
+#include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+ return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (!p)
+ return 0;
+
+ return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+ unsigned long size = num_entries * efi.memmap.desc_size;
+
+ if (slab_is_available())
+ return __efi_memmap_alloc_late(size);
+
+ return __efi_memmap_alloc_early(size);
+}
+
+/**
+ * __efi_memmap_init - Common code for mapping the EFI memory map
+ * @data: EFI memory map data
+ * @late: Use early or late mapping function?
+ *
+ * This function takes care of figuring out which function to use to
+ * map the EFI memory map in efi.memmap based on how far into the boot
+ * we are.
+ *
+ * During bootup @late should be %false since we only have access to
+ * the early_memremap*() functions as the vmalloc space isn't set up.
+ * Once the kernel is fully booted we can fall back to the more robust
+ * memremap*() API.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+static int __init
+__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+{
+ struct efi_memory_map map;
+ phys_addr_t phys_map;
+
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+
+ phys_map = data->phys_map;
+
+ if (late)
+ map.map = memremap(phys_map, data->size, MEMREMAP_WB);
+ else
+ map.map = early_memremap(phys_map, data->size);
+
+ if (!map.map) {
+ pr_err("Could not map the memory map!\n");
+ return -ENOMEM;
+ }
+
+ map.phys_map = data->phys_map;
+ map.nr_map = data->size / data->desc_size;
+ map.map_end = map.map + data->size;
+
+ map.desc_version = data->desc_version;
+ map.desc_size = data->desc_size;
+ map.late = late;
+
+ set_bit(EFI_MEMMAP, &efi.flags);
+
+ efi.memmap = map;
+
+ return 0;
+}
+
+/**
+ * efi_memmap_init_early - Map the EFI memory map data structure
+ * @data: EFI memory map data
+ *
+ * Use early_memremap() to map the passed-in EFI memory map and assign
+ * it to efi.memmap.
+ */
+int __init efi_memmap_init_early(struct efi_memory_map_data *data)
+{
+ /* Cannot go backwards */
+ WARN_ON(efi.memmap.late);
+
+ return __efi_memmap_init(data, false);
+}
+
+void __init efi_memmap_unmap(void)
+{
+ if (!efi.memmap.late) {
+ unsigned long size;
+
+ size = efi.memmap.desc_size * efi.memmap.nr_map;
+ early_memunmap(efi.memmap.map, size);
+ } else {
+ memunmap(efi.memmap.map);
+ }
+
+ efi.memmap.map = NULL;
+ clear_bit(EFI_MEMMAP, &efi.flags);
+}
+
+/**
+ * efi_memmap_init_late - Map efi.memmap with memremap()
+ * @phys_addr: Physical address of the new EFI memory map
+ * @size: Size in bytes of the new EFI memory map
+ *
+ * Set up a mapping of the EFI memory map using memremap(). This
+ * function should only be called once the vmalloc space has been
+ * set up and is therefore not suitable for calling during early EFI
+ * initialisation, e.g. in efi_init(). Additionally, it expects
+ * efi_memmap_init_early() to have already been called.
+ *
+ * The reason there are two EFI memmap initialisation routines
+ * (efi_memmap_init_early() and this late version) is because the
+ * early EFI memmap should be explicitly unmapped once EFI
+ * initialisation is complete as the fixmap space used to map the EFI
+ * memmap (via early_memremap()) is a scarce resource.
+ *
+ * This late mapping is intended to persist for the duration of
+ * runtime so that things like efi_mem_desc_lookup() and
+ * efi_mem_attributes() always work.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+{
+ struct efi_memory_map_data data = {
+ .phys_map = addr,
+ .size = size,
+ };
+
+ /* Did we forget to unmap the early EFI memmap? */
+ WARN_ON(efi.memmap.map);
+
+ /* Were we already called? */
+ WARN_ON(efi.memmap.late);
+
+ /*
+ * It makes no sense to allow callers to register different
+ * values for the following fields. Copy them out of the
+ * existing early EFI memmap.
+ */
+ data.desc_version = efi.memmap.desc_version;
+ data.desc_size = efi.memmap.desc_size;
+
+ return __efi_memmap_init(&data, true);
+}
+
+/**
+ * efi_memmap_install - Install a new EFI memory map in efi.memmap
+ * @addr: Physical address of the memory map
+ * @nr_map: Number of entries in the memory map
+ *
+ * Unlike efi_memmap_init_*(), this function does not allow the caller
+ * to switch from early to late mappings. It simply uses the existing
+ * mapping function and installs the new memmap.
+ *
+ * Returns zero on success, a negative error code on failure.
+ */
+int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
+{
+ struct efi_memory_map_data data;
+
+ efi_memmap_unmap();
+
+ data.phys_map = addr;
+ data.size = efi.memmap.desc_size * nr_map;
+ data.desc_version = efi.memmap.desc_version;
+ data.desc_size = efi.memmap.desc_size;
+
+ return __efi_memmap_init(&data, efi.memmap.late);
+}
+
+/**
+ * efi_memmap_split_count - Count number of additional EFI memmap entries
+ * @md: EFI memory descriptor to split
+ * @range: Address range (start, end) to split around
+ *
+ * Returns the number of additional EFI memmap entries required to
+ * accommodate @range.
+ */
+int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
+{
+ u64 m_start, m_end;
+ u64 start, end;
+ int count = 0;
+
+ start = md->phys_addr;
+ end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ /* modifying range */
+ m_start = range->start;
+ m_end = range->end;
+
+ if (m_start <= start) {
+ /* split into 2 parts */
+ if (start < m_end && m_end < end)
+ count++;
+ }
+
+ if (start < m_start && m_start < end) {
+ /* split into 3 parts */
+ if (m_end < end)
+ count += 2;
+ /* split into 2 parts */
+ if (end <= m_end)
+ count++;
+ }
+
+ return count;
+}
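
A quick worked case, assuming 4 KB EFI pages and purely illustrative addresses: splitting a descriptor that covers [0x1000, 0x8fff] around the range [0x3000, 0x4fff] leaves a head and a tail in addition to the modified middle, so two extra entries are needed.

static int __init split_count_example(void)
{
	efi_memory_desc_t md = {
		.phys_addr = 0x1000,
		.num_pages = 8,			/* covers [0x1000, 0x8fff] */
	};
	struct range r = { .start = 0x3000, .end = 0x4fff };

	/* head + tail survive around the modified middle: returns 2 */
	return efi_memmap_split_count(&md, &r);
}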
+
+/**
+ * efi_memmap_insert - Insert a memory region in an EFI memmap
+ * @old_memmap: The existing EFI memory map structure
+ * @buf: Address of buffer to store new map
+ * @mem: Memory map entry to insert
+ *
+ * It is suggested that you call efi_memmap_split_count() first
+ * to see how large @buf needs to be.
+ */
+void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
+ struct efi_mem_range *mem)
+{
+ u64 m_start, m_end, m_attr;
+ efi_memory_desc_t *md;
+ u64 start, end;
+ void *old, *new;
+
+ /* modifying range */
+ m_start = mem->range.start;
+ m_end = mem->range.end;
+ m_attr = mem->attribute;
+
+ /*
+ * The EFI memory map deals with regions in EFI_PAGE_SIZE
+ * units. Ensure that the region described by 'mem' is aligned
+ * correctly.
+ */
+ if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
+ !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
+ WARN_ON(1);
+ return;
+ }
+
+ for (old = old_memmap->map, new = buf;
+ old < old_memmap->map_end;
+ old += old_memmap->desc_size, new += old_memmap->desc_size) {
+
+ /* copy original EFI memory descriptor */
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+ if (m_start <= start && end <= m_end)
+ md->attribute |= m_attr;
+
+ if (m_start <= start &&
+ (start < m_end && m_end < end)) {
+ /* first part */
+ md->attribute |= m_attr;
+ md->num_pages = (m_end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ /* latter part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->phys_addr = m_end + 1;
+ md->num_pages = (end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ }
+
+ if ((start < m_start && m_start < end) && m_end < end) {
+ /* first part */
+ md->num_pages = (m_start - md->phys_addr) >>
+ EFI_PAGE_SHIFT;
+ /* middle part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->attribute |= m_attr;
+ md->phys_addr = m_start;
+ md->num_pages = (m_end - m_start + 1) >>
+ EFI_PAGE_SHIFT;
+ /* last part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->phys_addr = m_end + 1;
+ md->num_pages = (end - m_end) >>
+ EFI_PAGE_SHIFT;
+ }
+
+ if ((start < m_start && m_start < end) &&
+ (end <= m_end)) {
+ /* first part */
+ md->num_pages = (m_start - md->phys_addr) >>
+ EFI_PAGE_SHIFT;
+ /* latter part */
+ new += old_memmap->desc_size;
+ memcpy(new, old, old_memmap->desc_size);
+ md = new;
+ md->phys_addr = m_start;
+ md->num_pages = (end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ md->attribute |= m_attr;
+ }
+ }
+}
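
Putting the helpers together, a hedged usage sketch along the lines the comment above suggests; it assumes late boot (memremap() available), and the range, attribute, and function name are illustrative:

static int __init memmap_insert_example(void)
{
	struct efi_mem_range mr = {
		.range = { .start = 0x3000, .end = 0x4fff },
		.attribute = EFI_MEMORY_RO,	/* illustrative attribute */
	};
	efi_memory_desc_t *md;
	phys_addr_t phys;
	int nr_map, nr_extra = 0;
	void *buf;

	for_each_efi_memory_desc(md)
		nr_extra += efi_memmap_split_count(md, &mr.range);
	if (!nr_extra)
		return 0;

	nr_map = efi.memmap.nr_map + nr_extra;
	phys = efi_memmap_alloc(nr_map);
	if (!phys)
		return -ENOMEM;

	buf = memremap(phys, nr_map * efi.memmap.desc_size, MEMREMAP_WB);
	if (!buf)
		return -ENOMEM;

	efi_memmap_insert(&efi.memmap, buf, &mr);
	memunmap(buf);
	return efi_memmap_install(phys, nr_map);
}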
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 5c55227a34c8..8e64b77aeac9 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -14,10 +14,6 @@
#include <asm/setup.h>
-static void *efi_runtime_map;
-static int nr_efi_runtime_map;
-static u32 efi_memdesc_size;
-
struct efi_runtime_map_entry {
efi_memory_desc_t md;
struct kobject kobj; /* kobject for each entry */
@@ -106,7 +102,8 @@ static struct kobj_type __refdata map_ktype = {
static struct kset *map_kset;
static struct efi_runtime_map_entry *
-add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
+add_sysfs_runtime_map_entry(struct kobject *kobj, int nr,
+ efi_memory_desc_t *md)
{
int ret;
struct efi_runtime_map_entry *entry;
@@ -124,8 +121,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
return ERR_PTR(-ENOMEM);
}
- memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
- sizeof(efi_memory_desc_t));
+ memcpy(&entry->md, md, sizeof(efi_memory_desc_t));
kobject_init(&entry->kobj, &map_ktype);
entry->kobj.kset = map_kset;
@@ -142,12 +138,12 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
int efi_get_runtime_map_size(void)
{
- return nr_efi_runtime_map * efi_memdesc_size;
+ return efi.memmap.nr_map * efi.memmap.desc_size;
}
int efi_get_runtime_map_desc_size(void)
{
- return efi_memdesc_size;
+ return efi.memmap.desc_size;
}
int efi_runtime_map_copy(void *buf, size_t bufsz)
@@ -157,38 +153,33 @@ int efi_runtime_map_copy(void *buf, size_t bufsz)
if (sz > bufsz)
sz = bufsz;
- memcpy(buf, efi_runtime_map, sz);
+ memcpy(buf, efi.memmap.map, sz);
return 0;
}
-void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size)
-{
- efi_runtime_map = map;
- nr_efi_runtime_map = nr_entries;
- efi_memdesc_size = desc_size;
-}
-
int __init efi_runtime_map_init(struct kobject *efi_kobj)
{
int i, j, ret = 0;
struct efi_runtime_map_entry *entry;
+ efi_memory_desc_t *md;
- if (!efi_runtime_map)
+ if (!efi_enabled(EFI_MEMMAP))
return 0;
- map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL);
+ map_entries = kzalloc(efi.memmap.nr_map * sizeof(entry), GFP_KERNEL);
if (!map_entries) {
ret = -ENOMEM;
goto out;
}
- for (i = 0; i < nr_efi_runtime_map; i++) {
- entry = add_sysfs_runtime_map_entry(efi_kobj, i);
+ i = 0;
+ for_each_efi_memory_desc(md) {
+ entry = add_sysfs_runtime_map_entry(efi_kobj, i, md);
if (IS_ERR(entry)) {
ret = PTR_ERR(entry);
goto out_add_entry;
}
- *(map_entries + i) = entry;
+ *(map_entries + i++) = entry;
}
return 0;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c4bf9a1cf4a6..7bc4c677828f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -82,13 +82,13 @@ config DRM_TTM
config DRM_GEM_CMA_HELPER
bool
- depends on DRM && HAVE_DMA_ATTRS
+ depends on DRM
help
Choose this if you need the GEM CMA helper functions
config DRM_KMS_CMA_HELPER
bool
- depends on DRM && HAVE_DMA_ATTRS
+ depends on DRM
select DRM_GEM_CMA_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 2b81a417cf29..cda71dbd44d3 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -5,7 +5,7 @@ config DRM_IMX
select VIDEOMODE_HELPERS
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
depends on IMX_IPUV3_CORE
help
enable i.MX graphics support
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index d4e0a39568f6..96dcd4a78951 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -1,6 +1,6 @@
config DRM_RCAR_DU
tristate "DRM Support for R-Car Display Unit"
- depends on DRM && ARM && HAVE_DMA_ATTRS && OF
+ depends on DRM && ARM && OF
depends on ARCH_SHMOBILE || COMPILE_TEST
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index b9202aa6f8ab..8d17d00ddb4b 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -1,6 +1,6 @@
config DRM_SHMOBILE
tristate "DRM Support for SH Mobile"
- depends on DRM && ARM && HAVE_DMA_ATTRS
+ depends on DRM && ARM
depends on ARCH_SHMOBILE || COMPILE_TEST
depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
select BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 10c1b1926e6f..5ad43a1bb260 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -1,6 +1,6 @@
config DRM_STI
tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
- depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
+ depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
select RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 78beafb0742c..f60a1ec84fa4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -1,6 +1,6 @@
config DRM_TILCDC
tristate "DRM Support for TI LCDC Display Controller"
- depends on DRM && OF && ARM && HAVE_DMA_ATTRS
+ depends on DRM && OF && ARM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index b2ffc4c605f5..e53df59cb139 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -1,7 +1,7 @@
config DRM_VC4
tristate "Broadcom VC4 Graphics"
depends on ARCH_BCM2835 || COMPILE_TEST
- depends on DRM && HAVE_DMA_ATTRS
+ depends on DRM
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_GEM_CMA_HELPER
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index ccbc9742cb7a..15431c171132 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -216,7 +216,6 @@ config VIDEO_STI_BDISP
tristate "STMicroelectronics BDISP 2D blitter driver"
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_STI || COMPILE_TEST
- depends on HAVE_DMA_ATTRS
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b50f77543ad0..c7d46fa3ae54 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -645,6 +645,19 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
}
/**
+ * of_get_flat_dt_subnode_by_name - get the subnode by given name
+ *
+ * @node: the parent node
+ * @uname: the name of subnode
+ * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
+ */
+
+int of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
+{
+ return fdt_subnode_offset(initial_boot_params, node, uname);
+}
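
A hedged usage sketch: the helper expects a node offset such as the one handed to an of_scan_flat_dt() iterator. The callback below follows that iterator's signature, and the subnode name is purely illustrative.

static int __init find_sub(unsigned long node, const char *uname,
			   int depth, void *data)
{
	int sub = of_get_flat_dt_subnode_by_name(node, "reserved-memory");

	if (sub < 0)
		return 0;	/* not here, keep scanning */

	*(int *)data = sub;	/* hand the offset back to the caller */
	return 1;		/* non-zero stops of_scan_flat_dt() */
}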
+
+/**
* of_get_flat_dt_root - find the root node in the flat blob
*/
unsigned long __init of_get_flat_dt_root(void)
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 6ab6125420b4..0a873bd3e4ec 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -705,3 +705,4 @@ void of_msi_configure(struct device *dev, struct device_node *np)
dev_set_msi_domain(dev,
of_msi_get_domain(dev, np, DOMAIN_BUS_PLATFORM_MSI));
}
+EXPORT_SYMBOL_GPL(of_msi_configure);
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 8e11fb2831cd..e24b05996a1b 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -786,18 +786,27 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
return CCIO_IOVA(iovp, offset);
}
+
+static dma_addr_t
+ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ return ccio_map_single(dev, page_address(page) + offset, size,
+ direction);
+}
+
+
/**
- * ccio_unmap_single - Unmap an address range from the IOMMU.
+ * ccio_unmap_page - Unmap an address range from the IOMMU.
* @dev: The PCI device.
* @addr: The start address of the DMA region.
* @size: The length of the DMA region.
* @direction: The direction of the DMA transaction (to/from device).
- *
- * This function implements the pci_unmap_single function.
*/
static void
-ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction)
+ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ioc *ioc;
unsigned long flags;
@@ -826,7 +835,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
}
/**
- * ccio_alloc_consistent - Allocate a consistent DMA mapping.
+ * ccio_alloc - Allocate a consistent DMA mapping.
* @dev: The PCI device.
* @size: The length of the DMA region.
* @dma_handle: The DMA address handed back to the device (not the cpu).
@@ -834,7 +843,8 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
* This function implements the pci_alloc_consistent function.
*/
static void *
-ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
+ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
{
void *ret;
#if 0
@@ -858,7 +868,7 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
}
/**
- * ccio_free_consistent - Free a consistent DMA mapping.
+ * ccio_free - Free a consistent DMA mapping.
* @dev: The PCI device.
* @size: The length of the DMA region.
* @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
@@ -867,10 +877,10 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
* This function implements the pci_free_consistent function.
*/
static void
-ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle)
+ccio_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- ccio_unmap_single(dev, dma_handle, size, 0);
+ ccio_unmap_page(dev, dma_handle, size, 0, NULL);
free_pages((unsigned long)cpu_addr, get_order(size));
}
@@ -897,7 +907,7 @@ ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
*/
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -974,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
*/
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ioc *ioc;
@@ -993,27 +1003,22 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
- ccio_unmap_single(dev, sg_dma_address(sglist),
- sg_dma_len(sglist), direction);
+ ccio_unmap_page(dev, sg_dma_address(sglist),
+ sg_dma_len(sglist), direction, NULL);
++sglist;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
-static struct hppa_dma_ops ccio_ops = {
+static struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
- .alloc_consistent = ccio_alloc_consistent,
- .alloc_noncoherent = ccio_alloc_consistent,
- .free_consistent = ccio_free_consistent,
- .map_single = ccio_map_single,
- .unmap_single = ccio_unmap_single,
+ .alloc = ccio_alloc,
+ .free = ccio_free,
+ .map_page = ccio_map_page,
+ .unmap_page = ccio_unmap_page,
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
- .dma_sync_single_for_cpu = NULL, /* NOP for U2/Uturn */
- .dma_sync_single_for_device = NULL, /* NOP for U2/Uturn */
- .dma_sync_sg_for_cpu = NULL, /* ditto */
- .dma_sync_sg_for_device = NULL, /* ditto */
};
#ifdef CONFIG_PROC_FS
@@ -1062,7 +1067,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
ioc->msingle_calls, ioc->msingle_pages,
(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
- /* KLUGE - unmap_sg calls unmap_single for each mapped page */
+ /* KLUGE - unmap_sg calls unmap_page for each mapped page */
min = ioc->usingle_calls - ioc->usg_calls;
max = ioc->usingle_pages - ioc->usg_pages;
seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 225049b492e5..42ec4600b7e4 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -780,8 +780,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
}
+static dma_addr_t
+sba_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ return sba_map_single(dev, page_address(page) + offset, size,
+ direction);
+}
+
+
/**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
@@ -790,8 +800,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
* See Documentation/DMA-API-HOWTO.txt
*/
static void
-sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction)
+sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
@@ -858,15 +868,15 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
/**
- * sba_alloc_consistent - allocate/map shared mem for DMA
+ * sba_alloc - allocate/map shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @dma_handle: IOVA of new buffer.
*
* See Documentation/DMA-API-HOWTO.txt
*/
-static void *sba_alloc_consistent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
+static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
@@ -888,7 +898,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
/**
- * sba_free_consistent - free/unmap shared mem for DMA
+ * sba_free - free/unmap shared mem for DMA
* @hwdev: instance of PCI owned by the driver that's asking.
* @size: number of bytes mapped in driver buffer.
* @vaddr: virtual address IOVA of "consistent" buffer.
@@ -897,10 +907,10 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
* See Documentation/DMA-API-HOWTO.txt
*/
static void
-sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle)
+sba_free(struct device *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, struct dma_attrs *attrs)
{
- sba_unmap_single(hwdev, dma_handle, size, 0);
+ sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -933,7 +943,7 @@ int dump_run_sg = 0;
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
@@ -1016,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+ enum dma_data_direction direction, struct dma_attrs *attrs)
{
struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
@@ -1040,7 +1050,8 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
while (sg_dma_len(sglist) && nents--) {
- sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
+ sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
+ direction, NULL);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
@@ -1058,19 +1069,14 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
}
-static struct hppa_dma_ops sba_ops = {
+static struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
- .alloc_consistent = sba_alloc_consistent,
- .alloc_noncoherent = sba_alloc_consistent,
- .free_consistent = sba_free_consistent,
- .map_single = sba_map_single,
- .unmap_single = sba_unmap_single,
+ .alloc = sba_alloc,
+ .free = sba_free,
+ .map_page = sba_map_page,
+ .unmap_page = sba_unmap_page,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
- .dma_sync_single_for_cpu = NULL,
- .dma_sync_single_for_device = NULL,
- .dma_sync_sg_for_cpu = NULL,
- .dma_sync_sg_for_device = NULL,
};
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 167fe411ce2e..54c4b691e51f 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -219,15 +219,13 @@ int cper_severity_to_aer(int cper_severity)
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);
-void cper_print_aer(struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer)
{
- int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+ int layer, agent, status_strs_size, tlp_header_valid = 0;
u32 status, mask;
const char **status_strs;
- aer_severity = cper_severity_to_aer(cper_severity);
-
if (aer_severity == AER_CORRECTABLE) {
status = aer->cor_status;
mask = aer->cor_mask;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d5c24fcd4eee..604011e047d6 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -23,10 +23,8 @@
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
-#include <linux/notifier.h>
#include "pci.h"
-static RAW_NOTIFIER_HEAD(bar_update_chain);
void pci_update_resource(struct pci_dev *dev, int resno)
{
@@ -37,9 +35,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
int reg;
enum pci_bar_type type;
struct resource *res = dev->resource + resno;
- struct pci_bar_update_info update_info;
- struct pci_bus_region update_reg;
- struct resource update_res;
if (dev->is_virtfn) {
dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
@@ -82,22 +77,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
/*
- * Fetch the old BAR location from the device, so we can notify
- * users of that BAR that its location is changing.
- */
- pci_read_config_dword(dev, reg, &check);
- update_reg.start = check & PCI_BASE_ADDRESS_MEM_MASK;
- if (check & PCI_BASE_ADDRESS_MEM_TYPE_64) {
- pci_read_config_dword(dev, reg, &check);
- update_reg.start |= ((u64)check) << 32;
- }
- update_info.size = region.end - region.start;
- update_reg.end = update_reg.start + update_info.size;
- pcibios_bus_to_resource(dev->bus, &update_res, &update_reg);
- update_info.old_start = update_res.start;
- update_info.new_start = res->start;
-
- /*
* We can't update a 64-bit BAR atomically, so when possible,
* disable decoding so that a half-updated BAR won't conflict
* with another device.
@@ -129,14 +108,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
if (disable)
pci_write_config_word(dev, PCI_COMMAND, cmd);
-
- /* Tell interested parties that the BAR mapping changed */
- raw_notifier_call_chain(&bar_update_chain, 0, &update_info);
-}
-
-int pci_notify_on_update_resource(struct notifier_block *nb)
-{
- return raw_notifier_chain_register(&bar_update_chain, nb);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
deleted file mode 100644
index 0297e5875798..000000000000
--- a/include/asm-generic/dma-coherent.h
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef DMA_COHERENT_H
-#define DMA_COHERENT_H
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-/*
- * These three functions are only for dma allocator.
- * Don't use them in device drivers.
- */
-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
- dma_addr_t *dma_handle, void **ret);
-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
-
-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, size_t size, int *ret);
-/*
- * Standard interface
- */
-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags);
-
-void dma_release_declared_memory(struct device *dev);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
- dma_addr_t device_addr, size_t size);
-#else
-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
-#define dma_release_from_coherent(dev, order, vaddr) (0)
-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
-#endif
-
-#endif
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
deleted file mode 100644
index 6c32af918c2f..000000000000
--- a/include/asm-generic/dma-mapping-broken.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-/* define the dma api to allow compilation but not linking of
- * dma dependent code. Code that depends on the dma-mapping
- * API needs to set 'depends on HAS_DMA' in its Kconfig
- */
-
-struct scatterlist;
-
-extern void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag);
-
-extern void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle);
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- /* attrs is not supported and ignored */
- return dma_alloc_coherent(dev, size, dma_handle, flag);
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- /* attrs is not supported and ignored */
- dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction);
-
-extern void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction);
-
-extern int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
-
-extern void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction);
-
-extern dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction);
-
-extern void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction);
-
-extern void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
- enum dma_data_direction direction);
-
-extern void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction);
-
-extern void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction);
-
-#define dma_sync_single_for_device dma_sync_single_for_cpu
-#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
-#define dma_sync_sg_for_device dma_sync_sg_for_cpu
-
-extern int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
-
-extern int
-dma_supported(struct device *dev, u64 mask);
-
-extern int
-dma_set_mask(struct device *dev, u64 mask);
-
-extern int
-dma_get_cache_alignment(void);
-
-extern void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-
-#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
deleted file mode 100644
index b1bc954eccf3..000000000000
--- a/include/asm-generic/dma-mapping-common.h
+++ /dev/null
@@ -1,358 +0,0 @@
-#ifndef _ASM_GENERIC_DMA_MAPPING_H
-#define _ASM_GENERIC_DMA_MAPPING_H
-
-#include <linux/kmemcheck.h>
-#include <linux/bug.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
-#include <asm-generic/dma-coherent.h>
-
-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
- size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- kmemcheck_mark_initialized(ptr, size);
- BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, virt_to_page(ptr),
- (unsigned long)ptr & ~PAGE_MASK, size,
- dir, attrs);
- debug_dma_map_page(dev, virt_to_page(ptr),
- (unsigned long)ptr & ~PAGE_MASK, size,
- dir, addr, true);
- return addr;
-}
-
-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, attrs);
- debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- int i, ents;
- struct scatterlist *s;
-
- for_each_sg(sg, s, nents, i)
- kmemcheck_mark_initialized(sg_virt(s), s->length);
- BUG_ON(!valid_dma_direction(dir));
- ents = ops->map_sg(dev, sg, nents, dir, attrs);
- BUG_ON(ents < 0);
- debug_dma_map_sg(dev, sg, nents, ents, dir);
-
- return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- debug_dma_unmap_sg(dev, sg, nents, dir);
- if (ops->unmap_sg)
- ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
- size_t offset, size_t size,
- enum dma_data_direction dir)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- dma_addr_t addr;
-
- kmemcheck_mark_initialized(page_address(page) + offset, size);
- BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, NULL);
- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
- return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, NULL);
- debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size,
- enum dma_data_direction dir)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr, size, dir);
- debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
- dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr, size, dir);
- debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_cpu)
- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t addr,
- unsigned long offset,
- size_t size,
- enum dma_data_direction dir)
-{
- const struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_single_for_device)
- ops->sync_single_for_device(dev, addr + offset, size, dir);
- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_cpu)
- ops->sync_sg_for_cpu(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction dir)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!valid_dma_direction(dir));
- if (ops->sync_sg_for_device)
- ops->sync_sg_for_device(dev, sg, nelems, dir);
- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
-
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
-
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
- unsigned long vm_flags,
- pgprot_t prot, const void *caller);
-
-void *dma_common_pages_remap(struct page **pages, size_t size,
- unsigned long vm_flags, pgprot_t prot,
- const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
-
-/**
- * dma_mmap_attrs - map a coherent DMA allocation into user space
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @vma: vm_area_struct describing requested user mapping
- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
- * @handle: device-view address returned from dma_alloc_attrs
- * @size: size of memory originally requested in dma_alloc_attrs
- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
- *
- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
- * into user space. The coherent DMA buffer must not be freed by the
- * driver until the user space mapping has been released.
- */
-static inline int
-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->mmap)
- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-
-int
-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr, size_t size);
-
-static inline int
-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- BUG_ON(!ops);
- if (ops->get_sgtable)
- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
- attrs);
- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
-}
-
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
-
-#ifndef arch_dma_alloc_attrs
-#define arch_dma_alloc_attrs(dev, flag) (true)
-#endif
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- void *cpu_addr;
-
- BUG_ON(!ops);
-
- if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
- return cpu_addr;
-
- if (!arch_dma_alloc_attrs(&dev, &flag))
- return NULL;
- if (!ops->alloc)
- return NULL;
-
- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
- return cpu_addr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- BUG_ON(!ops);
- WARN_ON(irqs_disabled());
-
- if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
- return;
-
- if (!ops->free)
- return;
-
- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
- ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
-}
-
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
-}
-
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp)
-{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- debug_dma_mapping_error(dev, dma_addr);
-
- if (get_dma_ops(dev)->mapping_error)
- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-
-#ifdef DMA_ERROR_CODE
- return dma_addr == DMA_ERROR_CODE;
-#else
- return 0;
-#endif
-}
-
-#ifndef HAVE_ARCH_DMA_SUPPORTED
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (!ops)
- return 0;
- if (!ops->dma_supported)
- return 1;
- return ops->dma_supported(dev, mask);
-}
-#endif
-
-#ifndef HAVE_ARCH_DMA_SET_MASK
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (ops->set_dma_mask)
- return ops->set_dma_mask(dev, mask);
-
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- *dev->dma_mask = mask;
- return 0;
-}
-#endif
-
-#endif
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
new file mode 100644
index 000000000000..e328b52425a8
--- /dev/null
+++ b/include/crypto/acompress.h
@@ -0,0 +1,269 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_H
+#define _CRYPTO_ACOMP_H
+#include <linux/crypto.h>
+
+#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
+
+/**
+ * struct acomp_req - asynchronous (de)compression request
+ *
+ * @base: Common attributes for asynchronous crypto requests
+ * @src: Source Data
+ * @dst: Destination data
+ * @slen: Size of the input buffer
+ * @dlen: Size of the output buffer and number of bytes produced
+ * @flags: Internal flags
+ * @__ctx: Start of private context data
+ */
+struct acomp_req {
+ struct crypto_async_request base;
+ struct scatterlist *src;
+ struct scatterlist *dst;
+ unsigned int slen;
+ unsigned int dlen;
+ u32 flags;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct crypto_acomp - user-instantiated objects which encapsulate
+ * algorithms and core processing logic
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the
+ * algorithm
+ * @reqsize: Context size for (de)compression requests
+ * @base: Common crypto API algorithm data structure
+ */
+struct crypto_acomp {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+/**
+ * struct acomp_alg - asynchronous compression algorithm
+ *
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @dst_free: Frees destination buffer if allocated inside the algorithm
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ *
+ * @reqsize: Context size for (de)compression requests
+ * @base: Common crypto API algorithm data structure
+ */
+struct acomp_alg {
+ int (*compress)(struct acomp_req *req);
+ int (*decompress)(struct acomp_req *req);
+ void (*dst_free)(struct scatterlist *dst);
+ int (*init)(struct crypto_acomp *tfm);
+ void (*exit)(struct crypto_acomp *tfm);
+ unsigned int reqsize;
+ struct crypto_alg base;
+};
+
+/**
+ * DOC: Asynchronous Compression API
+ *
+ * The Asynchronous Compression API is used with the algorithms of type
+ * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
+ */
+
+/**
+ * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * compression algorithm e.g. "deflate"
+ * @type: specifies the type of the algorithm
+ * @mask: specifies the mask for the algorithm
+ *
+ * Allocate a handle for a compression algorithm. The returned struct
+ * crypto_acomp is the handle that is required for any subsequent
+ * API invocation for the compression operations.
+ *
+ * Return: allocated handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct acomp_alg, base);
+}
+
+static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_acomp, base);
+}
+
+static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
+}
+
+static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
+{
+ return tfm->reqsize;
+}
+
+static inline void acomp_request_set_tfm(struct acomp_req *req,
+ struct crypto_acomp *tfm)
+{
+ req->base.tfm = crypto_acomp_tfm(tfm);
+}
+
+static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
+{
+ return __crypto_acomp_tfm(req->base.tfm);
+}
+
+/**
+ * crypto_free_acomp() -- free ACOMPRESS tfm handle
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ */
+static inline void crypto_free_acomp(struct crypto_acomp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
+}
+
+static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_ACOMPRESS;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return crypto_has_alg(alg_name, type, mask);
+}
+
+/**
+ * acomp_request_alloc() -- allocates asynchronous (de)compression request
+ *
+ * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
+ *
+ * Return: allocated handle in case of success or NULL in case of an error
+ */
+struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
+
+/**
+ * acomp_request_free() -- zeroize and free asynchronous (de)compression
+ * request as well as the output buffer if allocated
+ * inside the algorithm
+ *
+ * @req: request to free
+ */
+void acomp_request_free(struct acomp_req *req);
+
+/**
+ * acomp_request_set_callback() -- Sets an asynchronous callback
+ *
+ * Callback will be called when an asynchronous operation on a given
+ * request is finished.
+ *
+ * @req: request that the callback will be set for
+ * @flgs: flags that tune the request, e.g. whether the operation may backlog
+ * @cmpl: callback which will be called
+ * @data: private data used by the caller
+ */
+static inline void acomp_request_set_callback(struct acomp_req *req,
+ u32 flgs,
+ crypto_completion_t cmpl,
+ void *data)
+{
+ req->base.complete = cmpl;
+ req->base.data = data;
+ req->base.flags = flgs;
+}
+
+/**
+ * acomp_request_set_params() -- Sets request parameters
+ *
+ * Sets parameters required by an acomp operation
+ *
+ * @req: asynchronous compress request
+ * @src: pointer to input buffer scatterlist
+ * @dst: pointer to output buffer scatterlist. If this is NULL, the
+ * acomp layer will allocate the output memory
+ * @slen: size of the input buffer
+ * @dlen: size of the output buffer. If dst is NULL, this can be used by
+ * the user to specify the maximum amount of memory to allocate
+ */
+static inline void acomp_request_set_params(struct acomp_req *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int slen,
+ unsigned int dlen)
+{
+ req->src = src;
+ req->dst = dst;
+ req->slen = slen;
+ req->dlen = dlen;
+
+ if (!req->dst)
+ req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
+}
+
+/**
+ * crypto_acomp_compress() -- Invoke asynchronous compress operation
+ *
+ * Function invokes the asynchronous compress operation
+ *
+ * @req: asynchronous compress request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_acomp_compress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ return tfm->compress(req);
+}
+
+/**
+ * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
+ *
+ * Function invokes the asynchronous decompress operation
+ *
+ * @req: asynchronous decompress request
+ *
+ * Return: zero on success; error code in case of error
+ */
+static inline int crypto_acomp_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ return tfm->decompress(req);
+}
+
+#endif
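
For orientation, a minimal caller-side sketch of the acomp API added above. Illustrative only, not part of the patch; the synchronous wait via a completion is one common pattern, and "deflate" is just an example algorithm name.

#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static void example_done(struct crypto_async_request *areq, int err)
{
        complete(areq->data);
}

/* Compress slen bytes described by @src into @dst (up to dlen bytes). */
static int example_compress(struct scatterlist *src, unsigned int slen,
                            struct scatterlist *dst, unsigned int dlen)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        int err;

        tfm = crypto_alloc_acomp("deflate", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                err = -ENOMEM;
                goto out_tfm;
        }

        /* A NULL dst would ask the acomp layer to allocate the output. */
        acomp_request_set_params(req, src, dst, slen, dlen);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   example_done, &done);

        err = crypto_acomp_compress(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&done);
                err = 0;        /* real code must pick up the callback's err */
        }

        acomp_request_free(req);
out_tfm:
        crypto_free_acomp(tfm);
        return err;
}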
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
new file mode 100644
index 000000000000..1de2b5af12d7
--- /dev/null
+++ b/include/crypto/internal/acompress.h
@@ -0,0 +1,81 @@
+/*
+ * Asynchronous Compression operations
+ *
+ * Copyright (c) 2016, Intel Corporation
+ * Authors: Weigang Li <weigang.li@intel.com>
+ * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ACOMP_INT_H
+#define _CRYPTO_ACOMP_INT_H
+#include <crypto/acompress.h>
+
+/*
+ * Transform internal helpers.
+ */
+static inline void *acomp_request_ctx(struct acomp_req *req)
+{
+ return req->__ctx;
+}
+
+static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
+{
+ return tfm->base.__crt_ctx;
+}
+
+static inline void acomp_request_complete(struct acomp_req *req,
+ int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
+{
+ return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
+}
+
+static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
+{
+ struct acomp_req *req;
+
+ req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
+ if (likely(req))
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+
+static inline void __acomp_request_free(struct acomp_req *req)
+{
+ kzfree(req);
+}
+
+/**
+ * crypto_register_acomp() -- Register asynchronous compression algorithm
+ *
+ * Function registers an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_acomp(struct acomp_alg *alg);
+
+/**
+ * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
+ *
+ * Function unregisters an implementation of an asynchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_unregister_acomp(struct acomp_alg *alg);
+
+#endif
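
A sketch of how a driver would use these internal helpers to register an implementation. Illustrative only; the mydrv_* names are invented and the callbacks are stubs.

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int mydrv_compress(struct acomp_req *req)
{
        /* Drive the hardware, then call acomp_request_complete(). */
        return -EINPROGRESS;
}

static int mydrv_decompress(struct acomp_req *req)
{
        return -EINPROGRESS;
}

static struct acomp_alg mydrv_acomp = {
        .compress       = mydrv_compress,
        .decompress     = mydrv_decompress,
        .base = {
                .cra_name        = "deflate",
                .cra_driver_name = "deflate-mydrv",
                .cra_priority    = 300,
                .cra_module      = THIS_MODULE,
        },
};

static int __init mydrv_init(void)
{
        return crypto_register_acomp(&mydrv_acomp);
}

static void __exit mydrv_exit(void)
{
        crypto_unregister_acomp(&mydrv_acomp);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");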
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
new file mode 100644
index 000000000000..3fda3c5655a0
--- /dev/null
+++ b/include/crypto/internal/scompress.h
@@ -0,0 +1,136 @@
+/*
+ * Synchronous Compression operations
+ *
+ * Copyright 2015 LG Electronics Inc.
+ * Copyright (c) 2016, Intel Corporation
+ * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_SCOMP_INT_H
+#define _CRYPTO_SCOMP_INT_H
+#include <linux/crypto.h>
+
+#define SCOMP_SCRATCH_SIZE 131072
+
+struct crypto_scomp {
+ struct crypto_tfm base;
+};
+
+/**
+ * struct scomp_alg - synchronous compression algorithm
+ *
+ * @alloc_ctx: Function allocates algorithm specific context
+ * @free_ctx: Function frees context allocated with alloc_ctx
+ * @compress: Function performs a compress operation
+ * @decompress: Function performs a de-compress operation
+ * @init: Initialize the cryptographic transformation object.
+ * This function is used to initialize the cryptographic
+ * transformation object. This function is called only once at
+ * the instantiation time, right after the transformation context
+ * was allocated. In case the cryptographic hardware has some
+ * special requirements which need to be handled by software, this
+ * function shall check for the precise requirement of the
+ * transformation and put any software fallbacks in place.
+ * @exit: Deinitialize the cryptographic transformation object. This is a
+ * counterpart to @init, used to remove various changes set in
+ * @init.
+ * @base: Common crypto API algorithm data structure
+ */
+struct scomp_alg {
+ void *(*alloc_ctx)(struct crypto_scomp *tfm);
+ void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
+ int (*compress)(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx);
+ int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
+ unsigned int slen, u8 *dst, unsigned int *dlen,
+ void *ctx);
+ struct crypto_alg base;
+};
+
+static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct scomp_alg, base);
+}
+
+static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_scomp, base);
+}
+
+static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline void crypto_free_scomp(struct crypto_scomp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
+}
+
+static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
+{
+ return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
+}
+
+static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
+{
+ return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
+}
+
+static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
+ void *ctx)
+{
+ return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
+}
+
+static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen, void *ctx)
+{
+ return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
+}
+
+static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen,
+ void *ctx)
+{
+ return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
+ ctx);
+}
+
+int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
+struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
+void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
+
+/**
+ * crypto_register_scomp() -- Register synchronous compression algorithm
+ *
+ * Function registers an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_register_scomp(struct scomp_alg *alg);
+
+/**
+ * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
+ *
+ * Function unregisters an implementation of a synchronous
+ * compression algorithm
+ *
+ * @alg: algorithm definition
+ *
+ * Return: zero on success; error code in case of error
+ */
+int crypto_unregister_scomp(struct scomp_alg *alg);
+
+#endif
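
A sketch of an scomp backend (illustrative; the myscomp_* names are invented and the bodies are stubs). An algorithm registered with crypto_register_scomp() is consumed through the acomp interface via the wrappers declared above (crypto_init_scomp_ops_async() and friends).

#include <crypto/internal/scompress.h>
#include <linux/module.h>

static void *myscomp_alloc_ctx(struct crypto_scomp *tfm)
{
        return NULL;    /* a stateless backend may need no context */
}

static void myscomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int myscomp_compress(struct crypto_scomp *tfm, const u8 *src,
                            unsigned int slen, u8 *dst, unsigned int *dlen,
                            void *ctx)
{
        /* Compress slen bytes from src into dst and update *dlen. */
        return -ENOSYS;
}

static int myscomp_decompress(struct crypto_scomp *tfm, const u8 *src,
                              unsigned int slen, u8 *dst, unsigned int *dlen,
                              void *ctx)
{
        return -ENOSYS;
}

static struct scomp_alg myscomp = {
        .alloc_ctx      = myscomp_alloc_ctx,
        .free_ctx       = myscomp_free_ctx,
        .compress       = myscomp_compress,
        .decompress     = myscomp_decompress,
        .base = {
                .cra_name        = "deflate",
                .cra_driver_name = "deflate-myscomp",
                .cra_module      = THIS_MODULE,
        },
};

/* Registered via crypto_register_scomp(&myscomp). */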
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 164049357e5c..04602cbe85dc 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -63,7 +63,7 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
}
#endif
-void cper_print_aer(struct pci_dev *dev, int cper_severity,
+void cper_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 316c1462afe6..01b03df14d36 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -49,6 +49,8 @@
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_KPP 0x00000008
+#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
+#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
#define CRYPTO_ALG_TYPE_RNG 0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
@@ -59,6 +61,7 @@
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
+#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
#define CRYPTO_ALG_DEAD 0x00000020
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index c8e1831d7572..99c0be00b47c 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs)
bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
}
-#ifdef CONFIG_HAVE_DMA_ATTRS
/**
* dma_set_attr - set a specific attribute
* @attr: attribute to set
@@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
BUG_ON(attr >= DMA_ATTR_MAX);
return test_bit(attr, attrs->flags);
}
-#else /* !CONFIG_HAVE_DMA_ATTRS */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-}
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_DMA_ATTRS */
#endif /* _DMA_ATTR_H */
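
With the !CONFIG_HAVE_DMA_ATTRS stubs gone, dma_set_attr()/dma_get_attr() are available unconditionally. A minimal sketch of building an attribute set (illustrative; it mirrors the dma_alloc_writecombine() helper touched later in this patch):

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

static void *alloc_wc_buffer(struct device *dev, size_t size,
                             dma_addr_t *dma_handle)
{
        DEFINE_DMA_ATTRS(attrs);

        /* Request a write-combining mapping for the coherent buffer. */
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
}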
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index fe8cb610deac..c7d844f09c3a 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -56,6 +56,13 @@ extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
extern void debug_dma_free_coherent(struct device *dev, size_t size,
void *virt, dma_addr_t addr);
+extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+ size_t size, int direction,
+ dma_addr_t dma_addr);
+
+extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction);
+
extern void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
int direction);
@@ -141,6 +148,18 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
{
}
+static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
+ size_t size, int direction,
+ dma_addr_t dma_addr)
+{
+}
+
+static inline void debug_dma_unmap_resource(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+}
+
static inline void debug_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle,
size_t size, int direction)
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 15d6618741cb..9b8305b13295 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -6,8 +6,11 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -46,6 +49,12 @@ struct dma_map_ops {
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs);
+ dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+ void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
@@ -83,10 +92,419 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These three functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
-#include <asm-generic/dma-mapping-broken.h>
+/*
+ * Define the dma api to allow compilation but not linking of
+ * dma dependent code. Code that depends on the dma-mapping
+ * API needs to set 'depends on HAS_DMA' in its Kconfig
+ */
+extern struct dma_map_ops bad_dma_ops;
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &bad_dma_ops;
+}
+#endif
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, attrs);
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+/*
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ BUG_ON(ents < 0);
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops->unmap_sg)
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline dma_addr_t dma_map_resource(struct device *dev,
+ phys_addr_t phys_addr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ unsigned long pfn = __phys_to_pfn(phys_addr);
+ dma_addr_t addr;
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ /* Don't allow RAM to be mapped */
+ BUG_ON(pfn_valid(pfn));
+
+ addr = phys_addr;
+ if (ops->map_resource)
+ addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+ debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+
+ return addr;
+}
+
+static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_resource)
+ ops->unmap_resource(dev, addr, size, dir, attrs);
+ debug_dma_unmap_resource(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_cpu)
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_device)
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag) (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ if (!arch_dma_alloc_attrs(&dev, &flag))
+ return NULL;
+ if (!ops->alloc)
+ return NULL;
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ WARN_ON(irqs_disabled());
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ if (!ops->free)
+ return;
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (get_dma_ops(dev)->mapping_error)
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+ return dma_addr == DMA_ERROR_CODE;
+#else
+ return 0;
+#endif
+}
+
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops)
+ return 0;
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
#endif
static inline u64 dma_get_mask(struct device *dev)
@@ -208,7 +626,13 @@ static inline int dma_get_cache_alignment(void)
#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
#define DMA_MEMORY_EXCLUSIVE 0x08
-#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int flags);
+void dma_release_declared_memory(struct device *dev);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size);
+#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
@@ -227,7 +651,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
{
return ERR_PTR(-EBUSY);
}
-#endif
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
/*
* Managed DMA API
@@ -240,13 +664,13 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size,
int flags);
extern void dmam_release_declared_memory(struct device *dev);
-#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr, dma_addr_t device_addr,
size_t size, gfp_t gfp)
@@ -257,24 +681,8 @@ static inline int dmam_declare_coherent_memory(struct device *dev,
static inline void dmam_release_declared_memory(struct device *dev)
{
}
-#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-
-#ifndef CONFIG_HAVE_DMA_ATTRS
-struct dma_attrs;
-
-#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
- dma_map_single(dev, cpu_addr, size, dir)
-
-#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
- dma_unmap_single(dev, dma_addr, size, dir)
-
-#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
- dma_map_sg(dev, sgl, nents, dir)
-
-#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
- dma_unmap_sg(dev, sgl, nents, dir)
-
-#else
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
@@ -300,7 +708,6 @@ static inline int dma_mmap_writecombine(struct device *dev,
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
-#endif /* CONFIG_HAVE_DMA_ATTRS */
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
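
The new map_resource/unmap_resource hooks target physical ranges that are not RAM (the BUG_ON(pfn_valid(pfn)) above enforces this), e.g. a slave device's FIFO for device-to-device transfers. A caller-side sketch (illustrative; function and variable names are invented):

#include <linux/dma-mapping.h>

static int map_slave_fifo(struct device *dev, phys_addr_t fifo_phys,
                          size_t len, dma_addr_t *out)
{
        dma_addr_t dma;

        dma = dma_map_resource(dev, fifo_phys, len, DMA_BIDIRECTIONAL, 0);
        if (dma_mapping_error(dev, dma))
                return -EIO;

        *out = dma;
        return 0;
}

/* Teardown pairs with:
 *      dma_unmap_resource(dev, dma, len, DMA_BIDIRECTIONAL, 0);
 */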
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c47c68e535e8..fc30d2062fc3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -439,6 +439,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
typedef void (*dma_async_tx_callback)(void *dma_async_param);
+enum dmaengine_tx_result {
+ DMA_TRANS_NOERROR = 0, /* SUCCESS */
+ DMA_TRANS_READ_FAILED, /* Source DMA read failed */
+ DMA_TRANS_WRITE_FAILED, /* Destination DMA write failed */
+ DMA_TRANS_ABORTED, /* Op never submitted / aborted */
+};
+
+struct dmaengine_result {
+ enum dmaengine_tx_result result;
+ u32 residue;
+};
+
+typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
+ const struct dmaengine_result *result);
+
struct dmaengine_unmap_data {
u8 map_cnt;
u8 to_cnt;
@@ -476,6 +491,7 @@ struct dma_async_tx_descriptor {
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
int (*desc_free)(struct dma_async_tx_descriptor *tx);
dma_async_tx_callback callback;
+ dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
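
The new callback_result hook lets a client learn whether a transfer actually succeeded and how much data was left over. A sketch of a result-aware completion handler (illustrative; names are invented, and the descriptor wiring is shown as a comment):

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/printk.h>

static void my_xfer_done(void *param, const struct dmaengine_result *result)
{
        struct completion *done = param;

        if (result->result != DMA_TRANS_NOERROR)
                pr_err("dma transfer failed (%d), residue %u\n",
                       result->result, result->residue);
        complete(done);
}

/* On a prepared descriptor, before submission:
 *      tx->callback_result = my_xfer_done;
 *      tx->callback_param  = &done;
 *      dmaengine_submit(tx);
 */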
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2763c5c4b438..26741a9b3326 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/pfn.h>
#include <linux/pstore.h>
+#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/screen_info.h>
@@ -129,6 +130,7 @@ typedef struct {
#define EFI_PAGE_SHIFT 12
#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@@ -146,6 +148,15 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
+
/*
* EFI capsule flags
*/
@@ -664,6 +675,10 @@ void efi_native_runtime_setup(void);
EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
+ EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
+ 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+
typedef struct {
efi_guid_t guid;
u64 table;
@@ -747,6 +762,18 @@ typedef struct {
unsigned long tables;
} efi_system_table_t;
+/*
+ * Architecture independent structure for describing a memory map for the
+ * benefit of efi_memmap_init_early(), saving us the need to pass four
+ * parameters.
+ */
+struct efi_memory_map_data {
+ phys_addr_t phys_map;
+ unsigned long size;
+ unsigned long desc_version;
+ unsigned long desc_size;
+};
+
struct efi_memory_map {
phys_addr_t phys_map;
void *map;
@@ -754,6 +781,12 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
+ bool late;
+};
+
+struct efi_mem_range {
+ struct range range;
+ u64 attribute;
};
struct efi_fdt_params {
@@ -905,6 +938,14 @@ typedef struct {
/* efi_signature_data_t signatures[][] */
} efi_signature_list_t;
+typedef struct {
+ u32 version;
+ u32 num_entries;
+ u32 desc_size;
+ u32 reserved;
+ efi_memory_desc_t entry[0];
+} efi_memory_attributes_table_t;
+
/*
* All runtime access to EFI goes through this structure:
*/
@@ -926,6 +967,7 @@ extern struct efi {
unsigned long config_table; /* config tables */
unsigned long esrt; /* ESRT table */
unsigned long properties_table; /* properties table */
+ unsigned long mem_attr_table; /* memory attributes table */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -940,7 +982,7 @@ extern struct efi {
efi_get_next_high_mono_count_t *get_next_high_mono_count;
efi_reset_system_t *reset_system;
efi_set_virtual_address_map_t *set_virtual_address_map;
- struct efi_memory_map *memmap;
+ struct efi_memory_map memmap;
unsigned long flags;
} efi;
@@ -1011,6 +1053,17 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
}
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
+extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+extern void __init efi_memmap_unmap(void);
+extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+ struct range *range);
+extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
+ void *buf, struct efi_mem_range *mem);
+
extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
@@ -1026,12 +1079,12 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int __init efi_uart_console_only (void);
extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
extern void efi_get_time(struct timespec *now);
extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
-extern struct efi_memory_map memmap;
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
@@ -1043,12 +1096,34 @@ extern void __init efi_fake_memmap(void);
static inline void efi_fake_memmap(void) { }
#endif
+/*
+ * efi_memattr_perm_setter - arch specific callback function passed into
+ * efi_memattr_apply_permissions() that updates the
+ * mapping permissions described by the second
+ * argument in the page tables referred to by the
+ * first argument.
+ */
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+
+extern int efi_memattr_init(void);
+extern int efi_memattr_apply_permissions(struct mm_struct *mm,
+ efi_memattr_perm_setter fn);
+
/* Iterate through an efi_memory_map */
-#define for_each_efi_memory_desc(m, md) \
+#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
(md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
(md) = (void *)(md) + (m)->desc_size)
+/**
+ * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
+ * @md: the efi_memory_desc_t * iterator
+ *
+ * Once the loop finishes @md must not be accessed.
+ */
+#define for_each_efi_memory_desc(md) \
+ for_each_efi_memory_desc_in_map(&efi.memmap, md)
+
/*
* Format an EFI memory descriptor's type and attributes to a user-provided
* character buffer, as per snprintf(), and return the buffer.
@@ -1429,7 +1504,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,
#ifdef CONFIG_EFI_RUNTIME_MAP
int efi_runtime_map_init(struct kobject *);
-void efi_runtime_map_setup(void *, int, u32);
int efi_get_runtime_map_size(void);
int efi_get_runtime_map_desc_size(void);
int efi_runtime_map_copy(void *buf, size_t bufsz);
@@ -1439,9 +1513,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
return 0;
}
-static inline void
-efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
-
static inline int efi_get_runtime_map_size(void)
{
return 0;
@@ -1473,11 +1544,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr);
+ struct efi_boot_memmap *map);
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
@@ -1508,4 +1575,15 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
unsigned long size);
bool efi_runtime_disabled(void);
+
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
#endif /* _LINUX_EFI_H */
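
With efi.memmap now embedded by value and the iterator reworked, walking the map no longer needs an explicit map argument. A small sketch (illustrative, not from the patch):

#include <linux/efi.h>

static u64 count_runtime_pages(void)
{
        efi_memory_desc_t *md;
        u64 pages = 0;

        /* md must not be used once the loop has finished. */
        for_each_efi_memory_desc(md) {
                if (md->attribute & EFI_MEMORY_RUNTIME)
                        pages += md->num_pages;
        }
        return pages;
}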
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 7f18fafe3530..36da78f7af51 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -53,6 +53,8 @@ extern char __dtb_end[];
extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname,
int depth, void *data),
void *data);
+extern int of_get_flat_dt_subnode_by_name(unsigned long node,
+ const char *uname);
extern const void *of_get_flat_dt_prop(unsigned long node, const char *name,
int *size);
extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index aff30e41a2a2..8a85c36b105d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -30,7 +30,6 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
-#include <linux/notifier.h>
#include <uapi/linux/pci.h>
#include <linux/pci_ids.h>
@@ -1044,13 +1043,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
-struct pci_bar_update_info {
- u64 old_start;
- u64 new_start;
- u64 size;
-};
-int pci_notify_on_update_resource(struct notifier_block *nb);
-
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 79b5ded2001a..11d21fce14d6 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -46,6 +46,7 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
+ CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -112,5 +113,9 @@ struct crypto_report_kpp {
char type[CRYPTO_MAX_NAME];
};
+struct crypto_report_acomp {
+ char type[CRYPTO_MAX_NAME];
+};
+
#define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
sizeof(struct crypto_report_blkcipher))
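
A sketch of how the new CRYPTOCFGA_REPORT_ACOMP attribute would be filled in a crypto_user report, by analogy with the existing report helpers (illustrative; the function name and wiring are assumptions):

#include <linux/cryptouser.h>
#include <linux/string.h>
#include <net/netlink.h>

static int report_acomp(struct sk_buff *skb)
{
        struct crypto_report_acomp racomp;

        strncpy(racomp.type, "acomp", sizeof(racomp.type));
        return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
                       sizeof(racomp), &racomp);
}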
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 51a76af25c66..ce75fba6fb7c 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -43,6 +43,7 @@ enum {
dma_debug_page,
dma_debug_sg,
dma_debug_coherent,
+ dma_debug_resource,
};
enum map_err_types {
@@ -150,8 +151,9 @@ static const char *const maperr2str[] = {
[MAP_ERR_CHECKED] = "dma map error checked",
};
-static const char *type2name[4] = { "single", "page",
- "scather-gather", "coherent" };
+static const char *type2name[5] = { "single", "page",
+ "scather-gather", "coherent",
+ "resource" };
static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
"DMA_FROM_DEVICE", "DMA_NONE" };
@@ -397,6 +399,9 @@ static void hash_bucket_del(struct dma_debug_entry *entry)
static unsigned long long phys_addr(struct dma_debug_entry *entry)
{
+ if (entry->type == dma_debug_resource)
+ return __pfn_to_phys(entry->pfn) + entry->offset;
+
return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
}
@@ -1493,6 +1498,49 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
}
EXPORT_SYMBOL(debug_dma_free_coherent);
+void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+ int direction, dma_addr_t dma_addr)
+{
+ struct dma_debug_entry *entry;
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ entry = dma_entry_alloc();
+ if (!entry)
+ return;
+
+ entry->type = dma_debug_resource;
+ entry->dev = dev;
+ entry->pfn = __phys_to_pfn(addr);
+ entry->offset = offset_in_page(addr);
+ entry->size = size;
+ entry->dev_addr = dma_addr;
+ entry->direction = direction;
+ entry->map_err_type = MAP_ERR_NOT_CHECKED;
+
+ add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_resource);
+
+void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ struct dma_debug_entry ref = {
+ .type = dma_debug_resource,
+ .dev = dev,
+ .dev_addr = dma_addr,
+ .size = size,
+ .direction = direction,
+ };
+
+ if (unlikely(dma_debug_disabled()))
+ return;
+
+ check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_resource);
+
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
{