author     Christoph Hellwig <hch@lst.de>  2018-07-05 13:29:55 -0600
committer  Christoph Hellwig <hch@lst.de>  2018-07-05 13:32:06 -0600
commit     7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 (patch)
tree       e7a9ef9d6b6ab39fc0573d9ddf275fb6202839b1
parent     06c85639897cf3ea6a11c5cb6929fb0d9d7efea5 (diff)
Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()"
This commit may cause a less than required dma mask to be used for
some allocations, which apparently leads to module load failures for
iwlwifi sometimes.

This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Fabio Coatti <fabio.coatti@gmail.com>
Tested-by: Fabio Coatti <fabio.coatti@gmail.com>
-rw-r--r--  drivers/iommu/Kconfig        |  1 -
-rw-r--r--  drivers/iommu/intel-iommu.c  | 62 ++++++++++++++++++++-------
2 files changed, 46 insertions(+), 17 deletions(-)
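Why the revert is needed, in short: with the IOMMU in pass-through (identity-map) mode the device sees raw CPU physical addresses, so intel_alloc_coherent() must steer the allocation into ZONE_DMA or ZONE_DMA32 whenever the device's coherent mask cannot cover all of RAM; the reverted code delegated this to dma_direct_alloc(), which could end up applying a smaller mask than required. Below is a minimal userspace sketch (not kernel code) of the zone-selection policy the revert restores; pick_zone(), identity_mapped, coherent_mask and required_mask are illustrative stand-ins for iommu_no_mapping(dev), dev->coherent_dma_mask and dma_get_required_mask(dev).

/*
 * Minimal userspace sketch, not kernel code: models the GFP zone
 * selection the restored intel_alloc_coherent() performs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

enum zone { ZONE_NORMAL, ZONE_DMA32, ZONE_DMA };

static enum zone pick_zone(bool identity_mapped, uint64_t coherent_mask,
                           uint64_t required_mask)
{
        /* IOMMU remaps the buffer, so any physical page is reachable. */
        if (!identity_mapped)
                return ZONE_NORMAL;
        /* Mask already covers all RAM the platform can hand out. */
        if (coherent_mask >= required_mask)
                return ZONE_NORMAL;
        /* Device addresses fewer than 32 bits: only ZONE_DMA is safe. */
        if (coherent_mask < DMA_BIT_MASK(32))
                return ZONE_DMA;
        /* 32-to-63-bit device on a machine with more RAM than that. */
        return ZONE_DMA32;
}

int main(void)
{
        /* e.g. a 36-bit device on a box whose RAM needs 40 bits */
        printf("%d\n", pick_zone(true, DMA_BIT_MASK(36), DMA_BIT_MASK(40)));
        return 0;
}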
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..689ffe538370 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
        bool "Support for Intel IOMMU using DMA Remapping Devices"
        depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-       select DMA_DIRECT_OPS
        select IOMMU_API
        select IOMMU_IOVA
        select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..b344a883f116 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
 {
-       void *vaddr;
+       struct page *page = NULL;
+       int order;
 
-       vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-       if (iommu_no_mapping(dev) || !vaddr)
-               return vaddr;
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
 
-       *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-                       PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-                       dev->coherent_dma_mask);
-       if (!*dma_handle)
-               goto out_free_pages;
-       return vaddr;
+       if (!iommu_no_mapping(dev))
+               flags &= ~(GFP_DMA | GFP_DMA32);
+       else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+               if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+                       flags |= GFP_DMA;
+               else
+                       flags |= GFP_DMA32;
+       }
+
+       if (gfpflags_allow_blocking(flags)) {
+               unsigned int count = size >> PAGE_SHIFT;
+
+               page = dma_alloc_from_contiguous(dev, count, order, flags);
+               if (page && iommu_no_mapping(dev) &&
+                   page_to_phys(page) + size > dev->coherent_dma_mask) {
+                       dma_release_from_contiguous(dev, page, count);
+                       page = NULL;
+               }
+       }
+
+       if (!page)
+               page = alloc_pages(flags, order);
+       if (!page)
+               return NULL;
+       memset(page_address(page), 0, size);
+
+       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+                                        DMA_BIDIRECTIONAL,
+                                        dev->coherent_dma_mask);
+       if (*dma_handle)
+               return page_address(page);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 
-out_free_pages:
-       dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
        return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
                                dma_addr_t dma_handle, unsigned long attrs)
 {
-       if (!iommu_no_mapping(dev))
-               intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-       dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+       int order;
+       struct page *page = virt_to_page(vaddr);
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       intel_unmap(dev, dma_handle, size);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
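
On the consumer side, a hypothetical probe function of the shape that triggered the reported failure is sketched below; example_probe() and the 36-bit mask are made up for illustration, while dma_set_mask_and_coherent(), dma_alloc_coherent() and dma_free_coherent() are the regular kernel DMA API.

/*
 * Hypothetical driver probe sketch.  With the reverted commit in
 * place, dma_direct could apply a narrower mask than the device
 * actually needed, so the dma_alloc_coherent() call below could fail
 * at module load time -- the symptom reported for iwlwifi above.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)
{
        dma_addr_t dma;
        void *buf;
        int ret;

        /* Declare that the device can address at most 36 bits. */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
        if (ret)
                return ret;

        /* Coherent buffer, e.g. for a descriptor ring. */
        buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... program dma into the hardware and use buf ... */

        dma_free_coherent(dev, SZ_4K, buf, dma);
        return 0;
}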