Commit 2fcd2b30 authored by Linus Torvalds

Merge branch 'x86-dma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 dma mapping updates from Ingo Molnar:
 "This tree, by Christoph Hellwig, switches over the x86 architecture to
  the generic dma-direct and swiotlb code, and also unifies more of the
  dma-direct code between architectures. The now unused x86-only
  primitives are removed"

* 'x86-dma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  dma-mapping: Don't clear GFP_ZERO in dma_alloc_attrs
  swiotlb: Make swiotlb_{alloc,free}_buffer depend on CONFIG_DMA_DIRECT_OPS
  dma/swiotlb: Remove swiotlb_{alloc,free}_coherent()
  dma/direct: Handle force decryption for DMA coherent buffers in common code
  dma/direct: Handle the memory encryption bit in common code
  dma/swiotlb: Remove swiotlb_set_mem_attributes()
  set_memory.h: Provide set_memory_{en,de}crypted() stubs
  x86/dma: Remove dma_alloc_coherent_gfp_flags()
  iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()
  iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}()
  x86/dma/amd_gart: Use dma_direct_{alloc,free}()
  x86/dma/amd_gart: Look at dev->coherent_dma_mask instead of GFP_DMA
  x86/dma: Use generic swiotlb_ops
  x86/dma: Use DMA-direct (CONFIG_DMA_DIRECT_OPS=y)
  x86/dma: Remove dma_alloc_coherent_mask()
parents ce6eba3d e89f5b37
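
The rename that runs through the arch diffs below (phys_to_dma becomes __phys_to_dma, dma_to_phys becomes __dma_to_phys) is what lets the common dma-direct code apply the SME memory-encryption bit in one place instead of in every architecture, per the "Handle the memory encryption bit in common code" commit above. A minimal sketch of that pattern follows; it is an illustration of the wrapper idea, not a verbatim quote of include/linux/dma-direct.h, and the example_* names are hypothetical:

/*
 * Sketch: the architecture supplies the raw __phys_to_dma()/__dma_to_phys()
 * hooks, and generic wrappers add or strip the memory-encryption bit via
 * __sme_set()/__sme_clr() so arch code no longer has to.
 */
#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>

static inline dma_addr_t example_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	/* arch hook produces the bus address, common code sets the C-bit */
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t example_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	/* strip the C-bit before translating back to a physical address */
	return __sme_clr(__dma_to_phys(dev, daddr));
}
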
@@ -2,13 +2,13 @@
 #ifndef ASM_ARM_DMA_DIRECT_H
 #define ASM_ARM_DMA_DIRECT_H 1

-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	unsigned int offset = paddr & ~PAGE_MASK;
 	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
 }

-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 {
 	unsigned int offset = dev_addr & ~PAGE_MASK;
 	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
...
@@ -10,7 +10,7 @@
  * IP32 changes by Ilya.
  * Copyright (C) 2010 Cavium Networks, Inc.
  */
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
 #include <linux/bootmem.h>
 #include <linux/export.h>
@@ -182,7 +182,7 @@ struct octeon_dma_map_ops {
 	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
 };

-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
 						      struct octeon_dma_map_ops,
@@ -190,9 +190,9 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)

 	return ops->phys_to_dma(dev, paddr);
 }
-EXPORT_SYMBOL(phys_to_dma);
+EXPORT_SYMBOL(__phys_to_dma);

-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
 						      struct octeon_dma_map_ops,
@@ -200,7 +200,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)

 	return ops->dma_to_phys(dev, daddr);
 }
-EXPORT_SYMBOL(dma_to_phys);
+EXPORT_SYMBOL(__dma_to_phys);

 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
 	.dma_map_ops = {
...
@@ -69,8 +69,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }

-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);

 struct dma_map_ops;
 extern const struct dma_map_ops *octeon_pci_dma_map_ops;
...
@@ -25,13 +25,13 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }

-extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+extern dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+extern phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
 					  size_t size)
 {
 #ifdef CONFIG_CPU_LOONGSON3
-	return phys_to_dma(dev, virt_to_phys(addr));
+	return __phys_to_dma(dev, virt_to_phys(addr));
 #else
 	return virt_to_phys(addr) | 0x80000000;
 #endif
@@ -41,7 +41,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
 					       struct page *page)
 {
 #ifdef CONFIG_CPU_LOONGSON3
-	return phys_to_dma(dev, page_to_phys(page));
+	return __phys_to_dma(dev, page_to_phys(page));
 #else
 	return page_to_phys(page) | 0x80000000;
 #endif
@@ -51,7 +51,7 @@ static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
 						  dma_addr_t dma_addr)
 {
 #if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
-	return dma_to_phys(dev, dma_addr);
+	return __dma_to_phys(dev, dma_addr);
 #elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
 	return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
 #else
...
@@ -63,7 +63,7 @@ static int loongson_dma_supported(struct device *dev, u64 mask)
 	return swiotlb_dma_supported(dev, mask);
 }

-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	long nid;
 #ifdef CONFIG_PHYS48_TO_HT40
@@ -75,7 +75,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 	return paddr;
 }

-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 	long nid;
 #ifdef CONFIG_PHYS48_TO_HT40
...
@@ -17,12 +17,12 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }

-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return paddr + get_dma_offset(dev);
 }

-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
 	return daddr - get_dma_offset(dev);
 }
...
@@ -54,7 +54,6 @@ config X86
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_KCOV if X86_64
-	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_MEMBARRIER_SYNC_CORE
 	select ARCH_HAS_PMEM_API if X86_64
 	select ARCH_HAS_REFCOUNT
@@ -83,6 +82,7 @@ config X86
 	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
 	select CLOCKSOURCE_WATCHDOG
 	select DCACHE_WORD_ACCESS
+	select DMA_DIRECT_OPS
 	select EDAC_ATOMIC_SCRUB
 	select EDAC_SUPPORT
 	select GENERIC_CLOCKEVENTS
@@ -680,6 +680,7 @@ config X86_SUPPORTS_MEMORY_FAILURE
 config STA2X11
 	bool "STA2X11 Companion Chip Support"
 	depends on X86_32_NON_STANDARD && PCI
+	select ARCH_HAS_PHYS_TO_DMA
 	select X86_DEV_DMA_OPS
 	select X86_DMA_REMAP
 	select SWIOTLB
...
@@ -6,6 +6,9 @@ struct dev_archdata {
 #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
+#ifdef CONFIG_STA2X11
+	bool is_sta2x11;
+#endif
 };

 #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
...
@@ -2,29 +2,8 @@
 #ifndef ASM_X86_DMA_DIRECT_H
 #define ASM_X86_DMA_DIRECT_H 1

-#include <linux/mem_encrypt.h>
-
-#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
 bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-#else
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return 0;
-
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return __sme_set(paddr);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-	return __sme_clr(daddr);
-}
-#endif /* CONFIG_X86_DMA_REMAP */
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr);
+
 #endif /* ASM_X86_DMA_DIRECT_H */
@@ -36,37 +36,4 @@ int arch_dma_supported(struct device *dev, u64 mask);
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
 #define arch_dma_alloc_attrs arch_dma_alloc_attrs

-extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag,
-					unsigned long attrs);
-
-extern void dma_generic_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_addr,
-				      unsigned long attrs);
-
-static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
-						    gfp_t gfp)
-{
-	unsigned long dma_mask = 0;
-
-	dma_mask = dev->coherent_dma_mask;
-	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
-
-	return dma_mask;
-}
-
-static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
-{
-	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
-
-	if (dma_mask <= DMA_BIT_MASK(24))
-		gfp |= GFP_DMA;
-#ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-		gfp |= GFP_DMA32;
-#endif
-	return gfp;
-}
-
 #endif
@@ -2,13 +2,10 @@
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H

-extern const struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;

-int x86_dma_supported(struct device *dev, u64 mask);
-
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
...
@@ -49,8 +49,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);

 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void);

-void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
-
 bool sme_active(void);
 bool sev_active(void);
...
@@ -27,12 +27,4 @@ static inline void pci_swiotlb_late_init(void)
 {
 }
 #endif
-
-extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags,
-					unsigned long attrs);
-extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_addr,
-				      unsigned long attrs);
-
 #endif /* _ASM_X86_SWIOTLB_H */
@@ -57,7 +57,7 @@ obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o
 obj-$(CONFIG_SYSFS) += ksysfs.o
 obj-y += bootflag.o e820.o
 obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
-obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
+obj-y += alternative.o i8253.o hw_breakpoint.o
 obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y += pci-iommu_table.o
 obj-y += resource.o
...
@@ -480,30 +480,21 @@ static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		    gfp_t flag, unsigned long attrs)
 {
-	dma_addr_t paddr;
-	unsigned long align_mask;
-	struct page *page;
-
-	if (force_iommu && !(flag & GFP_DMA)) {
-		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-		if (!page)
-			return NULL;
-
-		align_mask = (1UL << get_order(size)) - 1;
-		paddr = dma_map_area(dev, page_to_phys(page), size,
-				     DMA_BIDIRECTIONAL, align_mask);
-
-		flush_gart();
-		if (paddr != bad_dma_addr) {
-			*dma_addr = paddr;
-			return page_address(page);
-		}
-		__free_pages(page, get_order(size));
-	} else
-		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
-						  attrs);
-
+	void *vaddr;
+
+	vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+	if (!vaddr ||
+	    !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+		return vaddr;
+
+	*dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size,
+			DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1);
+	flush_gart();
+	if (unlikely(*dma_addr == bad_dma_addr))
+		goto out_free;
+	return vaddr;
+out_free:
+	dma_direct_free(dev, size, vaddr, *dma_addr, attrs);
 	return NULL;
 }
@@ -513,7 +504,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 		   dma_addr_t dma_addr, unsigned long attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
-	dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
+	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
 }

 static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -705,7 +696,7 @@ static const struct dma_map_ops gart_dma_ops = {
 	.alloc = gart_alloc_coherent,
 	.free = gart_free_coherent,
 	.mapping_error = gart_mapping_error,
-	.dma_supported = x86_dma_supported,
+	.dma_supported = dma_direct_supported,
 };

 static void gart_iommu_shutdown(void)
...
@@ -33,6 +33,7 @@
 #include <linux/string.h>
 #include <linux/crash_dump.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/bitmap.h>
 #include <linux/pci_ids.h>
 #include <linux/pci.h>
@@ -445,8 +446,6 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
 	npages = size >> PAGE_SHIFT;
 	order = get_order(size);

-	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
 	/* alloc enough pages (and possibly more) */
 	ret = (void *)__get_free_pages(flag, order);
 	if (!ret)
@@ -493,7 +492,7 @@ static const struct dma_map_ops calgary_dma_ops = {
 	.map_page = calgary_map_page,
 	.unmap_page = calgary_unmap_page,
 	.mapping_error = calgary_mapping_error,
-	.dma_supported = x86_dma_supported,
+	.dma_supported = dma_direct_supported,
 };

 static inline void __iomem * busno_to_bbar(unsigned char num)
...
@@ -18,7 +18,7 @@

 static int forbid_dac __read_mostly;

-const struct dma_map_ops *dma_ops = &nommu_dma_ops;
+const struct dma_map_ops *dma_ops = &dma_direct_ops;
 EXPORT_SYMBOL(dma_ops);

 static int iommu_sac_force __read_mostly;
@@ -76,70 +76,12 @@ void __init pci_iommu_alloc(void)
 		}
 	}
 }
-
-void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag,
-				 unsigned long attrs)
-{
-	unsigned long dma_mask;
-	struct page *page;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	dma_addr_t addr;
-
-	dma_mask = dma_alloc_coherent_mask(dev, flag);
-
-again:
-	page = NULL;
-	/* CMA can be used only in the context which permits sleeping */
-	if (gfpflags_allow_blocking(flag)) {
-		page = dma_alloc_from_contiguous(dev, count, get_order(size),
-						 flag);
-		if (page) {
-			addr = phys_to_dma(dev, page_to_phys(page));
-			if (addr + size > dma_mask) {
-				dma_release_from_contiguous(dev, page, count);
-				page = NULL;
-			}
-		}
-	}
-	/* fallback */
-	if (!page)
-		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
-	if (!page)
-		return NULL;
-
-	addr = phys_to_dma(dev, page_to_phys(page));
-	if (addr + size > dma_mask) {
-		__free_pages(page, get_order(size));
-
-		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
-			flag = (flag & ~GFP_DMA32) | GFP_DMA;
-			goto again;
-		}
-
-		return NULL;
-	}
-	memset(page_address(page), 0, size);
-	*dma_addr = addr;
-	return page_address(page);
-}
-
-void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_addr, unsigned long attrs)
-{
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page *page = virt_to_page(vaddr);
-
-	if (!dma_release_from_contiguous(dev, page, count))
-		free_pages((unsigned long)vaddr, get_order(size));
-}
-
 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
 {
 	if (!*dev)