@@ -128,16 +128,16 @@ static void __dma_page_dev_to_cpu(struct page *, unsigned long,
  */
 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
+	     unsigned long attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
+	     unsigned long attrs)
 {
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
@@ -157,10 +157,9 @@ static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *pag
  * whatever the device wrote there.
  */
 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -198,12 +197,12 @@ struct dma_map_ops arm_dma_ops = {
 EXPORT_SYMBOL(arm_dma_ops);
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
-				  dma_addr_t handle, struct dma_attrs *attrs);
+				  dma_addr_t handle, unsigned long attrs);
 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		 struct dma_attrs *attrs);
+		 unsigned long attrs);
 
 struct dma_map_ops arm_coherent_dma_ops = {
	.alloc = arm_coherent_dma_alloc,
@@ -639,11 +638,11 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
 {
-	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
-			pgprot_writecombine(prot) :
-			pgprot_dmacoherent(prot);
+	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
+			pgprot_writecombine(prot) :
+			pgprot_dmacoherent(prot);
 	return prot;
 }
@@ -751,7 +750,7 @@ static struct arm_dma_allocator remap_allocator = {
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
-			 struct dma_attrs *attrs, const void *caller)
+			 unsigned long attrs, const void *caller)
 {
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
@@ -764,7 +763,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		.gfp = gfp,
 		.prot = prot,
 		.caller = caller,
-		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
 		.coherent_flag = is_coherent ? COHERENT : NORMAL,
 	};
@@ -834,7 +833,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
  * virtual and bus address for that space.
  */
 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-		    gfp_t gfp, struct dma_attrs *attrs)
+		    gfp_t gfp, unsigned long attrs)
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
@@ -843,7 +842,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 }
 
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
-	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
@@ -851,7 +850,7 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		 struct dma_attrs *attrs)
+		 unsigned long attrs)
 {
	int ret = -ENXIO;
 #ifdef CONFIG_MMU
@@ -879,14 +878,14 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
  */
 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		 struct dma_attrs *attrs)
+		 unsigned long attrs)
 {
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		 struct dma_attrs *attrs)
+		 unsigned long attrs)
 {
 #ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
@@ -898,7 +897,7 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
  * Free a buffer as defined by the above mapping.
  */
 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			   dma_addr_t handle, struct dma_attrs *attrs,
+			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
 {
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
@@ -908,7 +907,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		.size = PAGE_ALIGN(size),
 		.cpu_addr = cpu_addr,
 		.page = page,
-		.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
 	};
 
 	buf = arm_dma_buffer_find(cpu_addr);
@@ -920,20 +919,20 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 }
 
 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t handle, struct dma_attrs *attrs)
+		  dma_addr_t handle, unsigned long attrs)
 {
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
 }
 
 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
-				  dma_addr_t handle, struct dma_attrs *attrs)
+				  dma_addr_t handle, unsigned long attrs)
 {
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
-		 struct dma_attrs *attrs)
+		 unsigned long attrs)
 {
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;
@@ -1066,7 +1065,7 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
  * here.
  */
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs)
+		enum dma_data_direction dir, unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
@@ -1100,7 +1099,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  * rules concerning calls here are the same as for dma_unmap_single().
  */
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs)
+		enum dma_data_direction dir, unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
@@ -1273,7 +1272,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 static const int iommu_order_array[] = { 9, 8, 4, 0 };
 
 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
-					  gfp_t gfp, struct dma_attrs *attrs,
+					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
 {
	struct page **pages;
@@ -1289,7 +1288,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	if (!pages)
 		return NULL;
 
-	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
 	{
 		unsigned long order = get_order(size);
 		struct page *page;
@@ -1307,7 +1306,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 	}
 
 	/* Go straight to 4K chunks if caller says it's OK. */
-	if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
 		order_idx = ARRAY_SIZE(iommu_order_array) - 1;
 
 	/*
@@ -1363,12 +1362,12 @@ error:
 }
 
 static int __iommu_free_buffer(struct device *dev, struct page **pages,
-			       size_t size, struct dma_attrs *attrs)
+			       size_t size, unsigned long attrs)
 {
	int count = size >> PAGE_SHIFT;
	int i;
 
-	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
@@ -1460,14 +1459,14 @@ static struct page **__atomic_get_pages(void *addr)
 	return (struct page **)page;
 }
 
-static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
 {
	struct vm_struct *area;
 
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);
 
-	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;
 
	area = find_vm_area(cpu_addr);
@@ -1511,7 +1510,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
 }
 
 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
 {
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
@@ -1542,7 +1541,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (*handle == DMA_ERROR_CODE)
 		goto err_buffer;
 
-	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
 		return pages;
 
 	addr = __iommu_alloc_remap(pages, size, gfp, prot,
@@ -1560,20 +1559,20 @@ err_buffer:
 }
 
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
 }
 
 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
-	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
 {
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
 }
 
 static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		    struct dma_attrs *attrs)
+		    unsigned long attrs)
 {
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
@@ -1603,7 +1602,7 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma
 }
 
 static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
-		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+		dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
@@ -1612,7 +1611,7 @@ static int arm_iommu_mmap_attrs(struct device *dev,
 static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
-		dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+		dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -1622,7 +1621,7 @@ static int arm_coherent_iommu_mmap_attrs(struct device *dev,
  * Must not be called with IRQs disabled.
  */
 void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-	dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
+	dma_addr_t handle, unsigned long attrs, int coherent_flag)
 {
	struct page **pages;
	size = PAGE_ALIGN(size);
@@ -1638,7 +1637,7 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
 		dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 	}
@@ -1648,20 +1647,20 @@ void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 
 void arm_iommu_free_attrs(struct device *dev, size_t size,
-		    void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
 }
 
 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
-		    void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
 {
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
-				 size_t size, struct dma_attrs *attrs)
+				 size_t size, unsigned long attrs)
 {
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
@@ -1699,7 +1698,7 @@ static int __dma_direction_to_prot(enum dma_data_direction dir)
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
 {
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
@@ -1720,8 +1719,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!is_coherent &&
-			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
		prot = __dma_direction_to_prot(dir);
@@ -1742,7 +1740,7 @@ fail:
 }
 
 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		     enum dma_data_direction dir, struct dma_attrs *attrs,
+		     enum dma_data_direction dir, unsigned long attrs,
		     bool is_coherent)
 {
	struct scatterlist *s = sg, *dma = sg, *start = sg;
@@ -1800,7 +1798,7 @@ bad_mapping:
  * obtained via sg_dma_{address,length}.
  */
 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
 }
@@ -1818,14 +1816,14 @@ int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
  * sg_dma_{address,length}.
  */
 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
 }
 
 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
-		bool is_coherent)
+		int nents, enum dma_data_direction dir,
+		unsigned long attrs, bool is_coherent)
 {
	struct scatterlist *s;
	int i;
@@ -1834,8 +1832,7 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
-		if (!is_coherent &&
-			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
 	}
@@ -1852,7 +1849,8 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
  * rules concerning calls here are the same as for dma_unmap_single().
  */
 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+		int nents, enum dma_data_direction dir,
+		unsigned long attrs)
 {
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
 }
@@ -1868,7 +1866,8 @@ void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
  * rules concerning calls here are the same as for dma_unmap_single().
  */
 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction dir, struct dma_attrs *attrs)
+			enum dma_data_direction dir,
+			unsigned long attrs)
 {
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
 }
@@ -1921,7 +1920,7 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
+	     unsigned long attrs)
 {
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
@@ -1955,9 +1954,9 @@ fail:
  */
 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     struct dma_attrs *attrs)
+	     unsigned long attrs)
 {
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
 
	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
@@ -1973,8 +1972,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
  * Coherent IOMMU aware version of arm_dma_unmap_page()
  */
 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
@@ -1998,8 +1996,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
  * IOMMU aware version of arm_dma_unmap_page()
  */
 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		struct dma_attrs *attrs)
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
@@ -2010,7 +2007,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
	iommu_unmap(mapping->domain, iova, len);
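
Note on the conversion pattern shown above (a reviewer sketch, not part of the patch): once struct dma_attrs is replaced by a plain unsigned long bitmask, callers no longer build an attrs object with DEFINE_DMA_ATTRS()/dma_set_attr(); they OR the DMA_ATTR_* flags together and pass the value directly, and every dma_get_attr() call inside the ops becomes a simple bitwise test. A minimal before/after illustration for a hypothetical driver caller (dev, buf and size are placeholders, not taken from this file):

	/* before: attrs carried in a struct dma_attrs bitmap */
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dma_addr = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, &attrs);

	/* after: attrs is an unsigned long bitmask, passed by value */
	dma_addr = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE,
					DMA_ATTR_SKIP_CPU_SYNC);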