dma-mapping: implement DMA_ATTR_MMIO for dma_(un)map_page_attrs()

Make dma_map_page_attrs() and dma_unmap_page_attrs() respect
DMA_ATTR_MMIO.

DMA_ATTR_MMIO makes the functions behave the same as
dma_(un)map_resource() (a caller-side sketch follows the list):
 - No swiotlb is possible
 - Legacy dma_ops arches use ops->map_resource()
 - No kmsan
 - No arch_dma_map_phys_direct()
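
For illustration, a hypothetical caller-side sketch (not part of this
patch; "mmio_page" is a made-up struct page covering peer MMIO, e.g. a
P2PDMA BAR page, and the error handling is illustrative):

  dma_addr_t dma;

  dma = dma_map_page_attrs(dev, mmio_page, 0, PAGE_SIZE,
			   DMA_TO_DEVICE, DMA_ATTR_MMIO);
  if (dma_mapping_error(dev, dma))
	  return -EIO;

  /* ... program the device with "dma", wait for completion ... */

  dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_TO_DEVICE,
		       DMA_ATTR_MMIO);

With DMA_ATTR_MMIO set, both calls take the resource-style path listed
above: no bounce buffering, no kmsan, and legacy dma_ops dispatch to
ops->(un)map_resource().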

The prior patches have made the internal functions called here
support DMA_ATTR_MMIO.

This is also preparation for turning dma_map_resource() into an inline
wrapper that calls dma_map_phys() with DMA_ATTR_MMIO, consolidating the
two flows.
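
A sketch of where that consolidation is headed (follow-up work, not
this patch; the exact form of the wrapper is illustrative only):

  static inline dma_addr_t dma_map_resource(struct device *dev,
		  phys_addr_t phys_addr, size_t size,
		  enum dma_data_direction dir, unsigned long attrs)
  {
	  return dma_map_phys(dev, phys_addr, size, dir,
			      attrs | DMA_ATTR_MMIO);
  }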

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/3660e2c78ea409d6c483a215858fb3af52cd0ed3.1757423202.git.leonro@nvidia.com

@@ -158,6 +158,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	phys_addr_t phys = page_to_phys(page) + offset;
+	bool is_mmio = attrs & DMA_ATTR_MMIO;
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -166,14 +167,25 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	if (dma_map_direct(dev, ops) ||
-	    arch_dma_map_phys_direct(dev, phys + size))
+	    (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
 		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
 	else if (use_dma_iommu(dev))
 		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
-	else
-		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	kmsan_handle_dma(phys, size, dir);
+	else if (is_mmio) {
+		if (!ops->map_resource)
+			return DMA_MAPPING_ERROR;
+
+		addr = ops->map_resource(dev, phys, size, dir, attrs);
+	} else {
+		/*
+		 * The dma_ops API contract for ops->map_page() requires
+		 * kmappable memory, while ops->map_resource() does not.
+		 */
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	}
+	if (!is_mmio)
+		kmsan_handle_dma(phys, size, dir);
 	trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
 	debug_dma_map_phys(dev, phys, size, dir, addr, attrs);
 
 	return addr;
@@ -185,14 +197,18 @@ void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
 		enum dma_data_direction dir, unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	bool is_mmio = attrs & DMA_ATTR_MMIO;
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (dma_map_direct(dev, ops) ||
-	    arch_dma_unmap_phys_direct(dev, addr + size))
+	    (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
 		dma_direct_unmap_phys(dev, addr, size, dir, attrs);
 	else if (use_dma_iommu(dev))
 		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
-	else
+	else if (is_mmio) {
+		if (ops->unmap_resource)
+			ops->unmap_resource(dev, addr, size, dir, attrs);
+	} else
 		ops->unmap_page(dev, addr, size, dir, attrs);
 	trace_dma_unmap_phys(dev, addr, size, dir, attrs);
 	debug_dma_unmap_phys(dev, addr, size, dir);