         dma_addr_t addr;
         kmemcheck_mark_initialized(ptr, size);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         addr = ops->map_page(dev, virt_to_page(ptr),
                              (unsigned long)ptr & ~PAGE_MASK, size,

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         if (ops->unmap_page)
                 ops->unmap_page(dev, addr, size, dir, attrs);

         for_each_sg(sg, s, nents, i)
                 kmemcheck_mark_initialized(sg_virt(s), s->length);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         ents = ops->map_sg(dev, sg, nents, dir, attrs);
         debug_dma_map_sg(dev, sg, nents, ents, dir);

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         debug_dma_unmap_sg(dev, sg, nents, dir);
         if (ops->unmap_sg)

         dma_addr_t addr;
         kmemcheck_mark_initialized(page_address(page) + offset, size);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         addr = ops->map_page(dev, page, offset, size, dir, NULL);
         debug_dma_map_page(dev, page, offset, size, dir, addr, false);

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         if (ops->unmap_page)
                 ops->unmap_page(dev, addr, size, dir, NULL);

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         if (ops->sync_single_range_for_cpu) {
                 ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         if (ops->sync_single_range_for_device) {
                 ops->sync_single_range_for_device(dev, addr, offset, size, dir);

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         if (ops->sync_sg_for_cpu)
                 ops->sync_sg_for_cpu(dev, sg, nelems, dir);

 {
         struct dma_map_ops *ops = get_dma_ops(dev);
+        BUG_ON(!ops);
         BUG_ON(!valid_dma_direction(dir));
         if (ops->sync_sg_for_device)
                 ops->sync_sg_for_device(dev, sg, nelems, dir);
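For reference, here is a minimal sketch of how one of these wrappers reads after the change. It is modeled on the dma_map_single_attrs() helper in include/asm-generic/dma-mapping-common.h from kernels of this era, not a verbatim copy of the patched file; the exact signature, the struct dma_attrs argument and the kmemcheck/debug_dma hooks vary between versions. The point is simply that the new check sits between get_dma_ops() and the first dereference of ops:

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);    /* may be NULL if arch/bus setup failed */
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!ops);                                   /* added check: fail before ops is dereferenced */
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);
        return addr;
}

The same pattern applies to every hunk above: each wrapper dereferences ops unconditionally (ops->map_page, ops->map_sg, or one of the ops->sync_* hooks), so the explicit BUG_ON(!ops) only turns an imminent NULL-pointer oops into an earlier, clearer failure.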