Lines Matching +full:align +full:- +full:end in arch/alpha/kernel/pci_iommu.c
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/dma-map-ops.h>
15 #include <linux/iommu-helper.h>
44 return (paddr >> (PAGE_SHIFT-1)) | 1; in mk_iommu_pte()
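The single matched return above is the entire body of mk_iommu_pte(): the scatter-gather PTE holds the page frame number shifted up by one bit, with bit 0 as the valid bit, which is exactly what paddr >> (PAGE_SHIFT-1) | 1 yields for a page-aligned address. A minimal user-space sketch of that encoding, assuming the Alpha's 8 KB pages; mk_pte_sketch() is an illustrative name, not a kernel helper:

#include <stdio.h>

#define PAGE_SHIFT 13UL                     /* Alpha uses 8 KB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same encoding as the matched line: PFN in bits 1..N, bit 0 = valid. */
static unsigned long mk_pte_sketch(unsigned long paddr)
{
        return (paddr >> (PAGE_SHIFT - 1)) | 1;
}

int main(void)
{
        unsigned long paddr = 0x12340000UL & ~(PAGE_SIZE - 1);
        unsigned long pte = mk_pte_sketch(paddr);

        /* pte >> 1 recovers the page frame number, bit 0 says "valid". */
        printf("paddr %#lx -> pte %#lx (pfn %#lx, valid %lu)\n",
               paddr, pte, pte >> 1, pte & 1);
        return 0;
}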
61 unsigned long window_size, unsigned long align) in iommu_arena_new_node() argument
70 the size of the window. Retain the align parameter so that in iommu_arena_new_node()
71 particular systems can over-align the arena. */ in iommu_arena_new_node()
72 if (align < mem_size) in iommu_arena_new_node()
73 align = mem_size; in iommu_arena_new_node()
76 arena->ptes = memblock_alloc_or_panic(mem_size, align); in iommu_arena_new_node()
78 spin_lock_init(&arena->lock); in iommu_arena_new_node()
79 arena->hose = hose; in iommu_arena_new_node()
80 arena->dma_base = base; in iommu_arena_new_node()
81 arena->size = window_size; in iommu_arena_new_node()
82 arena->next_entry = 0; in iommu_arena_new_node()
84 /* Align allocations to a multiple of a page size. Not needed in iommu_arena_new_node()
86 arena->align_entry = 1; in iommu_arena_new_node()
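The iommu_arena_new_node() lines (61-86) size the arena: one PTE per PAGE_SIZE of DMA window, and, as the matched comment fragment notes, the required alignment of the table is based on the window size, so align is bumped to at least mem_size before memblock_alloc_or_panic(). A worked example of that arithmetic, assuming 8 KB pages and 8-byte PTEs; the 1 GB window is hypothetical:

#include <stdio.h>

#define PAGE_SHIFT 13UL               /* assumed Alpha page size: 8 KB */

int main(void)
{
        unsigned long window_size = 1UL << 30;          /* hypothetical 1 GB window */
        unsigned long align       = 1UL << PAGE_SHIFT;  /* caller asked for page alignment */

        /* One 8-byte PTE per page of the window, as in iommu_arena_new_node(). */
        unsigned long mem_size = (window_size >> PAGE_SHIFT) * sizeof(unsigned long);
        if (align < mem_size)         /* over-align the table to at least its own size */
                align = mem_size;

        printf("window %lu MB -> pte table %lu KB, aligned to %lu KB\n",
               window_size >> 20, mem_size >> 10, align >> 10);
        return 0;
}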
93 unsigned long window_size, unsigned long align) in iommu_arena_new() argument
95 return iommu_arena_new_node(0, hose, base, window_size, align); in iommu_arena_new()
109 base = arena->dma_base >> PAGE_SHIFT; in iommu_arena_find_pages()
112 /* Search forward for the first mask-aligned sequence of N free ptes */ in iommu_arena_find_pages()
113 ptes = arena->ptes; in iommu_arena_find_pages()
114 nent = arena->size >> PAGE_SHIFT; in iommu_arena_find_pages()
115 p = ALIGN(arena->next_entry, mask + 1); in iommu_arena_find_pages()
121 p = ALIGN(p + 1, mask + 1); in iommu_arena_find_pages()
126 p = ALIGN(p + i + 1, mask + 1); in iommu_arena_find_pages()
136 * Reached the end. Flush the TLB and restart in iommu_arena_find_pages()
139 alpha_mv.mv_pci_tbi(arena->hose, 0, -1); in iommu_arena_find_pages()
146 return -1; in iommu_arena_find_pages()
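iommu_arena_find_pages() (lines 109-146) scans forward for the first mask-aligned run of n free PTEs; if it reaches the end it flushes the scatter-gather TLB through alpha_mv.mv_pci_tbi() and retries once from the bottom before giving up. A simplified, self-contained sketch of that first-fit scan, treating 0 as "free" and leaving out the TLB-flush-and-wrap step:

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* like ALIGN(); a must be a power of two */

/* First-fit search for n consecutive zero entries starting at a
 * (mask+1)-aligned index; returns -1 if nothing fits.  Purely a sketch
 * of the loop structure in iommu_arena_find_pages(). */
static long find_free_run(const unsigned long *ptes, long nent,
                          long start, long n, unsigned long mask)
{
        long i, p = ALIGN_UP(start, mask + 1);

        while (p + n <= nent) {
                for (i = 0; i < n && ptes[p + i] == 0; i++)
                        ;
                if (i == n)
                        return p;                    /* found a free run */
                p = ALIGN_UP(p + i + 1, mask + 1);   /* skip past the busy entry */
        }
        return -1;
}

int main(void)
{
        unsigned long ptes[16] = { [3] = 1, [4] = 1 };   /* entries 3 and 4 busy */
        /* ask for 4 free entries aligned to 4: skips 0..3 and 4..7, lands on 8 */
        return find_free_run(ptes, 16, 0, 4, 3) == 8 ? 0 : 1;
}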
156 unsigned int align) in iommu_arena_alloc() argument
162 spin_lock_irqsave(&arena->lock, flags); in iommu_arena_alloc()
165 ptes = arena->ptes; in iommu_arena_alloc()
166 mask = max(align, arena->align_entry) - 1; in iommu_arena_alloc()
169 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
170 return -1; in iommu_arena_alloc()
180 arena->next_entry = p + n; in iommu_arena_alloc()
181 spin_unlock_irqrestore(&arena->lock, flags); in iommu_arena_alloc()
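iommu_arena_alloc() (lines 156-181) takes the larger of the caller's align and the arena's align_entry, subtracts one to get the mask fed to that search, and advances next_entry past the allocation. With 8 KB pages an align of 8 entries puts every allocation on a 64 KB boundary, which appears to be what pci_map_single_1() requests (align = 8) before calling in. The mask arithmetic, as a tiny check:

#include <stdio.h>

int main(void)
{
        unsigned int align = 8, align_entry = 1;        /* in arena entries, not bytes */
        unsigned long mask = (align > align_entry ? align : align_entry) - 1;

        /* ALIGN(p, mask + 1): round a candidate slot up to the boundary. */
        unsigned long p = 5, aligned = (p + mask) & ~mask;
        printf("mask %#lx, slot %lu -> %lu\n", mask, p, aligned);
        return 0;
}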
192 p = arena->ptes + ofs; in iommu_arena_free()
211 if ((dac_offset & dev->dma_mask) != dac_offset) in pci_dac_dma_supported()
222 mode. The 32-bit PCI bus mastering address to use is returned.
230 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; in pci_map_single_1()
231 dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; in pci_map_single_1()
236 unsigned int align = 0; in pci_map_single_1() local
237 struct device *dev = pdev ? &pdev->dev : NULL; in pci_map_single_1()
243 if (paddr + size + __direct_map_base - 1 <= max_dma in pci_map_single_1()
247 DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n", in pci_map_single_1()
258 DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n", in pci_map_single_1()
272 arena = hose->sg_pci; in pci_map_single_1()
273 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in pci_map_single_1()
274 arena = hose->sg_isa; in pci_map_single_1()
280 align = 8; in pci_map_single_1()
281 dma_ofs = iommu_arena_alloc(dev, arena, npages, align); in pci_map_single_1()
290 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); in pci_map_single_1()
292 ret = arena->dma_base + dma_ofs * PAGE_SIZE; in pci_map_single_1()
295 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n", in pci_map_single_1()
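pci_map_single_1() (lines 230-295) tries three address spaces in order: the direct-map window when the whole buffer is reachable under the device's dma_mask, a DAC (64-bit) address when the device allows it, and otherwise a scatter-gather arena (sg_pci, falling back to sg_isa when the PCI arena does not fit under the mask). A condensed sketch of that ordering; the window base/size and DAC offset are made-up values, not the per-chipset ones, and the DAC mask test from pci_dac_dma_supported() is folded in:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative constants only; the real values come from the chipset code. */
#define DIRECT_MAP_BASE  0x40000000ULL     /* hypothetical direct-map window */
#define DIRECT_MAP_SIZE  0x40000000ULL
#define DAC_OFFSET       0x8000000000ULL   /* hypothetical DAC offset */

/* Mirrors the order of the checks in pci_map_single_1(): direct map,
 * then DAC, then fall back to the scatter-gather arena. */
static const char *pick_window(unsigned long long paddr, size_t size,
                               unsigned long long dma_mask, bool dac_ok)
{
        if (paddr + size + DIRECT_MAP_BASE - 1 <= dma_mask
            && paddr + size <= DIRECT_MAP_SIZE)
                return "direct map";
        if (dac_ok && (DAC_OFFSET & dma_mask) == DAC_OFFSET)
                return "DAC";
        return "scatter-gather arena";
}

int main(void)
{
        printf("%s\n", pick_window(0x1000000ULL, 0x2000, 0xffffffffULL, false));
        printf("%s\n", pick_window(0x80000000ULL, 0x2000, 0xffffffffULL, false));
        return 0;
}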
301 /* Helper for generic DMA-mapping functions. */
307 /* Assume that non-PCI devices asking for DMA are either ISA or EISA, in alpha_gendev_to_pci()
311 /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA in alpha_gendev_to_pci()
313 if (!dev || !dev->dma_mask || !*dev->dma_mask) in alpha_gendev_to_pci()
318 if (*dev->dma_mask >= isa_bridge->dma_mask) in alpha_gendev_to_pci()
335 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; in alpha_pci_map_page()
352 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_unmap_page()
374 arena = hose->sg_pci; in alpha_pci_unmap_page()
375 if (!arena || dma_addr < arena->dma_base) in alpha_pci_unmap_page()
376 arena = hose->sg_isa; in alpha_pci_unmap_page()
378 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_page()
379 if (dma_ofs * PAGE_SIZE >= arena->size) { in alpha_pci_unmap_page()
382 dma_addr, arena->dma_base, arena->size); in alpha_pci_unmap_page()
389 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_page()
396 if (dma_ofs >= arena->next_entry) in alpha_pci_unmap_page()
397 alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); in alpha_pci_unmap_page()
399 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_page()
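alpha_pci_unmap_page() (lines 352-399) recovers the PTE index as (dma_addr - arena->dma_base) >> PAGE_SHIFT, frees iommu_num_pages() entries, and only flushes the TLB immediately when the freed range sits at or above next_entry; anything below it gets flushed anyway the next time the allocator wraps. A sketch of the index and page-count arithmetic, again assuming 8 KB pages and a hypothetical arena base:

#include <stdio.h>

#define PAGE_SHIFT 13UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Number of IOMMU pages a [dma_addr, dma_addr+size) range spans,
 * counting partial first and last pages, as iommu_num_pages() does. */
static unsigned long num_pages(unsigned long dma_addr, unsigned long size)
{
        unsigned long first = dma_addr & ~(PAGE_SIZE - 1);
        unsigned long last  = (dma_addr + size - 1) & ~(PAGE_SIZE - 1);
        return ((last - first) >> PAGE_SHIFT) + 1;
}

int main(void)
{
        unsigned long dma_base = 0x800000UL;          /* hypothetical arena base */
        unsigned long dma_addr = 0x803ff0UL, size = 0x40;   /* crosses a page boundary */

        unsigned long dma_ofs = (dma_addr - dma_base) >> PAGE_SHIFT;
        printf("pte index %lu, pages to free %lu\n",
               dma_ofs, num_pages(dma_addr, size));
        return 0;
}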
406 device. Returns non-NULL cpu-view pointer to the buffer if
443 DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n", in alpha_pci_alloc_coherent()
460 dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL); in alpha_pci_free_coherent()
471 -1 : Not leader, physically adjacent to previous.
472 -2 : Not leader, virtually adjacent to previous.
480 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end, in sg_classify() argument
490 leader_length = leader->length; in sg_classify()
495 for (++sg; sg < end; ++sg) { in sg_classify()
498 len = sg->length; in sg_classify()
504 sg->dma_address = -1; in sg_classify()
507 sg->dma_address = -2; in sg_classify()
512 leader->dma_address = leader_flag; in sg_classify()
513 leader->dma_length = leader_length; in sg_classify()
522 leader->dma_address = leader_flag; in sg_classify()
523 leader->dma_length = leader_length; in sg_classify()
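sg_classify() (lines 472-523) tags every scatterlist entry through its dma_address before any mapping happens: the first entry of each run is the leader (flag 0, or 1 when the run can only be stitched together by the IOMMU), and followers get -1 when physically adjacent to the previous entry or -2 when the join is page-aligned and scatter-gather is available (virt_ok). A stripped-down sketch of that marking over plain segments; struct seg stands in for the few scatterlist fields involved, and the max_seg_size check is omitted:

#include <stdio.h>

#define PAGE_SIZE 8192UL        /* assumed Alpha page size */

struct seg { unsigned long paddr, length; long tag; };

/* Mirrors the tagging scheme of sg_classify(): the leader of each run
 * keeps tag 0 (or 1 when the run can only be glued together by the
 * IOMMU); followers get -1 (physically adjacent) or -2 (joinable only
 * page-by-page, i.e. "virtually" via scatter-gather PTEs). */
static void classify(struct seg *s, int n, int virt_ok)
{
        int i, leader = 0;
        unsigned long next = s[0].paddr + s[0].length;

        s[0].tag = 0;
        for (i = 1; i < n; i++) {
                if (s[i].paddr == next) {
                        s[i].tag = -1;
                } else if (virt_ok &&
                           ((next | s[i].paddr) & (PAGE_SIZE - 1)) == 0) {
                        s[i].tag = -2;
                        s[leader].tag = 1;     /* run now needs the IOMMU */
                } else {
                        leader = i;            /* start a new run */
                        s[i].tag = 0;
                }
                next = s[i].paddr + s[i].length;
        }
}

int main(void)
{
        struct seg s[3] = {
                { 0x10000, 0x2000, 0 },        /* leader                      */
                { 0x12000, 0x2000, 0 },        /* physically adjacent -> -1   */
                { 0x40000, 0x2000, 0 },        /* page-aligned gap    -> -2   */
        };
        classify(s, 3, 1);
        for (int i = 0; i < 3; i++)
                printf("seg %d: tag %ld\n", i, s[i].tag);
        return 0;
}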
530 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, in sg_fill() argument
535 long size = leader->dma_length; in sg_fill()
542 fall into the direct-map window, use it. */ in sg_fill()
543 if (leader->dma_address == 0 in sg_fill()
544 && paddr + size + __direct_map_base - 1 <= max_dma in sg_fill()
546 out->dma_address = paddr + __direct_map_base; in sg_fill()
547 out->dma_length = size; in sg_fill()
549 DBGA(" sg_fill: [%p,%lx] -> direct %llx\n", in sg_fill()
550 __va(paddr), size, out->dma_address); in sg_fill()
557 if (leader->dma_address == 0 && dac_allowed) { in sg_fill()
558 out->dma_address = paddr + alpha_mv.pci_dac_offset; in sg_fill()
559 out->dma_length = size; in sg_fill()
561 DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n", in sg_fill()
562 __va(paddr), size, out->dma_address); in sg_fill()
575 if (leader->dma_address == 0) in sg_fill()
576 return -1; in sg_fill()
580 sg_classify(dev, leader, end, 0); in sg_fill()
581 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed); in sg_fill()
584 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; in sg_fill()
585 out->dma_length = size; in sg_fill()
587 DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", in sg_fill()
588 __va(paddr), size, out->dma_address, npages); in sg_fill()
592 ptes = &arena->ptes[dma_ofs]; in sg_fill()
599 size = sg->length; in sg_fill()
602 while (sg+1 < end && (int) sg[1].dma_address == -1) { in sg_fill()
615 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), in sg_fill()
616 last_sg->length, npages); in sg_fill()
619 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), in sg_fill()
620 last_sg->length); in sg_fill()
623 } while (++sg < end && (int) sg->dma_address < 0); in sg_fill()
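When a run does need the arena, sg_fill() (lines 530-623) allocates enough consecutive PTEs for it, sets the leader's dma_address to arena base + slot offset + the in-page offset, and walks the leader plus every follower still tagged negative, writing one PTE per page; if the allocation fails it reclassifies with virt_ok = 0 and retries. A compact sketch of the fill loop, assuming page-aligned segments so the partial-page coalescing in the real code can be ignored:

#include <stdio.h>

#define PAGE_SHIFT 13UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct seg { unsigned long paddr, length; long tag; };

/* Fill one scatter-gather PTE per page covered by a coalesced run
 * (leader plus every follower tagged negative), roughly the way
 * sg_fill() does after iommu_arena_alloc() has reserved the slots. */
static long fill_run(unsigned long *ptes, const struct seg *s, int n)
{
        long i = 0;
        int k = 0;

        do {
                unsigned long paddr = s[k].paddr & ~(PAGE_SIZE - 1);
                unsigned long last  = s[k].paddr + s[k].length - 1;

                for (; paddr <= last; paddr += PAGE_SIZE)
                        ptes[i++] = (paddr >> (PAGE_SHIFT - 1)) | 1;
        } while (++k < n && s[k].tag < 0);

        return i;                       /* number of PTEs written */
}

int main(void)
{
        struct seg run[2] = {
                { 0x10000, PAGE_SIZE, 1 },      /* leader                */
                { 0x40000, PAGE_SIZE, -2 },     /* follower, via IOMMU   */
        };
        unsigned long ptes[4] = { 0 };

        printf("filled %ld ptes\n", fill_run(ptes, run, 2));
        return 0;
}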
633 struct scatterlist *start, *end, *out; in alpha_pci_map_sg() local
641 dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; in alpha_pci_map_sg()
645 sg->dma_length = sg->length; in alpha_pci_map_sg()
646 sg->dma_address in alpha_pci_map_sg()
648 sg->length, dac_allowed); in alpha_pci_map_sg()
649 if (sg->dma_address == DMA_MAPPING_ERROR) in alpha_pci_map_sg()
650 return -EIO; in alpha_pci_map_sg()
655 end = sg + nents; in alpha_pci_map_sg()
658 sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0); in alpha_pci_map_sg()
662 hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_map_sg()
663 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; in alpha_pci_map_sg()
664 arena = hose->sg_pci; in alpha_pci_map_sg()
665 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_map_sg()
666 arena = hose->sg_isa; in alpha_pci_map_sg()
668 max_dma = -1; in alpha_pci_map_sg()
675 for (out = sg; sg < end; ++sg) { in alpha_pci_map_sg()
676 if ((int) sg->dma_address < 0) in alpha_pci_map_sg()
678 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0) in alpha_pci_map_sg()
683 /* Mark the end of the list for pci_unmap_sg. */ in alpha_pci_map_sg()
684 if (out < end) in alpha_pci_map_sg()
685 out->dma_length = 0; in alpha_pci_map_sg()
687 if (out - start == 0) { in alpha_pci_map_sg()
689 return -ENOMEM; in alpha_pci_map_sg()
691 DBGA("pci_map_sg: %ld entries\n", out - start); in alpha_pci_map_sg()
693 return out - start; in alpha_pci_map_sg()
702 dma_unmap_sg(&pdev->dev, start, out - start, dir); in alpha_pci_map_sg()
703 return -ENOMEM; in alpha_pci_map_sg()
718 struct scatterlist *end; in alpha_pci_unmap_sg() local
727 hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_unmap_sg()
728 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; in alpha_pci_unmap_sg()
729 arena = hose->sg_pci; in alpha_pci_unmap_sg()
730 if (!arena || arena->dma_base + arena->size - 1 > max_dma) in alpha_pci_unmap_sg()
731 arena = hose->sg_isa; in alpha_pci_unmap_sg()
733 fbeg = -1, fend = 0; in alpha_pci_unmap_sg()
735 spin_lock_irqsave(&arena->lock, flags); in alpha_pci_unmap_sg()
737 for (end = sg + nents; sg < end; ++sg) { in alpha_pci_unmap_sg()
743 addr = sg->dma_address; in alpha_pci_unmap_sg()
744 size = sg->dma_length; in alpha_pci_unmap_sg()
749 /* It's a DAC address -- nothing to do. */ in alpha_pci_unmap_sg()
751 sg - end + nents, addr, size); in alpha_pci_unmap_sg()
759 sg - end + nents, addr, size); in alpha_pci_unmap_sg()
764 sg - end + nents, addr, size); in alpha_pci_unmap_sg()
767 ofs = (addr - arena->dma_base) >> PAGE_SHIFT; in alpha_pci_unmap_sg()
770 tend = addr + size - 1; in alpha_pci_unmap_sg()
778 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) in alpha_pci_unmap_sg()
781 spin_unlock_irqrestore(&arena->lock, flags); in alpha_pci_unmap_sg()
783 DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg)); in alpha_pci_unmap_sg()
799 && (__direct_map_base + __direct_map_size - 1 <= mask || in alpha_pci_supported()
800 __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) in alpha_pci_supported()
803 /* Check that we have a scatter-gather arena that fits. */ in alpha_pci_supported()
804 hose = pdev ? pdev->sysdata : pci_isa_hose; in alpha_pci_supported()
805 arena = hose->sg_isa; in alpha_pci_supported()
806 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
808 arena = hose->sg_pci; in alpha_pci_supported()
809 if (arena && arena->dma_base + arena->size - 1 <= mask) in alpha_pci_supported()
813 if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) in alpha_pci_supported()
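alpha_pci_supported() (lines 799-813) implements the dma_supported() check: a mask is usable if the direct-map window (or at least all of low memory through it), one of the scatter-gather arenas, or the identity map fits entirely below it. The "fits below the mask" test is just base + size - 1 <= mask; a worked check with illustrative window values:

#include <stdbool.h>
#include <stdio.h>

/* True when every address in [base, base+size) is reachable with `mask`,
 * the same "base + size - 1 <= mask" test the matched lines use. */
static bool window_fits(unsigned long long base, unsigned long long size,
                        unsigned long long mask)
{
        return base + size - 1 <= mask;
}

int main(void)
{
        /* Hypothetical 1 GB direct-map window starting at 1 GB. */
        unsigned long long base = 0x40000000ULL, size = 0x40000000ULL;

        printf("32-bit mask: %s\n", window_fits(base, size, 0xffffffffULL) ? "ok" : "no");
        printf("30-bit mask: %s\n", window_fits(base, size, 0x3fffffffULL) ? "ok" : "no");
        return 0;
}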
830 if (!arena) return -EINVAL; in iommu_reserve()
832 spin_lock_irqsave(&arena->lock, flags); in iommu_reserve()
835 ptes = arena->ptes; in iommu_reserve()
838 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
839 return -1; in iommu_reserve()
848 arena->next_entry = p + pg_count; in iommu_reserve()
849 spin_unlock_irqrestore(&arena->lock, flags); in iommu_reserve()
860 if (!arena) return -EINVAL; in iommu_release()
862 ptes = arena->ptes; in iommu_release()
867 return -EBUSY; in iommu_release()
881 if (!arena) return -EINVAL; in iommu_bind()
883 spin_lock_irqsave(&arena->lock, flags); in iommu_bind()
885 ptes = arena->ptes; in iommu_bind()
889 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
890 return -EBUSY; in iommu_bind()
897 spin_unlock_irqrestore(&arena->lock, flags); in iommu_bind()
908 if (!arena) return -EINVAL; in iommu_unbind()
910 p = arena->ptes + pg_start; in iommu_unbind()
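The final group (lines 830-910) is the reserve/bind/unbind interface: iommu_reserve() claims a block of PTE slots ahead of time, iommu_bind() later points individual reserved slots at real pages and returns -EBUSY if a slot is not in the reserved state, iommu_unbind() undoes the binding, and iommu_release() likewise refuses with -EBUSY while pages are still bound. A toy model of that life cycle; RESERVED here is an illustrative marker, not the kernel's internal reserved-PTE value:

#include <stdio.h>
#include <errno.h>

#define RESERVED (~0UL)         /* illustrative stand-in for the reserved-PTE marker */

/* Claim [start, start+count) for later binding. */
static void reserve(unsigned long *ptes, long start, long count)
{
        for (long i = 0; i < count; i++)
                ptes[start + i] = RESERVED;
}

/* Point reserved slots at pages; refuse if any slot was not reserved. */
static int bind(unsigned long *ptes, long start, long count,
                const unsigned long *paddrs)
{
        for (long i = 0; i < count; i++)
                if (ptes[start + i] != RESERVED)
                        return -EBUSY;
        for (long i = 0; i < count; i++)
                ptes[start + i] = (paddrs[i] >> 12) | 1;   /* mk_iommu_pte()-style, 8 KB pages */
        return 0;
}

int main(void)
{
        unsigned long ptes[8] = { 0 }, pages[2] = { 0x10000, 0x40000 };

        reserve(ptes, 2, 4);
        printf("bind: %d\n", bind(ptes, 2, 2, pages));     /* 0: slots were reserved   */
        printf("bind: %d\n", bind(ptes, 0, 2, pages));     /* -EBUSY: never reserved   */
        return 0;
}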