| author | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-18 10:55:13 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2024-05-18 10:55:13 -0700 |
| commit | 0cc6f45cecb46cefe89c17ec816dc8cd58a2229a (patch) | |
| tree | 8996380ed4473b25607175aafa79756a74c2acf5 /drivers/of | |
| parent | f0cd69b8cca6a5096463644d6dacc9f991bfa521 (diff) | |
| parent | 2bd5059c6cc04b02073d4d9f57137ab74e1d8e7a (diff) | |
Merge tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
"Core:
- IOMMU memory usage observability - This will make the memory used
for IO page tables explicitly visible.
- Simplify arch_setup_dma_ops()
Intel VT-d:
- Consolidate domain cache invalidation
- Remove private data from page fault message
- Allocate DMAR fault interrupts locally
- Cleanup and refactoring
ARM-SMMUv2:
- Support for fault debugging hardware on Qualcomm implementations
- Re-land support for the ->domain_alloc_paging() callback
ARM-SMMUv3:
- Improve handling of MSI allocation failure
- Drop support for the "disable_bypass" cmdline option
- Major rework of the CD creation code, following on directly from
the STE rework merged last time around.
- Add unit tests for the new STE/CD manipulation logic
AMD-Vi:
- Final part of SVA changes with generic IO page fault handling
Renesas IPMMU:
- Add support for R8A779H0 hardware
... and a couple of smaller fixes and updates across the sub-tree"
* tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (80 commits)
iommu/arm-smmu-v3: Make the kunit into a module
arm64: Properly clean up iommu-dma remnants
iommu/amd: Enable Guest Translation after reading IOMMU feature register
iommu/vt-d: Decouple igfx_off from graphic identity mapping
iommu/amd: Fix compilation error
iommu/arm-smmu-v3: Add unit tests for arm_smmu_write_entry
iommu/arm-smmu-v3: Build the whole CD in arm_smmu_make_s1_cd()
iommu/arm-smmu-v3: Move the CD generation for SVA into a function
iommu/arm-smmu-v3: Allocate the CD table entry in advance
iommu/arm-smmu-v3: Make arm_smmu_alloc_cd_ptr()
iommu/arm-smmu-v3: Consolidate clearing a CD table entry
iommu/arm-smmu-v3: Move the CD generation for S1 domains into a function
iommu/arm-smmu-v3: Make CD programming use arm_smmu_write_entry()
iommu/arm-smmu-v3: Add an ops indirection to the STE code
iommu/arm-smmu-qcom: Don't build debug features as a kernel module
iommu/amd: Add SVA domain support
iommu: Add ops->domain_alloc_sva()
iommu/amd: Initial SVA support for AMD IOMMU
iommu/amd: Add support for enable/disable IOPF
iommu/amd: Add IO page fault notifier handler
...
Diffstat (limited to 'drivers/of')
| -rw-r--r-- | drivers/of/device.c | 42 |
1 file changed, 7 insertions, 35 deletions
```diff
diff --git a/drivers/of/device.c b/drivers/of/device.c
index de89f9906375..312c63361211 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -95,8 +95,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 {
 	const struct bus_dma_region *map = NULL;
 	struct device_node *bus_np;
-	u64 dma_start = 0;
-	u64 mask, end, size = 0;
+	u64 mask, end = 0;
 	bool coherent;
 	int iommu_ret;
 	int ret;
@@ -117,34 +116,8 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 		if (!force_dma)
 			return ret == -ENODEV ? 0 : ret;
 	} else {
-		const struct bus_dma_region *r = map;
-		u64 dma_end = 0;
-
 		/* Determine the overall bounds of all DMA regions */
-		for (dma_start = ~0; r->size; r++) {
-			/* Take lower and upper limits */
-			if (r->dma_start < dma_start)
-				dma_start = r->dma_start;
-			if (r->dma_start + r->size > dma_end)
-				dma_end = r->dma_start + r->size;
-		}
-		size = dma_end - dma_start;
-
-		/*
-		 * Add a work around to treat the size as mask + 1 in case
-		 * it is defined in DT as a mask.
-		 */
-		if (size & 1) {
-			dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
-				 size);
-			size = size + 1;
-		}
-
-		if (!size) {
-			dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
-			kfree(map);
-			return -EINVAL;
-		}
+		end = dma_range_map_max(map);
 	}
 
 	/*
@@ -158,16 +131,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 		dev->dma_mask = &dev->coherent_dma_mask;
 	}
 
-	if (!size && dev->coherent_dma_mask)
-		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
-	else if (!size)
-		size = 1ULL << 32;
+	if (!end && dev->coherent_dma_mask)
+		end = dev->coherent_dma_mask;
+	else if (!end)
+		end = (1ULL << 32) - 1;
 
 	/*
 	 * Limit coherent and dma mask based on size and default mask
 	 * set by the driver.
 	 */
-	end = dma_start + size - 1;
 	mask = DMA_BIT_MASK(ilog2(end) + 1);
 	dev->coherent_dma_mask &= mask;
 	*dev->dma_mask &= mask;
@@ -201,7 +173,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 	} else
 		dev_dbg(dev, "device is behind an iommu\n");
 
-	arch_setup_dma_ops(dev, dma_start, size, coherent);
+	arch_setup_dma_ops(dev, coherent);
 
 	if (iommu_ret)
 		of_dma_set_restricted_buffer(dev, np);
```
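The net effect of the hunks above is that of_dma_configure_id() no longer derives a start/size pair from the dma-ranges itself: it asks for the highest mapped DMA address via dma_range_map_max(), clamps the device masks with DMA_BIT_MASK(ilog2(end) + 1), and calls the simplified arch_setup_dma_ops(dev, coherent). As a rough illustration only — the real helper lives in the kernel's DMA core and may differ — here is a standalone sketch of the bounds computation that the deleted loop used to do inline; the struct layout and the dma_range_map_max_sketch() name are assumptions made for this example.

```c
/*
 * Standalone user-space sketch, NOT the upstream definition.
 * It mirrors the removed loop in of_dma_configure_id(): walk a
 * zero-size-terminated array shaped like the kernel's
 * struct bus_dma_region and return the highest DMA address
 * covered by any region, as an inclusive limit.
 */
#include <stdint.h>
#include <stdio.h>

struct bus_dma_region {
	uint64_t cpu_start;
	uint64_t dma_start;
	uint64_t size;
};

static uint64_t dma_range_map_max_sketch(const struct bus_dma_region *map)
{
	uint64_t end = 0;

	for (; map->size; map++) {
		uint64_t region_end = map->dma_start + map->size - 1;

		if (region_end > end)
			end = region_end;
	}
	return end;
}

int main(void)
{
	/* Two example dma-ranges; the zero-size entry terminates the map. */
	struct bus_dma_region map[] = {
		{ .dma_start = 0x00000000, .size = 0x40000000 },
		{ .dma_start = 0x80000000, .size = 0x80000000 },
		{ 0 }
	};

	printf("%llx\n", (unsigned long long)dma_range_map_max_sketch(map));
	return 0;
}
```

With the example map above the sketch returns 0xffffffff, so ilog2(end) + 1 is 32 and the masks are clamped to DMA_BIT_MASK(32); this matches the patch's switch from size-based defaults (1ULL << 32) to an inclusive end-based default of (1ULL << 32) - 1.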