| author | Nadav Amit <namit@vmware.com> | 2021-03-23 14:06:19 -0700 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2021-04-08 17:08:16 +0200 | |
| commit | 268aa4548277a1e50f326c6fbca75dd1073574d4 (patch) | |
| tree | 95cf7fa5c616f0231ea5094a2550b880ad0b3333 /drivers/iommu/amd | |
| parent | fc1b6620501f1a4b88f583549c63666180bea177 (diff) | |
iommu/amd: Page-specific invalidations for more than one page
Currently, IOMMU invalidations and device-IOTLB invalidations with the
AMD IOMMU fall back to full address-space invalidation if more than a
single page needs to be flushed.
Full flushes are especially inefficient when the IOMMU is virtualized by
a hypervisor, since they require the hypervisor to synchronize the entire
address space.
AMD IOMMUs allow a mask to be provided, enabling page-specific
invalidation of multiple pages that match the address. The mask is
encoded as part of the address: the first zero bit in the address
(in bits [51:12]) indicates the mask size.
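As a worked example of this encoding: to invalidate 2^order naturally
aligned pages, the (order - 1) address bits above bit 11 are set to one,
leaving the next bit as the first zero bit. A minimal sketch (the helper
name and simplified constants are hypothetical, not part of the patch;
the S bit in the command word must still be set separately):

```c
#include <stdint.h>

/*
 * Hypothetical illustration of the mask encoding: invalidate (1 << order)
 * pages, order >= 1, at an address aligned to the flushed range.
 */
static inline uint64_t encode_inv_range(uint64_t address, unsigned int order)
{
        /*
         * Set the (order - 1) bits above bit 11; the first zero bit above
         * them (bit 12 + order - 1) tells the IOMMU the size of the range.
         * For example, order = 4 (16 pages) at 0x100000 yields 0x107000:
         * bits 14:12 are one and bit 15 is the first zero bit.
         */
        return (address & ~0xfffULL) | (((1ULL << (order - 1)) - 1) << 12);
}
```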
Use this hardware feature to perform selective IOMMU and device-IOTLB
flushes. Combine the logic for both into a common helper for better code reuse.
The IOMMU invalidations passed a smoke-test. The device IOTLB
invalidations are untested.
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Will Deacon <will@kernel.org>
Cc: Jiajun Cao <caojiajun@vmware.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Nadav Amit <namit@vmware.com>
Link: https://lore.kernel.org/r/20210323210619.513069-1-namit@vmware.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/amd')
-rw-r--r-- | drivers/iommu/amd/iommu.c | 76
1 file changed, 42 insertions(+), 34 deletions(-)
```diff
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index a519aaec50e4..f164f31909f8 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -852,33 +852,58 @@ static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 }
 
-static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
-				  size_t size, u16 domid, int pde)
+/*
+ * Builds an invalidation address which is suitable for one page or multiple
+ * pages. Sets the size bit (S) as needed if more than one page is flushed.
+ */
+static inline u64 build_inv_address(u64 address, size_t size)
 {
-	u64 pages;
-	bool s;
+	u64 pages, end, msb_diff;
 
 	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s     = false;
 
-	if (pages > 1) {
+	if (pages == 1)
+		return address & PAGE_MASK;
+
+	end = address + size - 1;
+
+	/*
+	 * msb_diff would hold the index of the most significant bit that
+	 * flipped between the start and end.
+	 */
+	msb_diff = fls64(end ^ address) - 1;
+
+	/*
+	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
+	 * between the start and the end, invalidate everything.
+	 */
+	if (unlikely(msb_diff > 51)) {
+		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	} else {
 		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
+		 * The msb-bit must be clear on the address. Just set all the
+		 * lower bits.
 		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = true;
+		address |= (1ull << msb_diff) - 1;
 	}
 
+	/* Clear bits 11:0 */
 	address &= PAGE_MASK;
 
+	/* Set the size bit - we flush more than one 4kb page */
+	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
+static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
+				  size_t size, u16 domid, int pde)
+{
+	u64 inv_address = build_inv_address(address, size);
+
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[1] |= domid;
-	cmd->data[2] = lower_32_bits(address);
-	cmd->data[3] = upper_32_bits(address);
+	cmd->data[2] = lower_32_bits(inv_address);
+	cmd->data[3] = upper_32_bits(inv_address);
 	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-	if (s) /* size bit - we flush more than one 4kb page */
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 }
@@ -886,32 +911,15 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 				  u64 address, size_t size)
 {
-	u64 pages;
-	bool s;
-
-	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s     = false;
-
-	if (pages > 1) {
-		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
-		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = true;
-	}
-
-	address &= PAGE_MASK;
+	u64 inv_address = build_inv_address(address, size);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = devid;
 	cmd->data[0] |= (qdep & 0xff) << 24;
 	cmd->data[1] = devid;
-	cmd->data[2] = lower_32_bits(address);
-	cmd->data[3] = upper_32_bits(address);
+	cmd->data[2] = lower_32_bits(inv_address);
+	cmd->data[3] = upper_32_bits(inv_address);
 	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-	if (s)
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 }
 
 static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
```
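To see the helper's arithmetic in isolation, here is a user-space model of
the build_inv_address() logic above. This is illustrative only: fls64() is
replaced by a portable equivalent, the constants are stand-ins for the
kernel's CMD_INV_IOMMU_* definitions, and the function names are hypothetical.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_MASK         (~((1ULL << PAGE_SHIFT) - 1))
/* Stand-ins for the kernel's CMD_INV_IOMMU_* definitions */
#define ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
#define SIZE_MASK         0x1ULL   /* the S bit in the command's address dword */

/* Portable substitute for the kernel's fls64(): 1-based index of the
 * highest set bit, 0 if none (uses a GCC/Clang builtin). */
static int fls64_model(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

static uint64_t build_inv_address_model(uint64_t address, uint64_t size)
{
        uint64_t end, msb_diff;
        uint64_t pages = ((address & ~PAGE_MASK) + size + ~PAGE_MASK) >> PAGE_SHIFT;

        if (pages == 1)
                return address & PAGE_MASK;

        end = address + size - 1;

        /* Index of the most significant bit that differs between start and end */
        msb_diff = fls64_model(end ^ address) - 1;

        if (msb_diff > 51) {
                /* Range crosses bit 51: fall back to flushing everything */
                address = ALL_PAGES_ADDRESS;
        } else {
                /* Set every bit below the first differing bit; the first zero
                 * bit above them encodes how many pages are flushed. */
                address |= (1ULL << msb_diff) - 1;
        }

        address &= PAGE_MASK;
        return address | SIZE_MASK;     /* more than one page: set the S bit */
}

int main(void)
{
        /* 16 aligned pages at 0x100000 -> 0x107001: bits 14:12 set, S bit on */
        printf("%#llx\n", (unsigned long long)
               build_inv_address_model(0x100000, 16ULL << PAGE_SHIFT));
        /* 3 pages at 0x101000 -> 0x101001: covered by a naturally aligned
         * 4-page block, so the hardware may over-invalidate but never misses */
        printf("%#llx\n", (unsigned long long)
               build_inv_address_model(0x101000, 3ULL << PAGE_SHIFT));
        return 0;
}
```

The second case shows the design choice behind the msb_diff computation:
an unaligned or non-power-of-two range is widened to the smallest naturally
aligned power-of-two block containing it, trading some over-invalidation
for a single page-specific command instead of a full address-space flush.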