/* arch/x86/kernel/pci-swiotlb_64.c: glue code to lib/swiotlb.c */

#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>

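/* Nonzero once we have decided to use the software bounce buffer. */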
int swiotlb __read_mostly;

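/*
 * Boot-time allocation of the bounce pool: page-aligned low memory from
 * the bootmem allocator, so the pool sits where legacy DMA can reach it.
 */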
void * __init swiotlb_alloc_boot(size_t size, unsigned long nslabs)
{
	return alloc_bootmem_low_pages(size);
}

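/*
 * Late (post-boot) allocation of bounce pages: GFP_DMA keeps the memory
 * low enough for narrow DMA masks, and __GFP_NOWARN suppresses the
 * allocation-failure warning since a failure here is not fatal.
 */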
void *swiotlb_alloc(unsigned order, unsigned long nslabs)
{
	return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
}

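/*
 * On x86 a device sees the same addresses the CPU does, so the
 * physical-to-bus translation is the identity.
 */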
dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
	return paddr;
}

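/* The inverse translation, likewise an identity on x86. */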
phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
{
	return baddr;
}

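/*
 * Weak default: no address range needs to be bounced.  An architecture
 * can override this to force specific ranges through the bounce buffer.
 */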
int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size)
{
	return 0;
}

/* these will be moved to lib/swiotlb.c later on */

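/*
 * The dma_map_ops interface hands us a struct page plus offset; convert
 * that to the kernel virtual address swiotlb_map_single() expects.
 */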
static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	return swiotlb_map_single(dev, page_address(page) + offset, size, dir);
}

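/* Undo a mapping made by swiotlb_map_page() above. */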
static void swiotlb_unmap_page(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	swiotlb_unmap_single(dev, dma_handle, size, dir);
}

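/*
 * Prefer the generic coherent allocator; fall back to the swiotlb pool
 * only if it fails.
 */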
static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *vaddr;

	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
	if (vaddr)
		return vaddr;

	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}

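/*
 * The dma_map_ops installed when swiotlb is active.  .dma_supported is
 * left NULL, so the generic DMA-mask check applies.
 */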
struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc_coherent = x86_swiotlb_alloc_coherent,
	.free_coherent = swiotlb_free_coherent,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.dma_supported = NULL,
};

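/*
 * Called at boot to decide whether swiotlb is needed: on 64-bit it is
 * enabled when no hardware IOMMU was detected and RAM reaches above the
 * 4 GB DMA32 limit; swiotlb_force (from "swiotlb=force") overrides.
 */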
void __init pci_swiotlb_init(void)
{
	/* don't auto-enable swiotlb if iommu=off (no_iommu=1) was given */
#ifdef CONFIG_X86_64
	if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
		swiotlb = 1;
#endif
	if (swiotlb_force)
		swiotlb = 1;
	if (swiotlb) {
		printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
		swiotlb_init();
		dma_ops = &swiotlb_dma_ops;
	}
}