net/core/devmem.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * of the user dropping the binding.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique among all currently active
	 * bindings.
	 */
	u32 id;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* Offset into the dma-buf where this chunk starts.  */
	unsigned long base_virtual;

	/* dma_addr of the start of the chunk.  */
	dma_addr_t base_dma_addr;

	/* Array of net_iovs for this chunk. */
	struct net_iov *niovs;
	size_t num_niovs;

	struct net_devmem_dmabuf_binding *binding;
};
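
/* Illustrative sketch, not part of the API: assuming a PAGE_SIZE-aligned
 * scatterlist entry of length "len", its chunk owner describes roughly
 * num_niovs = len / PAGE_SIZE net_iovs, where niovs[i] covers dma address
 * base_dma_addr + i * PAGE_SIZE and dma-buf offset
 * base_virtual + i * PAGE_SIZE (see the helpers below).
 */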

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void dev_dmabuf_uninstall(struct net_device *dev);
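
/* Sketch of a typical control-path sequence (error handling omitted; the
 * exact call sites live in the netdev netlink code, not in this header):
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	...
 *	net_devmem_unbind_dmabuf(binding);
 */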

static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}

static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
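
/* Worked example with illustrative values: for an owner whose base_virtual
 * is 0x200000, the net_iov at index 3 in owner->niovs maps to dma-buf
 * offset 0x200000 + (3 << PAGE_SHIFT), i.e. 0x203000 with 4K pages.
 */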

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding->id;
}

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}
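
/* Refcounting sketch (the "consumer" below is hypothetical, for
 * illustration only): a user that stashes a long-lived pointer to the
 * binding is expected to pair the helpers above:
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	consumer->binding = binding;
 *	...
 *	consumer->binding = NULL;
 *	net_devmem_dmabuf_binding_put(binding);
 *
 * The put that drops the last ref calls __net_devmem_dmabuf_binding_free().
 */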

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
#endif

#endif /* _NET_DEVMEM_H */