author      Johannes Weiner <hannes@cmpxchg.org>            2016-01-14 15:21:14 -0800
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-01-14 16:00:49 -0800
commit      e805605c721021879a1469bdae45c6f80bc985f4 (patch)
tree        c0743f5fa5e70ebf1483415c5bcc53dffce23c64 /include/net
parent      80f23124f57c77915a7b4201d8dcba38a38b23f0 (diff)
net: tcp_memcontrol: sanitize tcp memory accounting callbacks
There won't be a tcp control soft limit, so integrating the memcg code
into the global skmem limiting scheme complicates things unnecessarily.
Replace this with simple and clear charge and uncharge calls--hidden
behind a jump label--to account skb memory.
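For illustration, here is a minimal sketch of what the new callback model looks like at a charge site. The memcg helper mem_cgroup_charge_skmem() and its signature are assumed from the memcg side of the series and are not part of this include/net diff; the wrapper function itself is purely hypothetical:

/* Illustrative sketch only -- mem_cgroup_charge_skmem() is assumed
 * from the rest of the series and is not shown in this excerpt. */
static int example_mem_charge(struct sock *sk, int nr_pages)
{
	long allocated;

	/* per-memcg charge, hidden behind the mem_cgroup_sockets_enabled
	 * jump label so unconfigured systems pay nothing */
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
	    !mem_cgroup_charge_skmem(sk->sk_cgrp, nr_pages))
		return -ENOMEM;		/* memcg over its own limit */

	/* global skmem accounting, checked against the global limit only */
	allocated = sk_memory_allocated_add(sk, nr_pages);
	if (allocated > sk_prot_mem_limits(sk, 2))
		return -ENOMEM;		/* over the sysctl hard limit */

	return 0;
}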
Note that this is not purely aesthetic: as a result of shoehorning the
per-memcg code into the same memory accounting functions that handle the
global level, the old code would compare the per-memcg consumption
against the smaller of the per-memcg limit and the global limit. This
allowed the total consumption of multiple sockets to exceed the global
limit, as long as the individual sockets stayed within bounds. After
this change, the code will always compare the per-memcg consumption to
the per-memcg limit, and the global consumption to the global limit, and
thus close this loophole.
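To make the arithmetic concrete, an invented example of the loophole:

/*
 * Hypothetical numbers, for illustration only:
 *
 *   global hard limit (sysctl_mem[2])  = 100 pages
 *   memcg A socket limit               =  80 pages
 *   memcg B socket limit               =  80 pages
 *
 * Old scheme: each memcg's own consumption was compared against
 * min(80, 100) = 80, so sockets in A and B together could pin up to
 * 160 pages of skb memory -- well past the global limit of 100.
 *
 * New scheme: A and B are each checked against their own 80-page
 * limit, and the combined consumption is separately checked against
 * the global 100-page limit.
 */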
Without a soft limit, the per-memcg memory pressure state in sockets is
generally questionable. However, we did it until now, so we continue to
enter it when the hard limit is hit, and packets are dropped, to let
other sockets in the cgroup know that they shouldn't grow their transmit
windows, either. However, keep it simple in the new callback model and
leave memory pressure lazily when the next packet is accepted (as
opposed to doing it synchronously when packets are processed). When
packets are dropped, network performance will already be in the toilet,
so that should be a reasonable trade-off.
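A sketch of that lazy exit, assuming the charge helper lives on the memcg side of the series and works roughly like the old memcg_memory_allocated_add() removed below (the function name and body here are illustrative, not part of this diff):

/* Sketch: enter pressure when the hard limit is hit, leave it lazily
 * the next time a charge succeeds. */
static bool example_charge_skmem(struct cg_proto *proto, unsigned int nr_pages)
{
	struct page_counter *fail;

	if (page_counter_try_charge(&proto->memory_allocated, nr_pages, &fail)) {
		proto->memory_pressure = 0;	/* leave pressure lazily */
		return true;
	}

	/* Over the hard limit: charge anyway, but signal pressure so other
	 * sockets in the cgroup stop growing their transmit windows. */
	page_counter_charge(&proto->memory_allocated, nr_pages);
	proto->memory_pressure = 1;
	return false;
}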
As described above, consumption is now checked on the per-memcg level
and the global level separately. Likewise, memory pressure states are
maintained on both the per-memcg level and the global level, and a
socket is considered under pressure when either level asserts as much.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/net')
-rw-r--r--   include/net/sock.h   64
-rw-r--r--   include/net/tcp.h     5
2 files changed, 11 insertions, 58 deletions
diff --git a/include/net/sock.h b/include/net/sock.h
index 8b1f8e5d3a48..94a6c1a740b9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1129,8 +1129,9 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 	if (!sk->sk_prot->memory_pressure)
 		return false;
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!sk->sk_cgrp->memory_pressure;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+		return true;
 
 	return !!*sk->sk_prot->memory_pressure;
 }
@@ -1144,9 +1145,6 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
 
 	if (*memory_pressure)
 		*memory_pressure = 0;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		sk->sk_cgrp->memory_pressure = 0;
 }
 
 static inline void sk_enter_memory_pressure(struct sock *sk)
@@ -1154,76 +1152,30 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
 {
 	if (!sk->sk_prot->enter_memory_pressure)
 		return;
 
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		sk->sk_cgrp->memory_pressure = 1;
-
 	sk->sk_prot->enter_memory_pressure(sk);
 }
 
 static inline long sk_prot_mem_limits(const struct sock *sk, int index)
 {
-	long limit = sk->sk_prot->sysctl_mem[index];
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		limit = min_t(long, limit, sk->sk_cgrp->memory_allocated.limit);
-
-	return limit;
-}
-
-static inline void memcg_memory_allocated_add(struct cg_proto *prot,
-					      unsigned long amt,
-					      int *parent_status)
-{
-	struct page_counter *counter;
-
-	if (page_counter_try_charge(&prot->memory_allocated, amt, &counter))
-		return;
-
-	page_counter_charge(&prot->memory_allocated, amt);
-	*parent_status = OVER_LIMIT;
-}
-
-static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
-					      unsigned long amt)
-{
-	page_counter_uncharge(&prot->memory_allocated, amt);
+	return sk->sk_prot->sysctl_mem[index];
 }
 
 static inline long sk_memory_allocated(const struct sock *sk)
 {
-	struct proto *prot = sk->sk_prot;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return page_counter_read(&sk->sk_cgrp->memory_allocated);
-
-	return atomic_long_read(prot->memory_allocated);
+	return atomic_long_read(sk->sk_prot->memory_allocated);
 }
 
 static inline long
-sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+sk_memory_allocated_add(struct sock *sk, int amt)
 {
-	struct proto *prot = sk->sk_prot;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
-		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
-		/* update the root cgroup regardless */
-		atomic_long_add_return(amt, prot->memory_allocated);
-		return page_counter_read(&sk->sk_cgrp->memory_allocated);
-	}
-
-	return atomic_long_add_return(amt, prot->memory_allocated);
+	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
 }
 
 static inline void
 sk_memory_allocated_sub(struct sock *sk, int amt)
 {
-	struct proto *prot = sk->sk_prot;
-
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		memcg_memory_allocated_sub(sk->sk_cgrp, amt);
-
-	atomic_long_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
 }
 
 static inline void sk_sockets_allocated_dec(struct sock *sk)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a80255f4ca33..d9df80deba31 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -289,8 +289,9 @@ extern int tcp_memory_pressure;
 /* optimized version of sk_under_memory_pressure() for TCP sockets */
 static inline bool tcp_under_memory_pressure(const struct sock *sk)
 {
-	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-		return !!sk->sk_cgrp->memory_pressure;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
+	    mem_cgroup_under_socket_pressure(sk->sk_cgrp))
+		return true;
 
 	return tcp_memory_pressure;
 }
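For completeness, the release path under the new model is the mirror image of the charge sketch above. This is a sketch only: the uncharge helper mem_cgroup_uncharge_skmem() is assumed from the rest of the series, and the wrapper function is hypothetical; the real callers live in net/core/sock.c, outside this excerpt.

/* Sketch: dropping skb memory after this patch.  mem_cgroup_uncharge_skmem()
 * is assumed from the memcg side of the series; it is not part of this diff. */
static void example_mem_uncharge(struct sock *sk, int nr_pages)
{
	/* global accounting first, exactly as before */
	sk_memory_allocated_sub(sk, nr_pages);

	/* per-memcg uncharge, hidden behind the same jump label */
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		mem_cgroup_uncharge_skmem(sk->sk_cgrp, nr_pages);
}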