Diffstat (limited to 'tools'): 22 files changed, 1947 insertions, 91 deletions
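For orientation before the verbatim patch below: the change adds two new libbpf entry points, bpf_prog_test_run() and bpf_create_map_in_map(), whose prototypes appear in the tools/lib/bpf/bpf.h hunk. The following is a minimal sketch (not part of the patch) of how they are called; prog_fd and the dummy packet are hypothetical, and the selftests added further down (test_progs.c, test_verifier.c) exercise the same calls in full.

	/* Sketch only: assumes prog_fd refers to an already loaded and
	 * verified BPF program (e.g. obtained via bpf_object__load() as in
	 * test_progs.c). Signatures match the bpf.h additions in this patch.
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf.h>
	#include <stdio.h>
	#include <unistd.h>

	static void demo(int prog_fd)
	{
		char pkt_in[64] = {};	/* dummy input packet */
		char pkt_out[128];	/* buffer for the (possibly rewritten) packet */
		__u32 size_out, retval, duration;
		int inner_fd, outer_fd, err;

		/* Run the program 1000 times over pkt_in; the kernel reports the
		 * program's return code and the average runtime in nanoseconds.
		 */
		err = bpf_prog_test_run(prog_fd, 1000, pkt_in, sizeof(pkt_in),
					pkt_out, &size_out, &retval, &duration);
		if (!err)
			printf("retval=%u out=%u bytes avg=%u nsec\n",
			       retval, size_out, duration);

		/* Map-in-map: the outer map holds fds of maps sharing the layout
		 * of the inner map passed as inner_map_fd at creation time.
		 */
		inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					  sizeof(int), 1, 0);
		outer_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS,
						 sizeof(int), inner_fd, 1, 0);
		if (outer_fd < 0)
			perror("bpf_create_map_in_map");

		close(outer_fd);
		close(inner_fd);
	}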
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c index e04ab89a1013..ebc6dceddb58 100644 --- a/tools/build/feature/test-bpf.c +++ b/tools/build/feature/test-bpf.c @@ -9,6 +9,9 @@ # define __NR_bpf 321 # elif defined(__aarch64__) # define __NR_bpf 280 +# elif defined(__sparc__) +# define __NR_bpf 349 +# else # error __NR_bpf not defined. libbpf does not support your arch. # endif #endif diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh index 4aa5369ffa4e..d85968cb1bf2 100755 --- a/tools/hv/bondvf.sh +++ b/tools/hv/bondvf.sh @@ -101,9 +101,25 @@ function create_bond_cfg_redhat { echo BONDING_OPTS=\"mode=active-backup miimon=100 primary=$2\" >>$fn } +function del_eth_cfg_ubuntu { + local fn=$cfgdir/interfaces + local tmpfl=$(mktemp) + + local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1 + local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)' + + awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" $fn >$tmpfl + + cp $tmpfl $fn + + rm $tmpfl +} + function create_eth_cfg_ubuntu { local fn=$cfgdir/interfaces + del_eth_cfg_ubuntu $1 + echo $'\n'auto $1 >>$fn echo iface $1 inet manual >>$fn echo bond-master $2 >>$fn @@ -119,6 +135,8 @@ function create_eth_cfg_pri_ubuntu { function create_bond_cfg_ubuntu { local fn=$cfgdir/interfaces + del_eth_cfg_ubuntu $1 + echo $'\n'auto $1 >>$fn echo iface $1 inet dhcp >>$fn echo bond-mode active-backup >>$fn diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0539a0ceef38..e553529929f6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -81,6 +81,7 @@ enum bpf_cmd { BPF_OBJ_GET, BPF_PROG_ATTACH, BPF_PROG_DETACH, + BPF_PROG_TEST_RUN, }; enum bpf_map_type { @@ -96,6 +97,8 @@ enum bpf_map_type { BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH, BPF_MAP_TYPE_LPM_TRIE, + BPF_MAP_TYPE_ARRAY_OF_MAPS, + BPF_MAP_TYPE_HASH_OF_MAPS, }; enum bpf_prog_type { @@ -152,6 +155,7 @@ union bpf_attr { __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ __u32 map_flags; /* prealloc or not */ + __u32 inner_map_fd; /* fd pointing to the inner map */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ @@ -186,6 +190,17 @@ union bpf_attr { __u32 attach_type; __u32 attach_flags; }; + + struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __aligned_u64 data_in; + __aligned_u64 data_out; + __u32 repeat; + __u32 duration; + } test; } __attribute__((aligned(8))); /* BPF helper function descriptions: @@ -456,6 +471,18 @@ union bpf_attr { * Return: * > 0 length of the string including the trailing NUL on success * < 0 error + * + * u64 bpf_get_socket_cookie(skb) + * Get the cookie for the socket stored inside sk_buff. + * @skb: pointer to skb + * Return: 8 Bytes non-decreasing number on success or 0 if the socket + * field is missing inside sk_buff + * + * u32 bpf_get_socket_uid(skb) + * Get the owner uid of the socket stored inside sk_buff. 
+ * @skb: pointer to skb + * Return: uid of the socket owner on success or 0 if the socket pointer + * inside sk_buff is NULL */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -503,7 +530,9 @@ union bpf_attr { FN(get_numa_node_id), \ FN(skb_change_head), \ FN(xdp_adjust_head), \ - FN(probe_read_str), + FN(probe_read_str), \ + FN(get_socket_cookie), \ + FN(get_socket_uid), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -574,6 +603,7 @@ struct __sk_buff { __u32 tc_classid; __u32 data; __u32 data_end; + __u32 napi_id; }; struct bpf_tunnel_key { diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 207c2eeddab0..4fe444b8092e 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -37,6 +37,8 @@ # define __NR_bpf 321 # elif defined(__aarch64__) # define __NR_bpf 280 +# elif defined(__sparc__) +# define __NR_bpf 349 # else # error __NR_bpf not defined. libbpf does not support your arch. # endif @@ -69,6 +71,23 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } +int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, __u32 map_flags) +{ + union bpf_attr attr; + + memset(&attr, '\0', sizeof(attr)); + + attr.map_type = map_type; + attr.key_size = key_size; + attr.value_size = 4; + attr.inner_map_fd = inner_map_fd; + attr.max_entries = max_entries; + attr.map_flags = map_flags; + + return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); +} + int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz) @@ -192,3 +211,27 @@ int bpf_prog_detach(int target_fd, enum bpf_attach_type type) return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr)); } + +int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, + void *data_out, __u32 *size_out, __u32 *retval, + __u32 *duration) +{ + union bpf_attr attr; + int ret; + + bzero(&attr, sizeof(attr)); + attr.test.prog_fd = prog_fd; + attr.test.data_in = ptr_to_u64(data); + attr.test.data_out = ptr_to_u64(data_out); + attr.test.data_size_in = size; + attr.test.repeat = repeat; + + ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + if (size_out) + *size_out = attr.test.data_size_out; + if (retval) + *retval = attr.test.retval; + if (duration) + *duration = attr.test.duration; + return ret; +} diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 09c3dcac0496..edb4daeff7a5 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -26,6 +26,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); +int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, __u32 map_flags); /* Recommend log buffer size */ #define BPF_LOG_BUF_SIZE 65536 @@ -45,6 +47,8 @@ int bpf_obj_get(const char *pathname); int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); - +int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, + void *data_out, __u32 *size_out, __u32 *retval, + __u32 *duration); #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ac6eb863b2a4..1a2c07eb7795 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1618,8 +1618,7 @@ int bpf_program__nth_fd(struct bpf_program *prog, int 
n) return fd; } -static void bpf_program__set_type(struct bpf_program *prog, - enum bpf_prog_type type) +void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) { prog->type = type; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index b30394f9947a..32c7252f734e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -25,6 +25,7 @@ #include <stdint.h> #include <stdbool.h> #include <sys/types.h> // for size_t +#include <linux/bpf.h> enum libbpf_errno { __LIBBPF_ERRNO__START = 4000, @@ -185,6 +186,7 @@ int bpf_program__set_sched_cls(struct bpf_program *prog); int bpf_program__set_sched_act(struct bpf_program *prog); int bpf_program__set_xdp(struct bpf_program *prog); int bpf_program__set_perf_event(struct bpf_program *prog); +void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type); bool bpf_program__is_socket_filter(struct bpf_program *prog); bool bpf_program__is_tracepoint(struct bpf_program *prog); diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c index 544b05a53b70..ad572e6cdbd0 100644 --- a/tools/net/bpf_jit_disasm.c +++ b/tools/net/bpf_jit_disasm.c @@ -229,6 +229,7 @@ static void usage(void) { printf("Usage: bpf_jit_disasm [...]\n"); printf(" -o Also display related opcodes (default: off).\n"); + printf(" -O <file> Write binary image of code to file, don't disassemble to stdout.\n"); printf(" -f <file> Read last image dump from file or stdin (default: klog).\n"); printf(" -h Display this help.\n"); } @@ -238,12 +239,19 @@ int main(int argc, char **argv) unsigned int len, klen, opt, opcodes = 0; static uint8_t image[32768]; char *kbuff, *file = NULL; + char *ofile = NULL; + int ofd; + ssize_t nr; + uint8_t *pos; - while ((opt = getopt(argc, argv, "of:")) != -1) { + while ((opt = getopt(argc, argv, "of:O:")) != -1) { switch (opt) { case 'o': opcodes = 1; break; + case 'O': + ofile = optarg; + break; case 'f': file = optarg; break; @@ -263,11 +271,35 @@ int main(int argc, char **argv) } len = get_last_jit_image(kbuff, klen, image, sizeof(image)); - if (len > 0) - get_asm_insns(image, len, opcodes); - else + if (len <= 0) { fprintf(stderr, "No JIT image found!\n"); + goto done; + } + if (!ofile) { + get_asm_insns(image, len, opcodes); + goto done; + } + + ofd = open(ofile, O_WRONLY | O_CREAT | O_TRUNC, DEFFILEMODE); + if (ofd < 0) { + fprintf(stderr, "Could not open file %s for writing: ", ofile); + perror(NULL); + goto done; + } + pos = image; + do { + nr = write(ofd, pos, len); + if (nr < 0) { + fprintf(stderr, "Could not write data to %s: ", ofile); + perror(NULL); + goto done; + } + len -= nr; + pos += nr; + } while (len); + close(ofd); +done: put_log_buff(kbuff); return 0; } diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 9af09e8099c0..d8d94b9bd76c 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -8,16 +8,18 @@ ifneq ($(wildcard $(GENHDR)),) GENFLAGS := -DHAVE_GENHDR endif -CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -LDLIBS += -lcap +CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include +LDLIBS += -lcap -lelf -TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map +TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs + +TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o TEST_PROGS := test_kmod.sh include ../lib.mk -BPFOBJ := $(OUTPUT)/bpf.o +BPFOBJ := $(OUTPUT)/libbpf.a 
$(TEST_GEN_PROGS): $(BPFOBJ) @@ -28,3 +30,10 @@ force: $(BPFOBJ): force $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ + +CLANG ?= clang + +%.o: %.c + $(CLANG) -I../../../include/uapi -I../../../../samples/bpf/ \ + -D__x86_64__ -Wno-compare-distinct-pointer-types \ + -O2 -target bpf -c $< -o $@ diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h new file mode 100644 index 000000000000..19d0604f8694 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_endian.h @@ -0,0 +1,23 @@ +#ifndef __BPF_ENDIAN__ +#define __BPF_ENDIAN__ + +#include <asm/byteorder.h> + +#if __BYTE_ORDER == __LITTLE_ENDIAN +# define __bpf_ntohs(x) __builtin_bswap16(x) +# define __bpf_htons(x) __builtin_bswap16(x) +#elif __BYTE_ORDER == __BIG_ENDIAN +# define __bpf_ntohs(x) (x) +# define __bpf_htons(x) (x) +#else +# error "Fix your __BYTE_ORDER?!" +#endif + +#define bpf_htons(x) \ + (__builtin_constant_p(x) ? \ + __constant_htons(x) : __bpf_htons(x)) +#define bpf_ntohs(x) \ + (__builtin_constant_p(x) ? \ + __constant_ntohs(x) : __bpf_ntohs(x)) + +#endif diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 84a5d1823f02..20ecbaa0d85d 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h @@ -35,4 +35,11 @@ static inline unsigned int bpf_num_possible_cpus(void) return possible_cpus; } +#define __bpf_percpu_val_align __attribute__((__aligned__(8))) + +#define BPF_DECLARE_PERCPU(type, name) \ + struct { type v; /* padding */ } __bpf_percpu_val_align \ + name[bpf_num_possible_cpus()] +#define bpf_percpu(name, cpu) name[(cpu)].v + #endif /* __BPF_UTIL__ */ diff --git a/tools/testing/selftests/bpf/test_iptunnel_common.h b/tools/testing/selftests/bpf/test_iptunnel_common.h new file mode 100644 index 000000000000..e4cd252a1b20 --- /dev/null +++ b/tools/testing/selftests/bpf/test_iptunnel_common.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#ifndef _TEST_IPTNL_COMMON_H +#define _TEST_IPTNL_COMMON_H + +#include <linux/types.h> + +#define MAX_IPTNL_ENTRIES 256U + +struct vip { + union { + __u32 v6[4]; + __u32 v4; + } daddr; + __u16 dport; + __u16 family; + __u8 protocol; +}; + +struct iptnl_info { + union { + __u32 v6[4]; + __u32 v4; + } saddr; + union { + __u32 v6[4]; + __u32 v4; + } daddr; + __u16 family; + __u8 dmac[6]; +}; + +#endif diff --git a/tools/testing/selftests/bpf/test_l4lb.c b/tools/testing/selftests/bpf/test_l4lb.c new file mode 100644 index 000000000000..1e10c9590991 --- /dev/null +++ b/tools/testing/selftests/bpf/test_l4lb.c @@ -0,0 +1,473 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include <stddef.h> +#include <stdbool.h> +#include <string.h> +#include <linux/pkt_cls.h> +#include <linux/bpf.h> +#include <linux/in.h> +#include <linux/if_ether.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/icmp.h> +#include <linux/icmpv6.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include "bpf_helpers.h" +#include "test_iptunnel_common.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; + +static inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << shift) | (word >> ((-shift) & 31)); +} + +/* copy paste of jhash from kernel sources to make sure llvm + * can compile it into valid sequence of bpf instructions + */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +#define JHASH_INITVAL 0xdeadbeef + +typedef unsigned int u32; + +static inline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const unsigned char *k = key; + + a = b = c = JHASH_INITVAL + length + initval; + + while (length > 12) { + a += *(u32 *)(k); + b += *(u32 *)(k + 4); + c += *(u32 *)(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +static inline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + __jhash_final(a, b, c); + return c; +} + +static inline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); +} + +#define PCKT_FRAGMENTED 65343 +#define IPV4_HDR_LEN_NO_OPT 20 +#define IPV4_PLUS_ICMP_HDR 28 +#define IPV6_PLUS_ICMP_HDR 48 +#define RING_SIZE 2 +#define MAX_VIPS 12 +#define MAX_REALS 5 +#define CTL_MAP_SIZE 16 +#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE) +#define F_IPV6 (1 << 0) +#define F_HASH_NO_SRC_PORT (1 << 0) +#define F_ICMP (1 << 0) +#define F_SYN_SET (1 << 1) + +struct packet_description { + union { + __be32 src; + __be32 srcv6[4]; + }; + union { + __be32 dst; + __be32 dstv6[4]; + }; + union { + __u32 ports; + __u16 port16[2]; + }; + __u8 proto; + __u8 flags; +}; + +struct ctl_value { + union { + __u64 value; + __u32 ifindex; + __u8 mac[6]; + }; +}; + +struct vip_meta { + __u32 flags; + __u32 vip_num; +}; + +struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; +}; + +struct vip_stats { + __u64 bytes; + __u64 pkts; +}; + +struct eth_hdr { + unsigned char eth_dest[ETH_ALEN]; + unsigned char eth_source[ETH_ALEN]; + unsigned short eth_proto; +}; + +struct bpf_map_def SEC("maps") vip_map = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(struct vip), + .value_size = sizeof(struct vip_meta), + .max_entries = MAX_VIPS, +}; + +struct 
bpf_map_def SEC("maps") ch_rings = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = CH_RINGS_SIZE, +}; + +struct bpf_map_def SEC("maps") reals = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct real_definition), + .max_entries = MAX_REALS, +}; + +struct bpf_map_def SEC("maps") stats = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct vip_stats), + .max_entries = MAX_VIPS, +}; + +struct bpf_map_def SEC("maps") ctl_array = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(struct ctl_value), + .max_entries = CTL_MAP_SIZE, +}; + +static __always_inline __u32 get_packet_hash(struct packet_description *pckt, + bool ipv6) +{ + if (ipv6) + return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS), + pckt->ports, CH_RINGS_SIZE); + else + return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE); +} + +static __always_inline bool get_packet_dst(struct real_definition **real, + struct packet_description *pckt, + struct vip_meta *vip_info, + bool is_ipv6) +{ + __u32 hash = get_packet_hash(pckt, is_ipv6) % RING_SIZE; + __u32 key = RING_SIZE * vip_info->vip_num + hash; + __u32 *real_pos; + + real_pos = bpf_map_lookup_elem(&ch_rings, &key); + if (!real_pos) + return false; + key = *real_pos; + *real = bpf_map_lookup_elem(&reals, &key); + if (!(*real)) + return false; + return true; +} + +static __always_inline int parse_icmpv6(void *data, void *data_end, __u64 off, + struct packet_description *pckt) +{ + struct icmp6hdr *icmp_hdr; + struct ipv6hdr *ip6h; + + icmp_hdr = data + off; + if (icmp_hdr + 1 > data_end) + return TC_ACT_SHOT; + if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG) + return TC_ACT_OK; + off += sizeof(struct icmp6hdr); + ip6h = data + off; + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + pckt->proto = ip6h->nexthdr; + pckt->flags |= F_ICMP; + memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16); + memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16); + return TC_ACT_UNSPEC; +} + +static __always_inline int parse_icmp(void *data, void *data_end, __u64 off, + struct packet_description *pckt) +{ + struct icmphdr *icmp_hdr; + struct iphdr *iph; + + icmp_hdr = data + off; + if (icmp_hdr + 1 > data_end) + return TC_ACT_SHOT; + if (icmp_hdr->type != ICMP_DEST_UNREACH || + icmp_hdr->code != ICMP_FRAG_NEEDED) + return TC_ACT_OK; + off += sizeof(struct icmphdr); + iph = data + off; + if (iph + 1 > data_end) + return TC_ACT_SHOT; + if (iph->ihl != 5) + return TC_ACT_SHOT; + pckt->proto = iph->protocol; + pckt->flags |= F_ICMP; + pckt->src = iph->daddr; + pckt->dst = iph->saddr; + return TC_ACT_UNSPEC; +} + +static __always_inline bool parse_udp(void *data, __u64 off, void *data_end, + struct packet_description *pckt) +{ + struct udphdr *udp; + udp = data + off; + + if (udp + 1 > data_end) + return false; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = udp->source; + pckt->port16[1] = udp->dest; + } else { + pckt->port16[0] = udp->dest; + pckt->port16[1] = udp->source; + } + return true; +} + +static __always_inline bool parse_tcp(void *data, __u64 off, void *data_end, + struct packet_description *pckt) +{ + struct tcphdr *tcp; + + tcp = data + off; + if (tcp + 1 > data_end) + return false; + + if (tcp->syn) + pckt->flags |= F_SYN_SET; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = tcp->source; + pckt->port16[1] = tcp->dest; + } else { + pckt->port16[0] = tcp->dest; + pckt->port16[1] = tcp->source; + } + return true; +} 
+ +static __always_inline int process_packet(void *data, __u64 off, void *data_end, + bool is_ipv6, struct __sk_buff *skb) +{ + void *pkt_start = (void *)(long)skb->data; + struct packet_description pckt = {}; + struct eth_hdr *eth = pkt_start; + struct bpf_tunnel_key tkey = {}; + struct vip_stats *data_stats; + struct real_definition *dst; + struct vip_meta *vip_info; + struct ctl_value *cval; + __u32 v4_intf_pos = 1; + __u32 v6_intf_pos = 2; + struct ipv6hdr *ip6h; + struct vip vip = {}; + struct iphdr *iph; + int tun_flag = 0; + __u16 pkt_bytes; + __u64 iph_len; + __u32 ifindex; + __u8 protocol; + __u32 vip_num; + int action; + + tkey.tunnel_ttl = 64; + if (is_ipv6) { + ip6h = data + off; + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + + iph_len = sizeof(struct ipv6hdr); + protocol = ip6h->nexthdr; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(ip6h->payload_len); + off += iph_len; + if (protocol == IPPROTO_FRAGMENT) { + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_ICMPV6) { + action = parse_icmpv6(data, data_end, off, &pckt); + if (action >= 0) + return action; + off += IPV6_PLUS_ICMP_HDR; + } else { + memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16); + memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16); + } + } else { + iph = data + off; + if (iph + 1 > data_end) + return TC_ACT_SHOT; + if (iph->ihl != 5) + return TC_ACT_SHOT; + + protocol = iph->protocol; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(iph->tot_len); + off += IPV4_HDR_LEN_NO_OPT; + + if (iph->frag_off & PCKT_FRAGMENTED) + return TC_ACT_SHOT; + if (protocol == IPPROTO_ICMP) { + action = parse_icmp(data, data_end, off, &pckt); + if (action >= 0) + return action; + off += IPV4_PLUS_ICMP_HDR; + } else { + pckt.src = iph->saddr; + pckt.dst = iph->daddr; + } + } + protocol = pckt.proto; + + if (protocol == IPPROTO_TCP) { + if (!parse_tcp(data, off, data_end, &pckt)) + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_UDP) { + if (!parse_udp(data, off, data_end, &pckt)) + return TC_ACT_SHOT; + } else { + return TC_ACT_SHOT; + } + + if (is_ipv6) + memcpy(vip.daddr.v6, pckt.dstv6, 16); + else + vip.daddr.v4 = pckt.dst; + + vip.dport = pckt.port16[1]; + vip.protocol = pckt.proto; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) { + vip.dport = 0; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) + return TC_ACT_SHOT; + pckt.port16[1] = 0; + } + + if (vip_info->flags & F_HASH_NO_SRC_PORT) + pckt.port16[0] = 0; + + if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6)) + return TC_ACT_SHOT; + + if (dst->flags & F_IPV6) { + cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + memcpy(tkey.remote_ipv6, dst->dstv6, 16); + tun_flag = BPF_F_TUNINFO_IPV6; + } else { + cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + tkey.remote_ipv4 = dst->dst; + } + vip_num = vip_info->vip_num; + data_stats = bpf_map_lookup_elem(&stats, &vip_num); + if (!data_stats) + return TC_ACT_SHOT; + data_stats->pkts++; + data_stats->bytes += pkt_bytes; + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag); + *(u32 *)eth->eth_dest = tkey.remote_ipv4; + return bpf_redirect(ifindex, 0); +} + +SEC("l4lb-demo") +int balancer_ingress(struct __sk_buff *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct eth_hdr *eth = data; + __u32 eth_proto; + __u32 nh_off; + + nh_off = sizeof(struct eth_hdr); + if (data + nh_off > data_end) + return 
TC_ACT_SHOT; + eth_proto = eth->eth_proto; + if (eth_proto == bpf_htons(ETH_P_IP)) + return process_packet(data, nh_off, data_end, false, ctx); + else if (eth_proto == bpf_htons(ETH_P_IPV6)) + return process_packet(data, nh_off, data_end, true, ctx); + else + return TC_ACT_SHOT; +} +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 00b0aff56e2e..8c10c9180c1a 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -22,7 +22,7 @@ #include "bpf_util.h" #define LOCAL_FREE_TARGET (128) -#define PERCPU_FREE_TARGET (16) +#define PERCPU_FREE_TARGET (4) static int nr_cpus; @@ -191,12 +191,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) int next_cpu = 0; if (map_flags & BPF_F_NO_COMMON_LRU) - /* Ther percpu lru list (i.e each cpu has its own LRU - * list) does not have a local free list. Hence, - * it will only free old nodes till there is no free - * from the LRU list. Hence, this test does not apply - * to BPF_F_NO_COMMON_LRU - */ + /* This test is only applicable to common LRU list */ return; printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, @@ -227,7 +222,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free) for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } /* Insert 1+tgt_free to 2*tgt_free @@ -273,12 +268,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) int next_cpu = 0; if (map_flags & BPF_F_NO_COMMON_LRU) - /* Ther percpu lru list (i.e each cpu has its own LRU - * list) does not have a local free list. Hence, - * it will only free old nodes till there is no free - * from the LRU list. 
Hence, this test does not apply - * to BPF_F_NO_COMMON_LRU - */ + /* This test is only applicable to common LRU list */ return; printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, @@ -290,11 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) assert(batch_size * 2 == tgt_free); map_size = tgt_free + batch_size; - if (map_flags & BPF_F_NO_COMMON_LRU) - lru_map_fd = create_map(map_type, map_flags, - map_size * nr_cpus); - else - lru_map_fd = create_map(map_type, map_flags, map_size); + lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size); @@ -341,7 +327,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(value[0] == 4321); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } value[0] = 1234; @@ -361,7 +347,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -387,6 +373,10 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) unsigned int map_size; int next_cpu = 0; + if (map_flags & BPF_F_NO_COMMON_LRU) + /* This test is only applicable to common LRU list */ + return; + printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, map_flags); @@ -396,11 +386,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) assert(batch_size * 2 == tgt_free); map_size = tgt_free * 2; - if (map_flags & BPF_F_NO_COMMON_LRU) - lru_map_fd = create_map(map_type, map_flags, - map_size * nr_cpus); - else - lru_map_fd = create_map(map_type, map_flags, map_size); + lru_map_fd = create_map(map_type, map_flags, map_size); assert(lru_map_fd != -1); expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size); @@ -419,7 +405,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) for (key = 1; key < end_key; key++) { assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } /* Add 1+2*tgt_free to tgt_free*5/2 @@ -431,7 +417,7 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -491,7 +477,7 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free) assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); assert(!bpf_map_update_elem(expected_map_fd, &key, value, - BPF_NOEXIST)); + BPF_NOEXIST)); } assert(map_equal(lru_map_fd, expected_map_fd)); @@ -566,6 +552,65 @@ static void test_lru_sanity5(int map_type, int map_flags) printf("Pass\n"); } +/* Test list rotation for BPF_F_NO_COMMON_LRU map */ +static void test_lru_sanity6(int map_type, int map_flags, int tgt_free) +{ + int lru_map_fd, expected_map_fd; + unsigned long long key, value[nr_cpus]; + unsigned int map_size = tgt_free * 2; + int next_cpu = 0; + + if (!(map_flags & BPF_F_NO_COMMON_LRU)) + return; + + printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, + map_flags); + + 
assert(sched_next_online(0, &next_cpu) != -1); + + expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, map_size); + assert(expected_map_fd != -1); + + lru_map_fd = create_map(map_type, map_flags, map_size * nr_cpus); + assert(lru_map_fd != -1); + + value[0] = 1234; + + for (key = 1; key <= tgt_free; key++) { + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); + } + + for (; key <= tgt_free * 2; key++) { + unsigned long long stable_key; + + /* Make ref bit sticky for key: [1, tgt_free] */ + for (stable_key = 1; stable_key <= tgt_free; stable_key++) { + /* Mark the ref bit */ + assert(!bpf_map_lookup_elem(lru_map_fd, &stable_key, + value)); + } + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + } + + for (; key <= tgt_free * 3; key++) { + assert(!bpf_map_update_elem(lru_map_fd, &key, value, + BPF_NOEXIST)); + assert(!bpf_map_update_elem(expected_map_fd, &key, value, + BPF_NOEXIST)); + } + + assert(map_equal(lru_map_fd, expected_map_fd)); + + close(expected_map_fd); + close(lru_map_fd); + + printf("Pass\n"); +} + int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; @@ -593,6 +638,7 @@ int main(int argc, char **argv) test_lru_sanity3(map_types[t], map_flags[f], tgt_free); test_lru_sanity4(map_types[t], map_flags[f], tgt_free); test_lru_sanity5(map_types[t], map_flags[f]); + test_lru_sanity6(map_types[t], map_flags[f], tgt_free); printf("\n"); } diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 20f1871874df..93314524de0d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -28,7 +28,7 @@ static int map_flags; static void test_hashmap(int task, void *data) { - long long key, next_key, value; + long long key, next_key, first_key, value; int fd; fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), @@ -89,10 +89,13 @@ static void test_hashmap(int task, void *data) assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 && + (first_key == 1 || first_key == 2)); assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && - (next_key == 1 || next_key == 2)); + (next_key == first_key)); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && - (next_key == 1 || next_key == 2)); + (next_key == 1 || next_key == 2) && + (next_key != first_key)); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 && errno == ENOENT); @@ -105,6 +108,8 @@ static void test_hashmap(int task, void *data) key = 0; /* Check that map is empty. 
*/ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == -1 && + errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && errno == ENOENT); @@ -132,20 +137,20 @@ static void test_hashmap_sizes(int task, void *data) static void test_hashmap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); - long long value[nr_cpus]; - long long key, next_key; + BPF_DECLARE_PERCPU(long, value); + long long key, next_key, first_key; int expected_key_mask = 0; int fd, i; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key), - sizeof(value[0]), 2, map_flags); + sizeof(bpf_percpu(value, 0)), 2, map_flags); if (fd < 0) { printf("Failed to create hashmap '%s'!\n", strerror(errno)); exit(1); } for (i = 0; i < nr_cpus; i++) - value[i] = i + 100; + bpf_percpu(value, i) = i + 100; key = 1; /* Insert key=1 element. */ @@ -165,8 +170,9 @@ static void test_hashmap_percpu(int task, void *data) /* Check that key=1 can be found. Value could be 0 if the lookup * was run from a different CPU. */ - value[0] = 1; - assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100); + bpf_percpu(value, 0) = 1; + assert(bpf_map_lookup_elem(fd, &key, value) == 0 && + bpf_percpu(value, 0) == 100); key = 2; /* Check that key=2 is not found. */ @@ -193,14 +199,20 @@ static void test_hashmap_percpu(int task, void *data) assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &first_key) == 0 && + ((expected_key_mask & first_key) == first_key)); while (!bpf_map_get_next_key(fd, &key, &next_key)) { + if (first_key) { + assert(next_key == first_key); + first_key = 0; + } assert((expected_key_mask & next_key) == next_key); expected_key_mask &= ~next_key; assert(bpf_map_lookup_elem(fd, &next_key, value) == 0); for (i = 0; i < nr_cpus; i++) - assert(value[i] == i + 100); + assert(bpf_percpu(value, i) == i + 100); key = next_key; } @@ -219,6 +231,8 @@ static void test_hashmap_percpu(int task, void *data) key = 0; /* Check that map is empty. */ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == -1 && + errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 && errno == ENOENT); @@ -264,6 +278,8 @@ static void test_arraymap(int task, void *data) assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 && + next_key == 0); assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && next_key == 0); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && @@ -281,34 +297,36 @@ static void test_arraymap(int task, void *data) static void test_arraymap_percpu(int task, void *data) { unsigned int nr_cpus = bpf_num_possible_cpus(); + BPF_DECLARE_PERCPU(long, values); int key, next_key, fd, i; - long long values[nr_cpus]; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(values[0]), 2, 0); + sizeof(bpf_percpu(values, 0)), 2, 0); if (fd < 0) { printf("Failed to create arraymap '%s'!\n", strerror(errno)); exit(1); } for (i = 0; i < nr_cpus; i++) - values[i] = i + 100; + bpf_percpu(values, i) = i + 100; key = 1; /* Insert key=1 element. */ assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); - values[0] = 0; + bpf_percpu(values, 0) = 0; assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 && errno == EEXIST); /* Check that key=1 can be found. 
*/ - assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100); + assert(bpf_map_lookup_elem(fd, &key, values) == 0 && + bpf_percpu(values, 0) == 100); key = 0; /* Check that key=0 is also found and zero initialized. */ assert(bpf_map_lookup_elem(fd, &key, values) == 0 && - values[0] == 0 && values[nr_cpus - 1] == 0); + bpf_percpu(values, 0) == 0 && + bpf_percpu(values, nr_cpus - 1) == 0); /* Check that key=2 cannot be inserted due to max_entries limit. */ key = 2; @@ -319,6 +337,8 @@ static void test_arraymap_percpu(int task, void *data) assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT); /* Iterate over two elements. */ + assert(bpf_map_get_next_key(fd, NULL, &next_key) == 0 && + next_key == 0); assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 && next_key == 0); assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 && @@ -336,15 +356,15 @@ static void test_arraymap_percpu(int task, void *data) static void test_arraymap_percpu_many_keys(void) { unsigned int nr_cpus = bpf_num_possible_cpus(); + BPF_DECLARE_PERCPU(long, values); /* nr_keys is not too large otherwise the test stresses percpu * allocator more than anything else */ unsigned int nr_keys = 2000; - long long values[nr_cpus]; int key, fd, i; fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key), - sizeof(values[0]), nr_keys, 0); + sizeof(bpf_percpu(values, 0)), nr_keys, 0); if (fd < 0) { printf("Failed to create per-cpu arraymap '%s'!\n", strerror(errno)); @@ -352,19 +372,19 @@ static void test_arraymap_percpu_many_keys(void) } for (i = 0; i < nr_cpus; i++) - values[i] = i + 10; + bpf_percpu(values, i) = i + 10; for (key = 0; key < nr_keys; key++) assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0); for (key = 0; key < nr_keys; key++) { for (i = 0; i < nr_cpus; i++) - values[i] = 0; + bpf_percpu(values, i) = 0; assert(bpf_map_lookup_elem(fd, &key, values) == 0); for (i = 0; i < nr_cpus; i++) - assert(values[i] == i + 10); + assert(bpf_percpu(values, i) == i + 10); } close(fd); @@ -400,6 +420,8 @@ static void test_map_large(void) errno == E2BIG); /* Iterate through all elements. */ + assert(bpf_map_get_next_key(fd, NULL, &key) == 0); + key.c = -1; for (i = 0; i < MAP_SIZE; i++) assert(bpf_map_get_next_key(fd, &key, &key) == 0); assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); @@ -499,6 +521,7 @@ static void test_map_parallel(void) errno == EEXIST); /* Check that all elements were inserted. */ + assert(bpf_map_get_next_key(fd, NULL, &key) == 0); key = -1; for (i = 0; i < MAP_SIZE; i++) assert(bpf_map_get_next_key(fd, &key, &key) == 0); @@ -518,6 +541,7 @@ static void test_map_parallel(void) /* Nothing should be left. */ key = -1; + assert(bpf_map_get_next_key(fd, NULL, &key) == -1 && errno == ENOENT); assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); } diff --git a/tools/testing/selftests/bpf/test_pkt_access.c b/tools/testing/selftests/bpf/test_pkt_access.c new file mode 100644 index 000000000000..39387bb7e08c --- /dev/null +++ b/tools/testing/selftests/bpf/test_pkt_access.c @@ -0,0 +1,64 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include <stddef.h> +#include <linux/bpf.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/in.h> +#include <linux/tcp.h> +#include <linux/pkt_cls.h> +#include "bpf_helpers.h" +#include "bpf_endian.h" + +#define barrier() __asm__ __volatile__("": : :"memory") +int _version SEC("version") = 1; + +SEC("test1") +int process(struct __sk_buff *skb) +{ + void *data_end = (void *)(long)skb->data_end; + void *data = (void *)(long)skb->data; + struct ethhdr *eth = (struct ethhdr *)(data); + struct tcphdr *tcp = NULL; + __u8 proto = 255; + __u64 ihl_len; + + if (eth + 1 > data_end) + return TC_ACT_SHOT; + + if (eth->h_proto == bpf_htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)(eth + 1); + + if (iph + 1 > data_end) + return TC_ACT_SHOT; + ihl_len = iph->ihl * 4; + proto = iph->protocol; + tcp = (struct tcphdr *)((void *)(iph) + ihl_len); + } else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1); + + if (ip6h + 1 > data_end) + return TC_ACT_SHOT; + ihl_len = sizeof(*ip6h); + proto = ip6h->nexthdr; + tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len); + } + + if (tcp) { + if (((void *)(tcp) + 20) > data_end || proto != 6) + return TC_ACT_SHOT; + barrier(); /* to force ordering of checks */ + if (((void *)(tcp) + 18) > data_end) + return TC_ACT_SHOT; + if (tcp->urg_ptr == 123) + return TC_ACT_OK; + } + + return TC_ACT_UNSPEC; +} diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c new file mode 100644 index 000000000000..4ed049a0b14b --- /dev/null +++ b/tools/testing/selftests/bpf/test_progs.c @@ -0,0 +1,283 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include <stdio.h> +#include <unistd.h> +#include <errno.h> +#include <string.h> +#include <assert.h> +#include <stdlib.h> + +#include <linux/types.h> +typedef __u16 __sum16; +#include <arpa/inet.h> +#include <linux/if_ether.h> +#include <linux/if_packet.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> + +#include <sys/wait.h> +#include <sys/resource.h> + +#include <linux/bpf.h> +#include <linux/err.h> +#include <bpf/bpf.h> +#include <bpf/libbpf.h> +#include "test_iptunnel_common.h" +#include "bpf_util.h" +#include "bpf_endian.h" + +static int error_cnt, pass_cnt; + +#define MAGIC_BYTES 123 + +/* ipv4 test vector */ +static struct { + struct ethhdr eth; + struct iphdr iph; + struct tcphdr tcp; +} __packed pkt_v4 = { + .eth.h_proto = bpf_htons(ETH_P_IP), + .iph.ihl = 5, + .iph.protocol = 6, + .iph.tot_len = bpf_htons(MAGIC_BYTES), + .tcp.urg_ptr = 123, +}; + +/* ipv6 test vector */ +static struct { + struct ethhdr eth; + struct ipv6hdr iph; + struct tcphdr tcp; +} __packed pkt_v6 = { + .eth.h_proto = bpf_htons(ETH_P_IPV6), + .iph.nexthdr = 6, + .iph.payload_len = bpf_htons(MAGIC_BYTES), + .tcp.urg_ptr = 123, +}; + +#define CHECK(condition, tag, format...) 
({ \ + int __ret = !!(condition); \ + if (__ret) { \ + error_cnt++; \ + printf("%s:FAIL:%s ", __func__, tag); \ + printf(format); \ + } else { \ + pass_cnt++; \ + printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\ + } \ +}) + +static int bpf_prog_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_program *prog; + struct bpf_object *obj; + int err; + + obj = bpf_object__open(file); + if (IS_ERR(obj)) { + error_cnt++; + return -ENOENT; + } + + prog = bpf_program__next(NULL, obj); + if (!prog) { + bpf_object__close(obj); + error_cnt++; + return -ENOENT; + } + + bpf_program__set_type(prog, type); + err = bpf_object__load(obj); + if (err) { + bpf_object__close(obj); + error_cnt++; + return -EINVAL; + } + + *pobj = obj; + *prog_fd = bpf_program__fd(prog); + return 0; +} + +static int bpf_find_map(const char *test, struct bpf_object *obj, + const char *name) +{ + struct bpf_map *map; + + map = bpf_object__find_map_by_name(obj, name); + if (!map) { + printf("%s:FAIL:map '%s' not found\n", test, name); + error_cnt++; + return -1; + } + return bpf_map__fd(map); +} + +static void test_pkt_access(void) +{ + const char *file = "./test_pkt_access.o"; + struct bpf_object *obj; + __u32 duration, retval; + int err, prog_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) + return; + + err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), + NULL, NULL, &retval, &duration); + CHECK(err || errno || retval, "ipv4", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || errno || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + bpf_object__close(obj); +} + +static void test_xdp(void) +{ + struct vip key4 = {.protocol = 6, .family = AF_INET}; + struct vip key6 = {.protocol = 6, .family = AF_INET6}; + struct iptnl_info value4 = {.family = AF_INET}; + struct iptnl_info value6 = {.family = AF_INET6}; + const char *file = "./test_xdp.o"; + struct bpf_object *obj; + char buf[128]; + struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr); + struct iphdr *iph = (void *)buf + sizeof(struct ethhdr); + __u32 duration, retval, size; + int err, prog_fd, map_fd; + + err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (err) + return; + + map_fd = bpf_find_map(__func__, obj, "vip2tnl"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key4, &value4, 0); + bpf_map_update_elem(map_fd, &key6, &value6, 0); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + + CHECK(err || errno || retval != XDP_TX || size != 74 || + iph->protocol != IPPROTO_IPIP, "ipv4", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); + + err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || errno || retval != XDP_TX || size != 114 || + iph6->nexthdr != IPPROTO_IPV6, "ipv6", + "err %d errno %d retval %d size %d\n", + err, errno, retval, size); +out: + bpf_object__close(obj); +} + +#define MAGIC_VAL 0x1234 +#define NUM_ITER 100000 +#define VIP_NUM 5 + +static void test_l4lb(void) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + const char *file = "./test_l4lb.o"; + struct vip key = {.protocol = 6}; + struct vip_meta { + __u32 flags; + __u32 vip_num; + } value = {.vip_num = VIP_NUM}; + __u32 stats_key = 
VIP_NUM; + struct vip_stats { + __u64 bytes; + __u64 pkts; + } stats[nr_cpus]; + struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; + } real_def = {.dst = MAGIC_VAL}; + __u32 ch_key = 11, real_num = 3; + __u32 duration, retval, size; + int err, i, prog_fd, map_fd; + __u64 bytes = 0, pkts = 0; + struct bpf_object *obj; + char buf[128]; + u32 *magic = (u32 *)buf; + + err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); + if (err) + return; + + map_fd = bpf_find_map(__func__, obj, "vip_map"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &key, &value, 0); + + map_fd = bpf_find_map(__func__, obj, "ch_rings"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); + + map_fd = bpf_find_map(__func__, obj, "reals"); + if (map_fd < 0) + goto out; + bpf_map_update_elem(map_fd, &real_num, &real_def, 0); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), + buf, &size, &retval, &duration); + CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || + *magic != MAGIC_VAL, "ipv4", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), + buf, &size, &retval, &duration); + CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || + *magic != MAGIC_VAL, "ipv6", + "err %d errno %d retval %d size %d magic %x\n", + err, errno, retval, size, *magic); + + map_fd = bpf_find_map(__func__, obj, "stats"); + if (map_fd < 0) + goto out; + bpf_map_lookup_elem(map_fd, &stats_key, stats); + for (i = 0; i < nr_cpus; i++) { + bytes += stats[i].bytes; + pkts += stats[i].pkts; + } + if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { + error_cnt++; + printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); + } +out: + bpf_object__close(obj); +} + +int main(void) +{ + struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; + + setrlimit(RLIMIT_MEMLOCK, &rinf); + + test_pkt_access(); + test_xdp(); + test_l4lb(); + + printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); + return 0; +} diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c848e90b6421..3773562056da 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -46,6 +46,7 @@ #define MAX_INSNS 512 #define MAX_FIXUPS 8 +#define MAX_NR_MAPS 4 #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) @@ -55,6 +56,7 @@ struct bpf_test { int fixup_map1[MAX_FIXUPS]; int fixup_map2[MAX_FIXUPS]; int fixup_prog[MAX_FIXUPS]; + int fixup_map_in_map[MAX_FIXUPS]; const char *errstr; const char *errstr_unpriv; enum { @@ -189,6 +191,86 @@ static struct bpf_test tests[] = { .result = REJECT, }, { + "test6 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0), + BPF_RAW_INSN(0, 0, 0, 0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "test7 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { + "test8 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 1, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "uses reserved fields", + .result = REJECT, + }, + { + "test9 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, 0, 1, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + 
.result = REJECT, + }, + { + "test10 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, BPF_REG_1, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test11 ld_imm64", + .insns = { + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { + "test12 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, 0, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "not pointing to valid bpf_map", + .result = REJECT, + }, + { + "test13 ld_imm64", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, BPF_REG_1, 0, 1), + BPF_RAW_INSN(0, 0, BPF_REG_1, 0, 1), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_ld_imm64 insn", + .result = REJECT, + }, + { "no bpf_exit", .insns = { BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), @@ -329,6 +411,30 @@ static struct bpf_test tests[] = { .result = REJECT, }, { + "invalid fp arithmetic", + /* If this gets ever changed, make sure JITs can deal with it. */ + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "R1 pointer arithmetic", + .result_unpriv = REJECT, + .errstr = "R1 invalid mem access", + .result = REJECT, + }, + { + "non-invalid fp arithmetic", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + }, + { "invalid argument register", .insns = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, @@ -770,6 +876,9 @@ static struct bpf_test tests[] = { BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, vlan_tci)), BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, napi_id)), + BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0), BPF_EXIT_INSN(), }, .result = ACCEPT, @@ -1796,6 +1905,20 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { + "unpriv: adding of fp", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, + .errstr_unpriv = "pointer arithmetic prohibited", + .result_unpriv = REJECT, + .errstr = "R1 invalid mem access", + .result = REJECT, + }, + { "unpriv: cmp of stack pointer", .insns = { BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -1809,16 +1932,22 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { - "unpriv: obfuscate stack pointer", + "stack pointer arithmetic", .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_MOV64_IMM(BPF_REG_1, 4), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), + BPF_ST_MEM(0, BPF_REG_2, 4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), + BPF_ST_MEM(0, BPF_REG_2, 4, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R2 pointer arithmetic", - .result_unpriv = REJECT, .result = ACCEPT, }, { @@ -2467,6 
+2596,25 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "direct packet access: test16 (arith on data_end)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid access to packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { "helper access to packet: test1, valid packet_ptr range", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -4720,6 +4868,75 @@ static struct bpf_test tests[] = { .result = REJECT, .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { + "map in map access", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .result = ACCEPT, + }, + { + "invalid inner map pointer", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .errstr = "R1 type=inv expected=map_ptr", + .errstr_unpriv = "R1 pointer arithmetic prohibited", + .result = REJECT, + }, + { + "forgot null checking on the inner map pointer", + .insns = { + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_ST_MEM(0, BPF_REG_10, -4, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_MOV64_REG(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_in_map = { 3 }, + .errstr = "R1 type=map_value_or_null expected=map_ptr", + .result = REJECT, } }; @@ -4757,42 +4974,73 @@ static int create_prog_array(void) return fd; } +static int create_map_in_map(void) +{ + int inner_map_fd, outer_map_fd; + + inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int), + sizeof(int), 1, 0); + if (inner_map_fd < 0) { + printf("Failed to create array '%s'!\n", strerror(errno)); + return inner_map_fd; + } + + outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, + sizeof(int), inner_map_fd, 1, 0); + if (outer_map_fd < 0) + 
printf("Failed to create array of maps '%s'!\n", + strerror(errno)); + + close(inner_map_fd); + + return outer_map_fd; +} + static char bpf_vlog[32768]; static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog, - int *fd_f1, int *fd_f2, int *fd_f3) + int *map_fds) { int *fixup_map1 = test->fixup_map1; int *fixup_map2 = test->fixup_map2; int *fixup_prog = test->fixup_prog; + int *fixup_map_in_map = test->fixup_map_in_map; /* Allocating HTs with 1 elem is fine here, since we only test * for verifier and not do a runtime lookup, so the only thing * that really matters is value size in this case. */ if (*fixup_map1) { - *fd_f1 = create_map(sizeof(long long), 1); + map_fds[0] = create_map(sizeof(long long), 1); do { - prog[*fixup_map1].imm = *fd_f1; + prog[*fixup_map1].imm = map_fds[0]; fixup_map1++; } while (*fixup_map1); } if (*fixup_map2) { - *fd_f2 = create_map(sizeof(struct test_val), 1); + map_fds[1] = create_map(sizeof(struct test_val), 1); do { - prog[*fixup_map2].imm = *fd_f2; + prog[*fixup_map2].imm = map_fds[1]; fixup_map2++; } while (*fixup_map2); } if (*fixup_prog) { - *fd_f3 = create_prog_array(); + map_fds[2] = create_prog_array(); do { - prog[*fixup_prog].imm = *fd_f3; + prog[*fixup_prog].imm = map_fds[2]; fixup_prog++; } while (*fixup_prog); } + + if (*fixup_map_in_map) { + map_fds[3] = create_map_in_map(); + do { + prog[*fixup_map_in_map].imm = map_fds[3]; + fixup_map_in_map++; + } while (*fixup_map_in_map); + } } static void do_test_single(struct bpf_test *test, bool unpriv, @@ -4802,10 +5050,14 @@ static void do_test_single(struct bpf_test *test, bool unpriv, struct bpf_insn *prog = test->insns; int prog_len = probe_filter_length(prog); int prog_type = test->prog_type; - int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1; + int map_fds[MAX_NR_MAPS]; const char *expected_err; + int i; + + for (i = 0; i < MAX_NR_MAPS; i++) + map_fds[i] = -1; - do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3); + do_test_fixup(test, prog, map_fds); fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_len, "GPL", 0, bpf_vlog, @@ -4848,9 +5100,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv, " (NOTE: reject due to unknown alignment)" : ""); close_fds: close(fd_prog); - close(fd_f1); - close(fd_f2); - close(fd_f3); + for (i = 0; i < MAX_NR_MAPS; i++) + close(map_fds[i]); sched_yield(); return; fail_log: diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/test_xdp.c new file mode 100644 index 000000000000..5e7df8bb5b5d --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp.c @@ -0,0 +1,235 @@ +/* Copyright (c) 2016,2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
diff --git a/tools/testing/selftests/bpf/test_xdp.c b/tools/testing/selftests/bpf/test_xdp.c
new file mode 100644
index 000000000000..5e7df8bb5b5d
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp.c
@@ -0,0 +1,235 @@
+/* Copyright (c) 2016,2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/pkt_cls.h>
+#include <sys/socket.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_iptunnel_common.h"
+
+int _version SEC("version") = 1;
+
+struct bpf_map_def SEC("maps") rxcnt = {
+	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(__u64),
+	.max_entries = 256,
+};
+
+struct bpf_map_def SEC("maps") vip2tnl = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(struct vip),
+	.value_size = sizeof(struct iptnl_info),
+	.max_entries = MAX_IPTNL_ENTRIES,
+};
+
+static __always_inline void count_tx(__u32 protocol)
+{
+	__u64 *rxcnt_count;
+
+	rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
+	if (rxcnt_count)
+		*rxcnt_count += 1;
+}
+
+static __always_inline int get_dport(void *trans_data, void *data_end,
+				     __u8 protocol)
+{
+	struct tcphdr *th;
+	struct udphdr *uh;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		th = (struct tcphdr *)trans_data;
+		if (th + 1 > data_end)
+			return -1;
+		return th->dest;
+	case IPPROTO_UDP:
+		uh = (struct udphdr *)trans_data;
+		if (uh + 1 > data_end)
+			return -1;
+		return uh->dest;
+	default:
+		return 0;
+	}
+}
+
+static __always_inline void set_ethhdr(struct ethhdr *new_eth,
+				       const struct ethhdr *old_eth,
+				       const struct iptnl_info *tnl,
+				       __be16 h_proto)
+{
+	memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
+	memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
+	new_eth->h_proto = h_proto;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp)
+{
+	void *data_end = (void *)(long)xdp->data_end;
+	void *data = (void *)(long)xdp->data;
+	struct iptnl_info *tnl;
+	struct ethhdr *new_eth;
+	struct ethhdr *old_eth;
+	struct iphdr *iph = data + sizeof(struct ethhdr);
+	__u16 *next_iph;
+	__u16 payload_len;
+	struct vip vip = {};
+	int dport;
+	__u32 csum = 0;
+	int i;
+
+	if (iph + 1 > data_end)
+		return XDP_DROP;
+
+	dport = get_dport(iph + 1, data_end, iph->protocol);
+	if (dport == -1)
+		return XDP_DROP;
+
+	vip.protocol = iph->protocol;
+	vip.family = AF_INET;
+	vip.daddr.v4 = iph->daddr;
+	vip.dport = dport;
+	payload_len = bpf_ntohs(iph->tot_len);
+
+	tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+	/* It only does v4-in-v4 */
+	if (!tnl || tnl->family != AF_INET)
+		return XDP_PASS;
+
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+		return XDP_DROP;
+
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+
+	new_eth = data;
+	iph = data + sizeof(*new_eth);
+	old_eth = data + sizeof(*iph);
+
+	if (new_eth + 1 > data_end ||
+	    old_eth + 1 > data_end ||
+	    iph + 1 > data_end)
+		return XDP_DROP;
+
+	set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
+
+	iph->version = 4;
+	iph->ihl = sizeof(*iph) >> 2;
+	iph->frag_off = 0;
+	iph->protocol = IPPROTO_IPIP;
+	iph->check = 0;
+	iph->tos = 0;
+	iph->tot_len = bpf_htons(payload_len + sizeof(*iph));
+	iph->daddr = tnl->daddr.v4;
+	iph->saddr = tnl->saddr.v4;
+	iph->ttl = 8;
+
+	next_iph = (__u16 *)iph;
+#pragma clang loop unroll(full)
+	for (i = 0; i < sizeof(*iph) >> 1; i++)
+		csum += *next_iph++;
+
+	iph->check = ~((csum & 0xffff) + (csum >> 16));
+
+	count_tx(vip.protocol);
+
+	return XDP_TX;
+}
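handle_ipv4() recomputes the outer IPv4 header checksum by summing the header as 16-bit words and folding the carry once. A general-purpose fold keeps folding until no carry remains; a standalone RFC 1071-style sketch (not part of the patch, plain C rather than restricted C) for comparison:

/* Hypothetical helper: one's-complement checksum over len_u16 16-bit words
 * (10 for a bare IPv4 header), folding carries until none remain. The test
 * program above folds only once.
 */
#include <linux/types.h>

static __u16 ipv4_csum(const void *hdr, int len_u16)
{
	const __u16 *p = hdr;
	__u32 sum = 0;
	int i;

	for (i = 0; i < len_u16; i++)
		sum += p[i];

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return (__u16)~sum;
}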
+
+static __always_inline int handle_ipv6(struct xdp_md *xdp)
+{
+	void *data_end = (void *)(long)xdp->data_end;
+	void *data = (void *)(long)xdp->data;
+	struct iptnl_info *tnl;
+	struct ethhdr *new_eth;
+	struct ethhdr *old_eth;
+	struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+	__u16 payload_len;
+	struct vip vip = {};
+	int dport;
+
+	if (ip6h + 1 > data_end)
+		return XDP_DROP;
+
+	dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
+	if (dport == -1)
+		return XDP_DROP;
+
+	vip.protocol = ip6h->nexthdr;
+	vip.family = AF_INET6;
+	memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
+	vip.dport = dport;
+	payload_len = ip6h->payload_len;
+
+	tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+	/* It only does v6-in-v6 */
+	if (!tnl || tnl->family != AF_INET6)
+		return XDP_PASS;
+
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+		return XDP_DROP;
+
+	data = (void *)(long)xdp->data;
+	data_end = (void *)(long)xdp->data_end;
+
+	new_eth = data;
+	ip6h = data + sizeof(*new_eth);
+	old_eth = data + sizeof(*ip6h);
+
+	if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
+	    ip6h + 1 > data_end)
+		return XDP_DROP;
+
+	set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
+
+	ip6h->version = 6;
+	ip6h->priority = 0;
+	memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+	ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h));
+	ip6h->nexthdr = IPPROTO_IPV6;
+	ip6h->hop_limit = 8;
+	memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
+	memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
+
+	count_tx(vip.protocol);
+
+	return XDP_TX;
+}
+
+SEC("xdp_tx_iptunnel")
+int _xdp_tx_iptunnel(struct xdp_md *xdp)
+{
+	void *data_end = (void *)(long)xdp->data_end;
+	void *data = (void *)(long)xdp->data;
+	struct ethhdr *eth = data;
+	__u16 h_proto;
+
+	if (eth + 1 > data_end)
+		return XDP_DROP;
+
+	h_proto = eth->h_proto;
+
+	if (h_proto == bpf_htons(ETH_P_IP))
+		return handle_ipv4(xdp);
+	else if (h_proto == bpf_htons(ETH_P_IPV6))
+
+		return handle_ipv6(xdp);
+	else
+		return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
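A program like xdp_tx_iptunnel can be exercised without a real device through the BPF_PROG_TEST_RUN command wired up earlier in this series. Below is a hypothetical harness sketch: loading test_xdp.o (to obtain prog_fd) and populating vip2tnl are assumed to happen elsewhere, and the packet contents are purely illustrative.

/* Hypothetical sketch: run an XDP program over a synthetic IPv4/TCP frame
 * and check the verdict. Without a matching vip2tnl entry the program
 * returns XDP_PASS instead of XDP_TX.
 */
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf.h>	/* tools/lib/bpf: bpf_prog_test_run() */

static int check_xdp_tx(int prog_fd)
{
	char pkt[128] = {}, out[256];
	struct ethhdr *eth = (struct ethhdr *)pkt;
	struct iphdr *iph = (struct iphdr *)(eth + 1);
	__u32 out_size, retval, duration;
	int err;

	eth->h_proto = htons(ETH_P_IP);
	iph->version = 4;
	iph->ihl = 5;
	iph->protocol = IPPROTO_TCP;
	/* daddr/dport would have to match a vip2tnl entry for XDP_TX */

	err = bpf_prog_test_run(prog_fd, 1, pkt, sizeof(pkt),
				out, &out_size, &retval, &duration);
	if (err)
		return err;

	return retval == XDP_TX ? 0 : -1;
}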
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index fbfe5d0d5c2e..35cbb4cba410 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -5,7 +5,7 @@ CFLAGS += -I../../../../usr/include/
 
 reuseport_bpf_numa: LDFLAGS += -lnuma
 
-TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh
+TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh
 TEST_GEN_FILES = socket
 TEST_GEN_FILES += psock_fanout psock_tpacket
 TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
diff --git a/tools/testing/selftests/net/netdevice.sh b/tools/testing/selftests/net/netdevice.sh
new file mode 100755
index 000000000000..4e00568d70c2
--- /dev/null
+++ b/tools/testing/selftests/net/netdevice.sh
@@ -0,0 +1,200 @@
+#!/bin/sh
+#
+# This test is for checking network interface
+# For the moment it tests only ethernet interface (but wifi could be easily added)
+#
+# We assume that all network driver are loaded
+# if not they probably have failed earlier in the boot process and their logged error will be catched by another test
+#
+
+# this function will try to up the interface
+# if already up, nothing done
+# arg1: network interface name
+kci_net_start()
+{
+	netdev=$1
+
+	ip link show "$netdev" |grep -q UP
+	if [ $? -eq 0 ];then
+		echo "SKIP: $netdev: interface already up"
+		return 0
+	fi
+
+	ip link set "$netdev" up
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: Fail to up interface"
+		return 1
+	else
+		echo "PASS: $netdev: set interface up"
+		NETDEV_STARTED=1
+	fi
+	return 0
+}
+
+# this function will try to setup an IP and MAC address on a network interface
+# Doing nothing if the interface was already up
+# arg1: network interface name
+kci_net_setup()
+{
+	netdev=$1
+
+	# do nothing if the interface was already up
+	if [ $NETDEV_STARTED -eq 0 ];then
+		return 0
+	fi
+
+	MACADDR='02:03:04:05:06:07'
+	ip link set dev $netdev address "$MACADDR"
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: Cannot set MAC address"
+	else
+		ip link show $netdev |grep -q "$MACADDR"
+		if [ $? -eq 0 ];then
+			echo "PASS: $netdev: set MAC address"
+		else
+			echo "FAIL: $netdev: Cannot set MAC address"
+		fi
+	fi
+
+	#check that the interface did not already have an IP
+	ip address show "$netdev" |grep '^[[:space:]]*inet'
+	if [ $? -eq 0 ];then
+		echo "SKIP: $netdev: already have an IP"
+		return 0
+	fi
+
+	# TODO what ipaddr to set ? DHCP ?
+	echo "SKIP: $netdev: set IP address"
+	return 0
+}
+
+# test an ethtool command
+# arg1: return code for not supported (see ethtool code source)
+# arg2: summary of the command
+# arg3: command to execute
+kci_netdev_ethtool_test()
+{
+	if [ $# -le 2 ];then
+		echo "SKIP: $netdev: ethtool: invalid number of arguments"
+		return 1
+	fi
+	$3 >/dev/null
+	ret=$?
+	if [ $ret -ne 0 ];then
+		if [ $ret -eq "$1" ];then
+			echo "SKIP: $netdev: ethtool $2 not supported"
+		else
+			echo "FAIL: $netdev: ethtool $2"
+			return 1
+		fi
+	else
+		echo "PASS: $netdev: ethtool $2"
+	fi
+	return 0
+}
+
+# test ethtool commands
+# arg1: network interface name
+kci_netdev_ethtool()
+{
+	netdev=$1
+
+	#check presence of ethtool
+	ethtool --version 2>/dev/null >/dev/null
+	if [ $? -ne 0 ];then
+		echo "SKIP: ethtool not present"
+		return 1
+	fi
+
+	TMP_ETHTOOL_FEATURES="$(mktemp)"
+	if [ ! -e "$TMP_ETHTOOL_FEATURES" ];then
+		echo "SKIP: Cannot create a tmp file"
+		return 1
+	fi
+
+	ethtool -k "$netdev" > "$TMP_ETHTOOL_FEATURES"
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: ethtool list features"
+		rm "$TMP_ETHTOOL_FEATURES"
+		return 1
+	fi
+	echo "PASS: $netdev: ethtool list features"
+	#TODO for each non fixed features, try to turn them on/off
+	rm "$TMP_ETHTOOL_FEATURES"
+
+	kci_netdev_ethtool_test 74 'dump' "ethtool -d $netdev"
+	kci_netdev_ethtool_test 94 'stats' "ethtool -S $netdev"
+	return 0
+}
+
+# stop a netdev
+# arg1: network interface name
+kci_netdev_stop()
+{
+	netdev=$1
+
+	if [ $NETDEV_STARTED -eq 0 ];then
+		echo "SKIP: $netdev: interface kept up"
+		return 0
+	fi
+
+	ip link set "$netdev" down
+	if [ $? -ne 0 ];then
+		echo "FAIL: $netdev: stop interface"
+		return 1
+	fi
+	echo "PASS: $netdev: stop interface"
+	return 0
+}
+
+# run all test on a netdev
+# arg1: network interface name
+kci_test_netdev()
+{
+	NETDEV_STARTED=0
+	IFACE_TO_UPDOWN="$1"
+	IFACE_TO_TEST="$1"
+	#check for VLAN interface
+	MASTER_IFACE="$(echo $1 | cut -d@ -f2)"
+	if [ ! -z "$MASTER_IFACE" ];then
+		IFACE_TO_UPDOWN="$MASTER_IFACE"
+		IFACE_TO_TEST="$(echo $1 | cut -d@ -f1)"
+	fi
+
+	NETDEV_STARTED=0
+	kci_net_start "$IFACE_TO_UPDOWN"
+
+	kci_net_setup "$IFACE_TO_TEST"
+
+	kci_netdev_ethtool "$IFACE_TO_TEST"
+
+	kci_netdev_stop "$IFACE_TO_UPDOWN"
+	return 0
+}
+
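The script drives everything through the ip and ethtool binaries; the "already up" check in kci_net_start, for instance, greps ip link output for UP. For reference, the same administrative-state check can be made directly against the kernel with the SIOCGIFFLAGS ioctl; a standalone C sketch, not part of this patch:

#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Return 1 if the interface is administratively up, 0 if down, -1 on error. */
static int netdev_is_up(const char *name)
{
	struct ifreq ifr;
	int fd, up;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
		close(fd);
		return -1;
	}

	up = !!(ifr.ifr_flags & IFF_UP);
	close(fd);
	return up;
}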
-e "$TMP_LIST_NETDEV" ];then + echo "FAIL: Cannot create a tmp file" + exit 1 +fi + +ip link show |grep '^[0-9]' | grep -oE '[[:space:]].*eth[0-9]*:|[[:space:]].*enp[0-9]s[0-9]:' | cut -d\ -f2 | cut -d: -f1> "$TMP_LIST_NETDEV" +while read netdev +do + kci_test_netdev "$netdev" +done < "$TMP_LIST_NETDEV" + +rm "$TMP_LIST_NETDEV" +exit 0 diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c index e62bb354820c..989f917068d1 100644 --- a/tools/testing/selftests/net/psock_fanout.c +++ b/tools/testing/selftests/net/psock_fanout.c @@ -71,7 +71,7 @@ /* Open a socket in a given fanout mode. * @return -1 if mode is bad, a valid socket otherwise */ -static int sock_fanout_open(uint16_t typeflags, int num_packets) +static int sock_fanout_open(uint16_t typeflags, uint16_t group_id) { int fd, val; @@ -81,8 +81,7 @@ static int sock_fanout_open(uint16_t typeflags, int num_packets) exit(1); } - /* fanout group ID is always 0: tests whether old groups are deleted */ - val = ((int) typeflags) << 16; + val = (((int) typeflags) << 16) | group_id; if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) { if (close(fd)) { perror("close packet"); @@ -113,6 +112,20 @@ static void sock_fanout_set_cbpf(int fd) } } +static void sock_fanout_getopts(int fd, uint16_t *typeflags, uint16_t *group_id) +{ + int sockopt; + socklen_t sockopt_len = sizeof(sockopt); + + if (getsockopt(fd, SOL_PACKET, PACKET_FANOUT, + &sockopt, &sockopt_len)) { + perror("failed to getsockopt"); + exit(1); + } + *typeflags = sockopt >> 16; + *group_id = sockopt & 0xfffff; +} + static void sock_fanout_set_ebpf(int fd) { const int len_off = __builtin_offsetof(struct __sk_buff, len); @@ -241,26 +254,26 @@ static void test_control_group(void) fprintf(stderr, "test: control multiple sockets\n"); - fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 20); + fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 0); if (fds[0] == -1) { fprintf(stderr, "ERROR: failed to open HASH socket\n"); exit(1); } if (sock_fanout_open(PACKET_FANOUT_HASH | - PACKET_FANOUT_FLAG_DEFRAG, 10) != -1) { + PACKET_FANOUT_FLAG_DEFRAG, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong flag defrag\n"); exit(1); } if (sock_fanout_open(PACKET_FANOUT_HASH | - PACKET_FANOUT_FLAG_ROLLOVER, 10) != -1) { + PACKET_FANOUT_FLAG_ROLLOVER, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong flag ro\n"); exit(1); } - if (sock_fanout_open(PACKET_FANOUT_CPU, 10) != -1) { + if (sock_fanout_open(PACKET_FANOUT_CPU, 0) != -1) { fprintf(stderr, "ERROR: joined group with wrong mode\n"); exit(1); } - fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 20); + fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 0); if (fds[1] == -1) { fprintf(stderr, "ERROR: failed to join group\n"); exit(1); @@ -271,6 +284,61 @@ static void test_control_group(void) } } +/* Test creating a unique fanout group ids */ +static void test_unique_fanout_group_ids(void) +{ + int fds[3]; + uint16_t typeflags, first_group_id, second_group_id; + + fprintf(stderr, "test: unique ids\n"); + + fds[0] = sock_fanout_open(PACKET_FANOUT_HASH | + PACKET_FANOUT_FLAG_UNIQUEID, 0); + if (fds[0] == -1) { + fprintf(stderr, "ERROR: failed to create a unique id group.\n"); + exit(1); + } + + sock_fanout_getopts(fds[0], &typeflags, &first_group_id); + if (typeflags != PACKET_FANOUT_HASH) { + fprintf(stderr, "ERROR: unexpected typeflags %x\n", typeflags); + exit(1); + } + + if (sock_fanout_open(PACKET_FANOUT_CPU, first_group_id) != -1) { + fprintf(stderr, "ERROR: joined group with wrong type.\n"); + 
@@ -241,26 +254,26 @@ static void test_control_group(void)
 
 	fprintf(stderr, "test: control multiple sockets\n");
 
-	fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 20);
+	fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 0);
 	if (fds[0] == -1) {
 		fprintf(stderr, "ERROR: failed to open HASH socket\n");
 		exit(1);
 	}
 	if (sock_fanout_open(PACKET_FANOUT_HASH |
-			     PACKET_FANOUT_FLAG_DEFRAG, 10) != -1) {
+			     PACKET_FANOUT_FLAG_DEFRAG, 0) != -1) {
 		fprintf(stderr, "ERROR: joined group with wrong flag defrag\n");
 		exit(1);
 	}
 	if (sock_fanout_open(PACKET_FANOUT_HASH |
-			     PACKET_FANOUT_FLAG_ROLLOVER, 10) != -1) {
+			     PACKET_FANOUT_FLAG_ROLLOVER, 0) != -1) {
 		fprintf(stderr, "ERROR: joined group with wrong flag ro\n");
 		exit(1);
 	}
-	if (sock_fanout_open(PACKET_FANOUT_CPU, 10) != -1) {
+	if (sock_fanout_open(PACKET_FANOUT_CPU, 0) != -1) {
 		fprintf(stderr, "ERROR: joined group with wrong mode\n");
 		exit(1);
 	}
-	fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 20);
+	fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 0);
 	if (fds[1] == -1) {
 		fprintf(stderr, "ERROR: failed to join group\n");
 		exit(1);
@@ -271,6 +284,61 @@
 	}
 }
 
+/* Test creating a unique fanout group ids */
+static void test_unique_fanout_group_ids(void)
+{
+	int fds[3];
+	uint16_t typeflags, first_group_id, second_group_id;
+
+	fprintf(stderr, "test: unique ids\n");
+
+	fds[0] = sock_fanout_open(PACKET_FANOUT_HASH |
+				  PACKET_FANOUT_FLAG_UNIQUEID, 0);
+	if (fds[0] == -1) {
+		fprintf(stderr, "ERROR: failed to create a unique id group.\n");
+		exit(1);
+	}
+
+	sock_fanout_getopts(fds[0], &typeflags, &first_group_id);
+	if (typeflags != PACKET_FANOUT_HASH) {
+		fprintf(stderr, "ERROR: unexpected typeflags %x\n", typeflags);
+		exit(1);
+	}
+
+	if (sock_fanout_open(PACKET_FANOUT_CPU, first_group_id) != -1) {
+		fprintf(stderr, "ERROR: joined group with wrong type.\n");
+		exit(1);
+	}
+
+	fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, first_group_id);
+	if (fds[1] == -1) {
+		fprintf(stderr,
+			"ERROR: failed to join previously created group.\n");
+		exit(1);
+	}
+
+	fds[2] = sock_fanout_open(PACKET_FANOUT_HASH |
+				  PACKET_FANOUT_FLAG_UNIQUEID, 0);
+	if (fds[2] == -1) {
+		fprintf(stderr,
+			"ERROR: failed to create a second unique id group.\n");
+		exit(1);
+	}
+
+	sock_fanout_getopts(fds[2], &typeflags, &second_group_id);
+	if (sock_fanout_open(PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID,
+			     second_group_id) != -1) {
+		fprintf(stderr,
+			"ERROR: specified a group id when requesting unique id\n");
+		exit(1);
+	}
+
+	if (close(fds[0]) || close(fds[1]) || close(fds[2])) {
+		fprintf(stderr, "ERROR: closing sockets\n");
+		exit(1);
+	}
+}
+
 static int test_datapath(uint16_t typeflags, int port_off,
 			 const int expect1[], const int expect2[])
 {
@@ -281,8 +349,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
 
 	fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
 
-	fds[0] = sock_fanout_open(typeflags, 20);
-	fds[1] = sock_fanout_open(typeflags, 20);
+	fds[0] = sock_fanout_open(typeflags, 0);
+	fds[1] = sock_fanout_open(typeflags, 0);
 	if (fds[0] == -1 || fds[1] == -1) {
 		fprintf(stderr, "ERROR: failed open\n");
 		exit(1);
@@ -349,10 +417,12 @@ int main(int argc, char **argv)
 	const int expect_cpu0[2][2] = { { 20, 0 }, { 20, 0 } };
 	const int expect_cpu1[2][2] = { { 0, 20 }, { 0, 20 } };
 	const int expect_bpf[2][2] = { { 15, 5 }, { 15, 20 } };
+	const int expect_uniqueid[2][2] = { { 20, 20}, { 20, 20 } };
 	int port_off = 2, tries = 5, ret;
 
 	test_control_single();
 	test_control_group();
+	test_unique_fanout_group_ids();
 
 	/* find a set of ports that do not collide onto the same socket */
 	ret = test_datapath(PACKET_FANOUT_HASH, port_off,
@@ -383,6 +453,9 @@ int main(int argc, char **argv)
 	ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
 			     expect_cpu1[0], expect_cpu1[1]);
 
+	ret |= test_datapath(PACKET_FANOUT_FLAG_UNIQUEID, port_off,
+			     expect_uniqueid[0], expect_uniqueid[1]);
+
 	if (ret)
 		return 1;
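For reference, a hypothetical end-to-end sketch of the PACKET_FANOUT_FLAG_UNIQUEID flow the new test exercises (requires CAP_NET_RAW and a uapi header that already defines the flag): the first socket passes group id 0 and lets the kernel pick a free id; later sockets join using the id read back with getsockopt.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd1, fd2, val;
	socklen_t len = sizeof(val);
	uint16_t group_id;

	fd1 = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
	fd2 = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
	if (fd1 < 0 || fd2 < 0)
		return 1;

	/* Ask the kernel for an unused group id (low 16 bits must be 0). */
	val = (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_UNIQUEID) << 16;
	if (setsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)))
		return 1;

	/* Read the assigned id back and join it from the second socket. */
	if (getsockopt(fd1, SOL_PACKET, PACKET_FANOUT, &val, &len))
		return 1;
	group_id = val & 0xffff;

	val = (PACKET_FANOUT_HASH << 16) | group_id;
	if (setsockopt(fd2, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)))
		return 1;

	printf("joined fanout group %u\n", group_id);
	return 0;
}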