aboutsummaryrefslogtreecommitdiffstats
diff options
authorAlexei Starovoitov <ast@kernel.org>2023-07-25 17:20:59 -0700
committerAlexei Starovoitov <ast@kernel.org>2024-01-25 17:43:30 -0800
commit8282490ee6de4e3e8877978f7aa82709aa265e3e (patch)
tree36fa0c78260ea777ac1d561cf73e6b83608b58be
parent4863262990d12329a5645a9ce83e68497684cca8 (diff)
downloadbpf-uptr.tar.gz
selftests/bpf: Add uptr test.uptr
Implement a simple hash table where all data structures are shared between the bpf program and the user space process. The implementation is in uptr_htab.h. It is compiled twice: once as a bpf program and once as native C code. Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uptr.c95
-rw-r--r--tools/testing/selftests/bpf/progs/uptr.c61
-rw-r--r--tools/testing/selftests/bpf/uptr_htab.h193
3 files changed, 349 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/uptr.c b/tools/testing/selftests/bpf/prog_tests/uptr.c
new file mode 100644
index 00000000000000..5fa47c476729f9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/uptr.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <sys/mman.h>
+#include <network_helpers.h>
+
+#include "uptr.skel.h"
+
+/*
+ * User-space shims: uptr_htab.h is dual compiled (as a bpf program and as
+ * native C), so give the bpf-side annotations and kfuncs no-op native forms.
+ */
+#undef offsetof
+#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
+
+/* redefined container_of() to ensure we use the above offsetof() macro */
+#undef container_of
+#define container_of(ptr, type, member) \
+	({ \
+		void *__mptr = (void *)(ptr); \
+		((type *)(__mptr - offsetof(type, member))); \
+	})
+
+/* the arena address-space tag is meaningless in user space */
+#define __uptr
+#define PAGE_SIZE 4096
+
+/* stubs so uptr_htab.h links natively; the user-space build only walks
+ * already-populated data and never actually allocates or frees pages
+ */
+char arena[1];
+void __uptr* bpf_uptr_alloc_pages(void *map, __u32 page_cnt) { return 0; }
+void bpf_uptr_free_pages(void *map, void __uptr *ptr, __u32 page_cnt) { }
+
+#define bpf_cast_as(var, addr_space) /* nop for user space */
+
+#include "uptr_htab.h"
+
+/* ctx blob handed to the syscall prog via bpf_prog_test_run_opts().
+ * NOTE(review): only max_entries/log_buf/log_size are set by this test;
+ * the fd fields look unused here -- presumably kept for layout parity
+ * with other arena tests. Confirm against the prog side.
+ */
+struct args {
+	__u64 log_buf;		/* user buffer address (cast from pointer) */
+	__u32 log_size;
+	int max_entries;
+	int map_fd;
+	int prog_fd;
+	int btf_fd;
+};
+
+/* Load the uptr skeleton, run the bpf prog that builds a hash table in the
+ * shared arena, then walk that table directly from user space and verify
+ * every key maps to an equal value.
+ */
+static void test_uptr_basic(void)
+{
+	static char verifier_log[8192];
+	struct args ctx = {
+		.max_entries = 1024,
+		.log_buf = (uintptr_t) verifier_log,
+		.log_size = sizeof(verifier_log),
+	};
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.ctx_in = &ctx,
+		.ctx_size_in = sizeof(ctx),
+	);
+	struct uptr *skel;
+	size_t arena_sz;
+	void *area;
+	int ret, i;
+	struct htab *htab, *htab2;
+
+	skel = uptr__open_and_load();
+	/* fix: assert tag said "rbtree__open_and_load" (copy-paste from the
+	 * rbtree test); this is the uptr skeleton
+	 */
+	if (!ASSERT_OK_PTR(skel, "uptr__open_and_load"))
+		return;
+
+	area = bpf_map__initial_value(skel->maps.arena, &arena_sz);
+	/* don't dereference a NULL mapping if initial_value lookup failed */
+	if (!ASSERT_OK_PTR(area, "bpf_map__initial_value"))
+		goto out;
+	/* fault-in a page with pgoff == 0 as sanity check */
+	*(volatile int *)area = 0x55aa;
+
+	/* bpf prog will allocate more pages */
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.uptr_basic), &opts);
+	ASSERT_OK(ret, "ret");
+	ASSERT_OK(opts.retval, "retval");
+
+	/* hack1: internal detail of bpf_alloc() from uptr_htab.h */
+	htab = area + 4096 * 2 - sizeof(*htab) - 8;
+	/* hack2: the verifier doesn't set _yet_ upper 32-bit of uptr when prog stores it into bss */
+	htab2 = (void *)(((__u64)area) & ~(__u64)~0U) + (__u32)(long)skel->bss->htab_for_user;
+	ASSERT_EQ(htab, htab2, "two ways to compute htab pointer");
+	printf("htab_for_user %p\n", skel->bss->htab_for_user);
+	printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets);
+	for (i = 0; htab->buckets && i < 16; i++) {
+		/*
+		 * Walk htab buckets and link lists since all pointers are correct,
+		 * though they were written by bpf program.
+		 */
+		int val = htab_lookup_elem(htab, i);
+
+		ASSERT_EQ(i, val, "key == value");
+	}
+out:
+	uptr__destroy(skel);
+}
+
+/* entry point picked up by the test_progs runner */
+void test_uptr_success(void)
+{
+	if (test__start_subtest("uptr_basic"))
+		test_uptr_basic();
+}
+
diff --git a/tools/testing/selftests/bpf/progs/uptr.c b/tools/testing/selftests/bpf/progs/uptr.c
new file mode 100644
index 00000000000000..f8d873cb8e8628
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/uptr.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "bpf_experimental.h"
+
+#ifndef WRITE_ONCE
+#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
+#endif
+
+/* __uptr tags arena pointers: address_space(1) for the compiler plus a
+ * "uptr" btf_type_tag so the kernel side can recognize them in BTF
+ */
+#define __uptr __attribute__((address_space(1))) __attribute__((btf_type_tag("uptr")))
+//#define __uptr __attribute__((btf_type_tag("uptr")))
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+/* mmap-able arena shared between this prog and the user space test */
+struct {
+	__uint(type, BPF_MAP_TYPE_ARENA);
+	__uint(map_flags, BPF_F_MMAPABLE);
+	__uint(max_entries, 1u << 20);
+	__type(key, __u64);
+	__type(value, __u64);
+} arena SEC(".maps");
+
+/* arena page allocator kfuncs */
+void __uptr* bpf_uptr_alloc_pages(void *map, __u32 page_cnt) __ksym;
+void bpf_uptr_free_pages(void *map, void __uptr *ptr, __u32 page_cnt) __ksym;
+
+#include "uptr_htab.h"
+
+/* volatile so the verifier can't constant-fold the loop values below */
+static volatile int zero = 0;
+
+/* published via .bss so the user-space side can locate the table */
+void __uptr *htab_for_user;
+
+/* Build a 1000-element htab in the arena, then update every key a second
+ * time so the replace path (add new + delete old) is exercised too.
+ * NOTE(review): bpf_alloc() result is not NULL-checked before htab_init().
+ */
+SEC("syscall")
+int uptr_basic(void *ctx)
+{
+	struct htab __uptr *htab;
+	__u64 i;
+
+	htab = bpf_alloc(sizeof(*htab));
+	bpf_cast_as(htab, 1);
+	htab_init(htab);
+	/* let the user-space test find the table */
+	htab_for_user = htab;
+
+	/* first run. No old elems in the table */
+	bpf_for(i, 0, 1000) {
+		zero = i;
+		htab_update_elem(htab, zero, zero);
+	}
+	/* should replace all elems with new ones */
+	bpf_for(i, 0, 1000) {
+		zero = i;
+		htab_update_elem(htab, zero, zero);
+	}
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/uptr_htab.h b/tools/testing/selftests/bpf/uptr_htab.h
new file mode 100644
index 00000000000000..9f82f3c9e83157
--- /dev/null
+++ b/tools/testing/selftests/bpf/uptr_htab.h
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+struct uptr_list_node;
+
+/* hlist-style node; lives in the arena address space */
+typedef struct uptr_list_node __uptr uptr_list_node_t;
+
+struct uptr_list_node {
+	uptr_list_node_t *next;
+	/* points at the previous node's ->next (or the head's ->first) */
+	uptr_list_node_t * __uptr *pprev;
+};
+
+struct uptr_list_head {
+	struct uptr_list_node __uptr *first;
+};
+typedef struct uptr_list_head __uptr uptr_list_head_t;
+
+struct htab_bucket {
+	struct uptr_list_head head;
+};
+typedef struct htab_bucket __uptr htab_bucket_t;
+
+/* hash table control block; itself allocated in the arena */
+struct htab {
+	htab_bucket_t *buckets;
+	int n_buckets;	/* must be a power of two, see __select_bucket() */
+	int pad;
+};
+typedef struct htab __uptr htab_t;
+
+/* map @hash to its bucket; masking relies on n_buckets being a power of two */
+static inline htab_bucket_t *__select_bucket(htab_t *htab, __u32 hash)
+{
+	return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+/* list head of the bucket that @hash falls into */
+static inline uptr_list_head_t *select_bucket(htab_t *htab, __u32 hash)
+{
+	return &__select_bucket(htab, hash)->head;
+}
+
+/* one key/value element, linked into a bucket's list */
+struct hashtab_elem {
+	struct uptr_list_node hash_node;
+	int hash;
+	int key;
+	int value;
+	int pad;
+};
+typedef struct hashtab_elem __uptr hashtab_elem_t;
+
+#define ulist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define ulist_entry_safe(ptr, type, member) \
+	({ typeof(*ptr) * ___ptr = (ptr); \
+	   ___ptr ? ulist_entry(___ptr, type, member) : NULL; \
+	})
+
+/* NOTE: requires an 'int __i' declared in the caller's scope; the walk is
+ * capped at 10 iterations, presumably to keep the loop bounded for the
+ * bpf verifier -- confirm before relying on longer chains
+ */
+#define ulist_for_each_entry(pos, head, member) \
+	for (__i = 0, pos = ulist_entry_safe((head)->first, typeof(*(pos)), member);\
+	     pos && __i < 10; \
+	     pos = ulist_entry_safe((pos)->member.next, typeof(*(pos)), member), __i++)
+
+/* insert @n at the front of @h (hlist_add_head analogue) */
+static inline void ulist_add_head(uptr_list_node_t *n, uptr_list_head_t *h)
+{
+	uptr_list_node_t *first = h->first;
+	WRITE_ONCE(n->next, first);
+	if (first)
+		WRITE_ONCE(first->pprev, &n->next);
+	WRITE_ONCE(h->first, n);
+	WRITE_ONCE(n->pprev, &h->first);
+}
+
+/* unlink @n from its list; leaves the node's own links untouched */
+static inline void __ulist_del(uptr_list_node_t *n)
+{
+	uptr_list_node_t *next = n->next;
+	uptr_list_node_t * __uptr *pprev = n->pprev;
+
+	WRITE_ONCE(*pprev, next);
+	if (next)
+		WRITE_ONCE(next->pprev, pprev);
+}
+
+#define POISON_POINTER_DELTA 0
+
+/* poison values to catch use-after-del (mirrors kernel LIST_POISON1/2) */
+#define LIST_POISON1 ((void __uptr *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void __uptr *) 0x122 + POISON_POINTER_DELTA)
+
+/* unlink @n and poison its links */
+static inline void ulist_del(uptr_list_node_t *n)
+{
+	__ulist_del(n);
+	n->next = LIST_POISON1;
+	n->pprev = LIST_POISON2;
+}
+
+/* find the element matching @hash and @key in bucket @head, NULL if absent.
+ * NOTE(review): @hash is __u32 while elem->hash is int; the comparison
+ * promotes to unsigned -- fine for the identity hash used here
+ */
+static hashtab_elem_t *lookup_elem_raw(uptr_list_head_t *head, __u32 hash, int key)
+{
+	hashtab_elem_t *l;
+	int __i;	/* consumed by ulist_for_each_entry() */
+
+	ulist_for_each_entry(l, head, hash_node)
+		if (l->hash == hash && l->key == key)
+			return l;
+
+	return NULL;
+}
+
+/* identity hash -- good enough for this selftest */
+static int htab_hash(int key)
+{
+	return key;
+}
+
+/* page_frag allocator state: current arena page and bump offset within it */
+void __uptr *cur_page;
+int cur_offset;
+
+/* Simple page_frag allocator: carve @size bytes from the tail of the
+ * current arena page, grabbing a fresh page when the current one is full.
+ * The last 8 bytes of every page hold a live-object counter consumed by
+ * bpf_free().
+ * NOTE(review): assumes size <= PAGE_SIZE - 8; a larger request would fall
+ * through with a negative offset once the refill retries are exhausted
+ */
+static void __uptr* bpf_alloc(int size)
+{
+	__u64 __uptr *obj_cnt;
+	void __uptr *page = cur_page;
+	int offset;
+	int retry = 0;
+
+	bpf_cast_as(page, 1);
+	if (!page) {
+refill:
+		page = bpf_uptr_alloc_pages(&arena, 1);
+		bpf_cast_as(page, 1);
+		cur_page = page;
+		cur_offset = PAGE_SIZE - 8;
+		obj_cnt = page + PAGE_SIZE - 8;
+		*obj_cnt = 0;
+	} else {
+		obj_cnt = page + PAGE_SIZE - 8;
+	}
+
+	offset = cur_offset - size;
+	/* current page too full: start a new one (bounded for the verifier) */
+	if (offset < 0 && retry++ < 3)
+		goto refill;
+
+	(*obj_cnt)++;
+	cur_offset = offset;
+	return page + offset;
+}
+
+/* drop one object from the page containing @addr; return the page to the
+ * arena once its last object is freed
+ */
+static void bpf_free(void __uptr *addr)
+{
+	__u64 __uptr *obj_cnt;
+
+	/* round down to the page start to reach the counter slot */
+	addr = (void __uptr *)(((long)addr) & ~(PAGE_SIZE - 1));
+	obj_cnt = addr + PAGE_SIZE - 8;
+	if (--(*obj_cnt) == 0)
+		bpf_uptr_free_pages(&arena, addr, 1);
+}
+
+/* return the value stored for @key, or 0 when the key is absent
+ * (0 is ambiguous with a stored value of 0 -- acceptable for this test)
+ */
+__weak int htab_lookup_elem(htab_t *htab, int key)
+{
+	hashtab_elem_t *l_old;
+	uptr_list_head_t *head;
+
+	head = select_bucket(htab, key);
+	l_old = lookup_elem_raw(head, htab_hash(key), key);
+	if (l_old)
+		return l_old->value;
+	return 0;
+}
+
+/* insert or replace @key with @value.
+ * The new element is linked in before the stale one is removed --
+ * presumably so a concurrent reader never observes the key missing;
+ * confirm the intended ordering guarantee.
+ * NOTE(review): bpf_alloc() result is not NULL-checked before the stores.
+ */
+__weak int htab_update_elem(struct htab __uptr *htab, int key, int value)
+{
+	hashtab_elem_t *l_new = NULL, *l_old;
+	uptr_list_head_t *head;
+
+	head = select_bucket(htab, key);
+	l_old = lookup_elem_raw(head, htab_hash(key), key);
+
+	l_new = bpf_alloc(sizeof(*l_new));
+	l_new->key = key;
+	l_new->hash = htab_hash(key);
+	l_new->value = value;
+
+	/* add replacement first, then unlink and free the old element */
+	ulist_add_head(&l_new->hash_node, head);
+	if (l_old) {
+		ulist_del(&l_old->hash_node);
+		bpf_free(l_old);
+	}
+	return 0;
+}
+
+/* allocate a 2-page bucket array and size n_buckets to fill it exactly
+ * (2 * 4096 / 8 = 1024 buckets, a power of two as __select_bucket() needs)
+ */
+void htab_init(htab_t *htab)
+{
+	void __uptr *buckets = bpf_uptr_alloc_pages(&arena, 2);
+
+	bpf_cast_as(buckets, 1);
+	htab->buckets = buckets;
+	htab->n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket);
+}