// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
#include "bpf_arena_common.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 2); /* arena of two pages close to 32-bit boundary */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
#else
	__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
#endif
} arena SEC(".maps");

/* Exhaust the two-page arena, then free and reallocate one page. */
SEC("syscall")
__success __retval(0)
int basic_alloc1(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile int __arena *page1, *page2, *no_page, *page3;

	page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	*page1 = 1;
	page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page2)
		return 2;
	*page2 = 2;
	no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (no_page) /* the arena has only two pages, so this must fail */
		return 3;
	if (*page1 != 1)
		return 4;
	if (*page2 != 2)
		return 5;
	bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
	if (*page1 != 1)
		return 6;
	if (*page2 != 0) /* use-after-free should return 0 */
		return 7;
	page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page3)
		return 8;
	*page3 = 3;
	if (page2 != page3) /* the freed page is expected to be reused */
		return 9;
	if (*page1 != 1)
		return 10;
#endif
	return 0;
}

/* Accesses outside the allocated range: writes are dropped, reads return 0. */
SEC("syscall")
__success __retval(0)
int basic_alloc2(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page1, *page2, *page3, *page4;

	page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	page2 = page1 + __PAGE_SIZE;
	page3 = page1 + __PAGE_SIZE * 2;
	page4 = page1 - __PAGE_SIZE;
	*page1 = 1;
	*page2 = 2;
	*page3 = 3;
	*page4 = 4;
	if (*page1 != 1)
		return 1;
	if (*page2 != 2)
		return 2;
	if (*page3 != 0)
		return 3;
	if (*page4 != 0)
		return 4;
	bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
	if (*page1 != 0)
		return 5;
	if (*page2 != 0)
		return 6;
	if (*page3 != 0)
		return 7;
	if (*page4 != 0)
		return 8;
#endif
	return 0;
}

struct bpf_arena___l {
	struct bpf_map map;
} __attribute__((preserve_access_index));

/* Pass the arena to the kfunc via a CO-RE pointer to its embedded struct bpf_map. */
SEC("syscall")
__success __retval(0) __log_level(2)
int basic_alloc3(void *ctx)
{
	struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;
	volatile char __arena *pages;

	pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
	if (!pages)
		return 1;
	return 0;
}

/* ctx->map is a trusted bpf_map pointer, so the kfunc call is accepted. */
SEC("iter.s/bpf_map")
__success __log_level(2)
int iter_maps1(struct bpf_iter__bpf_map *ctx)
{
	struct bpf_map *map = ctx->map;

	if (!map)
		return 0;
	bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0);
	return 0;
}

/* A seq_file pointer is not a bpf_map; the verifier must reject the call. */
SEC("iter.s/bpf_map")
__failure __msg("expected pointer to STRUCT bpf_map")
int iter_maps2(struct bpf_iter__bpf_map *ctx)
{
	struct seq_file *seq = ctx->meta->seq;

	bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0);
	return 0;
}

/* inner_map_meta is an untrusted bpf_map pointer; the verifier must reject it. */
SEC("iter.s/bpf_map")
__failure __msg("untrusted_ptr_bpf_map")
int iter_maps3(struct bpf_iter__bpf_map *ctx)
{
	struct bpf_map *map = ctx->map;

	if (!map)
		return 0;
	bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";