Loading...
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KASAN test module: every function below performs a DELIBERATE invalid
 * memory access (out-of-bounds or use-after-free) so that KASAN emits a
 * report.  Nothing in this file is a real bug to be "fixed" — the bad
 * accesses are the test payload.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan test: %s " fmt, __func__

#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "kasan.h"

/*
 * Exercise the usercopy API family: each call copies size + 1 bytes
 * through a buffer that is only size bytes long, which should trigger an
 * out-of-bounds KASAN report on the kernel-side buffer (kmem).
 */
static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	/*
	 * One granule short of 128 so that size + 1 still lands inside the
	 * allocation's redzone rather than in a neighbouring object.
	 */
	size_t size = 128 - KASAN_GRANULE_SIZE;
	int __maybe_unused unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	/* Back the user side of the copies with an anonymous mapping. */
	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

	/*
	 * Stop the compiler from treating size as a compile-time constant;
	 * otherwise the usercopy hardening checks could be resolved (or the
	 * overflowing copy elided) at build time instead of being exercised
	 * at run time.
	 */
	OPTIMIZER_HIDE_VAR(size);

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}

/* Object freed from an RCU callback; the pointer is kept in a global so the
 * callback can be handed to call_rcu() from kasan_rcu_uaf(). */
static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

/*
 * RCU reclaim callback: frees the object and then INTENTIONALLY reads a
 * field of it.  The volatile cast keeps the dead load from being optimized
 * away, so KASAN sees a use-after-free inside an RCU callback.
 */
static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp = container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
	((volatile struct kasan_rcu_info *)fp)->i;
}

/*
 * Allocate an object and schedule kasan_rcu_reclaim() for it, so the
 * use-after-free above fires asynchronously after a grace period.
 */
static noinline void __init kasan_rcu_uaf(void)
{
	struct kasan_rcu_info *ptr;

	pr_info("use-after-free in kasan_rcu_reclaim\n");
	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	global_rcu_ptr = rcu_dereference_protected(ptr, NULL);
	call_rcu(&global_rcu_ptr->rcu, kasan_rcu_reclaim);
}

/* Work handler that frees its own work item — sets up the UAF below. */
static noinline void __init kasan_workqueue_work(struct work_struct *work)
{
	kfree(work);
}

/*
 * Queue a work item whose handler frees it, flush the queue via
 * destroy_workqueue(), then INTENTIONALLY read the freed item (volatile
 * cast prevents the load from being elided) to provoke a KASAN
 * use-after-free report on workqueue-managed memory.
 */
static noinline void __init kasan_workqueue_uaf(void)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_wq_test");
	if (!workqueue) {
		pr_err("Allocation failed\n");
		return;
	}
	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	if (!work) {
		pr_err("Allocation failed\n");
		return;
	}

	INIT_WORK(work, kasan_workqueue_work);
	queue_work(workqueue, work);
	/* destroy_workqueue() flushes, so the handler has freed work here. */
	destroy_workqueue(workqueue);

	pr_info("use-after-free on workqueue\n");
	((volatile struct work_struct *)work)->data;
}

static int __init test_kasan_module_init(void)
{
	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	bool multishot = kasan_save_enable_multi_shot();

	copy_user_test();
	kasan_rcu_uaf();
	kasan_workqueue_uaf();

	kasan_restore_multi_shot(multishot);

	/* Fail the load on purpose so the test module never stays resident. */
	return -EAGAIN;
}

module_init(test_kasan_module_init);
MODULE_LICENSE("GPL");