// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/list_sort.h>
#include <linux/list.h>

#define MAX_LIST_LENGTH_BITS 20

/*
 * Returns a list organized in an intermediate format suited
 * to chaining of merge() calls: null-terminated, no reserved or
 * sentinel head node, "prev" links not maintained.
 */
static struct list_head *merge(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *a, struct list_head *b)
{
	struct list_head head, *tail = &head;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a?:b;
	return head.next;
}

/*
 * Combine final list merge with restoration of standard doubly-linked
 * list structure. This approach duplicates code from merge(), but
 * runs faster than the tidier alternatives of either a separate final
 * prev-link restoration pass, or maintaining the prev links
 * throughout.
 */
static void merge_and_restore_back_links(void *priv,
				int (*cmp)(void *priv, struct list_head *a,
					struct list_head *b),
				struct list_head *head,
				struct list_head *a, struct list_head *b)
{
	struct list_head *tail = head;
	u8 count = 0;

	while (a && b) {
		/* if equal, take 'a' -- important for sort stability */
		if ((*cmp)(priv, a, b) <= 0) {
			tail->next = a;
			a->prev = tail;
			a = a->next;
		} else {
			tail->next = b;
			b->prev = tail;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? : b;

	do {
		/*
		 * In worst cases this loop may run many iterations.
		 * Continue callbacks to the client even though no
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
		if (unlikely(!(++count)))
			(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
	} while (tail->next);

	tail->next = head;
	head->prev = tail;
}

/**
 * list_sort - sort a list
 * @priv: private data, opaque to list_sort(), passed to @cmp
 * @head: the list to sort
 * @cmp: the elements comparison function
 *
 * This function implements "merge sort", which has O(nlog(n))
 * complexity.
 *
 * The comparison function @cmp must return a negative value if @a
 * should sort before @b, and a positive value if @a should sort after
 * @b. If @a and @b are equivalent, and their original relative
 * ordering is to be preserved, @cmp must return 0.
 */
void list_sort(void *priv, struct list_head *head,
		int (*cmp)(void *priv, struct list_head *a,
			struct list_head *b))
{
	struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists
						-- last slot is a sentinel */
	int lev;  /* index into part[] */
	int max_lev = 0;
	struct list_head *list;

	if (list_empty(head))
		return;

	memset(part, 0, sizeof(part));

	head->prev->next = NULL;
	list = head->next;

	while (list) {
		struct list_head *cur = list;
		list = list->next;
		cur->next = NULL;

		for (lev = 0; part[lev]; lev++) {
			cur = merge(priv, cmp, part[lev], cur);
			part[lev] = NULL;
		}
		if (lev > max_lev) {
			if (unlikely(lev >= ARRAY_SIZE(part)-1)) {
				printk_once(KERN_DEBUG "list too long for efficiency\n");
				lev--;
			}
			max_lev = lev;
		}
		part[lev] = cur;
	}

	for (lev = 0; lev < max_lev; lev++)
		if (part[lev])
			list = merge(priv, cmp, part[lev], list);

	merge_and_restore_back_links(priv, cmp, head, part[max_lev], list);
}
EXPORT_SYMBOL(list_sort);
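
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a minimal client of list_sort() showing the cmp() contract documented
 * in the kernel-doc above. The names "example_item", "example_cmp" and
 * "example_sort" are hypothetical and exist only for this example.
 */
struct example_item {
	struct list_head list;
	int key;
};

/* Negative: @a sorts before @b; positive: after; 0: preserve order. */
static int example_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	const struct example_item *ia = list_entry(a, struct example_item, list);
	const struct example_item *ib = list_entry(b, struct example_item, list);

	if (ia->key < ib->key)
		return -1;
	if (ia->key > ib->key)
		return 1;
	return 0;	/* equal keys: returning 0 keeps the sort stable */
}

/* Sort a list of struct example_item nodes in ascending key order. */
static void __maybe_unused example_sort(struct list_head *items)
{
	list_sort(NULL, items, example_cmp);
}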