// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/crypto.h>

#include "zcomp.h"

static const char * const backends[] = {
#if IS_ENABLED(CONFIG_CRYPTO_LZO)
	"lzo",
	"lzo-rle",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4)
	"lz4",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC)
	"lz4hc",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_842)
	"842",
#endif
#if IS_ENABLED(CONFIG_CRYPTO_ZSTD)
	"zstd",
#endif
};

static void zcomp_strm_free(struct zcomp_strm *zstrm)
{
	if (!IS_ERR_OR_NULL(zstrm->tfm))
		crypto_free_comp(zstrm->tfm);
	free_pages((unsigned long)zstrm->buffer, 1);
	zstrm->tfm = NULL;
	zstrm->buffer = NULL;
}

/*
 * Initialize zcomp_strm structure with ->tfm initialized by backend, and
 * ->buffer. Return a negative value on error.
 */
static int zcomp_strm_init(struct zcomp_strm *zstrm, struct zcomp *comp)
{
	zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0);
	/*
	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
	 * case when compressed size is larger than the original one
	 */
	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) {
		zcomp_strm_free(zstrm);
		return -ENOMEM;
	}
	return 0;
}

bool zcomp_available_algorithm(const char *comp)
{
	/*
	 * The crypto API does not ignore a trailing newline, so make sure
	 * you don't supply a string containing one.
	 * This also means that we permit zcomp initialisation with any
	 * compression algorithm known to the crypto api.
	 */
	return crypto_has_comp(comp, 0, 0) == 1;
}

/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	bool known_algorithm = false;
	ssize_t sz = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(backends); i++) {
		if (!strcmp(comp, backends[i])) {
			known_algorithm = true;
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]);
		} else {
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]);
		}
	}

	/*
	 * Out-of-tree module known to crypto api or a missing
	 * entry in `backends'.
	 */
	if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1)
		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
				"[%s] ", comp);

	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}

/*
 * Lock and return the calling CPU's compression stream. Must be paired
 * with zcomp_stream_put().
 */
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
{
	local_lock(&comp->stream->lock);
	return this_cpu_ptr(comp->stream);
}

void zcomp_stream_put(struct zcomp *comp)
{
	local_unlock(&comp->stream->lock);
}

int zcomp_compress(struct zcomp_strm *zstrm,
		const void *src, unsigned int *dst_len)
{
	/*
	 * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized
	 * because sometimes we can end up with bigger compressed data for
	 * various reasons: for example, compression algorithms tend to add
	 * some padding to the compressed buffer. Speaking of padding, comp
	 * algorithm `842' pads the compressed length to a multiple of 8 and
	 * returns -ENOSPC when the dst memory is not big enough, which is
	 * not something that ZRAM wants to see. We can handle the
	 * `compressed_size > PAGE_SIZE' case easily in ZRAM, but once we
	 * receive an -ERRNO from the compressing backend we can't help it
	 * anymore. To make `842' happy we need to tell it the exact size of
	 * the dst buffer; zram_drv will take care of the case where the
	 * compressed buffer is too big.
	 */
	*dst_len = PAGE_SIZE * 2;

	return crypto_comp_compress(zstrm->tfm,
			src, PAGE_SIZE,
			zstrm->buffer, dst_len);
}

int zcomp_decompress(struct zcomp_strm *zstrm,
		const void *src, unsigned int src_len, void *dst)
{
	unsigned int dst_len = PAGE_SIZE;

	return crypto_comp_decompress(zstrm->tfm,
			src, src_len,
			dst, &dst_len);
}
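
/*
 * A minimal caller-side sketch, not part of the original file: this is
 * roughly how zram_drv.c is expected to drive the per-CPU stream API above
 * (take the stream, compress one page, copy the result out of
 * zstrm->buffer while the stream is still held, release the stream).
 * The function name, the dst argument and the __maybe_unused annotation
 * are illustrative only.
 */
static int __maybe_unused zcomp_compress_page_sketch(struct zcomp *comp,
						     const void *page,
						     void *dst,
						     unsigned int *comp_len)
{
	struct zcomp_strm *zstrm = zcomp_stream_get(comp);
	int ret;

	ret = zcomp_compress(zstrm, page, comp_len);
	if (!ret && *comp_len < PAGE_SIZE)
		/* copy out while the per-CPU buffer is still ours */
		memcpy(dst, zstrm->buffer, *comp_len);
	zcomp_stream_put(comp);

	return ret;
}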

/* CPU hotplug (CPUHP_ZCOMP_PREPARE) callback: allocate the new CPU's stream */
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;
	int ret;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	local_lock_init(&zstrm->lock);

	ret = zcomp_strm_init(zstrm, comp);
	if (ret)
		pr_err("Can't allocate a compression stream\n");
	return ret;
}

/* CPU hotplug teardown callback: free the dead CPU's stream */
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct zcomp *comp = hlist_entry(node, struct zcomp, node);
	struct zcomp_strm *zstrm;

	zstrm = per_cpu_ptr(comp->stream, cpu);
	zcomp_strm_free(zstrm);
	return 0;
}

static int zcomp_init(struct zcomp *comp)
{
	int ret;

	comp->stream = alloc_percpu(struct zcomp_strm);
	if (!comp->stream)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	if (ret < 0)
		goto cleanup;
	return 0;

cleanup:
	free_percpu(comp->stream);
	return ret;
}

void zcomp_destroy(struct zcomp *comp)
{
	cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
	free_percpu(comp->stream);
	kfree(comp);
}

/*
 * search available compressors for requested algorithm.
 * allocate new zcomp and initialize it. return compressing
 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
 * case of allocation error, or any other error potentially
 * returned by zcomp_init().
 */
struct zcomp *zcomp_create(const char *alg)
{
	struct zcomp *comp;
	int error;

	/*
	 * Crypto API will execute /sbin/modprobe if the compression module
	 * is not loaded yet. We must do it here, otherwise we are about to
	 * call /sbin/modprobe under CPU hot-plug lock.
	 */
	if (!zcomp_available_algorithm(alg))
		return ERR_PTR(-EINVAL);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->name = alg;
	error = zcomp_init(comp);
	if (error) {
		kfree(comp);
		return ERR_PTR(error);
	}
	return comp;
}
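
/*
 * A minimal lifecycle sketch, not part of the original file, assuming the
 * CPUHP_ZCOMP_PREPARE multi-state has already been registered with
 * zcomp_cpu_up_prepare()/zcomp_cpu_dead() as its callbacks (zram does this
 * at module init). The "lzo" algorithm name and the function itself are
 * illustrative only.
 */
static int __maybe_unused zcomp_lifecycle_sketch(void)
{
	struct zcomp *comp = zcomp_create("lzo");

	if (IS_ERR(comp))
		return PTR_ERR(comp);

	/* ... zcomp_stream_get()/zcomp_compress()/zcomp_stream_put() ... */

	zcomp_destroy(comp);
	return 0;
}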