// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
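/*
 * Minimal gap for in-place decompression: the page-aligned end of the
 * output must exceed the end of the decompressed data by at least this
 * much.  The formula matches the worst-case bound used by upstream LZ4
 * (roughly srcsize/256 plus a 32-byte constant).
 */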
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif

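/*
 * Parse the LZ4 on-disk configuration from either the full
 * z_erofs_lz4_cfgs block or, for older images, the legacy
 * lz4_max_distance superblock field, then size the per-CPU buffers so
 * one maximal pcluster fits.
 */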
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
					struct page **pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	/* a non-NULL kaddr means all output pages are physically contiguous */
	return kaddr ? 1 : 0;
}

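/*
 * Map the compressed input for decompression.  On success, *maptype
 * records how the mapping is undone later:
 *   0 - the single input page stays kmapped;
 *   1 - the input pages are mapped contiguously via vm_map_ram();
 *   2 - the compressed data was copied into a per-CPU buffer.
 */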
static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Or copy the compressed data, which may overlap the in-place
	 * output, into a per-CPU buffer.
	 */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

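/*
 * Decompress a single pcluster into `out`.  With the 0padding feature,
 * the compressed data is preceded by zero bytes up to the block
 * boundary, so the leading zeroes are skipped first to locate the real
 * starting offset (inputmargin).
 */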
static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
				      u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_lz4_handle_inplace_io(rq, headpage, &inputmargin,
					    &maptype, support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

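/*
 * Decompress an LZ4 pcluster: take the single-page fast path when
 * possible, otherwise fall back to the general path that maps the
 * output pages (directly if they are physically contiguous, or via
 * vm_map_ram() with bounce pages plugging the holes).
 */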
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* only one optimized fast path for non-bigpcluster cases for now */
	if (rq->inputsize <= PAGE_SIZE && nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
	if (ret < 0)
		return ret;
	if (ret) {
		/* output pages are physically contiguous, use them directly */
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}

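/*
 * A "shifted" pcluster holds plain (uncompressed) data; moving it to
 * pageofs_out in the destination touches at most two output pages,
 * since the source data occupies a single page.
 */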
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			/* shift the tail within the in-place page itself */
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}

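/* indexed by the on-disk Z_EROFS_COMPRESSION_* algorithm ids */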
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

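/* dispatch to the decompressor backend registered for rq->alg */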
int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}