// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"
#include "ctree.h"

#define LZO_LEN	4

/*
 * Btrfs LZO compression format
 *
 * Regular and inlined LZO compressed data extents consist of:
 *
 * 1.  Header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size (including the header) of compressed data.
 *
 * 2.  Segment(s)
 *     Variable size. Each segment includes one segment header, followed by
 *     data payload.
 *     One regular LZO compressed extent can have one or more segments.
 *     For an inlined LZO compressed extent, only one segment is allowed.
 *     One segment represents at most one sector of uncompressed data.
 *
 * 2.1 Segment header
 *     Fixed size. LZO_LEN (4) bytes long, LE32.
 *     Records the total size of the segment (not including the header).
 *     A segment header never crosses a sector boundary, thus it's possible
 *     to have at most 3 padding zeros at the end of the sector.
 *
 * 2.2 Data Payload
 *     Variable size. Its upper limit is lzo1x_worst_compress(sectorsize),
 *     which is 4419 for a 4KiB sectorsize.
 *
 * Example with 4K sectorsize:
 * Page 1:
 *          0     0x2   0x4   0x6   0x8   0xa   0xc   0xe     0x10
 * 0x0000   |  Header   | SegHdr 01 | Data payload 01 ...     |
 * ...
 * 0x0ff0   | SegHdr  N | Data payload  N     ...          |00|
 *                                                          ^^ padding zeros
 * Page 2:
 * 0x1000   | SegHdr N+1| Data payload N+1 ...                |
 */
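
/*
 * For reference, a minimal sketch of how a reader walks this format
 * (illustrative only, not used by this file; @data and @sectorsize are
 * hypothetical, error handling omitted):
 *
 *	u32 total = read_compress_length(data);		// 1. Header
 *	u32 cur = LZO_LEN;
 *
 *	while (cur < total) {
 *		// 2.1 Segment header
 *		u32 seg_len = read_compress_length(data + cur);
 *
 *		// 2.2 Data payload
 *		cur += LZO_LEN + seg_len;
 *		// Headers never cross sectors, skip any padding zeros.
 *		if (sectorsize - (cur % sectorsize) < LZO_LEN)
 *			cur = round_up(cur, sectorsize);
 *	}
 */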

struct workspace {
	void *mem;
	void *buf;	/* where decompressed data goes */
	void *cbuf;	/* where compressed data goes */
	struct list_head list;
};

static struct workspace_manager wsm;

void lzo_free_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);

	kvfree(workspace->buf);
	kvfree(workspace->cbuf);
	kvfree(workspace->mem);
	kfree(workspace);
}

struct list_head *lzo_alloc_workspace(unsigned int level)
{
	struct workspace *workspace;

	workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
	if (!workspace)
		return ERR_PTR(-ENOMEM);

	workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
	if (!workspace->mem || !workspace->buf || !workspace->cbuf)
		goto fail;

	INIT_LIST_HEAD(&workspace->list);

	return &workspace->list;
fail:
	lzo_free_workspace(&workspace->list);
	return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen;

	dlen = cpu_to_le32(len);
	memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZO_LEN);
	return le32_to_cpu(dlen);
}
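
/*
 * Note: segment headers land at arbitrary offsets after variable sized
 * payloads, so the LE32 length fields are not guaranteed to be naturally
 * aligned; that is why the two helpers above use memcpy() rather than a
 * direct u32 load/store.
 */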

/*
 * Will do:
 *
 * - Write a segment header into the destination
 * - Copy the compressed buffer into the destination
 * - Make sure we have enough space in the last sector to fit a segment header
 *   If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros.
 *
 * Will allocate new pages when needed.
 */
static int copy_compressed_data_to_page(char *compressed_data,
					size_t compressed_size,
					struct page **out_pages,
					unsigned long max_nr_page,
					u32 *cur_out,
					const u32 sectorsize)
{
	u32 sector_bytes_left;
	u32 orig_out;
	struct page *cur_page;
	char *kaddr;

	if ((*cur_out / PAGE_SIZE) >= max_nr_page)
		return -E2BIG;

	/*
	 * We never allow a segment header to cross a sector boundary; the
	 * previous iteration must have left enough space inside the sector.
	 */
	ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize);

	cur_page = out_pages[*cur_out / PAGE_SIZE];
	/* Allocate a new page */
	if (!cur_page) {
		cur_page = alloc_page(GFP_NOFS);
		if (!cur_page)
			return -ENOMEM;
		out_pages[*cur_out / PAGE_SIZE] = cur_page;
	}

	kaddr = kmap(cur_page);
	write_compress_length(kaddr + offset_in_page(*cur_out),
			      compressed_size);
	*cur_out += LZO_LEN;

	orig_out = *cur_out;

	/* Copy compressed data */
	while (*cur_out - orig_out < compressed_size) {
		u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize,
				     orig_out + compressed_size - *cur_out);

		kunmap(cur_page);

		if ((*cur_out / PAGE_SIZE) >= max_nr_page)
			return -E2BIG;

		cur_page = out_pages[*cur_out / PAGE_SIZE];
		/* Allocate a new page */
		if (!cur_page) {
			cur_page = alloc_page(GFP_NOFS);
			if (!cur_page)
				return -ENOMEM;
			out_pages[*cur_out / PAGE_SIZE] = cur_page;
		}
		kaddr = kmap(cur_page);

		memcpy(kaddr + offset_in_page(*cur_out),
		       compressed_data + *cur_out - orig_out, copy_len);

		*cur_out += copy_len;
	}

	/*
	 * Check if we can fit the next segment header into the remaining space
	 * of the sector.
	 */
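	/*
	 * E.g. with a 4K sectorsize: a segment header written at offset 0
	 * followed by a 4090 byte payload ends at offset 4094, leaving only
	 * 2 bytes, too small for the next header, so 2 zeros are padded;
	 * a 4000 byte payload would leave 92 bytes and need no padding.
	 */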
	sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out;
	if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0)
		goto out;

	/* The remaining size is not enough, pad it with zeros */
	memset(kaddr + offset_in_page(*cur_out), 0,
	       sector_bytes_left);
	*cur_out += sector_bytes_left;

out:
	kunmap(cur_page);
	return 0;
}

int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
		       u64 start, struct page **pages, unsigned long *out_pages,
		       unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
	struct page *page_in = NULL;
	char *sizes_ptr;
	const unsigned long max_nr_page = *out_pages;
	int ret = 0;
	/* Points to the file offset of input data */
	u64 cur_in = start;
	/* Points to the current output byte */
	u32 cur_out = 0;
	u32 len = *total_out;

	ASSERT(max_nr_page > 0);
	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/*
	 * Skip the header for now, we will later come back and write the total
	 * compressed size
	 */
	cur_out += LZO_LEN;
	while (cur_in < start + len) {
		char *data_in;
		const u32 sectorsize_mask = sectorsize - 1;
		u32 sector_off = (cur_in - start) & sectorsize_mask;
		u32 in_len;
		size_t out_len;

		/* Get the input page first */
		if (!page_in) {
			page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT);
			ASSERT(page_in);
		}

		/* Compress at most one sector of data each time */
		in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off);
		ASSERT(in_len);
		data_in = kmap(page_in);
		ret = lzo1x_1_compress(data_in +
				       offset_in_page(cur_in), in_len,
				       workspace->cbuf, &out_len,
				       workspace->mem);
		kunmap(page_in);
		if (ret < 0) {
			pr_debug("BTRFS: lzo in loop returned %d\n", ret);
			ret = -EIO;
			goto out;
		}

		ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
						   pages, max_nr_page,
						   &cur_out, sectorsize);
		if (ret < 0)
			goto out;

		cur_in += in_len;

		/*
		 * Check if we're making the output bigger than the input
		 * after two sectors; if so, give up.
		 */
		if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) {
			ret = -E2BIG;
			goto out;
		}

		/* Check if we have reached page boundary */
		if (IS_ALIGNED(cur_in, PAGE_SIZE)) {
			put_page(page_in);
			page_in = NULL;
		}
	}

	/* Store the size of all chunks of compressed data */
	sizes_ptr = kmap_local_page(pages[0]);
	write_compress_length(sizes_ptr, cur_out);
	kunmap_local(sizes_ptr);

	ret = 0;
	*total_out = cur_out;
	*total_in = cur_in - start;
out:
	if (page_in)
		put_page(page_in);
	*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
	return ret;
}

/*
 * Copy the compressed segment payload into @dest.
 *
 * The payload itself contains no padding; we only need to handle the page
 * switching.
 */
static void copy_compressed_segment(struct compressed_bio *cb,
				    char *dest, u32 len, u32 *cur_in)
{
	u32 orig_in = *cur_in;

	while (*cur_in < orig_in + len) {
		char *kaddr;
		struct page *cur_page;
		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
				     orig_in + len - *cur_in);

		ASSERT(copy_len);
		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];

		kaddr = kmap(cur_page);
		memcpy(dest + *cur_in - orig_in,
		       kaddr + offset_in_page(*cur_in),
		       copy_len);
		kunmap(cur_page);

		*cur_in += copy_len;
	}
}

int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	const u32 sectorsize = fs_info->sectorsize;
	char *kaddr;
	int ret;
	/* Compressed data length, can be unaligned */
	u32 len_in;
	/* Offset inside the compressed data */
	u32 cur_in = 0;
	/* Bytes decompressed so far */
	u32 cur_out = 0;

	kaddr = kmap(cb->compressed_pages[0]);
	len_in = read_compress_length(kaddr);
	kunmap(cb->compressed_pages[0]);
	cur_in += LZO_LEN;

	/*
	 * LZO header length check
	 *
	 * The total length should not exceed the maximum extent length,
	 * and all sectors should be used.
	 * If either condition is violated, the compressed extent is
	 * corrupted.
	 */
	if (len_in > min_t(size_t, BTRFS_MAX_COMPRESSED, cb->compressed_len) ||
	    round_up(len_in, sectorsize) < cb->compressed_len) {
		btrfs_err(fs_info,
			"invalid lzo header, lzo len %u compressed len %u",
			len_in, cb->compressed_len);
		return -EUCLEAN;
	}
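	/*
	 * E.g. with a 4K sectorsize and a compressed_len of 8192, any len_in
	 * outside (4096, 8192] is rejected: a smaller value would leave the
	 * last sector entirely unused.
	 */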

	/* Go through each lzo segment */
	while (cur_in < len_in) {
		struct page *cur_page;
		/* Length of the compressed segment */
		u32 seg_len;
		u32 sector_bytes_left;
		size_t out_len = lzo1x_worst_compress(sectorsize);

		/*
		 * We should always have enough space for one segment header
		 * inside current sector.
		 */
		ASSERT(cur_in / sectorsize ==
		       (cur_in + LZO_LEN - 1) / sectorsize);
		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
		ASSERT(cur_page);
		kaddr = kmap(cur_page);
		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
		kunmap(cur_page);
		cur_in += LZO_LEN;

		/* Copy the compressed segment payload into workspace */
		copy_compressed_segment(cb, workspace->cbuf, seg_len, &cur_in);

		/* Decompress the data */
		ret = lzo1x_decompress_safe(workspace->cbuf, seg_len,
					    workspace->buf, &out_len);
		if (ret != LZO_E_OK) {
			btrfs_err(fs_info, "failed to decompress");
			ret = -EIO;
			goto out;
		}

		/* Copy the data into inode pages */
		ret = btrfs_decompress_buf2page(workspace->buf, out_len, cb, cur_out);
		cur_out += out_len;

		/* All data read, exit */
		if (ret == 0)
			goto out;
		ret = 0;

		/* Check if the sector has enough space for a segment header */
		sector_bytes_left = sectorsize - (cur_in % sectorsize);
		if (sector_bytes_left >= LZO_LEN)
			continue;

		/* Skip the padding zeros */
		cur_in += sector_bytes_left;
	}
out:
	if (!ret)
		zero_fill_bio(cb->orig_bio);
	return ret;
}

int lzo_decompress(struct list_head *ws, unsigned char *data_in,
		struct page *dest_page, unsigned long start_byte, size_t srclen,
		size_t destlen)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	size_t in_len;
	size_t out_len;
	size_t max_segment_len = lzo1x_worst_compress(PAGE_SIZE);
	int ret = 0;
	char *kaddr;
	unsigned long bytes;

	if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
		return -EUCLEAN;

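	/*
	 * An inline extent carries exactly one segment: LZO_LEN bytes of
	 * total size (which must equal srclen), LZO_LEN bytes of segment
	 * size, then the payload, hence the two length reads below and the
	 * "+ LZO_LEN * 2" bound above.
	 */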
	in_len = read_compress_length(data_in);
	if (in_len != srclen)
		return -EUCLEAN;
	data_in += LZO_LEN;

	in_len = read_compress_length(data_in);
	if (in_len != srclen - LZO_LEN * 2) {
		ret = -EUCLEAN;
		goto out;
	}
	data_in += LZO_LEN;

	out_len = PAGE_SIZE;
	ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
	if (ret != LZO_E_OK) {
		pr_warn("BTRFS: decompress failed!\n");
		ret = -EIO;
		goto out;
	}

	if (out_len < start_byte) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The caller is already checking against PAGE_SIZE, but let's move
	 * this check closer to the memcpy()/memset().
	 */
	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
	bytes = min_t(unsigned long, destlen, out_len - start_byte);

	kaddr = kmap_local_page(dest_page);
	memcpy(kaddr, workspace->buf + start_byte, bytes);

	/*
	 * btrfs_getblock() is doing a zero on the tail of the page too, but
	 * this will cover anything missing from the decompressed data.
	 */
	if (bytes < destlen)
		memset(kaddr + bytes, 0, destlen - bytes);
	kunmap_local(kaddr);
out:
	return ret;
}

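/*
 * LZO has no tunable compression levels (unlike zlib or zstd), so a single
 * level serves as both the maximum and the default.
 */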
const struct btrfs_compress_op btrfs_lzo_compress = {
	.workspace_manager	= &wsm,
	.max_level		= 1,
	.default_level		= 1,
};