/*
 * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
 *
 * Author: Lasse Collin <lasse.collin@tukaani.org>
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 */

/*
 * Important notes about in-place decompression
 *
 * At least on x86, the kernel is decompressed in place: the compressed data
 * is placed at the end of the output buffer, and the decompressor overwrites
 * most of the compressed data. There must be enough safety margin to
 * guarantee that the write position is always behind the read position.
 *
 * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
 * Note that the margin with XZ is bigger than with Deflate (gzip)!
 *
 * The worst case for in-place decompression is that the beginning of
 * the file is compressed extremely well, and the rest of the file is
 * uncompressible. Thus, we must look for worst-case expansion when the
 * compressor is encoding uncompressible data.
 *
 * The structure of the .xz file in case of a compressed kernel is as follows.
 * Sizes (in bytes) of the fields are in parentheses.
 *
 *    Stream Header  (12)
 *    Block Header:
 *      Block Header  (8-12)
 *      Compressed Data  (N)
 *      Block Padding  (0-3)
 *      CRC32  (4)
 *    Index  (8-20)
 *    Stream Footer  (12)
 *
 * Normally there is exactly one Block, but let's assume that there are
 * 2-4 Blocks just in case. Because the Stream Header and the Block Header
 * of the first Block don't make the decompressor produce any uncompressed
 * data, we can leave them out of our calculations. Block Headers of possible
 * additional Blocks still have to be taken into account. With these
 * assumptions, it is safe to assume that the total header overhead is
 * less than 128 bytes.
 *
 * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
 * doesn't change the size of the data, it is enough to calculate the
 * safety margin for LZMA2.
 *
 * LZMA2 stores the data in chunks. Each chunk has a header whose size is
 * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
 * the maximum chunk header size is 8 bytes. After the chunk header, there
 * may be up to 64 KiB of actual payload in the chunk. Often the payload is
 * quite a bit smaller though; to be safe, let's assume that an average
 * chunk has only 32 KiB of payload.
 *
 * The maximum uncompressed size of the payload is 2 MiB. The minimum
 * uncompressed size of the payload is in practice never less than the
 * payload size itself. The LZMA2 format would allow uncompressed size
 * to be less than the payload size, but no sane compressor creates such
 * files. LZMA2 supports storing uncompressible data in uncompressed form,
 * so there's never a need to create payloads whose uncompressed size is
 * smaller than the compressed size.
 *
 * The assumption that the uncompressed size of the payload is never
 * smaller than the payload itself is valid only when talking about
 * the payload as a whole. It is possible that the payload has parts where
 * the decompressor consumes more input than it produces output. Calculating
 * the worst case for this would be tricky. Instead of trying to do that,
 * let's simply make sure that the decompressor never overwrites any bytes
 * of the payload that it is currently reading.
 *
 * Now we have enough information to calculate the safety margin. We need
 *   - 128 bytes for the .xz file format headers;
 *   - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
 *     per chunk, each chunk having average payload size of 32 KiB); and
 *   - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
 *     the decompressor never overwrites anything from the LZMA2 chunk
 *     payload it is currently reading.
 *
 * We get the following formula:
 *
 *	safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
 *		      = 128 + (uncompressed_size >> 12) + 65536
 *
 * For comparison, according to arch/x86/boot/compressed/misc.c, the
 * equivalent formula for Deflate is this:
 *
 *	safety_margin = 18 + (uncompressed_size >> 12) + 32768
 *
 * Thus, when updating a Deflate-only in-place kernel decompressor to
 * support XZ, the fixed overhead has to be increased from 18+32768 bytes
 * to 128+65536 bytes.
 */

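/*
 * Worked example (illustrative only, not part of the original comment):
 * for a hypothetical 32 MiB (33554432-byte) uncompressed kernel image,
 * the formula above gives
 *
 *	safety_margin = 128 + (33554432 >> 12) + 65536
 *	              = 128 + 8192 + 65536
 *	              = 73856 bytes (about 72 KiB)
 *
 * so placing the compressed data at the end of a buffer that extends
 * roughly 72 KiB beyond the uncompressed image keeps the write position
 * safely behind the read position.
 */
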
/*
 * STATIC is defined to "static" if we are being built for kernel
 * decompression (pre-boot code). <linux/decompress/mm.h> will define
 * STATIC to empty if it wasn't already defined. Since we will need to
 * know later if we are being used for kernel decompression, we define
 * XZ_PREBOOT here.
 */
#ifdef STATIC
# define XZ_PREBOOT
#endif
#ifdef __KERNEL__
# include <linux/decompress/mm.h>
#endif
#define XZ_EXTERN STATIC

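/*
 * Illustrative sketch (not part of this file): an architecture's pre-boot
 * decompressor typically defines STATIC before including this wrapper,
 * roughly along these lines (the exact path and surrounding ifdefs vary
 * per architecture):
 *
 *	#define STATIC static
 *	#include "../../../../lib/decompress_unxz.c"
 *
 * Because STATIC is defined at that point, the check above defines
 * XZ_PREBOOT, so the XZ decoder sources are compiled directly into the
 * pre-boot image further below instead of relying on <linux/xz.h> and
 * the regular kernel build.
 */
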
#ifndef XZ_PREBOOT
# include <linux/slab.h>
# include <linux/xz.h>
#else
/*
 * Use the internal CRC32 code instead of the kernel's CRC32 module, which
 * is not available in the early phase of booting.
 */
#define XZ_INTERNAL_CRC32 1

/*
 * For boot time use, we enable only the BCJ filter of the current
 * architecture or none if no BCJ filter is available for the architecture.
 */
#ifdef CONFIG_X86
# define XZ_DEC_X86
#endif
#ifdef CONFIG_PPC
# define XZ_DEC_POWERPC
#endif
#ifdef CONFIG_ARM
# define XZ_DEC_ARM
#endif
#ifdef CONFIG_ARM_THUMB
# define XZ_DEC_ARMTHUMB
#endif
#ifdef CONFIG_IA64
# define XZ_DEC_IA64
#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
#endif

/*
 * This will get the basic headers so that memeq() and others
 * can be defined.
 */
#include "xz/xz_private.h"

/*
 * Replace the normal allocation functions with the versions from
 * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
 * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
 * Work around it here because the other decompressors don't need it.
 */
#undef kmalloc
#undef kfree
#undef vmalloc
#undef vfree
#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)

/*
 * FIXME: Not all basic memory functions are provided in architecture-specific
 * files (yet). We define our own versions here for now, but this should be
 * only a temporary solution.
 *
 * memeq and memzero are not used much and any remotely sane implementation
 * is fast enough. memcpy/memmove speed matters in multi-call mode, but
 * the kernel image is decompressed in single-call mode, in which only
 * memmove speed can matter and only if there is a lot of uncompressible data
 * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
 * functions below should just be kept small; it's probably not worth
 * optimizing for speed.
 */

#ifndef memeq
static bool memeq(const void *a, const void *b, size_t size)
{
	const uint8_t *x = a;
	const uint8_t *y = b;
	size_t i;

	for (i = 0; i < size; ++i)
		if (x[i] != y[i])
			return false;

	return true;
}
#endif

#ifndef memzero
static void memzero(void *buf, size_t size)
{
	uint8_t *b = buf;
	uint8_t *e = b + size;

	while (b != e)
		*b++ = '\0';
}
#endif

#ifndef memmove
/* Not static to avoid a conflict with the prototype in the Linux headers. */
void *memmove(void *dest, const void *src, size_t size)
{
	uint8_t *d = dest;
	const uint8_t *s = src;
	size_t i;

	if (d < s) {
		for (i = 0; i < size; ++i)
			d[i] = s[i];
	} else if (d > s) {
		i = size;
		while (i-- > 0)
			d[i] = s[i];
	}

	return dest;
}
#endif

/*
 * Since we need memmove anyway, we could use it as memcpy too.
 * Commented out for now to avoid breaking things.
 */
/*
#ifndef memcpy
# define memcpy memmove
#endif
*/

#include "xz/xz_crc32.c"
#include "xz/xz_dec_stream.c"
#include "xz/xz_dec_lzma2.c"
#include "xz/xz_dec_bcj.c"

#endif /* XZ_PREBOOT */

/* Size of the input and output buffers in multi-call mode */
#define XZ_IOBUF_SIZE 4096

/*
 * This function implements the API defined in <linux/decompress/generic.h>.
 *
 * This wrapper will automatically choose single-call or multi-call mode
 * of the native XZ decoder API. The single-call mode can be used only when
 * both input and output buffers are available as a single chunk, i.e. when
 * fill() and flush() won't be used.
 */
STATIC int INIT unxz(unsigned char *in, long in_size,
		     long (*fill)(void *dest, unsigned long size),
		     long (*flush)(void *src, unsigned long size),
		     unsigned char *out, long *in_used,
		     void (*error)(char *x))
{
	struct xz_buf b;
	struct xz_dec *s;
	enum xz_ret ret;
	bool must_free_in = false;

#if XZ_INTERNAL_CRC32
	xz_crc32_init();
#endif

	if (in_used != NULL)
		*in_used = 0;

	if (fill == NULL && flush == NULL)
		s = xz_dec_init(XZ_SINGLE, 0);
	else
		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);

	if (s == NULL)
		goto error_alloc_state;

	if (flush == NULL) {
		b.out = out;
		b.out_size = (size_t)-1;
	} else {
		b.out_size = XZ_IOBUF_SIZE;
		b.out = malloc(XZ_IOBUF_SIZE);
		if (b.out == NULL)
			goto error_alloc_out;
	}

	if (in == NULL) {
		must_free_in = true;
		in = malloc(XZ_IOBUF_SIZE);
		if (in == NULL)
			goto error_alloc_in;
	}

	b.in = in;
	b.in_pos = 0;
	b.in_size = in_size;
	b.out_pos = 0;

	if (fill == NULL && flush == NULL) {
		ret = xz_dec_run(s, &b);
	} else {
		do {
			if (b.in_pos == b.in_size && fill != NULL) {
				if (in_used != NULL)
					*in_used += b.in_pos;

				b.in_pos = 0;

				in_size = fill(in, XZ_IOBUF_SIZE);
				if (in_size < 0) {
					/*
					 * This isn't an optimal error code
					 * but it probably isn't worth making
					 * a new one either.
					 */
					ret = XZ_BUF_ERROR;
					break;
				}

				b.in_size = in_size;
			}

			ret = xz_dec_run(s, &b);

			if (flush != NULL && (b.out_pos == b.out_size
					|| (ret != XZ_OK && b.out_pos > 0))) {
				/*
				 * Setting ret here may hide an error
				 * returned by xz_dec_run(), but probably
				 * it's not too bad.
				 */
				if (flush(b.out, b.out_pos) != (long)b.out_pos)
					ret = XZ_BUF_ERROR;

				b.out_pos = 0;
			}
		} while (ret == XZ_OK);

		if (must_free_in)
			free(in);

		if (flush != NULL)
			free(b.out);
	}

	if (in_used != NULL)
		*in_used += b.in_pos;

	xz_dec_end(s);

	switch (ret) {
	case XZ_STREAM_END:
		return 0;

	case XZ_MEM_ERROR:
		/* This can occur only in multi-call mode. */
		error("XZ decompressor ran out of memory");
		break;

	case XZ_FORMAT_ERROR:
		error("Input is not in the XZ format (wrong magic bytes)");
		break;

	case XZ_OPTIONS_ERROR:
		error("Input was encoded with settings that are not "
				"supported by this XZ decoder");
		break;

	case XZ_DATA_ERROR:
	case XZ_BUF_ERROR:
		error("XZ-compressed data is corrupt");
		break;

	default:
		error("Bug in the XZ decompressor");
		break;
	}

	return -1;

error_alloc_in:
	if (flush != NULL)
		free(b.out);

error_alloc_out:
	xz_dec_end(s);

error_alloc_state:
	error("XZ decompressor ran out of memory");
	return -1;
}

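/*
 * Usage sketch (illustrative only; buffer names are hypothetical): when
 * both the compressed input and the output buffer are available as single
 * chunks in memory, pass NULL for fill() and flush() to get single-call
 * mode:
 *
 *	long used;
 *	int ret = unxz(compressed_buf, compressed_size,
 *		       NULL, NULL, output_buf, &used, error_fn);
 *
 * A return value of 0 means the whole stream was decoded successfully and
 * "used" holds the number of input bytes consumed. Passing non-NULL fill()
 * and flush() callbacks selects multi-call mode instead, which streams the
 * data through XZ_IOBUF_SIZE sized temporary buffers.
 */
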
/*
 * This function is used by architecture-specific files to decompress
 * the kernel image.
 */
#ifdef XZ_PREBOOT
STATIC int INIT __decompress(unsigned char *buf, long len,
			     long (*fill)(void*, unsigned long),
			     long (*flush)(void*, unsigned long),
			     unsigned char *out_buf, long olen,
			     long *pos,
			     void (*error)(char *x))
{
	return unxz(buf, len, fill, flush, out_buf, pos, error);
}
#endif
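
/*
 * Illustrative sketch (not from this file; argument names are hypothetical):
 * the architecture-specific pre-boot code calls __decompress() with the
 * in-memory compressed image and the target buffer, roughly like this:
 *
 *	__decompress(input_data, input_len, NULL, NULL,
 *		     output, output_len, NULL, error);
 *
 * With NULL fill() and flush() this ends up in unxz()'s single-call mode,
 * which is the in-place case analysed at the top of this file. Note that
 * this wrapper does not use the olen argument; the output buffer must
 * already be large enough, including the safety margin.
 */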