xref: /aosp_15_r20/external/xz-embedded/userspace/xz_config.h (revision d2c16535d139cb185e89120452531bba6b36d3c6)
1 /* SPDX-License-Identifier: 0BSD */
2 
3 /*
4  * Private includes and definitions for userspace use of XZ Embedded
5  *
6  * Author: Lasse Collin <[email protected]>
7  */
8 
9 #ifndef XZ_CONFIG_H
10 #define XZ_CONFIG_H
11 
12 /* Uncomment to enable building of xz_dec_catrun(). */
13 /* #define XZ_DEC_CONCATENATED */
14 
15 /* Uncomment to enable CRC64 support. */
16 /* #define XZ_USE_CRC64 */
17 
18 /* Uncomment as needed to enable BCJ filter decoders. */
19 /* #define XZ_DEC_X86 */
20 /* #define XZ_DEC_ARM */
21 /* #define XZ_DEC_ARMTHUMB */
22 /* #define XZ_DEC_ARM64 */
23 /* #define XZ_DEC_RISCV */
24 /* #define XZ_DEC_POWERPC */
25 /* #define XZ_DEC_IA64 */
26 /* #define XZ_DEC_SPARC */
27 
28 /*
29  * Visual Studio 2013 update 2 supports only __inline, not inline.
30  * MSVC v19.0 / VS 2015 and newer support both.
31  */
32 #if defined(_MSC_VER) && _MSC_VER < 1900 && !defined(inline)
33 #	define inline __inline
34 #endif
35 
36 #include <stdbool.h>
37 #include <stdlib.h>
38 #include <string.h>
39 
40 #include "xz.h"
41 
/*
 * Map the kernel-style allocation calls used by the XZ Embedded sources
 * to plain malloc()/free(). The kmalloc() flags argument is meaningless
 * in userspace and is simply dropped by the macro expansion.
 */
#define kmalloc(size, flags) malloc(size)
#define kfree(ptr) free(ptr)
#define vmalloc(size) malloc(size)
#define vfree(ptr) free(ptr)

/* Kernel-style wrappers around the standard memory comparison/clearing. */
#define memeq(a, b, size) (memcmp(a, b, size) == 0)
#define memzero(buf, size) memset(buf, 0, size)

/* NOTE: double evaluation hazard — never pass expressions with side
 * effects to min()/min_t(). */
#ifndef min
#	define min(x, y) ((x) < (y) ? (x) : (y))
#endif
/* The kernel's min_t() casts both operands to "type"; userspace ignores
 * the type argument and relies on the usual arithmetic conversions. */
#define min_t(type, x, y) min(x, y)
54 
55 #ifndef fallthrough
56 #	if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202000
57 #		define fallthrough [[fallthrough]]
58 #	elif defined(__GNUC__) && __GNUC__ >= 7
59 #		define fallthrough __attribute__((__fallthrough__))
60 #	else
61 #		define fallthrough do {} while (0)
62 #	endif
63 #endif
64 
65 /*
66  * Some functions have been marked with __always_inline to keep the
67  * performance reasonable even when the compiler is optimizing for
68  * small code size. You may be able to save a few bytes by #defining
69  * __always_inline to plain inline, but don't complain if the code
70  * becomes slow.
71  *
72  * NOTE: System headers on GNU/Linux may #define this macro already,
73  * so if you want to change it, you need to #undef it first.
74  */
75 #ifndef __always_inline
76 #	ifdef __GNUC__
77 #		define __always_inline \
78 			inline __attribute__((__always_inline__))
79 #	else
80 #		define __always_inline inline
81 #	endif
82 #endif
83 
84 /* Inline functions to access unaligned unsigned 32-bit integers */
#ifndef get_unaligned_le32
/*
 * Read a 32-bit little endian integer from a buffer that may be
 * misaligned. Each byte is widened to uint32_t before shifting so no
 * shift is done in (signed) int.
 */
static inline uint32_t get_unaligned_le32(const uint8_t *buf)
{
	uint32_t v;

	v = (uint32_t)buf[3] << 24;
	v |= (uint32_t)buf[2] << 16;
	v |= (uint32_t)buf[1] << 8;
	v |= (uint32_t)buf[0];
	return v;
}
#endif
94 
#ifndef get_unaligned_be32
/*
 * Read a 32-bit big endian integer from a buffer that may be
 * misaligned.
 *
 * buf[0] must be cast to uint32_t *before* the shift: the uint8_t value
 * promotes to (signed) int, and left-shifting a value >= 0x80 by 24
 * would overflow int, which is undefined behavior in C. The original
 * code cast only the result of the shift, leaving that UB in place.
 */
static inline uint32_t get_unaligned_be32(const uint8_t *buf)
{
	return ((uint32_t)buf[0] << 24)
			| ((uint32_t)buf[1] << 16)
			| ((uint32_t)buf[2] << 8)
			| (uint32_t)buf[3];
}
#endif
104 
#ifndef put_unaligned_le32
/*
 * Store a 32-bit integer in little endian byte order to a buffer that
 * may be misaligned: least significant byte first.
 */
static inline void put_unaligned_le32(uint32_t val, uint8_t *buf)
{
	unsigned i;

	for (i = 0; i < 4; ++i)
		buf[i] = (uint8_t)(val >> (8 * i));
}
#endif
114 
#ifndef put_unaligned_be32
/*
 * Store a 32-bit integer in big endian byte order to a buffer that
 * may be misaligned: most significant byte first.
 */
static inline void put_unaligned_be32(uint32_t val, uint8_t *buf)
{
	unsigned i;

	for (i = 0; i < 4; ++i)
		buf[i] = (uint8_t)(val >> (8 * (3 - i)));
}
#endif
124 
125 /*
126  * Use get_unaligned_le32() also for aligned access for simplicity. On
127  * little endian systems, #define get_le32(ptr) (*(const uint32_t *)(ptr))
128  * could save a few bytes in code size.
129  */
130 #ifndef get_le32
131 #	define get_le32 get_unaligned_le32
132 #endif
133 
134 #endif
135