/* inflate.c -- Not copyrighted 1992 by Mark Adler
   version c10p1, 10 January 1993 */

/* You can do whatever you like with this source file, though I would
   prefer that if you modify it and redistribute it that you include
   comments to that effect with your name and the date.  Thank you.
   [The history has been moved to the file ChangeLog.]
 */

/*
   Inflate deflated (PKZIP's method 8 compressed) data.  The compression
   method searches for as much of the current string of bytes (up to a
   length of 258) in the previous 32K bytes.  If it doesn't find any
   matches (of at least length 3), it codes the next byte.  Otherwise, it
   codes the length of the matched string and its distance backwards from
   the current position.  There is a single Huffman code that codes both
   single bytes (called "literals") and match lengths.  A second Huffman
   code codes the distance information, which follows a length code.  Each
   length or distance code actually represents a base value and a number
   of "extra" (sometimes zero) bits to get and add to the base value.  At
   the end of each deflated block is a special end-of-block (EOB) literal/
   length code.  The decoding process is basically: get a literal/length
   code; if EOB then done; if a literal, emit the decoded byte; if a
   length then get the distance and emit the referred-to bytes from the
   sliding window of previously emitted data.

   There are (currently) three kinds of inflate blocks: stored, fixed, and
   dynamic.  The compressor deals with some chunk of data at a time, and
   decides which method to use on a chunk-by-chunk basis.  A chunk might
   typically be 32K or 64K.  If the chunk is uncompressible, then the
   "stored" method is used.  In this case, the bytes are simply stored as
   is, eight bits per byte, with none of the above coding.  The bytes are
   preceded by a count, since there is no longer an EOB code.

   If the data is compressible, then either the fixed or dynamic methods
   are used.  In the dynamic method, the compressed data is preceded by
   an encoding of the literal/length and distance Huffman codes that are
   to be used to decode this block.  The representation is itself Huffman
   coded, and so is preceded by a description of that code.  These code
   descriptions take up a little space, and so for small blocks, there is
   a predefined set of codes, called the fixed codes.  The fixed method is
   used if the block codes up smaller that way (usually for quite small
   chunks), otherwise the dynamic method is used.  In the latter case, the
   codes are customized to the probabilities in the current block, and so
   can code it much better than the pre-determined fixed codes.

   The Huffman codes themselves are decoded using a multi-level table
   lookup, in order to maximize the speed of decoding plus the speed of
   building the decoding tables.  See the comments below that precede the
   lbits and dbits tuning parameters.
 */
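
/* An illustrative example of the length/distance scheme described above
   (added for clarity, not part of the original comments): if the bytes
   emitted so far end in "abcabc", a match coded as length 5, distance 3
   copies byte by byte from three positions back and appends "abcab".
   Overlapping copies like this are legal and are how short repeating runs
   are expressed; the copy loop in inflate_codes() below handles that case
   explicitly. */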


/*
   Notes beyond the 1.93a appnote.txt:

   1. Distance pointers never point before the beginning of the output
      stream.
   2. Distance pointers can point back across blocks, up to 32k away.
   3. There is an implied maximum of 7 bits for the bit length table and
      15 bits for the actual data.
   4. If only one code exists, then it is encoded using one bit.  (Zero
      would be more efficient, but perhaps a little confusing.)  If two
      codes exist, they are coded using one bit each (0 and 1).
   5. There is no way of sending zero distance codes--a dummy must be
      sent if there are none.  (History: a pre 2.0 version of PKZIP would
      store blocks with no distance codes, but this was discovered to be
      too harsh a criterion.)  Valid only for 1.93a.  2.04c does allow
      zero distance codes, which are sent as one code of zero bits in
      length.
   6. There are up to 286 literal/length codes.  Code 256 represents the
      end-of-block.  Note however that the static length tree defines
      288 codes just to fill out the Huffman codes.  Codes 286 and 287
      cannot be used though, since there is no length base or extra bits
      defined for them.  Similarly, there are up to 30 distance codes.
      However, static trees define 32 codes (all 5 bits) to fill out the
      Huffman codes, but the last two had better not show up in the data.
   7. Unzip can check dynamic Huffman blocks for complete code sets.
      The exception is that a single code would not be complete (see #4).
   8. The five bits following the block type are really the number of
      literal codes sent minus 257.
   9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
      (1+6+6).  Therefore, to output three times the length, you output
      three codes (1+1+1), whereas to output four times the same length,
      you only need two codes (1+3).  Hmm.
  10. In the tree reconstruction algorithm, Code = Code + Increment
      only if BitLength(i) is not zero.  (Pretty obvious.)
  11. Correction: 4 Bits: # of Bit Length codes - 4     (4 - 19)
  12. Note: length code 284 can represent 227-258, but length code 285
      really is 258.  The last length deserves its own, short code
      since it gets used a lot in very redundant files.  The length
      258 is special since 258 - 3 (the min match length) is 255.
  13. The literal/length and distance code bit lengths are read as a
      single stream of lengths.  It is possible (and advantageous) for
      a repeat code (16, 17, or 18) to go across the boundary between
      the two sets of lengths.
 */
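
/* A worked example for notes 6 and 12 above (added for illustration, not
   part of the original notes): length code 284 has base length 227 and 5
   extra bits in the cplens[]/cplext[] tables below, so arithmetically it
   could reach 227 + 31 = 258; the format instead reserves code 285, with
   no extra bits, for length 258, which is why cplens[] ends ... 227, 258
   and cplext[] gives code 285 zero extra bits. */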

#include "inflate.h"

extern void* memcpy(void*, const void*, unsigned);
extern void* memset(void*, int, unsigned);

/* Huffman code lookup table entry--this entry is four bytes for machines
   that have 16-bit pointers (e.g. PC's in the small or medium model).
   Valid extra bits are 0..13.  e == 15 is EOB (end of block), e == 16
   means that v is a literal, 16 < e < 32 means that v is a pointer to
   the next table, which codes e - 16 bits, and lastly e == 99 indicates
   an unused code.  If a code with e == 99 is looked up, this implies an
   error in the data. */
struct huft {
  uch e;                /* number of extra bits or operation */
  uch b;                /* number of bits in this code or subcode */
  union {
    ush n;              /* literal, length base, or distance base */
    struct huft *t;     /* pointer to next level of table */
  } v;
};
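
/* Illustrative sketch, not part of the build: a hypothetical helper showing
   how the e field of a table entry is interpreted during decoding, per the
   comment above.  inflate_codes() below tests the same ranges inline. */
#if 0
static const char *huft_kind(const struct huft *h)
{
  if (h->e == 99) return "unused code--error in the data";
  if (h->e == 15) return "end of block";
  if (h->e == 16) return "literal, value in v.n";
  if (h->e > 16)  return "link to next-level table v.t, coding e - 16 bits";
  return "length/distance base v.n plus e extra bits";
}
#endif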


/* Function prototypes */
int huft_build(unsigned *, unsigned, unsigned, const ush *, const ush *,
               struct huft **, int *);
int huft_free(struct huft *);
int inflate_codes(struct huft *, struct huft *, int, int);
int inflate_stored(void);
int inflate_fixed(void);
int inflate_dynamic(void);
int inflate_block(int *);
int inflate(void);


/* The inflate algorithm uses a sliding 32K byte window on the uncompressed
   stream to find repeated byte strings.  This is implemented here as a
   circular buffer.  The index is updated simply by incrementing and then
   and'ing with 0x7fff (32K-1). */
/* It is left to other modules to supply the 32K area.  It is assumed
   to be usable as if it were declared "uch slide[32768];" or as just
   "uch *slide;" and then malloc'ed in the latter case.  The definition
   must be in unzip.h, included above. */
/* unsigned wp;             current position in slide */
/*#define wp outcnt*/
/*#define flush_output(w) (wp=(w),flush_window())*/

/* Tables for deflate from PKZIP's appnote.txt. */
static const unsigned border[] = {  /* Order of the bit length code lengths */
        16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
static const ush cplens[] = {       /* Copy lengths for literal codes 257..285 */
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
        /* note: see note #12 above about the 258 in this list. */
static const ush cplext[] = {       /* Extra bits for literal codes 257..285 */
        0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
        3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 99, 99}; /* 99==invalid */
static const ush cpdist[] = {       /* Copy offsets for distance codes 0..29 */
        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
        8193, 12289, 16385, 24577};
static const ush cpdext[] = {       /* Extra bits for distance codes */
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
        7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
        12, 12, 13, 13};
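
/* A worked example of how these tables combine with extra bits (added for
   illustration; the numbers follow directly from the arrays above): length
   code 270 is index 13, so cplens[13] == 23 with cplext[13] == 2 extra bits,
   covering match lengths 23..26; distance code 10 gives cpdist[10] == 33
   with cpdext[10] == 4 extra bits, covering distances 33..48 (the next
   base, cpdist[11], is 49). */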



/* Macros for inflate() bit peeking and grabbing.
   The usage is:

        NEEDBITS(j)
        x = b & mask_bits[j];
        DUMPBITS(j)

   where NEEDBITS makes sure that b has at least j bits in it, and
   DUMPBITS removes the bits from b.  The macros use the variable k
   for the number of bits in b.  Normally, b and k are register
   variables for speed, and are initialized at the beginning of a
   routine that uses these macros from a global bit buffer and count.

   If we assume that EOB will be the longest code, then we will never
   ask for bits with NEEDBITS that are beyond the end of the stream.
   So, NEEDBITS should not read any more bytes than are needed to
   meet the request.  Then no bytes need to be "returned" to the buffer
   at the end of the last block.

   However, this assumption is not true for fixed blocks--the EOB code
   is 7 bits, but the other literal/length codes can be 8 or 9 bits.
   (The EOB code is shorter than other codes because fixed blocks are
   generally short.  So, while a block always has an EOB, many other
   literal/length codes have a significantly lower probability of
   showing up at all.)  However, by making the first table have a
   lookup of seven bits, the EOB code will be found in that first
   lookup, and so will not require that too many bits be pulled from
   the stream.
 */

ulg bb;                         /* bit buffer */
unsigned bk;                    /* bits in bit buffer */

static const ush mask_bits[] = {
    0x0000,
    0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
    0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};

#define get_byte() (inptr < inbuf_end ? *inptr++ : fill_inbuf())
#define NEXTBYTE() (uch)get_byte()
#define NEEDBITS(n) {while(k<(n)){b|=((ulg)NEXTBYTE())<<k;k+=8;}}
#define DUMPBITS(n) {b>>=(n);k-=(n);}
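
/* Illustrative sketch, not part of the build: reading a 3-bit field with the
   macros above, mirroring the usage shown in the comment.  b and k are
   assumed to be the local copies of bb and bk that each routine below sets
   up, and "field" is a hypothetical local. */
#if 0
  NEEDBITS(3)                           /* ensure at least 3 bits are in b */
  field = (unsigned)b & mask_bits[3];   /* take the low 3 bits (b & 7) */
  DUMPBITS(3)                           /* drop them from the bit buffer */
#endif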


/*
   Huffman code decoding is performed using a multi-level table lookup.
   The fastest way to decode is to simply build a lookup table whose
   size is determined by the longest code.  However, the time it takes
   to build this table can also be a factor if the data being decoded
   is not very long.  The most common codes are necessarily the
   shortest codes, so those codes dominate the decoding time, and hence
   the speed.  The idea is you can have a shorter table that decodes the
   shorter, more probable codes, and then point to subsidiary tables for
   the longer codes.  The time it costs to decode the longer codes is
   then traded against the time it takes to make longer tables.

   The results of this trade are in the variables lbits and dbits
   below.  lbits is the number of bits the first level table for literal/
   length codes can decode in one step, and dbits is the same thing for
   the distance codes.  Subsequent tables are also less than or equal to
   those sizes.  These values may be adjusted either when all of the
   codes are shorter than that, in which case the longest code length in
   bits is used, or when the shortest code is *longer* than the requested
   table size, in which case the length of the shortest code in bits is
   used.

   There are two different values for the two tables, since they code a
   different number of possibilities each.  The literal/length table
   codes 286 possible values, or in a flat code, a little over eight
   bits.  The distance table codes 30 possible values, or a little less
   than five bits, flat.  The optimum values for speed end up being
   about one bit more than those, so lbits is 8+1 and dbits is 5+1.
   The optimum values may differ though from machine to machine, and
   possibly even between compilers.  Your mileage may vary.
 */


static const int lbits = 9;     /* bits in base literal/length lookup table */
static const int dbits = 6;     /* bits in base distance lookup table */
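
/* Rough size arithmetic behind these choices (added for illustration): a
   flat table for 15-bit codes would need 1 << 15 == 32768 entries, while a
   9-bit first-level table has only 1 << 9 == 512 entries plus small
   subsidiary tables for the rare codes longer than 9 bits; the 6-bit
   distance table likewise has 64 first-level entries. */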


/* If BMAX needs to be larger than 16, then h and x[] should be ulg. */
#define BMAX 16         /* maximum bit length of any code (16 for explode) */
#define N_MAX 288       /* maximum number of codes in any set */


unsigned hufts;         /* track memory usage */


int huft_build(
  unsigned *b,          /* code lengths in bits (all assumed <= BMAX) */
  unsigned n,           /* number of codes (assumed <= N_MAX) */
  unsigned s,           /* number of simple-valued codes (0..s-1) */
  const ush *d,         /* list of base values for non-simple codes */
  const ush *e,         /* list of extra bits for non-simple codes */
  struct huft **t,      /* result: starting table */
  int *m                /* maximum lookup bits, returns actual */
  )
/* Given a list of code lengths and a maximum table size, make a set of
   tables to decode that set of codes.  Return zero on success, one if
   the given code set is incomplete (the tables are still built in this
   case), two if the input is invalid (all zero length codes or an
   oversubscribed set of lengths), and three if not enough memory. */
{
  unsigned a;                   /* counter for codes of length k */
  unsigned c[BMAX+1];           /* bit length count table */
  unsigned f;                   /* i repeats in table every f entries */
  int g;                        /* maximum code length */
  int h;                        /* table level */
  register unsigned i;          /* counter, current code */
  register unsigned j;          /* counter */
  register int k;               /* number of bits in current code */
  int l;                        /* bits per table (returned in m) */
  register unsigned *p;         /* pointer into c[], b[], or v[] */
  register struct huft *q;      /* points to current table */
  struct huft r;                /* table entry for structure assignment */
  struct huft *u[BMAX];         /* table stack */
  unsigned v[N_MAX];            /* values in order of bit length */
  register int w;               /* bits before this table == (l * h) */
  unsigned x[BMAX+1];           /* bit offsets, then code stack */
  unsigned *xp;                 /* pointer into x */
  int y;                        /* number of dummy codes added */
  unsigned z;                   /* number of entries in current table */


  /* Generate counts for each bit length */
  memset(c, 0, sizeof(c));
  p = b;  i = n;
  do {
    /* Tracecv(*p, (stderr, (n-i >= ' ' && n-i <= '~' ? "%c %d\n" : "0x%x %d\n"),
       n-i, *p));*/
    c[*p]++;                    /* assume all entries <= BMAX */
    p++;                        /* Can't combine with above line (Solaris bug) */
  } while (--i);
  if (c[0] == n)                /* null input--all zero length codes */
  {
    *t = (struct huft *)NULL;
    *m = 0;
    return 0;
  }


  /* Find minimum and maximum length, bound *m by those */
  l = *m;
  for (j = 1; j <= BMAX; j++)
    if (c[j])
      break;
  k = j;                        /* minimum code length */
  if ((unsigned)l < j)
    l = j;
  for (i = BMAX; i; i--)
    if (c[i])
      break;
  g = i;                        /* maximum code length */
  if ((unsigned)l > i)
    l = i;
  *m = l;


  /* Adjust last length count to fill out codes, if needed */
  for (y = 1 << j; j < i; j++, y <<= 1)
    if ((y -= c[j]) < 0)
      return 2;                 /* bad input: more codes than bits */
  if ((y -= c[i]) < 0)
    return 2;
  c[i] += y;


  /* Generate starting offsets into the value table for each length */
  x[1] = j = 0;
  p = c + 1;  xp = x + 2;
  while (--i) {                 /* note that i == g from above */
    *xp++ = (j += *p++);
  }


  /* Make a table of values in order of bit lengths */
  p = b;  i = 0;
  do {
    if ((j = *p++) != 0)
      v[x[j]++] = i;
  } while (++i < n);


  /* Generate the Huffman codes and for each, make the table entries */
  x[0] = i = 0;                 /* first Huffman code is zero */
  p = v;                        /* grab values in bit order */
  h = -1;                       /* no tables yet--level -1 */
  w = -l;                       /* bits decoded == (l * h) */
  u[0] = (struct huft *)NULL;   /* just to keep compilers happy */
  q = (struct huft *)NULL;      /* ditto */
  z = 0;                        /* ditto */

  /* go through the bit lengths (k already is bits in shortest code) */
  for (; k <= g; k++)
  {
    a = c[k];
    while (a--)
    {
      /* here i is the Huffman code of length k bits for value *p */
      /* make tables up to required level */
      while (k > w + l)
      {
        h++;
        w += l;                 /* previous table always l bits */

        /* compute minimum size table less than or equal to l bits */
        z = (z = g - w) > (unsigned)l ? l : z;  /* upper limit on table size */
        if ((f = 1 << (j = k - w)) > a + 1)     /* try a k-w bit table */
        {                       /* too few codes for k-w bit table */
          f -= a + 1;           /* deduct codes from patterns left */
          xp = c + k;
          while (++j < z)       /* try smaller tables up to z bits */
          {
            if ((f <<= 1) <= *++xp)
              break;            /* enough codes to use up j bits */
            f -= *xp;           /* else deduct codes from patterns */
          }
        }
        z = 1 << j;             /* table entries for j-bit table */

        /* allocate and link in new table */
        if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) ==
            (struct huft *)NULL)
        {
          if (h)
            huft_free(u[0]);
          return 3;             /* not enough memory */
        }
        hufts += z + 1;         /* track memory usage */
        *t = q + 1;             /* link to list for huft_free() */
        *(t = &(q->v.t)) = (struct huft *)NULL;
        u[h] = ++q;             /* table starts after link */

        /* connect to last table, if there is one */
        if (h)
        {
          x[h] = i;             /* save pattern for backing up */
          r.b = (uch)l;         /* bits to dump before this table */
          r.e = (uch)(16 + j);  /* bits in this table */
          r.v.t = q;            /* pointer to this table */
          j = i >> (w - l);     /* (get around Turbo C bug) */
          u[h-1][j] = r;        /* connect to last table */
        }
      }

      /* set up table entry in r */
      r.b = (uch)(k - w);
      if (p >= v + n)
        r.e = 99;               /* out of values--invalid code */
      else if (*p < s)
      {
        r.e = (uch)(*p < 256 ? 16 : 15);  /* 256 is end-of-block code */
        r.v.n = (ush)(*p);      /* simple code is just the value */
        p++;                    /* one compiler does not like *p++ */
      }
      else
      {
        r.e = (uch)e[*p - s];   /* non-simple--look up in lists */
        r.v.n = d[*p++ - s];
      }

      /* fill code-like entries with r */
      f = 1 << (k - w);
      for (j = i >> w; j < z; j += f)
        q[j] = r;

      /* backwards increment the k-bit code i */
      for (j = 1 << (k - 1); i & j; j >>= 1)
        i ^= j;
      i ^= j;

      /* backup over finished tables */
      while ((i & ((1 << w) - 1)) != x[h])
      {
        h--;                    /* don't need to update q */
        w -= l;
      }
    }
  }


  /* Return true (1) if we were given an incomplete table */
  return y != 0 && g != 1;
}
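
/* Illustrative sketch, not part of the build: the calling convention for
   huft_build(), mirroring what inflate_fixed()/inflate_dynamic() below do
   for the literal/length table.  "lengths" and "count" are hypothetical
   names for a caller's code-length array and its size. */
#if 0
  struct huft *tl;              /* resulting table */
  int bl = lbits;               /* lookup bits wanted; returns bits actually used */
  int i = huft_build(lengths, count, 257, cplens, cplext, &tl, &bl);
  if (i != 0)
  {
    if (i == 1)                 /* incomplete code set: tables were still built */
      huft_free(tl);
    /* give up on this block: i is 1, 2 or 3 as documented above */
  }
  /* ... otherwise decode with inflate_codes(), then huft_free(tl) ... */
#endif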



int huft_free(struct huft *t)
/* Free the malloc'ed tables built by huft_build(), which makes a linked
   list of the tables it made, with the links in a dummy first entry of
   each table. */
{
  register struct huft *p, *q;


  /* Go through linked list, freeing from the malloced (t[-1]) address. */
  p = t;
  while (p != (struct huft *)NULL)
  {
    q = (--p)->v.t;
    free((char*)p);
    p = q;
  }
  return 0;
}


int inflate_codes(
  struct huft *tl,
  struct huft *td,      /* literal/length and distance decoder tables */
  int bl,
  int bd                /* number of bits decoded by tl[] and td[] */
  )
/* inflate (decompress) the codes in a deflated (compressed) block.
   Return an error code or zero if it all goes ok. */
{
  register unsigned e;          /* table entry flag/number of extra bits */
  unsigned n, d;                /* length and index for copy */
  struct huft *t;               /* pointer to table entry */
  unsigned ml, md;              /* masks for bl and bd bits */
  register ulg b=bb;            /* bit buffer */
  register unsigned k=bk;       /* number of bits in bit buffer */
  register uch* p=(uch*)outptr;

  /* inflate the coded data */
  ml = mask_bits[bl];           /* precompute masks for speed */
  md = mask_bits[bd];
  for (;;)                      /* do until end of block */
  {
    NEEDBITS((unsigned)bl)
    if ((e = (t = tl + ((unsigned)b & ml))->e) > 16)
      do {
        if (e == 99)
          return 1;
        DUMPBITS(t->b)
        e -= 16;
        NEEDBITS(e)
      } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
    DUMPBITS(t->b)
    if (e == 16)                /* then it's a literal */
    {
      *p++ = (uch)t->v.n;
    }
    else                        /* it's an EOB or a length */
    {
      /* exit if end of block */
      if (e == 15)
        break;

      /* get length of block to copy */
      NEEDBITS(e)
      n = t->v.n + ((unsigned)b & mask_bits[e]);
      DUMPBITS(e);

      /* decode distance of block to copy */
      NEEDBITS((unsigned)bd)
      if ((e = (t = td + ((unsigned)b & md))->e) > 16)
        do {
          if (e == 99)
            return 1;
          DUMPBITS(t->b)
          e -= 16;
          NEEDBITS(e)
        } while ((e = (t = t->v.t + ((unsigned)b & mask_bits[e]))->e) > 16);
      DUMPBITS(t->b)
      NEEDBITS(e)
      d = t->v.n + ((unsigned)b & mask_bits[e]);
      d &= ZIP_WINDOW_SIZE-1;
      DUMPBITS(e)

      /* do the copy */
      if (d>=n)
      {
        memcpy(p, p-d, n);
        p+=n;
      }
      else
      {
        uch* q=p-d;
        while(n--) *p++=*q++;
      }
    }
  }


  /* restore the globals from the locals */
  outptr=p;
  bb = b;                       /* restore global bit buffer */
  bk = k;

  /* done */
  return 0;
}



int inflate_stored()
/* "decompress" an inflated type 0 (stored) block. */
{
  unsigned n;                   /* number of bytes in block */
  register ulg b;               /* bit buffer */
  register unsigned k;          /* number of bits in bit buffer */

  register uch* p=(uch*)outptr;


  /* make local copies of globals */
  b = bb;                       /* initialize bit buffer */
  k = bk;


  /* go to byte boundary */
  n = k & 7;
  DUMPBITS(n);


  /* get the length and its complement */
  NEEDBITS(16)
  n = ((unsigned)b & 0xffff);
  DUMPBITS(16)
  NEEDBITS(16)
  if (n != (unsigned)((~b) & 0xffff))
    return 1;                   /* error in compressed data */
  DUMPBITS(16)


  /* read and output the compressed data */
  while (n--)
  {
    NEEDBITS(8)
    *p++=(uch)b;
    DUMPBITS(8)
  }


  /* restore the globals from the locals */
  outptr=p;
  bb = b;                       /* restore global bit buffer */
  bk = k;
  return 0;
}



int inflate_fixed()
/* decompress an inflated type 1 (fixed Huffman codes) block.  We should
   either replace this with a custom decoder, or at least precompute the
   Huffman tables. */
{
  int i;                        /* temporary variable */
  struct huft *tl;              /* literal/length code table */
  struct huft *td;              /* distance code table */
  int bl;                       /* lookup bits for tl */
  int bd;                       /* lookup bits for td */
  unsigned l[288];              /* length list for huft_build */


  /* set up literal table */
  for (i = 0; i < 144; i++)
    l[i] = 8;
  for (; i < 256; i++)
    l[i] = 9;
  for (; i < 280; i++)
    l[i] = 7;
  for (; i < 288; i++)          /* make a complete, but wrong code set */
    l[i] = 8;
  bl = 7;
  if ((i = huft_build(l, 288, 257, cplens, cplext, &tl, &bl)) != 0)
    return i;


  /* set up distance table */
  for (i = 0; i < 30; i++)      /* make an incomplete code set */
    l[i] = 5;
  bd = 5;
  if ((i = huft_build(l, 30, 0, cpdist, cpdext, &td, &bd)) > 1)
  {
    huft_free(tl);
    return i;
  }


  /* decompress until an end-of-block code */
  if (inflate_codes(tl, td, bl, bd))
    return 1;


  /* free the decoding tables, return */
  huft_free(tl);
  huft_free(td);
  return 0;
}



int inflate_dynamic()
/* decompress an inflated type 2 (dynamic Huffman codes) block. */
{
  int i;                        /* temporary variables */
  unsigned j;
  unsigned l;                   /* last length */
  unsigned m;                   /* mask for bit lengths table */
  unsigned n;                   /* number of lengths to get */
  struct huft *tl;              /* literal/length code table */
  struct huft *td;              /* distance code table */
  int bl;                       /* lookup bits for tl */
  int bd;                       /* lookup bits for td */
  unsigned nb;                  /* number of bit length codes */
  unsigned nl;                  /* number of literal/length codes */
  unsigned nd;                  /* number of distance codes */
#ifdef PKZIP_BUG_WORKAROUND
  unsigned ll[288+32];          /* literal/length and distance code lengths */
#else
  unsigned ll[286+30];          /* literal/length and distance code lengths */
#endif
  register ulg b;               /* bit buffer */
  register unsigned k;          /* number of bits in bit buffer */


  /* make local bit buffer */
  b = bb;
  k = bk;


  /* read in table lengths */
  NEEDBITS(5)
  nl = 257 + ((unsigned)b & 0x1f);      /* number of literal/length codes */
  DUMPBITS(5)
  NEEDBITS(5)
  nd = 1 + ((unsigned)b & 0x1f);        /* number of distance codes */
  DUMPBITS(5)
  NEEDBITS(4)
  nb = 4 + ((unsigned)b & 0xf);         /* number of bit length codes */
  DUMPBITS(4)
#ifdef PKZIP_BUG_WORKAROUND
  if (nl > 288 || nd > 32)
#else
  if (nl > 286 || nd > 30)
#endif
    return 1;                   /* bad lengths */


  /* read in bit-length-code lengths */
  for (j = 0; j < nb; j++)
  {
    NEEDBITS(3)
    ll[border[j]] = (unsigned)b & 7;
    DUMPBITS(3)
  }
  for (; j < 19; j++)
    ll[border[j]] = 0;


  /* build decoding table for trees--single level, 7 bit lookup */
  bl = 7;
  if ((i = huft_build(ll, 19, 19, (ush*)NULL, (ush*)NULL, &tl, &bl)) != 0)
  {
    if (i == 1)
      huft_free(tl);
    return i;                   /* incomplete code set */
  }


  /* read in literal and distance code lengths */
  n = nl + nd;
  m = mask_bits[bl];
  i = l = 0;
  while ((unsigned)i < n)
  {
    NEEDBITS((unsigned)bl)
    j = (td = tl + ((unsigned)b & m))->b;
    DUMPBITS(j)
    j = td->v.n;
    if (j < 16)                 /* length of code in bits (0..15) */
      ll[i++] = l = j;          /* save last length in l */
    else if (j == 16)           /* repeat last length 3 to 6 times */
    {
      NEEDBITS(2)
      j = 3 + ((unsigned)b & 3);
      DUMPBITS(2)
      if ((unsigned)i + j > n)
        return 1;
      while (j--)
        ll[i++] = l;
    }
    else if (j == 17)           /* 3 to 10 zero length codes */
    {
      NEEDBITS(3)
      j = 3 + ((unsigned)b & 7);
      DUMPBITS(3)
      if ((unsigned)i + j > n)
        return 1;
      while (j--)
        ll[i++] = 0;
      l = 0;
    }
    else                        /* j == 18: 11 to 138 zero length codes */
    {
      NEEDBITS(7)
      j = 11 + ((unsigned)b & 0x7f);
      DUMPBITS(7)
      if ((unsigned)i + j > n)
        return 1;
      while (j--)
        ll[i++] = 0;
      l = 0;
    }
  }


  /* free decoding table for trees */
  huft_free(tl);


  /* restore the global bit buffer */
  bb = b;
  bk = k;


  /* build the decoding tables for literal/length and distance codes */
  bl = lbits;
  if ((i = huft_build(ll, nl, 257, cplens, cplext, &tl, &bl)) != 0)
  {
    if (i == 1) {
      /* fprintf(stderr, " incomplete literal tree\n");*/
      huft_free(tl);
    }
    return i;                   /* incomplete code set */
  }
  bd = dbits;
  if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
  {
    if (i == 1) {
      /* fprintf(stderr, " incomplete distance tree\n");*/
#ifdef PKZIP_BUG_WORKAROUND
      i = 0;
    }
#else
      huft_free(td);
    }
    huft_free(tl);
    return i;                   /* incomplete code set */
#endif
  }


  /* decompress until an end-of-block code */
  if (inflate_codes(tl, td, bl, bd))
    return 1;


  /* free the decoding tables, return */
  huft_free(tl);
  huft_free(td);
  return 0;
}



int inflate_block(int* e)
/* decompress an inflated block */
{
  unsigned t;                   /* block type */
  register ulg b;               /* bit buffer */
  register unsigned k;          /* number of bits in bit buffer */


  /* make local bit buffer */
  b = bb;
  k = bk;


  /* read in last block bit */
  NEEDBITS(1)
  *e = (int)b & 1;
  DUMPBITS(1)


  /* read in block type */
  NEEDBITS(2)
  t = (unsigned)b & 3;
  DUMPBITS(2)


  /* restore the global bit buffer */
  bb = b;
  bk = k;


  /* inflate that block type */
  if (t == 2)
    return inflate_dynamic();
  if (t == 0)
    return inflate_stored();
  if (t == 1)
    return inflate_fixed();


  /* bad block type */
  return 2;
}



int inflate()
/* decompress an inflated entry */
{
  int e;                        /* last block flag */
  int r;                        /* result code */
  unsigned h;                   /* maximum struct huft's malloc'ed */


  /* initialize window, bit buffer */
  /* wp = 0;*/
  bk = 0;
  bb = 0;


  /* decompress until the last block */
  h = 0;
  do {
    hufts = 0;
    r=inflate_block(&e);
    process_block(r);
    if (r!=0)
      return r;
    if (hufts > h)
      h = hufts;
  } while (!e);

  /* Undo too much lookahead.  The next read will be byte aligned so we
   * can discard unused bits in the last meaningful byte.
   */
  /* while (bk >= 8) {
    bk -= 8;
    inptr--;
  }*/

  /* flush out slide */
  /* flush_output(wp);*/


  /* return success */
#ifdef DEBUG
  /* fprintf(stderr, "<%u> ", h);*/
#endif /* DEBUG */

  return 0;
}
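
/* Illustrative sketch, not part of the build: roughly how a caller is
   assumed to drive this module, based only on the globals and helpers the
   code above references (inptr, inbuf_end, fill_inbuf, outptr,
   process_block); their real declarations live in inflate.h, which is not
   shown here, and the names on the right-hand sides below are hypothetical. */
#if 0
  inptr = compressed_data;              /* input cursor consumed by get_byte() */
  inbuf_end = compressed_data + compressed_size;
  outptr = output_buffer;               /* output cursor advanced by the decoders */
  if (inflate() != 0)
    handle_error();                     /* non-zero means bad data, bad block type,
                                           or not enough memory */
  /* decompressed bytes now lie between output_buffer and outptr */
#endif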