/*-------------------------------------------------------------*/
/*--- Compression machinery (not incl block sorting)        ---*/
/*---                                            compress.c ---*/
/*-------------------------------------------------------------*/

/*--
  This file is a part of bzip2 and/or libbzip2, a program and
  library for lossless, block-sorting data compression.

  Copyright (C) 1996-2000 Julian R Seward.  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

  1. Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.

  2. The origin of this software must not be misrepresented; you must
     not claim that you wrote the original software.  If you use this
     software in a product, an acknowledgment in the product
     documentation would be appreciated but is not required.

  3. Altered source versions must be plainly marked as such, and must
     not be misrepresented as being the original software.

  4. The name of the author may not be used to endorse or promote
     products derived from this software without specific prior written
     permission.

  THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
  OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
  GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

  Julian Seward, Cambridge, UK.
  jseward@acm.org
  bzip2/libbzip2 version 1.0 of 21 March 2000

  This program is based on (at least) the work of:
     Mike Burrows
     David Wheeler
     Peter Fenwick
     Alistair Moffat
     Radford Neal
     Ian H. Witten
     Robert Sedgewick
     Jon L. Bentley

  For more information on these sources, see the manual.
--*/

/*--
   CHANGES
   ~~~~~~~
   0.9.0 -- original version.

   0.9.0a/b -- no changes in this file.

   0.9.0c
      * changed setting of nGroups in sendMTFValues() so as to
        do a bit better on small files
--*/

#include "os.h"
#include "bzlib.h"
#include "bzlib_private.h"
78 /*---------------------------------------------------*/
79 /*--- Bit stream I/O ---*/
80 /*---------------------------------------------------*/
82 /*---------------------------------------------------*/
83 void BZ2_bsInitWrite ( EState* s )
84 {
85 s->bsLive = 0;
86 s->bsBuff = 0;
87 }
90 /*---------------------------------------------------*/
91 static
92 void bsFinishWrite ( EState* s )
93 {
94 while (s->bsLive > 0) {
95 s->zbits[s->numZ] = (UChar)(s->bsBuff >> 24);
96 s->numZ++;
97 s->bsBuff <<= 8;
98 s->bsLive -= 8;
99 }
/*---------------------------------------------------*/
/*--
   Drain whole bytes from the accumulator so that at least
   `nz` bits of space are free before the next write.  (The
   argument is unused in the body; draining down to < 8 live
   bits always leaves room for a <= 24-bit write.)
--*/
#define bsNEEDW(nz)                           \
{                                             \
   while (s->bsLive >= 8) {                   \
      s->zbits[s->numZ]                       \
         = (UChar)(s->bsBuff >> 24);          \
      s->numZ++;                              \
      s->bsBuff <<= 8;                        \
      s->bsLive -= 8;                         \
   }                                          \
}
116 /*---------------------------------------------------*/
117 static
118 __inline__
119 void bsW ( EState* s, Int32 n, UInt32 v )
121 bsNEEDW ( n );
122 s->bsBuff |= (v << (32 - s->bsLive - n));
123 s->bsLive += n;
127 /*---------------------------------------------------*/
128 static
129 void bsPutUInt32 ( EState* s, UInt32 u )
131 bsW ( s, 8, (u >> 24) & 0xffL );
132 bsW ( s, 8, (u >> 16) & 0xffL );
133 bsW ( s, 8, (u >> 8) & 0xffL );
134 bsW ( s, 8, u & 0xffL );
138 /*---------------------------------------------------*/
139 static
140 void bsPutUChar ( EState* s, UChar c )
142 bsW( s, 8, (UInt32)c );
146 /*---------------------------------------------------*/
147 /*--- The back end proper ---*/
148 /*---------------------------------------------------*/
150 /*---------------------------------------------------*/
151 static
152 void makeMaps_e ( EState* s )
154 Int32 i;
155 s->nInUse = 0;
156 for (i = 0; i < 256; i++)
157 if (s->inUse[i]) {
158 s->unseqToSeq[i] = s->nInUse;
159 s->nInUse++;
164 /*---------------------------------------------------*/
165 static
166 void generateMTFValues ( EState* s )
168 UChar yy[256];
169 Int32 i, j;
170 Int32 zPend;
171 Int32 wr;
172 Int32 EOB;
174 /*
175 After sorting (eg, here),
176 s->arr1 [ 0 .. s->nblock-1 ] holds sorted order,
177 and
178 ((UChar*)s->arr2) [ 0 .. s->nblock-1 ]
179 holds the original block data.
181 The first thing to do is generate the MTF values,
182 and put them in
183 ((UInt16*)s->arr1) [ 0 .. s->nblock-1 ].
184 Because there are strictly fewer or equal MTF values
185 than block values, ptr values in this area are overwritten
186 with MTF values only when they are no longer needed.
188 The final compressed bitstream is generated into the
189 area starting at
190 (UChar*) (&((UChar*)s->arr2)[s->nblock])
192 These storage aliases are set up in bzCompressInit(),
193 except for the last one, which is arranged in
194 compressBlock().
195 */
196 UInt32* ptr = s->ptr;
197 UChar* block = s->block;
198 UInt16* mtfv = s->mtfv;
200 makeMaps_e ( s );
201 EOB = s->nInUse+1;
203 for (i = 0; i <= EOB; i++) s->mtfFreq[i] = 0;
205 wr = 0;
206 zPend = 0;
207 for (i = 0; i < s->nInUse; i++) yy[i] = (UChar) i;
209 for (i = 0; i < s->nblock; i++) {
210 UChar ll_i;
211 AssertD ( wr <= i, "generateMTFValues(1)" );
212 j = ptr[i]-1; if (j < 0) j += s->nblock;
213 ll_i = s->unseqToSeq[block[j]];
214 AssertD ( ll_i < s->nInUse, "generateMTFValues(2a)" );
216 if (yy[0] == ll_i) {
217 zPend++;
218 } else {
220 if (zPend > 0) {
221 zPend--;
222 while (True) {
223 if (zPend & 1) {
224 mtfv[wr] = BZ_RUNB; wr++;
225 s->mtfFreq[BZ_RUNB]++;
226 } else {
227 mtfv[wr] = BZ_RUNA; wr++;
228 s->mtfFreq[BZ_RUNA]++;
230 if (zPend < 2) break;
231 zPend = (zPend - 2) / 2;
232 };
233 zPend = 0;
236 register UChar rtmp;
237 register UChar* ryy_j;
238 register UChar rll_i;
239 rtmp = yy[1];
240 yy[1] = yy[0];
241 ryy_j = &(yy[1]);
242 rll_i = ll_i;
243 while ( rll_i != rtmp ) {
244 register UChar rtmp2;
245 ryy_j++;
246 rtmp2 = rtmp;
247 rtmp = *ryy_j;
248 *ryy_j = rtmp2;
249 };
250 yy[0] = rtmp;
251 j = ryy_j - &(yy[0]);
252 mtfv[wr] = j+1; wr++; s->mtfFreq[j+1]++;
258 if (zPend > 0) {
259 zPend--;
260 while (True) {
261 if (zPend & 1) {
262 mtfv[wr] = BZ_RUNB; wr++;
263 s->mtfFreq[BZ_RUNB]++;
264 } else {
265 mtfv[wr] = BZ_RUNA; wr++;
266 s->mtfFreq[BZ_RUNA]++;
268 if (zPend < 2) break;
269 zPend = (zPend - 2) / 2;
270 };
271 /*rsc: not used zPend = 0; */
274 mtfv[wr] = EOB; wr++; s->mtfFreq[EOB]++;
276 s->nMTF = wr;
280 /*---------------------------------------------------*/
281 #define BZ_LESSER_ICOST 0
282 #define BZ_GREATER_ICOST 15
284 static
285 void sendMTFValues ( EState* s )
287 Int32 v, t, i, j, gs, ge, totc, bt, bc, iter;
288 Int32 nSelectors, alphaSize, minLen, maxLen, selCtr;
289 Int32 nGroups, nBytes;
291 /*--
292 UChar len [BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
293 is a global since the decoder also needs it.
295 Int32 code[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
296 Int32 rfreq[BZ_N_GROUPS][BZ_MAX_ALPHA_SIZE];
297 are also globals only used in this proc.
298 Made global to keep stack frame size small.
299 --*/
302 UInt16 cost[BZ_N_GROUPS];
303 Int32 fave[BZ_N_GROUPS];
305 UInt16* mtfv = s->mtfv;
307 if (s->verbosity >= 3)
308 VPrintf3( " %d in block, %d after MTF & 1-2 coding, "
309 "%d+2 syms in use\n",
310 s->nblock, s->nMTF, s->nInUse );
312 alphaSize = s->nInUse+2;
313 for (t = 0; t < BZ_N_GROUPS; t++)
314 for (v = 0; v < alphaSize; v++)
315 s->len[t][v] = BZ_GREATER_ICOST;
317 /*--- Decide how many coding tables to use ---*/
318 AssertH ( s->nMTF > 0, 3001 );
319 if (s->nMTF < 200) nGroups = 2; else
320 if (s->nMTF < 600) nGroups = 3; else
321 if (s->nMTF < 1200) nGroups = 4; else
322 if (s->nMTF < 2400) nGroups = 5; else
323 nGroups = 6;
325 /*--- Generate an initial set of coding tables ---*/
327 Int32 nPart, remF, tFreq, aFreq;
329 nPart = nGroups;
330 remF = s->nMTF;
331 gs = 0;
332 while (nPart > 0) {
333 tFreq = remF / nPart;
334 ge = gs-1;
335 aFreq = 0;
336 while (aFreq < tFreq && ge < alphaSize-1) {
337 ge++;
338 aFreq += s->mtfFreq[ge];
341 if (ge > gs
342 && nPart != nGroups && nPart != 1
343 && ((nGroups-nPart) % 2 == 1)) {
344 aFreq -= s->mtfFreq[ge];
345 ge--;
348 if (s->verbosity >= 3)
349 VPrintf5( " initial group %d, [%d .. %d], "
350 "has %d syms (%4.1f%%)\n",
351 nPart, gs, ge, aFreq,
352 (100.0 * (float)aFreq) / (float)(s->nMTF) );
354 for (v = 0; v < alphaSize; v++)
355 if (v >= gs && v <= ge)
356 s->len[nPart-1][v] = BZ_LESSER_ICOST; else
357 s->len[nPart-1][v] = BZ_GREATER_ICOST;
359 nPart--;
360 gs = ge+1;
361 remF -= aFreq;
365 /*---
366 Iterate up to BZ_N_ITERS times to improve the tables.
367 ---*/
368 nSelectors = 40000; /* shut up some compilers' warnings about used and not set */
370 for (iter = 0; iter < BZ_N_ITERS; iter++) {
372 for (t = 0; t < nGroups; t++) fave[t] = 0;
374 for (t = 0; t < nGroups; t++)
375 for (v = 0; v < alphaSize; v++)
376 s->rfreq[t][v] = 0;
378 /*---
379 Set up an auxiliary length table which is used to fast-track
380 the common case (nGroups == 6).
381 ---*/
382 if (nGroups == 6) {
383 for (v = 0; v < alphaSize; v++) {
384 s->len_pack[v][0] = (s->len[1][v] << 16) | s->len[0][v];
385 s->len_pack[v][1] = (s->len[3][v] << 16) | s->len[2][v];
386 s->len_pack[v][2] = (s->len[5][v] << 16) | s->len[4][v];
390 nSelectors = 0;
391 totc = 0;
392 gs = 0;
393 while (True) {
395 /*--- Set group start & end marks. --*/
396 if (gs >= s->nMTF) break;
397 ge = gs + BZ_G_SIZE - 1;
398 if (ge >= s->nMTF) ge = s->nMTF-1;
400 /*--
401 Calculate the cost of this group as coded
402 by each of the coding tables.
403 --*/
404 for (t = 0; t < nGroups; t++) cost[t] = 0;
406 if (nGroups == 6 && 50 == ge-gs+1) {
407 /*--- fast track the common case ---*/
408 register UInt32 cost01, cost23, cost45;
409 register UInt16 icv;
410 cost01 = cost23 = cost45 = 0;
412 # define BZ_ITER(nn) \
413 icv = mtfv[gs+(nn)]; \
414 cost01 += s->len_pack[icv][0]; \
415 cost23 += s->len_pack[icv][1]; \
416 cost45 += s->len_pack[icv][2]; \
418 BZ_ITER(0); BZ_ITER(1); BZ_ITER(2); BZ_ITER(3); BZ_ITER(4);
419 BZ_ITER(5); BZ_ITER(6); BZ_ITER(7); BZ_ITER(8); BZ_ITER(9);
420 BZ_ITER(10); BZ_ITER(11); BZ_ITER(12); BZ_ITER(13); BZ_ITER(14);
421 BZ_ITER(15); BZ_ITER(16); BZ_ITER(17); BZ_ITER(18); BZ_ITER(19);
422 BZ_ITER(20); BZ_ITER(21); BZ_ITER(22); BZ_ITER(23); BZ_ITER(24);
423 BZ_ITER(25); BZ_ITER(26); BZ_ITER(27); BZ_ITER(28); BZ_ITER(29);
424 BZ_ITER(30); BZ_ITER(31); BZ_ITER(32); BZ_ITER(33); BZ_ITER(34);
425 BZ_ITER(35); BZ_ITER(36); BZ_ITER(37); BZ_ITER(38); BZ_ITER(39);
426 BZ_ITER(40); BZ_ITER(41); BZ_ITER(42); BZ_ITER(43); BZ_ITER(44);
427 BZ_ITER(45); BZ_ITER(46); BZ_ITER(47); BZ_ITER(48); BZ_ITER(49);
429 # undef BZ_ITER
431 cost[0] = cost01 & 0xffff; cost[1] = cost01 >> 16;
432 cost[2] = cost23 & 0xffff; cost[3] = cost23 >> 16;
433 cost[4] = cost45 & 0xffff; cost[5] = cost45 >> 16;
435 } else {
436 /*--- slow version which correctly handles all situations ---*/
437 for (i = gs; i <= ge; i++) {
438 UInt16 icv = mtfv[i];
439 for (t = 0; t < nGroups; t++) cost[t] += s->len[t][icv];
443 /*--
444 Find the coding table which is best for this group,
445 and record its identity in the selector table.
446 --*/
447 bc = 999999999; bt = -1;
448 for (t = 0; t < nGroups; t++)
449 if (cost[t] < bc) { bc = cost[t]; bt = t; };
450 totc += bc;
451 fave[bt]++;
452 s->selector[nSelectors] = bt;
453 nSelectors++;
455 /*--
456 Increment the symbol frequencies for the selected table.
457 --*/
458 if (nGroups == 6 && 50 == ge-gs+1) {
459 /*--- fast track the common case ---*/
461 # define BZ_ITUR(nn) s->rfreq[bt][ mtfv[gs+(nn)] ]++
463 BZ_ITUR(0); BZ_ITUR(1); BZ_ITUR(2); BZ_ITUR(3); BZ_ITUR(4);
464 BZ_ITUR(5); BZ_ITUR(6); BZ_ITUR(7); BZ_ITUR(8); BZ_ITUR(9);
465 BZ_ITUR(10); BZ_ITUR(11); BZ_ITUR(12); BZ_ITUR(13); BZ_ITUR(14);
466 BZ_ITUR(15); BZ_ITUR(16); BZ_ITUR(17); BZ_ITUR(18); BZ_ITUR(19);
467 BZ_ITUR(20); BZ_ITUR(21); BZ_ITUR(22); BZ_ITUR(23); BZ_ITUR(24);
468 BZ_ITUR(25); BZ_ITUR(26); BZ_ITUR(27); BZ_ITUR(28); BZ_ITUR(29);
469 BZ_ITUR(30); BZ_ITUR(31); BZ_ITUR(32); BZ_ITUR(33); BZ_ITUR(34);
470 BZ_ITUR(35); BZ_ITUR(36); BZ_ITUR(37); BZ_ITUR(38); BZ_ITUR(39);
471 BZ_ITUR(40); BZ_ITUR(41); BZ_ITUR(42); BZ_ITUR(43); BZ_ITUR(44);
472 BZ_ITUR(45); BZ_ITUR(46); BZ_ITUR(47); BZ_ITUR(48); BZ_ITUR(49);
474 # undef BZ_ITUR
476 } else {
477 /*--- slow version which correctly handles all situations ---*/
478 for (i = gs; i <= ge; i++)
479 s->rfreq[bt][ mtfv[i] ]++;
482 gs = ge+1;
484 if (s->verbosity >= 3) {
485 VPrintf2 ( " pass %d: size is %d, grp uses are ",
486 iter+1, totc/8 );
487 for (t = 0; t < nGroups; t++)
488 VPrintf1 ( "%d ", fave[t] );
489 VPrintf0 ( "\n" );
492 /*--
493 Recompute the tables based on the accumulated frequencies.
494 --*/
495 for (t = 0; t < nGroups; t++)
496 BZ2_hbMakeCodeLengths ( &(s->len[t][0]), &(s->rfreq[t][0]),
497 alphaSize, 20 );
501 AssertH( nGroups < 8, 3002 );
502 AssertH( nSelectors < 32768 &&
503 nSelectors <= (2 + (900000 / BZ_G_SIZE)),
504 3003 );
507 /*--- Compute MTF values for the selectors. ---*/
509 UChar pos[BZ_N_GROUPS], ll_i, tmp2, tmp;
510 for (i = 0; i < nGroups; i++) pos[i] = i;
511 for (i = 0; i < nSelectors; i++) {
512 ll_i = s->selector[i];
513 j = 0;
514 tmp = pos[j];
515 while ( ll_i != tmp ) {
516 j++;
517 tmp2 = tmp;
518 tmp = pos[j];
519 pos[j] = tmp2;
520 };
521 pos[0] = tmp;
522 s->selectorMtf[i] = j;
524 };
526 /*--- Assign actual codes for the tables. --*/
527 for (t = 0; t < nGroups; t++) {
528 minLen = 32;
529 maxLen = 0;
530 for (i = 0; i < alphaSize; i++) {
531 if (s->len[t][i] > maxLen) maxLen = s->len[t][i];
532 if (s->len[t][i] < minLen) minLen = s->len[t][i];
534 AssertH ( !(maxLen > 20), 3004 );
535 AssertH ( !(minLen < 1), 3005 );
536 BZ2_hbAssignCodes ( &(s->code[t][0]), &(s->len[t][0]),
537 minLen, maxLen, alphaSize );
540 /*--- Transmit the mapping table. ---*/
542 Bool inUse16[16];
543 for (i = 0; i < 16; i++) {
544 inUse16[i] = False;
545 for (j = 0; j < 16; j++)
546 if (s->inUse[i * 16 + j]) inUse16[i] = True;
549 nBytes = s->numZ;
550 for (i = 0; i < 16; i++)
551 if (inUse16[i]) bsW(s,1,1); else bsW(s,1,0);
553 for (i = 0; i < 16; i++)
554 if (inUse16[i])
555 for (j = 0; j < 16; j++) {
556 if (s->inUse[i * 16 + j]) bsW(s,1,1); else bsW(s,1,0);
559 if (s->verbosity >= 3)
560 VPrintf1( " bytes: mapping %d, ", s->numZ-nBytes );
563 /*--- Now the selectors. ---*/
564 nBytes = s->numZ;
565 bsW ( s, 3, nGroups );
566 bsW ( s, 15, nSelectors );
567 for (i = 0; i < nSelectors; i++) {
568 for (j = 0; j < s->selectorMtf[i]; j++) bsW(s,1,1);
569 bsW(s,1,0);
571 if (s->verbosity >= 3)
572 VPrintf1( "selectors %d, ", s->numZ-nBytes );
574 /*--- Now the coding tables. ---*/
575 nBytes = s->numZ;
577 for (t = 0; t < nGroups; t++) {
578 Int32 curr = s->len[t][0];
579 bsW ( s, 5, curr );
580 for (i = 0; i < alphaSize; i++) {
581 while (curr < s->len[t][i]) { bsW(s,2,2); curr++; /* 10 */ };
582 while (curr > s->len[t][i]) { bsW(s,2,3); curr--; /* 11 */ };
583 bsW ( s, 1, 0 );
587 if (s->verbosity >= 3)
588 VPrintf1 ( "code lengths %d, ", s->numZ-nBytes );
590 /*--- And finally, the block data proper ---*/
591 nBytes = s->numZ;
592 selCtr = 0;
593 gs = 0;
594 while (True) {
595 if (gs >= s->nMTF) break;
596 ge = gs + BZ_G_SIZE - 1;
597 if (ge >= s->nMTF) ge = s->nMTF-1;
598 AssertH ( s->selector[selCtr] < nGroups, 3006 );
600 if (nGroups == 6 && 50 == ge-gs+1) {
601 /*--- fast track the common case ---*/
602 UInt16 mtfv_i;
603 UChar* s_len_sel_selCtr
604 = &(s->len[s->selector[selCtr]][0]);
605 Int32* s_code_sel_selCtr
606 = &(s->code[s->selector[selCtr]][0]);
608 # define BZ_ITAH(nn) \
609 mtfv_i = mtfv[gs+(nn)]; \
610 bsW ( s, \
611 s_len_sel_selCtr[mtfv_i], \
612 s_code_sel_selCtr[mtfv_i] )
614 BZ_ITAH(0); BZ_ITAH(1); BZ_ITAH(2); BZ_ITAH(3); BZ_ITAH(4);
615 BZ_ITAH(5); BZ_ITAH(6); BZ_ITAH(7); BZ_ITAH(8); BZ_ITAH(9);
616 BZ_ITAH(10); BZ_ITAH(11); BZ_ITAH(12); BZ_ITAH(13); BZ_ITAH(14);
617 BZ_ITAH(15); BZ_ITAH(16); BZ_ITAH(17); BZ_ITAH(18); BZ_ITAH(19);
618 BZ_ITAH(20); BZ_ITAH(21); BZ_ITAH(22); BZ_ITAH(23); BZ_ITAH(24);
619 BZ_ITAH(25); BZ_ITAH(26); BZ_ITAH(27); BZ_ITAH(28); BZ_ITAH(29);
620 BZ_ITAH(30); BZ_ITAH(31); BZ_ITAH(32); BZ_ITAH(33); BZ_ITAH(34);
621 BZ_ITAH(35); BZ_ITAH(36); BZ_ITAH(37); BZ_ITAH(38); BZ_ITAH(39);
622 BZ_ITAH(40); BZ_ITAH(41); BZ_ITAH(42); BZ_ITAH(43); BZ_ITAH(44);
623 BZ_ITAH(45); BZ_ITAH(46); BZ_ITAH(47); BZ_ITAH(48); BZ_ITAH(49);
625 # undef BZ_ITAH
627 } else {
628 /*--- slow version which correctly handles all situations ---*/
629 for (i = gs; i <= ge; i++) {
630 bsW ( s,
631 s->len [s->selector[selCtr]] [mtfv[i]],
632 s->code [s->selector[selCtr]] [mtfv[i]] );
637 gs = ge+1;
638 selCtr++;
640 AssertH( selCtr == nSelectors, 3007 );
642 if (s->verbosity >= 3)
643 VPrintf1( "codes %d\n", s->numZ-nBytes );
647 /*---------------------------------------------------*/
648 void BZ2_compressBlock ( EState* s, Bool is_last_block )
650 if (s->nblock > 0) {
652 BZ_FINALISE_CRC ( s->blockCRC );
653 s->combinedCRC = (s->combinedCRC << 1) | (s->combinedCRC >> 31);
654 s->combinedCRC ^= s->blockCRC;
655 if (s->blockNo > 1) s->numZ = 0;
657 if (s->verbosity >= 2)
658 VPrintf4( " block %d: crc = 0x%8x, "
659 "combined CRC = 0x%8x, size = %d\n",
660 s->blockNo, s->blockCRC, s->combinedCRC, s->nblock );
662 BZ2_blockSort ( s );
665 s->zbits = (UChar*) (&((UChar*)s->arr2)[s->nblock]);
667 /*-- If this is the first block, create the stream header. --*/
668 if (s->blockNo == 1) {
669 BZ2_bsInitWrite ( s );
670 bsPutUChar ( s, 'B' );
671 bsPutUChar ( s, 'Z' );
672 bsPutUChar ( s, 'h' );
673 bsPutUChar ( s, (UChar)('0' + s->blockSize100k) );
676 if (s->nblock > 0) {
678 bsPutUChar ( s, 0x31 ); bsPutUChar ( s, 0x41 );
679 bsPutUChar ( s, 0x59 ); bsPutUChar ( s, 0x26 );
680 bsPutUChar ( s, 0x53 ); bsPutUChar ( s, 0x59 );
682 /*-- Now the block's CRC, so it is in a known place. --*/
683 bsPutUInt32 ( s, s->blockCRC );
685 /*--
686 Now a single bit indicating (non-)randomisation.
687 As of version 0.9.5, we use a better sorting algorithm
688 which makes randomisation unnecessary. So always set
689 the randomised bit to 'no'. Of course, the decoder
690 still needs to be able to handle randomised blocks
691 so as to maintain backwards compatibility with
692 older versions of bzip2.
693 --*/
694 bsW(s,1,0);
696 bsW ( s, 24, s->origPtr );
697 generateMTFValues ( s );
698 sendMTFValues ( s );
702 /*-- If this is the last block, add the stream trailer. --*/
703 if (is_last_block) {
705 bsPutUChar ( s, 0x17 ); bsPutUChar ( s, 0x72 );
706 bsPutUChar ( s, 0x45 ); bsPutUChar ( s, 0x38 );
707 bsPutUChar ( s, 0x50 ); bsPutUChar ( s, 0x90 );
708 bsPutUInt32 ( s, s->combinedCRC );
709 if (s->verbosity >= 2)
710 VPrintf1( " final combined CRC = 0x%x\n ", s->combinedCRC );
711 bsFinishWrite ( s );
/*-------------------------------------------------------------*/
/*--- end                                        compress.c ---*/
/*-------------------------------------------------------------*/