/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This program constructs binary patches for images -- such as boot.img and recovery.img -- that
 * consist primarily of large chunks of gzipped data interspersed with uncompressed data. Doing a
 * naive bsdiff of these files is not useful because small changes in the data lead to large
 * changes in the compressed bitstream; bsdiff patches of gzipped data are typically as large as
 * the data itself.
 *
 * To patch these usefully, we break the source and target images up into chunks of two types:
 * "normal" and "gzip". Normal chunks are simply patched using a plain bsdiff. Gzip chunks are
 * first expanded, then a bsdiff is applied to the uncompressed data, then the patched data is
 * gzipped using the same encoder parameters. Patched chunks are concatenated together to create
 * the output file; the output image should be *exactly* the same series of bytes as the target
 * image used originally to generate the patch.
 *
 * To work well with this tool, the gzipped sections of the target image must have been generated
 * using the same deflate encoder that is available in applypatch, namely, the one in the zlib
 * library. In practice this means that images should be compressed using the "minigzip" tool
 * included in the zlib distribution, not the GNU gzip program.
 *
 * An "imgdiff" patch consists of a header describing the chunk structure of the file and any
 * encoding parameters needed for the gzipped chunks, followed by N bsdiff patches, one per chunk.
 *
 * To generate a diff, the source and target must either both be well-formed zip archives (zip
 * mode), or both be image files with the same "chunk" structure: that is, the same number of
 * gzipped and normal chunks in the same order. Android boot and recovery images currently
 * consist of five chunks: a small normal header, a gzipped kernel, a small normal section, a
 * gzipped ramdisk, and finally a small normal footer.
 *
 * Caveats: we locate gzipped sections within the source and target images by searching for the
 * byte sequence 1f8b0800: 1f8b is the gzip magic number; 08 specifies the "deflate" encoding
 * [the only encoding supported by the gzip standard]; and 00 is the flags byte. We do not
 * currently support any extra header fields (which would be indicated by a nonzero flags byte).
 * We also don't handle the case when that byte sequence appears spuriously in the file. (Note
 * that it would have to occur spuriously within a normal chunk to be a problem.)
 *
 * The imgdiff patch header looks like this:
 *
 *    "IMGDIFF2"                  (8)   [magic number and version]
 *    chunk count                 (4)
 *    for each chunk:
 *        chunk type              (4)   [CHUNK_{NORMAL, GZIP, DEFLATE, RAW}]
 *        if chunk type == CHUNK_NORMAL:
 *           source start         (8)
 *           source len           (8)
 *           bsdiff patch offset  (8)   [from start of patch file]
 *        if chunk type == CHUNK_GZIP:  (version 1 only)
 *           source start         (8)
 *           source len           (8)
 *           bsdiff patch offset  (8)   [from start of patch file]
 *           source expanded len  (8)   [size of uncompressed source]
 *           target expected len  (8)   [size of uncompressed target]
 *           gzip level           (4)
 *           method               (4)
 *           windowBits           (4)
 *           memLevel             (4)
 *           strategy             (4)
 *           gzip header len      (4)
 *           gzip header          (gzip header len)
 *           gzip footer          (8)
 *        if chunk type == CHUNK_DEFLATE:  (version 2 only)
 *           source start         (8)
 *           source len           (8)
 *           bsdiff patch offset  (8)   [from start of patch file]
 *           source expanded len  (8)   [size of uncompressed source]
 *           target expected len  (8)   [size of uncompressed target]
 *           gzip level           (4)
 *           method               (4)
 *           windowBits           (4)
 *           memLevel             (4)
 *           strategy             (4)
 *        if chunk type == CHUNK_RAW:  (version 2 only)
 *           target len           (4)
 *           data                 (target len)
 *
 * All integers are little-endian. "source start" and "source len" specify the section of the
 * input image that comprises this chunk, including the gzip header and footer for gzip chunks.
 * "source expanded len" is the size of the uncompressed source data. "target expected len" is the
 * size of the uncompressed data after applying the bsdiff patch. The next five parameters
 * specify the zlib parameters to be used when compressing the patched data, and (for gzip
 * chunks) the last three specify the header and footer to be wrapped around the compressed data
 * to create the output chunk (so that header contents like the timestamp are recreated exactly).
 *
 * After the header there are 'chunk count' bsdiff patches; the offset of each from the beginning
 * of the file is specified in the header.
 *
 * This tool can take an optional file of "bonus data". This is an extra file of data that is
 * appended to chunk #1 after it is compressed (it must be a CHUNK_DEFLATE chunk). The same file
 * must be available (and passed to applypatch with -b) when applying the patch. This is used to
 * reduce the size of recovery-from-boot patches by combining the boot image with recovery ramdisk
 * information that is stored on the system partition.
 *
 * When generating the patch between two zip files, this tool has an option "--block-limit" to
 * split the large source/target files into several pairs of pieces, with each piece containing
 * at most *limit* blocks. When this option is used, the split info must also be written to the
 * file path specified by "--split-info".
 *
 * Format of the split info file:
 *   2                                      [version of imgdiff]
 *   n                                      [count of split pieces]
 *   <patch_size>, <tgt_size>, <src_range>  [size and ranges for split piece #1]
 *   ...
 *   <patch_size>, <tgt_size>, <src_range>  [size and ranges for split piece #n]
 *
 * To split a pair of large zip files, we walk through the chunks in the target zip and search
 * for each entry_name in the source zip. If the entry_name is non-empty and a matching entry in
 * the source is found, we add the source entry to the current split source image; otherwise we
 * skip this chunk and later do a bsdiff between all the skipped chunks and the whole split
 * source image. We move on to the next pair of pieces once the size of the split source image
 * reaches the block limit.
 *
 * After the split, the target pieces are contiguous and block aligned, while the source pieces
 * are mutually exclusive. Some of the source blocks may not be used if there's no matching
 * entry_name in the target; as a result, they won't be included in any of the split source
 * images. We then generate patches between each pair of split images; in particular, the
 * unmatched chunks in the split target are diffed against the entire split source image.
 *
 * For example:
 *   Input:       [src_image,  tgt_image]
 *   Split:       [src-0, tgt-0; src-1, tgt-1; src-2, tgt-2]
 *   Diff:        [patch-0; patch-1; patch-2]
 *
 *   Patch:       [(src-0, patch-0) = tgt-0; (src-1, patch-1) = tgt-1; (src-2, patch-2) = tgt-2]
 *   Concatenate: [tgt-0 + tgt-1 + tgt-2 = tgt_image]
 */
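
// A minimal sketch (not built as part of this tool) of how a consumer could read the version and
// chunk count back out of a patch header in the format described above. The struct and function
// names here are hypothetical; only the byte layout ("IMGDIFF#" magic followed by a
// little-endian 4-byte chunk count) comes from the format notes in the comment.
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct ImgdiffHeaderInfo {
  int version;          // 1 or 2, taken from the last byte of "IMGDIFF#".
  uint32_t num_chunks;  // little-endian chunk count stored at offset 8.
};

static bool ReadImgdiffHeader(const uint8_t* patch, size_t size, ImgdiffHeaderInfo* info) {
  if (size < 12 || memcmp(patch, "IMGDIFF", 7) != 0) return false;
  if (patch[7] != '1' && patch[7] != '2') return false;
  info->version = patch[7] - '0';
  // Assemble the little-endian chunk count byte by byte.
  info->num_chunks = patch[8] | (patch[9] << 8) | (patch[10] << 16) |
                     (static_cast<uint32_t>(patch[11]) << 24);
  return true;
}
#endif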

#include "applypatch/imgdiff.h"

#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <algorithm>
#include <string>
#include <vector>

#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/memory.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include <bsdiff/bsdiff.h>
#include <ziparchive/zip_archive.h>
#include <zlib.h>

#include "applypatch/imgdiff_image.h"
#include "otautil/rangeset.h"

using android::base::get_unaligned;

static constexpr size_t VERSION = 2;

// We assume the header "IMGDIFF#" is 8 bytes.
static_assert(VERSION <= 9, "VERSION occupies more than one byte");

static constexpr size_t BLOCK_SIZE = 4096;
static constexpr size_t BUFFER_SIZE = 0x8000;

// If we use this function to write the offset and length (type size_t), their values should not
// exceed 2^63, as the sign bit would otherwise be lost in the cast.
static inline bool Write8(int fd, int64_t value) {
  return android::base::WriteFully(fd, &value, sizeof(int64_t));
}

// Similarly, the value should not exceed 2^31 if we are casting from size_t (e.g. the target
// chunk size).
static inline bool Write4(int fd, int32_t value) {
  return android::base::WriteFully(fd, &value, sizeof(int32_t));
}
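
// Illustrative only: the kind of range check a caller could perform before narrowing a size_t
// into the signed fields written by Write4()/Write8() above. This helper is a sketch (hence the
// "#if 0") and is not used by imgdiff itself.
#if 0
static bool CheckedWrite4(int fd, size_t value) {
  if (value > static_cast<size_t>(INT32_MAX)) {
    LOG(ERROR) << "value " << value << " does not fit in a signed 32-bit header field";
    return false;
  }
  return Write4(fd, static_cast<int32_t>(value));
}
#endif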

// Trim the head or tail to align with the block size. Return false if the chunk has nothing left
// after alignment.
static bool AlignHead(size_t* start, size_t* length) {
  size_t residual = (*start % BLOCK_SIZE == 0) ? 0 : BLOCK_SIZE - *start % BLOCK_SIZE;

  if (*length <= residual) {
    *length = 0;
    return false;
  }

  // Trim the data in the beginning.
  *start += residual;
  *length -= residual;
  return true;
}

static bool AlignTail(size_t* start, size_t* length) {
  size_t residual = (*start + *length) % BLOCK_SIZE;
  if (*length <= residual) {
    *length = 0;
    return false;
  }

  // Trim the data in the end.
  *length -= residual;
  return true;
}
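
// Worked example for the two helpers above, with BLOCK_SIZE == 4096 (numbers invented for
// illustration):
//   AlignHead(start = 5000, length = 9000) -> start = 8192, length = 5808
//     (residual = 4096 - 5000 % 4096 = 3192 bytes trimmed from the head)
//   AlignTail(start = 8192, length = 5808) -> start = 8192, length = 4096
//     (residual = (8192 + 5808) % 4096 = 1712 bytes trimmed from the tail)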

// Remove the used blocks from the source chunk to make sure the source ranges are mutually
// exclusive after the split. Return false if we fail to get non-overlapping ranges; in such a
// case, we'll skip the entire source chunk.
static bool RemoveUsedBlocks(size_t* start, size_t* length, const SortedRangeSet& used_ranges) {
  if (!used_ranges.Overlaps(*start, *length)) {
    return true;
  }

  // TODO find the largest non-overlapping chunk.
  LOG(INFO) << "Removing block " << used_ranges.ToString() << " from " << *start << " - "
            << *start + *length - 1;

  // If there's no duplicate entry name, we should only overlap in the head or tail block. Try to
  // trim both blocks. Skip this source chunk in case it still overlaps with the used ranges.
  if (AlignHead(start, length) && !used_ranges.Overlaps(*start, *length)) {
    return true;
  }
  if (AlignTail(start, length) && !used_ranges.Overlaps(*start, *length)) {
    return true;
  }

  LOG(WARNING) << "Failed to remove the overlapped block ranges; skip the source";
  return false;
}
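
// For example (numbers invented for illustration), if used_ranges covers bytes [0, 8192) (blocks
// 0 and 1) and the incoming chunk is start = 8000, length = 6000, the overlap is confined to the
// head block: AlignHead() trims the chunk to start = 8192, length = 5808, which no longer
// overlaps, so the trimmed chunk is kept.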

static const struct option OPTIONS[] = {
  { "zip-mode", no_argument, nullptr, 'z' },
  { "bonus-file", required_argument, nullptr, 'b' },
  { "block-limit", required_argument, nullptr, 0 },
  { "debug-dir", required_argument, nullptr, 0 },
  { "split-info", required_argument, nullptr, 0 },
  { "verbose", no_argument, nullptr, 'v' },
  { nullptr, 0, nullptr, 0 },
};
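
// Example invocations (file names and the block limit are placeholders):
//   imgdiff old_boot.img new_boot.img boot.img.p
//   imgdiff -z --block-limit 10 --split-info split_info.txt old.zip new.zip zip_patch.p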

ImageChunk::ImageChunk(int type, size_t start, const std::vector<uint8_t>* file_content,
                       size_t raw_data_len, std::string entry_name)
    : type_(type),
      start_(start),
      input_file_ptr_(file_content),
      raw_data_len_(raw_data_len),
      compress_level_(6),
      entry_name_(std::move(entry_name)) {
  CHECK(file_content != nullptr) << "input file container can't be nullptr";
}

const uint8_t* ImageChunk::GetRawData() const {
  CHECK_LE(start_ + raw_data_len_, input_file_ptr_->size());
  return input_file_ptr_->data() + start_;
}

const uint8_t* ImageChunk::DataForPatch() const {
  if (type_ == CHUNK_DEFLATE) {
    return uncompressed_data_.data();
  }
  return GetRawData();
}

size_t ImageChunk::DataLengthForPatch() const {
  if (type_ == CHUNK_DEFLATE) {
    return uncompressed_data_.size();
  }
  return raw_data_len_;
}

void ImageChunk::Dump(size_t index) const {
  LOG(INFO) << "chunk: " << index << ", type: " << type_ << ", start: " << start_
            << ", len: " << DataLengthForPatch() << ", name: " << entry_name_;
}

bool ImageChunk::operator==(const ImageChunk& other) const {
  if (type_ != other.type_) {
    return false;
  }
  return (raw_data_len_ == other.raw_data_len_ &&
          memcmp(GetRawData(), other.GetRawData(), raw_data_len_) == 0);
}

void ImageChunk::SetUncompressedData(std::vector<uint8_t> data) {
  uncompressed_data_ = std::move(data);
}

bool ImageChunk::SetBonusData(const std::vector<uint8_t>& bonus_data) {
  if (type_ != CHUNK_DEFLATE) {
    return false;
  }
  uncompressed_data_.insert(uncompressed_data_.end(), bonus_data.begin(), bonus_data.end());
  return true;
}

void ImageChunk::ChangeDeflateChunkToNormal() {
  if (type_ != CHUNK_DEFLATE) return;
  type_ = CHUNK_NORMAL;
  // No need to clear the entry name.
  uncompressed_data_.clear();
}

bool ImageChunk::IsAdjacentNormal(const ImageChunk& other) const {
  if (type_ != CHUNK_NORMAL || other.type_ != CHUNK_NORMAL) {
    return false;
  }
  return (other.start_ == start_ + raw_data_len_);
}

void ImageChunk::MergeAdjacentNormal(const ImageChunk& other) {
  CHECK(IsAdjacentNormal(other));
  raw_data_len_ = raw_data_len_ + other.raw_data_len_;
}

bool ImageChunk::MakePatch(const ImageChunk& tgt, const ImageChunk& src,
                           std::vector<uint8_t>* patch_data,
                           bsdiff::SuffixArrayIndexInterface** bsdiff_cache) {
#if defined(__ANDROID__)
  char ptemp[] = "/data/local/tmp/imgdiff-patch-XXXXXX";
#else
  char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
#endif

  int fd = mkstemp(ptemp);
  if (fd == -1) {
    PLOG(ERROR) << "MakePatch failed to create a temporary file";
    return false;
  }
  close(fd);

  int r = bsdiff::bsdiff(src.DataForPatch(), src.DataLengthForPatch(), tgt.DataForPatch(),
                         tgt.DataLengthForPatch(), ptemp, bsdiff_cache);
  if (r != 0) {
    LOG(ERROR) << "bsdiff() failed: " << r;
    unlink(ptemp);
    return false;
  }

  android::base::unique_fd patch_fd(open(ptemp, O_RDONLY));
  if (patch_fd == -1) {
    PLOG(ERROR) << "Failed to open " << ptemp;
    unlink(ptemp);
    return false;
  }
  struct stat st;
  if (fstat(patch_fd, &st) != 0) {
    PLOG(ERROR) << "Failed to stat patch file " << ptemp;
    unlink(ptemp);
    return false;
  }

  size_t sz = static_cast<size_t>(st.st_size);

  patch_data->resize(sz);
  if (!android::base::ReadFully(patch_fd, patch_data->data(), sz)) {
    PLOG(ERROR) << "Failed to read " << ptemp;
    unlink(ptemp);
    return false;
  }

  unlink(ptemp);

  return true;
}

bool ImageChunk::ReconstructDeflateChunk() {
  if (type_ != CHUNK_DEFLATE) {
    LOG(ERROR) << "Attempted to reconstruct non-deflate chunk";
    return false;
  }

  // We only check two combinations of encoder parameters: level 6 (the default) and level 9
  // (the maximum).
  for (int level = 6; level <= 9; level += 3) {
    if (TryReconstruction(level)) {
      compress_level_ = level;
      return true;
    }
  }

  return false;
}

/*
 * Takes the uncompressed data stored in the chunk, compresses it using the zlib parameters stored
 * in the chunk, and checks that it matches exactly the compressed data we started with (also
 * stored in the chunk).
 */
bool ImageChunk::TryReconstruction(int level) {
  z_stream strm;
  strm.zalloc = Z_NULL;
  strm.zfree = Z_NULL;
  strm.opaque = Z_NULL;
  strm.avail_in = uncompressed_data_.size();
  strm.next_in = uncompressed_data_.data();
  int ret = deflateInit2(&strm, level, METHOD, WINDOWBITS, MEMLEVEL, STRATEGY);
  if (ret < 0) {
    LOG(ERROR) << "Failed to initialize deflate: " << ret;
    return false;
  }

  std::vector<uint8_t> buffer(BUFFER_SIZE);
  size_t offset = 0;
  do {
    strm.avail_out = buffer.size();
    strm.next_out = buffer.data();
    ret = deflate(&strm, Z_FINISH);
    if (ret < 0) {
      LOG(ERROR) << "Failed to deflate: " << ret;
      deflateEnd(&strm);
      return false;
    }

    size_t compressed_size = buffer.size() - strm.avail_out;
    if (memcmp(buffer.data(), input_file_ptr_->data() + start_ + offset, compressed_size) != 0) {
      // Mismatch; the data isn't the same.
      deflateEnd(&strm);
      return false;
    }
    offset += compressed_size;
  } while (ret != Z_STREAM_END);
  deflateEnd(&strm);

  if (offset != raw_data_len_) {
    // Mismatch; we ran out of data before we should have.
    return false;
  }
  return true;
}

PatchChunk::PatchChunk(const ImageChunk& tgt, const ImageChunk& src, std::vector<uint8_t> data)
    : type_(tgt.GetType()),
      source_start_(src.GetStartOffset()),
      source_len_(src.GetRawDataLength()),
      source_uncompressed_len_(src.DataLengthForPatch()),
      target_start_(tgt.GetStartOffset()),
      target_len_(tgt.GetRawDataLength()),
      target_uncompressed_len_(tgt.DataLengthForPatch()),
      target_compress_level_(tgt.GetCompressLevel()),
      data_(std::move(data)) {}

// Construct a CHUNK_RAW patch from the target data directly.
PatchChunk::PatchChunk(const ImageChunk& tgt)
    : type_(CHUNK_RAW),
      source_start_(0),
      source_len_(0),
      source_uncompressed_len_(0),
      target_start_(tgt.GetStartOffset()),
      target_len_(tgt.GetRawDataLength()),
      target_uncompressed_len_(tgt.DataLengthForPatch()),
      target_compress_level_(tgt.GetCompressLevel()),
      data_(tgt.GetRawData(), tgt.GetRawData() + tgt.GetRawDataLength()) {}

// Return true if the target data should be stored raw: either it is smaller than the patch that
// would replace it, or it is a small normal chunk (160 bytes or fewer) that isn't worth patching.
bool PatchChunk::RawDataIsSmaller(const ImageChunk& tgt, size_t patch_size) {
  size_t target_len = tgt.GetRawDataLength();
  return target_len < patch_size || (tgt.GetType() == CHUNK_NORMAL && target_len <= 160);
}

void PatchChunk::UpdateSourceOffset(const SortedRangeSet& src_range) {
  if (type_ == CHUNK_DEFLATE) {
    source_start_ = src_range.GetOffsetInRangeSet(source_start_);
  }
}

// Header size:
//   header_type    4 bytes
//   CHUNK_NORMAL   8*3 = 24 bytes
//   CHUNK_DEFLATE  8*5 + 4*5 = 60 bytes
//   CHUNK_RAW      4 bytes + patch_size
size_t PatchChunk::GetHeaderSize() const {
  switch (type_) {
    case CHUNK_NORMAL:
      return 4 + 8 * 3;
    case CHUNK_DEFLATE:
      return 4 + 8 * 5 + 4 * 5;
    case CHUNK_RAW:
      return 4 + 4 + data_.size();
    default:
      CHECK(false) << "unexpected chunk type: " << type_;  // Should not reach here.
      return 0;
  }
}
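
// For example, a patch holding one normal chunk and one deflate chunk carries
// 12 + (4 + 24) + (4 + 60) = 104 bytes of header (the 8-byte magic plus the 4-byte chunk count,
// followed by the per-chunk metadata), so the first bsdiff patch body starts at offset 104.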

// Write the header for this chunk to |fd|. Return the offset at which the next chunk's bsdiff
// patch data will start.
size_t PatchChunk::WriteHeaderToFd(int fd, size_t offset, size_t index) const {
  Write4(fd, type_);
  switch (type_) {
    case CHUNK_NORMAL:
      LOG(INFO) << android::base::StringPrintf("chunk %zu: normal (%10zu, %10zu) %10zu", index,
                                               target_start_, target_len_, data_.size());
      Write8(fd, static_cast<int64_t>(source_start_));
      Write8(fd, static_cast<int64_t>(source_len_));
      Write8(fd, static_cast<int64_t>(offset));
      return offset + data_.size();
    case CHUNK_DEFLATE:
      LOG(INFO) << android::base::StringPrintf("chunk %zu: deflate (%10zu, %10zu) %10zu", index,
                                               target_start_, target_len_, data_.size());
      Write8(fd, static_cast<int64_t>(source_start_));
      Write8(fd, static_cast<int64_t>(source_len_));
      Write8(fd, static_cast<int64_t>(offset));
      Write8(fd, static_cast<int64_t>(source_uncompressed_len_));
      Write8(fd, static_cast<int64_t>(target_uncompressed_len_));
      Write4(fd, target_compress_level_);
      Write4(fd, ImageChunk::METHOD);
      Write4(fd, ImageChunk::WINDOWBITS);
      Write4(fd, ImageChunk::MEMLEVEL);
      Write4(fd, ImageChunk::STRATEGY);
      return offset + data_.size();
    case CHUNK_RAW:
      LOG(INFO) << android::base::StringPrintf("chunk %zu: raw (%10zu, %10zu)", index,
                                               target_start_, target_len_);
      Write4(fd, static_cast<int32_t>(data_.size()));
      if (!android::base::WriteFully(fd, data_.data(), data_.size())) {
        CHECK(false) << "Failed to write " << data_.size() << " bytes patch";
      }
      return offset;
    default:
      CHECK(false) << "unexpected chunk type: " << type_;
      return offset;
  }
}

size_t PatchChunk::PatchSize() const {
  if (type_ == CHUNK_RAW) {
    return GetHeaderSize();
  }
  return GetHeaderSize() + data_.size();
}

// Write the contents of |patch_chunks| to |patch_fd|.
bool PatchChunk::WritePatchDataToFd(const std::vector<PatchChunk>& patch_chunks, int patch_fd) {
  // Figure out how big the imgdiff file header is going to be, so that we can correctly compute
  // the offset of each bsdiff patch within the file.
  size_t total_header_size = 12;
  for (const auto& patch : patch_chunks) {
    total_header_size += patch.GetHeaderSize();
  }

  size_t offset = total_header_size;

  // Write out the headers.
  if (!android::base::WriteStringToFd("IMGDIFF" + std::to_string(VERSION), patch_fd)) {
    PLOG(ERROR) << "Failed to write \"IMGDIFF" << VERSION << "\"";
    return false;
  }

  Write4(patch_fd, static_cast<int32_t>(patch_chunks.size()));
  LOG(INFO) << "Writing " << patch_chunks.size() << " patch headers...";
  for (size_t i = 0; i < patch_chunks.size(); ++i) {
    offset = patch_chunks[i].WriteHeaderToFd(patch_fd, offset, i);
  }

  // Append each chunk's bsdiff patch, in order.
  for (const auto& patch : patch_chunks) {
    if (patch.type_ == CHUNK_RAW) {
      continue;
    }
    if (!android::base::WriteFully(patch_fd, patch.data_.data(), patch.data_.size())) {
      PLOG(ERROR) << "Failed to write " << patch.data_.size() << " bytes patch to patch_fd";
      return false;
    }
  }

  return true;
}

ImageChunk& Image::operator[](size_t i) {
  CHECK_LT(i, chunks_.size());
  return chunks_[i];
}

const ImageChunk& Image::operator[](size_t i) const {
  CHECK_LT(i, chunks_.size());
  return chunks_[i];
}

void Image::MergeAdjacentNormalChunks() {
  size_t merged_last = 0, cur = 0;
  while (cur < chunks_.size()) {
    // Look for normal chunks adjacent to the current one. If such a chunk exists, extend the
    // length of the current normal chunk.
    size_t to_check = cur + 1;
    while (to_check < chunks_.size() && chunks_[cur].IsAdjacentNormal(chunks_[to_check])) {
      chunks_[cur].MergeAdjacentNormal(chunks_[to_check]);
      to_check++;
    }

    if (merged_last != cur) {
      chunks_[merged_last] = std::move(chunks_[cur]);
    }
    merged_last++;
    cur = to_check;
  }
  if (merged_last < chunks_.size()) {
    chunks_.erase(chunks_.begin() + merged_last, chunks_.end());
  }
}
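
// For example, a chunk list [N0 N1 D2 N3 N4 N5] (N = normal, D = deflate, with each normal chunk
// immediately following its predecessor in the file) collapses to [N01, D2, N345]: runs of
// adjacent normal chunks merge, while deflate chunks stay in place so that patch generation can
// still match them individually.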

void Image::DumpChunks() const {
  std::string type = is_source_ ? "source" : "target";
  LOG(INFO) << "Dumping chunks for " << type;
  for (size_t i = 0; i < chunks_.size(); ++i) {
    chunks_[i].Dump(i);
  }
}

bool Image::ReadFile(const std::string& filename, std::vector<uint8_t>* file_content) {
  CHECK(file_content != nullptr);

  android::base::unique_fd fd(open(filename.c_str(), O_RDONLY));
  if (fd == -1) {
    PLOG(ERROR) << "Failed to open " << filename;
    return false;
  }
  struct stat st;
  if (fstat(fd, &st) != 0) {
    PLOG(ERROR) << "Failed to stat " << filename;
    return false;
  }

  size_t sz = static_cast<size_t>(st.st_size);
  file_content->resize(sz);
  if (!android::base::ReadFully(fd, file_content->data(), sz)) {
    PLOG(ERROR) << "Failed to read " << filename;
    return false;
  }
  fd.reset();

  return true;
}

bool ZipModeImage::Initialize(const std::string& filename) {
  if (!ReadFile(filename, &file_content_)) {
    return false;
  }

  // Omit the trailing zeros before we pass the file to the ziparchive handler.
  size_t zipfile_size;
  if (!GetZipFileSize(&zipfile_size)) {
    LOG(ERROR) << "Failed to parse the actual size of " << filename;
    return false;
  }
  ZipArchiveHandle handle;
  int err = OpenArchiveFromMemory(const_cast<uint8_t*>(file_content_.data()), zipfile_size,
                                  filename.c_str(), &handle);
  if (err != 0) {
    LOG(ERROR) << "Failed to open zip file " << filename << ": " << ErrorCodeString(err);
    CloseArchive(handle);
    return false;
  }

  if (!InitializeChunks(filename, handle)) {
    CloseArchive(handle);
    return false;
  }

  CloseArchive(handle);
  return true;
}

// Iterate over the zip entries and compose the image chunks accordingly.
bool ZipModeImage::InitializeChunks(const std::string& filename, ZipArchiveHandle handle) {
  void* cookie;
  int ret = StartIteration(handle, &cookie);
  if (ret != 0) {
    LOG(ERROR) << "Failed to iterate over entries in " << filename << ": " << ErrorCodeString(ret);
    return false;
  }

  // Create a list of deflated zip entries, sorted by offset.
  std::vector<std::pair<std::string, ZipEntry>> temp_entries;
  std::string name;
  ZipEntry entry;
  while ((ret = Next(cookie, &entry, &name)) == 0) {
    if (entry.method == kCompressDeflated || limit_ > 0) {
      temp_entries.emplace_back(name, entry);
    }
  }

  if (ret != -1) {
    LOG(ERROR) << "Error while iterating over zip entries: " << ErrorCodeString(ret);
    return false;
  }
  std::sort(temp_entries.begin(), temp_entries.end(),
            [](auto& entry1, auto& entry2) { return entry1.second.offset < entry2.second.offset; });

  EndIteration(cookie);

  // For source chunks, we don't need to compose chunks for the metadata.
  if (is_source_) {
    for (auto& entry : temp_entries) {
      if (!AddZipEntryToChunks(handle, entry.first, &entry.second)) {
        LOG(ERROR) << "Failed to add " << entry.first << " to source chunks";
        return false;
      }
    }

    // Add the end of the zip file (mainly the central directory) as a normal chunk.
    size_t entries_end = 0;
    if (!temp_entries.empty()) {
      entries_end = static_cast<size_t>(temp_entries.back().second.offset +
                                        temp_entries.back().second.compressed_length);
    }
    CHECK_LT(entries_end, file_content_.size());
    chunks_.emplace_back(CHUNK_NORMAL, entries_end, &file_content_,
                         file_content_.size() - entries_end);

    return true;
  }

  // For target chunks, add the deflate entries as CHUNK_DEFLATE and the contents between two
  // deflate entries as CHUNK_NORMAL.
  size_t pos = 0;
  size_t nextentry = 0;
  while (pos < file_content_.size()) {
    if (nextentry < temp_entries.size() &&
        static_cast<off64_t>(pos) == temp_entries[nextentry].second.offset) {
      // Add the next zip entry.
      std::string entry_name = temp_entries[nextentry].first;
      if (!AddZipEntryToChunks(handle, entry_name, &temp_entries[nextentry].second)) {
        LOG(ERROR) << "Failed to add " << entry_name << " to target chunks";
        return false;
      }

      pos += temp_entries[nextentry].second.compressed_length;
      ++nextentry;
      continue;
    }

    // Use a normal chunk to take all the data up to the start of the next entry.
    size_t raw_data_len;
    if (nextentry < temp_entries.size()) {
      raw_data_len = temp_entries[nextentry].second.offset - pos;
    } else {
      raw_data_len = file_content_.size() - pos;
    }
    chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, raw_data_len);

    pos += raw_data_len;
  }

  return true;
}

bool ZipModeImage::AddZipEntryToChunks(ZipArchiveHandle handle, const std::string& entry_name,
                                       ZipEntry* entry) {
  size_t compressed_len = entry->compressed_length;
  if (compressed_len == 0) return true;

  // Split the entry into several normal chunks if it's too large.
  if (limit_ > 0 && compressed_len > limit_) {
    int count = 0;
    while (compressed_len > 0) {
      size_t length = std::min(limit_, compressed_len);
      std::string name = entry_name + "-" + std::to_string(count);
      chunks_.emplace_back(CHUNK_NORMAL, entry->offset + limit_ * count, &file_content_, length,
                           name);

      count++;
      compressed_len -= length;
    }
  } else if (entry->method == kCompressDeflated) {
    size_t uncompressed_len = entry->uncompressed_length;
    std::vector<uint8_t> uncompressed_data(uncompressed_len);
    int ret = ExtractToMemory(handle, entry, uncompressed_data.data(), uncompressed_len);
    if (ret != 0) {
      LOG(ERROR) << "Failed to extract " << entry_name << " with size " << uncompressed_len << ": "
                 << ErrorCodeString(ret);
      return false;
    }
    ImageChunk curr(CHUNK_DEFLATE, entry->offset, &file_content_, compressed_len, entry_name);
    curr.SetUncompressedData(std::move(uncompressed_data));
    chunks_.push_back(std::move(curr));
  } else {
    chunks_.emplace_back(CHUNK_NORMAL, entry->offset, &file_content_, compressed_len, entry_name);
  }

  return true;
}

// EOCD record:
//   offset 0:  signature 0x06054b50, 4 bytes
//   offset 4:  number of this disk, 2 bytes
//   ...
//   offset 20: comment length, 2 bytes
//   offset 22: comment, n bytes
bool ZipModeImage::GetZipFileSize(size_t* input_file_size) {
  if (file_content_.size() < 22) {
    LOG(ERROR) << "File is too small to be a zip file";
    return false;
  }

  // Look for the End of Central Directory record of the zip file, and calculate the actual
  // zip file size.
  for (int i = file_content_.size() - 22; i >= 0; i--) {
    if (file_content_[i] == 0x50) {
      if (get_unaligned<uint32_t>(&file_content_[i]) == 0x06054b50) {
        // Double-check: this archive consists of a single "disk".
        CHECK_EQ(get_unaligned<uint16_t>(&file_content_[i + 4]), 0);

        uint16_t comment_length = get_unaligned<uint16_t>(&file_content_[i + 20]);
        size_t file_size = i + 22 + comment_length;
        CHECK_LE(file_size, file_content_.size());
        *input_file_size = file_size;
        return true;
      }
    }
  }

  // EOCD not found; this file is likely not a valid zip file.
  return false;
}
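
// Worked example for GetZipFileSize() above: if the EOCD signature 0x06054b50 is found at offset
// i and the comment length field at offset i + 20 reads 0, the archive proper ends at offset
// i + 22. Any bytes after that (e.g. zero padding added to block-align the file) are excluded
// when the archive is opened from memory.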

ImageChunk ZipModeImage::PseudoSource() const {
  CHECK(is_source_);
  return ImageChunk(CHUNK_NORMAL, 0, &file_content_, file_content_.size());
}

const ImageChunk* ZipModeImage::FindChunkByName(const std::string& name, bool find_normal) const {
  if (name.empty()) {
    return nullptr;
  }
  for (auto& chunk : chunks_) {
    if (chunk.GetType() != CHUNK_DEFLATE && !find_normal) {
      continue;
    }

    if (chunk.GetEntryName() == name) {
      return &chunk;
    }

    // Edge case when the target chunk is split due to the size limit but the source chunk isn't.
    if (name == (chunk.GetEntryName() + "-0") || chunk.GetEntryName() == (name + "-0")) {
      return &chunk;
    }

    // TODO handle the .so files with incremental version number.
    // (e.g. lib/arm64-v8a/libcronet.59.0.3050.4.so)
  }

  return nullptr;
}

ImageChunk* ZipModeImage::FindChunkByName(const std::string& name, bool find_normal) {
  return const_cast<ImageChunk*>(
      static_cast<const ZipModeImage*>(this)->FindChunkByName(name, find_normal));
}

bool ZipModeImage::CheckAndProcessChunks(ZipModeImage* tgt_image, ZipModeImage* src_image) {
  for (auto& tgt_chunk : *tgt_image) {
    if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
      continue;
    }

    ImageChunk* src_chunk = src_image->FindChunkByName(tgt_chunk.GetEntryName());
    if (src_chunk == nullptr) {
      tgt_chunk.ChangeDeflateChunkToNormal();
    } else if (tgt_chunk == *src_chunk) {
      // If two deflate chunks are identical (e.g., the kernel has not changed between two
      // builds), treat them as normal chunks. This makes applypatch much faster -- it can apply
      // a trivial patch to the compressed data, rather than uncompressing and recompressing to
      // apply the trivial patch to the uncompressed data.
      tgt_chunk.ChangeDeflateChunkToNormal();
      src_chunk->ChangeDeflateChunkToNormal();
    } else if (!tgt_chunk.ReconstructDeflateChunk()) {
      // We cannot recompress the data and get exactly the same bits as are in the input target
      // image. Treat the chunk as a normal non-deflated chunk.
      LOG(WARNING) << "Failed to reconstruct target deflate chunk [" << tgt_chunk.GetEntryName()
                   << "]; treating as normal";

      tgt_chunk.ChangeDeflateChunkToNormal();
      src_chunk->ChangeDeflateChunkToNormal();
    }
  }

  // For zips, we only need to merge normal chunks for the target: deflated chunks are matched
  // via filename, and normal chunks are patched using the entire source file as the source.
  if (tgt_image->limit_ == 0) {
    tgt_image->MergeAdjacentNormalChunks();
    tgt_image->DumpChunks();
  }

  return true;
}

// For each target chunk, look for the corresponding source chunk by the zip_entry name. If
// found, add the range of this chunk in the original source file to the block-aligned source
// ranges. Construct the split src & tgt images once the size of the source range reaches the
// limit.
bool ZipModeImage::SplitZipModeImageWithLimit(const ZipModeImage& tgt_image,
                                              const ZipModeImage& src_image,
                                              std::vector<ZipModeImage>* split_tgt_images,
                                              std::vector<ZipModeImage>* split_src_images,
                                              std::vector<SortedRangeSet>* split_src_ranges) {
  CHECK_EQ(tgt_image.limit_, src_image.limit_);
  size_t limit = tgt_image.limit_;

  src_image.DumpChunks();
  LOG(INFO) << "Splitting " << tgt_image.NumOfChunks() << " tgt chunks...";

  SortedRangeSet used_src_ranges;  // ranges used for previous split source images.

  // Reserve the central directory in advance for the last split image.
  const auto& central_directory = src_image.cend() - 1;
  CHECK_EQ(CHUNK_NORMAL, central_directory->GetType());
  used_src_ranges.Insert(central_directory->GetStartOffset(),
                         central_directory->DataLengthForPatch());

  SortedRangeSet src_ranges;
  std::vector<ImageChunk> split_src_chunks;
  std::vector<ImageChunk> split_tgt_chunks;
  for (auto tgt = tgt_image.cbegin(); tgt != tgt_image.cend(); tgt++) {
    const ImageChunk* src = src_image.FindChunkByName(tgt->GetEntryName(), true);
    if (src == nullptr) {
      split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
                                    tgt->GetRawDataLength());
      continue;
    }

    size_t src_offset = src->GetStartOffset();
    size_t src_length = src->GetRawDataLength();

    CHECK(src_length > 0);
    CHECK_LE(src_length, limit);

    // Make sure this source range hasn't been used before so that the src_range pieces don't
    // overlap with each other.
    if (!RemoveUsedBlocks(&src_offset, &src_length, used_src_ranges)) {
      split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(), &tgt_image.file_content_,
                                    tgt->GetRawDataLength());
    } else if (src_ranges.blocks() * BLOCK_SIZE + src_length <= limit) {
      src_ranges.Insert(src_offset, src_length);

      // Add the deflate source chunk if it hasn't been aligned.
      if (src->GetType() == CHUNK_DEFLATE && src_length == src->GetRawDataLength()) {
        split_src_chunks.push_back(*src);
        split_tgt_chunks.push_back(*tgt);
      } else {
        // TODO split smarter to avoid alignment of large deflate chunks
        split_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt->GetStartOffset(),
                                      &tgt_image.file_content_, tgt->GetRawDataLength());
      }
    } else {
      bool added_image = ZipModeImage::AddSplitImageFromChunkList(
          tgt_image, src_image, src_ranges, split_tgt_chunks, split_src_chunks, split_tgt_images,
          split_src_images);

      split_tgt_chunks.clear();
      split_src_chunks.clear();
      // No need to update the split_src_ranges if we don't update the split source images.
      if (added_image) {
        used_src_ranges.Insert(src_ranges);
        split_src_ranges->push_back(std::move(src_ranges));
      }
      src_ranges = {};

      // We don't have enough space for the current chunk; start a new split image and handle
      // this chunk there.
      tgt--;
    }
  }

  // TODO Trim it in case the central directory exceeds the limit by too much.
  src_ranges.Insert(central_directory->GetStartOffset(), central_directory->DataLengthForPatch());
  bool added_image = ZipModeImage::AddSplitImageFromChunkList(tgt_image, src_image, src_ranges,
                                                              split_tgt_chunks, split_src_chunks,
                                                              split_tgt_images, split_src_images);
  if (added_image) {
    split_src_ranges->push_back(std::move(src_ranges));
  }

  ValidateSplitImages(*split_tgt_images, *split_src_images, *split_src_ranges,
                      tgt_image.file_content_.size());

  return true;
}

bool ZipModeImage::AddSplitImageFromChunkList(const ZipModeImage& tgt_image,
                                              const ZipModeImage& src_image,
                                              const SortedRangeSet& split_src_ranges,
                                              const std::vector<ImageChunk>& split_tgt_chunks,
                                              const std::vector<ImageChunk>& split_src_chunks,
                                              std::vector<ZipModeImage>* split_tgt_images,
                                              std::vector<ZipModeImage>* split_src_images) {
  CHECK(!split_tgt_chunks.empty());

  std::vector<ImageChunk> aligned_tgt_chunks;

  // Block-align the start of the target chunks.
  size_t i = 0;
  while (i < split_tgt_chunks.size()) {
    size_t tgt_start = split_tgt_chunks[i].GetStartOffset();
    size_t tgt_length = split_tgt_chunks[i].GetRawDataLength();

    // The current ImageChunk is long enough to align.
    if (AlignHead(&tgt_start, &tgt_length)) {
      aligned_tgt_chunks.emplace_back(CHUNK_NORMAL, tgt_start, &tgt_image.file_content_,
                                      tgt_length);
      break;
    }

    i++;
  }

  // Nothing left after alignment in the current split tgt chunks; skip adding the split_tgt_image.
  if (i == split_tgt_chunks.size()) {
    return false;
  }

  aligned_tgt_chunks.insert(aligned_tgt_chunks.end(), split_tgt_chunks.begin() + i + 1,
                            split_tgt_chunks.end());
  CHECK(!aligned_tgt_chunks.empty());

  // Add a normal chunk to block-align the end of the contents.
  size_t end_offset =
      aligned_tgt_chunks.back().GetStartOffset() + aligned_tgt_chunks.back().GetRawDataLength();
  if (end_offset % BLOCK_SIZE != 0 && end_offset < tgt_image.file_content_.size()) {
    size_t tail_block_length = std::min<size_t>(tgt_image.file_content_.size() - end_offset,
                                                BLOCK_SIZE - (end_offset % BLOCK_SIZE));
    aligned_tgt_chunks.emplace_back(CHUNK_NORMAL, end_offset, &tgt_image.file_content_,
                                    tail_block_length);
  }

  ZipModeImage split_tgt_image(false);
  split_tgt_image.Initialize(aligned_tgt_chunks, {});
  split_tgt_image.MergeAdjacentNormalChunks();

  // Construct the split source file based on the split src ranges.
  std::vector<uint8_t> split_src_content;
  for (const auto& r : split_src_ranges) {
    size_t end = std::min(src_image.file_content_.size(), r.second * BLOCK_SIZE);
    split_src_content.insert(split_src_content.end(),
                             src_image.file_content_.begin() + r.first * BLOCK_SIZE,
                             src_image.file_content_.begin() + end);
  }

  // We should not have an empty src in our design; otherwise we will encounter an error in
  // bsdiff since split_src_content.data() == nullptr.
  CHECK(!split_src_content.empty());

  ZipModeImage split_src_image(true);
  split_src_image.Initialize(split_src_chunks, split_src_content);

  split_tgt_images->push_back(std::move(split_tgt_image));
  split_src_images->push_back(std::move(split_src_image));

  return true;
}

void ZipModeImage::ValidateSplitImages(const std::vector<ZipModeImage>& split_tgt_images,
                                       const std::vector<ZipModeImage>& split_src_images,
                                       std::vector<SortedRangeSet>& split_src_ranges,
                                       size_t total_tgt_size) {
  CHECK_EQ(split_tgt_images.size(), split_src_images.size());

  LOG(INFO) << "Validating " << split_tgt_images.size() << " images";

  // Verify that the target image pieces are contiguous and add up to the total size.
  size_t last_offset = 0;
  for (const auto& tgt_image : split_tgt_images) {
    CHECK(!tgt_image.chunks_.empty());

    CHECK_EQ(last_offset, tgt_image.chunks_.front().GetStartOffset());
    CHECK(last_offset % BLOCK_SIZE == 0);

    // Check that the target chunks within the split image are contiguous.
    for (const auto& chunk : tgt_image.chunks_) {
      CHECK_EQ(last_offset, chunk.GetStartOffset());
      last_offset += chunk.GetRawDataLength();
    }
  }
  CHECK_EQ(total_tgt_size, last_offset);

  // Verify that the source ranges are mutually exclusive.
  CHECK_EQ(split_src_images.size(), split_src_ranges.size());
  SortedRangeSet used_src_ranges;
  for (size_t i = 0; i < split_src_ranges.size(); i++) {
    CHECK(!used_src_ranges.Overlaps(split_src_ranges[i]))
        << "src range " << split_src_ranges[i].ToString() << " overlaps "
        << used_src_ranges.ToString();
    used_src_ranges.Insert(split_src_ranges[i]);
  }
}

bool ZipModeImage::GeneratePatchesInternal(const ZipModeImage& tgt_image,
                                           const ZipModeImage& src_image,
                                           std::vector<PatchChunk>* patch_chunks) {
  LOG(INFO) << "Constructing patches for " << tgt_image.NumOfChunks() << " chunks...";
  patch_chunks->clear();

  bsdiff::SuffixArrayIndexInterface* bsdiff_cache = nullptr;
  for (size_t i = 0; i < tgt_image.NumOfChunks(); i++) {
    const auto& tgt_chunk = tgt_image[i];

    if (PatchChunk::RawDataIsSmaller(tgt_chunk, 0)) {
      patch_chunks->emplace_back(tgt_chunk);
      continue;
    }

    const ImageChunk* src_chunk = (tgt_chunk.GetType() != CHUNK_DEFLATE)
                                      ? nullptr
                                      : src_image.FindChunkByName(tgt_chunk.GetEntryName());

    const auto& src_ref = (src_chunk == nullptr) ? src_image.PseudoSource() : *src_chunk;
    bsdiff::SuffixArrayIndexInterface** bsdiff_cache_ptr =
        (src_chunk == nullptr) ? &bsdiff_cache : nullptr;

    std::vector<uint8_t> patch_data;
    if (!ImageChunk::MakePatch(tgt_chunk, src_ref, &patch_data, bsdiff_cache_ptr)) {
      LOG(ERROR) << "Failed to generate patch, name: " << tgt_chunk.GetEntryName();
      return false;
    }

    LOG(INFO) << "patch " << i << " is " << patch_data.size() << " bytes (of "
              << tgt_chunk.GetRawDataLength() << ")";

    if (PatchChunk::RawDataIsSmaller(tgt_chunk, patch_data.size())) {
      patch_chunks->emplace_back(tgt_chunk);
    } else {
      patch_chunks->emplace_back(tgt_chunk, src_ref, std::move(patch_data));
    }
  }
  delete bsdiff_cache;

  CHECK_EQ(patch_chunks->size(), tgt_image.NumOfChunks());
  return true;
}

bool ZipModeImage::GeneratePatches(const ZipModeImage& tgt_image, const ZipModeImage& src_image,
                                   const std::string& patch_name) {
  std::vector<PatchChunk> patch_chunks;

  ZipModeImage::GeneratePatchesInternal(tgt_image, src_image, &patch_chunks);

  CHECK_EQ(tgt_image.NumOfChunks(), patch_chunks.size());

  android::base::unique_fd patch_fd(
      open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
  if (patch_fd == -1) {
    PLOG(ERROR) << "Failed to open " << patch_name;
    return false;
  }

  return PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd);
}

bool ZipModeImage::GeneratePatches(const std::vector<ZipModeImage>& split_tgt_images,
                                   const std::vector<ZipModeImage>& split_src_images,
                                   const std::vector<SortedRangeSet>& split_src_ranges,
                                   const std::string& patch_name,
                                   const std::string& split_info_file,
                                   const std::string& debug_dir) {
  LOG(INFO) << "Constructing patches for " << split_tgt_images.size() << " split images...";

  android::base::unique_fd patch_fd(
      open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
  if (patch_fd == -1) {
    PLOG(ERROR) << "Failed to open " << patch_name;
    return false;
  }

  std::vector<std::string> split_info_list;
  for (size_t i = 0; i < split_tgt_images.size(); i++) {
    std::vector<PatchChunk> patch_chunks;
    if (!ZipModeImage::GeneratePatchesInternal(split_tgt_images[i], split_src_images[i],
                                               &patch_chunks)) {
      LOG(ERROR) << "Failed to generate split patch";
      return false;
    }

    size_t total_patch_size = 12;
    for (auto& p : patch_chunks) {
      p.UpdateSourceOffset(split_src_ranges[i]);
      total_patch_size += p.PatchSize();
    }

    if (!PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd)) {
      return false;
    }

    size_t split_tgt_size = split_tgt_images[i].chunks_.back().GetStartOffset() +
                            split_tgt_images[i].chunks_.back().GetRawDataLength() -
                            split_tgt_images[i].chunks_.front().GetStartOffset();
    std::string split_info = android::base::StringPrintf(
        "%zu %zu %s", total_patch_size, split_tgt_size, split_src_ranges[i].ToString().c_str());
    split_info_list.push_back(split_info);

    // Write the split source & patch into the debug directory.
    if (!debug_dir.empty()) {
      std::string src_name = android::base::StringPrintf("%s/src-%zu", debug_dir.c_str(), i);
      android::base::unique_fd fd(
          open(src_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));

      if (fd == -1) {
        PLOG(ERROR) << "Failed to open " << src_name;
        return false;
      }
      if (!android::base::WriteFully(fd, split_src_images[i].PseudoSource().DataForPatch(),
                                     split_src_images[i].PseudoSource().DataLengthForPatch())) {
        PLOG(ERROR) << "Failed to write split source data into " << src_name;
        return false;
      }

      std::string debug_patch_name =
          android::base::StringPrintf("%s/patch-%zu", debug_dir.c_str(), i);
      fd.reset(open(debug_patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));

      if (fd == -1) {
        PLOG(ERROR) << "Failed to open " << debug_patch_name;
        return false;
      }
      if (!PatchChunk::WritePatchDataToFd(patch_chunks, fd)) {
        return false;
      }
    }
  }

  // Store the split info in the following format:
  //   Line 0:   imgdiff version#
  //   Line 1:   number of pieces
  //   Line 2:   patch_size_1 tgt_size_1 src_range_1
  //   ...
  //   Line n+1: patch_size_n tgt_size_n src_range_n
  std::string split_info_string = android::base::StringPrintf(
      "%zu\n%zu\n", VERSION, split_info_list.size()) + android::base::Join(split_info_list, '\n');
  if (!android::base::WriteStringToFile(split_info_string, split_info_file)) {
    PLOG(ERROR) << "Failed to write split info to " << split_info_file;
    return false;
  }

  return true;
}
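
// A hypothetical split info file for two pieces could therefore look like the following (all
// numbers invented for illustration; the third column is SortedRangeSet::ToString() output, i.e.
// a block range list in "<count>,<start_1>,<end_1>,..." form):
//   2
//   2
//   1103916 10485760 2,0,2560
//   729533 6291456 2,2560,4096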

bool ImageModeImage::Initialize(const std::string& filename) {
  if (!ReadFile(filename, &file_content_)) {
    return false;
  }

  size_t sz = file_content_.size();
  size_t pos = 0;
  while (pos < sz) {
    // 0x00 no header flags, 0x08 deflate compression, 0x1f8b gzip magic number
    if (sz - pos >= 4 && get_unaligned<uint32_t>(file_content_.data() + pos) == 0x00088b1f) {
      // 'pos' is the offset of the start of a gzip chunk.
      size_t chunk_offset = pos;

      // The remaining data is too small to be a gzip chunk; treat it as a normal chunk.
      if (sz - pos < GZIP_HEADER_LEN + GZIP_FOOTER_LEN) {
        chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, sz - pos);
        break;
      }

      // We need three chunks for the deflated image in total: one normal chunk for the header,
      // one deflated chunk for the body, and another normal chunk for the footer.
      chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_HEADER_LEN);
      pos += GZIP_HEADER_LEN;

      // We must decompress this chunk in order to discover where it ends, and so that we can
      // update the uncompressed_data of the image body and its length.

      z_stream strm;
      strm.zalloc = Z_NULL;
      strm.zfree = Z_NULL;
      strm.opaque = Z_NULL;
      strm.avail_in = sz - pos;
      strm.next_in = file_content_.data() + pos;

      // -15 means we are decoding a 'raw' deflate stream; zlib will
      // not expect zlib headers.
      int ret = inflateInit2(&strm, -15);
      if (ret < 0) {
        LOG(ERROR) << "Failed to initialize inflate: " << ret;
        return false;
      }

      size_t allocated = BUFFER_SIZE;
      std::vector<uint8_t> uncompressed_data(allocated);
      size_t uncompressed_len = 0, raw_data_len = 0;
      do {
        strm.avail_out = allocated - uncompressed_len;
        strm.next_out = uncompressed_data.data() + uncompressed_len;
        ret = inflate(&strm, Z_NO_FLUSH);
        if (ret < 0) {
          LOG(WARNING) << "Inflate failed [" << strm.msg << "] at offset [" << chunk_offset
                       << "]; treating as a normal chunk";
          break;
        }
        uncompressed_len = allocated - strm.avail_out;
        if (strm.avail_out == 0) {
          allocated *= 2;
          uncompressed_data.resize(allocated);
        }
      } while (ret != Z_STREAM_END);

      raw_data_len = sz - strm.avail_in - pos;
      inflateEnd(&strm);

      if (ret < 0) {
        continue;
      }

      // The footer contains the size of the uncompressed data. Double-check to make sure that it
      // matches the size of the data we got when we actually did the decompression.
      size_t footer_index = pos + raw_data_len + GZIP_FOOTER_LEN - 4;
      if (sz - footer_index < 4) {
        LOG(WARNING) << "invalid footer position; treating as a normal chunk";
        continue;
      }
      size_t footer_size = get_unaligned<uint32_t>(file_content_.data() + footer_index);
      if (footer_size != uncompressed_len) {
        LOG(WARNING) << "footer size " << footer_size << " != " << uncompressed_len
                     << "; treating as a normal chunk";
        continue;
      }

      ImageChunk body(CHUNK_DEFLATE, pos, &file_content_, raw_data_len);
      uncompressed_data.resize(uncompressed_len);
      body.SetUncompressedData(std::move(uncompressed_data));
      chunks_.push_back(std::move(body));

      pos += raw_data_len;

      // Create a normal chunk for the footer.
      chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, GZIP_FOOTER_LEN);

      pos += GZIP_FOOTER_LEN;
    } else {
      // Use a normal chunk to take all the contents until the next gzip chunk (or EOF); we expect
      // the number of chunks to be small (5 for typical boot and recovery images).

      // Scan forward until we find a gzip header.
      size_t data_len = 0;
      while (data_len + pos < sz) {
        if (data_len + pos + 4 <= sz &&
            get_unaligned<uint32_t>(file_content_.data() + pos + data_len) == 0x00088b1f) {
          break;
        }
        data_len++;
      }
      chunks_.emplace_back(CHUNK_NORMAL, pos, &file_content_, data_len);

      pos += data_len;
    }
  }

  return true;
}
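
// Note that the loop above emits the gzip header and footer of each compressed section as
// separate normal chunks around the deflate body, so a typical five-section boot image initially
// becomes nine chunks; MergeAdjacentNormalChunks() (called from CheckAndProcessChunks() below)
// later folds the header/footer chunks back into their normal neighbors, restoring the
// five-chunk structure described at the top of this file.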

bool ImageModeImage::SetBonusData(const std::vector<uint8_t>& bonus_data) {
  CHECK(is_source_);
  if (chunks_.size() < 2 || !chunks_[1].SetBonusData(bonus_data)) {
    LOG(ERROR) << "Failed to set bonus data";
    DumpChunks();
    return false;
  }

  LOG(INFO) << " using " << bonus_data.size() << " bytes of bonus data";
  return true;
}

// In image mode, verify that the source and target images have the same chunk structure (i.e.,
// the same sequence of deflate and normal chunks).
bool ImageModeImage::CheckAndProcessChunks(ImageModeImage* tgt_image, ImageModeImage* src_image) {
  // In image mode, merge the gzip header and footer in with any adjacent normal chunks.
  tgt_image->MergeAdjacentNormalChunks();
  src_image->MergeAdjacentNormalChunks();

  if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
    LOG(ERROR) << "Source and target don't have the same number of chunks!";
    tgt_image->DumpChunks();
    src_image->DumpChunks();
    return false;
  }
  for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
    if ((*tgt_image)[i].GetType() != (*src_image)[i].GetType()) {
      LOG(ERROR) << "Source and target don't have the same chunk structure! (chunk " << i << ")";
      tgt_image->DumpChunks();
      src_image->DumpChunks();
      return false;
    }
  }

  for (size_t i = 0; i < tgt_image->NumOfChunks(); ++i) {
    auto& tgt_chunk = (*tgt_image)[i];
    auto& src_chunk = (*src_image)[i];
    if (tgt_chunk.GetType() != CHUNK_DEFLATE) {
      continue;
    }

    // If two deflate chunks are identical, treat them as normal chunks.
    if (tgt_chunk == src_chunk) {
      tgt_chunk.ChangeDeflateChunkToNormal();
      src_chunk.ChangeDeflateChunkToNormal();
    } else if (!tgt_chunk.ReconstructDeflateChunk()) {
      // We cannot recompress the data and get exactly the same bits as are in the input target
      // image; fall back to a normal chunk.
      LOG(WARNING) << "Failed to reconstruct target deflate chunk " << i << " ["
                   << tgt_chunk.GetEntryName() << "]; treating as normal";
      tgt_chunk.ChangeDeflateChunkToNormal();
      src_chunk.ChangeDeflateChunkToNormal();
    }
  }

  // For images, we need to maintain the parallel structure of the chunk lists, so do the merging
  // in both the source and target lists.
  tgt_image->MergeAdjacentNormalChunks();
  src_image->MergeAdjacentNormalChunks();
  if (tgt_image->NumOfChunks() != src_image->NumOfChunks()) {
    // This shouldn't happen.
    LOG(ERROR) << "Merging normal chunks went awry";
    return false;
  }

  return true;
}

// In image mode, generate patches against the given source chunks and bonus_data; write the
// result to |patch_name|.
bool ImageModeImage::GeneratePatches(const ImageModeImage& tgt_image,
                                     const ImageModeImage& src_image,
                                     const std::string& patch_name) {
  LOG(INFO) << "Constructing patches for " << tgt_image.NumOfChunks() << " chunks...";
  std::vector<PatchChunk> patch_chunks;
  patch_chunks.reserve(tgt_image.NumOfChunks());

  for (size_t i = 0; i < tgt_image.NumOfChunks(); i++) {
    const auto& tgt_chunk = tgt_image[i];
    const auto& src_chunk = src_image[i];

    if (PatchChunk::RawDataIsSmaller(tgt_chunk, 0)) {
      patch_chunks.emplace_back(tgt_chunk);
      continue;
    }

    std::vector<uint8_t> patch_data;
    if (!ImageChunk::MakePatch(tgt_chunk, src_chunk, &patch_data, nullptr)) {
      LOG(ERROR) << "Failed to generate patch for target chunk " << i;
      return false;
    }
    LOG(INFO) << "patch " << i << " is " << patch_data.size() << " bytes (of "
              << tgt_chunk.GetRawDataLength() << ")";

    if (PatchChunk::RawDataIsSmaller(tgt_chunk, patch_data.size())) {
      patch_chunks.emplace_back(tgt_chunk);
    } else {
      patch_chunks.emplace_back(tgt_chunk, src_chunk, std::move(patch_data));
    }
  }

  CHECK_EQ(tgt_image.NumOfChunks(), patch_chunks.size());

  android::base::unique_fd patch_fd(
      open(patch_name.c_str(), O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR));
  if (patch_fd == -1) {
    PLOG(ERROR) << "Failed to open " << patch_name;
    return false;
  }

  return PatchChunk::WritePatchDataToFd(patch_chunks, patch_fd);
}

int imgdiff(int argc, const char** argv) {
  bool verbose = false;
  bool zip_mode = false;
  std::vector<uint8_t> bonus_data;
  size_t blocks_limit = 0;
  std::string split_info_file;
  std::string debug_dir;

  int opt;
  int option_index;
  optind = 0;  // Reset the getopt state so that we can call it multiple times for testing.

  while ((opt = getopt_long(argc, const_cast<char**>(argv), "zb:v", OPTIONS, &option_index)) !=
         -1) {
    switch (opt) {
      case 'z':
        zip_mode = true;
        break;
      case 'b': {
        android::base::unique_fd fd(open(optarg, O_RDONLY));
        if (fd == -1) {
          PLOG(ERROR) << "Failed to open bonus file " << optarg;
          return 1;
        }
        struct stat st;
        if (fstat(fd, &st) != 0) {
          PLOG(ERROR) << "Failed to stat bonus file " << optarg;
          return 1;
        }

        size_t bonus_size = st.st_size;
        bonus_data.resize(bonus_size);
        if (!android::base::ReadFully(fd, bonus_data.data(), bonus_size)) {
          PLOG(ERROR) << "Failed to read bonus file " << optarg;
          return 1;
        }
        break;
      }
      case 'v':
        verbose = true;
        break;
      case 0: {
        std::string name = OPTIONS[option_index].name;
        if (name == "block-limit" && !android::base::ParseUint(optarg, &blocks_limit)) {
          LOG(ERROR) << "Failed to parse size blocks_limit: " << optarg;
          return 1;
        } else if (name == "split-info") {
          split_info_file = optarg;
        } else if (name == "debug-dir") {
          debug_dir = optarg;
        }
        break;
      }
      default:
        LOG(ERROR) << "unexpected opt: " << static_cast<char>(opt);
        return 2;
    }
  }

  if (!verbose) {
    android::base::SetMinimumLogSeverity(android::base::WARNING);
  }

  if (argc - optind != 3) {
    LOG(ERROR) << "usage: " << argv[0] << " [options] <src-img> <tgt-img> <patch-file>";
    LOG(ERROR)
        << "  -z <zip-mode>,  Generate patches in zip mode; src and tgt should be zip files.\n"
           "  -b <bonus-file>,  Bonus file in addition to src, image mode only.\n"
           "  --block-limit,  For large zips, split the src and tgt based on the block limit,\n"
           "                  and generate patches between each pair of pieces. Concatenate these\n"
           "                  patches together and output them into <patch-file>.\n"
           "  --split-info,  Output the split information (patch_size, tgt_size, src_ranges);\n"
           "                 zip mode with block-limit only.\n"
           "  --debug-dir,  Debug directory to put the split srcs and patches, zip mode only.\n"
           "  -v, --verbose,  Enable verbose logging.";
    return 2;
  }

  if (zip_mode) {
    ZipModeImage src_image(true, blocks_limit * BLOCK_SIZE);
    ZipModeImage tgt_image(false, blocks_limit * BLOCK_SIZE);

    if (!src_image.Initialize(argv[optind])) {
      return 1;
    }
    if (!tgt_image.Initialize(argv[optind + 1])) {
      return 1;
    }

    if (!ZipModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
      return 1;
    }

    // Compute bsdiff patches for each chunk's data (the uncompressed data, in the case of
    // deflate chunks).
    if (blocks_limit > 0) {
      if (split_info_file.empty()) {
        LOG(ERROR) << "split-info path cannot be empty when generating patches with a block-limit";
        return 1;
      }

      std::vector<ZipModeImage> split_tgt_images;
      std::vector<ZipModeImage> split_src_images;
      std::vector<SortedRangeSet> split_src_ranges;
      ZipModeImage::SplitZipModeImageWithLimit(tgt_image, src_image, &split_tgt_images,
                                               &split_src_images, &split_src_ranges);

      if (!ZipModeImage::GeneratePatches(split_tgt_images, split_src_images, split_src_ranges,
                                         argv[optind + 2], split_info_file, debug_dir)) {
        return 1;
      }

    } else if (!ZipModeImage::GeneratePatches(tgt_image, src_image, argv[optind + 2])) {
      return 1;
    }
  } else {
    ImageModeImage src_image(true);
    ImageModeImage tgt_image(false);

    if (!src_image.Initialize(argv[optind])) {
      return 1;
    }
    if (!tgt_image.Initialize(argv[optind + 1])) {
      return 1;
    }

    if (!ImageModeImage::CheckAndProcessChunks(&tgt_image, &src_image)) {
      return 1;
    }

    if (!bonus_data.empty() && !src_image.SetBonusData(bonus_data)) {
      return 1;
    }

    if (!ImageModeImage::GeneratePatches(tgt_image, src_image, argv[optind + 2])) {
      return 1;
    }
  }

  return 0;
}