//
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/delta_performer.h"

#include <errno.h>
#include <linux/fs.h>

#include <algorithm>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include <base/files/file_util.h>
#include <base/format_macros.h>
#include <base/metrics/histogram_macros.h>
#include <base/strings/string_number_conversions.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <base/time/time.h>
#include <brillo/data_encoding.h>
#include <bsdiff/bspatch.h>
#include <google/protobuf/repeated_field.h>
#include <puffin/puffpatch.h>

#include "update_engine/common/constants.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/terminator.h"
#include "update_engine/payload_consumer/bzip_extent_writer.h"
#include "update_engine/payload_consumer/cached_file_descriptor.h"
#include "update_engine/payload_consumer/certificate_parser_interface.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/extent_reader.h"
#include "update_engine/payload_consumer/extent_writer.h"
#include "update_engine/payload_consumer/partition_update_generator_interface.h"
#if USE_FEC
#include "update_engine/payload_consumer/fec_file_descriptor.h"
#endif  // USE_FEC
#include "update_engine/payload_consumer/file_descriptor_utils.h"
#include "update_engine/payload_consumer/mount_history.h"
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_consumer/xz_extent_writer.h"

using google::protobuf::RepeatedPtrField;
using std::min;
using std::string;
using std::vector;

namespace chromeos_update_engine {
const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
const uint64_t DeltaPerformer::kCheckpointFrequencySeconds = 1;

namespace {
const int kUpdateStateOperationInvalid = -1;
const int kMaxResumedUpdateFailures = 10;

const uint64_t kCacheSize = 1024 * 1024;  // 1MB

// Opens path for read/write. On success returns an open FileDescriptor
// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
FileDescriptorPtr OpenFile(const char* path,
                           int mode,
                           bool cache_writes,
                           int* err) {
  // Try to mark the block device read-only based on the mode. Ignore any
  // failure since this won't work when passing regular files.
  bool read_only = (mode & O_ACCMODE) == O_RDONLY;
  utils::SetBlockDeviceReadOnly(path, read_only);

  FileDescriptorPtr fd(new EintrSafeFileDescriptor());
  if (cache_writes && !read_only) {
    fd = FileDescriptorPtr(new CachedFileDescriptor(fd, kCacheSize));
    LOG(INFO) << "Caching writes.";
  }
  if (!fd->Open(path, mode, 000)) {
    *err = errno;
    PLOG(ERROR) << "Unable to open file " << path;
    return nullptr;
  }
  *err = 0;
  return fd;
}
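
// Example usage (see OpenCurrentPartition() below): source partitions are
// opened with O_RDONLY and cache_writes=false, while the target partition is
// opened with O_RDWR (plus O_DSYNC for non-interactive updates) and
// cache_writes=true, so target writes are batched through the 1MB
// CachedFileDescriptor before reaching the block device.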

// Discard the tail of the block device referenced by |fd|, from the offset
// |data_size| until the end of the block device. Returns whether the data was
// discarded.
bool DiscardPartitionTail(const FileDescriptorPtr& fd, uint64_t data_size) {
  uint64_t part_size = fd->BlockDevSize();
  if (!part_size || part_size <= data_size)
    return false;

  struct blkioctl_request {
    int number;
    const char* name;
  };
  const vector<blkioctl_request> blkioctl_requests = {
      {BLKDISCARD, "BLKDISCARD"},
      {BLKSECDISCARD, "BLKSECDISCARD"},
#ifdef BLKZEROOUT
      {BLKZEROOUT, "BLKZEROOUT"},
#endif
  };
  for (const auto& req : blkioctl_requests) {
    int error = 0;
    if (fd->BlkIoctl(req.number, data_size, part_size - data_size, &error) &&
        error == 0) {
      return true;
    }
    LOG(WARNING) << "Error discarding the last "
                 << (part_size - data_size) / 1024 << " KiB using ioctl("
                 << req.name << ")";
  }
  return false;
}
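
// The requests above are attempted in order and the first ioctl that succeeds
// wins: BLKDISCARD and BLKSECDISCARD ask the device to (securely) discard the
// range, while BLKZEROOUT, when the kernel headers define it, explicitly
// zeroes the range as a last resort.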

}  // namespace

// Computes the ratio of |part| and |total|, scaled to |norm|, using integer
// arithmetic.
static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
  return part * norm / total;
}
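
// For example, IntRatio(30, 120, 100) == 25, i.e. 30 out of 120 expressed as
// a percentage. The multiplication happens before the division, so the
// product |part| * |norm| must fit in a uint64_t for the result to be exact.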

void DeltaPerformer::LogProgress(const char* message_prefix) {
  // Format operations total count and percentage.
  string total_operations_str("?");
  string completed_percentage_str("");
  if (num_total_operations_) {
    total_operations_str = std::to_string(num_total_operations_);
    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
    completed_percentage_str = base::StringPrintf(
        " (%" PRIu64 "%%)",
        IntRatio(next_operation_num_, num_total_operations_, 100));
  }

  // Format download total count and percentage.
  size_t payload_size = payload_->size;
  string payload_size_str("?");
  string downloaded_percentage_str("");
  if (payload_size) {
    payload_size_str = std::to_string(payload_size);
    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
    downloaded_percentage_str = base::StringPrintf(
        " (%" PRIu64 "%%)", IntRatio(total_bytes_received_, payload_size, 100));
  }

  LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
            << "/" << total_operations_str << " operations"
            << completed_percentage_str << ", " << total_bytes_received_ << "/"
            << payload_size_str << " bytes downloaded"
            << downloaded_percentage_str << ", overall progress "
            << overall_progress_ << "%";
}

void DeltaPerformer::UpdateOverallProgress(bool force_log,
                                           const char* message_prefix) {
  // Compute our download and overall progress.
  unsigned new_overall_progress = 0;
  static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
                "Progress weights don't add up");
  // Only consider download progress if its total size is known; otherwise
  // adjust the operations weight to compensate for the absence of download
  // progress. Also, make sure to cap the download portion at
  // kProgressDownloadWeight, in case we end up downloading more than we
  // initially expected (this indicates a problem, but could generally happen).
  // TODO(garnold) the correction of operations weight when we do not have the
  // total payload size, as well as the conditional guard below, should both be
  // eliminated once we ensure that the payload_size in the install plan is
  // always given and is non-zero. This currently isn't the case during unit
  // tests (see chromium-os:37969).
  size_t payload_size = payload_->size;
  unsigned actual_operations_weight = kProgressOperationsWeight;
  if (payload_size)
    new_overall_progress +=
        min(static_cast<unsigned>(IntRatio(
                total_bytes_received_, payload_size, kProgressDownloadWeight)),
            kProgressDownloadWeight);
  else
    actual_operations_weight += kProgressDownloadWeight;

  // Only add completed operations if their total number is known; we
  // definitely expect an update to have at least one operation, so the
  // expectation is that this will eventually reach |actual_operations_weight|.
  if (num_total_operations_)
    new_overall_progress += IntRatio(
        next_operation_num_, num_total_operations_, actual_operations_weight);

  // The progress ratio cannot recede, unless our assumptions about the total
  // payload size, the total number of operations, or the monotonicity of
  // progress are breached.
  if (new_overall_progress < overall_progress_) {
    LOG(WARNING) << "progress counter receded from " << overall_progress_
                 << "% down to " << new_overall_progress << "%; this is a bug";
    force_log = true;
  }
  overall_progress_ = new_overall_progress;

  // Update the chunk index and log as needed: if forced by the caller, or we
  // completed a progress chunk, or a timeout has expired.
  base::TimeTicks curr_time = base::TimeTicks::Now();
  unsigned curr_progress_chunk =
      overall_progress_ * kProgressLogMaxChunks / 100;
  if (force_log || curr_progress_chunk > last_progress_chunk_ ||
      curr_time > forced_progress_log_time_) {
    forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
    LogProgress(message_prefix);
  }
  last_progress_chunk_ = curr_progress_chunk;
}
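
// Worked example of the weighting above: with both weights at 50, a payload
// that is 40% downloaded and 20% applied reports
//   IntRatio(0.4 * size, size, 50) + IntRatio(0.2 * ops, ops, 50) == 20 + 10,
// i.e. 30% overall. If the payload size were unknown, the operations weight
// would grow to 100 and the same 20% of operations would report 20% overall.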

size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p,
                                        size_t* count_p,
                                        size_t max) {
  const size_t count = *count_p;
  if (!count)
    return 0;  // Special case shortcut.
  size_t read_len = min(count, max - buffer_.size());
  const char* bytes_start = *bytes_p;
  const char* bytes_end = bytes_start + read_len;
  buffer_.reserve(max);
  buffer_.insert(buffer_.end(), bytes_start, bytes_end);
  *bytes_p = bytes_end;
  *count_p = count - read_len;
  return read_len;
}
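
// For example, with an empty |buffer_|, max == 4096 and *count_p == 10000,
// the call appends the first 4096 input bytes to |buffer_|, advances *bytes_p
// past them, leaves *count_p == 5904 and returns 4096. A subsequent call with
// the same |max| is a no-op, since the buffer is already full.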

bool DeltaPerformer::HandleOpResult(bool op_result,
                                    const char* op_type_name,
                                    ErrorCode* error) {
  if (op_result)
    return true;

  size_t partition_first_op_num =
      current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0;
  LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
             << next_operation_num_ << ", which is the operation "
             << next_operation_num_ - partition_first_op_num
             << " in partition \""
             << partitions_[current_partition_].partition_name() << "\"";
  if (*error == ErrorCode::kSuccess)
    *error = ErrorCode::kDownloadOperationExecutionError;
  return false;
}

int DeltaPerformer::Close() {
  int err = -CloseCurrentPartition();
  LOG_IF(ERROR,
         !payload_hash_calculator_.Finalize() ||
             !signed_hash_calculator_.Finalize())
      << "Unable to finalize the hash.";
  if (!buffer_.empty()) {
    LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
    if (err >= 0)
      err = 1;
  }
  return -err;
}

int DeltaPerformer::CloseCurrentPartition() {
  int err = 0;
  if (source_fd_ && !source_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing source partition";
    if (!err)
      err = 1;
  }
  source_fd_.reset();
  if (source_ecc_fd_ && !source_ecc_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing ECC source partition";
    if (!err)
      err = 1;
  }
  source_ecc_fd_.reset();
  source_ecc_open_failure_ = false;
  source_path_.clear();

  if (target_fd_ && !target_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing target partition";
    if (!err)
      err = 1;
  }
  target_fd_.reset();
  target_path_.clear();
  return -err;
}

bool DeltaPerformer::OpenCurrentPartition() {
  if (current_partition_ >= partitions_.size())
    return false;

  const PartitionUpdate& partition = partitions_[current_partition_];
  size_t num_previous_partitions =
      install_plan_->partitions.size() - partitions_.size();
  const InstallPlan::Partition& install_part =
      install_plan_->partitions[num_previous_partitions + current_partition_];
  // Open source fds if we have a delta payload, or for partitions in the
  // partial update.
  bool source_may_exist = manifest_.partial_update() ||
                          payload_->type == InstallPayloadType::kDelta;
  // We shouldn't open the source partition in certain cases, e.g. some dynamic
  // partitions in a delta payload, or partitions included in the full payload
  // of a partial update. Use the source size as the indicator.
  if (source_may_exist && install_part.source_size > 0) {
    source_path_ = install_part.source_path;
    int err;
    source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, false, &err);
    if (!source_fd_) {
      LOG(ERROR) << "Unable to open source partition "
                 << partition.partition_name() << " on slot "
                 << BootControlInterface::SlotName(install_plan_->source_slot)
                 << ", file " << source_path_;
      return false;
    }
  }

  target_path_ = install_part.target_path;
  int err;

  int flags = O_RDWR;
  if (!interactive_)
    flags |= O_DSYNC;

  LOG(INFO) << "Opening " << target_path_ << " partition with"
            << (interactive_ ? "out" : "") << " O_DSYNC";

  target_fd_ = OpenFile(target_path_.c_str(), flags, true, &err);
  if (!target_fd_) {
    LOG(ERROR) << "Unable to open target partition "
               << partition.partition_name() << " on slot "
               << BootControlInterface::SlotName(install_plan_->target_slot)
               << ", file " << target_path_;
    return false;
  }

  LOG(INFO) << "Applying " << partition.operations().size()
            << " operations to partition \"" << partition.partition_name()
            << "\"";

  // Discard the end of the partition, but ignore failures.
  DiscardPartitionTail(target_fd_, install_part.target_size);

  return true;
}

bool DeltaPerformer::OpenCurrentECCPartition() {
  if (source_ecc_fd_)
    return true;

  if (source_ecc_open_failure_)
    return false;

  if (current_partition_ >= partitions_.size())
    return false;

  // No support for ECC for full payloads.
  if (payload_->type == InstallPayloadType::kFull)
    return false;

#if USE_FEC
  const PartitionUpdate& partition = partitions_[current_partition_];
  size_t num_previous_partitions =
      install_plan_->partitions.size() - partitions_.size();
  const InstallPlan::Partition& install_part =
      install_plan_->partitions[num_previous_partitions + current_partition_];
  string path = install_part.source_path;
  FileDescriptorPtr fd(new FecFileDescriptor());
  if (!fd->Open(path.c_str(), O_RDONLY, 0)) {
    PLOG(ERROR) << "Unable to open ECC source partition "
                << partition.partition_name() << " on slot "
                << BootControlInterface::SlotName(install_plan_->source_slot)
                << ", file " << path;
    source_ecc_open_failure_ = true;
    return false;
  }
  source_ecc_fd_ = fd;
#else
  // No support for ECC compiled.
  source_ecc_open_failure_ = true;
#endif  // USE_FEC

  return !source_ecc_open_failure_;
}

namespace {

void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
  string sha256 = brillo::data_encoding::Base64Encode(info.hash());
  LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
            << " size: " << info.size();
}

void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
  for (const PartitionUpdate& partition : partitions) {
    if (partition.has_old_partition_info()) {
      LogPartitionInfoHash(partition.old_partition_info(),
                           "old " + partition.partition_name());
    }
    LogPartitionInfoHash(partition.new_partition_info(),
                         "new " + partition.partition_name());
  }
}

}  // namespace

uint32_t DeltaPerformer::GetMinorVersion() const {
  if (manifest_.has_minor_version()) {
    return manifest_.minor_version();
  }
  return payload_->type == InstallPayloadType::kDelta
             ? kMaxSupportedMinorPayloadVersion
             : kFullPayloadMinorVersion;
}

bool DeltaPerformer::IsHeaderParsed() const {
  return metadata_size_ != 0;
}

MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
    const brillo::Blob& payload, ErrorCode* error) {
  *error = ErrorCode::kSuccess;

  if (!IsHeaderParsed()) {
    MetadataParseResult result =
        payload_metadata_.ParsePayloadHeader(payload, error);
    if (result != MetadataParseResult::kSuccess)
      return result;

    metadata_size_ = payload_metadata_.GetMetadataSize();
    metadata_signature_size_ = payload_metadata_.GetMetadataSignatureSize();
    major_payload_version_ = payload_metadata_.GetMajorVersion();

    // If the metadata size is present in the install plan, check it
    // immediately, even before that many bytes have been downloaded from the
    // payload. This prevents any attack that relies on us downloading data
    // beyond the expected metadata size.
    if (install_plan_->hash_checks_mandatory) {
      if (payload_->metadata_size != metadata_size_) {
        LOG(ERROR) << "Mandatory metadata size in Omaha response ("
                   << payload_->metadata_size
                   << ") is missing/incorrect, actual = " << metadata_size_;
        *error = ErrorCode::kDownloadInvalidMetadataSize;
        return MetadataParseResult::kError;
      }
    }

    // Check that |metadata_signature_size_| and |metadata_size_| are not
    // very big numbers. This is necessary since |update_engine| needs to
    // write these values into the buffer before being able to use them, and
    // if an attacker sets these values to a very big number, the buffer will
    // overflow and |update_engine| will crash. A simple way of solving this
    // is to check that the size of both values is smaller than the payload
    // itself.
    if (metadata_size_ + metadata_signature_size_ > payload_->size) {
      LOG(ERROR) << "The size of the metadata_size(" << metadata_size_ << ")"
                 << " or metadata signature(" << metadata_signature_size_ << ")"
                 << " is greater than the size of the payload"
                 << "(" << payload_->size << ")";
      *error = ErrorCode::kDownloadInvalidMetadataSize;
      return MetadataParseResult::kError;
    }
  }

  // Now that we have validated the metadata size, we should wait for the full
  // metadata and its signature (if it exists) to be read in before we can
  // parse it.
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return MetadataParseResult::kInsufficientData;

  // Log whether we validated the size or are simply trusting what's in the
  // payload here. This is logged here (after we received the full metadata)
  // so that we log only once (instead of logging n times) if it takes n
  // DeltaPerformer::Write calls to download the full manifest.
  if (payload_->metadata_size == metadata_size_) {
    LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
  } else {
    // For mandatory cases, we'd have already returned a kMetadataParseError
    // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
    LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
                 << payload_->metadata_size
                 << ") in Omaha response as validation is not mandatory. "
                 << "Trusting metadata size in payload = " << metadata_size_;
  }

  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
  if (!payload_verifier) {
    LOG(ERROR) << "Failed to create payload verifier.";
    *error = ErrorCode::kDownloadMetadataSignatureVerificationError;
    if (perform_verification) {
      return MetadataParseResult::kError;
    }
  } else {
    // We have the full metadata in |payload|. Verify its integrity
    // and authenticity based on the information we have in the Omaha response.
    *error = payload_metadata_.ValidateMetadataSignature(
        payload, payload_->metadata_signature, *payload_verifier);
  }
  if (*error != ErrorCode::kSuccess) {
    if (install_plan_->hash_checks_mandatory) {
      // The autoupdate_CatchBadSignatures test checks for this string
      // in log-files. Keep in sync.
      LOG(ERROR) << "Mandatory metadata signature validation failed";
      return MetadataParseResult::kError;
    }

    // For non-mandatory cases, just send a UMA stat.
    LOG(WARNING) << "Ignoring metadata signature validation failures";
    *error = ErrorCode::kSuccess;
  }

  // The payload metadata is deemed valid; it's safe to parse the protobuf.
  if (!payload_metadata_.GetManifest(payload, &manifest_)) {
    LOG(ERROR) << "Unable to parse manifest in update file.";
    *error = ErrorCode::kDownloadManifestParseError;
    return MetadataParseResult::kError;
  }

  manifest_parsed_ = true;
  return MetadataParseResult::kSuccess;
}

#define OP_DURATION_HISTOGRAM(_op_name, _start_time)                         \
  LOCAL_HISTOGRAM_CUSTOM_TIMES(                                              \
      "UpdateEngine.DownloadAction.InstallOperation::" _op_name ".Duration", \
      (base::TimeTicks::Now() - _start_time),                                \
      base::TimeDelta::FromMilliseconds(10),                                 \
      base::TimeDelta::FromMinutes(5),                                       \
      20);
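
// Usage example: OP_DURATION_HISTOGRAM("REPLACE", op_start_time) records the
// wall-clock duration of a REPLACE operation into a local UMA histogram,
// bucketed between 10 ms and 5 minutes over 20 buckets, as done for each
// operation type in the switch statement of DeltaPerformer::Write() below.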

// Wrapper around write. Returns true if all requested bytes
// were written, or false on any error, regardless of progress,
// and stores an action exit code in |error|.
bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode* error) {
  *error = ErrorCode::kSuccess;
  const char* c_bytes = reinterpret_cast<const char*>(bytes);

  // Update the total bytes downloaded count and the progress logs.
  total_bytes_received_ += count;
  UpdateOverallProgress(false, "Completed ");

  while (!manifest_valid_) {
    // Read data up to the needed limit; this is either the maximum payload
    // header size, or the full metadata size (once it becomes known).
    const bool do_read_header = !IsHeaderParsed();
    CopyDataToBuffer(
        &c_bytes,
        &count,
        (do_read_header ? kMaxPayloadHeaderSize
                        : metadata_size_ + metadata_signature_size_));

    MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
    if (result == MetadataParseResult::kError)
      return false;
    if (result == MetadataParseResult::kInsufficientData) {
      // If we just processed the header, make an attempt on the manifest.
      if (do_read_header && IsHeaderParsed())
        continue;

      return true;
    }

    // Check the integrity of the payload manifest.
    if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
      return false;
    manifest_valid_ = true;
    if (!install_plan_->is_resume) {
      prefs_->SetString(kPrefsManifestBytes, {buffer_.begin(), buffer_.end()});
    }

    // Clear the download buffer.
    DiscardBuffer(false, metadata_size_);

    block_size_ = manifest_.block_size();

    // This populates |partitions_| and |install_plan.partitions| with the
    // list of partitions from the manifest.
    if (!ParseManifestPartitions(error))
      return false;

    // |install_plan.partitions| was filled in; nothing needs to be done here
    // if the payload was already applied. Return false to terminate the http
    // fetcher, but keep |error| as ErrorCode::kSuccess.
    if (payload_->already_applied)
      return false;

    num_total_operations_ = 0;
    for (const auto& partition : partitions_) {
      num_total_operations_ += partition.operations_size();
      acc_num_operations_.push_back(num_total_operations_);
    }

    LOG_IF(WARNING,
           !prefs_->SetInt64(kPrefsManifestMetadataSize, metadata_size_))
        << "Unable to save the manifest metadata size.";
    LOG_IF(WARNING,
           !prefs_->SetInt64(kPrefsManifestSignatureSize,
                             metadata_signature_size_))
        << "Unable to save the manifest signature size.";

    if (!PrimeUpdateState()) {
      *error = ErrorCode::kDownloadStateInitializationError;
      LOG(ERROR) << "Unable to prime the update state.";
      return false;
    }

    if (next_operation_num_ < acc_num_operations_[current_partition_]) {
      if (!OpenCurrentPartition()) {
        *error = ErrorCode::kInstallDeviceOpenError;
        return false;
      }
    }

    if (next_operation_num_ > 0)
      UpdateOverallProgress(true, "Resuming after ");
    LOG(INFO) << "Starting to apply update payload operations";
  }

  while (next_operation_num_ < num_total_operations_) {
    // Check if we should cancel the current attempt for any reason.
    // In this case, *error will have already been populated with the reason
    // why we're canceling.
    if (download_delegate_ && download_delegate_->ShouldCancel(error))
      return false;

    // We know there are more operations to perform because we didn't reach
    // the |num_total_operations_| limit yet.
    if (next_operation_num_ >= acc_num_operations_[current_partition_]) {
      CloseCurrentPartition();
      // Skip until there are operations for current_partition_.
      while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
        current_partition_++;
      }
      if (!OpenCurrentPartition()) {
        *error = ErrorCode::kInstallDeviceOpenError;
        return false;
      }
    }
    const size_t partition_operation_num =
        next_operation_num_ -
        (current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);

    const InstallOperation& op =
        partitions_[current_partition_].operations(partition_operation_num);

    CopyDataToBuffer(&c_bytes, &count, op.data_length());

    // Check whether we received all of the next operation's data payload.
    if (!CanPerformInstallOperation(op))
      return true;

    // Validate the operation only if the metadata signature is present.
    // Otherwise, keep the old behavior. This serves as a knob to disable
    // the validation logic in case we find some regression after rollout.
    // NOTE: If hash checks are mandatory and metadata_signature is empty,
    // we would have already failed in the ParsePayloadMetadata method and
    // thus not even be here. So no need to handle that case again here.
    if (!payload_->metadata_signature.empty()) {
      // Note: Validate must be called only if CanPerformInstallOperation is
      // called. Otherwise, we might fail an operation simply because there
      // isn't yet sufficient data to compute the proper hash.
      *error = ValidateOperationHash(op);
      if (*error != ErrorCode::kSuccess) {
        if (install_plan_->hash_checks_mandatory) {
          LOG(ERROR) << "Mandatory operation hash check failed";
          return false;
        }

        // For non-mandatory cases, just send a UMA stat.
        LOG(WARNING) << "Ignoring operation validation errors";
        *error = ErrorCode::kSuccess;
      }
    }

    // Make sure we unblock exit when this operation completes.
    ScopedTerminatorExitUnblocker exit_unblocker =
        ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.

    base::TimeTicks op_start_time = base::TimeTicks::Now();

    bool op_result;
    switch (op.type()) {
      case InstallOperation::REPLACE:
      case InstallOperation::REPLACE_BZ:
      case InstallOperation::REPLACE_XZ:
        op_result = PerformReplaceOperation(op);
        OP_DURATION_HISTOGRAM("REPLACE", op_start_time);
        break;
      case InstallOperation::ZERO:
      case InstallOperation::DISCARD:
        op_result = PerformZeroOrDiscardOperation(op);
        OP_DURATION_HISTOGRAM("ZERO_OR_DISCARD", op_start_time);
        break;
      case InstallOperation::SOURCE_COPY:
        op_result = PerformSourceCopyOperation(op, error);
        OP_DURATION_HISTOGRAM("SOURCE_COPY", op_start_time);
        break;
      case InstallOperation::SOURCE_BSDIFF:
      case InstallOperation::BROTLI_BSDIFF:
        op_result = PerformSourceBsdiffOperation(op, error);
        OP_DURATION_HISTOGRAM("SOURCE_BSDIFF", op_start_time);
        break;
      case InstallOperation::PUFFDIFF:
        op_result = PerformPuffDiffOperation(op, error);
        OP_DURATION_HISTOGRAM("PUFFDIFF", op_start_time);
        break;
      default:
        op_result = false;
    }
    if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
      return false;

    if (!target_fd_->Flush()) {
      return false;
    }

    next_operation_num_++;
    UpdateOverallProgress(false, "Completed ");
    CheckpointUpdateProgress(false);
  }

  // In major version 2, we don't add unused operations to the payload.
  // If we already extracted the signature we should skip this step.
  if (manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
      signatures_message_data_.empty()) {
    if (manifest_.signatures_offset() != buffer_offset_) {
      LOG(ERROR) << "Payload signatures offset points to blob offset "
                 << manifest_.signatures_offset()
                 << " but signatures are expected at offset " << buffer_offset_;
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
    // Needs more data to cover the entire signature.
    if (buffer_.size() < manifest_.signatures_size())
      return true;
    if (!ExtractSignatureMessage()) {
      LOG(ERROR) << "Extract payload signature failed.";
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    DiscardBuffer(true, 0);
    // Since we extracted the SignatureMessage we need to advance the
    // checkpoint, otherwise we would reload the signature and try to extract
    // it again.
    // This is the last checkpoint for an update, so force it to be saved.
    CheckpointUpdateProgress(true);
  }

  return true;
}

bool DeltaPerformer::IsManifestValid() {
  return manifest_valid_;
}

bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
  partitions_.clear();
  for (const PartitionUpdate& partition : manifest_.partitions()) {
    partitions_.push_back(partition);
  }

  // For VAB and partial updates, the partition preparation will copy the
  // dynamic partitions metadata to the target metadata slot, and rename the
  // slot suffix of the partitions in the metadata.
  if (install_plan_->target_slot != BootControlInterface::kInvalidSlot) {
    uint64_t required_size = 0;
    if (!PreparePartitionsForUpdate(&required_size)) {
      if (required_size > 0) {
        *error = ErrorCode::kNotEnoughSpace;
      } else {
        *error = ErrorCode::kInstallDeviceOpenError;
      }
      return false;
    }
  }

  // Partitions in the manifest are no longer needed after preparing
  // partitions.
  manifest_.clear_partitions();
  // TODO(xunchang) TBD: allow partial update only on devices with dynamic
  // partitions.
  if (manifest_.partial_update()) {
    std::set<std::string> touched_partitions;
    for (const auto& partition_update : partitions_) {
      touched_partitions.insert(partition_update.partition_name());
    }

    auto generator = partition_update_generator::Create(
        boot_control_, manifest_.block_size());
    std::vector<PartitionUpdate> untouched_static_partitions;
    TEST_AND_RETURN_FALSE(
        generator->GenerateOperationsForPartitionsNotInPayload(
            install_plan_->source_slot,
            install_plan_->target_slot,
            touched_partitions,
            &untouched_static_partitions));
    partitions_.insert(partitions_.end(),
                       untouched_static_partitions.begin(),
                       untouched_static_partitions.end());

    // Save the untouched dynamic partitions in the install plan.
    std::vector<std::string> dynamic_partitions;
    if (!boot_control_->GetDynamicPartitionControl()
             ->ListDynamicPartitionsForSlot(install_plan_->source_slot,
                                            &dynamic_partitions)) {
      LOG(ERROR) << "Failed to load dynamic partitions from slot "
                 << install_plan_->source_slot;
      return false;
    }
    install_plan_->untouched_dynamic_partitions.clear();
    for (const auto& name : dynamic_partitions) {
      if (touched_partitions.find(name) == touched_partitions.end()) {
        install_plan_->untouched_dynamic_partitions.push_back(name);
      }
    }
  }

  // Fill in InstallPlan::partitions based on the partitions from the
  // payload.
  for (const auto& partition : partitions_) {
    InstallPlan::Partition install_part;
    install_part.name = partition.partition_name();
    install_part.run_postinstall =
        partition.has_run_postinstall() && partition.run_postinstall();
    if (install_part.run_postinstall) {
      install_part.postinstall_path =
          (partition.has_postinstall_path() ? partition.postinstall_path()
                                            : kPostinstallDefaultScript);
      install_part.filesystem_type = partition.filesystem_type();
      install_part.postinstall_optional = partition.postinstall_optional();
    }

    if (partition.has_old_partition_info()) {
      const PartitionInfo& info = partition.old_partition_info();
      install_part.source_size = info.size();
      install_part.source_hash.assign(info.hash().begin(), info.hash().end());
    }

    if (!partition.has_new_partition_info()) {
      LOG(ERROR) << "Unable to get new partition hash info on partition "
                 << install_part.name << ".";
      *error = ErrorCode::kDownloadNewPartitionInfoError;
      return false;
    }
    const PartitionInfo& info = partition.new_partition_info();
    install_part.target_size = info.size();
    install_part.target_hash.assign(info.hash().begin(), info.hash().end());

    install_part.block_size = block_size_;
    if (partition.has_hash_tree_extent()) {
      Extent extent = partition.hash_tree_data_extent();
      install_part.hash_tree_data_offset = extent.start_block() * block_size_;
      install_part.hash_tree_data_size = extent.num_blocks() * block_size_;
      extent = partition.hash_tree_extent();
      install_part.hash_tree_offset = extent.start_block() * block_size_;
      install_part.hash_tree_size = extent.num_blocks() * block_size_;
      uint64_t hash_tree_data_end =
          install_part.hash_tree_data_offset + install_part.hash_tree_data_size;
      if (install_part.hash_tree_offset < hash_tree_data_end) {
        LOG(ERROR) << "Invalid hash tree extents, hash tree data ends at "
                   << hash_tree_data_end << ", but hash tree starts at "
                   << install_part.hash_tree_offset;
        *error = ErrorCode::kDownloadNewPartitionInfoError;
        return false;
      }
      install_part.hash_tree_algorithm = partition.hash_tree_algorithm();
      install_part.hash_tree_salt.assign(partition.hash_tree_salt().begin(),
                                         partition.hash_tree_salt().end());
    }
    if (partition.has_fec_extent()) {
      Extent extent = partition.fec_data_extent();
      install_part.fec_data_offset = extent.start_block() * block_size_;
      install_part.fec_data_size = extent.num_blocks() * block_size_;
      extent = partition.fec_extent();
      install_part.fec_offset = extent.start_block() * block_size_;
      install_part.fec_size = extent.num_blocks() * block_size_;
      uint64_t fec_data_end =
          install_part.fec_data_offset + install_part.fec_data_size;
      if (install_part.fec_offset < fec_data_end) {
        LOG(ERROR) << "Invalid fec extents, fec data ends at " << fec_data_end
                   << ", but fec starts at " << install_part.fec_offset;
        *error = ErrorCode::kDownloadNewPartitionInfoError;
        return false;
      }
      install_part.fec_roots = partition.fec_roots();
    }

    install_plan_->partitions.push_back(install_part);
  }

  // TODO(xunchang) we only need to load the partitions that are in the
  // payload, because we have already loaded the others once when generating
  // the SOURCE_COPY operations.
  if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
    LOG(ERROR) << "Unable to determine all the partition devices.";
    *error = ErrorCode::kInstallDeviceOpenError;
    return false;
  }
  LogPartitionInfo(partitions_);
  return true;
}

bool DeltaPerformer::PreparePartitionsForUpdate(uint64_t* required_size) {
  // Call the static PreparePartitionsForUpdate with the hash from
  // kPrefsUpdateCheckResponseHash to ensure the hash of the payload that
  // space is preallocated for matches the hash of the payload being applied.
  string update_check_response_hash;
  ignore_result(prefs_->GetString(kPrefsUpdateCheckResponseHash,
                                  &update_check_response_hash));
  return PreparePartitionsForUpdate(prefs_,
                                    boot_control_,
                                    install_plan_->target_slot,
                                    manifest_,
                                    update_check_response_hash,
                                    required_size);
}

bool DeltaPerformer::PreparePartitionsForUpdate(
    PrefsInterface* prefs,
    BootControlInterface* boot_control,
    BootControlInterface::Slot target_slot,
    const DeltaArchiveManifest& manifest,
    const std::string& update_check_response_hash,
    uint64_t* required_size) {
  string last_hash;
  ignore_result(
      prefs->GetString(kPrefsDynamicPartitionMetadataUpdated, &last_hash));

  bool is_resume = !update_check_response_hash.empty() &&
                   last_hash == update_check_response_hash;

  if (is_resume) {
    LOG(INFO) << "Using previously prepared partitions for update. hash = "
              << last_hash;
  } else {
    LOG(INFO) << "Preparing partitions for new update. last hash = "
              << last_hash << ", new hash = " << update_check_response_hash;
  }

  if (!boot_control->GetDynamicPartitionControl()->PreparePartitionsForUpdate(
          boot_control->GetCurrentSlot(),
          target_slot,
          manifest,
          !is_resume /* should update */,
          required_size)) {
    LOG(ERROR) << "Unable to initialize partition metadata for slot "
               << BootControlInterface::SlotName(target_slot);
    return false;
  }

  TEST_AND_RETURN_FALSE(prefs->SetString(kPrefsDynamicPartitionMetadataUpdated,
                                         update_check_response_hash));
  LOG(INFO) << "PreparePartitionsForUpdate done.";

  return true;
}
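
// Resume detection above hinges on kPrefsDynamicPartitionMetadataUpdated: if
// the stored hash matches the current update-check response hash, the dynamic
// partition metadata was already prepared for this exact payload, so
// should_update is passed as false and the previously prepared partitions are
// reused instead of being wiped and recreated.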

bool DeltaPerformer::CanPerformInstallOperation(
    const chromeos_update_engine::InstallOperation& operation) {
  // If we don't have a data blob we can apply it right away.
  if (!operation.has_data_offset() && !operation.has_data_length())
    return true;

  // See if we have the entire data blob in the buffer.
  if (operation.data_offset() < buffer_offset_) {
    LOG(ERROR) << "we threw away data it seems?";
    return false;
  }

  return (operation.data_offset() + operation.data_length() <=
          buffer_offset_ + buffer_.size());
}

bool DeltaPerformer::PerformReplaceOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::REPLACE ||
        operation.type() == InstallOperation::REPLACE_BZ ||
        operation.type() == InstallOperation::REPLACE_XZ);

  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  // Set up the ExtentWriter stack based on the operation type.
  std::unique_ptr<ExtentWriter> writer = std::make_unique<DirectExtentWriter>();

  if (operation.type() == InstallOperation::REPLACE_BZ) {
    writer.reset(new BzipExtentWriter(std::move(writer)));
  } else if (operation.type() == InstallOperation::REPLACE_XZ) {
    writer.reset(new XzExtentWriter(std::move(writer)));
  }

  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));

  // Update the buffer.
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::PerformZeroOrDiscardOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::DISCARD ||
        operation.type() == InstallOperation::ZERO);

  // These operations have no blob.
  TEST_AND_RETURN_FALSE(!operation.has_data_offset());
  TEST_AND_RETURN_FALSE(!operation.has_data_length());

#ifdef BLKZEROOUT
  bool attempt_ioctl = true;
  int request =
      (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
#else   // !defined(BLKZEROOUT)
  bool attempt_ioctl = false;
  int request = 0;
#endif  // !defined(BLKZEROOUT)

  brillo::Blob zeros;
  for (const Extent& extent : operation.dst_extents()) {
    const uint64_t start = extent.start_block() * block_size_;
    const uint64_t length = extent.num_blocks() * block_size_;
    if (attempt_ioctl) {
      int result = 0;
      if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
        continue;
      attempt_ioctl = false;
    }
    // In case of failure, we fall back to writing 0 to the selected region.
    zeros.resize(16 * block_size_);
    for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
      uint64_t chunk_length =
          min(length - offset, static_cast<uint64_t>(zeros.size()));
      TEST_AND_RETURN_FALSE(utils::PWriteAll(
          target_fd_, zeros.data(), chunk_length, start + offset));
    }
  }
  return true;
}
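
// The fallback path above writes each extent in 16-block chunks from a single
// reusable zero-filled buffer: with a 4096-byte block size that is a 64 KiB
// buffer, so zeroing a 1 MiB extent takes 16 pwrite() calls once the ioctl
// path has been ruled out.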

bool DeltaPerformer::ValidateSourceHash(const brillo::Blob& calculated_hash,
                                        const InstallOperation& operation,
                                        const FileDescriptorPtr source_fd,
                                        ErrorCode* error) {
  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                    operation.src_sha256_hash().end());
  if (calculated_hash != expected_source_hash) {
    LOG(ERROR) << "The hash of the source data on disk for this operation "
               << "doesn't match the expected value. This could mean that the "
               << "delta update payload was targeted for another version, or "
               << "that the source partition was modified after it was "
               << "installed, for example, by mounting a filesystem.";
    LOG(ERROR) << "Expected: sha256|hex = "
               << base::HexEncode(expected_source_hash.data(),
                                  expected_source_hash.size());
    LOG(ERROR) << "Calculated: sha256|hex = "
               << base::HexEncode(calculated_hash.data(),
                                  calculated_hash.size());

    vector<string> source_extents;
    for (const Extent& ext : operation.src_extents()) {
      source_extents.push_back(
          base::StringPrintf("%" PRIu64 ":%" PRIu64,
                             static_cast<uint64_t>(ext.start_block()),
                             static_cast<uint64_t>(ext.num_blocks())));
    }
    LOG(ERROR) << "Operation source (offset:size) in blocks: "
               << base::JoinString(source_extents, ",");

    // Log the remount history if this device is an ext4 partition.
    LogMountHistory(source_fd);

    *error = ErrorCode::kDownloadStateInitializationError;
    return false;
  }
  return true;
}

bool DeltaPerformer::PerformSourceCopyOperation(
    const InstallOperation& operation, ErrorCode* error) {
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  TEST_AND_RETURN_FALSE(source_fd_ != nullptr);

  // The device may optimize the SOURCE_COPY operation. Since this is a
  // device-specific optimization, let the DynamicPartitionControl decide
  // whether the operation should be skipped.
  const PartitionUpdate& partition = partitions_[current_partition_];
  const auto& partition_control = boot_control_->GetDynamicPartitionControl();

  InstallOperation buf;
  bool should_optimize = partition_control->OptimizeOperation(
      partition.partition_name(), operation, &buf);
  const InstallOperation& optimized = should_optimize ? buf : operation;

  if (operation.has_src_sha256_hash()) {
    bool read_ok;
    brillo::Blob source_hash;
    brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                      operation.src_sha256_hash().end());

    // We fall back to using the error corrected device if the hash of the raw
    // device doesn't match or there was an error reading the source partition.
    // Note that this code will also fall back if writing the target partition
    // fails.
    if (should_optimize) {
      // Hash operation.src_extents(), then copy optimized.src_extents to
      // optimized.dst_extents.
      read_ok =
          fd_utils::ReadAndHashExtents(
              source_fd_, operation.src_extents(), block_size_, &source_hash) &&
          fd_utils::CopyAndHashExtents(source_fd_,
                                       optimized.src_extents(),
                                       target_fd_,
                                       optimized.dst_extents(),
                                       block_size_,
                                       nullptr /* skip hashing */);
    } else {
      read_ok = fd_utils::CopyAndHashExtents(source_fd_,
                                             operation.src_extents(),
                                             target_fd_,
                                             operation.dst_extents(),
                                             block_size_,
                                             &source_hash);
    }
    if (read_ok && expected_source_hash == source_hash)
      return true;

    if (!OpenCurrentECCPartition()) {
      // The following function call will return false since the source hash
      // mismatches, but we still want to call it so it prints the appropriate
      // log message.
      return ValidateSourceHash(source_hash, operation, source_fd_, error);
    }

    LOG(WARNING) << "Source hash from RAW device mismatched: found "
                 << base::HexEncode(source_hash.data(), source_hash.size())
                 << ", expected "
                 << base::HexEncode(expected_source_hash.data(),
                                    expected_source_hash.size());

    if (should_optimize) {
      TEST_AND_RETURN_FALSE(fd_utils::ReadAndHashExtents(
          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash));
      TEST_AND_RETURN_FALSE(
          fd_utils::CopyAndHashExtents(source_ecc_fd_,
                                       optimized.src_extents(),
                                       target_fd_,
                                       optimized.dst_extents(),
                                       block_size_,
                                       nullptr /* skip hashing */));
    } else {
      TEST_AND_RETURN_FALSE(
          fd_utils::CopyAndHashExtents(source_ecc_fd_,
                                       operation.src_extents(),
                                       target_fd_,
                                       operation.dst_extents(),
                                       block_size_,
                                       &source_hash));
    }
    TEST_AND_RETURN_FALSE(
        ValidateSourceHash(source_hash, operation, source_ecc_fd_, error));
    // At this point reading from the error corrected device worked, but
    // reading from the raw device failed, so this is considered a recovered
    // failure.
    source_ecc_recovered_failures_++;
  } else {
    // When the operation doesn't include a source hash, we attempt the error
    // corrected device first since we can't verify the block in the raw device
    // at this point, but we fall back to the raw device since the error
    // corrected device can be shorter or not available.

    if (OpenCurrentECCPartition() &&
        fd_utils::CopyAndHashExtents(source_ecc_fd_,
                                     optimized.src_extents(),
                                     target_fd_,
                                     optimized.dst_extents(),
                                     block_size_,
                                     nullptr)) {
      return true;
    }
    TEST_AND_RETURN_FALSE(fd_utils::CopyAndHashExtents(source_fd_,
                                                       optimized.src_extents(),
                                                       target_fd_,
                                                       optimized.dst_extents(),
                                                       block_size_,
                                                       nullptr));
  }
  return true;
}

FileDescriptorPtr DeltaPerformer::ChooseSourceFD(
    const InstallOperation& operation, ErrorCode* error) {
  if (source_fd_ == nullptr) {
    LOG(ERROR) << "ChooseSourceFD fail: source_fd_ == nullptr";
    return nullptr;
  }

  if (!operation.has_src_sha256_hash()) {
    // When the operation doesn't include a source hash, we attempt the error
    // corrected device first since we can't verify the block in the raw device
    // at this point, but we first need to make sure all extents are readable
    // since the error corrected device can be shorter or not available.
    if (OpenCurrentECCPartition() &&
        fd_utils::ReadAndHashExtents(
            source_ecc_fd_, operation.src_extents(), block_size_, nullptr)) {
      return source_ecc_fd_;
    }
    return source_fd_;
  }

  brillo::Blob source_hash;
  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                    operation.src_sha256_hash().end());
  if (fd_utils::ReadAndHashExtents(
          source_fd_, operation.src_extents(), block_size_, &source_hash) &&
      source_hash == expected_source_hash) {
    return source_fd_;
  }
  // We fall back to using the error corrected device if the hash of the raw
  // device doesn't match or there was an error reading the source partition.
  if (!OpenCurrentECCPartition()) {
    // The following function call will return false since the source hash
    // mismatches, but we still want to call it so it prints the appropriate
    // log message.
    ValidateSourceHash(source_hash, operation, source_fd_, error);
    return nullptr;
  }
  LOG(WARNING) << "Source hash from RAW device mismatched: found "
               << base::HexEncode(source_hash.data(), source_hash.size())
               << ", expected "
               << base::HexEncode(expected_source_hash.data(),
                                  expected_source_hash.size());

  if (fd_utils::ReadAndHashExtents(
          source_ecc_fd_, operation.src_extents(), block_size_, &source_hash) &&
      ValidateSourceHash(source_hash, operation, source_ecc_fd_, error)) {
    // At this point reading from the error corrected device worked, but
    // reading from the raw device failed, so this is considered a recovered
    // failure.
    source_ecc_recovered_failures_++;
    return source_ecc_fd_;
  }
  return nullptr;
}

bool DeltaPerformer::ExtentsToBsdiffPositionsString(
    const RepeatedPtrField<Extent>& extents,
    uint64_t block_size,
    uint64_t full_length,
    string* positions_string) {
  string ret;
  uint64_t length = 0;
  for (const Extent& extent : extents) {
    int64_t start = extent.start_block() * block_size;
    uint64_t this_length =
        min(full_length - length,
            static_cast<uint64_t>(extent.num_blocks()) * block_size);
    ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
    length += this_length;
  }
  TEST_AND_RETURN_FALSE(length == full_length);
  if (!ret.empty())
    ret.resize(ret.size() - 1);  // Strip trailing comma off.
  *positions_string = ret;
  return true;
}
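
// For example, extents {start_block: 1, num_blocks: 2} and {start_block: 8,
// num_blocks: 1} with block_size 4096 and full_length 12288 yield
// "4096:8192,32768:4096", the offset:length pairs (in bytes) consumed by
// bsdiff.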

namespace {

class BsdiffExtentFile : public bsdiff::FileInterface {
 public:
  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader, size_t size)
      : BsdiffExtentFile(std::move(reader), nullptr, size) {}
  BsdiffExtentFile(std::unique_ptr<ExtentWriter> writer, size_t size)
      : BsdiffExtentFile(nullptr, std::move(writer), size) {}

  ~BsdiffExtentFile() override = default;

  bool Read(void* buf, size_t count, size_t* bytes_read) override {
    TEST_AND_RETURN_FALSE(reader_->Read(buf, count));
    *bytes_read = count;
    offset_ += count;
    return true;
  }

  bool Write(const void* buf, size_t count, size_t* bytes_written) override {
    TEST_AND_RETURN_FALSE(writer_->Write(buf, count));
    *bytes_written = count;
    offset_ += count;
    return true;
  }

  bool Seek(off_t pos) override {
    if (reader_ != nullptr) {
      TEST_AND_RETURN_FALSE(reader_->Seek(pos));
      offset_ = pos;
    } else {
      // For writes there should technically be no change of position, so the
      // seek target must be equivalent to the current offset.
      TEST_AND_RETURN_FALSE(offset_ == static_cast<uint64_t>(pos));
    }
    return true;
  }

  bool Close() override { return true; }

  bool GetSize(uint64_t* size) override {
    *size = size_;
    return true;
  }

 private:
  BsdiffExtentFile(std::unique_ptr<ExtentReader> reader,
                   std::unique_ptr<ExtentWriter> writer,
                   size_t size)
      : reader_(std::move(reader)),
        writer_(std::move(writer)),
        size_(size),
        offset_(0) {}

  std::unique_ptr<ExtentReader> reader_;
  std::unique_ptr<ExtentWriter> writer_;
  uint64_t size_;
  uint64_t offset_;

  DISALLOW_COPY_AND_ASSIGN(BsdiffExtentFile);
};

}  // namespace

bool DeltaPerformer::PerformSourceBsdiffOperation(
    const InstallOperation& operation, ErrorCode* error) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);

  auto reader = std::make_unique<DirectExtentReader>();
  TEST_AND_RETURN_FALSE(
      reader->Init(source_fd, operation.src_extents(), block_size_));
  auto src_file = std::make_unique<BsdiffExtentFile>(
      std::move(reader),
      utils::BlocksInExtents(operation.src_extents()) * block_size_);

  auto writer = std::make_unique<DirectExtentWriter>();
  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  auto dst_file = std::make_unique<BsdiffExtentFile>(
      std::move(writer),
      utils::BlocksInExtents(operation.dst_extents()) * block_size_);

  TEST_AND_RETURN_FALSE(bsdiff::bspatch(std::move(src_file),
                                        std::move(dst_file),
                                        buffer_.data(),
                                        buffer_.size()) == 0);
  DiscardBuffer(true, buffer_.size());
  return true;
}

namespace {

// A class to be passed to |puffpatch| for reading from |source_fd_| and
// writing into |target_fd_|.
class PuffinExtentStream : public puffin::StreamInterface {
 public:
  // Constructor for creating a stream for reading from an |ExtentReader|.
  PuffinExtentStream(std::unique_ptr<ExtentReader> reader, uint64_t size)
      : PuffinExtentStream(std::move(reader), nullptr, size) {}

  // Constructor for creating a stream for writing to an |ExtentWriter|.
  PuffinExtentStream(std::unique_ptr<ExtentWriter> writer, uint64_t size)
      : PuffinExtentStream(nullptr, std::move(writer), size) {}

  ~PuffinExtentStream() override = default;

  bool GetSize(uint64_t* size) const override {
    *size = size_;
    return true;
  }

  bool GetOffset(uint64_t* offset) const override {
    *offset = offset_;
    return true;
  }

  bool Seek(uint64_t offset) override {
    if (is_read_) {
      TEST_AND_RETURN_FALSE(reader_->Seek(offset));
      offset_ = offset;
    } else {
      // For writes there should technically be no change of position, so the
      // seek target must be equivalent to the current offset.
      TEST_AND_RETURN_FALSE(offset_ == offset);
    }
    return true;
  }

  bool Read(void* buffer, size_t count) override {
    TEST_AND_RETURN_FALSE(is_read_);
    TEST_AND_RETURN_FALSE(reader_->Read(buffer, count));
    offset_ += count;
    return true;
  }

  bool Write(const void* buffer, size_t count) override {
    TEST_AND_RETURN_FALSE(!is_read_);
    TEST_AND_RETURN_FALSE(writer_->Write(buffer, count));
    offset_ += count;
    return true;
  }

  bool Close() override { return true; }

 private:
  PuffinExtentStream(std::unique_ptr<ExtentReader> reader,
                     std::unique_ptr<ExtentWriter> writer,
                     uint64_t size)
      : reader_(std::move(reader)),
        writer_(std::move(writer)),
        size_(size),
        offset_(0),
        is_read_(reader_ ? true : false) {}

  std::unique_ptr<ExtentReader> reader_;
  std::unique_ptr<ExtentWriter> writer_;
  uint64_t size_;
  uint64_t offset_;
  bool is_read_;

  DISALLOW_COPY_AND_ASSIGN(PuffinExtentStream);
};

}  // namespace
1469
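// Applies a puffin patch for one install operation. The shape mirrors the
// bsdiff path above: the patch blob is at the front of |buffer_|, and reads
// and writes are streamed through PuffinExtentStream adapters. The
// |kMaxCacheSize| argument below bounds puffin's internal caching (an
// inference from the parameter's name and value, not from puffin's docs).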
bool DeltaPerformer::PerformPuffDiffOperation(const InstallOperation& operation,
                                              ErrorCode* error) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  FileDescriptorPtr source_fd = ChooseSourceFD(operation, error);
  TEST_AND_RETURN_FALSE(source_fd != nullptr);

  auto reader = std::make_unique<DirectExtentReader>();
  TEST_AND_RETURN_FALSE(
      reader->Init(source_fd, operation.src_extents(), block_size_));
  puffin::UniqueStreamPtr src_stream(new PuffinExtentStream(
      std::move(reader),
      utils::BlocksInExtents(operation.src_extents()) * block_size_));

  auto writer = std::make_unique<DirectExtentWriter>();
  TEST_AND_RETURN_FALSE(
      writer->Init(target_fd_, operation.dst_extents(), block_size_));
  puffin::UniqueStreamPtr dst_stream(new PuffinExtentStream(
      std::move(writer),
      utils::BlocksInExtents(operation.dst_extents()) * block_size_));

  const size_t kMaxCacheSize = 5 * 1024 * 1024;  // Total 5MB cache.
  TEST_AND_RETURN_FALSE(puffin::PuffPatch(std::move(src_stream),
                                          std::move(dst_stream),
                                          buffer_.data(),
                                          buffer_.size(),
                                          kMaxCacheSize));
  DiscardBuffer(true, buffer_.size());
  return true;
}

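// Copies the signature message out of |buffer_| once the download reaches
// |manifest_.signatures_offset()|, and persists it so that a resumed update
// can still verify the payload without re-downloading the blob.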
bool DeltaPerformer::ExtractSignatureMessage() {
  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
  signatures_message_data_.assign(
      buffer_.begin(), buffer_.begin() + manifest_.signatures_size());

  // Save the signature blob because if the update is interrupted after the
  // download phase we don't go through this path anymore. Some alternatives
  // to consider:
  //
  // 1. On resume, re-download the signature blob from the server and
  //    re-verify it.
  //
  // 2. Verify the signature as soon as it's received and don't checkpoint the
  //    blob and the signed SHA-256 context.
  LOG_IF(WARNING,
         !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
                            signatures_message_data_))
      << "Unable to store the signature blob.";

  LOG(INFO) << "Extracted signature data of size "
            << manifest_.signatures_size() << " at "
            << manifest_.signatures_offset();
  return true;
}

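// Picks the public key used for payload verification. Precedence, as
// implemented below: a key file installed on disk wins; otherwise only
// non-official builds may fall back to the key supplied in the Omaha
// response. Returning true with an empty |out_public_key| means no key is
// available.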
bool DeltaPerformer::GetPublicKey(string* out_public_key) {
  out_public_key->clear();

  if (utils::FileExists(public_key_path_.c_str())) {
    LOG(INFO) << "Verifying using public key: " << public_key_path_;
    return utils::ReadFile(public_key_path_, out_public_key);
  }

  // If this is an official build, we are not allowed to use the public key
  // from the Omaha response.
  if (!hardware_->IsOfficialBuild() && !install_plan_->public_key_rsa.empty()) {
    LOG(INFO) << "Verifying using public key from Omaha response.";
    return brillo::data_encoding::Base64Decode(install_plan_->public_key_rsa,
                                               out_public_key);
  }
  LOG(INFO) << "No public keys found for verification.";
  return true;
}

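// Returns {verifier, perform_verification}. When |perform_verification| is
// false the caller should skip signature verification entirely (no key is
// available); when it is true but the verifier is null, creating the
// verifier failed and the caller should treat it as an error.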
std::pair<std::unique_ptr<PayloadVerifier>, bool>
DeltaPerformer::CreatePayloadVerifier() {
  if (utils::FileExists(update_certificates_path_.c_str())) {
    LOG(INFO) << "Verifying using certificates: " << update_certificates_path_;
    return {
        PayloadVerifier::CreateInstanceFromZipPath(update_certificates_path_),
        true};
  }

  string public_key;
  if (!GetPublicKey(&public_key)) {
    LOG(ERROR) << "Failed to read public key";
    return {nullptr, true};
  }

  // Skips the verification if the public key is empty.
  if (public_key.empty()) {
    return {nullptr, false};
  }
  return {PayloadVerifier::CreateInstance(public_key), true};
}

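// Manifest validation, in order: (1) the detected payload type (delta vs.
// full) must match what the InstallPlan expects, (2) the minor version must
// be supported for that payload type, (3) deprecated legacy rootfs/kernel
// fields must be absent, and (4) the payload timestamp must not be older
// than the current build unless downgrades are explicitly allowed.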
ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to validate the manifest: make sure it matches
  // data from other sources, and that it is a supported version.
  bool has_old_fields = std::any_of(manifest_.partitions().begin(),
                                    manifest_.partitions().end(),
                                    [](const PartitionUpdate& partition) {
                                      return partition.has_old_partition_info();
                                    });

  // The presence of an old partition hash is the sole indicator of a delta
  // update. Also, always treat a partial update as a delta update so that the
  // minor version check is performed correctly.
  InstallPayloadType actual_payload_type =
      (has_old_fields || manifest_.partial_update())
          ? InstallPayloadType::kDelta
          : InstallPayloadType::kFull;

  if (payload_->type == InstallPayloadType::kUnknown) {
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    payload_->type = actual_payload_type;
  } else if (payload_->type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(payload_->type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }
  // Check that the minor version is compatible.
  // TODO(xunchang) increment minor version & add check for partial update
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() < kMinSupportedMinorPayloadVersion ||
        manifest_.minor_version() > kMaxSupportedMinorPayloadVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << " not in the range of supported minor versions ["
                 << kMinSupportedMinorPayloadVersion << ", "
                 << kMaxSupportedMinorPayloadVersion << "].";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  if (manifest_.has_old_rootfs_info() || manifest_.has_new_rootfs_info() ||
      manifest_.has_old_kernel_info() || manifest_.has_new_kernel_info() ||
      manifest_.install_operations_size() != 0 ||
      manifest_.kernel_install_operations_size() != 0) {
    LOG(ERROR) << "Manifest contains deprecated fields.";
    return ErrorCode::kPayloadMismatchedType;
  }

  if (manifest_.max_timestamp() < hardware_->GetBuildTimestamp()) {
    LOG(ERROR) << "The current OS build timestamp ("
               << hardware_->GetBuildTimestamp()
               << ") is newer than the maximum timestamp in the manifest ("
               << manifest_.max_timestamp() << ")";
    if (!hardware_->AllowDowngrade()) {
      return ErrorCode::kPayloadTimestampError;
    }
    LOG(INFO) << "The current OS build allows downgrade, continuing to apply"
                 " the payload with an older timestamp.";
  }

  // TODO(crbug.com/37661) we should be adding more and more manifest checks,
  // such as partition boundaries, etc.

  return ErrorCode::kSuccess;
}

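// Verifies the SHA-256 hash of an operation's data blob against the hash
// recorded in the manifest. Operations without a data blob are trivially
// valid, and a missing hash is tolerated only for the signature operation
// (or when hash checks are not mandatory).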
ErrorCode DeltaPerformer::ValidateOperationHash(
    const InstallOperation& operation) {
  if (!operation.data_sha256_hash().size()) {
    if (!operation.data_length()) {
      // Operations that do not have any data blob won't have any operation
      // hash either. So, these operations are always considered validated
      // since the metadata that contains all the non-data-blob portions of
      // the operation has already been validated. This is true for both HTTP
      // and HTTPS cases.
      return ErrorCode::kSuccess;
    }

    // No hash is present for an operation that has data blobs. This shouldn't
    // happen normally for any client that has this code, because the
    // corresponding update should have been produced with the operation
    // hashes. So if it happens it means either we've turned operation hash
    // generation off in DeltaDiffGenerator or it's a regression of some sort.
    // One caveat though: the last operation is an unused signature operation
    // that doesn't have a hash at the time the manifest is created. So we
    // should not complain about that operation. This operation can be
    // recognized by the fact that its offset is mentioned in the manifest.
    if (manifest_.signatures_offset() &&
        manifest_.signatures_offset() == operation.data_offset()) {
      LOG(INFO) << "Skipping hash verification for signature operation "
                << next_operation_num_ + 1;
    } else {
      if (install_plan_->hash_checks_mandatory) {
        LOG(ERROR) << "Missing mandatory operation hash for operation "
                   << next_operation_num_ + 1;
        return ErrorCode::kDownloadOperationHashMissingError;
      }

      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
                   << " as there's no operation hash in manifest";
    }
    return ErrorCode::kSuccess;
  }

  brillo::Blob expected_op_hash;
  expected_op_hash.assign(operation.data_sha256_hash().data(),
                          (operation.data_sha256_hash().data() +
                           operation.data_sha256_hash().size()));

  brillo::Blob calculated_op_hash;
  if (!HashCalculator::RawHashOfBytes(
          buffer_.data(), operation.data_length(), &calculated_op_hash)) {
    LOG(ERROR) << "Unable to compute actual hash of operation "
               << next_operation_num_;
    return ErrorCode::kDownloadOperationHashVerificationError;
  }

  if (calculated_op_hash != expected_op_hash) {
    LOG(ERROR) << "Hash verification failed for operation "
               << next_operation_num_ << ". Expected hash = ";
    utils::HexDumpVector(expected_op_hash);
    LOG(ERROR) << "Calculated hash over " << operation.data_length()
               << " bytes at offset: " << operation.data_offset() << " = ";
    utils::HexDumpVector(calculated_op_hash);
    return ErrorCode::kDownloadOperationHashMismatch;
  }

  return ErrorCode::kSuccess;
}

#define TEST_AND_RETURN_VAL(_retval, _condition)              \
  do {                                                        \
    if (!(_condition)) {                                      \
      LOG(ERROR) << "VerifyPayload failure: " << #_condition; \
      return _retval;                                         \
    }                                                         \
  } while (0)
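// The do/while (0) wrapper makes the macro expand to a single statement, so
// an invocation followed by a semicolon composes safely with if/else.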
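// Whole-payload verification, run after the last byte is received: checks
// that the total download size adds up, that the payload hash matches the
// hash from the update check response, and that the embedded signature
// verifies against the selected public key or certificates.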
ErrorCode DeltaPerformer::VerifyPayload(
    const brillo::Blob& update_check_response_hash,
    const uint64_t update_check_response_size) {
  // Verifies the download size.
  if (update_check_response_size !=
      metadata_size_ + metadata_signature_size_ + buffer_offset_) {
    LOG(ERROR) << "update_check_response_size (" << update_check_response_size
               << ") doesn't match metadata_size (" << metadata_size_
               << ") + metadata_signature_size (" << metadata_signature_size_
               << ") + buffer_offset (" << buffer_offset_ << ").";
    return ErrorCode::kPayloadSizeMismatchError;
  }

  // Verifies the payload hash.
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                      !payload_hash_calculator_.raw_hash().empty());
  TEST_AND_RETURN_VAL(
      ErrorCode::kPayloadHashMismatchError,
      payload_hash_calculator_.raw_hash() == update_check_response_hash);

  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                      !signatures_message_data_.empty());
  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      hash_data.size() == kSHA256Size);

  auto [payload_verifier, perform_verification] = CreatePayloadVerifier();
  if (!perform_verification) {
    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
    return ErrorCode::kSuccess;
  }
  if (!payload_verifier) {
    LOG(ERROR) << "Failed to create the payload verifier.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }
  if (!payload_verifier->VerifySignature(signatures_message_data_, hash_data)) {
    // The autoupdate_CatchBadSignatures test checks for this string
    // in log-files. Keep in sync.
    LOG(ERROR) << "Public key verification failed, thus update failed.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  LOG(INFO) << "Payload hash matches value in payload.";
  return ErrorCode::kSuccess;
}

void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
                                   size_t signed_hash_buffer_size) {
  // Update the buffer offset.
  if (do_advance_offset)
    buffer_offset_ += buffer_.size();

  // Hash the content.
  payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
  signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);

  // Swap content with an empty vector to ensure that all memory is released.
  brillo::Blob().swap(buffer_);
}

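// A resume is allowed only if the persisted state is coherent: a valid next
// operation, a matching update-check response hash (so we don't resume with
// a different payload), a failure count under the limit, and sane values for
// the data offset, hash context, and metadata sizes.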
bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
                                     const string& update_check_response_hash) {
  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
        next_operation != kUpdateStateOperationInvalid && next_operation > 0))
    return false;

  string interrupted_hash;
  if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
        !interrupted_hash.empty() &&
        interrupted_hash == update_check_response_hash))
    return false;

  int64_t resumed_update_failures;
  // Note that storing this value is optional, but if it is there it should
  // not exceed the limit.
  if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
      resumed_update_failures > kMaxResumedUpdateFailures)
    return false;

  // Validate the rest.
  int64_t next_data_offset = -1;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
        next_data_offset >= 0))
    return false;

  string sha256_context;
  if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
        !sha256_context.empty()))
    return false;

  int64_t manifest_metadata_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
        manifest_metadata_size > 0))
    return false;

  int64_t manifest_signature_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
                        &manifest_signature_size) &&
        manifest_signature_size >= 0))
    return false;

  return true;
}

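// Invalidates the persisted update progress. A quick reset only clears the
// next-operation marker, which is enough to make CanResumeUpdate() return
// false; a full reset also wipes the hash contexts, signature blob, metadata
// sizes, failure counter, and related prefs.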
bool DeltaPerformer::ResetUpdateProgress(
    PrefsInterface* prefs,
    bool quick,
    bool skip_dynamic_partititon_metadata_updated) {
  TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                        kUpdateStateOperationInvalid));
  if (!quick) {
    prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
    prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
    prefs->SetString(kPrefsUpdateStateSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
    prefs->SetInt64(kPrefsManifestMetadataSize, -1);
    prefs->SetInt64(kPrefsManifestSignatureSize, -1);
    prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
    prefs->Delete(kPrefsPostInstallSucceeded);
    prefs->Delete(kPrefsVerityWritten);

    if (!skip_dynamic_partititon_metadata_updated) {
      LOG(INFO) << "Resetting recorded hash for prepared partitions.";
      prefs->Delete(kPrefsDynamicPartitionMetadataUpdated);
    }
  }
  return true;
}

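// Persists enough state to resume from (roughly) the current operation.
// Unless |force| is set, checkpoints are rate-limited via
// |update_checkpoint_time_| to avoid hammering the prefs storage. Note the
// ordering below: the progress is invalidated first and the next-operation
// pref is written last, so a crash mid-checkpoint leaves the state invalid
// rather than inconsistent.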
bool DeltaPerformer::CheckpointUpdateProgress(bool force) {
  base::TimeTicks curr_time = base::TimeTicks::Now();
  if (force || curr_time > update_checkpoint_time_) {
    update_checkpoint_time_ = curr_time + update_checkpoint_wait_;
  } else {
    return false;
  }

  Terminator::set_exit_blocked(true);
  if (last_updated_buffer_offset_ != buffer_offset_) {
    // Resets the progress in case we die in the middle of the state update.
    ResetUpdateProgress(prefs_, true);
    TEST_AND_RETURN_FALSE(prefs_->SetString(
        kPrefsUpdateStateSHA256Context, payload_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
                          signed_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetInt64(kPrefsUpdateStateNextDataOffset, buffer_offset_));
    last_updated_buffer_offset_ = buffer_offset_;

    if (next_operation_num_ < num_total_operations_) {
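      // Find the partition that contains |next_operation_num_|.
      // |acc_num_operations_| holds cumulative operation counts per
      // partition; e.g. (illustrative values, not from any real payload)
      // with two partitions of 10 and 15 operations it is {10, 25}, so
      // operation 12 belongs to partition 1 with per-partition index
      // 12 - 10 = 2.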
      size_t partition_index = current_partition_;
      while (next_operation_num_ >= acc_num_operations_[partition_index])
        partition_index++;
      const size_t partition_operation_num =
          next_operation_num_ -
          (partition_index ? acc_num_operations_[partition_index - 1] : 0);
      const InstallOperation& op =
          partitions_[partition_index].operations(partition_operation_num);
      TEST_AND_RETURN_FALSE(
          prefs_->SetInt64(kPrefsUpdateStateNextDataLength, op.data_length()));
    } else {
      TEST_AND_RETURN_FALSE(
          prefs_->SetInt64(kPrefsUpdateStateNextDataLength, 0));
    }
  }
  TEST_AND_RETURN_FALSE(
      prefs_->SetInt64(kPrefsUpdateStateNextOperation, next_operation_num_));
  return true;
}

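// Restores the performer's in-memory state from prefs when resuming an
// interrupted update: the next operation index, buffer offset, hash
// contexts, signature blob, and metadata sizes. The resumed-failure counter
// is bumped up front and only cleared by a full ResetUpdateProgress(), so
// repeatedly failing resumes eventually exceed kMaxResumedUpdateFailures.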
bool DeltaPerformer::PrimeUpdateState() {
  CHECK(manifest_valid_);

  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
      next_operation == kUpdateStateOperationInvalid || next_operation <= 0) {
    // Initiating a new update; no more state needs to be initialized.
    return true;
  }
  next_operation_num_ = next_operation;

  // Resuming an update -- load the rest of the update state.
  int64_t next_data_offset = -1;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
      next_data_offset >= 0);
  buffer_offset_ = next_data_offset;

  // The signed hash context and the signature blob may be empty if the
  // interrupted update didn't reach the signature.
  string signed_hash_context;
  if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
                        &signed_hash_context)) {
    TEST_AND_RETURN_FALSE(
        signed_hash_calculator_.SetContext(signed_hash_context));
  }

  prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signatures_message_data_);

  string hash_context;
  TEST_AND_RETURN_FALSE(
      prefs_->GetString(kPrefsUpdateStateSHA256Context, &hash_context) &&
      payload_hash_calculator_.SetContext(hash_context));

  int64_t manifest_metadata_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
      manifest_metadata_size > 0);
  metadata_size_ = manifest_metadata_size;

  int64_t manifest_signature_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
      manifest_signature_size >= 0);
  metadata_signature_size_ = manifest_signature_size;

  // Advance the download progress to reflect what doesn't need to be
  // re-downloaded.
  total_bytes_received_ += buffer_offset_;

  // Speculatively count the resume as a failure.
  int64_t resumed_update_failures;
  if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
    resumed_update_failures++;
  } else {
    resumed_update_failures = 1;
  }
  prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
  return true;
}

}  // namespace chromeos_update_engine