// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::{BTreeMap, BTreeSet};
use std::fs::{copy, read_to_string, File};
use std::io::{BufRead, BufReader, Read, Write};
use std::path::{Path, PathBuf};

use anyhow::{anyhow, bail, Context, Result};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};

/// JSON serde struct.
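/// For illustration, a single entry of a PATCHES.json file might look like
/// the following (hypothetical values); it deserializes into this struct:
///
/// ```json
/// {
///     "metadata": { "title": "Some bugfix patch" },
///     "platforms": ["chromiumos"],
///     "rel_patch_path": "cherry/some-bugfix.patch",
///     "version_range": { "from": 100, "until": 200 }
/// }
/// ```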
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct PatchDictSchema {
    pub metadata: Option<BTreeMap<String, serde_json::Value>>,
    #[serde(default, skip_serializing_if = "BTreeSet::is_empty")]
    pub platforms: BTreeSet<String>,
    pub rel_patch_path: String,
    pub version_range: Option<VersionRange>,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct VersionRange {
    pub from: Option<u64>,
    pub until: Option<u64>,
}

impl PatchDictSchema {
    /// Return the first version this patch applies to.
    pub fn get_from_version(&self) -> Option<u64> {
        self.version_range.and_then(|x| x.from)
    }

    /// Return the version after the last version this patch
    /// applies to.
    pub fn get_until_version(&self) -> Option<u64> {
        self.version_range.and_then(|x| x.until)
    }
}

/// Struct to keep track of patches and their relative paths.
#[derive(Debug, Clone)]
pub struct PatchCollection {
    pub patches: Vec<PatchDictSchema>,
    pub workdir: PathBuf,
    pub indent_len: usize,
}

impl PatchCollection {
    /// Create a `PatchCollection` from a PATCHES.json file.
    pub fn parse_from_file(json_file: &Path) -> Result<Self> {
        // We can't just use a file reader because we
        // need to know what the original indent_len is.
        let contents = read_to_string(json_file)?;
        Self::parse_from_str(
            json_file
                .parent()
                .ok_or_else(|| anyhow!("failed to get json_file parent"))?
                .to_path_buf(),
            &contents,
        )
    }

    /// Create a `PatchCollection` from a JSON string and a workdir.
    pub fn parse_from_str(workdir: PathBuf, contents: &str) -> Result<Self> {
        Ok(Self {
            patches: serde_json::from_str(contents).context("parsing from str")?,
            workdir,
            indent_len: predict_indent(contents),
        })
    }

    /// Copy this collection with patches filtered by the given criterion.
    pub fn filter_patches<F>(&self, mut f: F) -> Self
    where
        F: FnMut(&PatchDictSchema) -> bool,
    {
        Self {
            patches: self.patches.iter().filter(|&x| f(x)).cloned().collect(),
            workdir: self.workdir.clone(),
            indent_len: self.indent_len,
        }
    }

    /// Map over the patches.
    pub fn map_patches(&self, f: impl FnMut(&PatchDictSchema) -> PatchDictSchema) -> Self {
        Self {
            patches: self.patches.iter().map(f).collect(),
            workdir: self.workdir.clone(),
            indent_len: self.indent_len,
        }
    }

    /// Return true if the collection isn't tracking any patches.
    pub fn is_empty(&self) -> bool {
        self.patches.is_empty()
    }

    /// Compute the set difference, returning a new `PatchCollection` which
    /// keeps the minuend's workdir and indent level.
    pub fn subtract(&self, subtrahend: &Self) -> Result<Self> {
        let mut new_patches = Vec::new();
        // This is O(n^2) when it could be much faster, but n is always going to be less
        // than 1k and speed is not important here.
        for our_patch in &self.patches {
            let found_in_sub = subtrahend.patches.iter().any(|sub_patch| {
                let hash1 = subtrahend
                    .hash_from_rel_patch(sub_patch)
                    .expect("getting hash from subtrahend patch");
                let hash2 = self
                    .hash_from_rel_patch(our_patch)
                    .expect("getting hash from our patch");
                hash1 == hash2
            });
            if !found_in_sub {
                new_patches.push(our_patch.clone());
            }
        }
        Ok(Self {
            patches: new_patches,
            workdir: self.workdir.clone(),
            indent_len: self.indent_len,
        })
    }

    /// Union this collection with `other`, merging the platform sets of
    /// patches whose patch file hashes match.
    pub fn union(&self, other: &Self) -> Result<Self> {
        self.union_helper(
            other,
            |p| self.hash_from_rel_patch(p),
            |p| other.hash_from_rel_patch(p),
        )
    }

    /// Return the `rel_patch_path` and version range of every patch whose
    /// version range differs from the patch with the same `rel_patch_path`
    /// in `other`.
    fn version_range_diffs(&self, other: &Self) -> Vec<(String, Option<VersionRange>)> {
        let other_map: BTreeMap<_, _> = other
            .patches
            .iter()
            .map(|p| (p.rel_patch_path.clone(), p))
            .collect();
        self.patches
            .iter()
            .filter_map(|ours| match other_map.get(&ours.rel_patch_path) {
                Some(theirs) => {
                    if ours.get_from_version() != theirs.get_from_version()
                        || ours.get_until_version() != theirs.get_until_version()
                    {
                        Some((ours.rel_patch_path.clone(), ours.version_range))
                    } else {
                        None
                    }
                }
                _ => None,
            })
            .collect()
    }

    /// Given a slice of (rel_patch_path, Option<VersionRange>) tuples, replace
    /// the version range of each matching patch in this collection with the
    /// one from the new_versions parameter.
    pub fn update_version_ranges(&self, new_versions: &[(String, Option<VersionRange>)]) -> Self {
        // new_versions should be really tiny (len() <= 2 for the most part), so
        // the overhead of building an O(1) lookup structure is not worth it.
        let get_updated_version = |rel_patch_path: &str| -> Option<Option<VersionRange>> {
            // The outer Option indicates whether we are updating it at all.
            // The inner Option indicates we can update it with None.
            new_versions
                .iter()
                .find(|i| i.0 == rel_patch_path)
                .map(|x| x.1)
        };
        let cloned_patches = self
            .patches
            .iter()
            .map(|p| match get_updated_version(&p.rel_patch_path) {
                Some(version_range) => PatchDictSchema {
                    version_range,
                    ..p.clone()
                },
                _ => p.clone(),
            })
            .collect();
        Self {
            workdir: self.workdir.clone(),
            indent_len: self.indent_len,
            patches: cloned_patches,
        }
    }

    fn union_helper(
        &self,
        other: &Self,
        our_hash_f: impl Fn(&PatchDictSchema) -> Result<String>,
        their_hash_f: impl Fn(&PatchDictSchema) -> Result<String>,
    ) -> Result<Self> {
        // 1. For all our patches:
        //    a. If there exists a matching patch hash from `other`:
        //       i. Create a new patch with merged platform info,
        //       ii. add the new patch to our new collection,
        //       iii. mark the other patch as "merged".
        //    b. Otherwise, copy our patch to the new collection.
        // 2. For all unmerged patches from `other`:
        //    a. Copy their patch into the new collection.
        let mut combined_patches = Vec::new();
        let mut other_merged = vec![false; other.patches.len()];

        // 1.
        for p in &self.patches {
            let our_hash = our_hash_f(p)?;
            let mut found = false;
            // a.
            for (idx, merged) in other_merged.iter_mut().enumerate() {
                if !*merged {
                    let other_p = &other.patches[idx];
                    let their_hash = their_hash_f(other_p)?;
                    if our_hash == their_hash {
                        // i.
                        let new_platforms =
                            p.platforms.union(&other_p.platforms).cloned().collect();
                        // ii.
                        combined_patches.push(PatchDictSchema {
                            rel_patch_path: p.rel_patch_path.clone(),
                            platforms: new_platforms,
                            metadata: p.metadata.clone(),
                            version_range: p.version_range,
                        });
                        // iii.
                        *merged = true;
                        found = true;
                        break;
                    }
                }
            }
            // b.
            if !found {
                combined_patches.push(p.clone());
            }
        }
        // 2.
        // Add any remaining, other-only patches.
        for (idx, merged) in other_merged.iter().enumerate() {
            if !*merged {
                combined_patches.push(other.patches[idx].clone());
            }
        }

        Ok(Self {
            workdir: self.workdir.clone(),
            indent_len: self.indent_len,
            patches: combined_patches,
        })
    }

    /// Copy all patches from this collection into another existing collection, and write that
    /// to the existing collection's file.
    pub fn transpose_write(&self, existing_collection: &mut Self) -> Result<()> {
        for p in &self.patches {
            let original_file_path = self.workdir.join(&p.rel_patch_path);
            let copy_file_path = existing_collection.workdir.join(&p.rel_patch_path);
            copy_create_parents(&original_file_path, &copy_file_path)?;
            existing_collection.patches.push(p.clone());
        }
        existing_collection.write_patches_json("PATCHES.json")
    }

    /// Write out the patch collection contents to a PATCHES.json file.
    fn write_patches_json(&self, filename: &str) -> Result<()> {
        let write_path = self.workdir.join(filename);
        let mut new_patches_file = File::create(&write_path)
            .with_context(|| format!("writing to {}", write_path.display()))?;
        new_patches_file.write_all(self.serialize_patches()?.as_bytes())?;
        Ok(())
    }

    pub fn serialize_patches(&self) -> Result<String> {
        let indent_str = " ".repeat(self.indent_len);
        let mut serialization_buffer = Vec::<u8>::new();
        // Use the collection's original indent unit for the JSON serialization.
        let mut serializer = serde_json::Serializer::with_formatter(
            &mut serialization_buffer,
            serde_json::ser::PrettyFormatter::with_indent(indent_str.as_bytes()),
        );
        self.patches
            .serialize(&mut serializer)
            .context("serializing patches to JSON")?;
        // Append a newline at the end if not present. This is necessary to get
        // past some pre-upload hooks.
        if serialization_buffer.last() != Some(&b'\n') {
            serialization_buffer.push(b'\n');
        }
        Ok(std::str::from_utf8(&serialization_buffer)?.to_string())
    }

    /// Return whether a given patch actually exists on the file system.
    pub fn patch_exists(&self, patch: &PatchDictSchema) -> bool {
        self.workdir.join(&patch.rel_patch_path).exists()
    }

    fn hash_from_rel_patch(&self, patch: &PatchDictSchema) -> Result<String> {
        hash_from_patch_path(&self.workdir.join(&patch.rel_patch_path))
    }
}

impl std::fmt::Display for PatchCollection {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        for (i, p) in self.patches.iter().enumerate() {
            let title = p
                .metadata
                .as_ref()
                .and_then(|x| x.get("title"))
                .and_then(serde_json::Value::as_str)
                .unwrap_or("[No Title]");
            let path = self.workdir.join(&p.rel_patch_path);
            let from = p.get_from_version();
            let until = p.get_until_version();
            writeln!(f, "* {}", title)?;
            if i == self.patches.len() - 1 {
                // Don't print a trailing newline after the final entry.
                write!(
                    f,
                    " {}\n r{}-r{}",
                    path.display(),
                    from.map_or("None".to_string(), |x| x.to_string()),
                    until.map_or("None".to_string(), |x| x.to_string())
                )?;
            } else {
                writeln!(
                    f,
                    " {}\n r{}-r{}",
                    path.display(),
                    from.map_or("None".to_string(), |x| x.to_string()),
                    until.map_or("None".to_string(), |x| x.to_string())
                )?;
            }
        }
        Ok(())
    }
}

/// Represents information which changed between now and an old version of a PATCHES.json file.
pub struct PatchTemporalDiff {
    pub cur_collection: PatchCollection,
    pub new_patches: PatchCollection,
    // Store version_updates as a vec, not a map, as it's likely to be very small (<= 2),
    // and the overhead of using an O(1) lookup structure isn't worth it.
    pub version_updates: Vec<(String, Option<VersionRange>)>,
}

/// Generate a `PatchTemporalDiff` describing the changes between the current
/// patches and the old patch file contents.
pub fn new_patches(
    patches_path: &Path,
    old_patch_contents: &str,
    platform: &str,
) -> Result<PatchTemporalDiff> {
    // Set up the current patch collection.
    let cur_collection = PatchCollection::parse_from_file(patches_path)
        .with_context(|| format!("parsing {} PATCHES.json", platform))?;
    validate_patches(&cur_collection, platform)?;

    // Set up the old patch collection.
    let old_collection = PatchCollection::parse_from_str(
        patches_path.parent().unwrap().to_path_buf(),
        old_patch_contents,
    )?;
    let old_collection = old_collection.filter_patches(|p| old_collection.patch_exists(p));

    // Set up the differential values.
    let version_updates = cur_collection.version_range_diffs(&old_collection);
    let new_patches: PatchCollection = cur_collection.subtract(&old_collection)?;
    let new_patches = new_patches.map_patches(|p| {
        let mut platforms = BTreeSet::new();
        platforms.extend(["android".to_string(), "chromiumos".to_string()]);
        PatchDictSchema {
            platforms: platforms.union(&p.platforms).cloned().collect(),
            ..p.to_owned()
        }
    });
    Ok(PatchTemporalDiff {
        cur_collection,
        new_patches,
        version_updates,
    })
}

/// Create a new collection with only the patches that apply to the
/// given platform.
///
/// If there's no platform listed, the patch should still apply if the patch file exists.
pub fn filter_patches_by_platform(collection: &PatchCollection, platform: &str) -> PatchCollection {
    collection.filter_patches(|p| {
        p.platforms.contains(platform) || (p.platforms.is_empty() && collection.patch_exists(p))
    })
}

/// Verify that the patches all exist and apply to the given platform.
///
/// If all is well, return `()`. Otherwise, return an `Err`.
pub fn validate_patches(collection: &PatchCollection, platform: &str) -> Result<()> {
    for p in &collection.patches {
        if !collection.patch_exists(p) {
            bail!("Patch {} does not exist", p.rel_patch_path);
        }
        if !p.platforms.is_empty() && !p.platforms.contains(platform) {
            bail!(
                "Patch {} did not apply to platform {}",
                p.rel_patch_path,
                platform
            );
        }
    }
    Ok(())
}

/// Get the hash from the patch file contents.
///
/// Not every patch file actually contains its own hash, so
/// we must compute the hash ourselves when it's not found.
fn hash_from_patch(patch_contents: impl Read) -> Result<String> {
    let mut reader = BufReader::new(patch_contents);
    let mut buf = String::new();
    reader.read_line(&mut buf)?;
    let mut first_line_iter = buf.trim().split(' ').fuse();
    let (fst_word, snd_word) = (first_line_iter.next(), first_line_iter.next());
    if let (Some("commit" | "From"), Some(hash_str)) = (fst_word, snd_word) {
        // If the first line starts with either "commit" or "From", the following
        // text is almost certainly a commit hash.
        Ok(hash_str.to_string())
    } else {
        // This is an annoying case where the patch isn't actually a commit.
        // So we'll hash the entire file, and hope that's sufficient.
        let mut hasher = Sha256::new();
        hasher.update(&buf); // Have to hash the first line.
        buf.clear(); // read_to_string appends, so clear to avoid hashing the first line twice.
        reader.read_to_string(&mut buf)?;
        hasher.update(buf); // Hash the rest of the file.
        let sha = hasher.finalize();
        Ok(format!("{:x}", &sha))
    }
}

fn hash_from_patch_path(patch: &Path) -> Result<String> {
    let f = File::open(patch).with_context(|| format!("opening patch file {}", patch.display()))?;
    hash_from_patch(f)
}

/// Copy a file from one path to another, and create any parent
/// directories along the way.
fn copy_create_parents(from: &Path, to: &Path) -> Result<()> {
    let to_parent = to
        .parent()
        .with_context(|| format!("getting parent of {}", to.display()))?;
    if !to_parent.exists() {
        std::fs::create_dir_all(to_parent)?;
    }

    copy(from, to)
        .with_context(|| format!("copying file from {} to {}", &from.display(), &to.display()))?;
    Ok(())
}

/// Given a json string, predict and return the indentation unit (in spaces)
/// the json string uses, preferring the largest unit consistent with every line.
pub fn predict_indent(json: &str) -> usize {
    let indents = json
        .split('\n')
        .map(|line| line.len() - line.trim_start_matches(' ').len())
        .collect::<Vec<usize>>();
    // Check the all-zero case first: zero is divisible by anything, so the
    // divisibility checks below would otherwise shadow it.
    if indents.iter().all(|x| *x == 0) {
        return 0;
    }
    if indents.iter().all(|x| x % 4 == 0) {
        return 4;
    }
    if indents.iter().all(|x| x % 2 == 0) {
        return 2;
    }
    1
}

#[cfg(test)]
mod test {

    use super::*;

    /// Test we can extract the hash from patch files.
    #[test]
    fn test_hash_from_patch() {
        // Example git patch from Gerrit
        let desired_hash = "004be4037e1e9c6092323c5c9268acb3ecf9176c";
        let test_file_contents = "commit 004be4037e1e9c6092323c5c9268acb3ecf9176c\n\
            Author: An Author <some_email>\n\
            Date: Thu Aug 6 12:34:16 2020 -0700";
        assert_eq!(
            &hash_from_patch(test_file_contents.as_bytes()).unwrap(),
            desired_hash
        );

        // Example git patch from upstream
        let desired_hash = "6f85225ef3791357f9b1aa097b575b0a2b0dff48";
        let test_file_contents = "From 6f85225ef3791357f9b1aa097b575b0a2b0dff48\n\
            Mon Sep 17 00:00:00 2001\n\
            From: Another Author <another_email>\n\
            Date: Wed, 18 Aug 2021 15:03:03 -0700";
        assert_eq!(
            &hash_from_patch(test_file_contents.as_bytes()).unwrap(),
            desired_hash
        );
    }
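
    /// Sketch of a test for the fallback hashing path (added for
    /// illustration): when a patch file isn't commit-formatted, the whole
    /// file is SHA-256 hashed. We don't hard-code a digest here; we only
    /// check the shape and determinism of the result.
    #[test]
    fn test_hash_from_patch_fallback() {
        let contents = "diff --git a/foo b/foo\n--- a/foo\n+++ b/foo\n";
        let hash1 = hash_from_patch(contents.as_bytes()).expect("hashing contents");
        // A Sha256 hex digest is 64 hex characters long.
        assert_eq!(hash1.len(), 64);
        assert!(hash1.chars().all(|c| c.is_ascii_hexdigit()));
        // Hashing the same contents twice must be deterministic.
        let hash2 = hash_from_patch(contents.as_bytes()).expect("hashing contents again");
        assert_eq!(hash1, hash2);
        // Different contents should hash differently.
        let hash3 = hash_from_patch("unrelated contents\n".as_bytes()).expect("hashing others");
        assert_ne!(hash1, hash3);
    }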

    #[test]
    fn test_keep_indent2() {
        let example_json = "\
[
  {
    \"rel_patch_path\": \"some_patch.\",
    \"metadata\": null,
    \"platforms\": []
  }
]
";
        let collection1 = PatchCollection::parse_from_str(PathBuf::new(), example_json)
            .expect("could not parse example_json");
        assert_eq!(collection1.indent_len, 2);
        let collection2 = PatchCollection::parse_from_str(
            PathBuf::new(),
            &collection1
                .serialize_patches()
                .expect("expected to serialize patches"),
        )
        .expect("could not parse from serialization");
        assert_eq!(collection2.indent_len, 2);
        let mut collection3 = collection1;
        collection3.indent_len = 4;
        let collection4 = PatchCollection::parse_from_str(
            PathBuf::new(),
            &collection3
                .serialize_patches()
                .expect("expected to serialize patches"),
        )
        .expect("could not parse from serialization");
        assert_eq!(collection4.indent_len, 4)
    }
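
    /// Spot-check `predict_indent` against hand-written JSON snippets
    /// (added for illustration; the expected values follow directly from
    /// the divisibility checks in `predict_indent`).
    #[test]
    fn test_predict_indent() {
        assert_eq!(predict_indent("[\n    {\n        \"a\": 1\n    }\n]\n"), 4);
        assert_eq!(predict_indent("[\n  {\n    \"a\": 1\n  }\n]\n"), 2);
        // Mixed odd indentation falls back to 1.
        assert_eq!(predict_indent("[\n {\n   \"a\": 1\n }\n]\n"), 1);
        // Fully flat JSON predicts no indentation at all.
        assert_eq!(predict_indent("[{\"a\": 1}]\n"), 0);
    }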

    #[test]
    fn test_union() {
        let patch1 = PatchDictSchema {
            rel_patch_path: "a".into(),
            metadata: None,
            platforms: BTreeSet::from(["x".into()]),
            version_range: Some(VersionRange {
                from: Some(0),
                until: Some(1),
            }),
        };
        let patch2 = PatchDictSchema {
            rel_patch_path: "b".into(),
            platforms: BTreeSet::from(["x".into(), "y".into()]),
            ..patch1.clone()
        };
        let patch3 = PatchDictSchema {
            platforms: BTreeSet::from(["z".into(), "x".into()]),
            ..patch1.clone()
        };
        let collection1 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1, patch2],
            indent_len: 0,
        };
        let collection2 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch3],
            indent_len: 0,
        };
        let union = collection1
            .union_helper(
                &collection2,
                |p| Ok(p.rel_patch_path.to_string()),
                |p| Ok(p.rel_patch_path.to_string()),
            )
            .expect("could not create union");
        assert_eq!(union.patches.len(), 2);
        assert_eq!(
            union.patches[0].platforms.iter().collect::<Vec<&String>>(),
            vec!["x", "z"]
        );
        assert_eq!(
            union.patches[1].platforms.iter().collect::<Vec<&String>>(),
            vec!["x", "y"]
        );
    }
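
    /// Sketch of a `subtract` test (added for illustration). Patch identity
    /// in `subtract` is based on hashing real files in the workdir, so this
    /// writes two small commit-style patch files into a scratch directory
    /// under the system temp dir; the file names are arbitrary fixtures.
    #[test]
    fn test_subtract() {
        let workdir =
            std::env::temp_dir().join(format!("patch_parsing_test_{}", std::process::id()));
        std::fs::create_dir_all(&workdir).expect("creating test workdir");
        std::fs::write(workdir.join("a.patch"), "commit aaaa1111\n").expect("writing a.patch");
        std::fs::write(workdir.join("b.patch"), "commit bbbb2222\n").expect("writing b.patch");
        let patch_a = PatchDictSchema {
            rel_patch_path: "a.patch".into(),
            metadata: None,
            platforms: Default::default(),
            version_range: None,
        };
        let patch_b = PatchDictSchema {
            rel_patch_path: "b.patch".into(),
            ..patch_a.clone()
        };
        let minuend = PatchCollection {
            workdir: workdir.clone(),
            patches: vec![patch_a.clone(), patch_b],
            indent_len: 0,
        };
        let subtrahend = PatchCollection {
            workdir: workdir.clone(),
            patches: vec![patch_a],
            indent_len: 0,
        };
        // Only b.patch is unique to the minuend.
        let difference = minuend.subtract(&subtrahend).expect("subtracting");
        assert_eq!(difference.patches.len(), 1);
        assert_eq!(difference.patches[0].rel_patch_path, "b.patch");
        std::fs::remove_dir_all(&workdir).ok();
    }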

    #[test]
    fn test_union_empties() {
        let patch1 = PatchDictSchema {
            rel_patch_path: "a".into(),
            metadata: None,
            platforms: Default::default(),
            version_range: Some(VersionRange {
                from: Some(0),
                until: Some(1),
            }),
        };
        let collection1 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1.clone()],
            indent_len: 4,
        };
        let collection2 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1],
            indent_len: 4,
        };
        let union = collection1
            .union_helper(
                &collection2,
                |p| Ok(p.rel_patch_path.to_string()),
                |p| Ok(p.rel_patch_path.to_string()),
            )
            .expect("could not create union");
        assert_eq!(union.patches.len(), 1);
        assert_eq!(union.patches[0].platforms.len(), 0);
    }
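
    /// Sketch of a `filter_patches_by_platform` test (added for
    /// illustration). Patches with an explicit platform list are kept or
    /// dropped purely on membership; a patch with no platforms is kept only
    /// if its file exists, so we use a rel_patch_path that's assumed not to
    /// exist in the working directory to check that it gets filtered out.
    #[test]
    fn test_filter_patches_by_platform() {
        let patch1 = PatchDictSchema {
            rel_patch_path: "a".into(),
            metadata: None,
            platforms: BTreeSet::from(["android".into()]),
            version_range: None,
        };
        let patch2 = PatchDictSchema {
            rel_patch_path: "b".into(),
            platforms: BTreeSet::from(["chromiumos".into()]),
            ..patch1.clone()
        };
        let patch3 = PatchDictSchema {
            rel_patch_path: "nonexistent_patch_for_test.patch".into(),
            platforms: Default::default(),
            ..patch1.clone()
        };
        let collection = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1, patch2, patch3],
            indent_len: 0,
        };
        let filtered = filter_patches_by_platform(&collection, "chromiumos");
        assert_eq!(filtered.patches.len(), 1);
        assert_eq!(filtered.patches[0].rel_patch_path, "b");
    }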

    #[test]
    fn test_version_differentials() {
        let fixture = version_range_fixture();
        let diff = fixture[0].version_range_diffs(&fixture[1]);
        assert_eq!(diff.len(), 1);
        assert_eq!(
            &diff,
            &[(
                "a".to_string(),
                Some(VersionRange {
                    from: Some(0),
                    until: Some(1)
                })
            )]
        );
        let diff = fixture[1].version_range_diffs(&fixture[2]);
        assert_eq!(diff.len(), 0);
    }

    #[test]
    fn test_version_updates() {
        let fixture = version_range_fixture();
        let collection = fixture[0].update_version_ranges(&[("a".into(), None)]);
        assert_eq!(collection.patches[0].version_range, None);
        assert_eq!(collection.patches[1], fixture[1].patches[1]);
        let new_version_range = Some(VersionRange {
            from: Some(42),
            until: Some(43),
        });
        let collection = fixture[0].update_version_ranges(&[("a".into(), new_version_range)]);
        assert_eq!(collection.patches[0].version_range, new_version_range);
        assert_eq!(collection.patches[1], fixture[1].patches[1]);
    }

    fn version_range_fixture() -> Vec<PatchCollection> {
        let patch1 = PatchDictSchema {
            rel_patch_path: "a".into(),
            metadata: None,
            platforms: Default::default(),
            version_range: Some(VersionRange {
                from: Some(0),
                until: Some(1),
            }),
        };
        let patch1_updated = PatchDictSchema {
            version_range: Some(VersionRange {
                from: Some(0),
                until: Some(3),
            }),
            ..patch1.clone()
        };
        let patch2 = PatchDictSchema {
            rel_patch_path: "b".into(),
            ..patch1.clone()
        };
        let collection1 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1, patch2.clone()],
            indent_len: 0,
        };
        let collection2 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch1_updated, patch2.clone()],
            indent_len: 0,
        };
        let collection3 = PatchCollection {
            workdir: PathBuf::new(),
            patches: vec![patch2],
            indent_len: 0,
        };
        vec![collection1, collection2, collection3]
    }
}