#pragma once
#include <cstdint>

namespace caffe2 {
namespace serialize {

constexpr uint64_t kMinSupportedFileFormatVersion = 0x1L;

constexpr uint64_t kMaxSupportedFileFormatVersion = 0xAL;

// Versions (i.e. why was the version number bumped?)

// Note [Dynamic Versions and torch.jit.save vs. torch.save]
//
// Our versioning scheme has a "produced file format version" which
// describes how an archive is to be read. The version written in an archive
// is at least this current produced file format version, but may be greater
// if the archive includes certain symbols. We refer to these conditional
// versions as "dynamic," since they are identified at runtime.
//
// Dynamic versioning is useful when an operator's semantics are updated.
// When using torch.jit.save we want those semantics to be preserved. If
// we bumped the produced file format version on every change, however,
// then older versions of PyTorch couldn't read even simple archives, like
// a single tensor, from newer versions of PyTorch. Instead, we
// assign dynamic versions to these changes that override the
// produced file format version as needed. That is, when the semantics
// of torch.div changed it was assigned dynamic version 4, and when
// modules that use torch.div are saved with torch.jit.save, those
// archives also have (at least) version 4. This prevents earlier versions
// of PyTorch from accidentally performing the wrong kind of division.
// Modules that don't use torch.div or other operators with dynamic
// versions can write the produced file format version, and those programs
// will run as expected on earlier versions of PyTorch.
//
// While torch.jit.save attempts to preserve operator semantics,
// torch.save does not. torch.save is analogous to pickling in Python, so
// a function that uses torch.div may behave differently if torch.save-d
// and torch.load-ed across PyTorch versions. From a technical perspective,
// torch.save ignores dynamic versioning.

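// The following is a minimal sketch (not the actual serializer logic) of how
// a single dynamic version overrides the base produced file format version:
// the archive records the larger of the two, so older readers reject the
// archive instead of misinterpreting the updated operator. The helper name
// is hypothetical and not part of this header's API.
constexpr uint64_t resolveArchiveVersion(
    uint64_t baseVersion,
    uint64_t dynamicVersion) {
  // Write whichever version is higher; an archive's version is at least the
  // produced file format version, and more if dynamic symbols are present.
  return dynamicVersion > baseVersion ? dynamicVersion : baseVersion;
}
// For example, resolveArchiveVersion(/*base*/ 3, /*torch.div*/ 4) == 4,
// matching the history described above: archives using torch.div were
// written with version 4 while the base produced version was still 3.
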
// 1. Initial version
// 2. Removed op_version_set version numbers
// 3. Added type tags to pickle serialization of container types
// 4. (Dynamic) Stopped integer division using torch.div
//      (a versioned symbol preserves the historic behavior of versions 1--3)
// 5. (Dynamic) Stopped torch.full from inferring a floating point dtype
//      when given bool or integer fill values.
// 6. Write version string to `./data/version` instead of `version`.

// [12/15/2021]
// kProducedFileFormatVersion was bumped from 3 to 7 due to a different
// interpretation of what the file format version means.
// Whenever a new upgrader is introduced, this number should be bumped.
// The reasons the version was bumped in the past:
//     1. aten::div was changed at version 4
//     2. aten::full was changed at version 5
//     3. torch.package uses version 6
//     4. Introduced the new upgrader design and set the version number
//        to 7 to mark this change
// --------------------------------------------------
// We describe new operator version bump reasons here:
// 1) [01/24/2022]
//     We bump the version number to 8 to update aten::linspace
//     and aten::linspace.out to error out when steps is not
//     provided. (see: https://github.com/pytorch/pytorch/issues/55951)
// 2) [01/30/2022]
//     Bump the version number to 9 to update aten::logspace and
//     aten::logspace.out to error out when steps is not
//     provided. (see: https://github.com/pytorch/pytorch/issues/55951)
// 3) [02/11/2022]
//     Bump the version number to 10 to update aten::gelu and
//     aten::gelu.out to support the new approximate kwarg.
//     (see: https://github.com/pytorch/pytorch/pull/61439)
constexpr uint64_t kProducedFileFormatVersion = 0xAL;

// The absolute minimum version at which we will write packages. This
// means that every package written from now on will always be
// greater than this number.
constexpr uint64_t kMinProducedFileFormatVersion = 0x3L;

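// A minimal sketch (hypothetical helper, not part of this header's API) of a
// writer-side sanity check for the guarantee stated above: every package
// written from now on carries a version above this minimum.
constexpr bool isWritableVersion(uint64_t version) {
  // Versions at or below the minimum are never written anymore.
  return version > kMinProducedFileFormatVersion;
}
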
// The version we write when the archive contains bytecode.
// It must be greater than or equal to kProducedFileFormatVersion,
// because a TorchScript change is likely to introduce a bytecode change.
// If kProducedFileFormatVersion is increased, kProducedBytecodeVersion
// should be increased too. The relationship is:
// kMaxSupportedFileFormatVersion >= (most likely ==) kProducedBytecodeVersion
//   >= kProducedFileFormatVersion
// If a format change is forward compatible (still readable by older
// executables), we will not increment the version number, to minimize the
// risk of breaking existing clients. TODO: A better way would be to allow
// the caller that creates a model to specify a maximum version that its
// clients can accept.
// Versions:
//  0x1L: Initial version
//  0x2L: (Comment missing)
//  0x3L: (Comment missing)
//  0x4L: (update) Added schema to function tuple. Forward-compatible change.
//  0x5L: (update) Update bytecode to share constant tensor files with
//  torchscript, and only serialize extra tensors that are not in the
//  torchscript constant table. Also update the tensor storage schema to
//  adapt to the unified format: the root key of tensor storage is updated
//  from {index} to {the_pointer_value_of_the_tensor.storage}, for example
//  `140245072983168.storage`. Forward-compatible change.
//  0x6L: Implicit operator versioning using the number of specified
//  arguments. Refer to the summary of
//  https://github.com/pytorch/pytorch/pull/56845 for details.
//  0x7L: Enable support for operators with default arguments plus out
//  arguments. See https://github.com/pytorch/pytorch/pull/63651 for
//  details.
//  0x8L: Emit promoted operators as instructions. See
//  https://github.com/pytorch/pytorch/pull/71662 for details.
//  0x9L: Change serialization format from pickle to flatbuffer. This version
//  is to serve the migration; v8 pickle and v9 flatbuffer are the same.
//  Refer to the summary of https://github.com/pytorch/pytorch/pull/75201
//  for more details.
constexpr uint64_t kProducedBytecodeVersion = 0x8L;

// static_assert(
//     kProducedBytecodeVersion >= kProducedFileFormatVersion,
//     "kProducedBytecodeVersion must be higher or equal to "
//     "kProducedFileFormatVersion.");

// Introduce kMinSupportedBytecodeVersion and kMaxSupportedBytecodeVersion
// for limited backward/forward compatibility support of bytecode. If
// kMinSupportedBytecodeVersion <= model_version <= kMaxSupportedBytecodeVersion
// (in the loader), we should support this model_version. For example, we
// provide a wrapper to handle an updated operator. (A sketch of this check
// follows the constants below.)
constexpr uint64_t kMinSupportedBytecodeVersion = 0x4L;
constexpr uint64_t kMaxSupportedBytecodeVersion = 0x9L;
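
// A minimal sketch of the loader-side compatibility check described above.
// The helper name is hypothetical and not part of this header's API: a
// model is loadable iff its bytecode version falls inside the supported
// window.
constexpr bool isBytecodeVersionSupported(uint64_t model_version) {
  // kMinSupportedBytecodeVersion <= model_version
  //   <= kMaxSupportedBytecodeVersion
  return model_version >= kMinSupportedBytecodeVersion &&
      model_version <= kMaxSupportedBytecodeVersion;
}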

} // namespace serialize
} // namespace caffe2