// aten/src/ATen/core/Formatting.cpp
#include <ATen/core/Formatting.h>
#include <c10/util/irange.h>

#include <cmath>
#include <cstdint>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <tuple>
namespace c10 {
std::ostream& operator<<(std::ostream& out, Backend b) {
  return out << toString(b);
}

std::ostream& operator<<(std::ostream& out, const Scalar& s) {
  if (s.isFloatingPoint()) {
    return out << s.toDouble();
  }
  if (s.isComplex()) {
    return out << s.toComplexDouble();
  }
  if (s.isBoolean()) {
    return out << (s.toBool() ? "true" : "false");
  }
  if (s.isSymInt()) {
    return out << (s.toSymInt());
  }
  if (s.isSymFloat()) {
    return out << (s.toSymFloat());
  }
  if (s.isIntegral(false)) {
    return out << s.toLong();
  }
  throw std::logic_error("Unknown type in Scalar");
}

std::string toString(const Scalar& s) {
  std::stringstream out;
  out << s;
  return out.str();
}
}
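
// Usage sketch for the c10::Scalar printer above (illustrative, not from the
// original file): streaming a Scalar dispatches on its tag.
//   c10::Scalar s(3.5);
//   std::cout << s << '\n';               // prints 3.5 via toDouble()
//   std::string repr = c10::toString(s);  // same text as a std::string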
namespace at {

// Not all C++ standard libraries provide std::defaultfloat, so we define our own here.
inline std::ios_base& defaultfloat(std::ios_base& __base) {
  __base.unsetf(std::ios_base::floatfield);
  return __base;
}
// Saves/restores number formatting inside scope.
struct FormatGuard {
  FormatGuard(std::ostream& out)
  : out(out), saved(nullptr) {
    saved.copyfmt(out);
  }
  ~FormatGuard() {
    out.copyfmt(saved);
  }
private:
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  std::ostream& out;
  std::ios saved;
};

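// Minimal usage sketch for FormatGuard (illustrative): formatting changes made
// while the guard is alive are rolled back when it goes out of scope.
//   {
//     FormatGuard guard(std::cout);
//     std::cout << std::scientific << std::setprecision(4) << 1234.5 << '\n';
//   } // std::cout's flags and precision are restored here
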
std::ostream& operator<<(std::ostream& out, const DeprecatedTypeProperties& t) {
  return out << t.toString();
}

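// __printFormat scans a contiguous CPU double tensor and decides how elements
// will be rendered: it returns {scale, width} and, as a side effect, sets the
// stream's floating-point flags. Worked example (assumed input): for values
// {0.0001, 12345.0} the base-10 exponent spread exceeds 4, so it picks
// scientific notation with precision 4 and a field width of 11.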
static std::tuple<double, int> __printFormat(std::ostream& stream, const Tensor& self) {
  auto size = self.numel();
  if(size == 0) {
    return std::make_tuple(1., 0);
  }
  bool intMode = true;
  auto self_p = self.const_data_ptr<double>();
  for (const auto i : c10::irange(size)) {
    auto z = self_p[i];
    if(std::isfinite(z)) {
      if(z != std::ceil(z)) {
        intMode = false;
        break;
      }
    }
  }
  int64_t offset = 0;
  while(!std::isfinite(self_p[offset])) {
    offset = offset + 1;
    if(offset == size) {
      break;
    }
  }
  double expMin = 1;
  double expMax = 1;
  if(offset != size) {
    expMin = std::fabs(self_p[offset]);
    expMax = std::fabs(self_p[offset]);
    for (const auto i : c10::irange(offset, size)) {
      double z = std::fabs(self_p[i]);
      if(std::isfinite(z)) {
        if(z < expMin) {
          expMin = z;
        }
        if(z > expMax) {
          expMax = z;
        }
      }
    }
    if(expMin != 0) {
      expMin = std::floor(std::log10(expMin)) + 1;
    } else {
      expMin = 1;
    }
    if(expMax != 0) {
      expMax = std::floor(std::log10(expMax)) + 1;
    } else {
      expMax = 1;
    }
  }
  double scale = 1;
  int sz = 11;
  if(intMode) {
    if(expMax > 9) {
      sz = 11;
      stream << std::scientific << std::setprecision(4);
    } else {
      sz = static_cast<int>(expMax) + 1;
      stream << defaultfloat;
    }
  } else {
    if(expMax-expMin > 4) {
      sz = 11;
      if(std::fabs(expMax) > 99 || std::fabs(expMin) > 99) {
        sz = sz + 1;
      }
      stream << std::scientific << std::setprecision(4);
    } else {
      if(expMax > 5 || expMax < 0) {
        sz = 7;
        scale = std::pow(10, expMax-1);
        stream << std::fixed << std::setprecision(4);
      } else {
        if(expMax == 0) {
          sz = 7;
        } else {
          sz = static_cast<int>(expMax) + 6;
        }
        stream << std::fixed << std::setprecision(4);
      }
    }
  }
  return std::make_tuple(scale, sz);
}

static void __printIndent(std::ostream& stream, int64_t indent)
{
  for (C10_UNUSED const auto i : c10::irange(indent)) {
    stream << " ";
  }
}

static void printScale(std::ostream& stream, double scale) {
  FormatGuard guard(stream);
  stream << defaultfloat << scale << " *" << '\n';
}
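
// __printMatrix renders a 2-D slice row by row. When rows are wider than
// linesize allows, columns are split into chunks of nColumnPerLine, each
// emitted under its own "Columns M to N" header, Torch-style.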
static void __printMatrix(std::ostream& stream, const Tensor& self, int64_t linesize, int64_t indent)
{
  auto [scale, sz] = __printFormat(stream, self);

  __printIndent(stream, indent);
  int64_t nColumnPerLine = (linesize-indent)/(sz+1);
  int64_t firstColumn = 0;
  int64_t lastColumn = -1;
  while(firstColumn < self.size(1)) {
    if(firstColumn + nColumnPerLine <= self.size(1)) {
      lastColumn = firstColumn + nColumnPerLine - 1;
    } else {
      lastColumn = self.size(1) - 1;
    }
    if(nColumnPerLine < self.size(1)) {
      if(firstColumn != 0) {
        stream << '\n';
      }
      stream << "Columns " << firstColumn+1 << " to " << lastColumn+1 << '\n';
      __printIndent(stream, indent);
    }
    if(scale != 1) {
      printScale(stream, scale);
      __printIndent(stream, indent);
    }
    for (const auto l : c10::irange(self.size(0))) {
      Tensor row = self.select(0, l);
      const double *row_ptr = row.const_data_ptr<double>();
      for (const auto c : c10::irange(firstColumn, lastColumn+1)) {
        stream << std::setw(sz) << row_ptr[c]/scale;
        if(c == lastColumn) {
          stream << '\n';
          if(l != self.size(0)-1) {
            if(scale != 1) {
              __printIndent(stream, indent);
              stream << " ";
            } else {
              __printIndent(stream, indent);
            }
          }
        } else {
          stream << " ";
        }
      }
    }
    firstColumn = lastColumn + 1;
  }
}

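// __printTensor walks every 2-D slice of a tensor with three or more
// dimensions. The counter vector works like an odometer over the leading
// dimensions: each pass increments the lowest index and carries into the next
// dimension on overflow, printing a "(i,j,...,.,.) =" header per slice.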
static void __printTensor(std::ostream& stream, Tensor& self, int64_t linesize)
{
  std::vector<int64_t> counter(self.ndimension()-2);
  bool start = true;
  bool finished = false;
  counter[0] = -1;
  for (const auto i : c10::irange(1, counter.size())) {
    counter[i] = 0;
  }
  while(true) {
    for(int64_t i = 0; i < self.ndimension()-2; i++) {
      counter[i] = counter[i] + 1;
      if(counter[i] >= self.size(i)) {
        if(i == self.ndimension()-3) {
          finished = true;
          break;
        }
        counter[i] = 0;
      } else {
        break;
      }
    }
    if(finished) {
      break;
    }
    if(start) {
      start = false;
    } else {
      stream << '\n';
    }
    stream << "(";
    Tensor tensor = self;
    for (const auto i : c10::irange(self.ndimension()-2)) {
      tensor = tensor.select(0, counter[i]);
      stream << counter[i]+1 << ",";
    }
    stream << ".,.) = " << '\n';
    __printMatrix(stream, tensor, linesize, 1);
  }
}

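// Usage sketch for the printers below (illustrative): the std::ostream
// overload does the work; operator<<(std::ostream&, const Tensor&) in the
// header forwards to it with a default linesize.
//   at::Tensor t = at::rand({3, 4});
//   at::print(std::cout, t, 80);  // wrap output at 80 columns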
void print(const Tensor& t, int64_t linesize) {
  print(std::cout, t, linesize);
}
std::ostream& print(std::ostream& stream, const Tensor& tensor_, int64_t linesize) {
  FormatGuard guard(stream);
  if(!tensor_.defined()) {
    stream << "[ Tensor (undefined) ]";
  } else if (tensor_.is_sparse()) {
    stream << "[ " << tensor_.toString() << "{}\n";
    stream << "indices:\n" << tensor_._indices() << "\n";
    stream << "values:\n" << tensor_._values() << "\n";
    stream << "size:\n" << tensor_.sizes() << "\n";
    stream << "]";
  } else {
    Tensor tensor;
    if (tensor_.is_quantized()) {
      tensor = tensor_.dequantize().to(kCPU, kDouble).contiguous();
    } else if (tensor_.is_mkldnn()) {
      stream << "MKLDNN Tensor: ";
      tensor = tensor_.to_dense().to(kCPU, kDouble).contiguous();
    } else if (tensor_.is_mps()) {
      // MPS does not support double tensors, so first copy to CPU, then convert.
      tensor = tensor_.to(kCPU).to(kDouble).contiguous();
    } else {
      tensor = tensor_.to(kCPU, kDouble).contiguous();
    }
    if(tensor.ndimension() == 0) {
      stream << defaultfloat << tensor.const_data_ptr<double>()[0] << '\n';
      stream << "[ " << tensor_.toString() << "{}";
    } else if(tensor.ndimension() == 1) {
      if (tensor.numel() > 0) {
        auto [scale, sz] = __printFormat(stream, tensor);
        if(scale != 1) {
          printScale(stream, scale);
        }
        const double* tensor_p = tensor.const_data_ptr<double>();
        for (const auto i : c10::irange(tensor.size(0))) {
          stream << std::setw(sz) << tensor_p[i]/scale << '\n';
        }
      }
      stream << "[ " << tensor_.toString() << "{" << tensor.size(0) << "}";
    } else if(tensor.ndimension() == 2) {
      if (tensor.numel() > 0) {
        __printMatrix(stream, tensor, linesize, 0);
      }
      stream << "[ " << tensor_.toString() << "{" << tensor.size(0) << "," << tensor.size(1) << "}";
    } else {
      if (tensor.numel() > 0) {
        __printTensor(stream, tensor, linesize);
      }
      stream << "[ " << tensor_.toString() << "{" << tensor.size(0);
      for (const auto i : c10::irange(1, tensor.ndimension())) {
        stream << "," << tensor.size(i);
      }
      stream << "}";
    }
    if (tensor_.is_quantized()) {
      stream << ", qscheme: " << toString(tensor_.qscheme());
      if (tensor_.qscheme() == c10::kPerTensorAffine) {
        stream << ", scale: " << tensor_.q_scale();
        stream << ", zero_point: " << tensor_.q_zero_point();
      } else if (tensor_.qscheme() == c10::kPerChannelAffine ||
          tensor_.qscheme() == c10::kPerChannelAffineFloatQParams) {
        stream << ", scales: ";
        Tensor scales = tensor_.q_per_channel_scales();
        print(stream, scales, linesize);
        stream << ", zero_points: ";
        Tensor zero_points = tensor_.q_per_channel_zero_points();
        print(stream, zero_points, linesize);
        stream << ", axis: " << tensor_.q_per_channel_axis();
      }
    }

    // Proxy check for whether autograd was built.
    if (tensor.getIntrusivePtr()->autograd_meta()) {
      auto& fw_grad = tensor._fw_grad(/* level */ 0);
      if (fw_grad.defined()) {
        stream << ", tangent:" << '\n' << fw_grad;
      }
    }
    stream << " ]";
  }
  return stream;
}

}