#include <ATen/autocast_mode.h>
#include <ATen/core/Generator.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/mobile/promoted_prim_ops.h>
#include <torch/csrc/jit/runtime/custom_operator.h>
#include <torch/csrc/jit/runtime/operator.h>
#include <torch/csrc/jit/runtime/register_ops_utils.h>
#include <torch/csrc/jit/runtime/slice_indices_adjust.h>
#include <torch/library.h>
#include <optional>

#include <algorithm>
#include <bitset>
#include <cctype>
#include <cmath>
#include <iostream>
#include <memory>
#include <ostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

namespace torch::jit {

namespace {
std::string stringSlice(
    std::string string,
    std::optional<int64_t> start,
    std::optional<int64_t> end,
    int64_t step) {
  int64_t start_val = start.has_value() ? start.value() : INT64_MAX;
  int64_t end_val = end.has_value() ? end.value() : INT64_MAX;

  const int64_t num_vals =
      slice_indices_adjust(string.size(), &start_val, &end_val, step);

  int64_t i = start_val;
  std::string result = "";
  for (const auto j : c10::irange(num_vals)) {
    (void)j; // Suppress unused variable warning
    result += string[i];
    i += step;
  }

  return result;
}
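
// Example: this mirrors Python string slicing via slice_indices_adjust,
// which follows CPython's PySlice_Unpack/PySlice_AdjustIndices and treats
// INT64_MAX as a missing bound, so stringSlice("hello", 1, 4, 1) yields
// "ell" just like "hello"[1:4].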

// Consecutive whitespace characters are treated as a single separator, and
// the result contains no empty strings at the start or end if the string
// has leading or trailing whitespace.
c10::List<std::string> splitNoneSeparator(const std::string& string) {
  c10::List<std::string> splits;
  // The whitespace characters include tab, space, and the delimiters used by
  // the implementation of splitlines.
  std::string whitespaces =
      " \t\n\r\r\n\v\x0b\f\x0c\x1c\x1d\x1e\x85\u2028\u2029";
  std::string::size_type prev_pos = 0;
  std::string::size_type pos = 0;

  while ((pos = string.find_first_of(whitespaces, pos)) != std::string::npos) {
    auto substr = string.substr(prev_pos, pos - prev_pos);
    // Skip runs of whitespace, as the Python split() method does.
    if (!substr.empty()) {
      splits.emplace_back(substr);
    }
    pos++;
    prev_pos = pos;
  }
  if (prev_pos != string.size()) {
    splits.emplace_back(string.substr(prev_pos));
  }
  return splits;
}
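
// Example: this matches Python's str.split() with no separator, e.g.
// splitNoneSeparator("  a\tb c  ") yields ["a", "b", "c"], with no empty
// strings contributed by the leading or trailing whitespace.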

bool isSortableTupleType(
    const TupleTypePtr& tuple_type,
    std::stringstream& why_not) {
  for (const TypePtr& ele_type : tuple_type->containedTypes()) {
    switch (ele_type->kind()) {
      case TypeKind::IntType:
      case TypeKind::BoolType:
      case TypeKind::FloatType:
      case TypeKind::StringType:
      case TypeKind::TensorType:
        continue;
      case TypeKind::TupleType:
        if (!isSortableTupleType(ele_type->expect<TupleType>(), why_not)) {
          return false;
        }
        continue;
      case TypeKind::ClassType:
        if (!c10::checkObjectSortSchema(
                ele_type->expect<ClassType>(), why_not)) {
          return false;
        }
        continue;
      default:
        why_not << "Contained elements in " << *tuple_type
                << " are not sortable. Only Int, Bool, Float, String, Tensor, "
                << "a User Defined Class with __lt__ method defined or Tuples "
                << "of aforementioned types can be sorted.";
        return false;
    }
  }

  return true;
}

bool isSortableListOfObjectsOrTuples(
    c10::List<IValue>& ivalues,
    std::stringstream& why_not) {
  if (ivalues.empty()) {
    return true;
  }

  auto type = ivalues.get(0).type();
  // We assume lists have homogeneous types; use the first element to
  // determine the best sorting method. If in the future we need to support
  // heterogeneous types inside lists, then sorting needs runtime sortability
  // checks.
  const size_t n = ivalues.size();
  for (const auto i : c10::irange(n)) {
    const IValue& v = ivalues.get(i);
    auto curr_type = v.type();
    if (*curr_type != *type) {
      why_not << "Only values of same type can be compared. "
              << "Found " << type->repr_str() << " and "
              << curr_type->repr_str();
      return false;
    }
  }

  if (auto tuple_type = type->cast<TupleType>()) {
    return isSortableTupleType(tuple_type, why_not);
  }

  if (auto class_type = type->cast<ClassType>()) {
    return c10::checkObjectSortSchema(class_type, why_not) != nullptr;
  }

  // Basic types like tensors/ints/floats/bools/strs are not checked in this
  // method because they should have been schema matched to specialized
  // aten::sort kernels using listSort<T>.
  why_not << "Only list of Tensors, ints, floats, bools, strs, "
          << "a User Defined Class that defines the __lt__ compare method "
          << "or Tuples of aforementioned types can be sorted, got list of "
          << type->repr_str() << "\n";
  return false;
}

template <bool has_reverse_arg, bool copy_return_list>
void sort_op(Stack& stack) {
  bool reverse = has_reverse_arg ? pop(stack).toBool() : false;
  auto g_list = pop(stack).toList();

  if (copy_return_list) {
    g_list = g_list.copy();
  }

  if (!g_list.empty()) {
    std::stringstream error_str;
    if (!isSortableListOfObjectsOrTuples(g_list, error_str)) {
      throw std::runtime_error(error_str.str());
    }

    c10::IValueComparator comparator;
    if (reverse) {
      comparator = c10::getGreaterThanComparator(g_list.get(0));
    } else {
      comparator = c10::getLessThanComparator(g_list.get(0));
    }
    std::sort(g_list.begin(), g_list.end(), comparator);
  }

  if (copy_return_list) {
    push(stack, g_list);
  }
}
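
// Note: has_reverse_arg controls whether a trailing bool `reverse` argument
// is popped (Python's `reverse=` keyword). copy_return_list distinguishes the
// out-of-place sorted() flavor, which copies the list and pushes the sorted
// copy, from the in-place list.sort() flavor, which mutates its argument and
// pushes nothing.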

template <typename T, typename U>
auto powWrapper(T a, U b) {
  TORCH_CHECK(
      !(a == 0.0 && b < 0.0), "0.0 cannot be raised to a negative power")
  return pow(a, b);
}
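
// Example: powWrapper(2.0, -2.0) returns 0.25, while powWrapper(0.0, -2.0)
// throws, matching Python, where 0.0 ** -2.0 raises
// "0.0 cannot be raised to a negative power".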

static const std::vector<OperatorGeneratorArgs> opGenArgs{
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::str(t elem) -> str"),
        [](Stack& stack) {
          std::stringstream ss;
          ss << pop(stack);
          push(stack, ss.str());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::list(str t) -> str[]"),
        [](Stack& stack) {
          auto str = pop(stack).toStringRef();
          c10::List<std::string> chars;
          chars.reserve(str.size());
          for (auto c : str) {
            chars.push_back(std::string(1, c));
          }
          push(stack, std::move(chars));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::cpu(Tensor(a) self) -> Tensor(a|b)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.cpu());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::numpy_T.a(Tensor(a) self) -> Tensor(a)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.numpy_T());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::matrix_H.a(Tensor(a) self) -> Tensor(a)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.matrix_H());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::mT.a(Tensor(a) self) -> Tensor(a)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.mT());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::mH.a(Tensor(a) self) -> Tensor(a)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.mH());
        },
        aliasAnalysisFromSchema()),

    // only used internally in range() translation
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__range_length(int lo, int hi, int step) -> int"),
        [](Stack& stack) {
          int64_t lo = 0, hi = 0, step = 0;
          pop(stack, lo, hi, step);
          // Match Python and raise at runtime when step == 0.
          if (step == 0) {
            throw std::runtime_error("range() arg 3 must not be zero");
          }
          if (step > 0 && lo < hi) {
            push(stack, 1 + (hi - 1 - lo) / step);
          } else if (step < 0 && lo > hi) {
            push(stack, 1 + (lo - 1 - hi) / (0 - step));
          } else {
            push(stack, 0);
          }
        },
        aliasAnalysisFromSchema()),
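    // Example: this computes len(range(lo, hi, step)), e.g.
    // __range_length(0, 10, 3) == 4, matching len(range(0, 10, 3)) in Python
    // (the elements 0, 3, 6, 9).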
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__derive_index(int index, int start, int step) -> int"),
        [](Stack& stack) {
          int64_t index = 0, start = 0, step = 0;
          pop(stack, index, start, step);
          push(stack, start + index * step);
        },
        aliasAnalysisFromSchema()),
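    // Example: __derive_index maps a loop counter back to the corresponding
    // range value, i.e. range(start, hi, step)[index] == start + index * step,
    // so __derive_index(2, 1, 3) == 7.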
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::TupleUnpack(Any tup) -> ..."),
        [](Stack& stack) { tupleUnpack(stack); },
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::unchecked_cast(t x) -> t"),
        noop,
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::IntImplicit(Tensor a) -> int"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          checkImplicitTensorToNum(a, /*to int*/ true);
          push(stack, a.item<int64_t>());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ComplexImplicit(Tensor a) -> complex"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          checkImplicitTensorToNum(a, /*to int*/ false);
          push(stack, a.item<c10::complex<double>>());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::FloatImplicit(Tensor a) -> float"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          checkImplicitTensorToNum(a, /*to int*/ false);
          push(stack, a.item<double>());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ScalarImplicit(Tensor a) -> Scalar"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          checkImplicitTensorToNum(a, /*to int*/ false);
          push(stack, a.item());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Bool.Tensor(Tensor a) -> bool"),
        boolTensor,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Bool.int(int a) -> bool"),
        [](Stack& stack) {
          int64_t i = 0;
          pop(stack, i);
          push(stack, (bool)i);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Bool.float(float a) -> bool"),
        [](Stack& stack) {
          double d = 0;
          pop(stack, d);
          push(stack, (bool)d);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Int.Tensor(Tensor a) -> int"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.item<int64_t>());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Int.bool(bool a) -> int"),
        [](Stack& stack) {
          bool b = false;
          pop(stack, b);
          push(stack, static_cast<int64_t>(b));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Int.float(float a) -> int"),
        [](Stack& stack) {
          double d = 0;
          pop(stack, d);
          push(stack, static_cast<int64_t>(d));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Int.Scalar(Scalar a) -> int"),
        [](Stack& stack) {
          IValue scalar;
          pop(stack, scalar);
          if (scalar.isInt()) {
            push(stack, std::move(scalar));
          } else {
            // toScalar() needed to avoid strict type check in IValue::toInt.
            push(stack, static_cast<int64_t>(scalar.toScalar().toInt()));
          }
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Int.str(str a) -> int"),
        [](Stack& stack) {
          auto s = pop(stack).toString();
          std::string::size_type sz = 0;
          int64_t val = static_cast<int64_t>(std::stoll(s->string(), &sz));
          if (sz == s->string().size()) {
            push(stack, val);
          } else {
            std::stringstream error_str;
            error_str << "invalid literal for int() "
                      << "with base 10: '" << s->string() << "'";
            throw std::runtime_error(error_str.str());
          }
        },
        aliasAnalysisFromSchema()),
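    // Example: the out-parameter of std::stoll reports how many characters
    // were consumed, so Int("42") pushes 42 while Int("42abc") throws,
    // mirroring Python's int("42abc") raising ValueError.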
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Float.Tensor(Tensor a) -> float"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.item<double>());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Float.Scalar(Scalar a) -> float"),
        [](Stack& stack) {
          IValue scalar;
          pop(stack, scalar);
          if (scalar.isDouble()) {
            push(stack, std::move(scalar));
          } else if (scalar.isComplexDouble()) {
            push(stack, scalar.toComplexDouble().real());
          } else {
            push(stack, static_cast<double>(scalar.toInt()));
          }
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Float.int(int a) -> float"),
        [](Stack& stack) {
          int64_t i = 0;
          pop(stack, i);
          push(stack, (float)i);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Float.bool(bool a) -> float"),
        [](Stack& stack) {
          bool b = false;
          pop(stack, b);
          push(stack, (float)b);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Float.str(str a) -> float"),
        [](Stack& stack) {
          auto s = pop(stack).toString();
          std::string::size_type sz = 0;
          double b = std::stod(s->string(), &sz);
          if (sz == s->string().size()) {
            push(stack, b);
          } else {
            std::stringstream error_str;
            error_str << "could not convert string "
                      << "to float: '" << s->string() << "'";
            throw std::runtime_error(error_str.str());
          }
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Complex.Scalar(Scalar a) -> complex"),
        [](Stack& stack) {
          IValue scalar;
          pop(stack, scalar);
          if (scalar.isComplexDouble()) {
            push(stack, std::move(scalar));
          } else if (scalar.isDouble()) {
            push(stack, c10::complex<double>(scalar.toDouble(), 0));
          } else {
            push(stack, c10::complex<double>(scalar.toInt(), 0));
          }
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"),
        [](Stack& stack) {
          at::Tensor a, b;
          pop(stack, a, b);
          push(stack, c10::complex<double>(a.item<double>(), b.item<double>()));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::format(str self, ...) -> str"),
        [](Stack& stack) { aten_format(stack); },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::einsum.sublist(Tensor a, ...) -> Tensor"),
        [](Stack& stack) {
          size_t num_inputs = pop(stack).toInt();
          einsum(stack, num_inputs);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.Scalar(Scalar a) -> Tensor"),
        numToTensorScalar,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::RaiseException(str msg, str? cls=None) -> ()"),
        raiseException,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Size(int[] sizes) -> int[]"),
        [](Stack& stack) {},
        aliasAnalysisFromSchema()),
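    // Note: the empty body above is intentional; aten::Size returns its
    // int[] argument unchanged, so the value already on the stack serves as
    // the output.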
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::size(Tensor self) -> int[]"),
        size,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sym_size(Tensor self) -> SymInt[]"),
        sym_size,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::stride(Tensor self) -> int[]"),
        [](Stack& stack) {
          at::Tensor arg = pop(stack).toTensor();
          push(stack, arg.strides());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sym_stride(Tensor self) -> SymInt[]"),
        sym_stride,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::EnumName(AnyEnumType enum) -> str"),
        [](Stack& stack) {
          IValue e = pop(stack);
          push(stack, e.toEnumHolder()->name());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::EnumValue.int(AnyEnumType enum) -> int"),
        [](Stack& stack) {
          IValue e = pop(stack);
          push(stack, e.toEnumHolder()->value());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::EnumValue.float(AnyEnumType enum) -> float"),
        [](Stack& stack) {
          IValue e = pop(stack);
          push(stack, e.toEnumHolder()->value());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::EnumValue.str(AnyEnumType enum) -> str"),
        [](Stack& stack) {
          IValue e = pop(stack);
          push(stack, e.toEnumHolder()->value());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        // Note: the compiler knows how to type TupleIndex more accurately
        // than it is listed here.
        TORCH_SELECTIVE_SCHEMA("prim::TupleIndex(Any tup, int i) -> Any"),
        tupleIndex,
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ne.int_list(int[] a, int[] b) -> bool"),
        listNe<int64_t>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)"),
        noop,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::device(Tensor a) -> Device"),
        device,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::dtype(Tensor a) -> int"),
        dtype,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::layout(Tensor a) -> Layout"),
        layout,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::__not__(bool self) -> bool"),
        _not,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::__is__(t1 self, t2 obj) -> bool"),
        is,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::__isnot__(t1 self, t2 obj) -> bool"),
        isNot,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::element_size(Tensor self) -> int"),
        [](Stack& stack) {
          at::Tensor arg = pop(stack).toTensor();
          push(stack, arg.element_size());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::numel(Tensor self) -> int"),
        [](Stack& stack) {
          at::Tensor arg = pop(stack).toTensor();
          push(stack, arg.numel());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::dim(Tensor self) -> int"),
        dim,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::get_device(Tensor self) -> int"),
        [](Stack& stack) {
          RECORD_FUNCTION("get_device", c10::ArrayRef<const c10::IValue>{});
          auto result =
              at::get_device((std::move(peek(stack, 0, 1))).toTensor());
          drop(stack, 1);
          pack(stack, result);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::storage_offset(Tensor self) -> int"),
        [](Stack& stack) {
          RECORD_FUNCTION("storage_offset", c10::ArrayRef<const c10::IValue>{});
          auto result =
              ((std::move(peek(stack, 0, 1))).toTensor()).storage_offset();
          drop(stack, 1);
          pack(stack, result);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::is_contiguous(Tensor self) -> bool"),
        [](Stack& stack) {
          RECORD_FUNCTION("is_contiguous", c10::ArrayRef<const c10::IValue>{});
          auto result =
              ((std::move(peek(stack, 0, 1))).toTensor()).is_contiguous();
          drop(stack, 1);
          pack(stack, result);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::is_contiguous.memory_format(Tensor self, MemoryFormat memory_format) -> bool"),
        [](Stack& stack) {
          auto memory_format = pop(stack).toMemoryFormat();
          auto t = pop(stack).toTensor();
          push(stack, t.is_contiguous(memory_format));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        // NB: intentionally suffixed with extra _format to prevent tests for
        // "_like" suffix from triggering on this
        TORCH_SELECTIVE_SCHEMA(
            "aten::is_strides_like_format(Tensor self, MemoryFormat memory_format) -> bool"),
        [](Stack& stack) {
          auto memory_format = pop(stack).toMemoryFormat();
          auto t = pop(stack).toTensor();
          push(stack, t.unsafeGetTensorImpl()->is_strides_like(memory_format));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::is_non_overlapping_and_dense(Tensor self) -> bool"),
        [](Stack& stack) {
          auto t = pop(stack).toTensor();
          push(stack, t.unsafeGetTensorImpl()->is_non_overlapping_and_dense());
        },
        aliasAnalysisFromSchema()),
    // these ops are generic over the list element type.
    // CREATING GENERIC_LIST_OPS
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::select.t(t[](a) list, int idx) -> t(*)"),
        listSelect,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"),
        listSelect,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)"),
        listAppend,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::reverse.t(t[](a!) self) -> ()"),
        listReverse,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::extend.t(t[](a!) self, t[] other) -> ()"),
        listExtend,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::copy.t(t[](a) self) -> t[]"),
        listCopy,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::_set_item.t(t [](a!) l, int idx, t(b -> *) el) -> t[](a!)"),
        listSetItem,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::clear.t(t[](a!) self) -> ()"),
        listClear,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::Delete.t(t[](a!) self, int idx) -> ()"),
        listDelete,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::insert.t(t[](a!) self, int idx, t(b -> *) el) -> ()"),
        listInsert,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::pop.t(t[](a!) self, int idx=-1) -> t(*)"),
        listPop,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::add.t(t[] a, t[] b) -> t[]"),
        listAdd,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::add_.t(t[](a!) self, t[] b) -> t[]"),
        listInplaceAdd,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]"),
        listSlice,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::list.t(t[] l) -> t[]"),
        listList,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::mul.left_t(t[] l, int n) -> t[]"),
        listMulIntLeft,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::mul.right_(int n, t[] l) -> t[]"),
        listMulIntRight,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::mul_.t(t[](a!) l, int n) -> t[](a!)"),
        listMulIntLeftInPlace,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::len.t(t[] a) -> int"),
        listLen,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::eq.int_list(int[] a, int[] b) -> bool"),
        listEq<int64_t>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::eq.device(Device a, Device b) -> bool"),
        [](Stack& stack) {
          auto a = pop(stack).toDevice();
          auto b = pop(stack).toDevice();
          push(stack, a == b);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ne.device(Device a, Device b) -> bool"),
        [](Stack& stack) {
          auto a = pop(stack).toDevice();
          auto b = pop(stack).toDevice();
          push(stack, a != b);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::eq.bool(bool a, bool b) -> bool"),
        [](Stack& stack) {
          auto a = pop(stack);
          auto b = pop(stack);
          push(stack, a == b);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ne.bool(bool a, bool b) -> bool"),
        [](Stack& stack) {
          auto a = pop(stack);
          auto b = pop(stack);
          push(stack, a != b);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::is_autocast_enabled() -> bool"),
        [](Stack& stack) {
#if defined BUILD_LITE_INTERPRETER || defined C10_MOBILE
          bool enabled = false;
#else
          bool enabled = at::autocast::is_autocast_enabled(at::kCUDA);
#endif
          push(stack, enabled);
        },
        aliasAnalysisConservative()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::is_autocast_cpu_enabled() -> bool"),
        [](Stack& stack) {
#if defined BUILD_LITE_INTERPRETER || defined C10_MOBILE
          bool enabled = false;
#else
          bool enabled = at::autocast::is_autocast_enabled(at::kCPU);
#endif
          push(stack, enabled);
        },
        aliasAnalysisConservative()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::get_autocast_dtype(str device_type) -> ScalarType"),
        [](Stack& stack) {
#if defined BUILD_LITE_INTERPRETER || defined C10_MOBILE
          // autocast is not supported.
          at::ScalarType dtype = at::ScalarType::Undefined;
#else
          at::DeviceType device_type =
              at::Device(pop(stack).toStringRef()).type();
          at::ScalarType dtype = at::autocast::get_autocast_dtype(device_type);
#endif
          push(stack, dtype);
        },
        aliasAnalysisConservative()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::Uninitialized() -> Any"),
        unInitialized,
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::Print(...) -> ()"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          std::stringstream ss;
          bool first = true;
          for (const IValue& i : last(stack, num_inputs)) {
            if (!first)
              ss << " ";
            first = false;
            ss << i;
          }
          drop(stack, num_inputs);
          ss << '\n';
          auto* handler = getPrintHandler();
          TORCH_INTERNAL_ASSERT(handler);
          handler(ss.str());
        },
        aliasAnalysisSpecialCase()),
    // This is an alternative to the aten::cat op that takes a variable number
    // of parameters as input.
    // Format:
    //    prim::VarConcat(Tensors..., dim) -> Tensor
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::VarConcat(...) -> Tensor"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          auto dim = pop(stack).toInt();
          std::vector<at::Tensor> inputs(num_inputs - 1);
          for (int i = 0; i < num_inputs - 1; ++i) {
            inputs[num_inputs - 2 - i] = pop(stack).toTensor();
          }
          push(stack, at::cat(inputs, dim));
        },
        aliasAnalysisFromSchema()),
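    // Note: here (and in prim::VarStack below) num_inputs counts the trailing
    // dim argument too, so num_inputs - 1 tensors remain on the stack. They
    // are popped last-first, which is why the loop fills `inputs` from the
    // back to preserve the original argument order.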
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::VarStack(...) -> Tensor"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          auto dim = pop(stack).toInt();
          std::vector<at::Tensor> inputs(num_inputs - 1);
          for (int i = 0; i < num_inputs - 1; ++i) {
            inputs[num_inputs - 2 - i] = pop(stack).toTensor();
          }
          push(stack, at::stack(inputs, dim));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::IfThenElse(bool cond, Any(a) x, Any(b) y) -> Any(a|b)"),
        [](Stack& stack) {
          const auto cond = stack[stack.size() - 3].toBool();
          stack[stack.size() - 3] =
              std::move(stack[stack.size() - (cond ? 2 : 1)]);
          stack.pop_back();
          stack.pop_back();
        },
        aliasAnalysisFromSchema()),
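    // Note: the three inputs sit at stack[size-3..size-1] as (cond, x, y).
    // The selected branch value is moved into cond's slot and the two
    // pop_back() calls discard the remaining slots, so only one IValue moves.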
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool"),
        [](Stack& stack) {
          IValue x = pop(stack);
          IValue y = pop(stack);
          push(stack, x == y);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool"),
        [](Stack& stack) {
          IValue x = pop(stack);
          IValue y = pop(stack);
          push(stack, x != y);
        },
        aliasAnalysisFromSchema()),
    // We define aten::dequantize in both native_functions.yaml and here;
    // however, aten::dequantize.any defined here overrides
    // aten::dequantize.tensors in native_functions.yaml. The variants here
    // are only for graph mode quantization, and they should be removed once
    // we deprecate graph mode quantization in favor of the variants in
    // native_functions.yaml.
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::dequantize.tensor(Tensor qtensor) -> Tensor"),
        [](Stack& stack) {
          at::Tensor qtensor;
          pop(stack, qtensor);
          push(stack, at::dequantize(qtensor));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]"),
        [](Stack& stack) {
          auto qtensors = pop(stack).toTensorVector();
          push(stack, at::dequantize(qtensors));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::dequantize.any(Any tensors) -> Any"),
        [](Stack& stack) { dequantize(stack); },
        aliasAnalysisFromSchema()),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::log, std::log(a), float, float),
    DEFINE_STRING_OP(aten::add, a + b, str),
    DEFINE_COMPARISON_OP_WITH_COMPLEX(aten::eq, a == b),
    DEFINE_COMPARISON_OP_WITH_COMPLEX(aten::ne, a != b),
    DEFINE_GENERIC_OP(
        aten::polar,
        c10::polar(static_cast<double>(a), static_cast<double>(b)),
        c10::polar(static_cast<double>(a), static_cast<double>(b)),
        complex,
        complex),
    DEFINE_INT_FLOAT_OP(
        aten::polar,
        c10::polar(static_cast<double>(a), static_cast<double>(b)),
        complex),
    DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
        aten::polar,
        c10::polar(static_cast<double>(a), static_cast<double>(b)),
        c10::polar(static_cast<double>(a), static_cast<double>(b)),
        Scalar),
    DEFINE_COMPARISON_OP(aten::lt, a < b),
    DEFINE_COMPARISON_OP(aten::gt, a > b),
    DEFINE_COMPARISON_OP(aten::le, a <= b),
    DEFINE_COMPARISON_OP(aten::ge, a >= b),
    DEFINE_BINARY_OP_WITH_COMPLEX(aten::add, a + b),
    DEFINE_BINARY_OP_WITH_COMPLEX(aten::sub, a - b),
    DEFINE_BINARY_OP_WITH_COMPLEX(aten::mul, a* b),
    DEFINE_BOOL_OP(aten::__and__, a&& b),
    DEFINE_BOOL_OP(aten::__or__, a || b),
    DEFINE_BOOL_OP(aten::__xor__, a != b),
    DEFINE_UNARY_OP(aten::round, round_to_even(a), float, float),
    DEFINE_UNARY_OP(aten::floor, floor(a), int, int),
    DEFINE_UNARY_OP(aten::ceil, ceil(a), int, int),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::neg, -a, int, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::exp, std::exp(a), float, float),
    // Pass in two ops to handle int and float separately, as % in C++ only
    // works for int. The modulus calculation differs between C++ and Python
    // (for negative operands); we preserve the Python behavior, as it is more
    // common and matches Python syntax, hence the conversion.
    DEFINE_GENERIC_OP(
        aten::remainder,
        (b + (a % b)) % b,
        fmod((b + fmod(a, b)), b),
        int,
        float),
    DEFINE_INT_FLOAT_OP(aten::remainder, fmod((b + fmod(a, b)), b), float),
    DEFINE_SCALAR_BINARY_OP(
        aten::remainder,
        (b + (a % b)) % b,
        fmod((b + fmod(a, b)), b),
        Scalar),
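    // Example: Python's remainder takes the sign of the divisor, so
    // -7 % 3 == 2 in Python but -1 in C++; (3 + (-7 % 3)) % 3 ==
    // (3 + -1) % 3 == 2 reproduces the Python result.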
    // NB: This is the Python truediv operation.
    DEFINE_GENERIC_OP_WITH_COMPLEX(
        aten::div,
        static_cast<double>(a) / static_cast<double>(b),
        a / b,
        a / b,
        float,
        float,
        complex),
    DEFINE_SCALAR_BINARY_OP(
        aten::div,
        static_cast<double>(a) / static_cast<double>(b),
        a / b,
        float),
    DEFINE_GENERIC_OP(
        aten::floordiv,
        floordiv(a, b),
        std::floor(a / b),
        int,
        float),
    DEFINE_INT_FLOAT_OP(aten::floordiv, std::floor(a / b), float),
    DEFINE_SCALAR_BINARY_OP(
        aten::floordiv,
        floordiv(a, b),
        std::floor(a / b),
        Scalar),
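    // Example: floor division rounds toward negative infinity as in Python,
    // e.g. -7 // 2 == -4, whereas C++ integer division truncates -7 / 2
    // to -3.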
    // int ** int produces a float, because negative exponents produce float
    // results
    DEFINE_GENERIC_OP_WITH_COMPLEX(
        aten::pow,
        static_cast<double>(powWrapper(a, b)),
        static_cast<double>(powWrapper(a, b)),
        static_cast<c10::complex<double>>(pow(a, b)),
        float,
        float,
        complex),
    DEFINE_INT_FLOAT_OP(
        aten::pow,
        static_cast<double>(powWrapper(a, b)),
        float),
    DEFINE_FLOAT_COMPLEX_OP(aten::pow, pow(a, b), complex),
    DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
        aten::pow,
        static_cast<double>(pow(a, b)),
        static_cast<double>(pow(a, b)),
        float),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::pow.int_to_int(int a, int b) -> int"),
        [](Stack& stack) {
          int64_t a = 0, b = 0;
          pop(stack, a, b);
          push(stack, powWrapper(a, b));
        },
        aliasAnalysisFromSchema()),
    // min and max are in prim:: because there is a difference between
    // the Python builtin 'min' and 'torch.min'
    DEFINE_BINARY_OP(prim::min, a < b ? a : b),
    DEFINE_BINARY_OP(prim::max, a > b ? a : b),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::type(Device self) -> str"),
        [](Stack& stack) {
          auto d = pop(stack);
          push(
              stack, DeviceTypeName(d.toDevice().type(), /* lower_case=*/true));
        },
        aliasAnalysisFromSchema()),
    // tensor length op (size of the first dimension)
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::len.Tensor(Tensor t) -> int"),
        [](Stack& stack) {
          at::Tensor t = pop(stack).toTensor();
          if (t.dim() == 0) {
            AT_ERROR("len() of a 0-d tensor");
          }
          push(stack, t.sizes()[0]);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ord(str string) -> int"),
        [](Stack& stack) {
          auto string = pop(stack).toStringRef();
          TORCH_CHECK(
              string.size() == 1,
              "String for ord() must be 1 character, found ",
              string.size());
          uint8_t ord = string.at(0);
          push(stack, int64_t(ord));
        },
        aliasAnalysisFromSchema()),
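    // Example: ord("a") == 97, as in Python; multi-character strings are
    // rejected by the TORCH_CHECK above.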
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::lower(str self) -> str"),
        [](Stack& stack) {
          auto string = pop(stack).toStringRef();
          std::stringstream ss;
          for (char c : string) {
            ss << static_cast<char>(::tolower(c));
          }
          push(stack, ss.str());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__contains__.int_list(int[] l, int item) -> bool"),
        listContains<int64_t>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__contains__.str_list(str[] l, str item) -> bool"),
        listContains<std::string>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::len.str(str s) -> int"),
        [](Stack& stack) {
          auto string = pop(stack).toStringRef();
          push(stack, static_cast<int64_t>(string.size()));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::dict() -> Dict(str, Tensor)"),
        [](Stack& stack) {
          auto dict =
              c10::impl::GenericDict(StringType::get(), TensorType::get());
          push(stack, dict);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__getitem__.str(str s, int index) -> str"),
        [](Stack& stack) {
          auto index = pop(stack).toInt();
          auto string = pop(stack).toStringRef();
          auto norm_index = normalizeIndex(index, string.size());
          char c = string.at(norm_index);
          push(stack, std::string(&c, 1));
        },
        aliasAnalysisFromSchema()),
#define CREATE_COPY_OP(other_type, c_type)                               \
  OperatorGeneratorArgs(                                                 \
      TORCH_SELECTIVE_SCHEMA("aten::copy_." #other_type                  \
                             "(Tensor(a!) self, " #other_type            \
                             " other) -> Tensor(a!)"),                   \
      [](Stack& stack) {                                                 \
        at::Tensor t;                                                    \
        c_type other;                                                    \
        pop(stack, t, other);                                            \
        std::move(t) = other; /* NOLINT(bugprone-use-after-move) */      \
        push(stack, std::move(t)); /* NOLINT(bugprone-use-after-move) */ \
      },                                                                 \
      aliasAnalysisFromSchema())
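
// Note: `std::move(t) = other` selects the rvalue-qualified Tensor assignment
// operator, which in ATen performs an in-place copy into the tensor's storage
// rather than rebinding the handle; the NOLINT comments acknowledge that `t`
// is deliberately used after the move.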

    CREATE_COPY_OP(Tensor, at::Tensor),
    CREATE_COPY_OP(int, int64_t),
    CREATE_COPY_OP(float, double),
#undef CREATE_COPY_OP
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::backward(Tensor self, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()"),
        [](Stack& stack) {
          bool create_graph = pop(stack).toBool();
          auto retain_graph = pop(stack).toOptional<bool>();
          IValue gradient_ivalue = pop(stack);
          at::Tensor gradient = gradient_ivalue.isNone()
              ? at::Tensor()
              : gradient_ivalue.toTensor();
          at::Tensor self = pop(stack).toTensor();
          bool keep_graph = retain_graph ? retain_graph.value() : create_graph;
          self.backward(gradient, keep_graph, create_graph);
        },
        aliasAnalysisConservative()),
    //
    // create a clone of these declarations with a _hacked_twin overload name
    // and nullability scrubbed from TensorList arg types
    // TODO: find out why this exists and how to do it without the hack
    //
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"),
        [](Stack& stack) {
          auto indices = pop(stack).to<c10::List<at::Tensor>>();
          c10::List<std::optional<at::Tensor>> opt_list_indices;
          opt_list_indices.reserve(indices.size());
          for (const auto& ten : indices) {
            opt_list_indices.push_back(ten);
          }
          auto self = pop(stack).toTensor();
          auto result = at::index(self, opt_list_indices);
          push(stack, std::move(result));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::_unsafe_index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor"),
        [](Stack& stack) {
          auto indices = pop(stack).to<c10::List<at::Tensor>>();
          c10::List<std::optional<at::Tensor>> opt_list_indices;
          opt_list_indices.reserve(indices.size());
          for (const auto& ten : indices) {
            opt_list_indices.push_back(ten);
          }
          auto self = pop(stack).toTensor();
          auto result = at::_unsafe_index(self, opt_list_indices);
          push(stack, std::move(result));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::_index_put_impl_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)"),
        [](Stack& stack) {
          auto unsafe = pop(stack).toBool();
          auto accumulate = pop(stack).toBool();
          auto values = pop(stack).toTensor();
          auto indices = pop(stack).to<c10::List<at::Tensor>>();
          c10::List<std::optional<at::Tensor>> opt_list_indices;
          opt_list_indices.reserve(indices.size());
          for (const auto& ten : indices) {
            opt_list_indices.push_back(ten);
          }
          auto self = pop(stack).toTensor();
          auto result = at::_index_put_impl_(
              self, opt_list_indices, values, accumulate, unsafe);
          push(stack, std::move(result));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::index_put_.hacked_twin(Tensor(a!) self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)"),
        [](Stack& stack) {
          auto accumulate = pop(stack).toBool();
          auto values = pop(stack).toTensor();
          auto indices = pop(stack).to<c10::List<at::Tensor>>();
          c10::List<std::optional<at::Tensor>> opt_list_indices;
          opt_list_indices.reserve(indices.size());
          for (const auto& ten : indices) {
            opt_list_indices.push_back(ten);
          }
          auto self = pop(stack).toTensor();
          auto result =
              at::index_put_(self, opt_list_indices, values, accumulate);
          push(stack, std::move(result));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"),
        [](Stack& stack) {
          auto accumulate = pop(stack).toBool();
          auto values = pop(stack).toTensor();
          auto indices = pop(stack).to<c10::List<at::Tensor>>();
          c10::List<std::optional<at::Tensor>> opt_list_indices;
          opt_list_indices.reserve(indices.size());
          for (const auto& ten : indices) {
            opt_list_indices.push_back(ten);
          }
          auto self = pop(stack).toTensor();
          auto result =
              at::index_put(self, opt_list_indices, values, accumulate);
          push(stack, std::move(result));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::_unsafe_index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor"),
        [](Stack& stack) {
          auto accumulate = pop(stack).toBool();
          auto values = pop(stack).toTensor();
          auto indices = pop(stack).to<c10::List<at::Tensor>>();
          c10::List<std::optional<at::Tensor>> opt_list_indices;
          opt_list_indices.reserve(indices.size());
          for (const auto& ten : indices) {
            opt_list_indices.push_back(ten);
          }
          auto self = pop(stack).toTensor();
          auto result =
              at::_unsafe_index_put(self, opt_list_indices, values, accumulate);
          push(stack, std::move(result));
        },
        aliasAnalysisFromSchema()),
    // Reference: the parse_to_conversion function in python_arg_parsing.h
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
        [](Stack& stack) {
          bool non_blocking = false;
          bool copy = false;
          pop(stack, non_blocking, copy);
          std::optional<at::ScalarType> scalarType =
              pop(stack).toOptional<at::ScalarType>();
          std::optional<c10::Device> device =
              pop(stack).toOptional<c10::Device>();
          at::Tensor self = pop(stack).toTensor();
          push(
              stack, to_dispatch(self, device, scalarType, non_blocking, copy));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
        toPrimDType,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::is_cuda(Tensor a) -> bool"),
        isCuda,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::is_cpu(Tensor a) -> bool"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.is_cpu());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::is_xla(Tensor a) -> bool"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.is_xla());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::is_mtia(Tensor a) -> bool"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.is_mtia());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::is_xpu(Tensor a) -> bool"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.is_xpu());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::data(Tensor(a) a) -> Tensor(a)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, autograd::Variable(a).variable_data());
        },
        aliasAnalysisFromSchema()),
1316 // these ops are not defined for Tensor
1317 #define CREATE_COMPARATOR_LIST_OPS_SPECIALIZED(decl_type, value_type)        \
1318   OperatorGeneratorArgs(                                                     \
1319       TORCH_SELECTIVE_SCHEMA("prim::min." decl_type "_list(" decl_type       \
1320                              "[] l, " decl_type "[] r) -> " decl_type "[]"), \
1321       minList<value_type>,                                                   \
1322       aliasAnalysisFromSchema()),                                            \
1323       OperatorGeneratorArgs(                                                 \
1324           TORCH_SELECTIVE_SCHEMA("prim::max." decl_type "_list(" decl_type   \
1325                                  "[] l, " decl_type "[] r) -> " decl_type    \
1326                                  "[]"),                                      \
1327           maxList<value_type>,                                               \
1328           aliasAnalysisFromSchema()),                                        \
1329       OperatorGeneratorArgs(                                                 \
1330           TORCH_SELECTIVE_SCHEMA("prim::min.self_" decl_type "(" decl_type   \
1331                                  "[] self) -> " decl_type),                  \
1332           listMin<value_type>,                                               \
1333           aliasAnalysisFromSchema()),                                        \
1334       OperatorGeneratorArgs(                                                 \
1335           TORCH_SELECTIVE_SCHEMA("prim::max.self_" decl_type "(" decl_type   \
1336                                  "[] self) -> " decl_type),                  \
1337           listMax<value_type>,                                               \
1338           aliasAnalysisFromSchema()),
1339     CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("int", int64_t)
1340         CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("float", double)
1341             CREATE_COMPARATOR_LIST_OPS_SPECIALIZED("bool", bool)
1342 #undef CREATE_COMPARATOR_LIST_OPS_SPECIALIZED
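// Expansion sketch (editor's note): the "int" instantiation above registers,
// among others,
//   prim::min.int_list(int[] l, int[] r) -> int[]  backed by minList<int64_t>
//   prim::max.self_int(int[] self) -> int          backed by listMax<int64_t>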
1343 // Python str.is*() methods return False for the empty string
1344 #define DEFINE_STRING_IS_OP(op_name, char_op)                          \
1345   OperatorGeneratorArgs(                                               \
1346       TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> bool"),           \
1347       [](Stack& stack) {                                               \
1348         auto string = pop(stack).toStringRef();                        \
1349         push(                                                          \
1350             stack,                                                     \
1351             string.size() != 0 &&                                      \
1352                 std::all_of(string.begin(), string.end(), [](char c) { \
1353                   return char_op(c);                                   \
1354                 }));                                                   \
1355       },                                                               \
1356       aliasAnalysisFromSchema())
1357 
1358                 DEFINE_STRING_IS_OP(aten::isdigit, ::isdigit),
1359     DEFINE_STRING_IS_OP(aten::isspace, ::isspace),
1360     DEFINE_STRING_IS_OP(aten::isalnum, ::isalnum),
1361     DEFINE_STRING_IS_OP(aten::isalpha, ::isalpha),
1362     DEFINE_STRING_IS_OP(aten::isdecimal, ::isdigit),
1363     DEFINE_STRING_IS_OP(aten::isnumeric, ::isdigit),
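    // Behavior sketch (editor's note, mirroring Python): aten::isdigit("123")
    // is True and aten::isdigit("") is False. Note that isdecimal and
    // isnumeric are both mapped onto ::isdigit here, so they only recognize
    // ASCII digits rather than the wider Unicode categories CPython accepts.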
1364 
1365 #define DEFINE_STRING_CHAR_MAP_OP(op_name, char_op)         \
1366   OperatorGeneratorArgs(                                    \
1367       TORCH_SELECTIVE_SCHEMA(#op_name "(str self) -> str"), \
1368       [](Stack& stack) {                                    \
1369         auto string = pop(stack).toStringRef();             \
1370         std::stringstream ss;                               \
1371         for (char c : string) {                             \
1372           ss << static_cast<char>(char_op(c));              \
1373         }                                                   \
1374         push(stack, ss.str());                              \
1375       },                                                    \
1376       aliasAnalysisFromSchema())
1377 
1378     DEFINE_STRING_CHAR_MAP_OP(aten::upper, ::toupper),
1379     DEFINE_STRING_CHAR_MAP_OP(aten::swapcase, ([](char c) {
1380                                 if (c == static_cast<char>(::toupper(c))) {
1381                                   return static_cast<char>(::tolower(c));
1382                                 } else {
1383                                   return static_cast<char>(::toupper(c));
1384                                 }
1385                               }))};
1386 
1387 static std::vector<std::optional<Operator>> createOperators(
1388     const std::vector<OperatorGeneratorArgs>& args) {
1389   std::vector<std::optional<Operator>> result;
1390   result.reserve(args.size());
1391   for (const auto& arg : args) {
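    // schema_str is null when the op has been filtered out (e.g. by a
    // selective build), in which case the entry is skipped.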
1392     if (arg.schema_str) {
1393       if (arg.isOperationCreator) {
1394         result.push_back(OperatorGenerator(
1395             arg.schema_str, arg.operationCreator, arg.aliasAnalysis));
1396       } else {
1397         result.push_back(OperatorGenerator(
1398             arg.schema_str, arg.operation, arg.aliasAnalysis));
1399       }
1400     }
1401   }
1402   return result;
1403 }
1404 
1405 RegisterOperators reg(([]() {
1406   auto v = createOperators(opGenArgs);
1407   v.emplace_back(Operator(
1408       prim::tolist,
1409       // This operator has to be unschematized because the return type
1410       // depends on the type hint and input. The implementation of this
1411       // operator below is intended to be as close to the Python
1412       // implementation in torch/csrc/utils/tensor_list.cpp as possible.
1413       [](const Node* /*node*/) -> Operation { return toList; },
1414       aliasAnalysisSpecialCase()));
1415   return v;
1416 })());
1417 
1418 void dictSetItem(Stack& stack) {
1419   auto value = pop(stack);
1420   auto idx = pop(stack);
1421   auto dict = pop(stack).toGenericDict();
1422   dict.insert_or_assign(std::move(idx), std::move(value));
1423 }
1424 
1425 void dictLen(Stack& stack) {
1426   auto dict = pop(stack).toGenericDict();
1427   push(stack, int64_t(dict.size()));
1428 }
1429 
1430 void dictValues(Stack& stack) {
1431   auto dict = pop(stack).toGenericDict();
1432   auto values = c10::impl::GenericList(dict.valueType());
1433   for (const auto& entry : dict) {
1434     values.emplace_back(entry.value());
1435   }
1436   push(stack, values);
1437 }
1438 
1439 void dictKeys(Stack& stack) {
1440   auto dict = pop(stack).toGenericDict();
1441   auto keys = c10::impl::GenericList(dict.keyType());
1442   for (const auto& entry : dict) {
1443     keys.emplace_back(entry.key());
1444   }
1445   push(stack, keys);
1446 }
1447 
1448 template <bool has_default>
1449 void dictGet(Stack& stack) {
1450   IValue default_value;
1451   if (has_default) {
1452     default_value = pop(stack);
1453   }
1454   auto key = pop(stack);
1455   auto dict = pop(stack).toGenericDict();
1456   auto value = dict.find(key);
1457   if (value == dict.end()) {
1458     push(stack, std::move(default_value));
1459   } else {
1460     push(stack, value->value());
1461   }
1462 }
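// Usage sketch (editor's note): this backs d.get(key), which yields None when
// the key is absent, and d.get(key, default), which yields the given default.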
1463 
1464 // If the key is in the dict, return it. Else set it to the default value and
1465 // return that.
1466 void dictSetDefault(Stack& stack) {
1467   auto default_value = pop(stack);
1468   auto key = pop(stack);
1469   auto dict = pop(stack).toGenericDict();
1470   auto value = dict.find(key);
1471   if (value == dict.end()) {
1472     dict.insert(key, default_value);
1473     push(stack, std::move(default_value));
1474   } else {
1475     push(stack, value->value());
1476   }
1477 }
1478 
1479 template <bool has_default>
1480 void dictPop(Stack& stack) {
1481   IValue default_value;
1482   if (has_default) {
1483     default_value = pop(stack);
1484   }
1485   auto key = pop(stack);
1486   auto dict = pop(stack).toGenericDict();
1487   auto iter = dict.find(key);
1488   if (iter == dict.end()) {
1489     if (has_default) {
1490       push(stack, default_value);
1491     } else {
1492       AT_ERROR("KeyError: ", key);
1493     }
1494   } else {
1495           // note: read the value before erasing the entry
1496     push(stack, iter->value());
1497     auto erase_count = dict.erase(key);
1498     TORCH_CHECK(
1499         erase_count == 1, "Expected to erase 1 item, found ", erase_count);
1500   }
1501 }
1502 
1503 void dictDelete(Stack& stack) {
1504   dictPop<false>(stack);
1505   // pop pushes an item on the stack but delete does not, so get rid of it
1506   pop(stack);
1507 }
1508 
1509 void dictPopItem(Stack& stack) {
1510   auto dict = pop(stack).toGenericDict();
1511   if (dict.empty()) {
1512     AT_ERROR("popitem(): dictionary is empty");
1513   }
1514   auto head_item = dict.begin();
1515 
1516   IValue tuple =
1517       c10::ivalue::Tuple::create({head_item->key(), head_item->value()});
1518   auto erase_count = dict.erase(head_item->key());
1519   TORCH_CHECK(
1520       erase_count == 1, "Expected to erase 1 item, found ", erase_count);
1521   push(stack, tuple);
1522 }
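// Editor's note: this removes the first entry in insertion order; CPython's
// dict.popitem() removes the most recently inserted item instead.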
1523 
1524 void dictContains(Stack& stack) {
1525   auto key = pop(stack);
1526   auto dict = pop(stack).toGenericDict();
1527   push(stack, dict.contains(key));
1528 }
1529 
1530 void dictClear(Stack& stack) {
1531   auto dict = pop(stack).toGenericDict();
1532   dict.clear();
1533 }
1534 
1535 void dictUpdate(Stack& stack) {
1536   auto to_add = pop(stack).toGenericDict();
1537   auto dict = pop(stack).toGenericDict();
1538 
1539   for (const auto& item : to_add) {
1540     dict.insert_or_assign(item.key(), item.value());
1541   }
1542 }
1543 
1544 void dictItems(Stack& stack) {
1545   auto dict = pop(stack).toGenericDict();
1546   auto key_type = dict.keyType();
1547   auto value_type = dict.valueType();
1548   auto items =
1549       c10::impl::GenericList(TupleType::create({key_type, value_type}));
1550   items.reserve(dict.size());
1551   for (const auto& item : dict) {
1552     items.emplace_back(c10::ivalue::Tuple::create({item.key(), item.value()}));
1553   }
1554   push(stack, std::move(items));
1555 }
1556 
1557 void dictCopy(Stack& stack) {
1558   push(stack, pop(stack).toGenericDict().copy());
1559 }
1560 
1561 void dictConstructFromList(Stack& stack) {
1562   auto input_list = pop(stack);
1563   auto list = input_list.toList();
1564   auto tup_type = list.elementType()->expect<TupleType>();
1565   auto dict = c10::impl::GenericDict(
1566       tup_type->elements().at(0), tup_type->elements().at(1));
1567   dict.reserve(list.size());
1568   for (IValue input : list) {
1569     const auto& tup = input.toTupleRef().elements();
1570     dict.insert_or_assign(tup[0], tup[1]);
1571   }
1572   push(stack, dict);
1573 }
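// Usage sketch (editor's note): behaves like Python's dict([(k1, v1), ...]);
// with insert_or_assign, a later duplicate key overwrites an earlier one.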
1574 
1575 #define CREATE_DICT_OPS(key_type)                                              \
1576   OperatorGeneratorArgs(                                                       \
1577       TORCH_SELECTIVE_SCHEMA("aten::len.Dict_" key_type "(Dict(" key_type      \
1578                              ", t) self) -> int"),                             \
1579       dictLen,                                                                 \
1580       aliasAnalysisFromSchema()),                                              \
1581       OperatorGeneratorArgs(                                                   \
1582           TORCH_SELECTIVE_SCHEMA("aten::keys." key_type "(Dict(" key_type      \
1583                                  ", t) self) -> " key_type "[](*)"),           \
1584           dictKeys,                                                            \
1585           aliasAnalysisFromSchema()),                                          \
1586       OperatorGeneratorArgs(                                                   \
1587           TORCH_SELECTIVE_SCHEMA("aten::values." key_type "(Dict(" key_type    \
1588                                  ", t) self) -> t[](*)"),                      \
1589           dictValues,                                                          \
1590           aliasAnalysisFromSchema()),                                          \
1591       OperatorGeneratorArgs(                                                   \
1592           TORCH_SELECTIVE_SCHEMA("aten::__getitem__.Dict_" key_type            \
1593                                  "(Dict(" key_type ", t) self, " key_type      \
1594                                  " key) -> t(*)"),                             \
1595           dictIndex,                                                           \
1596           aliasAnalysisFromSchema()),                                          \
1597       OperatorGeneratorArgs(                                                   \
1598           TORCH_SELECTIVE_SCHEMA("aten::get." key_type "(Dict(" key_type       \
1599                                  ", t) self, " key_type " key) -> t(*)?"),     \
1600           dictGet<false>,                                                      \
1601           aliasAnalysisFromSchema()),                                          \
1602       OperatorGeneratorArgs(                                                   \
1603           TORCH_SELECTIVE_SCHEMA("aten::get.default_" key_type                 \
1604                                  "(Dict(" key_type ", t) self, " key_type      \
1605                                  " key, t default_value) -> t(*)"),            \
1606           dictGet<true>,                                                       \
1607           aliasAnalysisFromSchema()),                                          \
1608       OperatorGeneratorArgs(                                                   \
1609           TORCH_SELECTIVE_SCHEMA(                                              \
1610               "aten::setdefault." key_type "(Dict(" key_type                   \
1611               ", t)(a!) self, " key_type                                       \
1612               "(b -> *) key, t(c -> *) default_value) -> t(*)"),               \
1613           dictSetDefault,                                                      \
1614           aliasAnalysisFromSchema()),                                          \
1615       OperatorGeneratorArgs(                                                   \
1616           TORCH_SELECTIVE_SCHEMA("aten::Delete.Dict_" key_type                 \
1617                                  "(Dict(" key_type ", t)(a!) self, " key_type  \
1618                                  " key) -> ()"),                               \
1619           dictDelete,                                                          \
1620           aliasAnalysisFromSchema()),                                          \
1621       OperatorGeneratorArgs(                                                   \
1622           TORCH_SELECTIVE_SCHEMA("aten::pop.Dict_" key_type "(Dict(" key_type  \
1623                                  ", t)(a!) self, " key_type " key) -> t(*)"),  \
1624           dictPop<false>,                                                      \
1625           aliasAnalysisFromSchema()),                                          \
1626       OperatorGeneratorArgs(                                                   \
1627           TORCH_SELECTIVE_SCHEMA("aten::pop.Dict_default_" key_type            \
1628                                  "(Dict(" key_type ", t)(a!) self, " key_type  \
1629                                  " key, t default_value) -> t(*)"),            \
1630           dictPop<true>,                                                       \
1631           aliasAnalysisFromSchema()),                                          \
1632       OperatorGeneratorArgs(                                                   \
1633           TORCH_SELECTIVE_SCHEMA("aten::popitem." key_type "(Dict(" key_type   \
1634                                  ", t)(a!) self) -> ((" key_type ", t))"),     \
1635           dictPopItem,                                                         \
1636           aliasAnalysisFromSchema()),                                          \
1637       OperatorGeneratorArgs(                                                   \
1638           TORCH_SELECTIVE_SCHEMA("aten::clear." key_type "(Dict(" key_type     \
1639                                  ", t)(a!) self) -> ()"),                      \
1640           dictClear,                                                           \
1641           aliasAnalysisFromSchema()),                                          \
1642       OperatorGeneratorArgs(                                                   \
1643           TORCH_SELECTIVE_SCHEMA("aten::update." key_type "(Dict(" key_type    \
1644                                  ", t)(a!) self, Dict(" key_type               \
1645                                  ", t)(a!) to_add) -> ()"),                    \
1646           dictUpdate,                                                          \
1647           aliasAnalysisFromSchema()),                                          \
1648       OperatorGeneratorArgs(                                                   \
1649           TORCH_SELECTIVE_SCHEMA("aten::items." key_type "(Dict(" key_type     \
1650                                  ", t) self) -> ((" key_type ", t)[])"),       \
1651           dictItems,                                                           \
1652           aliasAnalysisFromSchema()),                                          \
1653       OperatorGeneratorArgs(                                                   \
1654           TORCH_SELECTIVE_SCHEMA("aten::copy.Dict_" key_type "(Dict(" key_type \
1655                                  ", t)(a) self) -> Dict(" key_type ", t)"),    \
1656           dictCopy,                                                            \
1657           aliasAnalysisFromSchema()),                                          \
1658       OperatorGeneratorArgs(                                                   \
1659           TORCH_SELECTIVE_SCHEMA("aten::__contains__." key_type                \
1660                                  "(Dict(" key_type ", t) dict, " key_type      \
1661                                  " key) -> bool"),                             \
1662           dictContains,                                                        \
1663           aliasAnalysisFromSchema()),                                          \
1664       OperatorGeneratorArgs(                                                   \
1665           TORCH_SELECTIVE_SCHEMA("aten::_set_item." key_type "(Dict(" key_type \
1666                                  ", t)(a!) l, " key_type                       \
1667                                  "(b -> *) idx, t(c -> *) v) -> ()"),          \
1668           dictSetItem,                                                         \
1669           aliasAnalysisFromSchema()),                                          \
1670       OperatorGeneratorArgs(                                                   \
1671           TORCH_SELECTIVE_SCHEMA("aten::dict." key_type "((" key_type          \
1672                                  ", tVal)[] inputs) -> Dict(" key_type         \
1673                                  ", tVal)"),                                   \
1674           dictConstructFromList,                                               \
1675           aliasAnalysisFromSchema()),                                          \
1676       OperatorGeneratorArgs(                                                   \
1677           TORCH_SELECTIVE_SCHEMA("aten::dict.Dict_" key_type "(Dict(" key_type \
1678                                  ", t)(a) self) -> Dict(" key_type ", t)"),    \
1679           dictCopy,                                                            \
1680           aliasAnalysisFromSchema())
1681 
1682 static const std::vector<OperatorGeneratorArgs> dict_ops{
1683     CREATE_DICT_OPS("str"),
1684     CREATE_DICT_OPS("int"),
1685     CREATE_DICT_OPS("bool"),
1686     CREATE_DICT_OPS("float"),
1687     CREATE_DICT_OPS("complex"),
1688     CREATE_DICT_OPS("Tensor"),
1689 };
1690 RegisterOperators reg_dict_ops(createOperators(dict_ops));
1691 
1692 constexpr c10::AliasAnalysisKind aliasAnalysisFromSchema() {
1693   return c10::AliasAnalysisKind::FROM_SCHEMA;
1694 }
1695 
1696 // Convert a Python index (which may be negative) into an index usable for a
1697 // C++ container
1698 int64_t normalizeIndex(int64_t idx, int64_t list_size) {
1699   if (idx < 0) {
1700     // Handle negative indexing
1701     idx = list_size + idx;
1702   }
1703   return idx;
1704 }
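// e.g. normalizeIndex(-1, 5) == 4 and normalizeIndex(2, 5) == 2; values still
// out of range after normalization are left for the caller to check.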
1705 
1706 int64_t stringFindImpl(
1707     std::string string,
1708     const std::string& substr,
1709     int64_t start,
1710     int64_t end,
1711     bool reverse = false) {
1712   int64_t size = string.size();
1713   if (start < 0) {
1714     start = std::max(int64_t(0), int64_t(size + start));
1715   }
1716   if (end < 0) {
1717     end = std::max(int64_t(0), int64_t(size + end + 1));
1718   }
1719   if (end > start) {
1720     string = string.substr(start, end - start);
1721   } else {
1722     string = "";
1723   }
1724 
1725   int64_t result = -1;
1726   if (string.size() >= substr.size()) {
1727     auto pos = string.find(substr, 0);
1728     if (reverse) {
1729       auto rpos = pos;
1730       do {
1731         pos = rpos;
1732         rpos = string.find(substr, pos + 1);
1733       } while (rpos != std::string::npos);
1734     }
1735     if (pos != std::string::npos) {
1736       result = pos + start;
1737     }
1738   }
1739   return result;
1740 }
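// Worked example (editor's note): stringFindImpl("abcabc", "bc", 0, -1)
// returns 1; with reverse=true it returns 4 (the last occurrence), matching
// Python's str.find and str.rfind respectively.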
1741 
1742 // String Ops
1743 // Implementations located in torch/csrc/jit/runtime/register_prim_ops.cpp
1744 static const std::vector<OperatorGeneratorArgs> stringOpGenArgs{
1745     OperatorGeneratorArgs(
1746         TORCH_SELECTIVE_SCHEMA(
1747             "aten::slice.str(str string, int? start=None, int? end=None, int step=1) -> str"),
1748         [](Stack& stack) {
1749           int64_t step = pop(stack).toInt();
1750           std::optional<int64_t> end = pop(stack).toOptional<int64_t>();
1751           std::optional<int64_t> start = pop(stack).toOptional<int64_t>();
1752           std::string string = pop(stack).toStringRef();
1753           push(stack, stringSlice(string, start, end, step));
1754         },
1755         aliasAnalysisFromSchema()),
1756     OperatorGeneratorArgs(
1757         TORCH_SELECTIVE_SCHEMA(
1758             "aten::strip(str self, str chars=' \\n\\t\\f\\v') -> str"),
1759         [](Stack& stack) {
1760           std::string chars = pop(stack).toStringRef();
1761           std::string string = pop(stack).toStringRef();
1762           auto rindex = string.find_last_not_of(chars);
1763           if (rindex != std::string::npos) {
1764             string = string.substr(0, rindex + 1);
1765           } else {
1766             string = "";
1767           }
1768           auto lindex = string.find_first_not_of(chars);
1769           if (lindex != std::string::npos) {
1770             string = string.substr(lindex, string.size());
1771           } else {
1772             string = "";
1773           }
1774           push(stack, string);
1775         },
1776         aliasAnalysisFromSchema()),
1777     OperatorGeneratorArgs(
1778         TORCH_SELECTIVE_SCHEMA(
1779             "aten::split.str(str self, str? separator=None, int max=-1) -> str[]"),
1780         [](Stack& stack) {
1781           int64_t max = pop(stack).toInt();
1782           IValue ivalue = pop(stack);
1783           std::string string = pop(stack).toStringRef();
1784 
1785           std::string::size_type prev_pos = 0;
1786           std::string::size_type pos = 0;
1787           c10::List<std::string> splits;
1788           if (ivalue == std::nullopt) {
1789             // if separator is not specified,
1790             // a different splitting algorithm is applied, as in Python
1791             splits = splitNoneSeparator(string);
1792             push(stack, std::move(splits));
1793             return;
1794           }
1795 
1796           const std::string& separator = ivalue.toStringRef();
1797 
1798           if (separator.empty()) {
1799             throw std::runtime_error("ValueError: empty separator");
1800           }
1801 
1802           auto count = 0;
1803 
1804           while ((pos = string.find(separator, pos)) != std::string::npos) {
1805             count++;
1806             if (max >= 0 && count > max) {
1807               break;
1808             } else {
1809               splits.emplace_back(string.substr(prev_pos, pos - prev_pos));
1810             }
1811             pos += separator.size();
1812             prev_pos = pos;
1813           }
1814           splits.emplace_back(
1815               string.substr(prev_pos, string.size() - prev_pos));
1816           push(stack, std::move(splits));
1817         },
1818         aliasAnalysisFromSchema()),
1819     OperatorGeneratorArgs(
1820         TORCH_SELECTIVE_SCHEMA(
1821             "aten::splitlines(str self, bool keepends=False) -> str[]"),
1822         [](Stack& stack) {
1823           bool keepends = pop(stack).toBool();
1824           std::string string = pop(stack).toStringRef();
1825           std::string delimiters =
1826               "\n\r\r\n\v\x0b\f\x0c\x1c\x1d\x1e\x85\u2028\u2029";
1827           c10::List<std::string> splits;
1828 
1829           std::string::size_type prev_pos = 0;
1830           std::string::size_type pos = 0;
1831           while ((pos = string.find_first_of(delimiters, pos)) !=
1832                  std::string::npos) {
1833             splits.emplace_back(string.substr(prev_pos, pos - prev_pos));
1834             if (keepends) {
1835               splits.emplace_back(string.substr(pos, 1));
1836             }
1837             pos++;
1838             prev_pos = pos;
1839           }
1840           if (prev_pos != string.size()) {
1841             splits.emplace_back(
1842                 string.substr(prev_pos, string.size() - prev_pos));
1843           }
1844 
1845           push(stack, std::move(splits));
1846         },
1847         aliasAnalysisFromSchema()),
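    // Editor's note: with keepends=True each delimiter becomes its own list
    // element here, whereas CPython appends it to the preceding line.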
1848     // isupper and islower require there to be at least one alphabetic
1849     // character, and ignore all non-alphabetic characters
1850     OperatorGeneratorArgs(
1851         TORCH_SELECTIVE_SCHEMA("aten::isupper(str self) -> bool"),
1852         [](Stack& stack) {
1853           std::string string = pop(stack).toStringRef();
1854           bool found_alpha = false;
1855           bool is_upper = true;
1856           for (size_t i = 0; i < string.size() && is_upper; ++i) {
1857             char c = string[i];
1858             found_alpha |= static_cast<bool>(::isalpha(c));
1859             is_upper &= (!::isalpha(c) || ::isupper(c));
1860           }
1861           push(stack, found_alpha && is_upper);
1862         },
1863         aliasAnalysisFromSchema()),
1864     OperatorGeneratorArgs(
1865         TORCH_SELECTIVE_SCHEMA("aten::islower(str self) -> bool"),
1866         [](Stack& stack) {
1867           std::string string = pop(stack).toStringRef();
1868           bool found_alpha = false;
1869           bool is_lower = true;
1870           for (size_t i = 0; i < string.size() && is_lower; ++i) {
1871             char c = string[i];
1872             found_alpha |= static_cast<bool>(::isalpha(c));
1873             is_lower &= (!::isalpha(c) || ::islower(c));
1874           }
1875           push(stack, found_alpha && is_lower);
1876         },
1877         aliasAnalysisFromSchema()),
1878     OperatorGeneratorArgs(
1879         TORCH_SELECTIVE_SCHEMA("aten::capitalize(str self) -> str"),
1880         [](Stack& stack) {
1881           std::string string = pop(stack).toStringRef();
1882           std::stringstream ss;
1883           auto first_char = true;
1884           for (char c : string) {
1885             if (first_char) {
1886               ss << static_cast<char>(::toupper(c));
1887               first_char = false;
1888             } else {
1889               ss << static_cast<char>(::tolower(c));
1890             }
1891           }
1892           push(stack, ss.str());
1893         },
1894         aliasAnalysisFromSchema()),
1895     OperatorGeneratorArgs(
1896         TORCH_SELECTIVE_SCHEMA("aten::title(str self) -> str"),
1897         [](Stack& stack) {
1898           std::string string = pop(stack).toStringRef();
1899           std::stringstream ss;
1900           bool prev_is_nonalpha = true;
1901           for (char c : string) {
1902             if (prev_is_nonalpha) {
1903               ss << static_cast<char>(::toupper(c));
1904             } else {
1905               ss << static_cast<char>(::tolower(c));
1906             }
1907             if (::isalpha(c)) {
1908               prev_is_nonalpha = false;
1909             } else {
1910               prev_is_nonalpha = true;
1911             }
1912           }
1913           push(stack, ss.str());
1914         },
1915         aliasAnalysisFromSchema()),
1916     OperatorGeneratorArgs(
1917         TORCH_SELECTIVE_SCHEMA(
1918             "aten::center(str self, int width, str fillchar=' ') -> str"),
1919         [](Stack& stack) {
1920           std::string fillchar = pop(stack).toStringRef();
1921           int64_t width = pop(stack).toInt();
1922           std::string string = pop(stack).toStringRef();
1923           if (fillchar.size() != 1) {
1924             // TODO: this should be a TypeError
1925             throw std::runtime_error(
1926                 "TypeError: The fill character must be exactly one character long");
1927           }
1928           if (string.size() > static_cast<std::string::size_type>(width)) {
1929             push(stack, string);
1930             return;
1931           }
1932           std::stringstream ss;
1933           std::string::size_type full_padding = width - string.size();
1934           std::string::size_type l_pad = full_padding / 2;
1935           std::string::size_type r_pad = (full_padding + 1) / 2;
1936           if (width % 2) {
1937             auto tmp = r_pad;
1938             r_pad = l_pad;
1939             l_pad = tmp;
1940           }
1941           for (std::string::size_type i = 0; i < l_pad; ++i) {
1942             ss << fillchar;
1943           }
1944           ss << string;
1945           for (std::string::size_type i = 0; i < r_pad; ++i) {
1946             ss << fillchar;
1947           }
1948           push(stack, ss.str());
1949         },
1950         aliasAnalysisFromSchema()),
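    // Worked example (editor's note): center("abc", 10) yields "   abc    "
    // (l_pad=3, r_pad=4); the swap on odd widths matches CPython, which puts
    // the extra fill character on the left in that case.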
1951 
1952     // Adapted from
1953     // https://stackoverflow.com/questions/22489073/counting-the-number-of-occurrences-of-a-string-within-a-string
1954     OperatorGeneratorArgs(
1955         TORCH_SELECTIVE_SCHEMA(
1956             "aten::count(str self, str substr, int start=0, int end=-1) -> int"),
1957         [](Stack& stack) {
1958           int64_t end = pop(stack).toInt();
1959           int64_t start = pop(stack).toInt();
1960           std::string substr = pop(stack).toStringRef();
1961           std::string string = pop(stack).toStringRef();
1962           int64_t size = string.size();
1963           if (start > size) {
1964             push(stack, 0);
1965             return;
1966           }
1967           if (start < 0) {
1968             start = std::max(int64_t(0), int64_t(size + start));
1969           }
1970           if (end < 0) {
1971             end = std::max(int64_t(0), int64_t(size + end + 1));
1972           }
1973 
1974           int64_t occurrences = 0;
1975           std::string::size_type pos = start;
1976           while ((pos = string.find(substr, pos)) != std::string::npos) {
1977             if (pos < static_cast<std::string::size_type>(end)) {
1978               ++occurrences;
1979             } else {
1980               break;
1981             }
1982             pos += substr.length();
1983           }
1984           push(stack, occurrences);
1985         },
1986         aliasAnalysisFromSchema()),
1987     OperatorGeneratorArgs(
1988         TORCH_SELECTIVE_SCHEMA(
1989             "aten::endswith(str self, str substr, int start=0, int end=-1) -> bool"),
1990         [](Stack& stack) {
1991           int64_t end = pop(stack).toInt();
1992           int64_t start = pop(stack).toInt();
1993           std::string substr = pop(stack).toStringRef();
1994           std::string string = pop(stack).toStringRef();
1995           int64_t size = string.size();
1996           if (start < 0) {
1997             start = std::max(int64_t(0), int64_t(size + start));
1998           }
1999           if (end < 0) {
2000             end = std::max(int64_t(0), int64_t(size + end + 1));
2001           }
2002 
2003           string = string.substr(start, end - start);
2004 
2005           auto result = false;
2006           if (string.length() >= substr.length()) {
2007             result = !string.compare(
2008                 string.length() - substr.length(), substr.length(), substr);
2009           }
2010           push(stack, result);
2011         },
2012         aliasAnalysisFromSchema()),
2013     OperatorGeneratorArgs(
2014         TORCH_SELECTIVE_SCHEMA(
2015             "aten::startswith(str self, str substr, int start=0, int end=-1) -> bool"),
2016         [](Stack& stack) {
2017           int64_t end = pop(stack).toInt();
2018           int64_t start = pop(stack).toInt();
2019           std::string substr = pop(stack).toStringRef();
2020           std::string string = pop(stack).toStringRef();
2021           int64_t size = string.size();
2022           if (start < 0) {
2023             start = std::max(int64_t(0), int64_t(size + start));
2024           }
2025           if (end < 0) {
2026             end = std::max(int64_t(0), int64_t(size + end + 1));
2027           }
2028 
2029           string = string.substr(start, end - start);
2030 
2031           auto result = false;
2032           if (string.length() >= substr.length()) {
2033             result = !string.compare(0, substr.length(), substr);
2034           }
2035           push(stack, result);
2036         },
2037         aliasAnalysisFromSchema()),
2038     OperatorGeneratorArgs(
2039         TORCH_SELECTIVE_SCHEMA(
2040             "aten::expandtabs(str self, int tabsize=8) -> str"),
2041         [](Stack& stack) {
2042           int64_t tabsize = pop(stack).toInt();
2043           std::string string = pop(stack).toStringRef();
2044           std::stringstream ss;
2045           size_t index = 0;
2046           for (const auto& c : string) {
2047             if (c != '\t') {
2048               ss << c;
2049               index++;
2050             } else {
2051               if (tabsize <= 0) {
2052                 continue;
2053               }
2054               do {
2055                 ss << ' ';
2056                 index++;
2057               } while (index % tabsize);
2058             }
2059           }
2060           push(stack, ss.str());
2061         },
2062         aliasAnalysisFromSchema()),
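    // Worked example (editor's note): each tab advances to the next multiple
    // of tabsize, so expandtabs("a\tbc\td", 4) yields "a   bc  d".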
2063     OperatorGeneratorArgs(
2064         TORCH_SELECTIVE_SCHEMA(
2065             "aten::find(str self, str substr, int start=0, int end=-1) -> int"),
2066         [](Stack& stack) {
2067           int64_t end = pop(stack).toInt();
2068           int64_t start = pop(stack).toInt();
2069           std::string substr = pop(stack).toStringRef();
2070           std::string string = pop(stack).toStringRef();
2071 
2072           push(stack, stringFindImpl(string, substr, start, end));
2073         },
2074         aliasAnalysisFromSchema()),
2075     OperatorGeneratorArgs(
2076         TORCH_SELECTIVE_SCHEMA(
2077             "aten::rfind(str self, str substr, int start=0, int end=-1) -> int"),
2078         [](Stack& stack) {
2079           int64_t end = pop(stack).toInt();
2080           int64_t start = pop(stack).toInt();
2081           std::string substr = pop(stack).toStringRef();
2082           std::string string = pop(stack).toStringRef();
2083 
2084           push(stack, stringFindImpl(string, substr, start, end, true));
2085         },
2086         aliasAnalysisFromSchema()),
2087     OperatorGeneratorArgs(
2088         TORCH_SELECTIVE_SCHEMA(
2089             "aten::index.str(str self, str substr, int start=0, int end=-1) -> int"),
2090         [](Stack& stack) {
2091           int64_t end = pop(stack).toInt();
2092           int64_t start = pop(stack).toInt();
2093           std::string substr = pop(stack).toStringRef();
2094           std::string string = pop(stack).toStringRef();
2095           auto result = stringFindImpl(string, substr, start, end);
2096           if (result < 0) {
2097             throw std::runtime_error("ValueError: substring not found");
2098           }
2099           push(stack, result);
2100         },
2101         aliasAnalysisFromSchema()),
2102     OperatorGeneratorArgs(
2103         TORCH_SELECTIVE_SCHEMA(
2104             "aten::rindex(str self, str substr, int start=0, int end=-1) -> int"),
2105         [](Stack& stack) {
2106           int64_t end = pop(stack).toInt();
2107           int64_t start = pop(stack).toInt();
2108           std::string substr = pop(stack).toStringRef();
2109           std::string string = pop(stack).toStringRef();
2110           auto result = stringFindImpl(string, substr, start, end, true);
2111           if (result < 0) {
2112             throw std::runtime_error("ValueError: substring not found");
2113           }
2114           push(stack, result);
2115         },
2116         aliasAnalysisFromSchema()),
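    // Editor's note: find/rfind return -1 when the substring is missing,
    // while index/rindex raise ValueError instead, as in Python.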
2117     OperatorGeneratorArgs(
2118         TORCH_SELECTIVE_SCHEMA("aten::isidentifier(str self) -> bool"),
2119         [](Stack& stack) {
2120           std::string string = pop(stack).toStringRef();
2121           LOG(WARNING)
2122               << "The isidentifier() implementation being used is from Python 2\n";
2123           if (string.empty()) {
2124             push(stack, false);
2125             return;
2126           }
2127           if (::isdigit(string[0])) {
2128             push(stack, false);
2129             return;
2130           }
2131           auto result = std::all_of(string.begin(), string.end(), [](char c) {
2132             return ::isalnum(c);
2133           });
2134           push(stack, result);
2135         },
2136         aliasAnalysisFromSchema()),
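    // Editor's note: the ::isalnum check above rejects underscores, so "_x"
    // is not an identifier here even though Python 3 would accept it.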
2137     OperatorGeneratorArgs(
2138         TORCH_SELECTIVE_SCHEMA("aten::istitle(str self) -> bool"),
2139         [](Stack& stack) {
2140           std::string string = pop(stack).toStringRef();
2141           auto result = false;
2142 
2143           bool prev_is_alpha = false;
2144           for (char c : string) {
2145             if (prev_is_alpha) {
2146               if (c != static_cast<char>(::tolower(c))) {
2147                 result = false;
2148                 break;
2149               }
2150             } else {
2151               if (c != static_cast<char>(::toupper(c))) {
2152                 result = false;
2153                 break;
2154               }
2155               // Only true if there exists at least one alpha
2156               if (::isalpha(c)) {
2157                 result = true;
2158               }
2159             }
2160             if (::isalpha(c)) {
2161               prev_is_alpha = true;
2162             } else {
2163               prev_is_alpha = false;
2164             }
2165           }
2166           push(stack, result);
2167         },
2168         aliasAnalysisFromSchema()),
2169     // Can't reuse DEFINE_STRING_IS_OP because "" is printable
2170     OperatorGeneratorArgs(
2171         TORCH_SELECTIVE_SCHEMA("aten::isprintable(str self) -> bool"),
2172         [](Stack& stack) {
2173           std::string string = pop(stack).toStringRef();
2174           auto result = std::all_of(string.begin(), string.end(), [](char c) {
2175             return ::isalnum(c) || ::ispunct(c) || c == ' ';
2176           });
2177           push(stack, result);
2178         },
2179         aliasAnalysisFromSchema()),
2180     OperatorGeneratorArgs(
2181         TORCH_SELECTIVE_SCHEMA(
2182             "aten::ljust(str self, int width, str fillchar=' ') -> str"),
2183         [](Stack& stack) {
2184           std::string fillchar = pop(stack).toStringRef();
2185           int64_t width = pop(stack).toInt();
2186           std::string string = pop(stack).toStringRef();
2187           if (fillchar.size() != 1) {
2188             // TODO: this should be a TypeError
2189             throw std::runtime_error(
2190                 "TypeError: The fill character must be exactly one character long");
2191           }
2192           auto to_append =
2193               std::max(int64_t(0), width - static_cast<int64_t>(string.size()));
2194 
2195           std::stringstream ss;
2196           ss << string;
2197           for (const auto i : c10::irange(to_append)) {
2198             (void)i; // Suppress unused variable warning
2199             ss << fillchar;
2200           }
2201           push(stack, ss.str());
2202         },
2203         aliasAnalysisFromSchema()),
2204     OperatorGeneratorArgs(
2205         TORCH_SELECTIVE_SCHEMA(
2206             "aten::rjust(str self, int width, str fillchar=' ') -> str"),
2207         [](Stack& stack) {
2208           std::string fillchar = pop(stack).toStringRef();
2209           int64_t width = pop(stack).toInt();
2210           std::string string = pop(stack).toStringRef();
2211           if (fillchar.size() != 1) {
2212             // TODO: this should be a TypeError
2213             throw std::runtime_error(
2214                 "TypeError: The fill character must be exactly one character long");
2215           }
2216           auto to_append =
2217               std::max(int64_t(0), width - static_cast<int64_t>(string.size()));
2218 
2219           std::stringstream ss;
2220           for (const auto i : c10::irange(to_append)) {
2221             (void)i; // Suppress unused variable warning
2222             ss << fillchar;
2223           }
2224           ss << string;
2225           push(stack, ss.str());
2226         },
2227         aliasAnalysisFromSchema()),
2228     OperatorGeneratorArgs(
2229         TORCH_SELECTIVE_SCHEMA("aten::zfill(str self, int width) -> str"),
2230         [](Stack& stack) {
2231           int64_t width = pop(stack).toInt();
2232           std::string string = pop(stack).toStringRef();
2233           auto to_append =
2234               std::max(int64_t(0), width - static_cast<int64_t>(string.size()));
2235 
2236           std::stringstream ss;
2237           for (const auto i : c10::irange(to_append)) {
2238             (void)i; // Suppress unused variable warning
2239             ss << '0';
2240           }
2241           ss << string;
2242           push(stack, ss.str());
2243         },
2244         aliasAnalysisFromSchema()),
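    // Editor's note: zfill("42", 5) yields "00042"; unlike CPython, a leading
    // sign is not moved ahead of the padding ("-42" becomes "00-42").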
2245     OperatorGeneratorArgs(
2246         TORCH_SELECTIVE_SCHEMA(
2247             "aten::lstrip(str self, str chars=' \\n\\t\\f\\v') -> str"),
2248         [](Stack& stack) {
2249           std::string chars = pop(stack).toStringRef();
2250           std::string string = pop(stack).toStringRef();
2251           auto index = string.find_first_not_of(chars);
2252           if (index != std::string::npos) {
2253             string = string.substr(index, string.size());
2254           } else {
2255             string = "";
2256           }
2257           push(stack, string);
2258         },
2259         aliasAnalysisFromSchema()),
2260     OperatorGeneratorArgs(
2261         TORCH_SELECTIVE_SCHEMA(
2262             "aten::rstrip(str self, str chars=' \\n\\t\\f\\v') -> str"),
2263         [](Stack& stack) {
2264           std::string chars = pop(stack).toStringRef();
2265           std::string string = pop(stack).toStringRef();
2266           auto index = string.find_last_not_of(chars);
2267           if (index != std::string::npos) {
2268             string = string.substr(0, index + 1);
2269           } else {
2270             string = "";
2271           }
2272           push(stack, string);
2273         },
2274         aliasAnalysisFromSchema()),
2275     OperatorGeneratorArgs(
2276         TORCH_SELECTIVE_SCHEMA(
2277             "aten::replace(str self, str old, str new, int max=-1) -> str"),
2278         [](Stack& stack) {
2279           int64_t max = pop(stack).toInt();
2280           std::string new_str = pop(stack).toStringRef();
2281           std::string old_str = pop(stack).toStringRef();
2282           std::string string = pop(stack).toStringRef();
2283           int64_t occurrences = 0;
2284           std::string::size_type pos = 0;
2285           while ((pos = string.find(old_str, pos)) != std::string::npos) {
2286             if (max >= 0 && ++occurrences > max) {
2287               break;
2288             }
2289             string = string.replace(pos, old_str.length(), new_str);
2290             pos += new_str.length();
2291           }
2292 
2293           push(stack, string);
2294         },
2295         aliasAnalysisFromSchema()),
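    // Worked example (editor's note): replace("aaa", "a", "bb", 2) scans left
    // to right and stops after max replacements, producing "bbbba" as Python
    // does.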
2296     OperatorGeneratorArgs(
2297         TORCH_SELECTIVE_SCHEMA(
2298             "aten::partition(str self, str separator) -> (str, str, str)"),
2299         [](Stack& stack) {
2300           std::string separator = pop(stack).toStringRef();
2301           std::string string = pop(stack).toStringRef();
2302           auto pos = string.find(separator, 0);
2303           if (pos == std::string::npos) {
2304             pos = string.size();
2305             separator = "";
2306           }
2307           auto pre_partition = string.substr(0, pos);
2308           auto post_partition =
2309               string.substr(pos + separator.size(), string.size());
2310           push(stack, pre_partition, separator, post_partition);
2311         },
2312         aliasAnalysisFromSchema()),
2313     OperatorGeneratorArgs(
2314         TORCH_SELECTIVE_SCHEMA(
2315             "aten::rpartition(str self, str separator) -> (str, str, str)"),
2316         [](Stack& stack) {
2317           std::string separator = pop(stack).toStringRef();
2318           std::string string = pop(stack).toStringRef();
2319           auto pos = string.find(separator, 0);
2320           auto rpos = pos;
2321           do {
2322             pos = rpos;
2323             rpos = string.find(separator, pos + 1);
2324           } while (rpos != std::string::npos);
2325 
2326           if (pos == std::string::npos) {
2327             pos = 0;
2328             separator = "";
2329           }
2330 
2331           auto pre_partition = string.substr(0, pos);
2332           auto post_partition =
2333               string.substr(pos + separator.size(), string.size());
2334           push(stack, pre_partition, separator, post_partition);
2335         },
2336         aliasAnalysisFromSchema()),
2337     OperatorGeneratorArgs(
2338         TORCH_SELECTIVE_SCHEMA(
2339             "aten::rsplit(str self, str separator=' ', int max=-1) -> str[]"),
2340         [](Stack& stack) {
2341           int64_t max = pop(stack).toInt();
2342           std::string separator = pop(stack).toStringRef();
2343           std::string string = pop(stack).toStringRef();
2344           std::reverse(separator.begin(), separator.end());
2345           std::reverse(string.begin(), string.end());
2346 
2347           std::string::size_type prev_pos = 0;
2348           std::string::size_type pos = 0;
2349           c10::List<std::string> splits;
2350           auto count = 0;
2351           while ((pos = string.find(separator, pos)) != std::string::npos) {
2352             count++;
2353             if (max >= 0 && count > max) {
2354               break;
2355             } else {
2356               auto substr = string.substr(prev_pos, pos - prev_pos);
2357               std::reverse(substr.begin(), substr.end());
2358               splits.emplace(splits.begin(), substr);
2359             }
2360             pos += separator.size();
2361             prev_pos = pos;
2362           }
2363           auto substr = string.substr(prev_pos, string.size() - prev_pos);
2364           std::reverse(substr.begin(), substr.end());
2365           splits.emplace(splits.begin(), substr);
2366           push(stack, std::move(splits));
2367         },
2368         aliasAnalysisFromSchema()),
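    // Editor's note: rsplit reverses both strings, performs a forward split,
    // then reverses each piece and prepends it, so splits are counted from
    // the right as in Python's str.rsplit.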
2369     OperatorGeneratorArgs(
2370         TORCH_SELECTIVE_SCHEMA("aten::join(str self, str[] values) -> str"),
2371         [](Stack& stack) {
2372           IValue ivalue = pop(stack);
2373           c10::ArrayRef<IValue> ivalues = ivalue.toListRef();
2374           c10::List<std::string> values;
2375           for (const auto& v : ivalues) {
2376             values.emplace_back(v.toStringRef());
2377           }
2378           std::optional<std::string> opt_string =
2379               pop(stack).toOptional<std::string>();
2380           const std::string& string = opt_string.value_or("");
2381           std::stringstream ss;
2382           for (auto it = values.begin(); it != values.end(); ++it) {
2383             ss << static_cast<std::string>(*it);
2384             if (it != values.end() - 1) {
2385               ss << string;
2386             }
2387           }
2388           push(stack, ss.str());
2389         },
2390         aliasAnalysisFromSchema()),
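    // Usage sketch (editor's note): join(",", ["a", "b", "c"]) -> "a,b,c";
    // an empty value list produces "".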
2391 };
2392 
2393 RegisterOperators regStrOps(createOperators(stringOpGenArgs));
2394 
2395 static const std::vector<OperatorGeneratorArgs> opGenArgs1{
2396     OperatorGeneratorArgs(
2397         TORCH_SELECTIVE_SCHEMA("prim::rangelist(int n) -> int[]"),
2398         [](Stack& stack) {
2399           int64_t n = 0;
2400           pop(stack, n);
2401           c10::List<int64_t> elems;
2402           elems.reserve(n);
2403           for (const auto i : c10::irange(n)) {
2404             elems.push_back(i);
2405           }
2406           push(stack, std::move(elems));
2407         },
2408         aliasAnalysisFromSchema()),
2409     // note: this op needs to share a name with the Scalar -> Tensor conversion
2410     // because all _to_tensor conversions have to have the same operator name
2411     OperatorGeneratorArgs(
2412         TORCH_SELECTIVE_SCHEMA("prim::NumToTensor.bool(bool a) -> Tensor"),
2413         numToTensorBool,
2414         aliasAnalysisFromSchema()),
2415     OperatorGeneratorArgs(
2416         TORCH_SELECTIVE_SCHEMA("aten::device(str a) -> Device"),
2417         [](Stack& stack) {
2418           push(stack, c10::Device(pop(stack).toStringRef()));
2419         },
2420         aliasAnalysisFromSchema()),
2421     OperatorGeneratorArgs(
2422         TORCH_SELECTIVE_SCHEMA(
2423             "aten::device.with_index(str type, int index) -> Device"),
2424         device_with_index,
2425         aliasAnalysisFromSchema()),
2426     OperatorGeneratorArgs(
2427         TORCH_SELECTIVE_SCHEMA("aten::percentFormat(str self, ...) -> str"),
2428         [](Stack& stack) {
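          // Variadic op: the interpreter pushes the actual input count on top
          // of the stack, which is popped first here.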
2429           size_t num_inputs = pop(stack).toInt();
2430           percentFormat(stack, num_inputs);
2431         },
2432         aliasAnalysisFromSchema()),
2433     OperatorGeneratorArgs(
2434         TORCH_SELECTIVE_SCHEMA(
2435             "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)"),
2436         [](Stack& stack) {
2437           at::Tensor self;
2438           bool non_blocking = false;
2439           bool copy = false;
2440           pop(stack, self, non_blocking, copy);
2441           std::optional<c10::Device> device = std::nullopt;
2442           std::optional<at::ScalarType> scalarType = std::nullopt;
2443           push(
2444               stack, to_dispatch(self, device, scalarType, non_blocking, copy));
2445         },
2446         aliasAnalysisFromSchema()),
2447     OperatorGeneratorArgs(
2448         TORCH_SELECTIVE_SCHEMA("prim::requires_grad(Tensor a) -> bool"),
2449         [](Stack& stack) {
2450           at::Tensor a;
2451           pop(stack, a);
2452           push(stack, a.requires_grad());
2453         },
2454         aliasAnalysisFromSchema()),
2455     OperatorGeneratorArgs(
2456         TORCH_SELECTIVE_SCHEMA("prim::grad(Tensor a) -> Tensor(*)"),
2457         [](Stack& stack) {
2458           at::Tensor a;
2459           pop(stack, a);
2460           push(stack, a.grad());
2461         },
2462         aliasAnalysisFromSchema()),
2463     OperatorGeneratorArgs(
2464         TORCH_SELECTIVE_SCHEMA("prim::is_sparse(Tensor a) -> bool"),
2465         [](Stack& stack) {
2466           at::Tensor a;
2467           pop(stack, a);
2468           push(stack, a.is_sparse());
2469         },
2470         aliasAnalysisFromSchema()),
2471     OperatorGeneratorArgs(
2472         TORCH_SELECTIVE_SCHEMA("prim::is_sparse_csr(Tensor a) -> bool"),
2473         [](Stack& stack) {
2474           at::Tensor a;
2475           pop(stack, a);
2476           push(stack, a.is_sparse_csr());
2477         },
2478         aliasAnalysisFromSchema()),
2479     OperatorGeneratorArgs(
2480         TORCH_SELECTIVE_SCHEMA("prim::is_mkldnn(Tensor a) -> bool"),
2481         [](Stack& stack) {
2482           at::Tensor a;
2483           pop(stack, a);
2484           push(stack, a.is_mkldnn());
2485         },
2486         aliasAnalysisFromSchema()),
2487     OperatorGeneratorArgs(
2488         TORCH_SELECTIVE_SCHEMA("prim::is_mps(Tensor a) -> bool"),
2489         [](Stack& stack) {
2490           at::Tensor a;
2491           pop(stack, a);
2492           push(stack, a.is_mps());
2493         },
2494         aliasAnalysisFromSchema()),
2495     OperatorGeneratorArgs(
2496         TORCH_SELECTIVE_SCHEMA("prim::is_vulkan(Tensor a) -> bool"),
2497         [](Stack& stack) {
2498           at::Tensor a;
2499           pop(stack, a);
2500           push(stack, a.is_vulkan());
2501         },
2502         aliasAnalysisFromSchema()),
2503     OperatorGeneratorArgs(
2504         TORCH_SELECTIVE_SCHEMA("prim::is_ipu(Tensor a) -> bool"),
2505         [](Stack& stack) {
2506           at::Tensor a;
2507           pop(stack, a);
2508           push(stack, a.is_ipu());
2509         },
2510         aliasAnalysisFromSchema()),
2511     OperatorGeneratorArgs(
2512         TORCH_SELECTIVE_SCHEMA("prim::is_quantized(Tensor a) -> bool"),
2513         [](Stack& stack) {
2514           at::Tensor a;
2515           pop(stack, a);
2516           push(stack, a.is_quantized());
2517         },
2518         aliasAnalysisFromSchema()),
2519     OperatorGeneratorArgs(
2520         TORCH_SELECTIVE_SCHEMA("prim::is_meta(Tensor a) -> bool"),
2521         [](Stack& stack) {
2522           at::Tensor a;
2523           pop(stack, a);
2524           push(stack, a.is_meta());
2525         },
2526         aliasAnalysisFromSchema()),
2527     OperatorGeneratorArgs(
2528         TORCH_SELECTIVE_SCHEMA("prim::is_maia(Tensor a) -> bool"),
2529         [](Stack& stack) {
2530           at::Tensor a;
2531           pop(stack, a);
2532           push(stack, a.is_maia());
2533         },
2534         aliasAnalysisFromSchema()),
2535     OperatorGeneratorArgs(
2536         TORCH_SELECTIVE_SCHEMA("prim::is_nested(Tensor a) -> bool"),
2537         [](Stack& stack) {
2538           at::Tensor a;
2539           pop(stack, a);
2540           push(stack, a.is_nested());
2541         },
2542         aliasAnalysisFromSchema()),
2543     OperatorGeneratorArgs(
2544         TORCH_SELECTIVE_SCHEMA("prim::name(Tensor a) -> str?"),
__anonbfe5918f8602() 2545         [](Stack& stack) {
2546           at::Tensor a;
2547           pop(stack, a);
2548           if (a.name().empty()) {
2549             push(stack, IValue());
2550           } else {
2551             push(stack, a.name());
2552           }
2553         },
2554         aliasAnalysisFromSchema()),
2555     OperatorGeneratorArgs(
2556         TORCH_SELECTIVE_SCHEMA("prim::nbytes(Tensor a) -> int"),
__anonbfe5918f8702() 2557         [](Stack& stack) {
2558           at::Tensor a;
2559           pop(stack, a);
2560           const auto nbytes = static_cast<int64_t>(a.nbytes());
2561           push(stack, nbytes);
2562         },
2563         aliasAnalysisFromSchema()),
2564     OperatorGeneratorArgs(
2565         TORCH_SELECTIVE_SCHEMA("prim::itemsize(Tensor a) -> int"),
__anonbfe5918f8802() 2566         [](Stack& stack) {
2567           at::Tensor a;
2568           pop(stack, a);
2569           const auto itemsize = static_cast<int64_t>(a.itemsize());
2570           push(stack, itemsize);
2571         },
2572         aliasAnalysisFromSchema()),
2573     OperatorGeneratorArgs(
2574         TORCH_SELECTIVE_SCHEMA("prim::index(Device self) -> int?"),
__anonbfe5918f8902() 2575         [](Stack& stack) {
2576           auto d = pop(stack).toDevice();
2577           if (d.has_index()) {
2578             push(stack, d.index());
2579           } else {
2580             push(stack, IValue());
2581           }
2582         },
2583         aliasAnalysisFromSchema()),
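    // Usage sketch (TorchScript surface syntax, illustrative only): a device
    // constructed without an explicit index reports None, e.g.
    //   torch.device("cuda:1").index  # -> 1
    //   torch.device("cpu").index     # -> None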
    OperatorGeneratorArgs(
        // TODO: return a Generator object once TorchScript supports RNG as a
        // first-class value
        TORCH_SELECTIVE_SCHEMA("aten::manual_seed(int seed) -> ()"),
        [](Stack& stack) { at::manual_seed(pop(stack).toInt()); },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::Generator(*, Device? device=None, int? seed=None) -> Generator"),
        [](Stack& stack) {
          auto seed = pop(stack).toOptional<int64_t>();
          auto device = pop(stack).toOptional<c10::Device>();
          push(
              stack,
              torch::jit::make_generator_for_device(
                  device.value_or(c10::Device("cpu")), seed));
        },
        aliasAnalysisFromSchema()),
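    // Usage sketch (illustrative only, assuming the frontend maps
    // torch.Generator to this schema): both arguments are keyword-only and
    // the device defaults to CPU when omitted, e.g.
    //   g = torch.Generator(device="cpu", seed=42)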
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::initial_seed(Generator self) -> int"),
        [](Stack& stack) {
          auto generator = pop(stack);
          auto current_seed = generator.toGenerator().current_seed();
          push(stack, static_cast<int64_t>(current_seed));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::manual_seed.generator(Generator(a!) self, int seed) -> Generator(a!)"),
        [](Stack& stack) {
          auto seed = pop(stack).toInt();
          auto generator = pop(stack);
          generator.toGenerator().set_current_seed(seed);
          push(stack, generator);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::seed(Generator(a!) self) -> int"),
        [](Stack& stack) {
          auto generator = pop(stack);
          auto current_seed = generator.toGenerator().seed();
          push(stack, static_cast<int64_t>(current_seed));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::cuda(Tensor(a) self) -> Tensor(a|b)"),
        [](Stack& stack) {
          at::Tensor a;
          pop(stack, a);
          push(stack, a.cuda());
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::AutogradZero() -> Tensor"),
        [](Stack& stack) { stack.emplace_back(at::Tensor()); },
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim = False) -> int[]"),
        [](Stack& stack) {
          bool keepdim = pop(stack).toBool();
          c10::List<int64_t> axes = pop(stack).toIntList();
          c10::List<int64_t> size = pop(stack).toIntList();
          if (keepdim) {
            for (const auto& axis : axes) {
              size.set(axis, 1);
            }
          } else {
            int64_t index = 0;
            auto iter = size.begin();
            std::sort(axes.begin(), axes.end());
            for (const auto& axis : axes) {
              // advance iter from the previous position to this axis
              iter += axis - index;

              // erase the element at axis; erase() returns an iterator to
              // the element that followed it
              iter = size.erase(iter);

              // iter now corresponds to position axis + 1
              index = axis + 1;
            }
          }
          push(stack, IValue(std::move(size)));
        },
        aliasAnalysisFromSchema()),
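    // Worked example (illustrative only): with size = [2, 3, 4] and
    // red_axes = [0, 2], keepdim=True gives [1, 3, 1] (reduced dims kept as
    // size 1), while keepdim=False gives [3] (reduced dims erased).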
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::BroadcastSizes(...) -> int[]"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          std::vector<int64_t> size;
          size.reserve(8);
          for (const auto i : c10::irange(num_inputs)) {
            size =
                at::infer_size(size, peek(stack, i, num_inputs).toDimVector());
          }
          drop(stack, num_inputs);
          push(stack, IValue(size));
        },
        aliasAnalysisSpecialCase()),
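    // Worked example (illustrative only): broadcasting [2, 1, 4] with
    // [3, 4] follows the usual right-aligned broadcasting rules and
    // yields [2, 3, 4].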
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::warn(str message, int stacklevel=2) -> ()"),
        [](Stack& stack) {
          TORCH_CHECK(false, "warn is implemented directly in the interpreter");
        },
        aliasAnalysisFromSchema()),

    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "onnx::Reshape(Tensor input, Tensor shape) -> Tensor"),
        [](Stack& stack) {
          at::Tensor input, shape;
          pop(stack, input, shape);
          shape = shape.contiguous();
          AT_ASSERT(shape.ndimension() == 1);
          at::IntArrayRef shape_list(
              shape.const_data_ptr<int64_t>(), shape.size(0));
          push(stack, input.reshape(shape_list));
        },
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("onnx::Shape(Tensor t) -> Tensor"),
        [](Stack& stack) {
          auto t = pop(stack).toTensor();
          at::IntArrayRef sizes = t.sizes();
          auto sizes_tensor = torch::empty(
              {static_cast<int64_t>(sizes.size())}, at::dtype(at::kLong));
          auto accessor = sizes_tensor.accessor<int64_t, 1>();
          for (const auto i : c10::irange(sizes.size())) {
            accessor[i] = sizes[i];
          }
          stack.emplace_back(sizes_tensor);
        },
        aliasAnalysisSpecialCase()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::AutogradAnyNonZero(...) -> bool"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          bool result = false;
          for (const IValue& v : last(stack, num_inputs)) {
            if (v.isTensor()) {
              if (v.toTensor().defined()) {
                result = true;
                break;
              }
            } else if (v.isTensorList()) {
              for (const at::Tensor& t : v.toTensorVector()) {
                if (t.defined()) {
                  result = true;
                }
              }
              if (result) {
                break;
              }
            } else {
              TORCH_INTERNAL_ASSERT(false);
            }
          }
          drop(stack, num_inputs);
          stack.emplace_back(result);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::AutogradAllZero(...) -> bool"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          bool result = true;
          for (const IValue& v : last(stack, num_inputs)) {
            TORCH_INTERNAL_ASSERT(v.isTensor());
            if (v.toTensor().defined()) {
              result = false;
              break;
            }
          }
          drop(stack, num_inputs);
          stack.emplace_back(result);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::AutogradAllNonZero(...) -> bool"),
        [](Stack& stack) {
          auto num_inputs = pop(stack).toInt();
          bool result = true;
          for (const IValue& v : last(stack, num_inputs)) {
            TORCH_INTERNAL_ASSERT(v.isTensor());
            if (!v.toTensor().defined()) {
              result = false;
              break;
            }
          }
          drop(stack, num_inputs);
          stack.emplace_back(result);
        },
        aliasAnalysisFromSchema()),
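    // Note: in this family, "zero" means an undefined tensor (the autograd
    // representation of a zero gradient, cf. prim::AutogradZero above), not
    // a tensor whose values are 0. AnyNonZero is true if any input is
    // defined; AllZero requires every input to be undefined; AllNonZero
    // requires every input to be defined.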
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::AutogradAdd(Any a, Any b) -> Any"),
        [](Stack& stack) {
          IValue i_a = pop(stack);
          IValue i_b = pop(stack);
          if (i_a.isNone() && i_b.isNone()) {
            stack.emplace_back(at::Tensor{});
            return;
          }
          if (i_a.isNone()) {
            stack.emplace_back(i_b.toTensor());
            return;
          }
          if (i_b.isNone()) {
            stack.emplace_back(i_a.toTensor());
            return;
          }
          at::Tensor a = i_a.toTensor();
          at::Tensor b = i_b.toTensor();
          // NOLINTNEXTLINE(bugprone-branch-clone)
          if (!a.defined() && !b.defined()) {
            // undef + undef == undef
            stack.emplace_back(a);
          } else if (!a.defined()) {
            stack.emplace_back(b);
          } else if (!b.defined()) {
            stack.emplace_back(a);
          } else {
            stack.emplace_back(a + b);
          }
        },
        aliasAnalysisSpecialCase()),
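    // Summary of the cases above: None and undefined tensors both act as
    // additive identities, so
    //   None + None -> undefined     undef + undef -> undef
    //   None + t    -> t             undef + t     -> t
    //   t1   + t2   -> t1 + t2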
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::_size_if_not_equal(int[] self_size, int[] other_size) -> int[]?"),
        [](Stack& stack) {
          IValue self_size, other_size;
          pop(stack, self_size, other_size);
          auto s = self_size.toDimVector();
          auto o = other_size.toDimVector();
          if (s == o) {
            stack.emplace_back();
          } else {
            stack.emplace_back(std::move(self_size));
          }
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::_unwrap_optional(t(a)? optional) -> t(a)"),
        [](Stack& stack) {
          auto val = pop(stack);
          TORCH_CHECK(!val.isNone(), "Unwrapping null optional");
          push(stack, std::move(val));
        },
        aliasAnalysisFromSchema())};

RegisterOperators reg1(createOperators(opGenArgs1));

void hashValue(Stack& stack) {
  auto value = pop(stack);
  push(stack, value.hash());
}

static const std::vector<OperatorGeneratorArgs> opGenArgs2{
    // registered as Any[] so that heterogeneous tuples can be called with len()
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::len.any(Any[] a) -> int"),
        listLen,
        aliasAnalysisFromSchema()),

// these ops have a specialized implementation for the list element type
#define CREATE_SPECIALIZED_LIST_OPS(decl_type, value_type) \
  OperatorGeneratorArgs(                                   \
      TORCH_SELECTIVE_SCHEMA(                              \
          "aten::remove." decl_type "(" decl_type          \
          "[](a!) self, " decl_type " el) -> ()"),         \
      listRemove<value_type>,                              \
      aliasAnalysisFromSchema()),                          \
      OperatorGeneratorArgs(                               \
          TORCH_SELECTIVE_SCHEMA(                          \
              "aten::index.list_" decl_type "(" decl_type  \
              "[] self, " decl_type " el) -> int"),        \
          listIndex<value_type>,                           \
          aliasAnalysisFromSchema()),                      \
      OperatorGeneratorArgs(                               \
          TORCH_SELECTIVE_SCHEMA(                          \
              "aten::count." decl_type "(" decl_type       \
              "[] self, " decl_type " el) -> int"),        \
          listCount<value_type>,                           \
          aliasAnalysisFromSchema()),

    CREATE_SPECIALIZED_LIST_OPS("int", int64_t)
    CREATE_SPECIALIZED_LIST_OPS("float", double)
    CREATE_SPECIALIZED_LIST_OPS("bool", bool)
    CREATE_SPECIALIZED_LIST_OPS("Tensor", at::Tensor)
    CREATE_SPECIALIZED_LIST_OPS("str", std::string)

#undef CREATE_GENERIC_LIST_OPS
#undef CREATE_SPECIALIZED_LIST_OPS

    // `listContains<T>` is not implemented for non-primitive types
    // TODO: Add List[bool] once .to<c10::List<bool>> doesn't throw an error
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::__contains__.float_list(float[] l, float item) -> bool"),
        listContains<double>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sort.int(int[](a!) self, bool reverse=False) -> ()"),
        listSort<int64_t>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sort.float(float[](a!) self, bool reverse=False) -> ()"),
        listSort<double>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()"),
        listSort<at::Tensor>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()"),
        listSort<bool>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sort.str(str[](a!) self, bool reverse=False) -> ()"),
        listSort<std::string>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sorted.int(int[](a) input) -> (int[])"),
        listCopyAndSort<int64_t>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sorted.float(float[](a) input) -> (float[])"),
        listCopyAndSort<double>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sorted.Tensor(Tensor[](a) input) -> (Tensor[])"),
        listCopyAndSort<at::Tensor>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sorted.bool(bool[](a) input) -> (bool[])"),
        listCopyAndSort<bool>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sorted.str(str[](a) input) -> (str[])"),
        listCopyAndSort<std::string>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::eq.float_list(float[] a, float[] b) -> bool"),
        listEq<double>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool"),
        listEq<at::Tensor>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::eq.bool_list(bool[] a, bool[] b) -> bool"),
        listEq<bool>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::eq.str_list(str[] a, str[] b) -> bool"),
        listEq<std::string>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::ne.float_list(float[] a, float[] b) -> bool"),
        listNe<double>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool"),
        listNe<at::Tensor>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::ne.bool_list(bool[] a, bool[] b) -> bool"),
        listNe<bool>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ne.str_list(str[] a, str[] b) -> bool"),
        listNe<std::string>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sorted.any(t[](a) self) -> (t[])"),
        sort_op</*has_reverse_arg*/ false, /*copy_return_list*/ true>,
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::sort.any(t[](a!) self, bool reverse=False) -> ()"),
        sort_op</*has_reverse_arg*/ true, /*copy_return_list*/ false>,
        aliasAnalysisFromSchema()),

#define DEFINE_CONVERT_BASE_OP(op_name, prefix, char_op) \
  OperatorGeneratorArgs(                                 \
      TORCH_SELECTIVE_SCHEMA(#op_name "(int i) -> str"), \
      [](Stack& stack) {                                 \
        auto i = pop(stack).toInt();                     \
        std::stringstream ss;                            \
        if (i < 0) {                                     \
          ss << "-";                                     \
          i = -i;                                        \
        }                                                \
        ss << "0" << prefix << char_op << i;             \
        push(stack, ss.str());                           \
      },                                                 \
      aliasAnalysisFromSchema())

    DEFINE_CONVERT_BASE_OP(aten::hex, "x", std::hex),
    DEFINE_CONVERT_BASE_OP(aten::oct, "o", std::oct),
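    // Example outputs (illustrative only): aten::hex(26) -> "0x1a",
    // aten::hex(-26) -> "-0x1a", aten::oct(8) -> "0o10".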

    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::bin(int i) -> str"),
        [](Stack& stack) {
          auto i = pop(stack).toInt();
          std::stringstream ss;
          if (i == 0) {
            push(stack, "0b0");
          } else {
            if (i < 0) {
              ss << "-";
              i = -i;
            }
            std::string str = std::bitset<8 * sizeof(i)>(i).to_string();
            str.erase(0, std::min(str.find_first_not_of('0'), str.size() - 1));
            ss << "0b" << str;
            push(stack, ss.str());
          }
        },
        aliasAnalysisFromSchema()),
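    // Example outputs (illustrative only): aten::bin(10) -> "0b1010",
    // aten::bin(-3) -> "-0b11", aten::bin(0) -> "0b0".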
    // TODO: deprecate this in favor of aten::getelem
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::StringIndex(str string, int index) -> str"),
        [](Stack& stack) {
          auto index = pop(stack).toInt();
          auto string = pop(stack).toStringRef();
          auto norm_index = normalizeIndex(index, string.size());
          char c = string.at(norm_index);
          push(stack, std::string(&c, 1));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::chr(int i) -> str"),
        [](Stack& stack) {
          auto i = pop(stack).toInt();
          std::stringstream ss;
          // range(0x110000) accepts 0 through 0x10FFFF (1114111) inclusive
          TORCH_CHECK(
              i >= 0 && i < 1114112,
              "chr() arg not in range(0x110000), found ",
              i);
          char c = i;
          ss << c;
          push(stack, ss.str());
        },
        aliasAnalysisFromSchema()),
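    // Example (illustrative only): aten::chr(97) -> "a". Note the result is
    // written as a single raw byte, so unlike Python's chr, code points
    // above 127 are not UTF-8 encoded here.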

    // only used in loop unrolling, not exposed to end users
    DEFINE_INT_OP(aten::__round_to_zero_floordiv, a / b),

    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::modf(float a) -> (float, float)"),
        [](Stack& stack) {
          double a = 0;
          pop(stack, a);
          double c = 0;
          double b = modf(a, &c);
          push(stack, b, c);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::frexp(float a) -> (float, int)"),
        [](Stack& stack) {
          double a = 0;
          pop(stack, a);
          double m = 0;
          int e = 0;
          m = std::frexp(a, &e);
          push(stack, m, e);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::ldexp(float x, int i) -> float"),
        [](Stack& stack) {
          double a = 0;
          int64_t b = 0;
          pop(stack, a, b);
          push(stack, std::ldexp(a, b));
        },
        aliasAnalysisFromSchema()),
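    // Examples (illustrative only): modf(3.25) -> (0.25, 3.0);
    // frexp(8.0) -> (0.5, 4) since 8 = 0.5 * 2^4; ldexp(0.5, 4) -> 8.0,
    // making ldexp the inverse of frexp.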
    DEFINE_BINARY_FLOAT_OP(aten::mathremainder, std::remainder(a, b)),

    DEFINE_INT_OP(aten::__and__, a & b),
    DEFINE_INT_OP(aten::__or__, a | b),
    DEFINE_INT_OP(aten::__xor__, a ^ b),
    DEFINE_INT_OP(aten::__lshift__, a << b),
    DEFINE_INT_OP(aten::__rshift__, a >> b),

    DEFINE_GENERIC_BINARY_OP(
        aten::log,
        std::log(a) / std::log(b),
        float,
        complex),
    DEFINE_INT_FLOAT_OP(aten::log, std::log(a) / std::log(b), float),
    DEFINE_INT_COMPLEX_OP(aten::log, std::log(a) / std::log(b), complex),
    DEFINE_FLOAT_COMPLEX_OP(aten::log, std::log(a) / std::log(b), complex),
    DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
        aten::log,
        std::log(a) / std::log(b),
        std::log(a) / std::log(b),
        float),
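    // The two-argument log above computes log base b of a via the change of
    // base identity log_b(a) = ln(a) / ln(b); e.g. log(8., 2.) -> 3.0.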
    DEFINE_UNARY_OP(aten::log1p, std::log1p(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::log10, std::log10(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::sqrt, std::sqrt(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::acos, std::acos(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::asin, std::asin(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::atan, std::atan(a), float, float),
    DEFINE_GENERIC_OP(
        aten::atan2,
        std::atan2(a, b),
        std::atan2(a, b),
        float,
        float),
    DEFINE_INT_FLOAT_OP(aten::atan2, std::atan2(a, b), float),
    DEFINE_SCALAR_BINARY_OP_AVOID_COLLISION(
        aten::atan2,
        std::atan2(a, b),
        std::atan2(a, b),
        float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::cos, std::cos(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::sin, std::sin(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::tan, std::tan(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::asinh, std::asinh(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::atanh, std::atanh(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::acosh, std::acosh(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::sinh, std::sinh(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::cosh, std::cosh(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX(aten::tanh, std::tanh(a), float, float),
    DEFINE_UNARY_OP_WITH_COMPLEX_CAST(
        aten::angle,
        std::arg(a),
        float,
        float,
        float,
        float),
    DEFINE_UNARY_OP(aten::degrees, degrees(a), float, float),
    DEFINE_UNARY_OP(aten::radians, radians(a), float, float),
    DEFINE_BINARY_FLOAT_OP(aten::fmod, std::fmod(a, b)),
    DEFINE_UNARY_INT_OP(aten::factorial, factorial(a), int),
    DEFINE_UNARY_FLOAT_OP(aten::isnan, std::isnan(a), bool),
    DEFINE_UNARY_FLOAT_OP(aten::isfinite, std::isfinite(a), bool),
    DEFINE_UNARY_FLOAT_OP(aten::isinf, std::isinf(a), bool),
    DEFINE_UNARY_COMPLEX_OP(
        aten::isnan,
        std::isnan(a.real()) || std::isnan(a.imag()),
        bool),
    DEFINE_UNARY_COMPLEX_OP(
        aten::isfinite,
        std::isfinite(a.real()) && std::isfinite(a.imag()),
        bool),
    DEFINE_UNARY_COMPLEX_OP(
        aten::isinf,
        std::isinf(a.real()) || std::isinf(a.imag()),
        bool),
    DEFINE_UNARY_OP(aten::gamma, std::tgamma(a), float, float),
    DEFINE_UNARY_OP(aten::erf, std::erf(a), float, float),
    DEFINE_UNARY_OP(aten::erfc, std::erfc(a), float, float),
    DEFINE_UNARY_OP(aten::expm1, std::expm1(a), float, float),
    DEFINE_UNARY_OP(aten::fabs, std::fabs(a), float, float),
    DEFINE_UNARY_OP(aten::lgamma, std::lgamma(a), float, float),

    // TODO: move abs to aten namespace because it's schematized!
    DEFINE_UNARY_OP_WITH_COMPLEX_CAST(
        prim::abs,
        std::abs(a),
        int,
        float,
        float,
        float),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::abs(Tensor x) -> Tensor"),
        [](Stack& stack) {
          at::Tensor x;
          pop(stack, x);
          push(stack, x.abs());
        },
        aliasAnalysisFromSchema()),

    DEFINE_INT_OP(aten::gcd, gcd(a, b)),

    DEFINE_GENERIC_OP(
        aten::copysign,
        std::copysign(a, b),
        std::copysign(a, b),
        float,
        float),
    DEFINE_INT_FLOAT_OP(aten::copysign, std::copysign(a, b), float),
    DEFINE_SCALAR_BINARY_OP(
        aten::copysign,
        std::copysign(a, b),
        std::copysign(a, b),
        float),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::_tensor_to_list(Tensor self) -> int[]"),
        [](Stack& stack) {
          at::Tensor t;
          pop(stack, t);
          c10::List<int64_t> elems;
          elems.reserve(t.size(0));
          for (const auto i : c10::irange(t.size(0))) {
            elems.push_back(*t[i].const_data_ptr<int32_t>());
          }
          push(stack, std::move(elems));
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::_list_to_tensor(int[] self) -> Tensor"),
        [](Stack& stack) {
          c10::List<int64_t> l = pop(stack).toIntList();
          auto t = torch::empty(
              {static_cast<int64_t>(l.size())}, at::dtype(at::kInt));
          for (const auto i : c10::irange(l.size())) {
            t[i] = l.get(i);
          }
          push(stack, std::move(t));
        },
        aliasAnalysisFromSchema()),
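    // Note: these two helpers are int32-backed: _list_to_tensor produces an
    // at::kInt tensor, and _tensor_to_list reads elements back through an
    // int32 pointer, so together they round-trip int lists whose values fit
    // in 32 bits.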
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sum.int(int[] self) -> int"),
        [](Stack& stack) {
          c10::List<int64_t> l = pop(stack).toIntList();
          int64_t sum = 0;
          for (const auto& elem : l) {
            sum += elem;
          }
          push(stack, sum);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sum.float(float[] self) -> float"),
        [](Stack& stack) {
          c10::List<double> l = pop(stack).toDoubleList();
          auto sum = 0.0;
          for (const auto& elem : l) {
            sum += elem;
          }
          push(stack, sum);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sum.complex(complex[] self) -> complex"),
        [](Stack& stack) {
          c10::List<c10::complex<double>> l = pop(stack).toComplexDoubleList();
          c10::complex<double> sum = 0.0;
          for (const auto i : c10::irange(l.size())) {
            sum = sum + l.extract(i);
          }
          push(stack, sum);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::sum.bool(bool[] self) -> int"),
        [](Stack& stack) {
          c10::List<bool> l = pop(stack).toBoolList();
          int64_t sum = 0;
          for (const auto& elem : l) {
            if (elem) {
              sum += 1;
            }
          }
          push(stack, sum);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::any.str(str[] self) -> bool"),
        [](Stack& stack) {
          auto l = pop(stack).toList();
          for (const auto& elem : l) {
            if (elem != "") {
              push(stack, true);
              return;
            }
          }
          push(stack, false);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::any.int(int[] self) -> bool"),
        [](Stack& stack) {
          c10::List<int64_t> l = pop(stack).toIntList();
          for (const auto& elem : l) {
            if (elem) {
              push(stack, true);
              return;
            }
          }
          push(stack, false);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::any.float(float[] self) -> bool"),
        [](Stack& stack) {
          c10::List<double> l = pop(stack).toDoubleList();
          for (const auto& elem : l) {
            if (elem) {
              push(stack, true);
              return;
            }
          }
          push(stack, false);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::any.bool(bool[] self) -> bool"),
        [](Stack& stack) {
          c10::List<bool> l = pop(stack).toBoolList();
          for (const auto& elem : l) {
            if (elem) {
              push(stack, true);
              return;
            }
          }
          push(stack, false);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::all.int(int[] self) -> bool"),
        [](Stack& stack) {
          c10::List<int64_t> l = pop(stack).toIntList();
          for (const auto& elem : l) {
            if (!elem) {
              push(stack, false);
              return;
            }
          }
          push(stack, true);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::all.float(float[] self) -> bool"),
        [](Stack& stack) {
          c10::List<double> l = pop(stack).toDoubleList();
          for (const auto& elem : l) {
            if (!elem) {
              push(stack, false);
              return;
            }
          }
          push(stack, true);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::all.bool(bool[] self) -> bool"),
        [](Stack& stack) {
          c10::List<bool> l = pop(stack).toBoolList();
          for (const auto& elem : l) {
            if (!elem) {
              push(stack, false);
              return;
            }
          }
          push(stack, true);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::divmod.int(int x, int y) -> (int, int)"),
        [](Stack& stack) {
          int64_t a = 0, b = 0;
          lldiv_t divresult = {};
          pop(stack, a, b);
          if (b == 0) {
            throw std::runtime_error(
                "ZeroDivisionError: integer division or modulo by zero");
          }
          divresult = lldiv(a, b);
          if (divresult.rem && (a < 0) != (b < 0)) {
            divresult.quot -= 1;
            divresult.rem += b;
          }
          push(
              stack,
              static_cast<int64_t>(divresult.quot),
              static_cast<int64_t>(divresult.rem));
        },
        aliasAnalysisFromSchema()),
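    // Like Python's divmod, the quotient is floored rather than truncated,
    // so e.g. divmod(7, -2) -> (-4, -1) and divmod(-7, 2) -> (-4, 1).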
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "aten::divmod.float(float x, float y) -> (float, float)"),
        [](Stack& stack) {
          double a = 0, b = 0;
          pop(stack, a, b);
          if (b == 0) {
            throw std::runtime_error("ZeroDivisionError: float divmod()");
          }
          double rem = fmod(a, b);
          // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions,bugprone-narrowing-conversions)
          if (rem && (a < 0) != (b < 0)) {
            rem += b;
          }
          push(stack, (a - rem) / b, rem);
        },
        aliasAnalysisFromSchema()),
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("prim::id(AnyClassType? x) -> int"),
        [](Stack& stack) {
          IValue a;
          pop(stack, a);
          if (a.isNone()) {
            push(stack, 0);
          } else {
            push(stack, reinterpret_cast<int64_t>(a.internalToPointer()));
          }
        },
        aliasAnalysisFromSchema()),
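    // Like CPython's id(), this returns an object identity (here the
    // underlying pointer value), so two references compare equal under
    // prim::id only if they alias the same object; None maps to 0.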
    // This operator is generated inside the compiler for indexing into
    // ModuleList without a statically determinable key. Accordingly,
    // self must be a ModuleType and the output must be an InterfaceType.
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA(
            "prim::ModuleContainerIndex.list(Any self, int ind) -> Any"),
        [](Stack& stack) {
          IValue ind = pop(stack);
          IValue module_dict = pop(stack);
          std::stringstream ss;
          ss << ind.toInt();
          push(
              stack, torch::jit::Object(module_dict.toObject()).attr(ss.str()));
        },
        aliasAnalysisFromSchema()),

#define DEFINE_DIVMOD_MIXED_OP(type_a, type_b)                               \
  OperatorGeneratorArgs(                                                     \
      TORCH_SELECTIVE_SCHEMA("aten::divmod." #type_a "_" #type_b "(" #type_a \
                             " x," #type_b " y) -> (float, float)"),         \
      [](Stack& stack) {                                                     \
        type_a a;                                                            \
        type_b b;                                                            \
        pop(stack, a, b);                                                    \
        if (b == 0) {                                                        \
          throw std::runtime_error("ZeroDivisionError: float divmod()");     \
        }                                                                    \
        double quot = floor(a / b);                                          \
        double rem = a - (quot * b);                                         \
        push(stack, quot, rem);                                              \
      },                                                                     \
      aliasAnalysisFromSchema())

    DEFINE_DIVMOD_MIXED_OP(int, float),
    DEFINE_DIVMOD_MIXED_OP(float, int),
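    // Mixed-type divmod promotes to double and floors the quotient, e.g.
    // divmod(7, 2.0) -> (3.0, 1.0) and divmod(-7, 2.0) -> (-4.0, 1.0).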

#undef DEFINE_DIVMOD_MIXED_OP
    OperatorGeneratorArgs(
        TORCH_SELECTIVE_SCHEMA("aten::hash.generic(t value) -> int"),
        hashValue,
        aliasAnalysisFromSchema()),

#define DEFINE_COMPLEX_OP(type_a, type_b, actual_type_a, actual_type_b)       \
  OperatorGeneratorArgs(                                                      \
      TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
                             " x," #type_b " y) -> complex"),                 \
      [](Stack& stack) {                                                      \
        actual_type_a a;                                                      \
        actual_type_b b;                                                      \
        pop(stack, a, b);                                                     \
        auto comp = c10::complex<double>(a, b);                               \
        push(stack, comp);                                                    \
      },                                                                      \
      aliasAnalysisFromSchema())

#define DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(                                    \
    type_a, type_b, actual_type_a, actual_type_b)                             \
  OperatorGeneratorArgs(                                                      \
      TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_a "_" #type_b "(" #type_a \
                             " x," #type_b " y) -> complex"),                 \
      [](Stack& stack) {                                                      \
        actual_type_a a;                                                      \
        actual_type_b b;                                                      \
        pop(stack, a, b);                                                     \
        auto comp = c10::complex<double>(a.item<double>(), b);                \
        push(stack, comp);                                                    \
      },                                                                      \
      aliasAnalysisFromSchema()),                                             \
      OperatorGeneratorArgs(                                                  \
          TORCH_SELECTIVE_SCHEMA("aten::Complex." #type_b "_" #type_a         \
                                 "(" #type_b " x," #type_a " y) -> complex"), \
          [](Stack& stack) {                                                  \
            actual_type_b a;                                                  \
            actual_type_a b;                                                  \
            pop(stack, a, b);                                                 \
            auto comp = c10::complex<double>(a, b.item<double>());            \
            push(stack, comp);                                                \
          },                                                                  \
          aliasAnalysisFromSchema())

    DEFINE_COMPLEX_OP(int, bool, int, bool),
    DEFINE_COMPLEX_OP(bool, int, bool, int),
    DEFINE_COMPLEX_OP(float, bool, double, bool),
    DEFINE_COMPLEX_OP(bool, float, bool, double),
    DEFINE_COMPLEX_OP(float, int, double, int),
    DEFINE_COMPLEX_OP(int, float, int, double),
    DEFINE_COMPLEX_OP(int, int, int, int),
    DEFINE_COMPLEX_OP(bool, bool, bool, bool),
    DEFINE_COMPLEX_OP(float, float, double, double),
    DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(Tensor, float, at::Tensor, double),
    DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(Tensor, int, at::Tensor, int),
    DEFINE_COMPLEX_OP_WITH_TENSOR_ARG(Tensor, bool, at::Tensor, bool),
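    // Example (illustrative only): aten::Complex.float_int(1.5, 2) builds
    // the value 1.5 + 2j; the Tensor variants read the scalar out of a
    // 0-dim tensor via .item<double>() first.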
};

RegisterOperators reg2(createOperators(opGenArgs2));

} // namespace
} // namespace torch::jit