// @lint-ignore-every CLANGTIDY HOWTOEVEN
// AUTO-GENERATED FROM: torchgen/static_runtime/gen_static_runtime_ops.py
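//
// Registers out-variant SROperator implementations of ATen ops with the
// static runtime, so output tensors can be reused across iterations instead
// of being reallocated on every invocation.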
#include <torch/csrc/jit/runtime/static/ops.h>

#include <ATen/CPUFunctions.h>
#include <ATen/InferSize.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Parallel.h>
#include <ATen/ScalarOps.h>
#include <ATen/TensorUtils.h>
#include <ATen/cpu/vec/functional.h>
#include <ATen/cpu/vec/vec.h>
#include <ATen/native/EmbeddingBag.h>
#include <ATen/native/Fill.h>
#include <ATen/native/IndexingUtils.h>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/Resize.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/native/TensorAdvancedIndexing.h>
#include <ATen/native/cpu/SerialStackImpl.h>
#include <ATen/native/layer_norm.h>
#include <ATen/native/quantized/cpu/fbgemm_utils.h>
#include <ATen/native/quantized/cpu/qembeddingbag.h>
#include <ATen/native/quantized/cpu/qembeddingbag_prepack.h>
#include <ATen/quantized/QTensorImpl.h>
#include <ATen/quantized/Quantizer.h>
#include <c10/core/ScalarType.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/util/irange.h>
#include <torch/csrc/jit/ir/ir.h>
#include <torch/csrc/jit/runtime/static/impl.h>
#include <torch/csrc/jit/runtime/static/te_wrapper.h>
#include <torch/csrc/jit/runtime/vararg_functions.h>
#include <torch/csrc/jit/tensorexpr/ir.h>
#include <torch/csrc/jit/tensorexpr/ir_simplifier.h>
#include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
#include <torch/csrc/jit/tensorexpr/loopnest.h>

namespace torch::jit {

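// Every functor below follows the same shape: match the node against a known
// schema, then return an SROperator lambda. On the first run the output
// IValue is None, so the lambda allocates it via the functional op; on later
// runs it reuses the existing tensor, shrinking it with fastResizeToZero()
// before calling the op's out variant. If no schema matches,
// LogAndDumpSchema(n) records the schema and nullptr is returned, meaning no
// specialized static runtime op is registered for that node.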
REGISTER_OPERATOR_FUNCTOR(
    aten::absolute,
    aten_absolute,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::absolute(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::absolute(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::absolute_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });
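
// Note the two out-variant conventions used throughout this file: at::cpu::
// entry points take the out tensor as their first argument, while
// at::native:: functions take it last, matching each declaration.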

REGISTER_OPERATOR_FUNCTOR(aten::angle, aten_angle, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::angle(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::angle(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::angle_out(self, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::sgn, aten_sgn, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::sgn(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::sgn(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::sgn_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::acos, aten_acos, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::acos(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::acos(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::acos_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::arccos, aten_arccos, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::arccos(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::arccos(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::arccos_out(self, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::_add_relu, aten__add_relu, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      const auto alpha = p_node->Input(2).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::add_relu(self, other, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::add_relu_out(self, other, alpha, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::addmv, aten_addmv, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& mat = p_node->Input(1).toTensor();
      const auto& vec = p_node->Input(2).toTensor();
      const auto beta = p_node->Input(3).toScalar();
      const auto alpha = p_node->Input(4).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::addmv(self, mat, vec, beta, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::addmv_out(out, self, mat, vec, beta, alpha);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::addr, aten_addr, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& vec1 = p_node->Input(1).toTensor();
      const auto& vec2 = p_node->Input(2).toTensor();
      const auto beta = p_node->Input(3).toScalar();
      const auto alpha = p_node->Input(4).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::addr(self, vec1, vec2, beta, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::addr_out(self, vec1, vec2, beta, alpha, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::_test_functorch_fallback,
    aten__test_functorch_fallback,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) =
                at::native::_test_functorch_fallback(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::_test_functorch_fallback_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::argmax, aten_argmax, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toOptional<int64_t>();
      const auto keepdim = p_node->Input(2).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::argmax(self, dim, keepdim);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::argmax_out(out, self, dim, keepdim);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::acosh, aten_acosh, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::acosh(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::acosh(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::acosh_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::asinh, aten_asinh, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::asinh(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::asinh(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::asinh_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::arcsinh,
    aten_arcsinh,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::arcsinh(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::arcsinh(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::arcsinh_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::atanh, aten_atanh, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::atanh(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::atanh(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::atanh_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::arctanh,
    aten_arctanh,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::arctanh(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::arctanh(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::arctanh_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::asin, aten_asin, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::asin(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::asin(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::asin_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::arcsin, aten_arcsin, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::arcsin(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::arcsin(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::arcsin_out(self, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::atan, aten_atan, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::atan(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::atan(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::atan_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::arctan, aten_arctan, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::arctan(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::arctan(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::arctan_out(self, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::baddbmm, aten_baddbmm, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& batch1 = p_node->Input(1).toTensor();
      const auto& batch2 = p_node->Input(2).toTensor();
      const auto beta = p_node->Input(3).toScalar();
      const auto alpha = p_node->Input(4).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::baddbmm(self, batch1, batch2, beta, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::baddbmm_out(out, self, batch1, batch2, beta, alpha);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::bitwise_not,
    aten_bitwise_not,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::bitwise_not(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::bitwise_not(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::bitwise_not_out(out, self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::copysign,
    aten_copysign,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::copysign.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::copysign(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::copysign_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::logical_not,
    aten_logical_not,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::logical_not(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::logical_not(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::logical_not_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::logical_xor,
    aten_logical_xor,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::logical_xor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::logical_xor(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::logical_xor_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::logical_and,
    aten_logical_and,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::logical_and(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::logical_and(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::logical_and_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::logical_or,
    aten_logical_or,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::logical_or(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::logical_or(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::logical_or_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::ceil, aten_ceil, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::ceil(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::ceil(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::ceil_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::clamp_max,
    aten_clamp_max,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::clamp_max(Tensor self, Scalar max) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto max = p_node->Input(1).toScalar();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::clamp_max(self, max);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::clamp_max_out(out, self, max);
        };
      }

      if (n->matches(torch::schema(
              "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& max = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::clamp_max(self, max);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::clamp_max_out(out, self, max);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::clip, aten_clip, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto min = p_node->Input(1).toOptional<at::Scalar>();
      const auto max = p_node->Input(2).toOptional<at::Scalar>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::clip(self, min, max);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::clip_out(self, min, max, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::complex,
    aten_complex,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::complex(Tensor real, Tensor imag) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& real = p_node->Input(0).toTensor();
          const auto& imag = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::complex(real, imag);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::complex_out(real, imag, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::polar, aten_polar, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::polar(Tensor abs, Tensor angle) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& abs = p_node->Input(0).toTensor();
      const auto& angle = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::polar(abs, angle);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::polar_out(abs, angle, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::cos, aten_cos, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::cos(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::cos(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::cos_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::cosh, aten_cosh, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::cosh(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::cosh(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::cosh_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::cumprod, aten_cumprod, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto dtype = p_node->Input(2).toOptional<at::ScalarType>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::cumprod(self, dim, dtype);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::cumprod_out(out, self, dim, dtype);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::diff, aten_diff, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto n = p_node->Input(1).toInt();
      const auto dim = p_node->Input(2).toInt();
      const auto prepend = p_node->Input(3).toOptional<at::Tensor>();
      const auto append = p_node->Input(4).toOptional<at::Tensor>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::diff(self, n, dim, prepend, append);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::diff_out(self, n, dim, prepend, append, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::divide, aten_divide, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::divide(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::divide_out(self, other, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::true_divide,
    aten_true_divide,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::true_divide(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::true_divide_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::dot, aten_dot, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::dot(Tensor self, Tensor tensor) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& tensor = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::dot(self, tensor);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::dot_out(self, tensor, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::vdot, aten_vdot, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::vdot(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::vdot(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::vdot_out(self, other, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::erf, aten_erf, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::erf(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::erf(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::erf_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::erfc, aten_erfc, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::erfc(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::erfc(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::erfc_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::exp, aten_exp, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::exp(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::exp(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::exp_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::exp2, aten_exp2, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::exp2(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::exp2(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::exp2_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::expm1, aten_expm1, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::expm1(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::expm1(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::expm1_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::floor, aten_floor, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::floor(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::floor(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::floor_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::frac, aten_frac, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::frac(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::frac(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::frac_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::gcd, aten_gcd, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::gcd(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::gcd(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::gcd_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::lcm, aten_lcm, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::lcm(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::lcm(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::lcm_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::index_copy, aten_index_copy, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto& source = p_node->Input(3).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::index_copy(self, dim, index, source);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::index_copy_out(out, self, dim, index, source);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::isin, aten_isin, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::isin.Tensor_Tensor(Tensor elements, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& elements = p_node->Input(0).toTensor();
      const auto& test_elements = p_node->Input(1).toTensor();
      const auto assume_unique = p_node->Input(2).toBool();
      const auto invert = p_node->Input(3).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) =
            at::cpu::isin(elements, test_elements, assume_unique, invert);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::isin_out(out, elements, test_elements, assume_unique, invert);
    };
  }

  if (n->matches(torch::schema(
          "aten::isin.Tensor_Scalar(Tensor elements, Scalar test_element, *, bool assume_unique=False, bool invert=False) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& elements = p_node->Input(0).toTensor();
      const auto test_element = p_node->Input(1).toScalar();
      const auto assume_unique = p_node->Input(2).toBool();
      const auto invert = p_node->Input(3).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) =
            at::cpu::isin(elements, test_element, assume_unique, invert);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::isin_out(out, elements, test_element, assume_unique, invert);
    };
  }

  if (n->matches(torch::schema(
          "aten::isin.Scalar_Tensor(Scalar element, Tensor test_elements, *, bool assume_unique=False, bool invert=False) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto element = p_node->Input(0).toScalar();
      const auto& test_elements = p_node->Input(1).toTensor();
      const auto assume_unique = p_node->Input(2).toBool();
      const auto invert = p_node->Input(3).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) =
            at::cpu::isin(element, test_elements, assume_unique, invert);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::isin_out(out, element, test_elements, assume_unique, invert);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});
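
// As with aten::isin above (and aten::clamp_max and aten::round elsewhere in
// this file), a functor may check several overloads of one op, returning a
// kernel for the first schema that matches.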

REGISTER_OPERATOR_FUNCTOR(aten::kron, aten_kron, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::kron(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::kron(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::kron_out(self, other, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::ldexp, aten_ldexp, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::ldexp.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::ldexp(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::ldexp_out(self, other, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::log10, aten_log10, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::log10(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::log10(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::log10_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::log1p, aten_log1p, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::log1p(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::log1p(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::log1p_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::log2, aten_log2, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::log2(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::log2(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::log2_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::logaddexp,
    aten_logaddexp,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::logaddexp(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::logaddexp(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::logaddexp_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::logaddexp2,
    aten_logaddexp2,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::logaddexp2(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::logaddexp2(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::logaddexp2_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::xlogy, aten_xlogy, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::xlogy.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::xlogy(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::xlogy_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::_log_softmax,
    aten__log_softmax,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto dim = p_node->Input(1).toInt();
          const auto half_to_float = p_node->Input(2).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::_log_softmax(self, dim, half_to_float);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::_log_softmax_out(out, self, dim, half_to_float);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::_log_softmax_backward_data,
    aten__log_softmax_backward_data,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& grad_output = p_node->Input(0).toTensor();
          const auto& output = p_node->Input(1).toTensor();
          const auto dim = p_node->Input(2).toInt();
          const auto input_dtype = p_node->Input(3).toScalarType();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::_log_softmax_backward_data(
                grad_output, output, dim, input_dtype);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::_log_softmax_backward_data_out(
              out, grad_output, output, dim, input_dtype);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::_logcumsumexp,
    aten__logcumsumexp,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_logcumsumexp(Tensor self, int dim) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto dim = p_node->Input(1).toInt();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::_logcumsumexp_cpu(self, dim);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::_logcumsumexp_out_cpu(self, dim, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::logcumsumexp,
    aten_logcumsumexp,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::logcumsumexp(Tensor self, int dim) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto dim = p_node->Input(1).toInt();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::logcumsumexp(self, dim);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::logcumsumexp_out(self, dim, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::matrix_power,
    aten_matrix_power,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::matrix_power(Tensor self, int n) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto n = p_node->Input(1).toInt();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::matrix_power(self, n);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::matrix_power_out(self, n, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::mm, aten_mm, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::mm(Tensor self, Tensor mat2) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& mat2 = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::mm(self, mat2);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::mm_out(out, self, mat2);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::multiply,
    aten_multiply,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::multiply(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::multiply_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::mv, aten_mv, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::mv(Tensor self, Tensor vec) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& vec = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::mv(self, vec);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::mv_out(self, vec, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::mvlgamma,
    aten_mvlgamma,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::mvlgamma(Tensor self, int p) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto p = p_node->Input(1).toInt();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::mvlgamma(self, p);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::mvlgamma_out(self, p, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::rad2deg,
    aten_rad2deg,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::rad2deg(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::rad2deg(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::rad2deg_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::deg2rad,
    aten_deg2rad,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::deg2rad(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::deg2rad(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::deg2rad_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::reciprocal,
    aten_reciprocal,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::reciprocal(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::reciprocal(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::reciprocal_out(out, self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::neg, aten_neg, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::neg(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::neg(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::neg_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::negative,
    aten_negative,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::negative(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::negative(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::negative_out(self, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::round, aten_round, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::round(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::round(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::round_out(out, self);
    };
  }

  if (n->matches(torch::schema(
          "aten::round.decimals(Tensor self, *, int decimals) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto decimals = p_node->Input(1).toInt();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::round(self, decimals);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::round_out(out, self, decimals);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::gelu, aten_gelu, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::gelu(Tensor self, *, str approximate='none') -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto approximate = p_node->Input(1).toStringView();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::gelu(self, approximate);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::gelu_out(out, self, approximate);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::gelu_backward,
    aten_gelu_backward,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& grad_output = p_node->Input(0).toTensor();
          const auto& self = p_node->Input(1).toTensor();
          const auto approximate = p_node->Input(2).toStringView();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) =
                at::cpu::gelu_backward(grad_output, self, approximate);
            return;
          }
          auto& grad_input = p_node->Output(0).toTensor();
          fastResizeToZero(grad_input);
          at::cpu::gelu_backward_out(
              grad_input, grad_output, self, approximate);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::hardshrink,
    aten_hardshrink,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto lambd = p_node->Input(1).toScalar();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::hardshrink(self, lambd);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::hardshrink_out(out, self, lambd);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::hardshrink_backward,
    aten_hardshrink_backward,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& grad_out = p_node->Input(0).toTensor();
          const auto& self = p_node->Input(1).toTensor();
          const auto lambd = p_node->Input(2).toScalar();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) =
                at::cpu::hardshrink_backward(grad_out, self, lambd);
            return;
          }
          auto& grad_input = p_node->Output(0).toTensor();
          fastResizeToZero(grad_input);
          at::cpu::hardshrink_backward_out(grad_input, grad_out, self, lambd);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::rsqrt, aten_rsqrt, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::rsqrt(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::rsqrt(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::rsqrt_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::silu, aten_silu, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::silu(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::silu(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::silu_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::silu_backward,
    aten_silu_backward,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& grad_output = p_node->Input(0).toTensor();
          const auto& self = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::silu_backward(grad_output, self);
            return;
          }
          auto& grad_input = p_node->Output(0).toTensor();
          fastResizeToZero(grad_input);
          at::cpu::silu_backward_out(grad_input, grad_output, self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::mish, aten_mish, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::mish(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::mish(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::mish_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::sin, aten_sin, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::sin(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::sin(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::sin_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::sinc, aten_sinc, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::sinc(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::sinc(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::sinc_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::sinh, aten_sinh, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::sinh(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::sinh(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::sinh_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::_softmax, aten__softmax, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto half_to_float = p_node->Input(2).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::_softmax(self, dim, half_to_float);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::_softmax_out(out, self, dim, half_to_float);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::_softmax_backward_data,
    aten__softmax_backward_data,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, ScalarType input_dtype) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& grad_output = p_node->Input(0).toTensor();
          const auto& output = p_node->Input(1).toTensor();
          const auto dim = p_node->Input(2).toInt();
          const auto input_dtype = p_node->Input(3).toScalarType();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::_softmax_backward_data(
1779 grad_output, output, dim, input_dtype);
1780 return;
1781 }
1782 auto& grad_input = p_node->Output(0).toTensor();
1783 fastResizeToZero(grad_input);
1784 at::cpu::_softmax_backward_data_out(
1785 grad_input, grad_output, output, dim, input_dtype);
1786 };
1787 }
1788 LogAndDumpSchema(n);
1789 return nullptr;
1790 });
1791
__anon5d9c3eb9b102(Node* n) 1792 REGISTER_OPERATOR_FUNCTOR(aten::sqrt, aten_sqrt, [](Node* n) -> SROperator {
1793 if (n->matches(torch::schema("aten::sqrt(Tensor self) -> Tensor"))) {
1794 return [](ProcessedNode* p_node) {
1795 const auto& self = p_node->Input(0).toTensor();
1796 if (p_node->Output(0).isNone()) {
1797 p_node->Output(0) = at::cpu::sqrt(self);
1798 return;
1799 }
1800 auto& out = p_node->Output(0).toTensor();
1801 fastResizeToZero(out);
1802 at::cpu::sqrt_out(out, self);
1803 };
1804 }
1805 LogAndDumpSchema(n);
1806 return nullptr;
1807 });
1808
__anon5d9c3eb9b302(Node* n) 1809 REGISTER_OPERATOR_FUNCTOR(aten::square, aten_square, [](Node* n) -> SROperator {
1810 if (n->matches(torch::schema("aten::square(Tensor self) -> Tensor"))) {
1811 return [](ProcessedNode* p_node) {
1812 const auto& self = p_node->Input(0).toTensor();
1813 if (p_node->Output(0).isNone()) {
1814 p_node->Output(0) = at::native::square(self);
1815 return;
1816 }
1817 auto& out = p_node->Output(0).toTensor();
1818 fastResizeToZero(out);
1819 at::native::square_out(self, out);
1820 };
1821 }
1822 LogAndDumpSchema(n);
1823 return nullptr;
1824 });
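
// Ops resolved through at::cpu:: call the generated structured CPU wrappers,
// whose out overloads take the output first (e.g. sqrt_out(out, self));
// composite ops such as square, which have no at::cpu wrapper, call the
// at::native:: implementation directly, whose out variant takes the output
// last (square_out(self, out)). Both spellings are emitted by the generator.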

REGISTER_OPERATOR_FUNCTOR(aten::prod, aten_prod, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dtype = p_node->Input(1).toOptional<at::ScalarType>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::prod(self, dtype);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::prod_out(self, dtype, out);
    };
  }

  if (n->matches(torch::schema(
          "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto keepdim = p_node->Input(2).toBool();
      const auto dtype = p_node->Input(3).toOptional<at::ScalarType>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::prod(self, dim, keepdim, dtype);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::prod_out(out, self, dim, keepdim, dtype);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});
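
// When an op has several supported overloads (prod above, scatter, lerp, and
// the comparison family below), the functor tries each schema in turn and
// returns a lambda for the first match; if nothing matches, it logs the node
// via LogAndDumpSchema and returns nullptr, in which case the static runtime
// falls back to the op's regular JIT implementation for that node.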

REGISTER_OPERATOR_FUNCTOR(aten::tan, aten_tan, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::tan(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::tan(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::tan_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::threshold, aten_threshold, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto threshold = p_node->Input(1).toScalar();
      const auto value = p_node->Input(2).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::threshold(self, threshold, value);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::threshold_out(out, self, threshold, value);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::threshold_backward,
    aten_threshold_backward,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& grad_output = p_node->Input(0).toTensor();
          const auto& self = p_node->Input(1).toTensor();
          const auto threshold = p_node->Input(2).toScalar();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) =
                at::cpu::threshold_backward(grad_output, self, threshold);
            return;
          }
          auto& grad_input = p_node->Output(0).toTensor();
          fastResizeToZero(grad_input);
          at::cpu::threshold_backward_out(
              grad_input, grad_output, self, threshold);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::trunc, aten_trunc, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::trunc(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::trunc(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::trunc_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::fix, aten_fix, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::fix(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::fix(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::fix_out(self, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::nuclear_norm,
    aten_nuclear_norm,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto keepdim = p_node->Input(1).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::nuclear_norm(self, keepdim);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::nuclear_norm_out(self, keepdim, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::subtract, aten_subtract, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      const auto alpha = p_node->Input(2).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::subtract(self, other, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::subtract_out(self, other, alpha, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::heaviside,
    aten_heaviside,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::heaviside(Tensor self, Tensor values) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& values = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::heaviside(self, values);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::heaviside_out(out, self, values);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::_addmm_activation,
    aten__addmm_activation,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_addmm_activation(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, bool use_gelu=False) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& mat1 = p_node->Input(1).toTensor();
          const auto& mat2 = p_node->Input(2).toTensor();
          const auto beta = p_node->Input(3).toScalar();
          const auto alpha = p_node->Input(4).toScalar();
          const auto use_gelu = p_node->Input(5).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::_addmm_activation(
                self, mat1, mat2, beta, alpha, use_gelu);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::_addmm_activation_out(
              out, self, mat1, mat2, beta, alpha, use_gelu);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });
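
// _addmm_activation fuses beta * self + alpha * (mat1 @ mat2) with an
// activation applied to the result: ReLU by default, or GELU when the
// use_gelu flag is set.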

REGISTER_OPERATOR_FUNCTOR(aten::index_add, aten_index_add, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto& source = p_node->Input(3).toTensor();
      const auto alpha = p_node->Input(4).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::index_add(self, dim, index, source, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::index_add_out(out, self, dim, index, source, alpha);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::scatter, aten_scatter, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto& src = p_node->Input(3).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::scatter(self, dim, index, src);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::scatter_out(out, self, dim, index, src);
    };
  }

  if (n->matches(torch::schema(
          "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto value = p_node->Input(3).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::scatter(self, dim, index, value);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::scatter_out(out, self, dim, index, value);
    };
  }

  if (n->matches(torch::schema(
          "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto& src = p_node->Input(3).toTensor();
      const auto reduce = p_node->Input(4).toStringView();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::scatter(self, dim, index, src, reduce);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::scatter_out(out, self, dim, index, src, reduce);
    };
  }

  if (n->matches(torch::schema(
          "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto value = p_node->Input(3).toScalar();
      const auto reduce = p_node->Input(4).toStringView();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::scatter(self, dim, index, value, reduce);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::scatter_out(out, self, dim, index, value, reduce);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});
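
// String arguments such as reduce are read with toStringView(), which
// borrows a view of the IValue's string payload instead of copying it; the
// view remains valid because the ProcessedNode keeps its inputs alive for
// the duration of the call.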

REGISTER_OPERATOR_FUNCTOR(aten::scatter_add, aten_scatter_add, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto& src = p_node->Input(3).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::scatter_add(self, dim, index, src);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::scatter_add_out(out, self, dim, index, src);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::scatter_reduce,
    aten_scatter_reduce,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto dim = p_node->Input(1).toInt();
          const auto& index = p_node->Input(2).toTensor();
          const auto& src = p_node->Input(3).toTensor();
          const auto reduce = p_node->Input(4).toStringView();
          const auto include_self = p_node->Input(5).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::scatter_reduce(
                self, dim, index, src, reduce, include_self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::scatter_reduce_out(
              out, self, dim, index, src, reduce, include_self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::eq, aten_eq, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto other = p_node->Input(1).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::eq(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::eq_out(out, self, other);
    };
  }

  if (n->matches(torch::schema(
          "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::eq(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::eq_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::bitwise_and,
    aten_bitwise_and,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::bitwise_and(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::bitwise_and_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::bitwise_or,
    aten_bitwise_or,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::bitwise_or(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::bitwise_or_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::bitwise_xor,
    aten_bitwise_xor,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::bitwise_xor(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::bitwise_xor_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::bitwise_left_shift,
    aten_bitwise_left_shift,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::bitwise_left_shift(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::bitwise_left_shift_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::bitwise_right_shift,
    aten_bitwise_right_shift,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::bitwise_right_shift(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::bitwise_right_shift_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::tril, aten_tril, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::tril(Tensor self, int diagonal=0) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto diagonal = p_node->Input(1).toInt();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::tril(self, diagonal);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::tril_out(out, self, diagonal);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::triu, aten_triu, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::triu(Tensor self, int diagonal=0) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto diagonal = p_node->Input(1).toInt();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::triu(self, diagonal);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::triu_out(out, self, diagonal);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::digamma,
    aten_digamma,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::digamma(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::digamma(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::digamma_out(out, self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::lerp, aten_lerp, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& end = p_node->Input(1).toTensor();
      const auto weight = p_node->Input(2).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::lerp(self, end, weight);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::lerp_out(out, self, end, weight);
    };
  }

  if (n->matches(torch::schema(
          "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& end = p_node->Input(1).toTensor();
      const auto& weight = p_node->Input(2).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::lerp(self, end, weight);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::lerp_out(out, self, end, weight);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::addbmm, aten_addbmm, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& batch1 = p_node->Input(1).toTensor();
      const auto& batch2 = p_node->Input(2).toTensor();
      const auto beta = p_node->Input(3).toScalar();
      const auto alpha = p_node->Input(4).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) =
            at::native::addbmm(self, batch1, batch2, beta, alpha);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::addbmm_out(self, batch1, batch2, beta, alpha, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::diag, aten_diag, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::diag(Tensor self, int diagonal=0) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto diagonal = p_node->Input(1).toInt();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::diag(self, diagonal);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::diag_out(self, diagonal, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::cross, aten_cross, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      const auto dim = p_node->Input(2).toOptional<int64_t>();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::cross(self, other, dim);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::cross_out(self, other, dim, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::ne, aten_ne, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto other = p_node->Input(1).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::ne(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::ne_out(out, self, other);
    };
  }

  if (n->matches(torch::schema(
          "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::ne(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::ne_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::ge, aten_ge, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto other = p_node->Input(1).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::ge(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::ge_out(out, self, other);
    };
  }

  if (n->matches(torch::schema(
          "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::ge(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::ge_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::le, aten_le, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::le.Scalar(Tensor self, Scalar other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto other = p_node->Input(1).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::le(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::le_out(out, self, other);
    };
  }

  if (n->matches(torch::schema(
          "aten::le.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::le(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::le_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::gt, aten_gt, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto other = p_node->Input(1).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::gt(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::gt_out(out, self, other);
    };
  }

  if (n->matches(torch::schema(
          "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::gt(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::gt_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::lt, aten_lt, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto other = p_node->Input(1).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::lt(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::lt_out(out, self, other);
    };
  }

  if (n->matches(torch::schema(
          "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::lt(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::lt_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});
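
// The comparison family (eq, ne, ge, le, gt, lt above) shares one shape of
// functor: a Scalar overload and a Tensor overload per op, each producing a
// bool tensor, so the reused output buffer holds a bool payload rather than
// the input dtype.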

REGISTER_OPERATOR_FUNCTOR(aten::take, aten_take, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::take(Tensor self, Tensor index) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& index = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::take(self, index);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::take_out(self, index, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::take_along_dim,
    aten_take_along_dim,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& indices = p_node->Input(1).toTensor();
          const auto dim = p_node->Input(2).toOptional<int64_t>();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::take_along_dim(self, indices, dim);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::take_along_dim_out(self, indices, dim, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::masked_select,
    aten_masked_select,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::masked_select(Tensor self, Tensor mask) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& mask = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::masked_select_cpu(self, mask);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::masked_select_out_cpu(self, mask, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::nonzero_static,
    aten_nonzero_static,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto size = p_node->Input(1).toInt();
          const auto fill_value = p_node->Input(2).toInt();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) =
                at::native::nonzero_static_cpu(self, size, fill_value);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::nonzero_static_out_cpu(self, size, fill_value, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });
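
// masked_select and nonzero_static bind the CPU-specific native kernels
// (note the _cpu suffix) directly rather than a dispatch wrapper;
// nonzero_static in particular pads its data-dependent result out to a
// caller-supplied size with fill_value, which keeps the output shape static
// for this runtime.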

REGISTER_OPERATOR_FUNCTOR(aten::gather, aten_gather, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dim = p_node->Input(1).toInt();
      const auto& index = p_node->Input(2).toTensor();
      const auto sparse_grad = p_node->Input(3).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::gather(self, dim, index, sparse_grad);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::gather_out(out, self, dim, index, sparse_grad);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::addcmul, aten_addcmul, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& tensor1 = p_node->Input(1).toTensor();
      const auto& tensor2 = p_node->Input(2).toTensor();
      const auto value = p_node->Input(3).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::addcmul(self, tensor1, tensor2, value);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::addcmul_out(out, self, tensor1, tensor2, value);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::addcdiv, aten_addcdiv, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& tensor1 = p_node->Input(1).toTensor();
      const auto& tensor2 = p_node->Input(2).toTensor();
      const auto value = p_node->Input(3).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::addcdiv(self, tensor1, tensor2, value);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::addcdiv_out(out, self, tensor1, tensor2, value);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::linalg_solve_triangular,
    aten_linalg_solve_triangular,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& B = p_node->Input(1).toTensor();
          const auto upper = p_node->Input(2).toBool();
          const auto left = p_node->Input(3).toBool();
          const auto unitriangular = p_node->Input(4).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::linalg_solve_triangular(
                self, B, upper, left, unitriangular);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::linalg_solve_triangular_out(
              self, B, upper, left, unitriangular, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::cholesky_solve,
    aten_cholesky_solve,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& input2 = p_node->Input(1).toTensor();
          const auto upper = p_node->Input(2).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::cholesky_solve(self, input2, upper);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::cholesky_solve_out(self, input2, upper, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::cholesky_inverse,
    aten_cholesky_inverse,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto upper = p_node->Input(1).toBool();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::cholesky_inverse(self, upper);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::cholesky_inverse_out(self, upper, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::orgqr, aten_orgqr, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::orgqr(Tensor self, Tensor input2) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& input2 = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::orgqr(self, input2);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::orgqr_out(self, input2, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::ormqr, aten_ormqr, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& input2 = p_node->Input(1).toTensor();
      const auto& input3 = p_node->Input(2).toTensor();
      const auto left = p_node->Input(3).toBool();
      const auto transpose = p_node->Input(4).toBool();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) =
            at::native::ormqr(self, input2, input3, left, transpose);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::ormqr_out(self, input2, input3, left, transpose, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::lgamma, aten_lgamma, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::lgamma(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::lgamma(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::lgamma_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::polygamma,
    aten_polygamma,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::polygamma(int n, Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto n = p_node->Input(0).toInt();
          const auto& self = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::polygamma(n, self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::polygamma_out(out, n, self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });
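
// polygamma is the one op in this stretch whose tensor is not the first
// argument: Input(0) is the integer order n of the derivative and Input(1)
// is the tensor, mirroring the aten::polygamma(int n, Tensor self) schema.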

REGISTER_OPERATOR_FUNCTOR(aten::erfinv, aten_erfinv, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::erfinv(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::erfinv(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::erfinv_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::i0, aten_i0, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::i0(Tensor self) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::i0(self);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::i0_out(out, self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::signbit,
    aten_signbit,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema("aten::signbit(Tensor self) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::signbit(self);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::signbit_out(out, self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::atan2, aten_atan2, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::atan2(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::atan2(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::atan2_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::arctan2,
    aten_arctan2,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::arctan2(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::native::arctan2(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::native::arctan2_out(self, other, out);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::histc, aten_histc, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto bins = p_node->Input(1).toInt();
      const auto min = p_node->Input(2).toScalar();
      const auto max = p_node->Input(3).toScalar();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::histogram_histc(self, bins, min, max);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::histogram_histc_out(self, bins, min, max, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::hypot, aten_hypot, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::hypot(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::hypot(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::hypot_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::igamma, aten_igamma, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::igamma(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::igamma(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::igamma_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::igammac,
    aten_igammac,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::igammac(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::igammac(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::igammac_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::nextafter,
    aten_nextafter,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::nextafter(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::nextafter(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::nextafter_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::fmin, aten_fmin, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::fmin(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::fmin(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::fmin_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(aten::fmax, aten_fmax, [](Node* n) -> SROperator {
  if (n->matches(
          torch::schema("aten::fmax(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::cpu::fmax(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::cpu::fmax_out(out, self, other);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_OPERATOR_FUNCTOR(
    aten::maximum,
    aten_maximum,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::maximum(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::maximum(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::maximum_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(
    aten::minimum,
    aten_minimum,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::minimum(Tensor self, Tensor other) -> Tensor"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          if (p_node->Output(0).isNone()) {
            p_node->Output(0) = at::cpu::minimum(self, other);
            return;
          }
          auto& out = p_node->Output(0).toTensor();
          fastResizeToZero(out);
          at::cpu::minimum_out(out, self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_OPERATOR_FUNCTOR(aten::min, aten_min, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::min.other(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        p_node->Output(0) = at::native::min(self, other);
        return;
      }
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::min_out(self, other, out);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});
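
// Only the binary min.other overload is handled here; other aten::min
// overloads (e.g. the dim reduction returning values and indices) do not
// match, take the LogAndDumpSchema / nullptr path, and are left to the
// fallback executor.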
3267
__anon5d9c3eb913a02(Node* n) 3268 REGISTER_OPERATOR_FUNCTOR(aten::quantile, aten_quantile, [](Node* n) -> SROperator {
3269 if (n->matches(torch::schema(
3270 "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor"))) {
3271 return [](ProcessedNode* p_node) {
3272 const auto& self = p_node->Input(0).toTensor();
3273 const auto& q = p_node->Input(1).toTensor();
3274 const auto dim = p_node->Input(2).toOptional<int64_t>();
3275 const auto keepdim = p_node->Input(3).toBool();
3276 const auto interpolation = p_node->Input(4).toStringView();
3277 if (p_node->Output(0).isNone()) {
3278 p_node->Output(0) =
3279 at::native::quantile(self, q, dim, keepdim, interpolation);
3280 return;
3281 }
3282 auto& out = p_node->Output(0).toTensor();
3283 fastResizeToZero(out);
3284 at::native::quantile_out(self, q, dim, keepdim, interpolation, out);
3285 };
3286 }
3287 LogAndDumpSchema(n);
3288 return nullptr;
3289 });
3290
3291 REGISTER_OPERATOR_FUNCTOR(aten::nanquantile, aten_nanquantile, [](Node* n) -> SROperator {
3292 if (n->matches(torch::schema(
3293 "aten::nanquantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor"))) {
3294 return [](ProcessedNode* p_node) {
3295 const auto& self = p_node->Input(0).toTensor();
3296 const auto& q = p_node->Input(1).toTensor();
3297 const auto dim = p_node->Input(2).toOptional<int64_t>();
3298 const auto keepdim = p_node->Input(3).toBool();
3299 const auto interpolation = p_node->Input(4).toStringView();
3300 if (p_node->Output(0).isNone()) {
3301 p_node->Output(0) =
3302 at::native::nanquantile(self, q, dim, keepdim, interpolation);
3303 return;
3304 }
3305 auto& out = p_node->Output(0).toTensor();
3306 fastResizeToZero(out);
3307 at::native::nanquantile_out(self, q, dim, keepdim, interpolation, out);
3308 };
3309 }
3310 LogAndDumpSchema(n);
3311 return nullptr;
3312 });
3313
3314 REGISTER_OPERATOR_FUNCTOR(aten::msort, aten_msort, [](Node* n) -> SROperator {
3315 if (n->matches(torch::schema("aten::msort(Tensor self) -> Tensor"))) {
3316 return [](ProcessedNode* p_node) {
3317 const auto& self = p_node->Input(0).toTensor();
3318 if (p_node->Output(0).isNone()) {
3319 p_node->Output(0) = at::native::msort(self);
3320 return;
3321 }
3322 auto& out = p_node->Output(0).toTensor();
3323 fastResizeToZero(out);
3324 at::native::msort_out(self, out);
3325 };
3326 }
3327 LogAndDumpSchema(n);
3328 return nullptr;
3329 });
3330
3331 REGISTER_OPERATOR_FUNCTOR(aten::renorm, aten_renorm, [](Node* n) -> SROperator {
3332 if (n->matches(torch::schema(
3333 "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor"))) {
3334 return [](ProcessedNode* p_node) {
3335 const auto& self = p_node->Input(0).toTensor();
3336 const auto p = p_node->Input(1).toScalar();
3337 const auto dim = p_node->Input(2).toInt();
3338 const auto maxnorm = p_node->Input(3).toScalar();
3339 if (p_node->Output(0).isNone()) {
3340 p_node->Output(0) = at::cpu::renorm(self, p, dim, maxnorm);
3341 return;
3342 }
3343 auto& out = p_node->Output(0).toTensor();
3344 fastResizeToZero(out);
3345 at::cpu::renorm_out(out, self, p, dim, maxnorm);
3346 };
3347 }
3348 LogAndDumpSchema(n);
3349 return nullptr;
3350 });
3351
3352 REGISTER_OPERATOR_FUNCTOR(
3353 aten::_convert_indices_from_coo_to_csr,
3354 aten__convert_indices_from_coo_to_csr,
3355     [](Node* n) -> SROperator {
3356 if (n->matches(torch::schema(
3357 "aten::_convert_indices_from_coo_to_csr(Tensor self, int size, *, bool out_int32=False) -> Tensor"))) {
3358 return [](ProcessedNode* p_node) {
3359 const auto& self = p_node->Input(0).toTensor();
3360 const auto size = p_node->Input(1).toInt();
3361 const auto out_int32 = p_node->Input(2).toBool();
3362 if (p_node->Output(0).isNone()) {
3363 p_node->Output(0) = at::cpu::_convert_indices_from_coo_to_csr(
3364 self, size, out_int32);
3365 return;
3366 }
3367 auto& out = p_node->Output(0).toTensor();
3368 fastResizeToZero(out);
3369 at::cpu::_convert_indices_from_coo_to_csr_out(
3370 out, self, size, out_int32);
3371 };
3372 }
3373 LogAndDumpSchema(n);
3374 return nullptr;
3375 });
3376
3377 REGISTER_OPERATOR_FUNCTOR(
3378 aten::_convert_indices_from_csr_to_coo,
3379 aten__convert_indices_from_csr_to_coo,
3380     [](Node* n) -> SROperator {
3381 if (n->matches(torch::schema(
3382 "aten::_convert_indices_from_csr_to_coo(Tensor crow_indices, Tensor col_indices, *, bool out_int32=False, bool transpose=False) -> Tensor"))) {
3383 return [](ProcessedNode* p_node) {
3384 const auto& crow_indices = p_node->Input(0).toTensor();
3385 const auto& col_indices = p_node->Input(1).toTensor();
3386 const auto out_int32 = p_node->Input(2).toBool();
3387 const auto transpose = p_node->Input(3).toBool();
3388 if (p_node->Output(0).isNone()) {
3389 p_node->Output(0) = at::cpu::_convert_indices_from_csr_to_coo(
3390 crow_indices, col_indices, out_int32, transpose);
3391 return;
3392 }
3393 auto& out = p_node->Output(0).toTensor();
3394 fastResizeToZero(out);
3395 at::cpu::_convert_indices_from_csr_to_coo_out(
3396 out, crow_indices, col_indices, out_int32, transpose);
3397 };
3398 }
3399 LogAndDumpSchema(n);
3400 return nullptr;
3401 });
3402
3403 REGISTER_OPERATOR_FUNCTOR(aten::mse_loss, aten_mse_loss, [](Node* n) -> SROperator {
3404 if (n->matches(torch::schema(
3405 "aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor"))) {
3406 return [](ProcessedNode* p_node) {
3407 const auto& self = p_node->Input(0).toTensor();
3408 const auto& target = p_node->Input(1).toTensor();
3409 const auto reduction = p_node->Input(2).toInt();
3410 if (p_node->Output(0).isNone()) {
3411 p_node->Output(0) = at::cpu::mse_loss(self, target, reduction);
3412 return;
3413 }
3414 auto& out = p_node->Output(0).toTensor();
3415 fastResizeToZero(out);
3416 at::cpu::mse_loss_out(out, self, target, reduction);
3417 };
3418 }
3419 LogAndDumpSchema(n);
3420 return nullptr;
3421 });
3422
3423 REGISTER_OPERATOR_FUNCTOR(
3424 aten::multi_margin_loss,
3425 aten_multi_margin_loss,
3426     [](Node* n) -> SROperator {
3427 if (n->matches(torch::schema(
3428 "aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor"))) {
3429 return [](ProcessedNode* p_node) {
3430 const auto& self = p_node->Input(0).toTensor();
3431 const auto& target = p_node->Input(1).toTensor();
3432 const auto p = p_node->Input(2).toScalar();
3433 const auto margin = p_node->Input(3).toScalar();
3434 const auto weight = p_node->Input(4).toOptional<at::Tensor>();
3435 const auto reduction = p_node->Input(5).toInt();
3436 if (p_node->Output(0).isNone()) {
3437 p_node->Output(0) = at::native::multi_margin_loss_cpu(
3438 self, target, p, margin, weight, reduction);
3439 return;
3440 }
3441 auto& out = p_node->Output(0).toTensor();
3442 fastResizeToZero(out);
3443 at::native::multi_margin_loss_cpu_out(
3444 self, target, p, margin, weight, reduction, out);
3445 };
3446 }
3447 LogAndDumpSchema(n);
3448 return nullptr;
3449 });
3450
3451 REGISTER_OPERATOR_FUNCTOR(
3452 aten::multilabel_margin_loss,
3453 aten_multilabel_margin_loss,
3454     [](Node* n) -> SROperator {
3455 if (n->matches(torch::schema(
3456 "aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor"))) {
3457 return [](ProcessedNode* p_node) {
3458 const auto& self = p_node->Input(0).toTensor();
3459 const auto& target = p_node->Input(1).toTensor();
3460 const auto reduction = p_node->Input(2).toInt();
3461 if (p_node->Output(0).isNone()) {
3462 p_node->Output(0) =
3463 at::native::multilabel_margin_loss(self, target, reduction);
3464 return;
3465 }
3466 auto& out = p_node->Output(0).toTensor();
3467 fastResizeToZero(out);
3468 at::native::multilabel_margin_loss_out(self, target, reduction, out);
3469 };
3470 }
3471 LogAndDumpSchema(n);
3472 return nullptr;
3473 });
3474
3475 REGISTER_OPERATOR_FUNCTOR(
3476 aten::soft_margin_loss,
3477 aten_soft_margin_loss,
3478     [](Node* n) -> SROperator {
3479 if (n->matches(torch::schema(
3480 "aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor"))) {
3481 return [](ProcessedNode* p_node) {
3482 const auto& self = p_node->Input(0).toTensor();
3483 const auto& target = p_node->Input(1).toTensor();
3484 const auto reduction = p_node->Input(2).toInt();
3485 if (p_node->Output(0).isNone()) {
3486 p_node->Output(0) =
3487 at::native::soft_margin_loss(self, target, reduction);
3488 return;
3489 }
3490 auto& out = p_node->Output(0).toTensor();
3491 fastResizeToZero(out);
3492 at::native::soft_margin_loss_out(self, target, reduction, out);
3493 };
3494 }
3495 LogAndDumpSchema(n);
3496 return nullptr;
3497 });
3498
3499 REGISTER_OPERATOR_FUNCTOR(aten::elu, aten_elu, [](Node* n) -> SROperator {
3500 if (n->matches(torch::schema(
3501 "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor"))) {
3502 return [](ProcessedNode* p_node) {
3503 const auto& self = p_node->Input(0).toTensor();
3504 const auto alpha = p_node->Input(1).toScalar();
3505 const auto scale = p_node->Input(2).toScalar();
3506 const auto input_scale = p_node->Input(3).toScalar();
3507 if (p_node->Output(0).isNone()) {
3508 p_node->Output(0) = at::cpu::elu(self, alpha, scale, input_scale);
3509 return;
3510 }
3511 auto& out = p_node->Output(0).toTensor();
3512 fastResizeToZero(out);
3513 at::cpu::elu_out(out, self, alpha, scale, input_scale);
3514 };
3515 }
3516 LogAndDumpSchema(n);
3517 return nullptr;
3518 });
3519
3520 REGISTER_OPERATOR_FUNCTOR(
3521 aten::elu_backward,
3522 aten_elu_backward,
3523     [](Node* n) -> SROperator {
3524 if (n->matches(torch::schema(
3525 "aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, bool is_result, Tensor self_or_result) -> Tensor"))) {
3526 return [](ProcessedNode* p_node) {
3527 const auto& grad_output = p_node->Input(0).toTensor();
3528 const auto alpha = p_node->Input(1).toScalar();
3529 const auto scale = p_node->Input(2).toScalar();
3530 const auto input_scale = p_node->Input(3).toScalar();
3531 const auto is_result = p_node->Input(4).toBool();
3532 const auto& self_or_result = p_node->Input(5).toTensor();
3533 if (p_node->Output(0).isNone()) {
3534 p_node->Output(0) = at::cpu::elu_backward(
3535 grad_output,
3536 alpha,
3537 scale,
3538 input_scale,
3539 is_result,
3540 self_or_result);
3541 return;
3542 }
3543 auto& grad_input = p_node->Output(0).toTensor();
3544 fastResizeToZero(grad_input);
3545 at::cpu::elu_backward_out(
3546 grad_input,
3547 grad_output,
3548 alpha,
3549 scale,
3550 input_scale,
3551 is_result,
3552 self_or_result);
3553 };
3554 }
3555 LogAndDumpSchema(n);
3556 return nullptr;
3557 });
3558
3559 REGISTER_OPERATOR_FUNCTOR(aten::glu, aten_glu, [](Node* n) -> SROperator {
3560 if (n->matches(
3561 torch::schema("aten::glu(Tensor self, int dim=-1) -> Tensor"))) {
3562 return [](ProcessedNode* p_node) {
3563 const auto& self = p_node->Input(0).toTensor();
3564 const auto dim = p_node->Input(1).toInt();
3565 if (p_node->Output(0).isNone()) {
3566 p_node->Output(0) = at::cpu::glu(self, dim);
3567 return;
3568 }
3569 auto& out = p_node->Output(0).toTensor();
3570 fastResizeToZero(out);
3571 at::cpu::glu_out(out, self, dim);
3572 };
3573 }
3574 LogAndDumpSchema(n);
3575 return nullptr;
3576 });
3577
3578 REGISTER_OPERATOR_FUNCTOR(
3579 aten::hardsigmoid,
3580 aten_hardsigmoid,
3581     [](Node* n) -> SROperator {
3582 if (n->matches(
3583 torch::schema("aten::hardsigmoid(Tensor self) -> Tensor"))) {
3584 return [](ProcessedNode* p_node) {
3585 const auto& self = p_node->Input(0).toTensor();
3586 if (p_node->Output(0).isNone()) {
3587 p_node->Output(0) = at::cpu::hardsigmoid(self);
3588 return;
3589 }
3590 auto& out = p_node->Output(0).toTensor();
3591 fastResizeToZero(out);
3592 at::cpu::hardsigmoid_out(out, self);
3593 };
3594 }
3595 LogAndDumpSchema(n);
3596 return nullptr;
3597 });
3598
3599 REGISTER_OPERATOR_FUNCTOR(
3600 aten::hardsigmoid_backward,
3601 aten_hardsigmoid_backward,
3602     [](Node* n) -> SROperator {
3603 if (n->matches(torch::schema(
3604 "aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor"))) {
3605 return [](ProcessedNode* p_node) {
3606 const auto& grad_output = p_node->Input(0).toTensor();
3607 const auto& self = p_node->Input(1).toTensor();
3608 if (p_node->Output(0).isNone()) {
3609 p_node->Output(0) =
3610 at::cpu::hardsigmoid_backward(grad_output, self);
3611 return;
3612 }
3613 auto& grad_input = p_node->Output(0).toTensor();
3614 fastResizeToZero(grad_input);
3615 at::cpu::hardsigmoid_backward_out(grad_input, grad_output, self);
3616 };
3617 }
3618 LogAndDumpSchema(n);
3619 return nullptr;
3620 });
3621
3622 REGISTER_OPERATOR_FUNCTOR(aten::hardtanh, aten_hardtanh, [](Node* n) -> SROperator {
3623 if (n->matches(torch::schema(
3624 "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor"))) {
3625 return [](ProcessedNode* p_node) {
3626 const auto& self = p_node->Input(0).toTensor();
3627 const auto min_val = p_node->Input(1).toScalar();
3628 const auto max_val = p_node->Input(2).toScalar();
3629 if (p_node->Output(0).isNone()) {
3630 p_node->Output(0) = at::native::hardtanh(self, min_val, max_val);
3631 return;
3632 }
3633 auto& out = p_node->Output(0).toTensor();
3634 fastResizeToZero(out);
3635 at::native::hardtanh_out(self, min_val, max_val, out);
3636 };
3637 }
3638 LogAndDumpSchema(n);
3639 return nullptr;
3640 });
3641
3642 REGISTER_OPERATOR_FUNCTOR(
3643 aten::hardswish,
3644 aten_hardswish,
3645     [](Node* n) -> SROperator {
3646 if (n->matches(torch::schema("aten::hardswish(Tensor self) -> Tensor"))) {
3647 return [](ProcessedNode* p_node) {
3648 const auto& self = p_node->Input(0).toTensor();
3649 if (p_node->Output(0).isNone()) {
3650 p_node->Output(0) = at::native::hardswish(self);
3651 return;
3652 }
3653 auto& out = p_node->Output(0).toTensor();
3654 fastResizeToZero(out);
3655 at::native::hardswish_out(self, out);
3656 };
3657 }
3658 LogAndDumpSchema(n);
3659 return nullptr;
3660 });
3661
3662 REGISTER_OPERATOR_FUNCTOR(
3663 aten::leaky_relu_backward,
3664 aten_leaky_relu_backward,
3665     [](Node* n) -> SROperator {
3666 if (n->matches(torch::schema(
3667 "aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor"))) {
3668 return [](ProcessedNode* p_node) {
3669 const auto& grad_output = p_node->Input(0).toTensor();
3670 const auto& self = p_node->Input(1).toTensor();
3671 const auto negative_slope = p_node->Input(2).toScalar();
3672 const auto self_is_result = p_node->Input(3).toBool();
3673 if (p_node->Output(0).isNone()) {
3674 p_node->Output(0) = at::cpu::leaky_relu_backward(
3675 grad_output, self, negative_slope, self_is_result);
3676 return;
3677 }
3678 auto& grad_input = p_node->Output(0).toTensor();
3679 fastResizeToZero(grad_input);
3680 at::cpu::leaky_relu_backward_out(
3681 grad_input, grad_output, self, negative_slope, self_is_result);
3682 };
3683 }
3684 LogAndDumpSchema(n);
3685 return nullptr;
3686 });
3687
3688 REGISTER_OPERATOR_FUNCTOR(
3689 aten::log_sigmoid,
3690 aten_log_sigmoid,
3691     [](Node* n) -> SROperator {
3692 if (n->matches(
3693 torch::schema("aten::log_sigmoid(Tensor self) -> Tensor"))) {
3694 return [](ProcessedNode* p_node) {
3695 const auto& self = p_node->Input(0).toTensor();
3696 if (p_node->Output(0).isNone()) {
3697 p_node->Output(0) = at::native::log_sigmoid(self);
3698 return;
3699 }
3700 auto& out = p_node->Output(0).toTensor();
3701 fastResizeToZero(out);
3702 at::native::log_sigmoid_out(self, out);
3703 };
3704 }
3705 LogAndDumpSchema(n);
3706 return nullptr;
3707 });
3708
3709 REGISTER_OPERATOR_FUNCTOR(aten::softplus, aten_softplus, [](Node* n) -> SROperator {
3710 if (n->matches(torch::schema(
3711 "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor"))) {
3712 return [](ProcessedNode* p_node) {
3713 const auto& self = p_node->Input(0).toTensor();
3714 const auto beta = p_node->Input(1).toScalar();
3715 const auto threshold = p_node->Input(2).toScalar();
3716 if (p_node->Output(0).isNone()) {
3717 p_node->Output(0) = at::cpu::softplus(self, beta, threshold);
3718 return;
3719 }
3720 auto& out = p_node->Output(0).toTensor();
3721 fastResizeToZero(out);
3722 at::cpu::softplus_out(out, self, beta, threshold);
3723 };
3724 }
3725 LogAndDumpSchema(n);
3726 return nullptr;
3727 });
3728
3729 REGISTER_OPERATOR_FUNCTOR(
3730 aten::softplus_backward,
3731 aten_softplus_backward,
3732     [](Node* n) -> SROperator {
3733 if (n->matches(torch::schema(
3734 "aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold) -> Tensor"))) {
3735 return [](ProcessedNode* p_node) {
3736 const auto& grad_output = p_node->Input(0).toTensor();
3737 const auto& self = p_node->Input(1).toTensor();
3738 const auto beta = p_node->Input(2).toScalar();
3739 const auto threshold = p_node->Input(3).toScalar();
3740 if (p_node->Output(0).isNone()) {
3741 p_node->Output(0) =
3742 at::cpu::softplus_backward(grad_output, self, beta, threshold);
3743 return;
3744 }
3745 auto& grad_input = p_node->Output(0).toTensor();
3746 fastResizeToZero(grad_input);
3747 at::cpu::softplus_backward_out(
3748 grad_input, grad_output, self, beta, threshold);
3749 };
3750 }
3751 LogAndDumpSchema(n);
3752 return nullptr;
3753 });
3754
3755 REGISTER_OPERATOR_FUNCTOR(
3756 aten::softshrink,
3757 aten_softshrink,
3758     [](Node* n) -> SROperator {
3759 if (n->matches(torch::schema(
3760 "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor"))) {
3761 return [](ProcessedNode* p_node) {
3762 const auto& self = p_node->Input(0).toTensor();
3763 const auto lambd = p_node->Input(1).toScalar();
3764 if (p_node->Output(0).isNone()) {
3765 p_node->Output(0) = at::cpu::softshrink(self, lambd);
3766 return;
3767 }
3768 auto& out = p_node->Output(0).toTensor();
3769 fastResizeToZero(out);
3770 at::cpu::softshrink_out(out, self, lambd);
3771 };
3772 }
3773 LogAndDumpSchema(n);
3774 return nullptr;
3775 });
3776
3777 REGISTER_OPERATOR_FUNCTOR(
3778 aten::softshrink_backward,
3779 aten_softshrink_backward,
3780     [](Node* n) -> SROperator {
3781 if (n->matches(torch::schema(
3782 "aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor"))) {
3783 return [](ProcessedNode* p_node) {
3784 const auto& grad_output = p_node->Input(0).toTensor();
3785 const auto& self = p_node->Input(1).toTensor();
3786 const auto lambd = p_node->Input(2).toScalar();
3787 if (p_node->Output(0).isNone()) {
3788 p_node->Output(0) =
3789 at::cpu::softshrink_backward(grad_output, self, lambd);
3790 return;
3791 }
3792 auto& grad_input = p_node->Output(0).toTensor();
3793 fastResizeToZero(grad_input);
3794 at::cpu::softshrink_backward_out(
3795 grad_input, grad_output, self, lambd);
3796 };
3797 }
3798 LogAndDumpSchema(n);
3799 return nullptr;
3800 });
3801
3802 REGISTER_OPERATOR_FUNCTOR(
3803 aten::adaptive_max_pool2d_backward,
3804 aten_adaptive_max_pool2d_backward,
3805     [](Node* n) -> SROperator {
3806 if (n->matches(torch::schema(
3807 "aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor"))) {
3808 return [](ProcessedNode* p_node) {
3809 const auto& grad_output = p_node->Input(0).toTensor();
3810 const auto& self = p_node->Input(1).toTensor();
3811 const auto& indices = p_node->Input(2).toTensor();
3812 if (p_node->Output(0).isNone()) {
3813 p_node->Output(0) = at::cpu::adaptive_max_pool2d_backward(
3814 grad_output, self, indices);
3815 return;
3816 }
3817 auto& grad_input = p_node->Output(0).toTensor();
3818 fastResizeToZero(grad_input);
3819 at::cpu::adaptive_max_pool2d_backward_out(
3820 grad_input, grad_output, self, indices);
3821 };
3822 }
3823 LogAndDumpSchema(n);
3824 return nullptr;
3825 });
3826
3827 REGISTER_OPERATOR_FUNCTOR(
3828 aten::adaptive_max_pool3d_backward,
3829 aten_adaptive_max_pool3d_backward,
3830     [](Node* n) -> SROperator {
3831 if (n->matches(torch::schema(
3832 "aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor"))) {
3833 return [](ProcessedNode* p_node) {
3834 const auto& grad_output = p_node->Input(0).toTensor();
3835 const auto& self = p_node->Input(1).toTensor();
3836 const auto& indices = p_node->Input(2).toTensor();
3837 if (p_node->Output(0).isNone()) {
3838 p_node->Output(0) = at::cpu::adaptive_max_pool3d_backward(
3839 grad_output, self, indices);
3840 return;
3841 }
3842 auto& grad_input = p_node->Output(0).toTensor();
3843 fastResizeToZero(grad_input);
3844 at::cpu::adaptive_max_pool3d_backward_out(
3845 grad_input, grad_output, self, indices);
3846 };
3847 }
3848 LogAndDumpSchema(n);
3849 return nullptr;
3850 });
3851
3852 REGISTER_OPERATOR_FUNCTOR(
3853 aten::sigmoid_backward,
3854 aten_sigmoid_backward,
3855     [](Node* n) -> SROperator {
3856 if (n->matches(torch::schema(
3857 "aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor"))) {
3858 return [](ProcessedNode* p_node) {
3859 const auto& grad_output = p_node->Input(0).toTensor();
3860 const auto& output = p_node->Input(1).toTensor();
3861 if (p_node->Output(0).isNone()) {
3862 p_node->Output(0) = at::cpu::sigmoid_backward(grad_output, output);
3863 return;
3864 }
3865 auto& grad_input = p_node->Output(0).toTensor();
3866 fastResizeToZero(grad_input);
3867 at::cpu::sigmoid_backward_out(grad_input, grad_output, output);
3868 };
3869 }
3870 LogAndDumpSchema(n);
3871 return nullptr;
3872 });
3873
3874 REGISTER_OPERATOR_FUNCTOR(
3875 aten::tanh_backward,
3876 aten_tanh_backward,
3877     [](Node* n) -> SROperator {
3878 if (n->matches(torch::schema(
3879 "aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor"))) {
3880 return [](ProcessedNode* p_node) {
3881 const auto& grad_output = p_node->Input(0).toTensor();
3882 const auto& output = p_node->Input(1).toTensor();
3883 if (p_node->Output(0).isNone()) {
3884 p_node->Output(0) = at::cpu::tanh_backward(grad_output, output);
3885 return;
3886 }
3887 auto& grad_input = p_node->Output(0).toTensor();
3888 fastResizeToZero(grad_input);
3889 at::cpu::tanh_backward_out(grad_input, grad_output, output);
3890 };
3891 }
3892 LogAndDumpSchema(n);
3893 return nullptr;
3894 });
3895
3896 REGISTER_OPERATOR_FUNCTOR(
3897 aten::isposinf,
3898 aten_isposinf,
3899     [](Node* n) -> SROperator {
3900 if (n->matches(torch::schema("aten::isposinf(Tensor self) -> Tensor"))) {
3901 return [](ProcessedNode* p_node) {
3902 const auto& self = p_node->Input(0).toTensor();
3903 if (p_node->Output(0).isNone()) {
3904 p_node->Output(0) = at::cpu::isposinf(self);
3905 return;
3906 }
3907 auto& out = p_node->Output(0).toTensor();
3908 fastResizeToZero(out);
3909 at::cpu::isposinf_out(out, self);
3910 };
3911 }
3912 LogAndDumpSchema(n);
3913 return nullptr;
3914 });
3915
3916 REGISTER_OPERATOR_FUNCTOR(
3917 aten::isneginf,
3918 aten_isneginf,
3919     [](Node* n) -> SROperator {
3920 if (n->matches(torch::schema("aten::isneginf(Tensor self) -> Tensor"))) {
3921 return [](ProcessedNode* p_node) {
3922 const auto& self = p_node->Input(0).toTensor();
3923 if (p_node->Output(0).isNone()) {
3924 p_node->Output(0) = at::cpu::isneginf(self);
3925 return;
3926 }
3927 auto& out = p_node->Output(0).toTensor();
3928 fastResizeToZero(out);
3929 at::cpu::isneginf_out(out, self);
3930 };
3931 }
3932 LogAndDumpSchema(n);
3933 return nullptr;
3934 });
3935
3936 REGISTER_OPERATOR_FUNCTOR(
3937 aten::special_entr,
3938 aten_special_entr,
3939     [](Node* n) -> SROperator {
3940 if (n->matches(
3941 torch::schema("aten::special_entr(Tensor self) -> Tensor"))) {
3942 return [](ProcessedNode* p_node) {
3943 const auto& self = p_node->Input(0).toTensor();
3944 if (p_node->Output(0).isNone()) {
3945 p_node->Output(0) = at::cpu::special_entr(self);
3946 return;
3947 }
3948 auto& out = p_node->Output(0).toTensor();
3949 fastResizeToZero(out);
3950 at::cpu::special_entr_out(out, self);
3951 };
3952 }
3953 LogAndDumpSchema(n);
3954 return nullptr;
3955 });
3956
3957 REGISTER_OPERATOR_FUNCTOR(
3958 aten::special_ndtri,
3959 aten_special_ndtri,
3960     [](Node* n) -> SROperator {
3961 if (n->matches(
3962 torch::schema("aten::special_ndtri(Tensor self) -> Tensor"))) {
3963 return [](ProcessedNode* p_node) {
3964 const auto& self = p_node->Input(0).toTensor();
3965 if (p_node->Output(0).isNone()) {
3966 p_node->Output(0) = at::cpu::special_ndtri(self);
3967 return;
3968 }
3969 auto& out = p_node->Output(0).toTensor();
3970 fastResizeToZero(out);
3971 at::cpu::special_ndtri_out(out, self);
3972 };
3973 }
3974 LogAndDumpSchema(n);
3975 return nullptr;
3976 });
3977
3978 REGISTER_OPERATOR_FUNCTOR(
3979 aten::special_log_ndtr,
3980 aten_special_log_ndtr,
3981     [](Node* n) -> SROperator {
3982 if (n->matches(
3983 torch::schema("aten::special_log_ndtr(Tensor self) -> Tensor"))) {
3984 return [](ProcessedNode* p_node) {
3985 const auto& self = p_node->Input(0).toTensor();
3986 if (p_node->Output(0).isNone()) {
3987 p_node->Output(0) = at::cpu::special_log_ndtr(self);
3988 return;
3989 }
3990 auto& out = p_node->Output(0).toTensor();
3991 fastResizeToZero(out);
3992 at::cpu::special_log_ndtr_out(out, self);
3993 };
3994 }
3995 LogAndDumpSchema(n);
3996 return nullptr;
3997 });
3998
3999 REGISTER_OPERATOR_FUNCTOR(
4000 aten::special_expm1,
4001 aten_special_expm1,
4002     [](Node* n) -> SROperator {
4003 if (n->matches(
4004 torch::schema("aten::special_expm1(Tensor self) -> Tensor"))) {
4005 return [](ProcessedNode* p_node) {
4006 const auto& self = p_node->Input(0).toTensor();
4007 if (p_node->Output(0).isNone()) {
4008 p_node->Output(0) = at::native::special_expm1(self);
4009 return;
4010 }
4011 auto& out = p_node->Output(0).toTensor();
4012 fastResizeToZero(out);
4013 at::native::special_expm1_out(self, out);
4014 };
4015 }
4016 LogAndDumpSchema(n);
4017 return nullptr;
4018 });
4019
4020 REGISTER_OPERATOR_FUNCTOR(
4021 aten::special_exp2,
4022 aten_special_exp2,
4023     [](Node* n) -> SROperator {
4024 if (n->matches(
4025 torch::schema("aten::special_exp2(Tensor self) -> Tensor"))) {
4026 return [](ProcessedNode* p_node) {
4027 const auto& self = p_node->Input(0).toTensor();
4028 if (p_node->Output(0).isNone()) {
4029 p_node->Output(0) = at::native::special_exp2(self);
4030 return;
4031 }
4032 auto& out = p_node->Output(0).toTensor();
4033 fastResizeToZero(out);
4034 at::native::special_exp2_out(self, out);
4035 };
4036 }
4037 LogAndDumpSchema(n);
4038 return nullptr;
4039 });
4040
4041 REGISTER_OPERATOR_FUNCTOR(
4042 aten::special_psi,
4043 aten_special_psi,
4044     [](Node* n) -> SROperator {
4045 if (n->matches(
4046 torch::schema("aten::special_psi(Tensor self) -> Tensor"))) {
4047 return [](ProcessedNode* p_node) {
4048 const auto& self = p_node->Input(0).toTensor();
4049 if (p_node->Output(0).isNone()) {
4050 p_node->Output(0) = at::native::special_psi(self);
4051 return;
4052 }
4053 auto& out = p_node->Output(0).toTensor();
4054 fastResizeToZero(out);
4055 at::native::special_psi_out(self, out);
4056 };
4057 }
4058 LogAndDumpSchema(n);
4059 return nullptr;
4060 });
4061
4062 REGISTER_OPERATOR_FUNCTOR(
4063 aten::special_digamma,
4064 aten_special_digamma,
4065     [](Node* n) -> SROperator {
4066 if (n->matches(
4067 torch::schema("aten::special_digamma(Tensor self) -> Tensor"))) {
4068 return [](ProcessedNode* p_node) {
4069 const auto& self = p_node->Input(0).toTensor();
4070 if (p_node->Output(0).isNone()) {
4071 p_node->Output(0) = at::native::special_digamma(self);
4072 return;
4073 }
4074 auto& out = p_node->Output(0).toTensor();
4075 fastResizeToZero(out);
4076 at::native::special_digamma_out(self, out);
4077 };
4078 }
4079 LogAndDumpSchema(n);
4080 return nullptr;
4081 });
4082
4083 REGISTER_OPERATOR_FUNCTOR(
4084 aten::special_gammaln,
4085 aten_special_gammaln,
4086     [](Node* n) -> SROperator {
4087 if (n->matches(
4088 torch::schema("aten::special_gammaln(Tensor self) -> Tensor"))) {
4089 return [](ProcessedNode* p_node) {
4090 const auto& self = p_node->Input(0).toTensor();
4091 if (p_node->Output(0).isNone()) {
4092 p_node->Output(0) = at::native::special_gammaln(self);
4093 return;
4094 }
4095 auto& out = p_node->Output(0).toTensor();
4096 fastResizeToZero(out);
4097 at::native::special_gammaln_out(self, out);
4098 };
4099 }
4100 LogAndDumpSchema(n);
4101 return nullptr;
4102 });
4103
4104 REGISTER_OPERATOR_FUNCTOR(
4105 aten::special_erf,
4106 aten_special_erf,
4107     [](Node* n) -> SROperator {
4108 if (n->matches(
4109 torch::schema("aten::special_erf(Tensor self) -> Tensor"))) {
4110 return [](ProcessedNode* p_node) {
4111 const auto& self = p_node->Input(0).toTensor();
4112 if (p_node->Output(0).isNone()) {
4113 p_node->Output(0) = at::native::special_erf(self);
4114 return;
4115 }
4116 auto& out = p_node->Output(0).toTensor();
4117 fastResizeToZero(out);
4118 at::native::special_erf_out(self, out);
4119 };
4120 }
4121 LogAndDumpSchema(n);
4122 return nullptr;
4123 });
4124
4125 REGISTER_OPERATOR_FUNCTOR(
4126 aten::special_erfc,
4127 aten_special_erfc,
4128     [](Node* n) -> SROperator {
4129 if (n->matches(
4130 torch::schema("aten::special_erfc(Tensor self) -> Tensor"))) {
4131 return [](ProcessedNode* p_node) {
4132 const auto& self = p_node->Input(0).toTensor();
4133 if (p_node->Output(0).isNone()) {
4134 p_node->Output(0) = at::native::special_erfc(self);
4135 return;
4136 }
4137 auto& out = p_node->Output(0).toTensor();
4138 fastResizeToZero(out);
4139 at::native::special_erfc_out(self, out);
4140 };
4141 }
4142 LogAndDumpSchema(n);
4143 return nullptr;
4144 });
4145
4146 REGISTER_OPERATOR_FUNCTOR(
4147 aten::special_erfcx,
4148 aten_special_erfcx,
4149     [](Node* n) -> SROperator {
4150 if (n->matches(
4151 torch::schema("aten::special_erfcx(Tensor self) -> Tensor"))) {
4152 return [](ProcessedNode* p_node) {
4153 const auto& self = p_node->Input(0).toTensor();
4154 if (p_node->Output(0).isNone()) {
4155 p_node->Output(0) = at::cpu::special_erfcx(self);
4156 return;
4157 }
4158 auto& out = p_node->Output(0).toTensor();
4159 fastResizeToZero(out);
4160 at::cpu::special_erfcx_out(out, self);
4161 };
4162 }
4163 LogAndDumpSchema(n);
4164 return nullptr;
4165 });
4166
4167 REGISTER_OPERATOR_FUNCTOR(
4168 aten::special_erfinv,
4169 aten_special_erfinv,
4170     [](Node* n) -> SROperator {
4171 if (n->matches(
4172 torch::schema("aten::special_erfinv(Tensor self) -> Tensor"))) {
4173 return [](ProcessedNode* p_node) {
4174 const auto& self = p_node->Input(0).toTensor();
4175 if (p_node->Output(0).isNone()) {
4176 p_node->Output(0) = at::native::special_erfinv(self);
4177 return;
4178 }
4179 auto& out = p_node->Output(0).toTensor();
4180 fastResizeToZero(out);
4181 at::native::special_erfinv_out(self, out);
4182 };
4183 }
4184 LogAndDumpSchema(n);
4185 return nullptr;
4186 });
4187
4188 REGISTER_OPERATOR_FUNCTOR(
4189 aten::special_ndtr,
4190 aten_special_ndtr,
4191     [](Node* n) -> SROperator {
4192 if (n->matches(
4193 torch::schema("aten::special_ndtr(Tensor self) -> Tensor"))) {
4194 return [](ProcessedNode* p_node) {
4195 const auto& self = p_node->Input(0).toTensor();
4196 if (p_node->Output(0).isNone()) {
4197 p_node->Output(0) = at::native::special_ndtr(self);
4198 return;
4199 }
4200 auto& out = p_node->Output(0).toTensor();
4201 fastResizeToZero(out);
4202 at::native::special_ndtr_out(self, out);
4203 };
4204 }
4205 LogAndDumpSchema(n);
4206 return nullptr;
4207 });
4208
4209 REGISTER_OPERATOR_FUNCTOR(
4210 aten::special_xlog1py,
4211 aten_special_xlog1py,
4212     [](Node* n) -> SROperator {
4213 if (n->matches(torch::schema(
4214 "aten::special_xlog1py(Tensor self, Tensor other) -> Tensor"))) {
4215 return [](ProcessedNode* p_node) {
4216 const auto& self = p_node->Input(0).toTensor();
4217 const auto& other = p_node->Input(1).toTensor();
4218 if (p_node->Output(0).isNone()) {
4219 p_node->Output(0) = at::cpu::special_xlog1py(self, other);
4220 return;
4221 }
4222 auto& out = p_node->Output(0).toTensor();
4223 fastResizeToZero(out);
4224 at::cpu::special_xlog1py_out(out, self, other);
4225 };
4226 }
4227 LogAndDumpSchema(n);
4228 return nullptr;
4229 });
4230
4231 REGISTER_OPERATOR_FUNCTOR(
4232 aten::special_xlogy,
4233 aten_special_xlogy,
4234     [](Node* n) -> SROperator {
4235 if (n->matches(torch::schema(
4236 "aten::special_xlogy(Tensor self, Tensor other) -> Tensor"))) {
4237 return [](ProcessedNode* p_node) {
4238 const auto& self = p_node->Input(0).toTensor();
4239 const auto& other = p_node->Input(1).toTensor();
4240 if (p_node->Output(0).isNone()) {
4241 p_node->Output(0) = at::native::special_xlogy(self, other);
4242 return;
4243 }
4244 auto& out = p_node->Output(0).toTensor();
4245 fastResizeToZero(out);
4246 at::native::special_xlogy_out(self, other, out);
4247 };
4248 }
4249 LogAndDumpSchema(n);
4250 return nullptr;
4251 });
4252
4253 REGISTER_OPERATOR_FUNCTOR(
4254 aten::special_zeta,
4255 aten_special_zeta,
4256     [](Node* n) -> SROperator {
4257 if (n->matches(torch::schema(
4258 "aten::special_zeta(Tensor self, Tensor other) -> Tensor"))) {
4259 return [](ProcessedNode* p_node) {
4260 const auto& self = p_node->Input(0).toTensor();
4261 const auto& other = p_node->Input(1).toTensor();
4262 if (p_node->Output(0).isNone()) {
4263 p_node->Output(0) = at::cpu::special_zeta(self, other);
4264 return;
4265 }
4266 auto& out = p_node->Output(0).toTensor();
4267 fastResizeToZero(out);
4268 at::cpu::special_zeta_out(out, self, other);
4269 };
4270 }
4271 LogAndDumpSchema(n);
4272 return nullptr;
4273 });
4274
4275 REGISTER_OPERATOR_FUNCTOR(
4276 aten::special_i0,
4277 aten_special_i0,
4278     [](Node* n) -> SROperator {
4279 if (n->matches(
4280 torch::schema("aten::special_i0(Tensor self) -> Tensor"))) {
4281 return [](ProcessedNode* p_node) {
4282 const auto& self = p_node->Input(0).toTensor();
4283 if (p_node->Output(0).isNone()) {
4284 p_node->Output(0) = at::native::special_i0(self);
4285 return;
4286 }
4287 auto& out = p_node->Output(0).toTensor();
4288 fastResizeToZero(out);
4289 at::native::special_i0_out(self, out);
4290 };
4291 }
4292 LogAndDumpSchema(n);
4293 return nullptr;
4294 });
4295
4296 REGISTER_OPERATOR_FUNCTOR(
4297 aten::special_i0e,
4298 aten_special_i0e,
4299     [](Node* n) -> SROperator {
4300 if (n->matches(
4301 torch::schema("aten::special_i0e(Tensor self) -> Tensor"))) {
4302 return [](ProcessedNode* p_node) {
4303 const auto& self = p_node->Input(0).toTensor();
4304 if (p_node->Output(0).isNone()) {
4305 p_node->Output(0) = at::cpu::special_i0e(self);
4306 return;
4307 }
4308 auto& out = p_node->Output(0).toTensor();
4309 fastResizeToZero(out);
4310 at::cpu::special_i0e_out(out, self);
4311 };
4312 }
4313 LogAndDumpSchema(n);
4314 return nullptr;
4315 });
4316
4317 REGISTER_OPERATOR_FUNCTOR(
4318 aten::special_i1,
4319 aten_special_i1,
4320     [](Node* n) -> SROperator {
4321 if (n->matches(
4322 torch::schema("aten::special_i1(Tensor self) -> Tensor"))) {
4323 return [](ProcessedNode* p_node) {
4324 const auto& self = p_node->Input(0).toTensor();
4325 if (p_node->Output(0).isNone()) {
4326 p_node->Output(0) = at::cpu::special_i1(self);
4327 return;
4328 }
4329 auto& out = p_node->Output(0).toTensor();
4330 fastResizeToZero(out);
4331 at::cpu::special_i1_out(out, self);
4332 };
4333 }
4334 LogAndDumpSchema(n);
4335 return nullptr;
4336 });
4337
4338 REGISTER_OPERATOR_FUNCTOR(
4339 aten::special_i1e,
4340 aten_special_i1e,
4341     [](Node* n) -> SROperator {
4342 if (n->matches(
4343 torch::schema("aten::special_i1e(Tensor self) -> Tensor"))) {
4344 return [](ProcessedNode* p_node) {
4345 const auto& self = p_node->Input(0).toTensor();
4346 if (p_node->Output(0).isNone()) {
4347 p_node->Output(0) = at::cpu::special_i1e(self);
4348 return;
4349 }
4350 auto& out = p_node->Output(0).toTensor();
4351 fastResizeToZero(out);
4352 at::cpu::special_i1e_out(out, self);
4353 };
4354 }
4355 LogAndDumpSchema(n);
4356 return nullptr;
4357 });
4358
4359 REGISTER_OPERATOR_FUNCTOR(
4360 aten::special_polygamma,
4361 aten_special_polygamma,
4362     [](Node* n) -> SROperator {
4363 if (n->matches(torch::schema(
4364 "aten::special_polygamma(int n, Tensor self) -> Tensor"))) {
4365 return [](ProcessedNode* p_node) {
4366 const auto n = p_node->Input(0).toInt();
4367 const auto& self = p_node->Input(1).toTensor();
4368 if (p_node->Output(0).isNone()) {
4369 p_node->Output(0) = at::native::special_polygamma(n, self);
4370 return;
4371 }
4372 auto& out = p_node->Output(0).toTensor();
4373 fastResizeToZero(out);
4374 at::native::special_polygamma_out(n, self, out);
4375 };
4376 }
4377 LogAndDumpSchema(n);
4378 return nullptr;
4379 });
4380
4381 REGISTER_OPERATOR_FUNCTOR(
4382 aten::special_expit,
4383 aten_special_expit,
4384     [](Node* n) -> SROperator {
4385 if (n->matches(
4386 torch::schema("aten::special_expit(Tensor self) -> Tensor"))) {
4387 return [](ProcessedNode* p_node) {
4388 const auto& self = p_node->Input(0).toTensor();
4389 if (p_node->Output(0).isNone()) {
4390 p_node->Output(0) = at::native::special_expit(self);
4391 return;
4392 }
4393 auto& out = p_node->Output(0).toTensor();
4394 fastResizeToZero(out);
4395 at::native::special_expit_out(self, out);
4396 };
4397 }
4398 LogAndDumpSchema(n);
4399 return nullptr;
4400 });
4401
4402 REGISTER_OPERATOR_FUNCTOR(
4403 aten::special_sinc,
4404 aten_special_sinc,
4405     [](Node* n) -> SROperator {
4406 if (n->matches(
4407 torch::schema("aten::special_sinc(Tensor self) -> Tensor"))) {
4408 return [](ProcessedNode* p_node) {
4409 const auto& self = p_node->Input(0).toTensor();
4410 if (p_node->Output(0).isNone()) {
4411 p_node->Output(0) = at::native::special_sinc(self);
4412 return;
4413 }
4414 auto& out = p_node->Output(0).toTensor();
4415 fastResizeToZero(out);
4416 at::native::special_sinc_out(self, out);
4417 };
4418 }
4419 LogAndDumpSchema(n);
4420 return nullptr;
4421 });
4422
4423 REGISTER_OPERATOR_FUNCTOR(
4424 aten::special_round,
4425 aten_special_round,
4426     [](Node* n) -> SROperator {
4427 if (n->matches(torch::schema(
4428 "aten::special_round(Tensor self, *, int decimals=0) -> Tensor"))) {
4429 return [](ProcessedNode* p_node) {
4430 const auto& self = p_node->Input(0).toTensor();
4431 const auto decimals = p_node->Input(1).toInt();
4432 if (p_node->Output(0).isNone()) {
4433 p_node->Output(0) = at::native::special_round(self, decimals);
4434 return;
4435 }
4436 auto& out = p_node->Output(0).toTensor();
4437 fastResizeToZero(out);
4438 at::native::special_round_out(self, decimals, out);
4439 };
4440 }
4441 LogAndDumpSchema(n);
4442 return nullptr;
4443 });
4444
4445 REGISTER_OPERATOR_FUNCTOR(
4446 aten::special_log1p,
4447 aten_special_log1p,
4448     [](Node* n) -> SROperator {
4449 if (n->matches(
4450 torch::schema("aten::special_log1p(Tensor self) -> Tensor"))) {
4451 return [](ProcessedNode* p_node) {
4452 const auto& self = p_node->Input(0).toTensor();
4453 if (p_node->Output(0).isNone()) {
4454 p_node->Output(0) = at::native::special_log1p(self);
4455 return;
4456 }
4457 auto& out = p_node->Output(0).toTensor();
4458 fastResizeToZero(out);
4459 at::native::special_log1p_out(self, out);
4460 };
4461 }
4462 LogAndDumpSchema(n);
4463 return nullptr;
4464 });
4465
4466 REGISTER_OPERATOR_FUNCTOR(
4467 aten::special_gammainc,
4468 aten_special_gammainc,
4469     [](Node* n) -> SROperator {
4470 if (n->matches(torch::schema(
4471 "aten::special_gammainc(Tensor self, Tensor other) -> Tensor"))) {
4472 return [](ProcessedNode* p_node) {
4473 const auto& self = p_node->Input(0).toTensor();
4474 const auto& other = p_node->Input(1).toTensor();
4475 if (p_node->Output(0).isNone()) {
4476 p_node->Output(0) = at::native::special_gammainc(self, other);
4477 return;
4478 }
4479 auto& out = p_node->Output(0).toTensor();
4480 fastResizeToZero(out);
4481 at::native::special_gammainc_out(self, other, out);
4482 };
4483 }
4484 LogAndDumpSchema(n);
4485 return nullptr;
4486 });
4487
4488 REGISTER_OPERATOR_FUNCTOR(
4489 aten::special_gammaincc,
4490 aten_special_gammaincc,
4491     [](Node* n) -> SROperator {
4492 if (n->matches(torch::schema(
4493 "aten::special_gammaincc(Tensor self, Tensor other) -> Tensor"))) {
4494 return [](ProcessedNode* p_node) {
4495 const auto& self = p_node->Input(0).toTensor();
4496 const auto& other = p_node->Input(1).toTensor();
4497 if (p_node->Output(0).isNone()) {
4498 p_node->Output(0) = at::native::special_gammaincc(self, other);
4499 return;
4500 }
4501 auto& out = p_node->Output(0).toTensor();
4502 fastResizeToZero(out);
4503 at::native::special_gammaincc_out(self, other, out);
4504 };
4505 }
4506 LogAndDumpSchema(n);
4507 return nullptr;
4508 });
4509
4510 REGISTER_OPERATOR_FUNCTOR(
4511 aten::special_multigammaln,
4512 aten_special_multigammaln,
4513     [](Node* n) -> SROperator {
4514 if (n->matches(torch::schema(
4515 "aten::special_multigammaln(Tensor self, int p) -> Tensor"))) {
4516 return [](ProcessedNode* p_node) {
4517 const auto& self = p_node->Input(0).toTensor();
4518 const auto p = p_node->Input(1).toInt();
4519 if (p_node->Output(0).isNone()) {
4520 p_node->Output(0) = at::native::special_multigammaln(self, p);
4521 return;
4522 }
4523 auto& out = p_node->Output(0).toTensor();
4524 fastResizeToZero(out);
4525 at::native::special_multigammaln_out(self, p, out);
4526 };
4527 }
4528 LogAndDumpSchema(n);
4529 return nullptr;
4530 });
4531
4532 REGISTER_OPERATOR_FUNCTOR(
4533 aten::linalg_cross,
4534 aten_linalg_cross,
4535     [](Node* n) -> SROperator {
4536 if (n->matches(torch::schema(
4537 "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor"))) {
4538 return [](ProcessedNode* p_node) {
4539 const auto& self = p_node->Input(0).toTensor();
4540 const auto& other = p_node->Input(1).toTensor();
4541 const auto dim = p_node->Input(2).toInt();
4542 if (p_node->Output(0).isNone()) {
4543 p_node->Output(0) = at::cpu::linalg_cross(self, other, dim);
4544 return;
4545 }
4546 auto& out = p_node->Output(0).toTensor();
4547 fastResizeToZero(out);
4548 at::cpu::linalg_cross_out(out, self, other, dim);
4549 };
4550 }
4551 LogAndDumpSchema(n);
4552 return nullptr;
4553 });
4554
4555 REGISTER_OPERATOR_FUNCTOR(
4556 aten::linalg_det,
4557 aten_linalg_det,
4558     [](Node* n) -> SROperator {
4559 if (n->matches(torch::schema("aten::linalg_det(Tensor A) -> Tensor"))) {
4560 return [](ProcessedNode* p_node) {
4561 const auto& A = p_node->Input(0).toTensor();
4562 if (p_node->Output(0).isNone()) {
4563 p_node->Output(0) = at::native::linalg_det(A);
4564 return;
4565 }
4566 auto& out = p_node->Output(0).toTensor();
4567 fastResizeToZero(out);
4568 at::native::linalg_det_out(A, out);
4569 };
4570 }
4571 LogAndDumpSchema(n);
4572 return nullptr;
4573 });
4574
4575 REGISTER_OPERATOR_FUNCTOR(
4576 aten::linalg_matmul,
4577 aten_linalg_matmul,
4578     [](Node* n) -> SROperator {
4579 if (n->matches(torch::schema(
4580 "aten::linalg_matmul(Tensor self, Tensor other) -> Tensor"))) {
4581 return [](ProcessedNode* p_node) {
4582 const auto& self = p_node->Input(0).toTensor();
4583 const auto& other = p_node->Input(1).toTensor();
4584 if (p_node->Output(0).isNone()) {
4585 p_node->Output(0) = at::native::linalg_matmul(self, other);
4586 return;
4587 }
4588 auto& out = p_node->Output(0).toTensor();
4589 fastResizeToZero(out);
4590 at::native::linalg_matmul_out(self, other, out);
4591 };
4592 }
4593 LogAndDumpSchema(n);
4594 return nullptr;
4595 });
4596
4597 REGISTER_OPERATOR_FUNCTOR(
4598 aten::linalg_eigvals,
4599 aten_linalg_eigvals,
4600     [](Node* n) -> SROperator {
4601 if (n->matches(
4602 torch::schema("aten::linalg_eigvals(Tensor self) -> Tensor"))) {
4603 return [](ProcessedNode* p_node) {
4604 const auto& self = p_node->Input(0).toTensor();
4605 if (p_node->Output(0).isNone()) {
4606 p_node->Output(0) = at::native::linalg_eigvals(self);
4607 return;
4608 }
4609 auto& out = p_node->Output(0).toTensor();
4610 fastResizeToZero(out);
4611 at::native::linalg_eigvals_out(self, out);
4612 };
4613 }
4614 LogAndDumpSchema(n);
4615 return nullptr;
4616 });
4617
4618 REGISTER_OPERATOR_FUNCTOR(
4619 aten::linalg_inv,
4620 aten_linalg_inv,
4621     [](Node* n) -> SROperator {
4622 if (n->matches(torch::schema("aten::linalg_inv(Tensor A) -> Tensor"))) {
4623 return [](ProcessedNode* p_node) {
4624 const auto& A = p_node->Input(0).toTensor();
4625 if (p_node->Output(0).isNone()) {
4626 p_node->Output(0) = at::native::linalg_inv(A);
4627 return;
4628 }
4629 auto& out = p_node->Output(0).toTensor();
4630 fastResizeToZero(out);
4631 at::native::linalg_inv_out(A, out);
4632 };
4633 }
4634 LogAndDumpSchema(n);
4635 return nullptr;
4636 });
4637
4638 REGISTER_OPERATOR_FUNCTOR(
4639 aten::inverse,
4640 aten_inverse,
4641     [](Node* n) -> SROperator {
4642 if (n->matches(torch::schema("aten::inverse(Tensor self) -> Tensor"))) {
4643 return [](ProcessedNode* p_node) {
4644 const auto& self = p_node->Input(0).toTensor();
4645 if (p_node->Output(0).isNone()) {
4646 p_node->Output(0) = at::native::inverse(self);
4647 return;
4648 }
4649 auto& out = p_node->Output(0).toTensor();
4650 fastResizeToZero(out);
4651 at::native::inverse_out(self, out);
4652 };
4653 }
4654 LogAndDumpSchema(n);
4655 return nullptr;
4656 });
4657
4658 REGISTER_OPERATOR_FUNCTOR(aten::inner, aten_inner, [](Node* n) -> SROperator {
4659 if (n->matches(
4660 torch::schema("aten::inner(Tensor self, Tensor other) -> Tensor"))) {
4661 return [](ProcessedNode* p_node) {
4662 const auto& self = p_node->Input(0).toTensor();
4663 const auto& other = p_node->Input(1).toTensor();
4664 if (p_node->Output(0).isNone()) {
4665 p_node->Output(0) = at::native::inner(self, other);
4666 return;
4667 }
4668 auto& out = p_node->Output(0).toTensor();
4669 fastResizeToZero(out);
4670 at::native::inner_out(self, other, out);
4671 };
4672 }
4673 LogAndDumpSchema(n);
4674 return nullptr;
4675 });
4676
4677 REGISTER_OPERATOR_FUNCTOR(aten::outer, aten_outer, [](Node* n) -> SROperator {
4678 if (n->matches(
4679 torch::schema("aten::outer(Tensor self, Tensor vec2) -> Tensor"))) {
4680 return [](ProcessedNode* p_node) {
4681 const auto& self = p_node->Input(0).toTensor();
4682 const auto& vec2 = p_node->Input(1).toTensor();
4683 if (p_node->Output(0).isNone()) {
4684 p_node->Output(0) = at::native::outer(self, vec2);
4685 return;
4686 }
4687 auto& out = p_node->Output(0).toTensor();
4688 fastResizeToZero(out);
4689 at::native::outer_out(self, vec2, out);
4690 };
4691 }
4692 LogAndDumpSchema(n);
4693 return nullptr;
4694 });
4695
4696 REGISTER_OPERATOR_FUNCTOR(
4697 aten::linalg_cond,
4698 aten_linalg_cond,
4699     [](Node* n) -> SROperator {
4700 if (n->matches(torch::schema(
4701 "aten::linalg_cond(Tensor self, Scalar? p=None) -> Tensor"))) {
4702 return [](ProcessedNode* p_node) {
4703 const auto& self = p_node->Input(0).toTensor();
4704 const auto p = p_node->Input(1).toOptional<at::Scalar>();
4705 if (p_node->Output(0).isNone()) {
4706 p_node->Output(0) = at::native::linalg_cond(self, p);
4707 return;
4708 }
4709 auto& out = p_node->Output(0).toTensor();
4710 fastResizeToZero(out);
4711 at::native::linalg_cond_out(self, p, out);
4712 };
4713 }
4714 LogAndDumpSchema(n);
4715 return nullptr;
4716 });
4717
4718 REGISTER_OPERATOR_FUNCTOR(
4719 aten::linalg_solve,
4720 aten_linalg_solve,
4721     [](Node* n) -> SROperator {
4722 if (n->matches(torch::schema(
4723 "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor"))) {
4724 return [](ProcessedNode* p_node) {
4725 const auto& A = p_node->Input(0).toTensor();
4726 const auto& B = p_node->Input(1).toTensor();
4727 const auto left = p_node->Input(2).toBool();
4728 if (p_node->Output(0).isNone()) {
4729 p_node->Output(0) = at::native::linalg_solve(A, B, left);
4730 return;
4731 }
4732 auto& out = p_node->Output(0).toTensor();
4733 fastResizeToZero(out);
4734 at::native::linalg_solve_out(A, B, left, out);
4735 };
4736 }
4737 LogAndDumpSchema(n);
4738 return nullptr;
4739 });
4740
4741 REGISTER_OPERATOR_FUNCTOR(
4742 aten::linalg_tensorinv,
4743 aten_linalg_tensorinv,
4744     [](Node* n) -> SROperator {
4745 if (n->matches(torch::schema(
4746 "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor"))) {
4747 return [](ProcessedNode* p_node) {
4748 const auto& self = p_node->Input(0).toTensor();
4749 const auto ind = p_node->Input(1).toInt();
4750 if (p_node->Output(0).isNone()) {
4751 p_node->Output(0) = at::native::linalg_tensorinv(self, ind);
4752 return;
4753 }
4754 auto& out = p_node->Output(0).toTensor();
4755 fastResizeToZero(out);
4756 at::native::linalg_tensorinv_out(self, ind, out);
4757 };
4758 }
4759 LogAndDumpSchema(n);
4760 return nullptr;
4761 });
4762
4763 REGISTER_OPERATOR_FUNCTOR(
4764 aten::linalg_matrix_power,
4765 aten_linalg_matrix_power,
4766     [](Node* n) -> SROperator {
4767 if (n->matches(torch::schema(
4768 "aten::linalg_matrix_power(Tensor self, int n) -> Tensor"))) {
4769 return [](ProcessedNode* p_node) {
4770 const auto& self = p_node->Input(0).toTensor();
4771 const auto n = p_node->Input(1).toInt();
4772 if (p_node->Output(0).isNone()) {
4773 p_node->Output(0) = at::native::linalg_matrix_power(self, n);
4774 return;
4775 }
4776 auto& out = p_node->Output(0).toTensor();
4777 fastResizeToZero(out);
4778 at::native::linalg_matrix_power_out(self, n, out);
4779 };
4780 }
4781 LogAndDumpSchema(n);
4782 return nullptr;
4783 });
4784
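// The REGISTER_NATIVE_OPERATOR_FUNCTOR entries below wrap view-producing ops
// (schema "Tensor(a) self -> Tensor(a)"): the result aliases the input, so
// there is no preallocated output to reuse and no *_out/resize path.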
4785 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4786 aten::view_as_real,
4787 aten_view_as_real,
4788     [](Node* n) -> SROperator {
4789 if (n->matches(torch::schema(
4790 "aten::view_as_real(Tensor(a) self) -> Tensor(a)"))) {
4791 return [](ProcessedNode* p_node) {
4792 const auto& self = p_node->Input(0).toTensor();
4793 p_node->Output(0) = at::native::view_as_real(self);
4794 };
4795 }
4796 LogAndDumpSchema(n);
4797 return nullptr;
4798 });
4799
4800 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4801 aten::view_as_complex,
4802 aten_view_as_complex,
4803     [](Node* n) -> SROperator {
4804 if (n->matches(torch::schema(
4805 "aten::view_as_complex(Tensor(a) self) -> Tensor(a)"))) {
4806 return [](ProcessedNode* p_node) {
4807 const auto& self = p_node->Input(0).toTensor();
4808 p_node->Output(0) = at::native::view_as_complex(self);
4809 };
4810 }
4811 LogAndDumpSchema(n);
4812 return nullptr;
4813 });
4814
4815 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4816 aten::real,
4817 aten_real,
4818     [](Node* n) -> SROperator {
4819 if (n->matches(
4820 torch::schema("aten::real(Tensor(a) self) -> Tensor(a)"))) {
4821 return [](ProcessedNode* p_node) {
4822 const auto& self = p_node->Input(0).toTensor();
4823 p_node->Output(0) = at::native::real(self);
4824 };
4825 }
4826 LogAndDumpSchema(n);
4827 return nullptr;
4828 });
4829
4830 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4831 aten::imag,
4832 aten_imag,
4833     [](Node* n) -> SROperator {
4834 if (n->matches(
4835 torch::schema("aten::imag(Tensor(a) self) -> Tensor(a)"))) {
4836 return [](ProcessedNode* p_node) {
4837 const auto& self = p_node->Input(0).toTensor();
4838 p_node->Output(0) = at::native::imag(self);
4839 };
4840 }
4841 LogAndDumpSchema(n);
4842 return nullptr;
4843 });
4844
4845 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4846 aten::_conj,
4847 aten__conj,
4848     [](Node* n) -> SROperator {
4849 if (n->matches(
4850 torch::schema("aten::_conj(Tensor(a) self) -> Tensor(a)"))) {
4851 return [](ProcessedNode* p_node) {
4852 const auto& self = p_node->Input(0).toTensor();
4853 p_node->Output(0) = at::native::_conj(self);
4854 };
4855 }
4856 LogAndDumpSchema(n);
4857 return nullptr;
4858 });
4859
4860 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4861 aten::conj,
4862 aten_conj,
4863     [](Node* n) -> SROperator {
4864 if (n->matches(
4865 torch::schema("aten::conj(Tensor(a) self) -> Tensor(a)"))) {
4866 return [](ProcessedNode* p_node) {
4867 const auto& self = p_node->Input(0).toTensor();
4868 p_node->Output(0) = at::native::conj(self);
4869 };
4870 }
4871 LogAndDumpSchema(n);
4872 return nullptr;
4873 });
4874
4875 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4876 aten::resolve_conj,
4877 aten_resolve_conj,
4878     [](Node* n) -> SROperator {
4879 if (n->matches(torch::schema(
4880 "aten::resolve_conj(Tensor(a) self) -> Tensor(a)"))) {
4881 return [](ProcessedNode* p_node) {
4882 const auto& self = p_node->Input(0).toTensor();
4883 p_node->Output(0) = at::native::resolve_conj(self);
4884 };
4885 }
4886 LogAndDumpSchema(n);
4887 return nullptr;
4888 });
4889
4890 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4891 aten::resolve_neg,
4892 aten_resolve_neg,
4893     [](Node* n) -> SROperator {
4894 if (n->matches(torch::schema(
4895 "aten::resolve_neg(Tensor(a) self) -> Tensor(a)"))) {
4896 return [](ProcessedNode* p_node) {
4897 const auto& self = p_node->Input(0).toTensor();
4898 p_node->Output(0) = at::native::resolve_neg(self);
4899 };
4900 }
4901 LogAndDumpSchema(n);
4902 return nullptr;
4903 });
4904
4905 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4906 aten::_neg_view,
4907 aten__neg_view,
4908     [](Node* n) -> SROperator {
4909 if (n->matches(
4910 torch::schema("aten::_neg_view(Tensor(a) self) -> Tensor(a)"))) {
4911 return [](ProcessedNode* p_node) {
4912 const auto& self = p_node->Input(0).toTensor();
4913 p_node->Output(0) = at::native::_neg_view(self);
4914 };
4915 }
4916 LogAndDumpSchema(n);
4917 return nullptr;
4918 });
4919
4920 REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::diagonal, aten_diagonal, [](Node* n) -> SROperator {
4921 if (n->matches(torch::schema(
4922 "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)"))) {
4923 return [](ProcessedNode* p_node) {
4924 const auto& self = p_node->Input(0).toTensor();
4925 const auto offset = p_node->Input(1).toInt();
4926 const auto dim1 = p_node->Input(2).toInt();
4927 const auto dim2 = p_node->Input(3).toInt();
4928 p_node->Output(0) = at::native::diagonal(self, offset, dim1, dim2);
4929 };
4930 }
4931 LogAndDumpSchema(n);
4932 return nullptr;
4933 });
4934
4935 REGISTER_NATIVE_OPERATOR_FUNCTOR(
4936 aten::linalg_diagonal,
4937 aten_linalg_diagonal,
4938     [](Node* n) -> SROperator {
4939 if (n->matches(torch::schema(
4940 "aten::linalg_diagonal(Tensor(a) A, *, int offset=0, int dim1=-2, int dim2=-1) -> Tensor(a)"))) {
4941 return [](ProcessedNode* p_node) {
4942 const auto& A = p_node->Input(0).toTensor();
4943 const auto offset = p_node->Input(1).toInt();
4944 const auto dim1 = p_node->Input(2).toInt();
4945 const auto dim2 = p_node->Input(3).toInt();
4946 p_node->Output(0) =
4947 at::native::linalg_diagonal(A, offset, dim1, dim2);
4948 };
4949 }
4950 LogAndDumpSchema(n);
4951 return nullptr;
4952 });
4953
4954 REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::movedim, aten_movedim, [](Node* n) -> SROperator {
4955 if (n->matches(torch::schema(
4956 "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)"))) {
4957 return [](ProcessedNode* p_node) {
4958 const auto& self = p_node->Input(0).toTensor();
4959 const auto source = p_node->Input(1).toInt();
4960 const auto destination = p_node->Input(2).toInt();
4961 p_node->Output(0) = at::native::movedim(self, source, destination);
4962 };
4963 }
4964 LogAndDumpSchema(n);
4965 return nullptr;
4966 });
4967
4968 REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::moveaxis, aten_moveaxis, [](Node* n) -> SROperator {
4969 if (n->matches(torch::schema(
4970 "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)"))) {
4971 return [](ProcessedNode* p_node) {
4972 const auto& self = p_node->Input(0).toTensor();
4973 const auto source = p_node->Input(1).toInt();
4974 const auto destination = p_node->Input(2).toInt();
4975 p_node->Output(0) = at::native::moveaxis(self, source, destination);
4976 };
4977 }
4978 LogAndDumpSchema(n);
4979 return nullptr;
4980 });
4981
REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::numpy_T,
    aten_numpy_T,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::numpy_T(Tensor(a) self) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          p_node->Output(0) = at::native::numpy_T(self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::matrix_H,
    aten_matrix_H,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::matrix_H(Tensor(a) self) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          p_node->Output(0) = at::native::matrix_H(self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::mT, aten_mT, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::mT(Tensor(a) self) -> Tensor(a)"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      p_node->Output(0) = at::native::mT(self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::mH, aten_mH, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::mH(Tensor(a) self) -> Tensor(a)"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      p_node->Output(0) = at::native::mH(self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::adjoint,
    aten_adjoint,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::adjoint(Tensor(a) self) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          p_node->Output(0) = at::native::adjoint(self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::ravel,
    aten_ravel,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::ravel(Tensor(a) self) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          p_node->Output(0) = at::native::ravel(self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::t, aten_t, [](Node* n) -> SROperator {
  if (n->matches(torch::schema("aten::t(Tensor(a) self) -> Tensor(a)"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      p_node->Output(0) = at::native::t(self);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::unsqueeze,
    aten_unsqueeze,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto dim = p_node->Input(1).toInt();
          p_node->Output(0) = at::native::unsqueeze(self, dim);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::view_as,
    aten_view_as,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto& other = p_node->Input(1).toTensor();
          p_node->Output(0) = at::native::view_as(self, other);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::positive,
    aten_positive,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::positive(Tensor(a) self) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          p_node->Output(0) = at::native::positive(self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

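// Autocast precision casts: depending on the cuda_enabled/cpu_enabled flags,
// these cast `self` to the requested reduced/full-precision dtype; when no
// cast applies, the result may simply alias `self`, consistent with the
// `Tensor(a)` return annotation in the schema.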
REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::_autocast_to_reduced_precision,
    aten__autocast_to_reduced_precision,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto cuda_enabled = p_node->Input(1).toBool();
          const auto cpu_enabled = p_node->Input(2).toBool();
          const auto cuda_dtype = p_node->Input(3).toScalarType();
          const auto cpu_dtype = p_node->Input(4).toScalarType();
          p_node->Output(0) = at::native::_autocast_to_reduced_precision(
              self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::_autocast_to_full_precision,
    aten__autocast_to_full_precision,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::_autocast_to_full_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto cuda_enabled = p_node->Input(1).toBool();
          const auto cpu_enabled = p_node->Input(2).toBool();
          p_node->Output(0) = at::native::_autocast_to_full_precision(
              self, cuda_enabled, cpu_enabled);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

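// swapaxes/swapdims are the NumPy-style aliases of aten::transpose; both
// yield a transposed view of the input.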
REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::swapaxes,
    aten_swapaxes,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto axis0 = p_node->Input(1).toInt();
          const auto axis1 = p_node->Input(2).toInt();
          p_node->Output(0) = at::native::swapaxes(self, axis0, axis1);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::swapdims,
    aten_swapdims,
    [](Node* n) -> SROperator {
      if (n->matches(torch::schema(
              "aten::swapdims(Tensor(a) self, int dim0, int dim1) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          const auto dim0 = p_node->Input(1).toInt();
          const auto dim1 = p_node->Input(2).toInt();
          p_node->Output(0) = at::native::swapdims(self, dim0, dim1);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

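// unfold produces a strided view over all size-`size` slices of `self` taken
// along `dimension`, advancing `step` elements between consecutive slices.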
REGISTER_NATIVE_OPERATOR_FUNCTOR(aten::unfold, aten_unfold, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto dimension = p_node->Input(1).toInt();
      const auto size = p_node->Input(2).toInt();
      const auto step = p_node->Input(3).toInt();
      p_node->Output(0) = at::native::unfold(self, dimension, size, step);
    };
  }
  LogAndDumpSchema(n);
  return nullptr;
});

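// alias returns a view sharing storage, sizes, and strides with `self`.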
REGISTER_NATIVE_OPERATOR_FUNCTOR(
    aten::alias,
    aten_alias,
    [](Node* n) -> SROperator {
      if (n->matches(
              torch::schema("aten::alias(Tensor(a) self) -> Tensor(a)"))) {
        return [](ProcessedNode* p_node) {
          const auto& self = p_node->Input(0).toTensor();
          p_node->Output(0) = at::native::alias(self);
        };
      }
      LogAndDumpSchema(n);
      return nullptr;
    });

} // namespace torch::jit