/aosp_15_r20/external/pytorch/test/
  test_prims.py
    364: res = refs.constant_pad_nd(a, pad=[1] * (2 * ndim))
    373: actual = refs.constant_pad_nd(a, pad=[1] * 8)
    374: expect = torch.constant_pad_nd(a, pad=[1] * 8)
    383: actual = refs.constant_pad_nd(a, pad=[1] * 8)
    384: expect = torch.constant_pad_nd(a, pad=[1] * 8)
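The test_prims.py lines compare the Python reference implementation in torch._refs against the ATen op. A minimal runnable sketch of the same check (shape and pad width are illustrative):

    import torch
    import torch._refs as refs

    a = torch.arange(6.0).reshape(2, 3)
    # pad is a flat list of (left, right) pairs, starting from the last
    # dim; [1, 1, 1, 1] pads every side of a 2-D tensor by one element.
    expect = torch.constant_pad_nd(a, pad=[1] * 4)
    actual = refs.constant_pad_nd(a, pad=[1] * 4)
    torch.testing.assert_close(actual, expect)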
/aosp_15_r20/external/executorch/backends/vulkan/runtime/graph/ops/impl/
  Pad.cpp
    100: void constant_pad_nd(ComputeGraph& graph, const std::vector<ValueRef>& args) {
    105: VK_REGISTER_OP(aten.constant_pad_nd.default, constant_pad_nd);
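The Vulkan backend, like the XNNPACK, Qualcomm, and MPS backends further down, registers its kernel against the op's default overload, aten.constant_pad_nd.default. That overload object is callable directly from Python, which makes it easy to probe the signature a backend kernel must cover (a sketch; values are illustrative):

    import torch

    x = torch.ones(2, 2)
    # (input, pad, value) -- pad pairs run from the last dim backwards.
    y = torch.ops.aten.constant_pad_nd.default(x, [1, 1, 0, 0], 5.0)
    print(y)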
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/
  pad_mm.py
    689: # dim order is reversed for constant_pad_nd, for every dim we specify right and left padding
    693: return aten.constant_pad_nd(mat1, pad_arg)
    705: # dim order is reversed for constant_pad_nd, for every dim we specify right and left padding
    709: return aten.constant_pad_nd(mat2, pad_arg)
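The repeated pad_mm.py comment describes how the pad argument is laid out: the flat list is consumed from the last tensor dimension backwards, one (left, right) pair per dimension. A sketch of padding only the trailing dimension of one matmul operand (shapes are illustrative; pad_arg mirrors the variable name in pad_mm.py):

    import torch

    aten = torch.ops.aten
    mat1 = torch.randn(3, 5)
    # To pad only dim 1 of a 2-D matrix by 3 columns on the right:
    pad_arg = [0, 3, 0, 0]  # dim 1: (left=0, right=3); dim 0: (0, 0)
    padded = aten.constant_pad_nd(mat1, pad_arg)
    assert padded.shape == (3, 8)
    # Trailing zero pairs may be omitted: [0, 3] is equivalent here.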
/aosp_15_r20/external/pytorch/test/inductor/
  test_fused_attention.py
    169: # constant_pad_nd is a single element tensor that gets expanded
    221: constant_pad_nd = torch.ops.aten.constant_pad_nd.default(
    225: slice_17 = torch.ops.aten.slice.Tensor(constant_pad_nd, -1, 0, 1)
    226: constant_pad_nd = None
  test_inductor_freezing.py
    715: constant_pad_nd = torch.ops.aten.constant_pad_nd.default(
    720: constant_pad_nd, [8, 384, 2, 20, 12], [153600, 400, 160, 1, 20]
  test_torchinductor.py
    1348: padded_idx = torch.constant_pad_nd(idx, (1050, 0))
    5923: aten.constant_pad_nd(a, [0, 1], 6.0),
    5924: aten.constant_pad_nd(a, [2, 3], 99.0),
    5932: aten.constant_pad_nd(a, (1, 1), 1.0) & b,
    5933: aten.constant_pad_nd(a, (1, 1), 0.0) & b,
    5944: aten.constant_pad_nd(a, [1, 1, 1, 1], 6.0),
    5945: aten.constant_pad_nd(a, [1, 2, 3, 4], 99.0),
    5955: aten.constant_pad_nd(a, [1, 2, 3, 4, 5, 6], 6.0),
    5956: aten.constant_pad_nd(a, [0, 0, 3, 4, 0, 0], 6.0),
    5974: return aten.constant_pad_nd(a, [0, 0])
    [all …]
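Lines 5932-5933 pad boolean tensors with the fill values 1.0 and 0.0; the scalar fill is cast to the input dtype, so those pads append True and False respectively. A sketch (tensor contents are illustrative):

    import torch

    aten = torch.ops.aten
    a = torch.tensor([True, False])
    b = torch.tensor([True, True, True, True])
    # Fill 1.0 casts to True, fill 0.0 casts to False for bool inputs.
    t = aten.constant_pad_nd(a, (1, 1), 1.0) & b  # [True, True, False, True]
    f = aten.constant_pad_nd(a, (1, 1), 0.0) & b  # [False, True, False, False]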
  test_cpu_repro.py
    786: constant_pad_nd = torch.ops.aten.constant_pad_nd.default(
    789: view = torch.ops.aten.view.default(constant_pad_nd, [12, 1, 512, 513])
    1881: res_aten_eager = torch.constant_pad_nd(x, size)
    1882: cfn = torch.compile(torch.constant_pad_nd)
    3229: return torch.ops.aten.constant_pad_nd.default(y, [0, 0, 1, 0, 0, 0], 0.0)
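Lines 1881-1882 exercise constant_pad_nd through torch.compile; the compiled result must match eager. A minimal sketch of that check:

    import torch

    x = torch.randn(2, 3)
    cfn = torch.compile(torch.constant_pad_nd)
    eager = torch.constant_pad_nd(x, (1, 2), 99.0)
    compiled = cfn(x, (1, 2), 99.0)
    torch.testing.assert_close(compiled, eager)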
/aosp_15_r20/external/executorch/kernels/portable/cpu/
  op_constant_pad_nd.cpp
    188: Bool, in_type, ctx, "constant_pad_nd.out", CTYPE, [&]() {  (in constant_pad_nd_out())
    191: value_type, ctx, "constant_pad_nd.out", CTYPE_VALUE, [&]() {  (in constant_pad_nd_out())
/aosp_15_r20/external/executorch/backends/xnnpack/partition/
  configs.py
    42: exir_ops.edge.aten.constant_pad_nd.default,
    97: exir_ops.edge.aten.constant_pad_nd.default,
/aosp_15_r20/external/pytorch/test/cpp/api/
  fft.cpp
    53: auto expect = torch::fft::fft(torch::constant_pad_nd(t, {0, 72}));  (in TEST())
    57: expect = torch::fft::fft(torch::constant_pad_nd(t, {0, -64}));  (in TEST())
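The fft.cpp test relies on torch::fft::fft(t, n) zero-padding when n exceeds the signal length and cropping when it is shorter, which is exactly what a positive or negative trailing pad expresses. A Python sketch of the same equivalences, assuming a length-128 input as the pad widths in the test suggest:

    import torch

    t = torch.randn(128, dtype=torch.complex64)
    # n > len(t): fft zero-pads, matching a positive right-pad.
    torch.testing.assert_close(
        torch.fft.fft(t, n=200),
        torch.fft.fft(torch.constant_pad_nd(t, [0, 72])),
    )
    # n < len(t): fft crops, matching a negative right-pad.
    torch.testing.assert_close(
        torch.fft.fft(t, n=64),
        torch.fft.fft(torch.constant_pad_nd(t, [0, -64])),
    )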
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  PadNd.cpp
    15: #include <ATen/ops/constant_pad_nd.h>
    29: Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value) {
  ts_native_functions.yaml
    31: - constant_pad_nd
  SpectralOps.cpp
    27: #include <ATen/ops/constant_pad_nd.h>
    1117: window_tmp = at::constant_pad_nd(window_tmp, {left, n_fft - win_length - left}, 0);  (in istft())
    1191: y = at::constant_pad_nd(y, {0, end - expected_output_signal_len}, 0);  (in istft())
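Line 1117 centers a win_length window inside an n_fft frame by splitting the zero padding across both sides. The same arithmetic in Python (values are illustrative):

    import torch

    n_fft, win_length = 16, 10
    window = torch.hann_window(win_length)
    left = (n_fft - win_length) // 2
    # Pad `left` zeros before and the remainder after, as istft does.
    window_tmp = torch.constant_pad_nd(window, [left, n_fft - win_length - left], 0)
    assert window_tmp.numel() == n_fft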
/aosp_15_r20/external/pytorch/torch/distributed/tensor/_ops/
  _matrix_ops.py
    319: @register_op_strategy(aten.constant_pad_nd.default)
    321: # TODO(d4l3k): implement a more correct strategy for constant_pad_nd
/aosp_15_r20/external/pytorch/test/expect/
  HasDecompTest.test_aten_core_operators.expect
    156: aten::constant_pad_nd
    157: aten::constant_pad_nd.out
/aosp_15_r20/external/pytorch/torch/onnx/
  symbolic_opset11.py
    42: "constant_pad_nd",
    714: @_onnx_symbolic("aten::constant_pad_nd")
    715: def constant_pad_nd(g: jit_utils.GraphContext, input, padding, value=None):
    755: return constant_pad_nd(g, input, pad, value)
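symbolic_opset11.py maps aten::constant_pad_nd onto the ONNX Pad operator, which takes its pads as a runtime input from opset 11 onward. A minimal export sketch (the module and output file name are illustrative):

    import torch
    import torch.nn.functional as F

    class PadModel(torch.nn.Module):
        def forward(self, x):
            # Constant-mode F.pad lowers to aten::constant_pad_nd.
            return F.pad(x, (2, 2), mode="constant", value=0.0)

    torch.onnx.export(
        PadModel(), (torch.randn(1, 3, 8),), "pad.onnx", opset_version=11
    )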
/aosp_15_r20/external/executorch/backends/apple/mps/operators/
  pad_ops.py
    22: target = "aten.constant_pad_nd.default"
/aosp_15_r20/external/executorch/backends/cadence/aot/tests/
  test_remove_ops_passes.py
    209: # F.pad is converted to aten::constant_pad_nd after functionalization & decomposition.
    227: count_node(graph_after_passes, exir_ops.edge.aten.constant_pad_nd.default),
  test_replace_ops_passes.py
    84: # F.pad is converted to aten::constant_pad_nd after functionalization & decomposition.
    106: count_node(graph_after_passes, exir_ops.edge.aten.constant_pad_nd.default),
    483: # F.pad is converted to aten::constant_pad_nd after functionalization & decomposition.
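The comments in both test files state the invariant the Cadence passes rely on: a constant-mode F.pad call decomposes to aten.constant_pad_nd during export. A sketch of observing that with plain torch.export (run_decompositions() forces core-ATen form; exact graph contents can vary across PyTorch versions):

    import torch
    import torch.nn.functional as F

    class M(torch.nn.Module):
        def forward(self, x):
            return F.pad(x, (1, 1), value=0.0)

    ep = torch.export.export(M(), (torch.randn(2, 3),)).run_decompositions()
    # The decomposed graph calls aten.constant_pad_nd rather than F.pad.
    assert any(
        node.target == torch.ops.aten.constant_pad_nd.default
        for node in ep.graph_module.graph.nodes
        if node.op == "call_function"
    )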
/aosp_15_r20/external/executorch/backends/cadence/aot/
  replace_ops.py
    547: if op != exir_ops.edge.aten.constant_pad_nd.default:
    564: # Replace only if constant_pad_nd is along the innermost padding dimension.
    628: if op != exir_ops.edge.aten.constant_pad_nd.default:
    641: # Replace only if constant_pad_nd is along the innermost padding dimension.
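In replace_ops.py, "along the innermost padding dimension" means only the first (left, right) pair of the pad list, the one addressing the last tensor dimension, is non-zero. For illustration:

    import torch

    x = torch.randn(4, 8)
    # Innermost only: pad list [1, 2] touches just the last dim.
    inner = torch.constant_pad_nd(x, [1, 2])        # shape (4, 11)
    # Outer dim: the second (left, right) pair pads dim 0 instead.
    outer = torch.constant_pad_nd(x, [0, 0, 1, 0])  # shape (5, 8)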
/aosp_15_r20/external/executorch/backends/qualcomm/builders/
  op_pad.py
    20: target = ["aten.constant_pad_nd.default"]
/aosp_15_r20/external/executorch/backends/xnnpack/operators/
  op_static_constant_pad.py
    25: target = "aten.constant_pad_nd.default"
/aosp_15_r20/external/executorch/backends/vulkan/partitioner/
  supported_ops.py
    130: exir_ops.edge.aten.constant_pad_nd.default,
/aosp_15_r20/external/pytorch/functorch/op_analysis/
  public_api
    408: constant_pad_nd
/aosp_15_r20/external/executorch/kernels/aten/
  functions.yaml
    112: - op: constant_pad_nd.out
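This entry (and aten::constant_pad_nd.out in the decomposition expect file above) is the autogenerated out-variant of the op. It is reachable from Python via the overload object; the calling convention below is an assumption based on the usual autogenerated out= schema:

    import torch

    x = torch.randn(2, 3)
    out = torch.empty(2, 5)
    # Assumed schema: constant_pad_nd.out(Tensor self, SymInt[] pad,
    #                                     Scalar value=0, *, Tensor(a!) out)
    torch.ops.aten.constant_pad_nd.out(x, [1, 1], 0, out=out)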