
Searched full:prepacked (Results 1 – 25 of 56) sorted by relevance


/aosp_15_r20/external/pytorch/test/
test_xnnpack_integration.py
51 packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
52 output_linearprepacked = torch.ops.prepacked.linear_clamp_run(
72 packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
73 output_linearprepacked = torch.ops.prepacked.linear_clamp_run(
139 packed_weight_bias = torch.ops.prepacked.conv2d_clamp_prepack(
142 xnnpack_result = torch.ops.prepacked.conv2d_clamp_run(
221 packed_weight_bias = torch.ops.prepacked.conv2d_transpose_clamp_prepack(
224 xnnpack_result = torch.ops.prepacked.conv2d_transpose_clamp_run(
263 self.packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(
268 return torch.ops.prepacked.linear_clamp_run(x, self.packed_weight_bias)
[all …]
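
The hits above show the two-step XNNPACK flow: pack the weight and bias once, then run against the packed context many times. A minimal runnable sketch of that flow, assuming a PyTorch build with XNNPACK support (tolerances are loose because the XNNPACK kernels need not match the reference bit for bit):

    import torch

    x = torch.rand(4, 8)
    weight = torch.rand(16, 8)
    bias = torch.rand(16)

    # Pack once into an xnnpack.LinearOpContext...
    packed_weight_bias = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
    # ...then run the packed op and compare against plain linear.
    out = torch.ops.prepacked.linear_clamp_run(x, packed_weight_bias)
    ref = torch.nn.functional.linear(x, weight, bias)
    assert torch.allclose(out, ref, rtol=1e-2, atol=1e-3)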
test_mobile_optimizer.py
113 .check_not("prepacked::conv2d_clamp_prepack") \
114 .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \
115 .check_not("prepacked::linear_clamp_prepack") \
116 .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
125 .check_not("prepacked::conv2d_clamp_prepack") \
126 .check_count("prepacked::conv2d_clamp_run", 1, exactly=True) \
127 .check_not("prepacked::linear_clamp_prepack") \
128 .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
141 .check_not("prepacked::linear_clamp_run") \
142 .check_not("prepacked::conv2d_clamp_run") \
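
The FileCheck assertions above encode the contract of optimize_for_mobile: prepack calls are folded away at optimization time and only the *_clamp_run ops remain in the graph. A minimal sketch of the same check, assuming XNNPACK support:

    import torch
    from torch.testing import FileCheck
    from torch.utils.mobile_optimizer import optimize_for_mobile

    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = torch.nn.Linear(8, 16)

        def forward(self, x):
            return self.fc(x)

    opt = optimize_for_mobile(torch.jit.script(M()))
    # Prepacking is folded into the module; only the run op should remain.
    FileCheck().check_not("prepacked::linear_clamp_prepack") \
               .check_count("prepacked::linear_clamp_run", 1, exactly=True) \
               .run(opt.graph)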
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
xnnpack_rewrite.cpp
101 %packed_weight_bias = prepacked::linear_clamp_prepack( in insertPrePackedLinearOp()
103 %res = prepacked::linear_clamp_run(%input, %packed_weight_bias) in insertPrePackedLinearOp()
129 %packed_weight_bias = prepacked::conv2d_clamp_prepack( in insertPrePackedConv2dOp()
132 %res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias) in insertPrePackedConv2dOp()
154 %packed_weight_bias = prepacked::conv2d_transpose_clamp_prepack( in insertPrePackedConv2dOp()
157 %res = prepacked::conv2d_transpose_clamp_run(%input, %packed_weight_bias) in insertPrePackedConv2dOp()
176 …%packed_weight_bias : __torch__.torch.classes.xnnpack.LinearOpContext = prepacked::linear_clamp_pr… in fuseHardtanhWithPackedOps()
178 %res = prepacked::linear_clamp_run(%input, %packed_weight_bias) in fuseHardtanhWithPackedOps()
184 …%packed_weight_bias : __torch__.torch.classes.xnnpack.Conv2dOpContext = prepacked::conv2d_clamp_pr… in fuseHardtanhWithPackedOps()
187 %res = prepacked::conv2d_clamp_run(%input, %packed_weight_bias) in fuseHardtanhWithPackedOps()
[all …]
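
Besides inserting prepack/run pairs, the pass fuses a following hardtanh into the packed op's clamp bounds (fuseHardtanhWithPackedOps above). The fused semantics can be reproduced by hand; the clamp bounds are passed positionally here since the trailing Scalar? arguments of the prepack schema are truncated in the listing further down:

    import torch

    x = torch.randn(4, 8)
    weight = torch.randn(16, 8)
    bias = torch.randn(16)

    # Prepack with clamp bounds 0..6, i.e. a fused hardtanh.
    ctx = torch.ops.prepacked.linear_clamp_prepack(weight, bias, 0.0, 6.0)
    out = torch.ops.prepacked.linear_clamp_run(x, ctx)
    ref = torch.nn.functional.hardtanh(
        torch.nn.functional.linear(x, weight, bias), 0.0, 6.0)
    assert torch.allclose(out, ref, rtol=1e-2, atol=1e-3)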
constant_propagation.h
11 // and prepacked::conv2d_clamp_prepack)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/xnnpack/
RegisterOpContextClass.cpp
75 TORCH_LIBRARY(prepacked, m) { in TORCH_LIBRARY() argument
76 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::unpack_prepacked_sizes_conv2d(Any W_prepack) -> (Any)"), … in TORCH_LIBRARY()
77 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::unpack_prepacked_sizes_linear(Any W_prepack) -> (Any)"), … in TORCH_LIBRARY()
78 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::linear_clamp_prepack(Tensor W, Tensor? B=None, Scalar? ou… in TORCH_LIBRARY()
79 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::linear_clamp_run(Tensor X, __torch__.torch.classes.xnnpac… in TORCH_LIBRARY()
80 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, … in TORCH_LIBRARY()
81 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::conv2d_transpose_clamp_prepack(Tensor W, Tensor? B, int[2… in TORCH_LIBRARY()
82 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpac… in TORCH_LIBRARY()
83 …m.def(TORCH_SELECTIVE_SCHEMA("prepacked::conv2d_transpose_clamp_run(Tensor X, __torch__.torch.clas… in TORCH_LIBRARY()
86 TORCH_LIBRARY_IMPL(prepacked, CPU, m) { in TORCH_LIBRARY_IMPL() argument
[all …]
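
The unpack_prepacked_sizes_* schemas registered above take the opaque op context as Any and return size metadata as Any; the shape registry and export pass entries below consume exactly this interface. A small probe, printing rather than assuming the returned structure:

    import torch

    weight = torch.rand(16, 8)
    bias = torch.rand(16)
    ctx = torch.ops.prepacked.linear_clamp_prepack(weight, bias)
    # Schema: "prepacked::unpack_prepacked_sizes_linear(Any W_prepack) -> (Any)"
    print(torch.ops.prepacked.unpack_prepacked_sizes_linear(ctx))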
/aosp_15_r20/external/pytorch/test/jit/
test_optimize_for_mobile_preserve_debug_info.py
133 "prepacked::linear_clamp_prepack": "aten::linear",
134 "prepacked::linear_clamp_run": "aten::linear",
135 "prepacked::conv2d_clamp_prepack": "aten::conv2d",
136 "prepacked::conv2d_clamp_run": "aten::conv2d",
137 "prepacked::conv2d_transpose_clamp_prepack": "aten::conv_transpose2d",
138 "prepacked::conv2d_transpose_clamp_run": "aten::conv_transpose2d",
148 "prepacked::linear_clamp_prepack": "aten::linear",
149 "prepacked::linear_clamp_run": "aten::linear",
225 "prepacked::linear_clamp_prepack": "prepacked::linear_clamp_prepack",
226 "prepacked::linear_clamp_run": linear_activation_kind,
[all …]
/aosp_15_r20/external/mesa3d/src/broadcom/vulkan/
v3dv_cl.h
189 * comes from a prepacked buffer. So the use is similar to cl_emit, where you
190 * set individual values, and the rest of values come from prepacked.
193 * coming from the prepacked buffer, as it does an OR operation. That means
194 * that the prepacked buffer is usually reserved for values that we know that
197 #define cl_emit_with_prepacked(cl, packet, prepacked, name) \ argument
208 ((uint8_t *)cl_out)[_i] = packed[_i] | (prepacked)[_i]; \
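
The macro merges freshly packed bytes into the prepacked buffer with a per-byte OR, so any field that should come from the prepacked side must be left zero when packing the new values. A conceptual Python sketch of that merge (illustrative only, not the Mesa API):

    def emit_with_prepacked(packed: bytes, prepacked: bytes) -> bytes:
        # Fields owned by the prepacked buffer must be zero in `packed`
        # (and vice versa); otherwise the OR would corrupt overlapping bits.
        assert len(packed) == len(prepacked)
        return bytes(a | b for a, b in zip(packed, prepacked))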
v3dv_private.h
791 /* Prepacked TEXTURE_SHADER_STATE. It will be copied to the descriptor info
849 /* Prepacked TEXTURE_SHADER_STATE. */
2178 /* Prepacked per plane SAMPLER_STATE, that is referenced as part of the tmu
2282 * already prepacked, so here we are only storing those that need recheck
2322 /* Per-RT prepacked blend config packets */
2337 /* Packets prepacked during pipeline creation
/aosp_15_r20/external/pytorch/docs/source/
mobile_optimizer.rst
16 prepacked ops** (blocklisting option `mobile_optimizer.MobileOptimizerType.INSERT_FOLD_PREPACK_OPS…
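
Per the doc above, prepack insertion and folding can be opted out of via the blocklist option; a sketch mirroring the check_not hits at lines 141-142 of test_mobile_optimizer.py, where no prepacked ops remain after optimization (the optimization_blocklist parameter name is an assumption from current PyTorch, not shown in the listing):

    import torch
    from torch.utils.mobile_optimizer import MobileOptimizerType, optimize_for_mobile

    m = torch.jit.script(torch.nn.Linear(8, 16))
    # Blocklisting the pass keeps aten::linear in the optimized graph
    # instead of prepacked::linear_clamp_run.
    opt = optimize_for_mobile(
        m, optimization_blocklist={MobileOptimizerType.INSERT_FOLD_PREPACK_OPS}
    )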
/aosp_15_r20/external/ruy/ruy/
prepacked_cache_test.cc
56 // Allocate the prepacked matrix. in TEST()
82 // Allocate the prepacked matrix. in TEST()
108 // Allocate the prepacked matrix. in TEST()
142 // Allocate the prepacked matrix 1. in TEST()
149 // Allocate the prepacked matrix 2. in TEST()
156 // Allocate the prepacked matrix 3. in TEST()
170 // Allocate the prepacked matrix 4. in TEST()
prepacked_cache.h
27 // "Low effort" Least Recently Used Cache for Prepacked Matrices
28 // A cache mechanism for prepacked matrices that ejects oldest entries.
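
The cache described above is a plain LRU that ejects the oldest entries when over capacity. A conceptual Python analogue of that mechanism (not the ruy interface):

    from collections import OrderedDict

    class PrepackedCache:
        # Least-recently-used cache; oldest entries are ejected on overflow.
        def __init__(self, capacity: int):
            self.capacity = capacity
            self.entries: OrderedDict = OrderedDict()

        def get(self, key):
            if key in self.entries:
                self.entries.move_to_end(key)  # mark as most recently used
            return self.entries.get(key)

        def put(self, key, matrix):
            self.entries[key] = matrix
            self.entries.move_to_end(key)
            while len(self.entries) > self.capacity:
                self.entries.popitem(last=False)  # eject the oldest entry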
/aosp_15_r20/external/mesa3d/src/gallium/drivers/v3d/
v3d_cl.h
233 * comes from a prepacked buffer. So the use is similar to cl_emit, where you
234 * set individual values, and the rest of values come from prepacked.
237 * coming from the prepacked buffer, as it does an OR operation. That means
238 * that the prepacked buffer is usually reserved for values that we know that
241 #define cl_emit_with_prepacked(cl, packet, prepacked, name) \ argument
252 ((uint8_t *)cl_out)[_i] = packed[_i] | (prepacked)[_i]; \
/aosp_15_r20/external/pytorch/torch/csrc/jit/tensorexpr/operators/
quantization.cpp
328 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computeQuantizedConv1d() local
342 {qx, prepacked}, in computeQuantizedConv1d()
358 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computeQuantizedConv2d() local
372 {qx, prepacked}, in computeQuantizedConv2d()
388 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computeQuantizedConv2dRelu() local
402 {qx, prepacked}, in computeQuantizedConv2dRelu()
418 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computeQuantizedLinear() local
432 {qx, prepacked}, in computeQuantizedLinear()
448 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computeQuantizedLinearRelu() local
462 {qx, prepacked}, in computeQuantizedLinearRelu()
conv2d.cpp
443 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computePrepackedConv2dClampRun() local
445 ResultBuf, "nnc_prepacked_conv2d_clamp_run", {inp, prepacked}, {}); in computePrepackedConv2dClampRun()
462 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computePrepackedLinearClampRun() local
464 ResultBuf, "nnc_prepacked_linear_clamp_run", {inp, prepacked}, {}); in computePrepackedLinearClampRun()
482 const BufHandle& prepacked = std::get<BufHandle>(inputs[1]); in computeMkldnnPrepackedConvRun() local
484 ResultBuf, "nnc_mkldnn_prepacked_conv_run", {inp, prepacked}, {}); in computeMkldnnPrepackedConvRun()
/aosp_15_r20/external/pytorch/test/cpp/tensorexpr/
test_external_calls.cpp
504 // Create prepacked xnnpack context object. in TEST()
507 .findSchemaOrThrow("prepacked::linear_clamp_prepack", "") in TEST()
513 auto prepacked = linear_clamp_prepack_op.call( in TEST() local
536 llvm_codegen.call({input_buf, prepacked.get(), result_buf}); in TEST()
543 ir_eval.call({input_buf, prepacked.get(), result_buf}); in TEST()
578 // Create prepacked xnnpack context object. in TEST()
581 .findSchemaOrThrow("prepacked::conv2d_clamp_prepack", "") in TEST()
591 auto prepacked = conv2d_clamp_prepack_op.call( in TEST() local
621 llvm_codegen.call({input_buf, prepacked.get(), result_buf}); in TEST()
628 ir_eval.call({input_buf, prepacked.get(), result_buf}); in TEST()
/aosp_15_r20/external/pytorch/test/mobile/model_test/
model_ops.yaml
394 prepacked::conv2d_clamp_prepack: 2
395 prepacked::conv2d_clamp_run: 41
396 prepacked::conv2d_transpose_clamp_prepack: 1
397 prepacked::conv2d_transpose_clamp_run: 2
398 prepacked::linear_clamp_run: 36
coverage.yaml
647 - prepacked::conv2d_clamp_run
648 - prepacked::linear_clamp_run
1016 prepacked::conv2d_clamp_run: 32
1017 prepacked::linear_clamp_run: 26
1076 prepacked::conv2d_clamp_prepack: 2
1077 prepacked::conv2d_transpose_clamp_prepack: 1
1078 prepacked::conv2d_transpose_clamp_run: 1
update_production_ops.py
35 namespaces = ["aten", "prepacked", "prim", "quantized"]
/aosp_15_r20/external/pytorch/torch/_export/passes/
replace_quantized_ops_with_standard_ops_pass.py
425 Transformation for functions under prepacked namespace, where they share
445 func_args += torch.ops.prepacked.unpack_prepacked_sizes_conv2d(so)[2:]
557 …For prepacked::conv2d_clamp_run and prepacked::linear_clamp_run, we directly convert them to aten.…
582 elif namespace == "prepacked":
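
Line 445 above shows how the export pass recovers standard conv2d arguments from the opaque context: everything after the first two entries of unpack_prepacked_sizes_conv2d (which, judging from that usage, are the weight and bias sizes) is appended to the aten call. A hedged sketch; the prepack arguments after stride are assumed to follow the usual conv2d order (padding, dilation, groups), since the schema line is truncated above:

    import torch

    weight = torch.rand(4, 3, 3, 3)
    bias = torch.rand(4)
    ctx = torch.ops.prepacked.conv2d_clamp_prepack(
        weight, bias, [1, 1], [0, 0], [1, 1], 1)  # stride, padding, dilation, groups
    sizes = torch.ops.prepacked.unpack_prepacked_sizes_conv2d(ctx)
    # Per the pass excerpt, sizes[2:] are reused verbatim as conv2d args.
    print(sizes[2:])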
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
symbolic_shape_registry.cpp
26 ops.prepacked.unpack_prepacked_sizes_conv2d(conv2dOpContext),
34 ops.prepacked.unpack_prepacked_sizes_linear(linearOpContext),
63 …{"prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack)… in conditionally_defined_ops()
64 …{"prepacked::linear_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.LinearOpContext W_prepack)… in conditionally_defined_ops()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/
pack.h
101 // Weights need to be prepacked with the zero points, in their tail space in pytorch_pack_q8gemm_wrq()
224 // Weights need to be prepacked with the zero points, in their tail space in pytorch_pack_q8conv_wrq()
346 // Weights need to be prepacked with the zero points, in their tail space in pytorch_pack_q8deconv_wrq()
/aosp_15_r20/external/pytorch/test/forward_backward_compatibility/
check_forward_backward_compatibility.py
87 ("prepacked::unpack_prepacked_sizes_conv2d", datetime.date(9999, 1, 1)),
88 ("prepacked::unpack_prepacked_sizes_linear", datetime.date(9999, 1, 1)),
/aosp_15_r20/external/pytorch/torch/_inductor/
mkldnn_ir.py
73 # The size of prepacked_weight is the prepacked weight size of deconv:
116 # When transposed, the size of the prepacked oneDNN weight is different
153 …# In static shape cases, since weight is prepacked, we'll always force output to be channels last …
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qnnpack/src/
fc-unpack.cc
27 // Convert prepacked weight to original weight / bias. in unpackWeights()
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
cpu_backend_context.h
112 // sometimes provide speedups by caching the "prepacked" data, for some
