Searched full:linear_prepack (Results 1 – 25 of 29) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
qlinear_prepack.cpp
69 "The weight tensor for quantized::linear_prepack (fbgemm) should" in prepack()
144 "quantized::linear_prepack (qnnpack): Weight tensor rank should be == 2"); in prepack()
156 "quantized::linear_prepack (qnnpack): Given weight of size ", in prepack()
222 "The weight tensor for quantized::linear_prepack (onednn) should" in prepack()
233 "quantized::linear_prepack: ONEDNN only supports symmetric quantization of weight," in prepack()
244 "quantized::linear_prepack: ONEDNN only supports symmetric quantization of weight," in prepack()
374 // 2. Use quantized::linear_prepack to prepack the weight and bias in wrapped_quantized_linear()
383 .findSchemaOrThrow("quantized::linear_prepack", "") in wrapped_quantized_linear()
450 // 2. Use quantized::linear_prepack to prepack the weight and bias in _wrapped_linear_prepack()
461 .findSchemaOrThrow("quantized::linear_prepack", "") in _wrapped_linear_prepack()
[all …]
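The fbgemm/qnnpack/onednn checks quoted above gate what quantized::linear_prepack accepts: a rank-2 quantized weight and, for onednn, symmetric weight quantization. A minimal sketch of a call that satisfies those constraints on a CPU build; the shapes, scale, and zero point are illustrative only.

    import torch

    # Illustrative (out_features, in_features) weight; rank must be 2 per the
    # checks in qlinear_prepack.cpp above.
    w = torch.randn(4, 8)
    b = torch.randn(4)
    qw = torch.quantize_per_tensor(w, scale=0.1, zero_point=0, dtype=torch.qint8)

    # Packs the quantized weight (and optional float bias) for the active CPU
    # backend and returns a LinearPackedParamsBase TorchBind object.
    packed = torch.ops.quantized.linear_prepack(qw, b)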
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/
metal_rewrite.cpp
33 %packed_weight_bias = metal_prepack::linear_prepack( in insertPrePackedLinearOp()
74 …packed_weight_bias : __torch__.torch.classes.metal.LinearOpContext = metal_prepack::linear_prepack( in fuseReluWithPackedOps()
81 %packed_weight_bias = metal_prepack::linear_prepack( in fuseReluWithPackedOps()
116 %packed_weight_bias = metal_prepack::linear_prepack( in fuseReluWithPackedOps()
145 …ias : __torch__.torch.classes.metal.LinearOpContext = metal_prepack::linear_prepack(%weight, %bias… in fuseHardtanhWithPackedOps()
151 %packed_weight_bias = metal_prepack::linear_prepack( in fuseHardtanhWithPackedOps()
194 %packed_weight_bias = metal_prepack::linear_prepack( in fuseHardtanhWithPackedOps()
231 (n->kind() == Symbol::fromQualString("metal_prepack::linear_prepack"))); in metalFoldPrePackingOps()
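metal_rewrite.cpp inserts metal_prepack::linear_prepack and fuses a following relu/hardtanh into it; these passes are normally reached through mobile optimization rather than called directly. A hedged sketch, assuming a Metal-enabled build (the toy model is not from the hits above):

    import torch
    from torch.utils.mobile_optimizer import optimize_for_mobile

    model = torch.jit.script(
        torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU())
    )

    # With backend="metal", optimize_for_mobile runs the rewrite passes above,
    # replacing aten::linear (and folding the trailing relu) with a prepacked
    # metal_prepack::linear_prepack / linear_run pair. This requires a build
    # with Metal support; on other builds the backend is unavailable.
    mobile_model = optimize_for_mobile(model, backend="metal")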
/aosp_15_r20/external/pytorch/test/quantization/jit/
test_ondevice_quantization.py
112 node.kind() == "quantized::linear_prepack"
113 ), "Node must corresponds to linear_prepack."
262 linear_prepack = 0
268 if maybe_packed_param.kind() == "quantized::linear_prepack":
269 linear_prepack += 1
279 self.assertEqual(linear_prepack, num_nodes)
296 if maybe_packed_param.kind() == "quantized::linear_prepack":
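The on-device quantization test walks the graph and counts prepack nodes by kind. A self-contained sketch of the same check on an arbitrary scripted module; the module below is only a stand-in, and a plain float model will report zero:

    import torch

    scripted = torch.jit.script(torch.nn.Linear(8, 4))

    # Count quantized::linear_prepack nodes by kind, mirroring the test above.
    num_prepack = sum(
        1
        for node in scripted.graph.nodes()
        if node.kind() == "quantized::linear_prepack"
    )
    print(num_prepack)  # 0 for an unquantized model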
test_deprecated_jit_quant.py
164 self._packed_weight = torch.ops.quantized.linear_prepack(qweight)
178 self._packed_weight = torch.ops.quantized.linear_prepack(state[0])
187 self._packed_weight = torch.ops.quantized.linear_prepack(w)
test_quantize_jit.py
1295 # and linear_prepack is folded
1297 "quantized::linear_prepack"
1659 FileCheck().check_not("quantized::linear_prepack").run(model.graph)
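test_quantize_jit.py uses FileCheck to assert that prepack calls are constant-folded away after finalization. A minimal sketch of the same assertion; the float model here is only a placeholder so the snippet runs on its own:

    import torch
    from torch.testing import FileCheck

    model = torch.jit.script(torch.nn.Linear(8, 4))

    # Passes as long as no quantized::linear_prepack node appears in the graph,
    # which is what the folding pass guarantees for a finalized quantized model.
    FileCheck().check_not("quantized::linear_prepack").run(model.graph)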
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/
register_packed_params.cpp
16 n->kind() == Symbol::fromQualString("quantized::linear_prepack") || in isPrepackNode()
87 // method, is when the linear_prepack op will be executed and at that in RegisterPrePackParams()
90 // In order to add the output of linear_prepack, we now have to do in RegisterPrePackParams()
100 // quantize_forward will only have, for example, linear_prepack and in RegisterPrePackParams()
105 // dynamic_linear and quantized_forward will not have any linear_prepack in RegisterPrePackParams()
finalize.cpp
159 n->kind() == Symbol::fromQualString("quantized::linear_prepack") || in FoldQuantizedPrepackingOps()
176 n->kind() == Symbol::fromQualString("quantized::linear_prepack") || in RegisterPrePackingParams()
quantization_patterns.h
1155 %packed_params = quantized::linear_prepack(%w_quant, %b) in linear_prepack_unpack_patterns()
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/
MetalPrepackOpRegister.cpp
89 …TORCH_SELECTIVE_SCHEMA("metal_prepack::linear_prepack(Tensor W, Tensor? B, Scalar? output_min=None… in TORCH_LIBRARY()
116 static c10::intrusive_ptr<LinearOpContext> linear_prepack( in linear_prepack() function
127 m.impl(TORCH_SELECTIVE_NAME("metal_prepack::linear_prepack"), TORCH_FN(linear_prepack)); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/
rnn.py
47 packed_weight = torch.ops.quantized.linear_prepack(qweight, bias)
168 packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih)
169 packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh)
329 packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih)
330 packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh)
431 packed_weight = torch.ops.quantized.linear_prepack(qweight, b)
982 packed_weight_ih = torch.ops.quantized.linear_prepack(
985 packed_weight_hh = torch.ops.quantized.linear_prepack(
/aosp_15_r20/external/pytorch/torch/_export/passes/
replace_quantized_ops_with_standard_ops_pass.py
358 # Using LinearPrepackParam from linear_prepack.
551 One exception in the transformation is conv_prepack and linear_prepack. Those calls pack
553 …During transformation, we directly skip transforming conv_prepack or linear_prepack. We check whet…
554 …quantized::conv2d or linear is from conv_prepack or linear_prepack. If it is, we then inline those…
576 "linear_prepack",
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cudnn/
LinearPrepack.cpp
52 …m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8Cudnn::run… in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/pytorch/test/quantization/core/
test_quantized_op.py
2855 w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
2871 w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
3130 qlinear_prepack = torch.ops.quantized.linear_prepack
3506 packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
3507 packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
3637 packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
3638 packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
3776 qlinear_prepack = torch.ops.quantized.linear_prepack
3946 qlinear_prepack = torch.ops.quantized.linear_prepack
4065 qlinear_prepack = torch.ops.quantized.linear_prepack
[all …]
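The kernel tests above pair linear_prepack with the static quantized linear op, which takes the packed params plus the requested output scale and zero point. A minimal end-to-end sketch in that style; all scales, zero points, and shapes are made up for illustration:

    import torch

    x = torch.randn(2, 8)
    w = torch.randn(4, 8)
    b = torch.randn(4)

    qx = torch.quantize_per_tensor(x, scale=0.05, zero_point=64, dtype=torch.quint8)
    qw = torch.quantize_per_tensor(w, scale=0.1, zero_point=0, dtype=torch.qint8)

    w_packed = torch.ops.quantized.linear_prepack(qw, b)

    # quantized::linear consumes the packed params plus the desired output
    # scale/zero point; linear_dynamic would take the float input instead.
    out = torch.ops.quantized.linear(qx, w_packed, 0.2, 64)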
/aosp_15_r20/external/pytorch/test/ao/sparsity/
test_kernels.py
57 dense_prepack = torch.ops.quantized.linear_prepack
/aosp_15_r20/external/pytorch/torch/onnx/
symbolic_caffe2.py
61 def linear_prepack(g: jit_utils.GraphContext, weight, bias): function
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/
linear.py
38 self._packed_params = torch.ops.quantized.linear_prepack(weight, bias)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/
library.cpp
189 …m.def(TORCH_SELECTIVE_SCHEMA("quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.tor… in TORCH_LIBRARY()
247 …m.def(TORCH_SELECTIVE_SCHEMA("_quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.to… in TORCH_LIBRARY()
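The schema at line 189 takes a quantized weight W and an optional float bias B (default None) and returns a LinearPackedParamsBase TorchBind object. A small sketch of the call and the matching unpack round trip; shapes and scales are illustrative:

    import torch

    qw = torch.quantize_per_tensor(torch.randn(4, 8), 0.1, 0, torch.qint8)

    # B defaults to None per the schema above; pass a float bias when available.
    packed = torch.ops.quantized.linear_prepack(qw)

    # linear_unpack recovers the original (quantized weight, optional bias).
    w_unpacked, b_unpacked = torch.ops.quantized.linear_unpack(packed)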
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/
_lower_to_native_backend.py
347 torch._ops.ops.quantized.linear_prepack,
908 # kwargs of the func node are needed for prepack op (i.e., quantized::linear_prepack)
utils.py
151 return torch.ops.quantized.linear_prepack
/aosp_15_r20/external/pytorch/test/mobile/model_test/
model_ops.yaml
436 quantized::linear_prepack: 69
coverage.yaml
1090 quantized::linear_prepack: 29
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/
functional.py
473 _packed_params = torch.ops.quantized.linear_prepack(weight, bias)
/aosp_15_r20/external/pytorch/test/cpp/jit/
test_misc.cpp
3138 …_torch__.torch.classes.quantized.LinearPackedParamsBase = quantized::linear_prepack(%weight_q, %no… in TEST()
3147 testing::FileCheck().check_not("quantized::linear_prepack")->run(*graph); in TEST()
/aosp_15_r20/external/pytorch/torch/csrc/jit/frontend/
ir_emitter.cpp
3135 // quantized::linear_prepack to quantized::linear_prepack_legacy. We in emitSingleAssignment()
3136 // changed linear_prepack to return a TorchBind class and not a in emitSingleAssignment()
3147 Symbol::fromQualString("quantized::linear_prepack")) { in emitSingleAssignment()
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/
passes.cpp
1444 %packed_params = quantized::linear_prepack(%weight, %bias) in PrepackWeights()
