/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp
    69    "The weight tensor for quantized::linear_prepack (fbgemm) should"  in prepack()
    144   "quantized::linear_prepack (qnnpack): Weight tensor rank should be == 2");  in prepack()
    156   "quantized::linear_prepack (qnnpack): Given weight of size ",  in prepack()
    222   "The weight tensor for quantized::linear_prepack (onednn) should"  in prepack()
    233   "quantized::linear_prepack: ONEDNN only supports symmetric quantization of weight,"  in prepack()
    244   "quantized::linear_prepack: ONEDNN only supports symmetric quantization of weight,"  in prepack()
    374   // 2. Use quantized::linear_prepack to prepack the weight and bias  in wrapped_quantized_linear()
    383   .findSchemaOrThrow("quantized::linear_prepack", "")  in wrapped_quantized_linear()
    450   // 2. Use quantized::linear_prepack to prepack the weight and bias  in _wrapped_linear_prepack()
    461   .findSchemaOrThrow("quantized::linear_prepack", "")  in _wrapped_linear_prepack()
    [all …]

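The checks quoted above (rank-2 weight, symmetric weight quantization for onednn) apply to the tensor passed to quantized::linear_prepack. A minimal sketch, not taken from the indexed file, of invoking the op from Python with a 2-D qint8 weight, assuming a build where the fbgemm or qnnpack backend is available:

```python
import torch

# Hedged sketch: every backend indexed above requires a rank-2 weight.
w = torch.randn(4, 8)  # (out_features, in_features)
qw = torch.quantize_per_tensor(w, scale=0.1, zero_point=0, dtype=torch.qint8)
bias = torch.randn(4)  # float bias; may also be None

packed = torch.ops.quantized.linear_prepack(qw, bias)

# The packed params object is then consumed by quantized::linear.
x = torch.randn(2, 8)
qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
y = torch.ops.quantized.linear(qx, packed, 0.2, 0)  # output scale / zero_point
```
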
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/metal_rewrite.cpp
    33    %packed_weight_bias = metal_prepack::linear_prepack(  in insertPrePackedLinearOp()
    74    …packed_weight_bias : __torch__.torch.classes.metal.LinearOpContext = metal_prepack::linear_prepack(  in fuseReluWithPackedOps()
    81    %packed_weight_bias = metal_prepack::linear_prepack(  in fuseReluWithPackedOps()
    116   %packed_weight_bias = metal_prepack::linear_prepack(  in fuseReluWithPackedOps()
    145   …ias : __torch__.torch.classes.metal.LinearOpContext = metal_prepack::linear_prepack(%weight, %bias…  in fuseHardtanhWithPackedOps()
    151   %packed_weight_bias = metal_prepack::linear_prepack(  in fuseHardtanhWithPackedOps()
    194   %packed_weight_bias = metal_prepack::linear_prepack(  in fuseHardtanhWithPackedOps()
    231   (n->kind() == Symbol::fromQualString("metal_prepack::linear_prepack")));  in metalFoldPrePackingOps()

/aosp_15_r20/external/pytorch/test/quantization/jit/test_ondevice_quantization.py
    112   node.kind() == "quantized::linear_prepack"
    113   ), "Node must corresponds to linear_prepack."
    262   linear_prepack = 0
    268   if maybe_packed_param.kind() == "quantized::linear_prepack":
    269   linear_prepack += 1
    279   self.assertEqual(linear_prepack, num_nodes)
    296   if maybe_packed_param.kind() == "quantized::linear_prepack":

/aosp_15_r20/external/pytorch/test/quantization/jit/test_deprecated_jit_quant.py
    164   self._packed_weight = torch.ops.quantized.linear_prepack(qweight)
    178   self._packed_weight = torch.ops.quantized.linear_prepack(state[0])
    187   self._packed_weight = torch.ops.quantized.linear_prepack(w)

/aosp_15_r20/external/pytorch/test/quantization/jit/test_quantize_jit.py
    1295  # and linear_prepack is folded
    1297  "quantized::linear_prepack"
    1659  FileCheck().check_not("quantized::linear_prepack").run(model.graph)

/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/register_packed_params.cpp
    16    n->kind() == Symbol::fromQualString("quantized::linear_prepack") ||  in isPrepackNode()
    87    // method, is when the linear_prepack op will be executed and at that  in RegisterPrePackParams()
    90    // In order to add the output of linear_prepack, we now have to do  in RegisterPrePackParams()
    100   // quantize_forward will only have, for example, linear_prepack and  in RegisterPrePackParams()
    105   // dynamic_linear and quantized_forward will not have any linear_prepack  in RegisterPrePackParams()

/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/finalize.cpp
    159   n->kind() == Symbol::fromQualString("quantized::linear_prepack") ||  in FoldQuantizedPrepackingOps()
    176   n->kind() == Symbol::fromQualString("quantized::linear_prepack") ||  in RegisterPrePackingParams()

/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/quantization_patterns.h
    1155  %packed_params = quantized::linear_prepack(%w_quant, %b)  in linear_prepack_unpack_patterns()

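The pattern at quantization_patterns.h:1155 pairs quantized::linear_prepack with its inverse op. A hedged round-trip sketch, not from the indexed header, assuming a per-tensor qint8 weight and the fbgemm backend:

```python
import torch

# Hedged sketch of the prepack/unpack pair matched by
# linear_prepack_unpack_patterns(): unpack recovers the quantized
# weight and optional bias from the packed-params object.
qw = torch.quantize_per_tensor(torch.randn(4, 8), 0.1, 0, torch.qint8)
b = torch.randn(4)

packed = torch.ops.quantized.linear_prepack(qw, b)
w_back, b_back = torch.ops.quantized.linear_unpack(packed)

# Assumption: with fbgemm the quantized weight round-trips exactly.
assert torch.equal(w_back.int_repr(), qw.int_repr())
```
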
/aosp_15_r20/external/pytorch/aten/src/ATen/native/metal/MetalPrepackOpRegister.cpp
    89    …TORCH_SELECTIVE_SCHEMA("metal_prepack::linear_prepack(Tensor W, Tensor? B, Scalar? output_min=None…  in TORCH_LIBRARY()
    116   static c10::intrusive_ptr<LinearOpContext> linear_prepack(  in linear_prepack()  [function]
    127   m.impl(TORCH_SELECTIVE_NAME("metal_prepack::linear_prepack"), TORCH_FN(linear_prepack));  in TORCH_LIBRARY_IMPL()

/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/rnn.py
    47    packed_weight = torch.ops.quantized.linear_prepack(qweight, bias)
    168   packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih)
    169   packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh)
    329   packed_ih = torch.ops.quantized.linear_prepack(w_ih, b_ih)
    330   packed_hh = torch.ops.quantized.linear_prepack(w_hh, b_hh)
    431   packed_weight = torch.ops.quantized.linear_prepack(qweight, b)
    982   packed_weight_ih = torch.ops.quantized.linear_prepack(
    985   packed_weight_hh = torch.ops.quantized.linear_prepack(

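The rnn.py hits above pack the input-hidden (w_ih) and hidden-hidden (w_hh) weights separately. A hedged end-to-end sketch of one common way to reach those calls, via dynamic quantization of an LSTM (the APIs used are standard PyTorch; the specific workflow is illustrative, not code from rnn.py):

```python
import torch

# Hedged sketch: dynamically quantizing an LSTM routes each per-layer
# w_ih / w_hh through torch.ops.quantized.linear_prepack, as the
# rnn.py matches above show.
lstm = torch.nn.LSTM(input_size=8, hidden_size=8, num_layers=1)
qlstm = torch.ao.quantization.quantize_dynamic(
    lstm, {torch.nn.LSTM}, dtype=torch.qint8
)

x = torch.randn(5, 1, 8)  # (seq_len, batch, input_size)
out, (h, c) = qlstm(x)
```
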
/aosp_15_r20/external/pytorch/torch/_export/passes/replace_quantized_ops_with_standard_ops_pass.py
    358   # Using LinearPrepackParam from linear_prepack.
    551   One exception in the transformation is conv_prepack and linear_prepack. Those calls pack
    553   …During transformation, we directly skip transforming conv_prepack or linear_prepack. We check whet…
    554   …quantized::conv2d or linear is from conv_prepack or linear_prepack. If it is, we then inline those…
    576   "linear_prepack",

/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cudnn/LinearPrepack.cpp
    52    …m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8Cudnn::run…  in TORCH_LIBRARY_IMPL()

/aosp_15_r20/external/pytorch/test/quantization/core/test_quantized_op.py
    2855  w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
    2871  w_packed = torch.ops.quantized.linear_prepack(qw, bias_float)
    3130  qlinear_prepack = torch.ops.quantized.linear_prepack
    3506  packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
    3507  packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
    3637  packed_ih = torch.ops.quantized.linear_prepack(Wq1, b1)
    3638  packed_hh = torch.ops.quantized.linear_prepack(Wq2, b2)
    3776  qlinear_prepack = torch.ops.quantized.linear_prepack
    3946  qlinear_prepack = torch.ops.quantized.linear_prepack
    4065  qlinear_prepack = torch.ops.quantized.linear_prepack
    [all …]

/aosp_15_r20/external/pytorch/test/ao/sparsity/test_kernels.py
    57    dense_prepack = torch.ops.quantized.linear_prepack

/aosp_15_r20/external/pytorch/torch/onnx/symbolic_caffe2.py
    61    def linear_prepack(g: jit_utils.GraphContext, weight, bias):  [function]

/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/linear.py
    38    self._packed_params = torch.ops.quantized.linear_prepack(weight, bias)

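The linear.py hit shows the quantized Linear module storing prepacked params when its weight and bias are set. A hedged module-level sketch; the default-initialized module (scale 1.0, zero point 0) is used purely for illustration, not taken from the indexed file:

```python
import torch
from torch.ao.nn.quantized import Linear as QuantizedLinear

# Hedged sketch: constructing the quantized Linear module triggers the
# linear_prepack call seen above; forward then runs quantized::linear
# against the stored packed params.
m = QuantizedLinear(in_features=8, out_features=4)

x = torch.quantize_per_tensor(torch.randn(2, 8), scale=1.0, zero_point=0,
                              dtype=torch.quint8)
y = m(x)  # quantized output tensor
```
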
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/library.cpp
    189   …m.def(TORCH_SELECTIVE_SCHEMA("quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.tor…  in TORCH_LIBRARY()
    247   …m.def(TORCH_SELECTIVE_SCHEMA("_quantized::linear_prepack(Tensor W, Tensor? B=None) -> __torch__.to…  in TORCH_LIBRARY()

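Both schemas declare the bias as `Tensor? B=None`, so prepacking without a bias is valid. A short hedged sketch of the two call forms:

```python
import torch

# Hedged sketch of quantized::linear_prepack(Tensor W, Tensor? B=None):
# the bias argument is optional and defaults to None.
qw = torch.quantize_per_tensor(torch.randn(4, 8), 0.1, 0, torch.qint8)

packed_no_bias = torch.ops.quantized.linear_prepack(qw)        # B defaults to None
packed_none    = torch.ops.quantized.linear_prepack(qw, None)  # explicit None
```
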
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/_lower_to_native_backend.py
    347   torch._ops.ops.quantized.linear_prepack,
    908   # kwargs of the func node are needed for prepack op (i.e., quantized::linear_prepack)

/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/utils.py
    151   return torch.ops.quantized.linear_prepack

/aosp_15_r20/external/pytorch/test/mobile/model_test/model_ops.yaml
    436   quantized::linear_prepack: 69

/aosp_15_r20/external/pytorch/test/mobile/model_test/coverage.yaml
    1090  quantized::linear_prepack: 29

/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/functional.py
    473   _packed_params = torch.ops.quantized.linear_prepack(weight, bias)

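The functional.py hit shows that torch.ao.nn.quantized.functional.linear prepacks the weight on every call rather than caching it. A hedged usage sketch (the performance note is an inference from the hit above, not a claim from the file):

```python
import torch
import torch.ao.nn.quantized.functional as qF

# Hedged sketch: qF.linear prepacks `weight` internally on each call
# (the linear_prepack hit above), which is convenient but slower than
# reusing a prepacked module when the same weight is applied repeatedly.
qx = torch.quantize_per_tensor(torch.randn(2, 8), 0.1, 0, torch.quint8)
qw = torch.quantize_per_tensor(torch.randn(4, 8), 0.1, 0, torch.qint8)
b = torch.randn(4)

y = qF.linear(qx, qw, b, scale=0.2, zero_point=0)
```
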
/aosp_15_r20/external/pytorch/test/cpp/jit/test_misc.cpp
    3138  …_torch__.torch.classes.quantized.LinearPackedParamsBase = quantized::linear_prepack(%weight_q, %no…  in TEST()
    3147  testing::FileCheck().check_not("quantized::linear_prepack")->run(*graph);  in TEST()

/aosp_15_r20/external/pytorch/torch/csrc/jit/frontend/ir_emitter.cpp
    3135  // quantized::linear_prepack to quantized::linear_prepack_legacy. We  in emitSingleAssignment()
    3136  // changed linear_prepack to return a TorchBind class and not a  in emitSingleAssignment()
    3147  Symbol::fromQualString("quantized::linear_prepack")) {  in emitSingleAssignment()

/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/passes.cpp
    1444  %packed_params = quantized::linear_prepack(%weight, %bias)  in PrepackWeights()