
Searched full:embedding_bag (Results 1 – 25 of 65) sorted by relevance


/aosp_15_r20/external/pytorch/aten/src/ATen/native/
EmbeddingBag.cpp
143 "embedding_bag: Expected idx >= 0 && idx < num_embeddings but found idx to be ", in index_select_add()
337 "embedding_bag: Expected idx >= 0 && idx < num_embeddings but found idx to be ", in index_select_add()
473 "embedding_bag: Expected idx >= 0 && idx < num_embeddings but found idx to be ", in index_select_add()
534 "embedding_bag: Expected idx >= 0 && idx < num_embeddings but found idx to be ", in index_select_scale_add()
718 "embedding_bag: Expected idx >= 0 && idx < num_embeddings but found idx to be ", in index_select_scale_add()
851 "embedding_bag: Expected idx >= 0 && idx < num_embeddings but found idx to be ", in index_select_scale_add()
878 checkScalarTypes("embedding_bag", indices_arg, {kLong, kInt}); in check_arguments()
880 checkScalarTypes("embedding_bag", offsets_arg, {kLong, kInt}); in check_arguments()
881 checkSameType("embedding_bag", indices_arg, offsets_arg); in check_arguments()
884 "embedding_bag", weight_arg, {kHalf, kBFloat16, kFloat, kDouble}); in check_arguments()
[all …]
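The checks quoted above from EmbeddingBag.cpp require indices and offsets to be int32 or int64 tensors of the same dtype, the weight to be a floating-point type, and every index to fall inside [0, num_embeddings). Below is a minimal sketch of how the range check surfaces from Python; the RuntimeError behaviour is assumed from the TORCH_CHECK messages above, not copied from the file.

```python
import torch
import torch.nn.functional as F

weight = torch.randn(10, 3)                    # float weights, as check_arguments() requires
offsets = torch.tensor([0], dtype=torch.long)  # same integer dtype as the indices

# Index 10 is out of range for a 10-row table, so the
# "Expected idx >= 0 && idx < num_embeddings" check fires as a RuntimeError.
bad_indices = torch.tensor([0, 10], dtype=torch.long)
try:
    F.embedding_bag(bad_indices, weight, offsets)
except RuntimeError as err:
    print(err)
```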
/aosp_15_r20/external/pytorch/torch/ao/nn/qat/modules/
embedding_ops.py
110 embedding_bag = torch.nn.Embedding(
120 embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
121 embedding_bag.train(self.training)
122 return embedding_bag
181 return F.embedding_bag(
234 embedding_bag = torch.nn.EmbeddingBag(
246 embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
247 embedding_bag.train(self.training)
248 return embedding_bag
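The embedding_ops.py hits show the qat-to-float conversion pattern: build a fresh float module, reuse the trained weight as a new Parameter, and carry over the training flag. A short sketch of that pattern follows; the helper name is hypothetical and `qat_module` is assumed to expose `weight` and `training` like the qat module above.

```python
import torch

def to_float_embedding_bag(qat_module, num_embeddings, embedding_dim):
    # Mirror of the pattern above: new float EmbeddingBag, detached weight, same train/eval state.
    embedding_bag = torch.nn.EmbeddingBag(num_embeddings, embedding_dim)
    embedding_bag.weight = torch.nn.Parameter(qat_module.weight.detach())
    embedding_bag.train(qat_module.training)
    return embedding_bag
```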
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
embedding.h
89 inline Tensor embedding_bag( in embedding_bag() function
107 "embedding_bag: If per_sample_weights (", in embedding_bag()
159 "embedding_bag: per_sample_weights was not null. ", in embedding_bag()
164 return std::get<0>(torch::embedding_bag( in embedding_bag()
179 /// https://pytorch.org/docs/main/nn.functional.html#torch.nn.functional.embedding_bag
188 /// F::embedding_bag(input, weight,
191 inline Tensor embedding_bag(
195 return detail::embedding_bag(
/aosp_15_r20/external/pytorch/test/nn/
test_embedding.py
202 res_F = F.embedding_bag(a, embeddings)
208 res_F = F.embedding_bag(a, embeddings, padding_idx=2)
225 F.embedding_bag(a, embeddings, padding_idx=padding_idx)
231 F.embedding_bag(a, embeddings, padding_idx=padding_idx)
482 # Check correctness of torch.nn.functional.embedding_bag forward and
522 # embedding_bag requires first entry of offsets to be 0
601 bag = torch.nn.functional.embedding_bag(
611 bag_check = torch.nn.functional.embedding_bag(
635 # Check correctness of torch.nn.functional.embedding_bag forward and
642 # Use a Python implementation of embedding_bag with padding_idx support
[all …]
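test_embedding.py exercises two constraints that the snippets only hint at: the first entry of offsets must be 0, and rows selected at padding_idx are skipped when a bag is reduced. A minimal sketch of both, assumed from the quoted comments rather than copied from the test:

```python
import torch
import torch.nn.functional as F

embeddings = torch.randn(6, 4)
a = torch.tensor([0, 2, 2, 3, 5], dtype=torch.long)

# offsets must start at 0: bag 0 is a[0:3], bag 1 is a[3:5].
offsets = torch.tensor([0, 3], dtype=torch.long)

res = F.embedding_bag(a, embeddings, offsets, mode="sum")

# With padding_idx=2 the two occurrences of index 2 contribute nothing,
# so bag 0 reduces over row 0 only.
res_pad = F.embedding_bag(a, embeddings, offsets, mode="sum", padding_idx=2)
print(res.shape, res_pad.shape)  # both torch.Size([2, 4])
```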
/aosp_15_r20/external/pytorch/torch/csrc/jit/passes/quantization/
helper.cpp
32 "embedding_bag",
52 "embedding_bag",
64 "embedding_bag",
67 "embedding_bag",
280 // ate::embedding_bag(%weight, %input, %offsets, %scale_grad_by_freq, in isWeight()
289 {"embedding_bag", 0}}), in isWeight()
290 // embedding_bag - prim::CallFunction(%func, %input.1, %weight, in isWeight()
293 CallFuncArgs({{"linear", 2}, {"embedding_bag", 2}})); in isWeight()
314 AtenFuncArgs({{"embedding_bag", 2}, {"embedding_bag", 6}}), in isEmbeddingBagNonInput()
insert_quant_dequant.cpp
343 // embedding_bag operator. in insertEmbeddingBagOps()
345 if (matchCallFuncToUse(use, "embedding_bag", 2) || in insertEmbeddingBagOps()
346 matchAtenFuncToUse(use, "embedding_bag", 0)) { in insertEmbeddingBagOps()
360 embedding_bag_float_op->kind() == Symbol::aten("embedding_bag"); in insertEmbeddingBagOps()
370 "Expecting FP aten::embedding_bag operator to have 9 inputs"); in insertEmbeddingBagOps()
383 "Expecting F.embedding_bag operator to have 12 inputs"); in insertEmbeddingBagOps()
399 "Expected aten::embedding_bag padding_idx input to be None"); in insertEmbeddingBagOps()
411 "Expected aten::embedding_bag to only have use for its first output."); in insertEmbeddingBagOps()
445 // Temporary solution to quantize embedding_bag operators. Will be re-written in insertQuantizationOps()
446 // once we support quantization of embedding_bag weights. in insertQuantizationOps()
/aosp_15_r20/external/pytorch/torch/onnx/
symbolic_opset10.py
38 "embedding_bag",
588 @_onnx_symbolic("aten::embedding_bag")
590 def embedding_bag( function
604 "embedding_bag with scale_grad_by_freq for training mode"
607 raise RuntimeError("embedding_bag with padding_idx")
610 "Export of embedding_bag with dynamic input/offsets shape is not supported in opset 10. "
664 … # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
665 … the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
669 "embedding_bag with unknown shape of offsets for opset 10 is not supported. "
symbolic_opset18.py
227 @_onnx_symbolic("aten::embedding_bag")
229 def embedding_bag( function
symbolic_helper.py
1954 "embedding_bag with scale_grad_by_freq for training mode"
1957 raise RuntimeError("embedding_bag with padding_idx")
2041 # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.
2042 … the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.
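The opset-10 symbolic above rejects scale_grad_by_freq in training mode, padding_idx, and dynamic input/offsets shapes, and only the first of the four aten::embedding_bag outputs survives the export. A hedged export sketch that sidesteps those limits by using a later opset and no padding_idx (the opset choice and file name are illustrative, not prescribed by the source):

```python
import torch

model = torch.nn.EmbeddingBag(10, 4, mode="sum")  # no padding_idx
indices = torch.tensor([1, 2, 4, 5], dtype=torch.long)
offsets = torch.tensor([0, 2], dtype=torch.long)

model.eval()
# Per the opset-10 error message above, opset 11 or higher is needed for
# dynamic input/offsets shapes.
torch.onnx.export(model, (indices, offsets), "embedding_bag.onnx", opset_version=11)
```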
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/
embedding_bag.py
21 @custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding_bag)
24 Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding_bag``.
296 torch.nn.functional.embedding_bag,
385 torch.nn.functional.embedding_bag(
396 result = torch.nn.functional.embedding_bag(
/aosp_15_r20/external/pytorch/test/distributed/_shard/sharded_tensor/ops/
test_embedding_bag.py
135 # Validate for torch.nn.functional.embedding_bag version.
136 local_output = torch.nn.functional.embedding_bag(
147 sharded_output = torch.nn.functional.embedding_bag(
/aosp_15_r20/external/pytorch/benchmarks/static_runtime/
test_static_runtime.cc
479 x, y, z, _ = torch.embedding_bag(a, b, c) in TEST()
485 x, y, z, _ = torch.embedding_bag(a, b, c, False, 1) in TEST()
491 x, y, z, _ = torch.embedding_bag(a, b, c, False, 2) in TEST()
497 x, y, z, _ = torch.embedding_bag(a, b, c, False, 0, False, None, True) in TEST()
503 x, y, z, _ = torch.embedding_bag(a, b, c, False, 1, False, None, True) in TEST()
509 x, y, z, _ = torch.embedding_bag(a, b, c, False, 2, False, None, True) in TEST()
539 # The outputs of embedding_bag become an intermediate tensors in TEST()
541 x, y, z, _ = torch.embedding_bag(a, b, c) in TEST()
567 …%y0 : Tensor, %y1 : Tensor, %y2 : Tensor, %y3 : Tensor = aten::embedding_bag(%weight, %indices, %o… in TEST()
575 .check("static_runtime::embedding_bag") in TEST()
[all …]
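Both the static-runtime tests and the ONNX symbolics lean on the shape of the low-level op: aten::embedding_bag (reachable as torch.embedding_bag, weight first) returns four tensors, output, offset2bag, bag_size, and max_indices, and only the first is consumed by nn.EmbeddingBag / F.embedding_bag. A small sketch of that call, assuming the eager binding behaves like the scripted calls above:

```python
import torch

weight = torch.randn(10, 3)
indices = torch.tensor([1, 2, 4, 5, 4, 3], dtype=torch.long)
offsets = torch.tensor([0, 3], dtype=torch.long)

# Low-level op: weight comes first and four tensors come back;
# the wrappers keep only `output` and discard the bookkeeping tensors.
output, offset2bag, bag_size, max_indices = torch.embedding_bag(weight, indices, offsets)
print(output.shape)  # torch.Size([2, 3])
```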
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
EmbeddingBag.cu
308 // See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
331 // See NOTE [ embedding_bag Native Functions ] in native_functions.yaml for details
441 // Also see NOTE [ embedding_bag Native Functions ] in native_functions.yaml in _embedding_bag_dense_backward_cuda()
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/static/
passes.cpp
452 …"static_runtime::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_f… in TORCH_LIBRARY_FRAGMENT()
455 …"static_runtime::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool sca… in TORCH_LIBRARY_FRAGMENT()
1307 …%y0 : Tensor, %y1 : Tensor, %y2 : Tensor, %y3 : Tensor = aten::embedding_bag(%weight, %indices, %o… in RemoveUnnecessaryEmbeddingBagOutputs()
1311 …%y0 : Tensor, %y1 : Tensor, %y2 : Tensor = static_runtime::embedding_bag(%weight, %indices, %offse… in RemoveUnnecessaryEmbeddingBagOutputs()
1319 …%y0 : Tensor, %y1 : Tensor, %y2 : Tensor, %y3 : Tensor = aten::embedding_bag(%weight, %indices, %o… in RemoveUnnecessaryEmbeddingBagOutputs()
1323 …%y0 : Tensor, %y1 : Tensor, %y2 : Tensor = static_runtime::embedding_bag(%weight, %indices, %offse… in RemoveUnnecessaryEmbeddingBagOutputs()
/aosp_15_r20/external/pytorch/test/functorch/
test_vmap_registrations.py
67 "aten::embedding_bag",
68 "aten::embedding_bag.padding_idx",
/aosp_15_r20/external/pytorch/torch/nn/
functional.py
2554 def embedding_bag( function
2634 >>> F.embedding_bag(input, embedding_matrix, offsets)
2642 >>> F.embedding_bag(input, embedding_matrix, offsets, padding_idx=2, mode='sum')
2648 embedding_bag,
2663 # Used to be embedding_bag(weight, input, ...)
2664 # Now is embedding_bag(input, weight, ...)
2667 "Argument order of nn.functional.embedding_bag was changed. "
2668 "Usage `embedding_bag(weight, input, ...)` is deprecated, "
2669 "and should now be `embedding_bag(input, weight, ...)`."
2675 f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, "
[all …]
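functional.py documents the argument-order change: the deprecated form took the weight first, the current form takes the input first, and the old order is detected by dtype and swapped with a warning. A small sketch of the two orders, assuming the compatibility swap is still in place as the quoted messages suggest:

```python
import torch
import torch.nn.functional as F

embedding_matrix = torch.randn(10, 3)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9], dtype=torch.long)
offsets = torch.tensor([0, 4], dtype=torch.long)

# Current order: input first, then weight.
out = F.embedding_bag(input, embedding_matrix, offsets)

# Legacy order (weight first) still runs, but emits the deprecation warning
# quoted above before the arguments are swapped internally.
out_legacy = F.embedding_bag(embedding_matrix, input, offsets)
torch.testing.assert_close(out, out_legacy)
```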
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cpu/
qembeddingbag_unpack.cpp
99 "We currently only support 8-bit and 4-bit quantization of embedding_bag."); in unpack()
282 // Unpack the packed embedding_bag weights using TorchBind custom class. in TORCH_LIBRARY_IMPL()
qembeddingbag.cpp
572 // For embedding_bag operator with 2D indices, we set the offsets explicitly in embedding_bag_byte_helper()
675 // For embedding_bag operator with 2D indices, we need to set the offsets in _embedding_bag_nbit_helper()
1069 "Currently only support 8-bit embedding_bag quantization"); in run()
qembeddingbag_prepack.cpp
29 * Prepack function for embedding_bag weights.
69 "Expect embedding_bag weights to be quantized using kPerChannelAffineFloatQParams"); in prepack()
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/options/
embedding.h
194 /// Options for `torch::nn::functional::embedding_bag`.
199 /// F::embedding_bag(input, weight,
/aosp_15_r20/external/pytorch/functorch/dim/
README.md
35 def embedding_bag(input: torch.Tensor, embedding_weights: torch.Tensor):
518 def embedding_bag(input, embedding_weights):
525 embedding_bag(input, W)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/quantized/cuda/
EmbeddingBag.cu
285 // For embedding_bag operator with 2D indices, we set the offsets explicitly in embedding_bag_byte_rowwise_offsets()
467 // For embedding_bag operator with 2D indices, we need to set the offsets in embedding_bag_4bit_rowwise_offsets()
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharded_tensor/_ops/
__init__.py
8 from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag import (
/aosp_15_r20/external/pytorch/torch/_functorch/
top_operators_github_usage.py
330 ("nn.functional.embedding_bag", 122),
478 ("nn.EmbeddingBag", 2344, "nn.functional.embedding_bag"),
/aosp_15_r20/external/pytorch/docs/source/
nn.functional.rst
149 embedding_bag
