/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
    embedding_ops.py
        37    def _clip(params, ids, max_norm):    (argument)
        94    max_norm=None,    (argument)
        269   max_norm=None):    (argument)
        341   def embedding_lookup_v2(params, ids, max_norm=None, name=None):    (argument)
        413   max_norm=None):    (argument)
        597   max_norm=None,    (argument)
        689   max_norm=None,    (argument)
        793   max_norm=None):    (argument)
        957   max_norm=None,    (argument)
    clip_ops_test.py
        33    def _testClipTensorByNorm(self, inputs, max_norm, expected):    (argument)
        40    def _testClipTensorByGlobalNorm(self, inputs, max_norm, expected):    (argument)
        45    def _testNonFiniteClippingByGlobalNorm(self, inputs, max_norm):    (argument)
        50    def _testClipIndexedSlicesByNorm(self, values, indices, shape, max_norm,    (argument)
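These TensorFlow lookup helpers thread max_norm down to a per-row clip (the _clip helper above), and clip_ops_test.py covers the underlying clip ops. A minimal usage sketch of the public API, assuming TF 2.x and toy values:

    import tensorflow as tf

    params = tf.constant([[3.0, 4.0],     # L2 norm 5.0
                          [0.3, 0.4]])    # L2 norm 0.5
    ids = tf.constant([0, 1])

    # Rows whose L2 norm exceeds max_norm are scaled down to max_norm;
    # rows already within the limit pass through unchanged.
    looked_up = tf.nn.embedding_lookup(params, ids, max_norm=1.0)
    print(tf.norm(looked_up, axis=1))     # approx. [1.0, 0.5]

    # The same per-row clipping, done explicitly.
    clipped = tf.clip_by_norm(params, clip_norm=1.0, axes=[1])

    # Global-norm variant (clips a list of tensors jointly), also covered
    # by clip_ops_test.py.
    clipped_list, global_norm = tf.clip_by_global_norm([params], clip_norm=1.0)
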
/aosp_15_r20/external/pytorch/torch/csrc/api/include/torch/nn/functional/
    embedding.h
        18    float max_norm,    (in _no_grad_embedding_renorm_())
        28    std::optional<double> max_norm,    (in embedding())
        93    std::optional<double> max_norm,    (in embedding_bag())
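This C++ functional header mirrors torch.nn.functional, and _no_grad_embedding_renorm_ is the no-grad renorm step that runs when max_norm is set. A small Python-side sketch with toy sizes:

    import torch
    import torch.nn.functional as F

    weight = torch.randn(10, 4)            # 10 embeddings of dim 4
    input = torch.tensor([1, 2, 4, 5, 4])  # flat indices
    offsets = torch.tensor([0, 2])         # two bags: [1, 2] and [4, 5, 4]

    # max_norm renormalizes (in place, under no_grad) any referenced row
    # whose 2-norm exceeds 1.0 before the per-bag 'mean' reduction.
    out = F.embedding_bag(input, weight, offsets, max_norm=1.0, mode="mean")
    print(out.shape)  # torch.Size([2, 4])
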
/aosp_15_r20/external/tensorflow/tensorflow/python/tpu/
    feature_column.py
        368   max_norm=None,    (argument)
        398   max_norm=None,    (argument)
        515   max_norm=None,    (argument)
        541   max_norm=None,    (argument)
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/
    embedding.py
        187   input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg    (argument)
        235   input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg    (argument)
    embedding_bag.py
        247   max_norm,    (argument)
        320   max_norm,    (argument)
    _common.py
        221   def _handle_max_norm_col_wise(    (argument)
/aosp_15_r20/external/pytorch/torch/ao/nn/qat/modules/
    embedding_ops.py
        33    max_norm=None,    (argument)
        146   max_norm=None,    (argument)
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
    Embedding.cu
        198   scalar_t* weights, index_t* indices, accscalar_t max_norm,    (in renorm_kernel())
        348   double max_norm, double norm_type) {    (in embedding_renorm_cuda_())
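renorm_kernel and embedding_renorm_cuda_ implement the in-place row renormalization triggered by max_norm; the effect is observable from Python (shown here on CPU with toy values, which behaves the same way):

    import torch
    import torch.nn.functional as F

    weight = torch.tensor([[3.0, 4.0],     # norm 5.0 -> will be renormed
                           [0.3, 0.4]])    # norm 0.5 -> untouched
    idx = torch.tensor([0])

    # Only the rows actually looked up are renormalized, in place.
    F.embedding(idx, weight, max_norm=1.0)
    print(weight.norm(dim=1))  # approx. tensor([1.0, 0.5])
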
/aosp_15_r20/external/tensorflow/tensorflow/python/feature_column/
    feature_column_v2.py
        521    max_norm=None,    (argument)
        636    max_norm=None,    (argument)
        820    max_norm=None,    (argument)
        2903   max_norm,    (argument)
        3185   def __call__(self, categorical_column, combiner, max_norm):    (argument)
        3230   max_norm,    (argument)
    feature_column.py
        824    max_norm=None,    (argument)
        2488   max_norm,    (argument)
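In the feature-column stack, max_norm is forwarded to the embedding lookup that backs an embedding_column. A sketch using the public (and by now deprecated) tf.feature_column API, with a made-up vocabulary:

    import tensorflow as tf

    # Made-up categorical column; any vocabulary works here.
    colors = tf.feature_column.categorical_column_with_vocabulary_list(
        "color", ["red", "green", "blue"])

    # Each looked-up embedding vector is clipped to an L2 norm of at most 1.0.
    color_embedding = tf.feature_column.embedding_column(
        colors, dimension=4, combiner="mean", max_norm=1.0)
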
/aosp_15_r20/external/pytorch/test/distributed/_shard/sharded_tensor/ops/
    test_embedding.py
        37    max_norm=None,    (argument)
    test_embedding_bag.py
        40    max_norm=None,    (argument)
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/
    constraints.py
        310   max_norm = MaxNorm    (variable)
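Here max_norm is the lowercase alias for the MaxNorm weight constraint, which rescales any weight vector whose norm exceeds max_value after each update. A small sketch with arbitrary layer sizes:

    import tensorflow as tf

    # Constrain each incoming weight vector of the Dense kernel (axis=0)
    # to an L2 norm of at most 2.
    layer = tf.keras.layers.Dense(
        16, kernel_constraint=tf.keras.constraints.MaxNorm(max_value=2, axis=0))
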
/aosp_15_r20/external/pytorch/torch/nn/modules/
    sparse.py
        220   max_norm=None,    (argument)
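sparse.py defines torch.nn.Embedding, where max_norm is applied at lookup time rather than at construction. A minimal sketch with toy sizes:

    import torch
    import torch.nn as nn

    emb = nn.Embedding(num_embeddings=100, embedding_dim=8, max_norm=1.0)
    idx = torch.tensor([[3, 7, 7]])

    # Rows 3 and 7 of emb.weight are renormalized in place (if their norm
    # exceeds max_norm) before being gathered; other rows are untouched.
    out = emb(idx)
    print(out.shape)  # torch.Size([1, 3, 8])
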
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
    Embedding.cpp
        182   Tensor & self, const Tensor & indices, double max_norm, double norm_type) {    (in embedding_renorm_cpu_())
/aosp_15_r20/external/pytorch/torch/csrc/jit/runtime/
    register_special_ops.cpp
        335   double max_norm = 0;    (local, in __anonedd36e380402())
/aosp_15_r20/external/tensorflow/tensorflow/python/distribute/
    sharded_variable.py
        903   max_norm=None):    (argument)
/aosp_15_r20/external/pytorch/test/nn/
    test_embedding.py
        1210   max_norm=None,    (argument)
/aosp_15_r20/external/pytorch/test/cpp/api/
    nn_utils.cpp
        23    float max_norm = 2;    (local, in TEST_F())
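The local max_norm = 2 here looks like the bound handed to the C++ clip-grad utilities under test; which TEST_F it belongs to is an assumption on my part. The Python counterpart is torch.nn.utils.clip_grad_norm_, sketched with a throwaway model:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    loss = model(torch.randn(8, 4)).pow(2).sum()
    loss.backward()

    # Scales all gradients jointly so their combined 2-norm is at most 2.0;
    # returns the total norm measured before clipping.
    total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=2.0)
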
/aosp_15_r20/external/pytorch/torch/onnx/
    symbolic_opset11.py
        1231   def embedding_renorm(g: jit_utils.GraphContext, weight, indices, max_norm, norm_type):    (argument)
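This symbolic maps the embedding renorm step to ONNX at opset 11, which suggests an Embedding configured with max_norm can be exported at that opset. A hedged sketch (the output file name and shapes are made up, and end-to-end export success is assumed rather than verified here):

    import torch
    import torch.nn as nn

    model = nn.Embedding(50, 8, max_norm=1.0)
    dummy = torch.tensor([[1, 2, 3]])

    # Exporting at opset 11 should exercise the embedding_renorm symbolic above.
    torch.onnx.export(model, (dummy,), "embedding_max_norm.onnx", opset_version=11)
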
/aosp_15_r20/external/pytorch/test/
    test_linalg.py
        4982    def renorm(matrix, value, dim, max_norm):    (argument)
    test_jit.py
        10094   def embedding_norm(input, embedding_matrix, max_norm):    (argument)
        10098   def embedding_norm_script(input, embedding_matrix, max_norm):    (argument)
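The renorm helper in test_linalg.py appears to be a reference used to check torch.renorm, which performs the same per-slice norm capping as a standalone op. A small sketch with made-up values:

    import torch

    m = torch.tensor([[3.0, 4.0],     # row norm 5.0
                      [0.3, 0.4]])    # row norm 0.5

    # Each slice along dim=0 whose 2-norm exceeds maxnorm=1.0 is rescaled
    # to have norm (approximately) 1.0; smaller slices are left as-is.
    r = torch.renorm(m, p=2, dim=0, maxnorm=1.0)
    print(r.norm(dim=1))  # approx. tensor([1.0, 0.5])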