Searched defs:grad_v (Results 1 – 8 of 8) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  WeightNorm.cpp
     75  auto grad_v = at::empty_like(saved_v, at::MemoryFormat::Contiguous);            in weight_norm_backward_cpu() local
    150  auto grad_v = (saved_g/norms)*(grad_w - saved_v*(per_dim_sums/(norms*norms)));  in _weight_norm_differentiable_backward() local
    156  auto grad_v = (saved_g/norms)*(grad_w - saved_v*(per_dim_sums/(norms*norms)));  in _weight_norm_differentiable_backward() local
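
The hits at 150/156 are the closed-form weight-norm backward: for w = g * v / ||v||, grad_v = (g/||v||) * (grad_w - v * (grad_w . v)/||v||^2). A minimal PyTorch sketch checking that formula against autograd (the shapes and the dim=1 norm are illustrative assumptions, not the kernel's layout):

    import torch

    torch.manual_seed(0)
    v = torch.randn(4, 8, requires_grad=True)
    g = torch.randn(4, 1, requires_grad=True)

    # Forward: weight-norm reparameterization, norm reduced over dim 1
    norms = v.norm(dim=1, keepdim=True)
    w = g * v / norms
    grad_w = torch.randn(4, 8)
    w.backward(grad_w)

    # Closed form mirroring the _weight_norm_differentiable_backward() hits
    with torch.no_grad():
        per_dim_sums = (grad_w * v).sum(dim=1, keepdim=True)
        grad_v = (g / norms) * (grad_w - v * (per_dim_sums / (norms * norms)))
        grad_g = per_dim_sums / norms

    print(torch.allclose(v.grad, grad_v), torch.allclose(g.grad, grad_g))  # True True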

/aosp_15_r20/external/tensorflow/tensorflow/python/ops/
  linalg_grad.py
    719  def _EigGrad(op, grad_e, grad_v):               argument
    773  def _SelfAdjointEigV2Grad(op, grad_e, grad_v):  argument
    815  def _SvdGrad(op, grad_s, grad_u, grad_v):       argument
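
These are the registered gradients for tf.linalg.eig, tf.linalg.eigh, and tf.linalg.svd; grad_v is the upstream gradient with respect to the eigenvector / right-singular-vector output. A sketch of how a grad_v reaches _SvdGrad (TF 2.x eager; the loss is an arbitrary example):

    import tensorflow as tf

    a = tf.Variable(tf.random.normal([5, 3], seed=0))
    with tf.GradientTape() as tape:
        # full_matrices defaults to False, the case whose u/v gradient is defined
        s, u, v = tf.linalg.svd(a)
        loss = tf.reduce_sum(s) + tf.reduce_sum(v * v)
    # autodiff routes d(loss)/ds and d(loss)/dv into _SvdGrad as grad_s / grad_v
    grad_a = tape.gradient(loss, a)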

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
  WeightNormKernel.cpp
    177  TensorBase& grad_v,  in weight_norm_backward_first_dim_kernel()
    314  TensorBase& grad_v,  in weight_norm_backward_last_dim_kernel()
    414  TensorBase& grad_v,  in weight_norm_backward_kernel()
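
The two specializations reduce the norm over the first or the last dimension. A hedged way to drive them from Python (the dim-to-kernel mapping is inferred from the kernel names, not a documented contract):

    import torch
    from torch.nn.utils import weight_norm

    # dim=0 should take the *_first_dim_kernel path, dim=-1 the *_last_dim one
    lin = weight_norm(torch.nn.Linear(8, 4), dim=0)
    lin(torch.randn(2, 8)).sum().backward()  # CPU backward goes through weight_norm_backward_kernel()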

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cpu/
  FlashAttentionKernel.cpp
    423  const at::Tensor& grad_v,  in cpu_flash_attention_backward()
    779  const at::Tensor& grad_v,  in flash_attention_backward_kernel_impl()
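
For attention out = softmax(q k^T / sqrt(d)) v, the value gradient is grad_v = p^T grad_out, with p the attention probabilities; the flash backward accumulates the same quantity blockwise. A quick autograd check (shapes are arbitrary):

    import torch

    torch.manual_seed(0)
    q, k = torch.randn(2, 4, 8), torch.randn(2, 4, 8)
    v = torch.randn(2, 4, 8, requires_grad=True)

    p = torch.softmax(q @ k.transpose(-2, -1) / 8 ** 0.5, dim=-1)
    out = p @ v
    grad_out = torch.randn_like(out)
    out.backward(grad_out)

    grad_v = p.transpose(-2, -1) @ grad_out  # what the backward kernel produces
    print(torch.allclose(v.grad, grad_v))    # True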

/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
  WeightNorm.cu
    217  (scalar_t* __restrict__ grad_v,  in weight_norm_bwd_first_dim_kernel()
    278  (scalar_t* __restrict__ grad_v,  in weight_norm_bwd_last_dim_kernel()

/aosp_15_r20/external/skia/modules/skottie/src/effects/
  LinearWipeEffect.cpp
    94  const SkVector grad_v = angle_v * grad_len,  in onMakeMask() local
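
Unrelated to autograd: here grad_v is a geometric gradient vector, the wipe direction scaled to the gradient/feather length. A rough Python analogue (the sin/-cos convention for angle_v is an assumption about the effect's angle origin, not taken from the source):

    import math

    def wipe_gradient(angle_deg: float, grad_len: float) -> tuple[float, float]:
        rad = math.radians(angle_deg)
        angle_v = (math.sin(rad), -math.cos(rad))              # assumed unit wipe direction
        return (angle_v[0] * grad_len, angle_v[1] * grad_len)  # grad_v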

/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/cuda/
  attention_backward.cu
    353  at::Tensor grad_q, grad_k, grad_v, grad_bias;  in _efficient_attention_backward() local
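
grad_q/grad_k/grad_v/grad_bias are allocated here and filled by the CUTLASS kernels. A hedged way to land in this function from Python (requires CUDA and a PyTorch recent enough to ship torch.nn.attention, roughly 2.3+):

    import torch
    from torch.nn.attention import SDPBackend, sdpa_kernel

    q = torch.randn(1, 2, 16, 32, device="cuda", requires_grad=True)
    k = torch.randn(1, 2, 16, 32, device="cuda", requires_grad=True)
    v = torch.randn(1, 2, 16, 32, device="cuda", requires_grad=True)

    with sdpa_kernel(SDPBackend.EFFICIENT_ATTENTION):
        out = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    out.sum().backward()  # backward should dispatch to _efficient_attention_backward()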

/aosp_15_r20/external/pytorch/aten/src/ATen/native/transformers/
  attention.cpp
    839  auto grad_v = at::zeros(v_t.sizes(), value.options());  in _scaled_dot_product_flash_attention_cpu_backward() local