/aosp_15_r20/external/pytorch/torch/_decomp/

decompositions.py
    106  def _unsqueeze_to_dim(x: Tensor, dim: int) -> Tensor:
    115  def tanh_backward(out_grad: Tensor, y: Tensor):
    122  def sigmoid_backward(out_grad: Tensor, y: Tensor):
    129  def softplus_backward(out_grad: Tensor, x: Tensor, beta: float, threshold: float):
    138  grad_output: Tensor,
    143  self_or_result: Tensor,
    168  def fill_tensor(self, value: Tensor):
    179  def hardsigmoid(self: Tensor) -> Tensor:
    186  def hardsigmoid_backward(grad_output: Tensor, self: Tensor):
    197  grad_output: Tensor, self: Tensor, min_val: float, max_val: float
    [all …]
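As an illustration of what these reference decompositions look like, here is a minimal sketch of the standard gradient identities behind `tanh_backward` and `sigmoid_backward` (not copied from the AOSP source):

```python
import torch
from torch import Tensor

def tanh_backward_sketch(out_grad: Tensor, y: Tensor) -> Tensor:
    # y is tanh(x); d/dx tanh(x) = 1 - tanh(x)**2
    return out_grad * (1 - y * y)

def sigmoid_backward_sketch(out_grad: Tensor, y: Tensor) -> Tensor:
    # y is sigmoid(x); d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    return out_grad * y * (1 - y)

x = torch.randn(4, requires_grad=True)
y = torch.tanh(x)
y.sum().backward()
expected = tanh_backward_sketch(torch.ones_like(y), y.detach())
torch.testing.assert_close(x.grad, expected)
```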
decompositions_for_jvp.py
    102  def trace(self: Tensor) -> Tensor:
    107  def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]:
    118  input: Tensor, rstd: Tensor, inner_dim_indices: List[int], keepdim: bool
    133  grad_out: Tensor,
    134  input: Tensor,
    136  mean: Tensor,
    137  rstd: Tensor,
    138  weight: Optional[Tensor],
    139  bias: Optional[Tensor],
    217  grad_out: Tensor,
    [all …]
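`trace` is the simplest entry above: it decomposes into a diagonal followed by a sum. A hedged sketch of that rewrite (illustrative, not the vendored source):

```python
import torch
from torch import Tensor

def trace_sketch(self: Tensor) -> Tensor:
    # trace(A) is the sum of the main diagonal of a 2-D tensor
    return torch.sum(torch.diagonal(self))

a = torch.randn(3, 3)
torch.testing.assert_close(trace_sketch(a), torch.trace(a))
```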
/aosp_15_r20/external/pytorch/test/inductor/

test_b2b_gemm.py
    21   def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    25   def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    55   def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    59   def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    81   def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    84   def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    106  def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    109  def f_32(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    130  def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    151  def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    [all …]
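The repeated `f(m1, m2, m3)` signatures are variations of the back-to-back GEMM pattern these Inductor tests target; in essence (a sketch, not the test bodies):

```python
import torch

def f(m1: torch.Tensor, m2: torch.Tensor, m3: torch.Tensor) -> torch.Tensor:
    # two chained matmuls: the shape the B2B-GEMM fusion pass looks for
    return (m1 @ m2) @ m3

out = f(torch.randn(8, 16), torch.randn(16, 32), torch.randn(32, 8))
# under torch.compile, Inductor may fuse the two GEMMs into a single kernel
```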
test_fused_attention.py
    111  query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    139  query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    250  query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    271  query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    286  query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, training: bool
    303  query: torch.Tensor,
    304  key: torch.Tensor,
    305  value: torch.Tensor,
    581  query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    598  query: torch.Tensor,
    [all …]
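These tests check that hand-written attention gets pattern-matched onto `scaled_dot_product_attention`. The unfused form being matched looks roughly like this (assuming the usual `(batch, heads, seq, head_dim)` layout):

```python
import math
import torch
import torch.nn.functional as F

def sdpa_reference(query, key, value):
    # unfused attention with a (batch, heads, seq, head_dim) layout
    scores = query @ key.transpose(-2, -1) / math.sqrt(query.size(-1))
    return torch.softmax(scores, dim=-1) @ value

q, k, v = (torch.randn(2, 4, 16, 8) for _ in range(3))
ref = sdpa_reference(q, k, v)
fused = F.scaled_dot_product_attention(q, k, v)   # what the pass rewrites to
```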
/aosp_15_r20/external/pytorch/torch/nn/

functional.py
    437  input: Tensor,
    442  _random_samples: Optional[Tensor] = None,
    514  input: Tensor,
    519  _random_samples: Optional[Tensor] = None,
    549  input: Tensor,
    554  _random_samples: Optional[Tensor] = None,
    630  input: Tensor,
    635  _random_samples: Optional[Tensor] = None,
    665  input: Tensor,
    718  input: Tensor,
    [all …]
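The recurring `_random_samples` parameter belongs to the fractional max-pooling functions. A short usage sketch of the public API (kernel size and ratio are arbitrary examples):

```python
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)
# fractional max pooling: pooling regions are chosen pseudo-randomly
# (that randomness is what the _random_samples argument controls)
out = F.fractional_max_pool2d(x, kernel_size=3, output_ratio=(0.5, 0.5))
print(out.shape)   # torch.Size([1, 3, 16, 16])
```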
/aosp_15_r20/external/pytorch/torch/_inductor/

decomposition.py
    126  def assert_async_msg_decomp(tensor: torch.Tensor, msg: str) -> None:
    132  def functional_assert_async_msg_decomp(tensor: torch.Tensor, msg: str) -> None:
    149  x: torch.Tensor,
    191  grad_output: torch.Tensor,
    192  input: torch.Tensor,
    193  weight: torch.Tensor,
    223  def round_dec(x: torch.Tensor, decimals: int = 0) -> torch.Tensor:
    231  self: torch.Tensor,
    232  batch2: torch.Tensor,
    254  self: torch.Tensor,
    [all …]
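`round_dec` illustrates what these Inductor decompositions do: rewrite an op without a dedicated kernel in terms of ops that have one. A sketch of the usual round-to-decimals rewrite (illustrative, not the vendored source):

```python
import torch

def round_dec_sketch(x: torch.Tensor, decimals: int = 0) -> torch.Tensor:
    # round(x, d) == round(x * 10**d) / 10**d
    ten_pow = 10.0 ** decimals
    return torch.round(x * ten_pow) / ten_pow

x = torch.tensor([1.2345, -2.718])
torch.testing.assert_close(round_dec_sketch(x, 2), torch.round(x, decimals=2))
```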
/aosp_15_r20/external/pytorch/torch/ao/quantization/fx/

_decomposed.py
    52   input: torch.Tensor,
    89   input: torch.Tensor,
    114  input: torch.Tensor,
    115  scale: torch.Tensor,
    116  zero_point: torch.Tensor,
    139  input: torch.Tensor,
    140  scale: torch.Tensor,
    141  zero_point: torch.Tensor,
    171  input: torch.Tensor,
    172  scale: torch.Tensor,
    [all …]
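These are the decomposed quantize/dequantize ops; the affine mapping they implement is standard. A sketch of the per-tensor case (int8 quant_min/quant_max assumed):

```python
import torch

def quantize_per_tensor_sketch(x, scale, zero_point, quant_min=-128, quant_max=127):
    # affine quantization: q = clamp(round(x / scale) + zero_point, qmin, qmax)
    q = torch.round(x / scale) + zero_point
    return torch.clamp(q, quant_min, quant_max).to(torch.int8)

def dequantize_per_tensor_sketch(q, scale, zero_point):
    # approximate inverse mapping back to float
    return (q.to(torch.float32) - zero_point) * scale

x = torch.randn(4)
q = quantize_per_tensor_sketch(x, scale=0.1, zero_point=0)
x_hat = dequantize_per_tensor_sketch(q, scale=0.1, zero_point=0)
```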
/aosp_15_r20/external/executorch/backends/cadence/aot/

ops_registrations.py
    264  input: torch.Tensor,
    276  input: torch.Tensor,
    288  src: torch.Tensor,
    289  weight: torch.Tensor,
    290  bias: torch.Tensor,
    292  weight_zero_point: torch.Tensor,
    293  out_multiplier: torch.Tensor,
    294  out_shift: torch.Tensor,
    296  offset: Optional[torch.Tensor],
    310  src: torch.Tensor,
    [all …]

/aosp_15_r20/external/pytorch/torch/distributed/_symmetric_memory/

__init__.py
    137  shard: torch.Tensor,
    138  shard_consumer: Callable[[torch.Tensor, int], None],
    139  ag_out: torch.Tensor,
    195  chunk_producer: Callable[[int, torch.Tensor], None],
    196  output: torch.Tensor,
    289  A_shard: torch.Tensor,
    290  Bs: List[torch.Tensor],
    319  def unflatten(t: torch.Tensor) -> torch.Tensor:
    333  def shard_consumer(shard: torch.Tensor, rank: int) -> None:
    348  A_shard: torch.Tensor,
    [all …]

/aosp_15_r20/external/pytorch/torch/_higher_order_ops/

flex_attention.py
    49   def _permute_strides(out: torch.Tensor, query_strides: Tuple[int, ...]) -> torch.Tensor:
    94   query: torch.Tensor,
    95   key: torch.Tensor,
    96   value: torch.Tensor,
    131  query: torch.Tensor,
    132  key: torch.Tensor,
    133  value: torch.Tensor,
    134  out: torch.Tensor,
    135  logsumexp: torch.Tensor,
    136  grad_out: torch.Tensor,
    [all …]
/aosp_15_r20/external/pytorch/torch/masked/

_ops.py
    400  def _reduction_identity(op_name: str, input: Tensor, *args):
    485  def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple):
    494  def _any(input: Tensor, dim: tuple, keepdim: bool):
    503  def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    617  mask_input: Tensor,
    737  mask_input: Tensor,
    821  def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    829  def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    865  def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor:
    950  def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:
    [all …]
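The helpers here implement masked reductions by filling masked-out positions with the reduction's identity element (the role of `_reduction_identity` and `_where`) and then reducing normally. A small sketch of that idea:

```python
import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])
mask = torch.tensor([[True, False, True],
                     [False, True, True]])

# identity elements: 0 for sum, 1 for prod, -inf for amax, +inf for amin
masked_sum = torch.where(mask, x, torch.zeros_like(x)).sum(dim=1)
masked_amax = torch.where(mask, x, torch.full_like(x, float("-inf"))).amax(dim=1)
```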
/aosp_15_r20/external/pytorch/torch/distributed/tensor/experimental/

_attention.py
    69   def _maybe_wait(tensor: torch.Tensor) -> torch.Tensor:
    89   def _merge_one(self, block_out: torch.Tensor, block_lse: torch.Tensor) -> None:
    103  def step(self, out: torch.Tensor, lse: torch.Tensor) -> None:
    122  query: torch.Tensor,
    123  key: torch.Tensor,
    124  value: torch.Tensor,
    148  query: torch.Tensor,
    149  key: torch.Tensor,
    150  value: torch.Tensor,
    151  attn_bias: Optional[torch.Tensor] = None,
    [all …]
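`_merge_one(block_out, block_lse)` combines partial attention results computed over different key/value blocks. The standard way to do this uses the per-row log-sum-exp; the sketch below is a reconstruction of that common merge formula, not the AOSP source (shapes assumed `(B, H, S, D)` for outputs and `(B, H, S)` for LSE):

```python
import torch

def merge_blocks(out1, lse1, out2, lse2):
    # combine two partial softmax-weighted outputs given their log-sum-exp values
    new_lse = torch.logaddexp(lse1, lse2)
    w1 = torch.exp(lse1 - new_lse).unsqueeze(-1)
    w2 = torch.exp(lse2 - new_lse).unsqueeze(-1)
    return w1 * out1 + w2 * out2, new_lse

out1, out2 = torch.randn(2, 4, 16, 8), torch.randn(2, 4, 16, 8)
lse1, lse2 = torch.randn(2, 4, 16), torch.randn(2, 4, 16)
merged_out, merged_lse = merge_blocks(out1, lse1, out2, lse2)
```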
/aosp_15_r20/external/pytorch/torch/nn/modules/

transformer.py
    47   def _get_seq_len(src: Tensor, batch_first: bool) -> Optional[int]:
    107  activation: Union[str, Callable[[Tensor], Tensor]] = F.relu,
    174  src: Tensor,
    175  tgt: Tensor,
    176  src_mask: Optional[Tensor] = None,
    177  tgt_mask: Optional[Tensor] = None,
    178  memory_mask: Optional[Tensor] = None,
    179  src_key_padding_mask: Optional[Tensor] = None,
    180  tgt_key_padding_mask: Optional[Tensor] = None,
    181  memory_key_padding_mask: Optional[Tensor] = None,
    [all …]
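The multi-line signature at lines 174-181 is `nn.Transformer.forward`. A brief usage sketch of the public module (hyperparameters arbitrary):

```python
import torch
import torch.nn as nn

model = nn.Transformer(d_model=64, nhead=4, num_encoder_layers=2,
                       num_decoder_layers=2, batch_first=True)
src = torch.randn(8, 10, 64)   # (batch, src_len, d_model)
tgt = torch.randn(8, 20, 64)   # (batch, tgt_len, d_model)
out = model(src, tgt)          # -> (8, 20, 64); all of the masks above are optional
```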
loss.py
    52   weight: Optional[Tensor] = None,
    127  def forward(self, input: Tensor, target: Tensor) -> Tensor:
    241  weight: Optional[Tensor] = None,
    250  def forward(self, input: Tensor, target: Tensor) -> Tensor:
    269  weight: Optional[Tensor] = None,
    353  def forward(self, log_input: Tensor, target: Tensor) -> Tensor:
    441  def forward(self, input: Tensor, target: Tensor, var: Tensor) -> Tensor:
    540  def forward(self, input: Tensor, target: Tensor) -> Tensor:
    607  def forward(self, input: Tensor, target: Tensor) -> Tensor:
    689  weight: Optional[Tensor] = None,
    [all …]
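Most of these are the usual `forward(input, target)` loss signatures; the three-argument variant (`input, target, var`) matches `nn.GaussianNLLLoss`. A short usage sketch (values arbitrary):

```python
import torch
import torch.nn as nn

loss_fn = nn.GaussianNLLLoss()
input = torch.randn(5, 2)    # predicted means
target = torch.randn(5, 2)   # observed values
var = torch.ones(5, 2)       # predicted variances (must be positive)
loss = loss_fn(input, target, var)
```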
/aosp_15_r20/external/pytorch/test/jit/

test_module_interface.py
    26   def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
    29   def two(self, input: Tensor) -> Tensor:
    32   def forward(self, input: Tensor) -> Tensor:
    37   def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
    40   def forward(self, input: Tensor) -> Tensor:
    48   def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
    58   def forward(self, input: Tensor) -> Tensor:
    69   def one(self, x: Tensor, y: Tensor) -> Tensor:
    72   def two(self, x: Tensor) -> Tensor:
    75   def forward(self, x: Tensor) -> Tensor:
    [all …]
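These tests declare TorchScript module interfaces and check that implementations conform when scripted. The idiom is roughly the following (a sketch of the pattern with hypothetical class names, not the test file itself):

```python
import torch
import torch.nn as nn
from torch import Tensor

@torch.jit.interface
class OneForwardInterface(nn.Module):   # hypothetical interface name
    def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
        pass
    def forward(self, input: Tensor) -> Tensor:
        pass

class OrigModule(nn.Module):
    def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
        return inp1 + inp2
    def forward(self, input: Tensor) -> Tensor:
        return self.one(input, input)

class Wrapper(nn.Module):
    proxy_mod: OneForwardInterface   # conformance is checked when Wrapper is scripted
    def __init__(self):
        super().__init__()
        self.proxy_mod = OrigModule()
    def forward(self, x: Tensor) -> Tensor:
        return self.proxy_mod.forward(x)

scripted = torch.jit.script(Wrapper())
out = scripted(torch.randn(3))
```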
test_await.py
    34   def fn(x: Tensor):
    49   def fn(x: Tensor):
    65   def __init__(self, a: Tensor, b: Tensor):
    72   def fn(x: Tensor):
    89   def __init__(self, a: Tensor, b: Tensor):
    101  def fn(x: Tensor):
    120  def __init__(self, a: Tensor, b: Tensor):
    131  def fn(x: Tensor):
    149  def __init__(self, a: Tensor, b: Tensor):
    163  def fn(x: Tensor):
    [all …]

/aosp_15_r20/external/pytorch/torch/optim/

adamw.py
    36   lr: Union[float, Tensor] = 1e-3,
    318  params: List[Tensor],
    319  grads: List[Tensor],
    320  exp_avgs: List[Tensor],
    321  exp_avg_sqs: List[Tensor],
    322  max_exp_avg_sqs: List[Tensor],
    323  state_steps: List[Tensor],
    324  grad_scale: Optional[Tensor],
    325  found_inf: Optional[Tensor],
    330  lr: Union[Tensor, float],
    [all …]
adam.py
    36   lr: Union[float, Tensor] = 1e-3,
    321  params: List[Tensor],
    322  grads: List[Tensor],
    323  exp_avgs: List[Tensor],
    324  exp_avg_sqs: List[Tensor],
    325  max_exp_avg_sqs: List[Tensor],
    326  state_steps: List[Tensor],
    327  grad_scale: Optional[Tensor],
    328  found_inf: Optional[Tensor],
    440  params: List[Tensor],
    [all …]
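Both optimizers share this update loop over `params`, `grads`, `exp_avgs`, `exp_avg_sqs`, and `state_steps`; AdamW differs from Adam only in applying weight decay to the parameter directly (decoupled) rather than adding it to the gradient. A condensed sketch of one AdamW step (no amsgrad, capturable, or fused paths; not the vendored implementation):

```python
import math
import torch

@torch.no_grad()
def adamw_step_sketch(param, grad, exp_avg, exp_avg_sq, step,
                      lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=1e-2):
    # decoupled weight decay (plain Adam would instead fold it into the gradient)
    param.mul_(1 - lr * weight_decay)
    # exponential moving averages of the gradient and its square
    exp_avg.lerp_(grad, 1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    # bias-corrected parameter update
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
    param.addcdiv_(exp_avg, denom, value=-lr / bias_correction1)

p = torch.randn(10)
adamw_step_sketch(p, torch.randn(10), torch.zeros(10), torch.zeros(10), step=1)
```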
/aosp_15_r20/external/pytorch/torch/_inductor/fx_passes/

pad_mm.py
    63   def get_alignment_size(x: Tensor) -> int:
    76   def check_device(a: Tensor, b: Tensor) -> bool:
    80   def check_dtype(a: Tensor, b: Tensor) -> bool:
    85   mat1: Tensor, mat2: Tensor, input: Optional[Tensor] = None
    89   def valid_shape_and_stride(t: Optional[Tensor]) -> bool:
    131  def pad_dim(x: Tensor, padded_length: int, dim: int) -> Tensor:
    139  input: Tensor, mat1: Tensor, mat2: Tensor, beta: float, alpha: float
    152  input: Optional[Tensor],
    153  mat1: Tensor,
    154  mat2: Tensor,
    [all …]
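`get_alignment_size` and `pad_dim` support the pad-mm pass, which pads matmul operands so their dimensions hit hardware-friendly alignments. A hedged sketch of the padding helper (illustrative, not the vendored code):

```python
import torch
from torch import Tensor

def pad_dim_sketch(x: Tensor, padded_length: int, dim: int) -> Tensor:
    # append zeros along `dim` until its size reaches padded_length
    if x.size(dim) >= padded_length:
        return x
    pad_shape = list(x.shape)
    pad_shape[dim] = padded_length - x.size(dim)
    return torch.cat([x, x.new_zeros(pad_shape)], dim=dim)

m = torch.randn(5, 13)
padded = pad_dim_sketch(m, 16, dim=1)   # (5, 16); the last 3 columns are zero
```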
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/dynamic/modules/

rnn.py
    29   def _apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    37   def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    239  def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
    251  self, input: Tensor, batch_sizes: Optional[Tensor]
    267  hx: Tensor,
    275  self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]
    283  def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
    536  input: Tensor,
    537  hx: Optional[Tuple[Tensor, Tensor]],
    538  batch_sizes: Optional[Tensor],
    [all …]
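`_apply_permutation` reorders the hidden state to match a packed sequence's batch permutation; it is essentially an `index_select` along the batch dimension. Sketch:

```python
import torch
from torch import Tensor

def apply_permutation_sketch(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    # pick slices of `tensor` along `dim` in the order given by `permutation`
    return tensor.index_select(dim, permutation)

hx = torch.randn(2, 4, 8)                      # (num_layers, batch, hidden)
perm = torch.tensor([3, 1, 0, 2])
hx_perm = apply_permutation_sketch(hx, perm)   # batch dimension reordered
```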
/aosp_15_r20/external/pytorch/torch/

_meta_registrations.py
    420  input: Tensor,
    421  weight: Tensor,
    422  _meta: Tensor,
    423  bias: Optional[Tensor] = None,
    454  mat1: Tensor,
    455  mat1_meta: Tensor,
    456  mat2: Tensor,
    479  input: Tensor,
    480  mat1: Tensor,
    481  mat1_meta: Tensor,
    [all …]
/aosp_15_r20/external/pytorch/torch/ao/nn/quantized/modules/

functional_modules.py
    51   def add(self, x: Tensor, y: Tensor) -> Tensor:
    58   def add_scalar(self, x: Tensor, y: float) -> Tensor:
    66   def mul(self, x: Tensor, y: Tensor) -> Tensor:
    73   def mul_scalar(self, x: Tensor, y: float) -> Tensor:
    81   def cat(self, x: List[Tensor], dim: int = 0) -> Tensor:
    88   def add_relu(self, x: Tensor, y: Tensor) -> Tensor:
    96   def matmul(self, x: Tensor, y: Tensor) -> Tensor:
    123  def add(self, x: Tensor, y: Tensor) -> Tensor:
    129  def add_scalar(self, x: Tensor, y: float) -> Tensor:
    135  def mul(self, x: Tensor, y: Tensor) -> Tensor:
    [all …]
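`FloatFunctional`/`QFunctional` wrap arithmetic that has no module form so that quantization observers can be attached to it. A usage sketch of the float version:

```python
import torch
import torch.ao.nn.quantized as nnq

ff = nnq.FloatFunctional()
x, y = torch.randn(4), torch.randn(4)

z = ff.add(x, y)             # instead of x + y, so an observer can watch the output
w = ff.mul_scalar(x, 2.0)
c = ff.cat([x, y], dim=0)
```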
/aosp_15_r20/external/pytorch/torch/nn/attention/

flex_attention.py
    123  score: Tensor,
    124  batch: Tensor,
    125  head: Tensor,
    126  token_q: Tensor,
    127  token_kv: Tensor,
    133  batch: Tensor,
    134  head: Tensor,
    135  token_q: Tensor,
    136  token_kv: Tensor,
    146  def _ordered_to_dense(num_blocks_in_row: Tensor, col_indices: Tensor):
    [all …]
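The `(score, batch, head, token_q, token_kv)` parameter lists are the signature of a FlexAttention `score_mod` callable. A hedged usage sketch (the bias choice is arbitrary, and in practice the call is usually wrapped in `torch.compile`):

```python
import torch
from torch.nn.attention.flex_attention import flex_attention

def rel_bias(score, batch, head, token_q, token_kv):
    # example score_mod: bias each attention score by relative position
    return score + (token_kv - token_q)

q, k, v = (torch.randn(2, 4, 128, 64) for _ in range(3))
out = flex_attention(q, k, v, score_mod=rel_bias)
```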
/aosp_15_r20/external/pytorch/torch/ao/quantization/pt2e/

qat_utils.py
    88   x: torch.Tensor,
    89   conv_weight: torch.Tensor,
    90   conv_bias: torch.Tensor,
    91   bn_weight: torch.Tensor,
    92   bn_bias: torch.Tensor,
    93   bn_running_mean: torch.Tensor,
    94   bn_running_var: torch.Tensor,
    108  x: torch.Tensor,
    109  conv_weight: torch.Tensor,
    110  conv_bias: torch.Tensor,
    [all …]
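These QAT helpers are built around folding batch-norm statistics into the convolution weight. The folding identity itself is standard; a sketch (not the graph-rewrite code in the file):

```python
import torch

def fold_bn_into_conv(conv_weight, conv_bias, bn_weight, bn_bias,
                      bn_running_mean, bn_running_var, eps=1e-5):
    # conv+BN folding: scale the conv weight per output channel and shift the bias
    # so that conv(x, W', b') == BN(conv(x, W, b)) with frozen BN statistics
    scale = bn_weight / torch.sqrt(bn_running_var + eps)
    fused_weight = conv_weight * scale.reshape(-1, *([1] * (conv_weight.dim() - 1)))
    fused_bias = (conv_bias - bn_running_mean) * scale + bn_bias
    return fused_weight, fused_bias

conv, bn = torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8).eval()
w, b = fold_bn_into_conv(conv.weight, conv.bias, bn.weight, bn.bias,
                         bn.running_mean, bn.running_var, bn.eps)
```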
/aosp_15_r20/external/executorch/examples/qualcomm/oss_scripts/llama2/model/

static_llama.py
    19   def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    34   x: torch.Tensor, freqs_cos: torch.Tensor, freqs_sin: torch.Tensor
    102  hidden_states: torch.Tensor,
    103  freqs_cos: torch.Tensor,
    104  freqs_sin: torch.Tensor,
    105  atten_mask: torch.Tensor,
    106  k_caches: List[torch.Tensor],
    107  v_caches: List[torch.Tensor],
    138  hidden_states: torch.Tensor,
    139  freqs_cos: torch.Tensor,
    [all …]
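`repeat_kv` is the usual grouped-query-attention helper: each key/value head is repeated `n_rep` times to match the number of query heads. A sketch assuming a `(batch, seq, n_kv_heads, head_dim)` layout (the actual layout in this model may differ):

```python
import torch

def repeat_kv_sketch(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    # (batch, seq, n_kv_heads, head_dim) -> (batch, seq, n_kv_heads * n_rep, head_dim)
    if n_rep == 1:
        return hidden_states
    b, s, kv_heads, head_dim = hidden_states.shape
    return (hidden_states[:, :, :, None, :]
            .expand(b, s, kv_heads, n_rep, head_dim)
            .reshape(b, s, kv_heads * n_rep, head_dim))

kv = torch.randn(1, 16, 4, 64)
print(repeat_kv_sketch(kv, 8).shape)   # torch.Size([1, 16, 32, 64])
```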