Searched full:symint (Results 1 – 25 of 573) sorted by relevance

/aosp_15_r20/external/pytorch/c10/core/
SymInt.h
21 // SymInt represents either a regular int64_t, or a symbolic integer
22 // (represented in a type erased way as SymNode). The intention is for SymInt
27 // SymInt has an API equivalent to int64_t. In particular, it is a value type.
28 // Internally, SymInt is represented in a clever packed way, so that it only
35 class C10_API SymInt {
41 /*implicit*/ SymInt(int64_t d) : data_(d) { in SymInt() function
47 SymInt() : data_(0) {} in SymInt() function
48 SymInt(SymNode n);
51 // One appropriate use for this is when you are constructing a symint
54 SymInt(Unchecked, int64_t d) : data_(d) {} in SymInt() function
[all …]
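
The header comment above states that SymInt is a value type with an API equivalent to int64_t. A minimal Python-side sketch of the same contract (torch.SymInt is the Python binding of c10::SymInt; the .node accessor and maybe_as_int() are internal, unstable APIs, shown only for illustration):

```python
import torch

def double_size(n):
    # n may be a plain int or a torch.SymInt (e.g. a size observed while
    # tracing with dynamic shapes); both support the same arithmetic, so
    # generic size code needs no branching.
    return n * 2

def concrete_or_none(n):
    # Mirrors the C++ SymInt::maybe_as_int() pattern: yield a concrete
    # int when one is known, else None. The .node accessor is internal.
    if isinstance(n, torch.SymInt):
        return n.node.maybe_as_int()
    return int(n)
```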
SymInt.cpp
3 #include <c10/core/SymInt.h>
13 // SymInt has temporarily violated invariants
14 // Postcondition: invariants on SymInt are fixed
15 void SymInt::promote_to_negative() { in promote_to_negative()
17 SymInt(SymNode(c10::make_intrusive<ConstantSymNodeImpl<int64_t>>(data_))); in promote_to_negative()
23 SymNode SymInt::toSymNode() const { in toSymNode()
25 is_heap_allocated(), "SymInt::toSymNode is_heap_allocated"); in toSymNode()
29 SymInt::SymInt(SymNode sin_sp) { in SymInt() function in c10::SymInt
31 sin_sp->is_int(), "SymInt::SymInt sin_sp->is_int()"); in SymInt()
38 bool SymInt::has_hint() const { in has_hint()
[all …]
/aosp_15_r20/external/pytorch/torchgen/api/types/
signatures.py
38 # Is this a symint C++ signature. For BC reasons, functions that take
39 # SymInts still present as int64_t in C++, and the SymInt variant is
42 # NB: If a function RETURNS a SymInt, this is ALWAYS false
43 symint: bool
61 symint=self.symint,
70 symint_overload=False if suppress_symint_suffix else self.symint,
86 self.func.returns, symint=self.symint
106 self.func.returns, symint=self.symint
118 …return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_types_str}…
123 … return f"{cpp.returns_type(self.func.returns, symint=self.symint).cpp_type()} ({args_types_str})"
[all …]
/aosp_15_r20/external/pytorch/torchgen/api/
cpp.py
99 symint: bool = False,
104 elif str(t) == "SymInt":
105 if symint:
117 elem = valuetype_type(t.elem, binds=binds, mutable=mutable, symint=symint)
141 symint: bool = False,
148 symint=symint,
178 elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt":
179 if symint:
183 elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
192 if str(t.elem) == "SymInt":
[all …]
lazy.py
71 typ: Type, properties: LazyIrProperties, *, symint: bool
101 elif typ.name == BaseTy.SymInt:
102 if symint:
123 return OptionalCType(process_ir_type(typ.elem, properties, symint=symint))
131 elif typ.elem == BaseType(BaseTy.SymInt):
133 # the problem with tensorListValueT: if you have SymInt[] you
142 return VectorCType(process_ir_type(typ.elem, properties, symint=symint))
175 return isinstance(typ, BaseType) and typ.name == BaseTy.SymInt
210 # TODO: this is a lie; it is false for symint list
213 # Whether or not we are treating this as symint or not
[all …]
native.py
43 # NB: this is symint aware, you will get the non-SymInt variant for some
44 # dispatch entries and SymInt for others.
58 t: Type, *, mutable: bool, binds: ArgName, symint: bool
74 return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint)
77 def returns_type(rs: Sequence[Return], *, symint: bool) -> CType:
78 return cpp.returns_type(rs, symint=symint)
81 def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType:
82 return argumenttype_type(a.type, mutable=a.is_write, binds=binds, symint=symint)
89 symint: bool,
100 default = cpp.default_expr(a.default, a.type, symint=symint)
[all …]
dispatcher.py
45 symint: bool = True,
55 symint=symint,
65 symint: bool = True,
72 symint=symint,
76 def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType:
78 return cpp.returns_type(rs, symint=symint)
105 a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True
112 symint=symint,
119 def arguments(func: FunctionSchema, *, symint: bool = True) -> list[Binding]:
120 return [argument(a, symint=symint) for a in jit_arguments(func)]
python.py
221 def argument_str(self, *, method: bool = False, symint: bool = True) -> str:
223 argument_type_str(self.type, symint=symint)
395 def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
398 a.argument_str(method=self.method, symint=symint) for a in args
437 and str(vararg_type.elem) in ["int", "SymInt"]
479 def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
482 self, skip_outputs=skip_outputs, symint=symint
652 t: Type, *, simple_type: bool = False, symint: bool = True
677 BaseTy.SymInt,
686 elem = argument_type_str(t.elem, simple_type=simple_type, symint=symint)
[all …]
/aosp_15_r20/external/pytorch/tools/autograd/
derivatives.yaml
279 - name: affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
338 - name: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> …
342 - name: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -…
567 - name: diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2)…
655 # TODO: this derivative is not SymInt safe, need sum_to support
656 - name: expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)
1076 - name: masked_scatter_backward(Tensor grad_output, Tensor mask, SymInt[] sizes) -> Tensor
1224 - name: native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, fl…
1228 - name: native_layer_norm_backward(Tensor grad_out, Tensor input, SymInt[] normalized_shape, Tensor…
1234 - name: native_group_norm(Tensor input, Tensor? weight, Tensor? bias, SymInt N, SymInt C, SymInt Hx…
[all …]
gen_python_functions.py
268 symint: bool = True,
284 symint=symint,
298 symint=symint,
308 symint=symint,
318 symint=symint,
328 symint=symint,
347 symint=symint,
357 symint=symint,
406 symint: bool = True,
419 method_impl(name, module, overloads, method=method, symint=symint)
[all …]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
native_functions.yaml
190 - func: _assert_tensor_metadata(Tensor a, SymInt[]? size=None, SymInt[]? stride=None, ScalarType? d…
242 …_weight(Tensor[] weight_arr, int weight_stride0, SymInt input_size, int mode, SymInt hidden_size,
247 …cx, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropou…
256 …cy, int mode, SymInt hidden_size, SymInt proj_size, int num_layers, bool batch_first, float dropou…
663 - func: affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor
669 - func: affine_grid_generator_backward(Tensor grad, SymInt[] size, bool align_corners) -> Tensor
931 - func: as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> …
942 - func: as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -…
1364 - func: broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)
1469 - func: tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]
[all …]
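
Both yaml files above use the native-functions schema language, in which `SymInt`/`SymInt[]` mark arguments that may be symbolic. torchgen, which ships inside PyTorch, parses these strings; a small sketch (torchgen is an internal API and may change between releases):

```python
from torchgen.model import FunctionSchema

# Parse one of the entries listed above.
schema = FunctionSchema.parse(
    "broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)"
)
print(schema.name)                                       # broadcast_to
print([str(a.type) for a in schema.arguments.flat_all])  # ['Tensor(a)', 'SymInt[]']
```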
Convolution.cpp
97 auto w = at::symint::size<T>(input, 3); // same as h in check_cudnn_depthwise_workload()
98 auto ch = at::symint::size<T>(input, 1); in check_cudnn_depthwise_workload()
99 auto bs = at::symint::size<T>(input, 0); in check_cudnn_depthwise_workload()
221 if(at::symint::size<T>(input, 2) == 1 && stride == 1){ in check_cudnn_depthwise_workload_with_filter()
227 if (at::symint::size<T>(weight, 2) != at::symint::size<T>(weight, 3)) return false; in check_cudnn_depthwise_workload_with_filter()
228 auto filter = at::symint::size<T>(weight, 3); in check_cudnn_depthwise_workload_with_filter()
232 if (at::symint::size<T>(input, 3) < 7) return false; // min width 7 in check_cudnn_depthwise_workload_with_filter()
233 auto w = at::symint::size<T>(input, 3); in check_cudnn_depthwise_workload_with_filter()
238 auto ch = at::symint::size<T>(input, 1); in check_cudnn_depthwise_workload_with_filter()
239 auto bs = at::symint::size<T>(input, 0); in check_cudnn_depthwise_workload_with_filter()
[all …]
/aosp_15_r20/external/pytorch/test/custom_operator/
test_infer_schema_annotation.py
34 self.assertEqual(result, "(SymInt x) -> SymInt")
46 self.assertEqual(result, "(str x) -> SymInt")
65 self.assertEqual(result, "(ScalarType x) -> SymInt")
71 self.assertEqual(result, "(Device x) -> SymInt")
78 self.assertEqual(result, "(SymInt? x) -> SymInt")
84 self.assertEqual(result, "(SymInt[] x) -> SymInt")
90 self.assertEqual(result, "(SymInt[] x) -> SymInt")
96 self.assertEqual(result, "(SymInt[]? x) -> SymInt")
102 self.assertEqual(result, "(SymInt[]? x) -> SymInt")
139 self.assertEqual(result, "(SymInt[] x) -> Scalar")
[all …]
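
These tests exercise schema inference from Python type annotations. In recent PyTorch releases the same machinery is exposed as torch.library.infer_schema, so the first assertion above can be reproduced directly:

```python
import torch

def f(x: torch.SymInt) -> torch.SymInt:
    return x + 1

# mutates_args=() declares the function as non-mutating; with no
# op_name the inferred schema is returned without an operator name.
print(torch.library.infer_schema(f, mutates_args=()))  # (SymInt x) -> SymInt
```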
/aosp_15_r20/external/executorch/docs/source/
compiler-backend-dialect.md
136 * `executorch_prims::add.int(SymInt a, SymInt b) -> SymInt`
139 * `executorch_prims::mul.int(SymInt a, SymInt b) -> SymInt`
142 * `executorch_prims::sub.int(SymInt a, SymInt b) -> SymInt`
145 * `executorch_prims::floordiv.int(SymInt a, SymInt b) -> SymInt`
154 * `executorch_prims::gt.int(SymInt a, SymInt b) -> bool`
157 * `executorch_prims::lt.int(SymInt a, SymInt b) -> bool`
160 * `executorch_prims::ge.int(SymInt a, SymInt b) -> bool`
163 * `executorch_prims::le.int(SymInt a, SymInt b) -> bool`
166 * `executorch_prims::eq.int(SymInt a, SymInt b) -> bool`
169 * `executorch_prims::mod.Scalar(SymInt a, SymInt b) -> SymInt`
/aosp_15_r20/external/pytorch/torch/csrc/autograd/
python_variable_indexing.h
3 #include <c10/core/SymInt.h>
12 c10::SymInt start;
13 c10::SymInt stop;
14 c10::SymInt step;
22 c10::SymInt start_sym, stop_sym, step_sym; in __PySlice_Unpack()
25 if (val < c10::SymInt::min_representable_int()) { in __PySlice_Unpack()
35 return (Py_ssize_t)(c10::SymInt::min_representable_int()); in __PySlice_Unpack()
41 step_sym = c10::SymInt(1); in __PySlice_Unpack()
44 step_sym = py::handle(r->step).cast<c10::SymInt>(); in __PySlice_Unpack()
55 step_sym = c10::SymInt(step); in __PySlice_Unpack()
[all …]
FunctionsManual.h
190 std::vector<c10::SymInt> reverse_list_symint(const c10::SymIntArrayRef list);
267 const std::vector<std::vector<c10::SymInt>>& sizes,
399 c10::SymInt numel,
455 const c10::SymInt& split_size,
591 const c10::SymInt& padding_idx);
628 std::optional<c10::SymInt> start,
629 std::optional<c10::SymInt> end,
630 c10::SymInt step);
742 const c10::SymInt& last_dim_size);
770 c10::SymInt N,
[all …]
/aosp_15_r20/external/executorch/backends/cadence/aot/
ops_registrations.py
54 …Tensor weight, Tensor bias, SymInt src_zero_point, SymInt weight_zero_point, SymInt out_multiplier…
57 "quantized_linear.per_tensor(Tensor src, Tensor weight, Tensor bias, SymInt src_zero_point, "
58 …"SymInt weight_zero_point, SymInt out_multiplier, SymInt out_shift, SymInt out_zero_point, Tensor?…
69 …"quantized_conv(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[] di…
72 …"quantized_conv.out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, int[…
75 …"quantized_conv.per_tensor(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] paddin…
78 …"quantized_conv.per_tensor_out(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] pa…
89 "convolution(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, "
93 …"transposed_convolution(Tensor input, Tensor weight, Tensor bias, int[] stride, SymInt[] padding, "
94 "int[] dilation, SymInt[] output_padding, int groups, bool channel_last=False) -> (Tensor Y)"
[all …]
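
Registrations like these are built on torch.library: a Library handle for the namespace plus define() calls whose schema strings may use SymInt. A minimal hedged sketch (the "mylib" namespace and op name are hypothetical, not part of the Cadence code above):

```python
import torch

lib = torch.library.Library("mylib", "DEF")  # hypothetical namespace
lib.define(
    "scaled_copy(Tensor src, SymInt zero_point) -> Tensor"
)
```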
/aosp_15_r20/external/pytorch/aten/src/ATen/
TensorIndexing.h
7 #include <c10/core/SymInt.h>
27 constexpr int64_t INDEX_MIN = c10::SymInt::min_representable_int();
30 enum class TensorIndexType { None, Ellipsis, SymInt, Boolean, Slice, Tensor }; enumerator
42 std::optional<c10::SymInt> start_index = std::nullopt,
43 std::optional<c10::SymInt> stop_index = std::nullopt,
44 std::optional<c10::SymInt> step_index = std::nullopt) {
46 step_ = c10::SymInt(1);
56 start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0);
62 stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX);
68 inline c10::SymInt start() const { in start()
[all …]
TensorGeometry.h
24 c10::SymInt expected_stride = 1; in TensorGeometry()
74 c10::SymInt sym_size(int64_t dim) const { in sym_size()
81 c10::SymInt sym_stride(int64_t dim) const { in sym_stride()
88 c10::SymInt sym_storage_offset() const { in sym_storage_offset()
91 c10::SymInt sym_numel() const { in sym_numel()
116 std::vector<c10::SymInt>& mutable_sizes() { in mutable_sizes()
119 std::vector<c10::SymInt>& mutable_strides() { in mutable_strides()
122 c10::SymInt& mutable_storage_offset() { in mutable_storage_offset()
127 c10::SymInt numel = 1; in recompute()
137 std::vector<c10::SymInt> sizes_;
[all …]
/aosp_15_r20/external/executorch/exir/
sym_util.py
17 def eval_expr(symint: Union[int, torch.SymInt]) -> Optional[int]: argument
19 Evaluate a symint to int. Returns None if symint's symbolic expr
22 if isinstance(symint, int):
23 return symint
24 node = symint.node
34 def eval_upper_bound(maybe_symint: Union[int, torch.SymInt]) -> int: argument
36 Evaluate a symint to its upper bound value. Returns None if symint's symbolic expr's
69 def eval_shape(shape: Iterable[Union[int, torch.SymInt]]): # pyre-ignore[3] argument
80 def eval_shape_upper_bound(shape: Iterable[Union[int, torch.SymInt]]) -> List[int]: argument
88 shape: Iterable[Union[int, torch.SymInt]] argument
[all …]
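
eval_expr above concretizes a SymInt whose backing expression has collapsed to a constant. A hedged re-sketch of the idea (symint.node.expr is an internal sympy expression, not a stable API):

```python
from typing import Optional, Union

import torch

def try_concretize(symint: Union[int, torch.SymInt]) -> Optional[int]:
    """Return a plain int when the value is statically known, else None."""
    if isinstance(symint, int):
        return symint
    expr = symint.node.expr      # internal: the backing sympy expression
    if expr.is_number:           # folded to a constant, safe to evaluate
        return int(expr)
    return None
```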
/aosp_15_r20/external/pytorch/torch/_inductor/
inductor_prims.py
72 "inductor_random(SymInt[] size, Tensor seed, str mode) -> Tensor",
77 "inductor_randint(SymInt low, SymInt high, SymInt[] size, Tensor seed) -> Tensor",
82 "inductor_force_stride_order(Tensor input, SymInt[] stride) -> Tensor",
169 …emory_max_pool2d_with_offsets(Tensor self, SymInt[2] kernel_size, SymInt[2] stride, SymInt[2] pad…
176 …mory_max_pool2d_offsets_to_indices(Tensor self, SymInt kernel_w, SymInt input_w, SymInt[2] stride,…
/aosp_15_r20/external/pytorch/torch/_subclasses/
_fake_tensor_utils.py
7 from torch import SymInt
80 Represents a SymInt, SymFloat, SymBool without the associated ShapeEnv
116 Represents a SymInt in the cached key. Needed because SymInt doesn't
121 # PySymType: This is the 'normal' SymInt value, wrapped so we can use
122 # hash/eq as value hash/eq (normally SymInt does object
179 Represents a SymInt in the cached output.
186 def __init__(self, value: SymInt, key_path: Optional[int]) -> None: argument
192 def extract(self, key: _DispatchCacheKey, shape_env: ShapeEnv) -> SymInt:
194 return SymInt(self.value.extract(shape_env))
197 assert isinstance(src, _PySymInputStub) and isinstance(src.value, SymInt)
[all …]
/aosp_15_r20/external/pytorch/torch/csrc/utils/
python_arg_parser.h
243 inline std::vector<c10::SymInt> symintlist(int i);
245 inline c10::OptionalArray<c10::SymInt> symintlistOptional(int i);
263 inline std::optional<c10::SymInt> toSymIntOptional(int i);
291 inline c10::SymInt toSymInt(int i);
507 inline PyObject* toPyObject(const c10::SymInt& symint) { in toPyObject() argument
508 if (symint.is_symbolic()) { in toPyObject()
509 auto r = py::cast(symint).release().ptr(); in toPyObject()
513 auto m = symint.maybe_as_int(); in toPyObject()
536 inline std::vector<c10::SymInt> PythonArgs::symintlist(int i) { in symintlist()
539 return c10::SymInt(di); in symintlist()
[all …]
/aosp_15_r20/external/executorch/exir/dialects/edge/op/
sample_input.py
185 …"as_strided_copy.default": { # (Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offs…
341 "constant_pad_nd.default": { # (Tensor self, SymInt[] pad, Scalar value=0) -> Tensor
351 …put, Tensor weight, Tensor? bias, int[] stride, SymInt[] padding, int[] dilation, bool transposed,…
427 …"embedding.default": { # (Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_b…
439 …"empty.memory_format": { # (SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device…
468 "expand_copy.default": { # (Tensor self, SymInt[] size, *, bool implicit=False) -> Tensor
533 …"full.default": { # (SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=…
840 …"native_layer_norm.default": { # (Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor…
882 …"ones.default": { # (SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? devic…
953 "repeat.default": { # (Tensor self, SymInt[] repeats) -> Tensor
[all …]
/aosp_15_r20/external/pytorch/torch/_library/
fake_impl.py
130 def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
133 def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
134 """Constructs a new symint (symbolic int) representing a data-dependent value.
141 min (int): A statically known inclusive lower bound for this symint. Default: 0
143 symint. Default: None
154 to the symint also respects these constraints.
168 >>> # we use the ctx object to construct a new symint that
189 if isinstance(min, torch.SymInt) or isinstance(max, torch.SymInt):
192 f"min and max to be statically known ints but got SymInt. "
