
Searched defs:bdim (Results 1 – 19 of 19) sorted by relevance

/aosp_15_r20/external/pytorch/aten/src/ATen/functorch/
BatchRulesViews.cpp 188 …nsor, std::optional<int64_t>> squeeze_batch_rule(const Tensor& self, std::optional<int64_t> bdim) { in squeeze_batch_rule()
224 const Tensor& self, std::optional<int64_t> bdim, IntArrayRef dims) { in squeeze_dims_batch_rule()
256 const Tensor& self, std::optional<int64_t> bdim, int64_t dim) { in squeeze_dim_batch_rule()
260 …d::optional<int64_t>> select_batching_rule(const Tensor& self, std::optional<int64_t> bdim, int64_… in select_batching_rule()
271 …tional<int64_t>> _reshape_alias_batch_rule(const Tensor& self, std::optional<int64_t> bdim, const … in _reshape_alias_batch_rule()
282 …r, std::optional<int64_t>> roll_batch_rule(const Tensor& self, std::optional<int64_t> bdim, SymInt… in roll_batch_rule()
PlumbingHelper.cpp 25 Tensor makeBatched(const Tensor& tensor, std::optional<int64_t> bdim, int64_t level) { in makeBatched()
34 …<Tensor> makeBatchedVector(const std::vector<Tensor>& tensors, std::optional<int64_t> bdim, int64_… in makeBatchedVector()
BatchedTensorImpl.cpp 15 BatchedTensorImpl::BatchedTensorImpl(DispatchKeySet key_set, Tensor value, int64_t bdim, int64_t le… in BatchedTensorImpl()
174 Tensor makeBatched(const Tensor& tensor, int64_t bdim, int64_t level) { in makeBatched()
BatchRulesHelper.cpp 46 std::optional<int64_t> bdim = has_batch_dim ? std::optional<int64_t>(0) : std::nullopt; in getPhysicalDim() local
57 std::optional<int64_t> bdim = has_batch_dim ? std::optional<int64_t>(0) : std::nullopt; in getPhysicalDims() local
LegacyBatchingRegistrations.cpp 108 const auto bdim = batched->bdim(); in squeeze_dims__batching_rule() local
156 const auto bdim = batched->bdim(); in squeeze__batching_rule() local
593 auto bdim = physical_tensors[0].size(0); in block_diag_batching_rule() local
BatchedTensorImpl.h 46 int64_t bdim() const { return bdim_; } in bdim() function
BatchRulesLoss.cpp 15 static at::Tensor flatten_logical(const Tensor& tensor, std::optional<int64_t> bdim) { in flatten_logical()
BatchRulesScatterOps.cpp 21 for (const auto& bdim : bdims) { in any_has_value() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
LegacyBatching.cpp 24 auto* it = std::find_if(bdims.begin(), bdims.end(), [&](const BatchDim& bdim) { in has_level()
58 for (const auto& bdim : bdims) { in remove_existing_batch_dim() local
70 [&](const BatchDim& bdim) { in remove_existing_batch_dim()
/aosp_15_r20/external/pytorch/aten/src/ATen/
LegacyBatchedTensorImpl.h 130 for (const auto& bdim : bdims) { in createBatchDimBitset() local
139 for (const auto& bdim : bdims) { in createVmapLevelsBitset() local
LegacyBatchedTensorImpl.cpp 73 for (const auto& bdim : bdims_) { in checkInvariants() local
125 [](const BatchDim& bdim) { return bdim.level() < kVmapNumLevels; }), in makeBatched()
LegacyVmapTransforms.cpp 31 for (const auto& bdim : bdims) { in permuteBatchDimsToFront() local
170 for (const auto bdim : c10::irange(requested_levels.count())) { in alignBatchDimsAtFront() local
LegacyBatchingRegistrations.cpp 350 for (const auto bdim : c10::irange(self_physical.numBatchDims())) { in permute_batching_rule() local
/aosp_15_r20/external/pytorch/aten/src/ATen/native/cuda/
UpSampleNearest1d.cu 125 dim3 bdim{std::min<unsigned int>( in upsample_nearest1d_out_cuda_template() local
173 dim3 bdim{std::min<unsigned int>( in upsample_nearest1d_backward_out_cuda_template() local
UpSampleNearest3d.cu 180 dim3 bdim{std::min<unsigned int>( in upsample_nearest3d_out_cuda_template() local
254 dim3 bdim{std::min<unsigned int>( in upsample_nearest3d_backward_out_cuda_template() local
UpSampleNearest2d.cu 408 dim3 bdim{std::min<unsigned int>( in upsample_nearest2d_backward_out_cuda_template() local
/aosp_15_r20/external/pytorch/test/functorch/
common_utils.py 199 def add_batch_dim(arg, bdim, batch_size=3): argument
261 def get_batched_arg(arg, bdim): argument
/aosp_15_r20/external/pytorch/aten/src/ATen/test/
legacy_vmap_test.cpp 556 [&](const BatchDim& bdim) -> BatchDim { in TEST()
1038 [&](const BatchDim& bdim) -> BatchDim { in TEST()
/aosp_15_r20/external/pytorch/torch/_subclasses/
meta_utils.py 500 bdim: Optional[int] = None # is_functorch_wrapped variable in MetaTensorDesc
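
Note on the hits above: in the functorch and ATen files, bdim is a vmap batch-dimension index (usually std::optional<int64_t>, with std::nullopt meaning "no batch dim", as in the getPhysicalDim hit), while the UpSampleNearest*.cu hits appear to use bdim as a CUDA launch block dimension (dim3), which is unrelated. As context only, below is a minimal, self-contained C++ sketch of the batch-rule convention visible in signatures such as squeeze_batch_rule and roll_batch_rule: each Tensor argument travels with an optional bdim, and the rule returns the result together with its output bdim. The Tensor struct, move_bdim_to_front, and unary_op_batch_rule here are illustrative stand-ins, not symbols from the results.

#include <algorithm>
#include <cstdint>
#include <optional>
#include <tuple>
#include <vector>

// Toy stand-in for at::Tensor; only the shape matters for this sketch.
struct Tensor {
  std::vector<int64_t> sizes;
};

// Move the batch dim (if any) to the front, mirroring the move-to-front
// idiom batch rules apply before calling the underlying op. Assumes
// 0 <= *bdim < sizes.size() when bdim is set.
static Tensor move_bdim_to_front(Tensor t, std::optional<int64_t> bdim) {
  if (bdim.has_value() && *bdim != 0) {
    // Rotate so the size at index *bdim becomes index 0.
    std::rotate(t.sizes.begin(), t.sizes.begin() + *bdim,
                t.sizes.begin() + *bdim + 1);
  }
  return t;
}

// Batch-rule shape: (Tensor, optional bdim) -> (Tensor, optional out_bdim).
std::tuple<Tensor, std::optional<int64_t>> unary_op_batch_rule(
    const Tensor& self, std::optional<int64_t> bdim) {
  Tensor physical = move_bdim_to_front(self, bdim);
  // ...apply the underlying (shape-preserving) op on `physical` here...
  // If the input carried a batch dim, the output's batch dim is now 0.
  std::optional<int64_t> out_bdim =
      bdim.has_value() ? std::optional<int64_t>(0) : std::nullopt;
  return std::make_tuple(physical, out_bdim);
}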