# mypy: allow-untyped-defs
"""Adds docstrings to functions defined in the torch._C module."""

import re
from typing import Dict

import torch._C
from torch._C import _add_docstr as add_docstr


def parse_kwargs(desc):
    r"""Map a description of args to a dictionary of {argname: description}.

    Input:
        ('    weight (Tensor): a weight tensor\n' +
         '        Some optional description')
    Output: {
        'weight': \
        'weight (Tensor): a weight tensor\n        Some optional description'
    }
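
    A minimal doctest-style illustration (single-line input, no continuation)::

        >>> parse_kwargs('    weight (Tensor): a weight tensor')
        {'weight': 'weight (Tensor): a weight tensor'}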
21    """
22    # Split on exactly 4 spaces after a newline
23    regx = re.compile(r"\n\s{4}(?!\s)")
24    kwargs = [section.strip() for section in regx.split(desc)]
25    kwargs = [section for section in kwargs if len(section) > 0]
26    return {desc.split(" ")[0]: desc for desc in kwargs}
27
28
29def merge_dicts(*dicts):
30    """Merge dictionaries into a single dictionary."""
    return {x: d[x] for d in dicts for x in d}


common_args = parse_kwargs(
    """
    input (Tensor): the input tensor.
    generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling
    out (Tensor, optional): the output tensor.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned tensor. Default: ``torch.preserve_format``.
"""
)

reduceops_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        If specified, the input tensor is cast to :attr:`dtype` before the operation
        is performed. This is useful for preventing data type overflows. Default: None.
    keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
"""
    ),
)

multi_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int or tuple of ints): the dimension or dimensions to reduce.
"""
    ),
    {
        "keepdim_details": """
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the
output tensor having 1 (or ``len(dim)``) fewer dimension(s).
"""
    },
    {
        "opt_dim": """
    dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
        If ``None``, all dimensions are reduced.
"""
    },
)

single_dim_common = merge_dicts(
    reduceops_common_args,
    parse_kwargs(
        """
    dim (int): the dimension to reduce.
"""
    ),
    {
        "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`."""
    },
)

factory_common_args = merge_dicts(
    common_args,
    parse_kwargs(
        """
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
    layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
        Default: ``torch.strided``.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.contiguous_format``.
    check_invariants (bool, optional): If sparse tensor invariants are checked.
        Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
        initially False.
"""
    ),
    {
        "sparse_factory_device_note": """\
.. note::

   If the ``device`` argument is not specified the device of the given
   :attr:`values` and indices tensor(s) must match. If, however, the
   argument is specified the input Tensors will be converted to the
   given device and in turn determine the device of the constructed
   sparse tensor."""
    },
)

factory_like_common_args = parse_kwargs(
    """
    input (Tensor): the size of :attr:`input` will determine size of the output tensor.
    layout (:class:`torch.layout`, optional): the desired layout of returned tensor.
        Default: if ``None``, defaults to the layout of :attr:`input`.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor.
        Default: if ``None``, defaults to the dtype of :attr:`input`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, defaults to the device of :attr:`input`.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
    memory_format (:class:`torch.memory_format`, optional): the desired memory format of
        returned Tensor. Default: ``torch.preserve_format``.
"""
)

factory_data_common_args = parse_kwargs(
    """
    data (array_like): Initial data for the tensor. Can be a list, tuple,
        NumPy ``ndarray``, scalar, and other types.
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
        Default: if ``None``, infers data type from :attr:`data`.
    device (:class:`torch.device`, optional): the desired device of returned tensor.
        Default: if ``None``, uses the current device for the default tensor type
        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
        for CPU tensor types and the current CUDA device for CUDA tensor types.
    requires_grad (bool, optional): If autograd should record operations on the
        returned tensor. Default: ``False``.
    pin_memory (bool, optional): If set, returned tensor would be allocated in
        the pinned memory. Works only for CPU tensors. Default: ``False``.
"""
)

tf32_notes = {
    "tf32_note": """This operator supports :ref:`TensorFloat32<tf32_on_ampere>`."""
}

rocm_fp16_notes = {
    "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \
:ref:`different precision<fp16_on_mi200>` for backward."""
}

reproducibility_notes: Dict[str, str] = {
    "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \
a CUDA device. See :doc:`/notes/randomness` for more information.""",
    "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \
and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \
undesirable, you can try to make the operation deterministic (potentially at \
a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. \
See :doc:`/notes/randomness` for more information.""",
}

sparse_support_notes = {
    "sparse_beta_warning": """
.. warning::
    Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
    or may not have autograd support. If you notice missing functionality please
    open a feature request.""",
}

add_docstr(
    torch.abs,
    r"""
abs(input, *, out=None) -> Tensor

Computes the absolute value of each element in :attr:`input`.

.. math::
    \text{out}_{i} = |\text{input}_{i}|
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.abs(torch.tensor([-1, -2, 3]))
    tensor([ 1,  2,  3])
""".format(**common_args),
)

add_docstr(
    torch.absolute,
    r"""
absolute(input, *, out=None) -> Tensor

Alias for :func:`torch.abs`.
""",
)

add_docstr(
    torch.acos,
    r"""
acos(input, *, out=None) -> Tensor

Computes the inverse cosine of each element in :attr:`input`.

.. math::
    \text{out}_{i} = \cos^{-1}(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4)
    >>> a
    tensor([ 0.3348, -0.5889,  0.2005, -0.1584])
    >>> torch.acos(a)
    tensor([ 1.2294,  2.2004,  1.3690,  1.7298])
""".format(**common_args),
)

add_docstr(
    torch.arccos,
    r"""
arccos(input, *, out=None) -> Tensor

Alias for :func:`torch.acos`.
""",
)

add_docstr(
    torch.acosh,
    r"""
acosh(input, *, out=None) -> Tensor

Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.

.. math::
    \text{out}_{i} = \cosh^{-1}(\text{input}_{i})

Note:
    The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range
    will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`.
"""
    + r"""
Args:
    {input}

Keyword arguments:
    {out}

Example::

    >>> a = torch.randn(4).uniform_(1, 2)
    >>> a
    tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ])
    >>> torch.acosh(a)
    tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ])
""".format(**common_args),
)

add_docstr(
    torch.arccosh,
    r"""
arccosh(input, *, out=None) -> Tensor

Alias for :func:`torch.acosh`.
""",
)

add_docstr(
    torch.index_add,
    r"""
index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor

See :meth:`~Tensor.index_add_` for function description.
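
Example::

    >>> # A small, hand-checkable sketch: add 10. at index 0 and 20. at index 2
    >>> x = torch.ones(5)
    >>> torch.index_add(x, 0, torch.tensor([0, 2]), torch.tensor([10., 20.]))
    tensor([11.,  1., 21.,  1.,  1.])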
308""",
309)
310
311add_docstr(
312    torch.index_copy,
313    r"""
314index_copy(input, dim, index, source, *, out=None) -> Tensor
315
See :meth:`~Tensor.index_copy_` for function description.
317""",
318)
319
320add_docstr(
321    torch.index_reduce,
322    r"""
323index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor
324
325See :meth:`~Tensor.index_reduce_` for function description.
326""",
327)
328
329add_docstr(
330    torch.add,
331    r"""
332add(input, other, *, alpha=1, out=None) -> Tensor
333
334Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`.
335
336.. math::
337    \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i
338"""
339    + r"""
340
341Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
342:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
343
344Args:
345    {input}
346    other (Tensor or Number): the tensor or number to add to :attr:`input`.
347
348Keyword arguments:
349    alpha (Number): the multiplier for :attr:`other`.
350    {out}
351
352Examples::
353
354    >>> a = torch.randn(4)
355    >>> a
356    tensor([ 0.0202,  1.0985,  1.3506, -0.6056])
357    >>> torch.add(a, 20)
358    tensor([ 20.0202,  21.0985,  21.3506,  19.3944])
359
360    >>> b = torch.randn(4)
361    >>> b
362    tensor([-0.9732, -0.3497,  0.6245,  0.4022])
363    >>> c = torch.randn(4, 1)
364    >>> c
365    tensor([[ 0.3743],
366            [-1.7724],
367            [-0.5811],
368            [-0.8017]])
369    >>> torch.add(b, c, alpha=10)
370    tensor([[  2.7695,   3.3930,   4.3672,   4.1450],
371            [-18.6971, -18.0736, -17.0994, -17.3216],
372            [ -6.7845,  -6.1610,  -5.1868,  -5.4090],
373            [ -8.9902,  -8.3667,  -7.3925,  -7.6147]])
374""".format(**common_args),
375)
376
377add_docstr(
378    torch.addbmm,
379    r"""
380addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
381
382Performs a batch matrix-matrix product of matrices stored
383in :attr:`batch1` and :attr:`batch2`,
384with a reduced add step (all matrix multiplications get accumulated
385along the first dimension).
386:attr:`input` is added to the final result.
387
388:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the
389same number of matrices.
390
391If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
392:math:`(b \times m \times p)` tensor, :attr:`input` must be
393:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
394and :attr:`out` will be a :math:`(n \times p)` tensor.
395
396.. math::
397    out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i)
398
399If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
400it will not be propagated.
401"""
402    + r"""
403For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha`
404must be real numbers, otherwise they should be integers.
405
406{tf32_note}
407
408{rocm_fp16_note}
409
410Args:
411    batch1 (Tensor): the first batch of matrices to be multiplied
412    batch2 (Tensor): the second batch of matrices to be multiplied
413
414Keyword args:
415    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
416    input (Tensor): matrix to be added
417    alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`)
    {out}

Example::

    >>> M = torch.randn(3, 5)
    >>> batch1 = torch.randn(10, 3, 4)
    >>> batch2 = torch.randn(10, 4, 5)
    >>> torch.addbmm(M, batch1, batch2)
    tensor([[  6.6311,   0.0503,   6.9768, -12.0362,  -2.1653],
            [ -4.8185,  -1.4255,  -6.6760,   8.9453,   2.5743],
            [ -3.8202,   4.3691,   1.0943,  -1.1109,   5.4730]])
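
    >>> # A hand-checkable case: each of the 2 batches contributes ones(2, 3) @ ones(3, 2) = 3
    >>> torch.addbmm(torch.zeros(2, 2), torch.ones(2, 2, 3), torch.ones(2, 3, 2))
    tensor([[6., 6.],
            [6., 6.]])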
429""".format(**common_args, **tf32_notes, **rocm_fp16_notes),
430)
431
432add_docstr(
433    torch.addcdiv,
434    r"""
435addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
436
437Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`,
438multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`.
439
440.. warning::
441    Integer division with addcdiv is no longer supported, and in a future
442    release addcdiv will perform a true division of tensor1 and tensor2.
443    The historic addcdiv behavior can be implemented as
444    (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype)
445    for integer inputs and as (input + value * tensor1 / tensor2) for float inputs.
446    The future addcdiv behavior is just the latter implementation:
447    (input + value * tensor1 / tensor2), for all dtypes.
448
449.. math::
450    \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i}
451"""
452    + r"""
453
454The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
455:ref:`broadcastable <broadcasting-semantics>`.
456
457For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
458a real number, otherwise an integer.
459
460Args:
461    input (Tensor): the tensor to be added
462    tensor1 (Tensor): the numerator tensor
463    tensor2 (Tensor): the denominator tensor
464
465Keyword args:
466    value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}`
467    {out}
468
469Example::
470
471    >>> t = torch.randn(1, 3)
472    >>> t1 = torch.randn(3, 1)
473    >>> t2 = torch.randn(1, 3)
474    >>> torch.addcdiv(t, t1, t2, value=0.1)
475    tensor([[-0.2312, -3.6496,  0.1312],
476            [-1.0428,  3.4292, -0.1030],
477            [-0.5369, -0.9829,  0.0430]])
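
    >>> # A hand-checkable case: 1 + 0.5 * (6 / 4) = 1.75
    >>> torch.addcdiv(torch.tensor([1.]), torch.tensor([6.]), torch.tensor([4.]), value=0.5)
    tensor([1.7500])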
478""".format(**common_args),
479)
480
481add_docstr(
482    torch.addcmul,
483    r"""
484addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor
485
486Performs the element-wise multiplication of :attr:`tensor1`
487by :attr:`tensor2`, multiplies the result by the scalar :attr:`value`
488and adds it to :attr:`input`.
489
490.. math::
491    \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i
492"""
493    + r"""
The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be
:ref:`broadcastable <broadcasting-semantics>`.

For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be
a real number, otherwise an integer.

Args:
    input (Tensor): the tensor to be added
    tensor1 (Tensor): the tensor to be multiplied
    tensor2 (Tensor): the tensor to be multiplied

Keyword args:
    value (Number, optional): multiplier for :math:`tensor1 .* tensor2`
    {out}

Example::

    >>> t = torch.randn(1, 3)
    >>> t1 = torch.randn(3, 1)
    >>> t2 = torch.randn(1, 3)
    >>> torch.addcmul(t, t1, t2, value=0.1)
    tensor([[-0.8635, -0.6391,  1.6174],
            [-0.7617, -0.5879,  1.7388],
            [-0.8353, -0.6249,  1.6511]])
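
    >>> # A hand-checkable case: 1 + 0.1 * (2 * 3) = 1.6
    >>> torch.addcmul(torch.tensor([1.]), torch.tensor([2.]), torch.tensor([3.]), value=0.1)
    tensor([1.6000])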
518""".format(**common_args),
519)
520
521add_docstr(
522    torch.addmm,
523    r"""
524addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
525
526Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
527The matrix :attr:`input` is added to the final result.
528
529If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
530:math:`(m \times p)` tensor, then :attr:`input` must be
531:ref:`broadcastable <broadcasting-semantics>` with a :math:`(n \times p)` tensor
532and :attr:`out` will be a :math:`(n \times p)` tensor.
533
534:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
535:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
536
537.. math::
538    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i)
539
540If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
541it will not be propagated.
542"""
543    + r"""
544For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
545:attr:`alpha` must be real numbers, otherwise they should be integers.
546
547This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. If
548:attr:`input` is sparse the result will have the same layout and if :attr:`out`
549is provided it must have the same layout as :attr:`input`.
550
551{sparse_beta_warning}
552
553{tf32_note}
554
555{rocm_fp16_note}
556
557Args:
558    input (Tensor): matrix to be added
559    mat1 (Tensor): the first matrix to be matrix multiplied
560    mat2 (Tensor): the second matrix to be matrix multiplied
561
562Keyword args:
563    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
564    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
565    {out}
566
567Example::
568
569    >>> M = torch.randn(2, 3)
570    >>> mat1 = torch.randn(2, 3)
571    >>> mat2 = torch.randn(3, 3)
572    >>> torch.addmm(M, mat1, mat2)
573    tensor([[-4.8716,  1.4671, -1.3746],
574            [ 0.7573, -3.9555, -2.8681]])
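
    >>> # A hand-checkable case: 1 + (ones(2, 3) @ ones(3, 2)) = 1 + 3
    >>> torch.addmm(torch.ones(2, 2), torch.ones(2, 3), torch.ones(3, 2))
    tensor([[4., 4.],
            [4., 4.]])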
575""".format(**common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes),
576)
577
578add_docstr(
579    torch.adjoint,
580    r"""
581adjoint(Tensor) -> Tensor
582Returns a view of the tensor conjugated and with the last two dimensions transposed.
583
584``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and
585to ``x.transpose(-2, -1)`` for real tensors.
586
587Example::
588    >>> x = torch.arange(4, dtype=torch.float)
589    >>> A = torch.complex(x, x).reshape(2, 2)
590    >>> A
591    tensor([[0.+0.j, 1.+1.j],
592            [2.+2.j, 3.+3.j]])
593    >>> A.adjoint()
594    tensor([[0.-0.j, 2.-2.j],
595            [1.-1.j, 3.-3.j]])
596    >>> (A.adjoint() == A.mH).all()
597    tensor(True)
598""",
599)
600
601add_docstr(
602    torch.sspaddmm,
603    r"""
604sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor
605
606Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor
607:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result.
608
609Note: This function is equivalent to :func:`torch.addmm`, except
610:attr:`input` and :attr:`mat1` are sparse.
611
612Args:
613    input (Tensor): a sparse matrix to be added
614    mat1 (Tensor): a sparse matrix to be matrix multiplied
615    mat2 (Tensor): a dense matrix to be matrix multiplied
616
617Keyword args:
    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
    alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`)
    {out}
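
Example::

    >>> # A small, hand-checkable sketch with a tiny sparse matrix
    >>> sp = torch.tensor([[1., 0.], [0., 2.]]).to_sparse()
    >>> torch.sspaddmm(sp, sp, torch.ones(2, 2)).to_dense()
    tensor([[2., 1.],
            [2., 4.]])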
621""".format(**common_args),
622)
623
624add_docstr(
625    torch.smm,
626    r"""
627smm(input, mat) -> Tensor
628
629Performs a matrix multiplication of the sparse matrix :attr:`input`
630with the dense matrix :attr:`mat`.
631
632Args:
633    input (Tensor): a sparse matrix to be matrix multiplied
634    mat (Tensor): a dense matrix to be matrix multiplied
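
Example::

    >>> # A small, hand-checkable sketch; the sparse result is densified for display
    >>> sp = torch.tensor([[1., 0.], [0., 2.]]).to_sparse()
    >>> torch.smm(sp, torch.ones(2, 2)).to_dense()
    tensor([[1., 1.],
            [2., 2.]])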
635""",
636)
637
638add_docstr(
639    torch.addmv,
640    r"""
641addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor
642
643Performs a matrix-vector product of the matrix :attr:`mat` and
644the vector :attr:`vec`.
645The vector :attr:`input` is added to the final result.
646
647If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
648size `m`, then :attr:`input` must be
649:ref:`broadcastable <broadcasting-semantics>` with a 1-D tensor of size `n` and
650:attr:`out` will be 1-D tensor of size `n`.
651
652:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
653:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively.
654
655.. math::
656    \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec})
657
658If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
659it will not be propagated.
660"""
661    + r"""
662For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
663:attr:`alpha` must be real numbers, otherwise they should be integers.
664
665Args:
666    input (Tensor): vector to be added
667    mat (Tensor): matrix to be matrix multiplied
668    vec (Tensor): vector to be matrix multiplied
669
670Keyword args:
671    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
672    alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`)
673    {out}
674
675Example::
676
677    >>> M = torch.randn(2)
678    >>> mat = torch.randn(2, 3)
679    >>> vec = torch.randn(3)
680    >>> torch.addmv(M, mat, vec)
681    tensor([-0.3768, -5.5565])
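
    >>> # A hand-checkable case: 0 + (ones(2, 3) @ ones(3)) = [3., 3.]
    >>> torch.addmv(torch.zeros(2), torch.ones(2, 3), torch.ones(3))
    tensor([3., 3.])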
682""".format(**common_args),
683)
684
685add_docstr(
686    torch.addr,
687    r"""
688addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor
689
690Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2`
691and adds it to the matrix :attr:`input`.
692
693Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the
694outer product between :attr:`vec1` and :attr:`vec2` and the added matrix
695:attr:`input` respectively.
696
697.. math::
698    \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2})
699
700If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
701it will not be propagated.
702"""
703    + r"""
704If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector
705of size `m`, then :attr:`input` must be
706:ref:`broadcastable <broadcasting-semantics>` with a matrix of size
707:math:`(n \times m)` and :attr:`out` will be a matrix of size
708:math:`(n \times m)`.
709
710Args:
711    input (Tensor): matrix to be added
712    vec1 (Tensor): the first vector of the outer product
713    vec2 (Tensor): the second vector of the outer product
714
715Keyword args:
716    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
717    alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`)
718    {out}
719
720Example::
721
722    >>> vec1 = torch.arange(1., 4.)
723    >>> vec2 = torch.arange(1., 3.)
724    >>> M = torch.zeros(3, 2)
725    >>> torch.addr(M, vec1, vec2)
726    tensor([[ 1.,  2.],
727            [ 2.,  4.],
728            [ 3.,  6.]])
729""".format(**common_args),
730)
731
732add_docstr(
733    torch.allclose,
734    r"""
735allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool
736
737This function checks if :attr:`input` and :attr:`other` satisfy the condition:
738
739.. math::
740    \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
741"""
742    + r"""
743elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to
744`numpy.allclose <https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html>`_
745
746Args:
747    input (Tensor): first tensor to compare
748    other (Tensor): second tensor to compare
749    atol (float, optional): absolute tolerance. Default: 1e-08
750    rtol (float, optional): relative tolerance. Default: 1e-05
751    equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
752
753Example::
754
755    >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08]))
756    False
757    >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09]))
758    True
759    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]))
760    False
761    >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True)
762    True
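
In the first example above the comparison fails on the second pair:
:math:`\lvert 10^{-7} - 10^{-8} \rvert = 9 \times 10^{-8}` exceeds
:math:`\texttt{atol} + \texttt{rtol} \times \lvert 10^{-8} \rvert \approx 1.00001 \times 10^{-8}`.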
763""",
764)
765
766add_docstr(
767    torch.all,
768    r"""
769all(input) -> Tensor
770
771Tests if all elements in :attr:`input` evaluate to `True`.
772
773.. note:: This function matches the behaviour of NumPy in returning
774          output of dtype `bool` for all supported dtypes except `uint8`.
775          For `uint8` the dtype of output is `uint8` itself.
776
777Example::
778
779    >>> a = torch.rand(1, 2).bool()
780    >>> a
781    tensor([[False, True]], dtype=torch.bool)
782    >>> torch.all(a)
783    tensor(False, dtype=torch.bool)
784    >>> a = torch.arange(0, 3)
785    >>> a
786    tensor([0, 1, 2])
787    >>> torch.all(a)
788    tensor(False)
789
790.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor
791   :noindex:
792
793For each row of :attr:`input` in the given dimension :attr:`dim`,
794returns `True` if all elements in the row evaluate to `True` and `False` otherwise.
795
796{keepdim_details}
797
798Args:
799    {input}
800    {dim}
801    {keepdim}
802
803Keyword args:
804    {out}
805
806Example::
807
808    >>> a = torch.rand(4, 2).bool()
809    >>> a
810    tensor([[True, True],
811            [True, False],
812            [True, True],
813            [True, True]], dtype=torch.bool)
814    >>> torch.all(a, dim=1)
815    tensor([ True, False,  True,  True], dtype=torch.bool)
816    >>> torch.all(a, dim=0)
817    tensor([ True, False], dtype=torch.bool)
818""".format(**multi_dim_common),
819)
820
821add_docstr(
822    torch.any,
823    r"""
824any(input) -> Tensor
825
826Tests if any element in :attr:`input` evaluates to `True`.
827
828.. note:: This function matches the behaviour of NumPy in returning
829          output of dtype `bool` for all supported dtypes except `uint8`.
830          For `uint8` the dtype of output is `uint8` itself.
831
832Example::
833
834    >>> a = torch.rand(1, 2).bool()
835    >>> a
836    tensor([[False, True]], dtype=torch.bool)
837    >>> torch.any(a)
838    tensor(True, dtype=torch.bool)
839    >>> a = torch.arange(0, 3)
840    >>> a
841    tensor([0, 1, 2])
842    >>> torch.any(a)
843    tensor(True)
844
845.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor
846   :noindex:
847
848For each row of :attr:`input` in the given dimension :attr:`dim`,
returns `True` if any element in the row evaluates to `True` and `False` otherwise.

{keepdim_details}

Args:
    {input}
    {dim}
    {keepdim}

Keyword args:
    {out}

Example::

    >>> a = torch.randn(4, 2) < 0
    >>> a
    tensor([[ True,  True],
            [False,  True],
            [ True,  True],
            [False, False]])
    >>> torch.any(a, 1)
    tensor([ True,  True,  True, False])
    >>> torch.any(a, 0)
    tensor([True, True])
""".format(**multi_dim_common),
)

add_docstr(
    torch.angle,
    r"""
angle(input, *, out=None) -> Tensor

Computes the element-wise angle (in radians) of the given :attr:`input` tensor.

.. math::
    \text{out}_{i} = angle(\text{input}_{i})
"""
    + r"""
Args:
    {input}

Keyword args:
    {out}

.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers,
          zero for non-negative real numbers, and propagates NaNs. Previously
          the function would return zero for all real numbers and not propagate
          floating-point NaNs.

Example::

    >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
    tensor([ 135.,  135.,  -45.])
902""".format(**common_args),
903)
904
905add_docstr(
906    torch.as_strided,
907    r"""
908as_strided(input, size, stride, storage_offset=None) -> Tensor
909
910Create a view of an existing `torch.Tensor` :attr:`input` with specified
911:attr:`size`, :attr:`stride` and :attr:`storage_offset`.
912
913.. warning::
914    Prefer using other view functions, like :meth:`torch.Tensor.expand`,
915    to setting a view's strides manually with `as_strided`, as this
916    function's behavior depends on the implementation of a tensor's storage.
917    The constructed view of the storage must only refer to elements within
918    the storage or a runtime error will be thrown, and if the view is
919    "overlapped" (with multiple indices referring to the same element in
920    memory) its behavior is undefined.
921
922Args:
923    {input}
    size (tuple of ints): the shape of the output tensor
    stride (tuple of ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor.
        If ``None``, the storage_offset of the output tensor will match the input tensor.

Example::

    >>> x = torch.randn(3, 3)
    >>> x
    tensor([[ 0.9039,  0.6291,  1.0795],
            [ 0.1586,  2.1939, -0.4900],
            [-0.1909, -0.7503,  1.9355]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2))
    >>> t
    tensor([[0.9039, 1.0795],
            [0.6291, 0.1586]])
    >>> t = torch.as_strided(x, (2, 2), (1, 2), 1)
    >>> t
    tensor([[0.6291, 0.1586],
            [1.0795, 2.1939]])
943""".format(**common_args),
944)
945
946add_docstr(
947    torch.as_tensor,
948    r"""
949as_tensor(data, dtype=None, device=None) -> Tensor
950
951Converts :attr:`data` into a tensor, sharing data and preserving autograd
952history if possible.
953
954If :attr:`data` is already a tensor with the requested dtype and device
955then :attr:`data` itself is returned, but if :attr:`data` is a
956tensor with a different dtype or device then it's copied as if using
957`data.to(dtype=dtype, device=device)`.
958
959If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a
960tensor is constructed using :func:`torch.from_numpy`.
961
962If :attr:`data` is a CuPy array, the returned tensor will be located on the same device as the CuPy array unless
963specifically overwritten by :attr:`device` or a default device.
964
965.. seealso::
966
967    :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`).
968
969
970Args:
971    {data}
972    {dtype}
973    device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
974        then the device of data is used. If None and data is not a tensor then
975        the result tensor is constructed on the current device.
976
977
978Example::
979
980    >>> a = numpy.array([1, 2, 3])
981    >>> t = torch.as_tensor(a)
982    >>> t
983    tensor([ 1,  2,  3])
984    >>> t[0] = -1
985    >>> a
986    array([-1,  2,  3])
987
988    >>> a = numpy.array([1, 2, 3])
989    >>> t = torch.as_tensor(a, device=torch.device('cuda'))
990    >>> t
991    tensor([ 1,  2,  3])
992    >>> t[0] = -1
993    >>> a
994    array([1,  2,  3])
995""".format(**factory_data_common_args),
996)
997
998add_docstr(
999    torch.asin,
1000    r"""
1001asin(input, *, out=None) -> Tensor
1002
1003Returns a new tensor with the arcsine of the elements of :attr:`input`.
1004
1005.. math::
1006    \text{out}_{i} = \sin^{-1}(\text{input}_{i})
1007"""
1008    + r"""
1009Args:
1010    {input}
1011
1012Keyword args:
1013    {out}
1014
1015Example::
1016
1017    >>> a = torch.randn(4)
1018    >>> a
1019    tensor([-0.5962,  1.4985, -0.4396,  1.4525])
1020    >>> torch.asin(a)
1021    tensor([-0.6387,     nan, -0.4552,     nan])
1022""".format(**common_args),
1023)
1024
1025add_docstr(
1026    torch.arcsin,
1027    r"""
1028arcsin(input, *, out=None) -> Tensor
1029
1030Alias for :func:`torch.asin`.
1031""",
1032)
1033
1034add_docstr(
1035    torch.asinh,
1036    r"""
1037asinh(input, *, out=None) -> Tensor
1038
1039Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
1040
1041.. math::
1042    \text{out}_{i} = \sinh^{-1}(\text{input}_{i})
1043"""
1044    + r"""
1045Args:
1046    {input}
1047
1048Keyword arguments:
1049    {out}
1050
1051Example::
1052
1053    >>> a = torch.randn(4)
1054    >>> a
1055    tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ])
1056    >>> torch.asinh(a)
1057    tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ])
1058""".format(**common_args),
1059)
1060
1061add_docstr(
1062    torch.arcsinh,
1063    r"""
1064arcsinh(input, *, out=None) -> Tensor
1065
1066Alias for :func:`torch.asinh`.
1067""",
1068)
1069
1070add_docstr(
1071    torch.atan,
1072    r"""
1073atan(input, *, out=None) -> Tensor
1074
1075Returns a new tensor with the arctangent of the elements of :attr:`input`.
1076
1077.. math::
1078    \text{out}_{i} = \tan^{-1}(\text{input}_{i})
1079"""
1080    + r"""
1081Args:
1082    {input}
1083
1084Keyword args:
1085    {out}
1086
1087Example::
1088
1089    >>> a = torch.randn(4)
1090    >>> a
1091    tensor([ 0.2341,  0.2539, -0.6256, -0.6448])
1092    >>> torch.atan(a)
1093    tensor([ 0.2299,  0.2487, -0.5591, -0.5727])
1094""".format(**common_args),
1095)
1096
1097add_docstr(
1098    torch.arctan,
1099    r"""
1100arctan(input, *, out=None) -> Tensor
1101
1102Alias for :func:`torch.atan`.
1103""",
1104)
1105
1106add_docstr(
1107    torch.atan2,
1108    r"""
1109atan2(input, other, *, out=None) -> Tensor
1110
1111Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}`
1112with consideration of the quadrant. Returns a new tensor with the signed angles
1113in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})`
1114and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second
1115parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first
1116parameter, is the y-coordinate.)
1117
1118The shapes of ``input`` and ``other`` must be
1119:ref:`broadcastable <broadcasting-semantics>`.
1120
1121Args:
1122    input (Tensor): the first input tensor
1123    other (Tensor): the second input tensor
1124
1125Keyword args:
1126    {out}
1127
1128Example::
1129
1130    >>> a = torch.randn(4)
1131    >>> a
1132    tensor([ 0.9041,  0.0196, -0.3108, -2.4423])
1133    >>> torch.atan2(a, torch.randn(4))
1134    tensor([ 0.9833,  0.0811, -1.9743, -1.4151])
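
    >>> # Quadrant behavior at four unit points (input is y, other is x)
    >>> torch.atan2(torch.tensor([1., 1., -1., -1.]), torch.tensor([1., -1., 1., -1.]))
    tensor([ 0.7854,  2.3562, -0.7854, -2.3562])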
1135""".format(**common_args),
1136)
1137
1138add_docstr(
1139    torch.arctan2,
1140    r"""
1141arctan2(input, other, *, out=None) -> Tensor
1142Alias for :func:`torch.atan2`.
1143""",
1144)
1145
1146add_docstr(
1147    torch.atanh,
1148    r"""
1149atanh(input, *, out=None) -> Tensor
1150
1151Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
1152
1153Note:
1154    The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range
1155    will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is
1156    mapped to `+/-INF` respectively.
1157
1158.. math::
1159    \text{out}_{i} = \tanh^{-1}(\text{input}_{i})
1160"""
1161    + r"""
1162Args:
1163    {input}
1164
1165Keyword arguments:
1166    {out}
1167
1168Example::
1169
1170    >>> a = torch.randn(4).uniform_(-1, 1)
1171    >>> a
1172    tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ])
1173    >>> torch.atanh(a)
1174    tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ])
1175""".format(**common_args),
1176)
1177
1178add_docstr(
1179    torch.arctanh,
1180    r"""
1181arctanh(input, *, out=None) -> Tensor
1182
1183Alias for :func:`torch.atanh`.
1184""",
1185)
1186
1187add_docstr(
1188    torch.asarray,
1189    r"""
1190asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor
1191
1192Converts :attr:`obj` to a tensor.
1193
1194:attr:`obj` can be one of:
1195
11961. a tensor
11972. a NumPy array or a NumPy scalar
11983. a DLPack capsule
11994. an object that implements Python's buffer protocol
12005. a scalar
12016. a sequence of scalars
1202
1203When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will,
1204by default, not require a gradient, have the same datatype as :attr:`obj`, be on the
1205same device, and share memory with it. These properties can be controlled with the
1206:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments.
1207If the returned tensor is of a different datatype, on a different device, or a copy is
1208requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad`
1209is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is
1210also a tensor with an autograd history then the returned tensor will have the same history.
1211
1212When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's
1213buffer protocol then the buffer is interpreted as an array of bytes grouped according to
1214the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is
1215passed then the default floating point datatype is used, instead.) The returned tensor
1216will have the specified datatype (or default floating point datatype if none is specified)
1217and, by default, be on the CPU device and share memory with the buffer.
1218
When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on
the CPU that does not share its memory (i.e. ``copy=True``). By default its datatype will
be the PyTorch datatype corresponding to the NumPy scalar's datatype.

When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
returned tensor will, by default, infer its datatype from the scalar values, be on the
current default device, and not share its memory.

.. seealso::

    :func:`torch.tensor` creates a tensor that always copies the data from the input object.
    :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
    :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
    implement the buffer protocol.
    :func:`torch.from_dlpack` creates a tensor that always shares memory from
    DLPack capsules.

Args:
    obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
           buffer protocol, scalar, or sequence of scalars.

Keyword args:
    dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
           Default: ``None``, which causes the datatype of the returned tensor to be
           inferred from :attr:`obj`.
    copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
           Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
           whenever possible. If ``True`` then the returned tensor does not share its memory.
           If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
           error is thrown if it cannot.
    device (:class:`torch.device`, optional): the device of the returned tensor.
           Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
           :attr:`obj` is a Python sequence, the current default device will be used.
    requires_grad (bool, optional): whether the returned tensor requires grad.
           Default: ``False``, which causes the returned tensor not to require a gradient.
           If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
           is also a tensor with an autograd history then the returned tensor will have
           the same history.

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> # Shares memory with tensor 'a'
    >>> b = torch.asarray(a)
    >>> a.data_ptr() == b.data_ptr()
    True
    >>> # Forces memory copy
    >>> c = torch.asarray(a, copy=True)
    >>> a.data_ptr() == c.data_ptr()
    False

    >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
    >>> b = a + 2
    >>> b
    tensor([3., 4., 5.], grad_fn=<AddBackward0>)
    >>> # Shares memory with tensor 'b', with no grad
    >>> c = torch.asarray(b)
    >>> c
    tensor([3., 4., 5.])
    >>> # Shares memory with tensor 'b', retaining autograd history
    >>> d = torch.asarray(b, requires_grad=True)
    >>> d
    tensor([3., 4., 5.], grad_fn=<AddBackward0>)

    >>> array = numpy.array([1, 2, 3])
    >>> # Shares memory with array 'array'
    >>> t1 = torch.asarray(array)
    >>> array.__array_interface__['data'][0] == t1.data_ptr()
    True
    >>> # Copies memory due to dtype mismatch
    >>> t2 = torch.asarray(array, dtype=torch.float32)
    >>> array.__array_interface__['data'][0] == t2.data_ptr()
    False

    >>> scalar = numpy.float64(0.5)
    >>> torch.asarray(scalar)
    tensor(0.5000, dtype=torch.float64)
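
    >>> # An object supporting the buffer protocol; assumes a little-endian host
    >>> torch.asarray(bytearray([1, 0, 0, 0]), dtype=torch.int32)
    tensor([1], dtype=torch.int32)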
1296""",
1297)
1298
1299add_docstr(
1300    torch.baddbmm,
1301    r"""
1302baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor
1303
1304Performs a batch matrix-matrix product of matrices in :attr:`batch1`
1305and :attr:`batch2`.
1306:attr:`input` is added to the final result.
1307
1308:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same
1309number of matrices.
1310
1311If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a
1312:math:`(b \times m \times p)` tensor, then :attr:`input` must be
1313:ref:`broadcastable <broadcasting-semantics>` with a
1314:math:`(b \times n \times p)` tensor and :attr:`out` will be a
1315:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the
1316same as the scaling factors used in :meth:`torch.addbmm`.
1317
1318.. math::
1319    \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i)
1320
1321If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in
1322it will not be propagated.
1323"""
1324    + r"""
1325For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and
1326:attr:`alpha` must be real numbers, otherwise they should be integers.
1327
1328{tf32_note}
1329
1330{rocm_fp16_note}
1331
1332Args:
1333    input (Tensor): the tensor to be added
1334    batch1 (Tensor): the first batch of matrices to be multiplied
1335    batch2 (Tensor): the second batch of matrices to be multiplied
1336
1337Keyword args:
1338    beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`)
1339    alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`)
1340    {out}
1341
1342Example::
1343
1344    >>> M = torch.randn(10, 3, 5)
1345    >>> batch1 = torch.randn(10, 3, 4)
1346    >>> batch2 = torch.randn(10, 4, 5)
1347    >>> torch.baddbmm(M, batch1, batch2).size()
1348    torch.Size([10, 3, 5])
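
    >>> # A hand-checkable case: each batch is ones(2, 3) @ ones(3, 2) = 3
    >>> torch.baddbmm(torch.zeros(2, 2, 2), torch.ones(2, 2, 3), torch.ones(2, 3, 2))
    tensor([[[3., 3.],
             [3., 3.]],

            [[3., 3.],
             [3., 3.]]])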
1349""".format(**common_args, **tf32_notes, **rocm_fp16_notes),
1350)
1351
1352add_docstr(
1353    torch.bernoulli,
1354    r"""
1355bernoulli(input, *, generator=None, out=None) -> Tensor
1356
1357Draws binary random numbers (0 or 1) from a Bernoulli distribution.
1358
1359The :attr:`input` tensor should be a tensor containing probabilities
1360to be used for drawing the binary random number.
1361Hence, all values in :attr:`input` have to be in the range:
1362:math:`0 \leq \text{input}_i \leq 1`.
1363
1364The :math:`\text{i}^{th}` element of the output tensor will draw a
1365value :math:`1` according to the :math:`\text{i}^{th}` probability value given
1366in :attr:`input`.
1367
1368.. math::
1369    \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i})
1370"""
1371    + r"""
1372The returned :attr:`out` tensor only has values 0 or 1 and is of the same
1373shape as :attr:`input`.
1374
1375:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating
1376point ``dtype``.
1377
1378Args:
1379    input (Tensor): the input tensor of probability values for the Bernoulli distribution
1380
1381Keyword args:
1382    {generator}
1383    {out}
1384
1385Example::
1386
1387    >>> a = torch.empty(3, 3).uniform_(0, 1)  # generate a uniform random matrix with range [0, 1]
1388    >>> a
1389    tensor([[ 0.1737,  0.0950,  0.3609],
1390            [ 0.7148,  0.0289,  0.2676],
1391            [ 0.9456,  0.8937,  0.7202]])
1392    >>> torch.bernoulli(a)
1393    tensor([[ 1.,  0.,  0.],
1394            [ 0.,  0.,  0.],
1395            [ 1.,  1.,  1.]])
1396
1397    >>> a = torch.ones(3, 3) # probability of drawing "1" is 1
1398    >>> torch.bernoulli(a)
1399    tensor([[ 1.,  1.,  1.],
1400            [ 1.,  1.,  1.],
1401            [ 1.,  1.,  1.]])
1402    >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0
1403    >>> torch.bernoulli(a)
1404    tensor([[ 0.,  0.,  0.],
1405            [ 0.,  0.,  0.],
1406            [ 0.,  0.,  0.]])
1407""".format(**common_args),
1408)
1409
1410add_docstr(
1411    torch.bincount,
1412    r"""
1413bincount(input, weights=None, minlength=0) -> Tensor
1414
1415Count the frequency of each value in an array of non-negative ints.
1416
The number of bins (each of size 1) is one larger than the largest value in
:attr:`input` unless :attr:`input` is empty, in which case the result is a
tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least
:attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size
:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``,
``out[n] += weights[i]`` if :attr:`weights` is specified else
``out[n] += 1``.

Note:
    {backward_reproducibility_note}

Arguments:
    input (Tensor): 1-d int tensor
    weights (Tensor): optional, weight for each value in the input tensor.
        Should be of same size as input tensor.
    minlength (int): optional, minimum number of bins. Should be non-negative.

Returns:
    output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if
    :attr:`input` is non-empty, else ``Size(0)``

Example::

    >>> input = torch.randint(0, 8, (5,), dtype=torch.int64)
    >>> weights = torch.linspace(0, 1, steps=5)
    >>> input, weights
    (tensor([4, 3, 6, 3, 4]),
     tensor([ 0.0000,  0.2500,  0.5000,  0.7500,  1.0000]))

    >>> torch.bincount(input)
    tensor([0, 0, 0, 2, 2, 0, 1])

    >>> input.bincount(weights)
    tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000])
""".format(**reproducibility_notes),
)

add_docstr(
    torch.bitwise_not,
    r"""
bitwise_not(input, *, out=None) -> Tensor

Computes the bitwise NOT of the given input tensor. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical NOT.

Args:
    {input}

Keyword args:
    {out}

Example::

    >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8))
    tensor([ 0,  1, -4], dtype=torch.int8)
""".format(**common_args),
)

add_docstr(
    torch.bmm,
    r"""
bmm(input, mat2, *, out=None) -> Tensor

Performs a batch matrix-matrix product of matrices stored in :attr:`input`
and :attr:`mat2`.

:attr:`input` and :attr:`mat2` must be 3-D tensors each containing
the same number of matrices.

If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a
:math:`(b \times m \times p)` tensor, :attr:`out` will be a
:math:`(b \times n \times p)` tensor.

.. math::
    \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i
"""
    + r"""
{tf32_note}

{rocm_fp16_note}

.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
          For broadcasting matrix products, see :func:`torch.matmul`.

Args:
    input (Tensor): the first batch of matrices to be multiplied
    mat2 (Tensor): the second batch of matrices to be multiplied

Keyword Args:
    {out}

Example::

    >>> input = torch.randn(10, 3, 4)
    >>> mat2 = torch.randn(10, 4, 5)
    >>> res = torch.bmm(input, mat2)
    >>> res.size()
    torch.Size([10, 3, 5])
""".format(**common_args, **tf32_notes, **rocm_fp16_notes),
)

add_docstr(
    torch.bitwise_and,
    r"""
bitwise_and(input, other, *, out=None) -> Tensor

Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical AND.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([1, 0,  3], dtype=torch.int8)
    >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ False, True, False])
""".format(**common_args),
)

add_docstr(
    torch.bitwise_or,
    r"""
bitwise_or(input, other, *, out=None) -> Tensor

Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical OR.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-1, -2,  3], dtype=torch.int8)
    >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ True, True, False])
""".format(**common_args),
)

add_docstr(
    torch.bitwise_xor,
    r"""
bitwise_xor(input, other, *, out=None) -> Tensor

Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of
integral or Boolean types. For bool tensors, it computes the logical XOR.

Args:
    input: the first input tensor
    other: the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-2, -2,  0], dtype=torch.int8)
    >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
    tensor([ True, False, False])
""".format(**common_args),
)

add_docstr(
    torch.bitwise_left_shift,
    r"""
bitwise_left_shift(input, other, *, out=None) -> Tensor

Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.

The operation applied is:

.. math::
    \text{{out}}_i = \text{{input}}_i << \text{{other}}_i

Args:
    input (Tensor or Scalar): the first input tensor
    other (Tensor or Scalar): the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-2, -2, 24], dtype=torch.int8)
""".format(**common_args),
)

add_docstr(
    torch.bitwise_right_shift,
    r"""
bitwise_right_shift(input, other, *, out=None) -> Tensor

Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits.
The input tensor must be of integral type. This operator supports
:ref:`broadcasting to a common shape <broadcasting-semantics>` and
:ref:`type promotion <type-promotion-doc>`.
If the value of the right operand is negative or is greater than or
equal to the number of bits in the promoted left operand, the behavior is undefined.

The operation applied is:

.. math::
    \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i

Args:
    input (Tensor or Scalar): the first input tensor
    other (Tensor or Scalar): the second input tensor

Keyword args:
    {out}

Example::

    >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8))
    tensor([-1, -7,  3], dtype=torch.int8)
""".format(**common_args),
)

add_docstr(
    torch.broadcast_to,
    r"""
broadcast_to(input, shape) -> Tensor
Broadcasts :attr:`input` to the shape :attr:`shape`.
Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details.

Args:
    {input}
    shape (list, tuple, or :class:`torch.Size`): the new shape.

Example::

    >>> x = torch.tensor([1, 2, 3])
    >>> torch.broadcast_to(x, (3, 3))
    tensor([[1, 2, 3],
            [1, 2, 3],
            [1, 2, 3]])
""".format(**common_args),
)

add_docstr(
    torch.stack,
    r"""
stack(tensors, dim=0, *, out=None) -> Tensor

Concatenates a sequence of tensors along a new dimension.

All tensors need to be of the same size.

.. seealso::

    :func:`torch.cat` concatenates the given sequence along an existing dimension.

Arguments:
    tensors (sequence of Tensors): sequence of tensors to concatenate
    dim (int, optional): dimension to insert. Has to be between 0 and the number
        of dimensions of concatenated tensors (inclusive). Default: 0

Keyword args:
    {out}

Example::

    >>> x = torch.randn(2, 3)
    >>> x
    tensor([[ 0.3367,  0.1288,  0.2345],
            [ 0.2303, -1.1229, -0.1863]])
    >>> torch.stack((x, x)) # same as torch.stack((x, x), dim=0)
    tensor([[[ 0.3367,  0.1288,  0.2345],
             [ 0.2303, -1.1229, -0.1863]],

            [[ 0.3367,  0.1288,  0.2345],
             [ 0.2303, -1.1229, -0.1863]]])
    >>> torch.stack((x, x)).size()
    torch.Size([2, 2, 3])
    >>> torch.stack((x, x), dim=1)
    tensor([[[ 0.3367,  0.1288,  0.2345],
             [ 0.3367,  0.1288,  0.2345]],

            [[ 0.2303, -1.1229, -0.1863],
             [ 0.2303, -1.1229, -0.1863]]])
    >>> torch.stack((x, x), dim=2)
    tensor([[[ 0.3367,  0.3367],
             [ 0.1288,  0.1288],
             [ 0.2345,  0.2345]],

            [[ 0.2303,  0.2303],
             [-1.1229, -1.1229],
             [-0.1863, -0.1863]]])
    >>> torch.stack((x, x), dim=-1)
    tensor([[[ 0.3367,  0.3367],
             [ 0.1288,  0.1288],
             [ 0.2345,  0.2345]],

            [[ 0.2303,  0.2303],
             [-1.1229, -1.1229],
             [-0.1863, -0.1863]]])
""".format(**common_args),
)

add_docstr(
    torch.hstack,
    r"""
hstack(tensors, *, out=None) -> Tensor

Stack tensors in sequence horizontally (column wise).

This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.hstack((a,b))
    tensor([1, 2, 3, 4, 5, 6])
    >>> a = torch.tensor([[1],[2],[3]])
    >>> b = torch.tensor([[4],[5],[6]])
    >>> torch.hstack((a,b))
    tensor([[1, 4],
            [2, 5],
            [3, 6]])

""".format(**common_args),
)

add_docstr(
    torch.vstack,
    r"""
vstack(tensors, *, out=None) -> Tensor

Stack tensors in sequence vertically (row wise).

This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.vstack((a,b))
    tensor([[1, 2, 3],
            [4, 5, 6]])
    >>> a = torch.tensor([[1],[2],[3]])
    >>> b = torch.tensor([[4],[5],[6]])
    >>> torch.vstack((a,b))
    tensor([[1],
            [2],
            [3],
            [4],
            [5],
            [6]])


""".format(**common_args),
)

add_docstr(
    torch.dstack,
    r"""
dstack(tensors, *, out=None) -> Tensor

Stack tensors in sequence depthwise (along third axis).

This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`.

Args:
    tensors (sequence of Tensors): sequence of tensors to concatenate

Keyword args:
    {out}

Example::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.dstack((a,b))
    tensor([[[1, 4],
             [2, 5],
             [3, 6]]])
    >>> a = torch.tensor([[1],[2],[3]])
    >>> b = torch.tensor([[4],[5],[6]])
    >>> torch.dstack((a,b))
    tensor([[[1, 4]],
            [[2, 5]],
            [[3, 6]]])


""".format(**common_args),
)

add_docstr(
1834    torch.tensor_split,
1835    r"""
1836tensor_split(input, indices_or_sections, dim=0) -> List of Tensors
1837
1838Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`,
1839along dimension :attr:`dim` according to the indices or number of sections specified
1840by :attr:`indices_or_sections`. This function is based on NumPy's
1841:func:`numpy.array_split`.
1842
1843Args:
1844    input (Tensor): the tensor to split
1845    indices_or_sections (Tensor, int or list or tuple of ints):
1846        If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor
1847        with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`.
1848        If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each
1849        section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input`
        is not divisible by ``n``, the first :code:`int(input.size(dim) % n)`
        sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will
        have size :code:`int(input.size(dim) / n)`.
1853
1854        If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long
1855        tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices
1856        in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0`
1857        would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`.
1858
1859        If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional
1860        long tensor on the CPU.
1861
1862    dim (int, optional): dimension along which to split the tensor. Default: ``0``
1863
1864Example::
1865
1866    >>> x = torch.arange(8)
1867    >>> torch.tensor_split(x, 3)
1868    (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7]))
1869
1870    >>> x = torch.arange(7)
1871    >>> torch.tensor_split(x, 3)
1872    (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6]))
1873    >>> torch.tensor_split(x, (1, 6))
1874    (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6]))
1875
1876    >>> x = torch.arange(14).reshape(2, 7)
1877    >>> x
1878    tensor([[ 0,  1,  2,  3,  4,  5,  6],
1879            [ 7,  8,  9, 10, 11, 12, 13]])
1880    >>> torch.tensor_split(x, 3, dim=1)
1881    (tensor([[0, 1, 2],
1882            [7, 8, 9]]),
1883     tensor([[ 3,  4],
1884            [10, 11]]),
1885     tensor([[ 5,  6],
1886            [12, 13]]))
1887    >>> torch.tensor_split(x, (1, 6), dim=1)
1888    (tensor([[0],
1889            [7]]),
1890     tensor([[ 1,  2,  3,  4,  5],
1891            [ 8,  9, 10, 11, 12]]),
1892     tensor([[ 6],
1893            [13]]))
1894""",
1895)
1896
1897add_docstr(
1898    torch.chunk,
1899    r"""
1900chunk(input, chunks, dim=0) -> List of Tensors
1901
1902Attempts to split a tensor into the specified number of chunks. Each chunk is a view of
1903the input tensor.
1904
1905
1906.. note::
1907
1908    This function may return fewer than the specified number of chunks!
1909
1910.. seealso::
1911
    :func:`torch.tensor_split`, a function that always returns exactly the specified number of chunks.
1913
1914If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`,
1915all returned chunks will be the same size.
1916If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`,
1917all returned chunks will be the same size, except the last one.
1918If such division is not possible, this function may return fewer
1919than the specified number of chunks.
1920
1921Arguments:
1922    input (Tensor): the tensor to split
1923    chunks (int): number of chunks to return
1924    dim (int): dimension along which to split the tensor
1925
Example::

1927    >>> torch.arange(11).chunk(6)
1928    (tensor([0, 1]),
1929     tensor([2, 3]),
1930     tensor([4, 5]),
1931     tensor([6, 7]),
1932     tensor([8, 9]),
1933     tensor([10]))
1934    >>> torch.arange(12).chunk(6)
1935    (tensor([0, 1]),
1936     tensor([2, 3]),
1937     tensor([4, 5]),
1938     tensor([6, 7]),
1939     tensor([8, 9]),
1940     tensor([10, 11]))
1941    >>> torch.arange(13).chunk(6)
1942    (tensor([0, 1, 2]),
1943     tensor([3, 4, 5]),
1944     tensor([6, 7, 8]),
1945     tensor([ 9, 10, 11]),
1946     tensor([12]))
1947""",
1948)
1949
1950add_docstr(
1951    torch.unsafe_chunk,
1952    r"""
1953unsafe_chunk(input, chunks, dim=0) -> List of Tensors
1954
1955Works like :func:`torch.chunk` but without enforcing the autograd restrictions
1956on inplace modification of the outputs.
1957
1958.. warning::
    This function is safe to use as long as only the input, or only the outputs
    are modified inplace after calling this function. It is the user's
    responsibility to ensure that is the case. If both the input and one or more
    of the outputs are modified inplace, gradients computed by autograd will be
    silently incorrect.
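
For illustration, the chunking itself behaves like :func:`torch.chunk`:

Example::

    >>> torch.unsafe_chunk(torch.arange(5), 2)
    (tensor([0, 1, 2]), tensor([3, 4]))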
1964""",
1965)
1966
1967add_docstr(
1968    torch.unsafe_split,
1969    r"""
1970unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors
1971
1972Works like :func:`torch.split` but without enforcing the autograd restrictions
1973on inplace modification of the outputs.
1974
1975.. warning::
    This function is safe to use as long as only the input, or only the outputs
    are modified inplace after calling this function. It is the user's
    responsibility to ensure that is the case. If both the input and one or more
    of the outputs are modified inplace, gradients computed by autograd will be
    silently incorrect.
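
For illustration, the splitting itself behaves like :func:`torch.split`:

Example::

    >>> torch.unsafe_split(torch.arange(5), 2)
    (tensor([0, 1]), tensor([2, 3]), tensor([4]))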
1981""",
1982)
1983
1984add_docstr(
1985    torch.hsplit,
1986    r"""
1987hsplit(input, indices_or_sections) -> List of Tensors
1988
1989Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors
1990horizontally according to :attr:`indices_or_sections`. Each split is a view of
1991:attr:`input`.
1992
If :attr:`input` is one dimensional, this is equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=0)`` (the split dimension is
0), and if :attr:`input` has two or more dimensions it is equivalent to calling
``torch.tensor_split(input, indices_or_sections, dim=1)`` (the split dimension is 1),
except that if :attr:`indices_or_sections` is an integer it must evenly divide
the split dimension or a runtime error is thrown.
1999
2000This function is based on NumPy's :func:`numpy.hsplit`.
2001
2002Args:
2003    input (Tensor): tensor to split.
2004    indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
2005
Example::

2007    >>> t = torch.arange(16.0).reshape(4,4)
2008    >>> t
2009    tensor([[ 0.,  1.,  2.,  3.],
2010            [ 4.,  5.,  6.,  7.],
2011            [ 8.,  9., 10., 11.],
2012            [12., 13., 14., 15.]])
2013    >>> torch.hsplit(t, 2)
2014    (tensor([[ 0.,  1.],
2015             [ 4.,  5.],
2016             [ 8.,  9.],
2017             [12., 13.]]),
2018     tensor([[ 2.,  3.],
2019             [ 6.,  7.],
2020             [10., 11.],
2021             [14., 15.]]))
2022    >>> torch.hsplit(t, [3, 6])
2023    (tensor([[ 0.,  1.,  2.],
2024             [ 4.,  5.,  6.],
2025             [ 8.,  9., 10.],
2026             [12., 13., 14.]]),
2027     tensor([[ 3.],
2028             [ 7.],
2029             [11.],
2030             [15.]]),
2031     tensor([], size=(4, 0)))
2032
2033""",
2034)
2035
2036add_docstr(
2037    torch.vsplit,
2038    r"""
2039vsplit(input, indices_or_sections) -> List of Tensors
2040
2041Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors
2042vertically according to :attr:`indices_or_sections`. Each split is a view of
2043:attr:`input`.
2044
This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=0)``
(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error is thrown.
2048
2049This function is based on NumPy's :func:`numpy.vsplit`.
2050
2051Args:
2052    input (Tensor): tensor to split.
2053    indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
2054
Example::

2056    >>> t = torch.arange(16.0).reshape(4,4)
2057    >>> t
2058    tensor([[ 0.,  1.,  2.,  3.],
2059            [ 4.,  5.,  6.,  7.],
2060            [ 8.,  9., 10., 11.],
2061            [12., 13., 14., 15.]])
2062    >>> torch.vsplit(t, 2)
2063    (tensor([[0., 1., 2., 3.],
2064             [4., 5., 6., 7.]]),
2065     tensor([[ 8.,  9., 10., 11.],
2066             [12., 13., 14., 15.]]))
2067    >>> torch.vsplit(t, [3, 6])
2068    (tensor([[ 0.,  1.,  2.,  3.],
2069             [ 4.,  5.,  6.,  7.],
2070             [ 8.,  9., 10., 11.]]),
2071     tensor([[12., 13., 14., 15.]]),
2072     tensor([], size=(0, 4)))
2073
2074""",
2075)
2076
2077add_docstr(
2078    torch.dsplit,
2079    r"""
2080dsplit(input, indices_or_sections) -> List of Tensors
2081
2082Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors
2083depthwise according to :attr:`indices_or_sections`. Each split is a view of
2084:attr:`input`.
2085
This is equivalent to calling ``torch.tensor_split(input, indices_or_sections, dim=2)``
(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer
it must evenly divide the split dimension or a runtime error is thrown.
2089
2090This function is based on NumPy's :func:`numpy.dsplit`.
2091
2092Args:
2093    input (Tensor): tensor to split.
2094    indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`.
2095
Example::

2097    >>> t = torch.arange(16.0).reshape(2, 2, 4)
2098    >>> t
2099    tensor([[[ 0.,  1.,  2.,  3.],
2100             [ 4.,  5.,  6.,  7.]],
2101            [[ 8.,  9., 10., 11.],
2102             [12., 13., 14., 15.]]])
2103    >>> torch.dsplit(t, 2)
    (tensor([[[ 0.,  1.],
              [ 4.,  5.]],
             [[ 8.,  9.],
              [12., 13.]]]),
2108     tensor([[[ 2.,  3.],
2109              [ 6.,  7.]],
2110             [[10., 11.],
2111              [14., 15.]]]))
2112
2113    >>> torch.dsplit(t, [3, 6])
2114    (tensor([[[ 0.,  1.,  2.],
2115              [ 4.,  5.,  6.]],
2116             [[ 8.,  9., 10.],
2117              [12., 13., 14.]]]),
2118     tensor([[[ 3.],
2119              [ 7.]],
2120             [[11.],
2121              [15.]]]),
2122     tensor([], size=(2, 2, 0)))
2123
2124""",
2125)
2126
2127add_docstr(
2128    torch.can_cast,
2129    r"""
2130can_cast(from_, to) -> bool
2131
2132Determines if a type conversion is allowed under PyTorch casting rules
2133described in the type promotion :ref:`documentation <type-promotion-doc>`.
2134
2135Args:
2136    from\_ (dtype): The original :class:`torch.dtype`.
2137    to (dtype): The target :class:`torch.dtype`.
2138
2139Example::
2140
2141    >>> torch.can_cast(torch.double, torch.float)
2142    True
2143    >>> torch.can_cast(torch.float, torch.int)
2144    False
2145""",
2146)
2147
2148add_docstr(
2149    torch.corrcoef,
2150    r"""
2151corrcoef(input) -> Tensor
2152
2153Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix,
2154where rows are the variables and columns are the observations.
2155
2156.. note::
2157
2158    The correlation coefficient matrix R is computed using the covariance matrix C as given by
2159    :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }`
2160
2161.. note::
2162
2163    Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1.
2164    The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation.
2165
2166Args:
2167    input (Tensor): A 2D matrix containing multiple variables and observations, or a
2168        Scalar or 1D vector representing a single variable.
2169
2170Returns:
2171    (Tensor) The correlation coefficient matrix of the variables.
2172
2173.. seealso::
2174
        :func:`torch.cov`, which computes the covariance matrix.
2176
2177Example::
2178
2179    >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]])
2180    >>> torch.corrcoef(x)
2181    tensor([[ 1., -1.],
2182            [-1.,  1.]])
2183    >>> x = torch.randn(2, 4)
2184    >>> x
2185    tensor([[-0.2678, -0.0908, -0.3766,  0.2780],
2186            [-0.5812,  0.1535,  0.2387,  0.2350]])
2187    >>> torch.corrcoef(x)
2188    tensor([[1.0000, 0.3582],
2189            [0.3582, 1.0000]])
2190    >>> torch.corrcoef(x[0])
2191    tensor(1.)
2192""",
2193)
2194
2195add_docstr(
2196    torch.cov,
2197    r"""
2198cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor
2199
2200Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are
2201the variables and columns are the observations.
2202
2203A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains
2204the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents
2205a single variable (Scalar or 1D) then its variance is returned.
2206
2207The sample covariance of the variables :math:`x` and :math:`y` is given by:
2208
2209.. math::
2210    \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)}
2211
where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of :math:`x` and :math:`y` respectively, and
:math:`\delta N` is the :attr:`correction`.
2214
2215If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance
2216is calculated, which is given by:
2217
2218.. math::
2219    \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)}
2220    {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)}
2221
2222where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is
2223provided, or :math:`w = f \times a` if both are provided, and
2224:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not
2225provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size.
2226
2227Args:
2228    input (Tensor): A 2D matrix containing multiple variables and observations, or a
2229        Scalar or 1D vector representing a single variable.
2230
2231Keyword Args:
    correction (int, optional): difference between the sample size and sample degrees of freedom.
        Defaults to Bessel's correction, ``correction = 1``, which returns the unbiased estimate,
        even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0``
        returns the simple average.
2236    fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of
2237        times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`.
2238        Must have integral dtype. Ignored if ``None``. Defaults to ``None``.
2239    aweights (tensor, optional): A Scalar or 1D array of observation vector weights.
2240        These relative weights are typically large for observations considered "important" and smaller for
2241        observations considered less "important". Its numel must equal the number of columns of :attr:`input`.
2242        Must have floating point dtype. Ignored if ``None``. Defaults to ``None``.
2243
2244Returns:
2245    (Tensor) The covariance matrix of the variables.
2246
2247.. seealso::
2248
        :func:`torch.corrcoef`, which computes the normalized covariance (correlation) matrix.
2250
Example::

2252    >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T
2253    >>> x
2254    tensor([[0, 1, 2],
2255            [2, 1, 0]])
2256    >>> torch.cov(x)
2257    tensor([[ 1., -1.],
2258            [-1.,  1.]])
2259    >>> torch.cov(x, correction=0)
2260    tensor([[ 0.6667, -0.6667],
2261            [-0.6667,  0.6667]])
2262    >>> fw = torch.randint(1, 10, (3,))
2263    >>> fw
2264    tensor([1, 6, 9])
2265    >>> aw = torch.rand(3)
2266    >>> aw
2267    tensor([0.4282, 0.0255, 0.4144])
2268    >>> torch.cov(x, fweights=fw, aweights=aw)
2269    tensor([[ 0.4169, -0.4169],
2270            [-0.4169,  0.4169]])
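    >>> # A single variable (scalar or 1-D input) returns its variance (illustrative)
    >>> torch.cov(torch.tensor([1., 2., 3.]))
    tensor(1.)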
2271""",
2272)
2273
2274add_docstr(
2275    torch.cat,
2276    r"""
2277cat(tensors, dim=0, *, out=None) -> Tensor
2278
Concatenates the given sequence of tensors in :attr:`tensors` along the given dimension.
All tensors must either have the same shape (except in the concatenating
dimension) or be 1-D empty tensors with size ``(0,)``.
2282
2283:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split`
2284and :func:`torch.chunk`.
2285
:func:`torch.cat` is best understood via examples.
2287
2288.. seealso::
2289
2290    :func:`torch.stack` concatenates the given sequence along a new dimension.
2291
2292Args:
2293    tensors (sequence of Tensors): any python sequence of tensors of the same type.
2294        Non-empty tensors provided must have the same shape, except in the
2295        cat dimension.
2296    dim (int, optional): the dimension over which the tensors are concatenated
2297
2298Keyword args:
2299    {out}
2300
2301Example::
2302
2303    >>> x = torch.randn(2, 3)
2304    >>> x
2305    tensor([[ 0.6580, -1.0969, -0.4614],
2306            [-0.1034, -0.5790,  0.1497]])
2307    >>> torch.cat((x, x, x), 0)
2308    tensor([[ 0.6580, -1.0969, -0.4614],
2309            [-0.1034, -0.5790,  0.1497],
2310            [ 0.6580, -1.0969, -0.4614],
2311            [-0.1034, -0.5790,  0.1497],
2312            [ 0.6580, -1.0969, -0.4614],
2313            [-0.1034, -0.5790,  0.1497]])
2314    >>> torch.cat((x, x, x), 1)
2315    tensor([[ 0.6580, -1.0969, -0.4614,  0.6580, -1.0969, -0.4614,  0.6580,
2316             -1.0969, -0.4614],
2317            [-0.1034, -0.5790,  0.1497, -0.1034, -0.5790,  0.1497, -0.1034,
2318             -0.5790,  0.1497]])
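    >>> # Round trip (illustrative): concatenating the outputs of torch.split
    >>> # along the same dimension recovers the original tensor
    >>> torch.equal(torch.cat(torch.split(x, 1), 0), x)
    True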
2319""".format(**common_args),
2320)
2321
2322add_docstr(
2323    torch.concat,
2324    r"""
2325concat(tensors, dim=0, *, out=None) -> Tensor
2326
2327Alias of :func:`torch.cat`.
2328""",
2329)
2330
2331add_docstr(
2332    torch.concatenate,
2333    r"""
2334concatenate(tensors, axis=0, out=None) -> Tensor
2335
2336Alias of :func:`torch.cat`.
2337""",
2338)
2339
2340add_docstr(
2341    torch.ceil,
2342    r"""
2343ceil(input, *, out=None) -> Tensor
2344
2345Returns a new tensor with the ceil of the elements of :attr:`input`,
2346the smallest integer greater than or equal to each element.
2347
2348For integer inputs, follows the array-api convention of returning a
2349copy of the input tensor.
2350
2351.. math::
2352    \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
2353"""
2354    + r"""
2355Args:
2356    {input}
2357
2358Keyword args:
2359    {out}
2360
2361Example::
2362
2363    >>> a = torch.randn(4)
2364    >>> a
2365    tensor([-0.6341, -1.4208, -1.0900,  0.5826])
2366    >>> torch.ceil(a)
2367    tensor([-0., -1., -1.,  1.])
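    >>> # Integer inputs are returned as a copy, per the note above (illustrative)
    >>> torch.ceil(torch.tensor([1, 2, 3]))
    tensor([1, 2, 3])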
2368""".format(**common_args),
2369)
2370
2371add_docstr(
2372    torch.real,
2373    r"""
2374real(input) -> Tensor
2375
Returns a new tensor containing the real values of the :attr:`input` tensor.
The returned tensor and :attr:`input` share the same underlying storage.
2378
2379Args:
2380    {input}
2381
2382Example::
2383
    >>> x = torch.randn(4, dtype=torch.cfloat)
2385    >>> x
2386    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
2387    >>> x.real
2388    tensor([ 0.3100, -0.5445, -1.6492, -0.0638])
2389
2390""".format(**common_args),
2391)
2392
2393add_docstr(
2394    torch.imag,
2395    r"""
2396imag(input) -> Tensor
2397
Returns a new tensor containing the imaginary values of the :attr:`input` tensor.
The returned tensor and :attr:`input` share the same underlying storage.
2400
2401.. warning::
2402    :func:`imag` is only supported for tensors with complex dtypes.
2403
2404Args:
2405    {input}
2406
2407Example::
2408
    >>> x = torch.randn(4, dtype=torch.cfloat)
2410    >>> x
2411    tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
2412    >>> x.imag
2413    tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
2414
2415""".format(**common_args),
2416)
2417
2418add_docstr(
2419    torch.view_as_real,
2420    r"""
2421view_as_real(input) -> Tensor
2422
Returns a view of :attr:`input` as a real tensor. For an input complex tensor of
size :math:`m1, m2, \dots, mi`, this function returns a new
real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2
represents the real and imaginary components of complex numbers.
2427
2428.. warning::
    :func:`view_as_real` is only supported for tensors with complex dtypes.
2430
2431Args:
2432    {input}
2433
2434Example::
2435
    >>> x = torch.randn(4, dtype=torch.cfloat)
2437    >>> x
2438    tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)])
2439    >>> torch.view_as_real(x)
2440    tensor([[ 0.4737, -0.3839],
2441            [-0.2098, -0.6699],
2442            [ 0.3470, -0.9451],
2443            [-0.5174, -1.3136]])
2444""".format(**common_args),
2445)
2446
2447add_docstr(
2448    torch.view_as_complex,
2449    r"""
2450view_as_complex(input) -> Tensor
2451
Returns a view of :attr:`input` as a complex tensor. For an input tensor of
size :math:`m1, m2, \dots, mi, 2`, this function returns a new complex tensor
of size :math:`m1, m2, \dots, mi`, where the last dimension of the input
tensor is expected to represent the real and imaginary components of complex
numbers.
2457
2458.. warning::
2459    :func:`view_as_complex` is only supported for tensors with
2460    :class:`torch.dtype` ``torch.float64`` and ``torch.float32``.  The input is
    expected to have the last dimension of size 2. In addition, the
    tensor must have a ``stride`` of 1 for its last dimension. The strides of all
    other dimensions must be even numbers.
2464
2465Args:
2466    {input}
2467
2468Example::
2469
    >>> x = torch.randn(4, 2)
2471    >>> x
2472    tensor([[ 1.6116, -0.5772],
2473            [-1.4606, -0.9120],
2474            [ 0.0786, -1.7497],
2475            [-0.6561, -1.6623]])
2476    >>> torch.view_as_complex(x)
2477    tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)])
2478""".format(**common_args),
2479)
2480
2481add_docstr(
2482    torch.reciprocal,
2483    r"""
2484reciprocal(input, *, out=None) -> Tensor
2485
Returns a new tensor with the reciprocal of the elements of :attr:`input`.
2487
2488.. math::
2489    \text{out}_{i} = \frac{1}{\text{input}_{i}}
2490
2491.. note::
2492    Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral
2493    inputs to reciprocal are automatically :ref:`promoted <type-promotion-doc>` to
2494    the default scalar type.
2495"""
2496    + r"""
2497Args:
2498    {input}
2499
2500Keyword args:
2501    {out}
2502
2503Example::
2504
2505    >>> a = torch.randn(4)
2506    >>> a
2507    tensor([-0.4595, -2.1219, -1.4314,  0.7298])
2508    >>> torch.reciprocal(a)
2509    tensor([-2.1763, -0.4713, -0.6986,  1.3702])
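    >>> # Integral inputs are promoted to the default scalar type (illustrative)
    >>> torch.reciprocal(torch.tensor([1, 2, 4]))
    tensor([1.0000, 0.5000, 0.2500])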
2510""".format(**common_args),
2511)
2512
2513add_docstr(
2514    torch.cholesky,
2515    r"""
2516cholesky(input, upper=False, *, out=None) -> Tensor
2517
2518Computes the Cholesky decomposition of a symmetric positive-definite
2519matrix :math:`A` or for batches of symmetric positive-definite matrices.
2520
2521If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and
2522the decomposition has the form:
2523
2524.. math::
2525
2526  A = U^TU
2527
2528If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and
2529the decomposition has the form:
2530
2531.. math::
2532
2533    A = LL^T
2534
2535If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite
2536matrices, then the returned tensor will be composed of upper-triangular Cholesky factors
2537of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned
2538tensor will be composed of lower-triangular Cholesky factors of each of the individual
2539matrices.
2540
2541.. warning::
2542
2543    :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky`
2544    and will be removed in a future PyTorch release.
2545
2546    ``L = torch.cholesky(A)`` should be replaced with
2547
2548    .. code:: python
2549
2550        L = torch.linalg.cholesky(A)
2551
2552    ``U = torch.cholesky(A, upper=True)`` should be replaced with
2553
2554    .. code:: python
2555
2556        U = torch.linalg.cholesky(A).mH
2557
2558    This transform will produce equivalent results for all valid (symmetric positive definite) inputs.
2559
2560Args:
2561    input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more
2562                batch dimensions consisting of symmetric positive-definite matrices.
2563    upper (bool, optional): flag that indicates whether to return a
2564                            upper or lower triangular matrix. Default: ``False``
2565
2566Keyword args:
2567    out (Tensor, optional): the output matrix
2568
2569Example::
2570
2571    >>> a = torch.randn(3, 3)
2572    >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite
2573    >>> l = torch.cholesky(a)
2574    >>> a
2575    tensor([[ 2.4112, -0.7486,  1.4551],
2576            [-0.7486,  1.3544,  0.1294],
2577            [ 1.4551,  0.1294,  1.6724]])
2578    >>> l
2579    tensor([[ 1.5528,  0.0000,  0.0000],
2580            [-0.4821,  1.0592,  0.0000],
2581            [ 0.9371,  0.5487,  0.7023]])
2582    >>> l @ l.mT
2583    tensor([[ 2.4112, -0.7486,  1.4551],
2584            [-0.7486,  1.3544,  0.1294],
2585            [ 1.4551,  0.1294,  1.6724]])
2586    >>> a = torch.randn(3, 2, 2) # Example for batched input
2587    >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite
2588    >>> l = torch.cholesky(a)
2589    >>> z = l @ l.mT
2590    >>> torch.dist(z, a)
2591    tensor(2.3842e-07)
2592""",
2593)
2594
2595add_docstr(
2596    torch.cholesky_solve,
2597    r"""
2598cholesky_solve(B, L, upper=False, *, out=None) -> Tensor
2599
Computes the solution of a system of linear equations with a complex Hermitian
or real symmetric positive-definite left-hand-side matrix, given its Cholesky decomposition.
2602
2603Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
2604and :math:`L` its Cholesky decomposition such that:
2605
2606.. math::
2607
2608    A = LL^{\text{H}}
2609
2610where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
2611and the transpose when :math:`L` is real-valued.
2612
2613Returns the solution :math:`X` of the following linear system:
2614
2615.. math::
2616
2617    AX = B
2618
2619Supports inputs of float, double, cfloat and cdouble dtypes.
2620Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices
2621then the output has the same batch dimensions.
2622
2623Args:
2624    B (Tensor): right-hand side tensor of shape `(*, n, k)`
2625        where :math:`*` is zero or more batch dimensions
2626    L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
2627        consisting of lower or upper triangular Cholesky decompositions of
2628        symmetric or Hermitian positive-definite matrices.
2629    upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
2630        or upper triangular. Default: ``False``.
2631
2632Keyword args:
2633    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
2634
2635Example::
2636
2637    >>> A = torch.randn(3, 3)
2638    >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
2639    >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
2640    >>> B = torch.randn(3, 2)
2641    >>> torch.cholesky_solve(B, L)
2642    tensor([[ -8.1625,  19.6097],
2643            [ -5.8398,  14.2387],
2644            [ -4.3771,  10.4173]])
    >>> A.inverse() @ B
2646    tensor([[ -8.1626,  19.6097],
2647            [ -5.8398,  14.2387],
2648            [ -4.3771,  10.4173]])
2649
2650    >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
2651    >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
2652    >>> L = torch.linalg.cholesky(A)
2653    >>> B = torch.randn(2, 1, dtype=torch.complex64)
2654    >>> X = torch.cholesky_solve(B, L)
2655    >>> torch.dist(X, A.inverse() @ B)
2656    tensor(1.6881e-5)
2657""",
2658)
2659
2660add_docstr(
2661    torch.cholesky_inverse,
2662    r"""
2663cholesky_inverse(L, upper=False, *, out=None) -> Tensor
2664
2665Computes the inverse of a complex Hermitian or real symmetric
2666positive-definite matrix given its Cholesky decomposition.
2667
2668Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix,
2669and :math:`L` its Cholesky decomposition such that:
2670
2671.. math::
2672
2673    A = LL^{\text{H}}
2674
2675where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex,
2676and the transpose when :math:`L` is real-valued.
2677
2678Computes the inverse matrix :math:`A^{-1}`.
2679
2680Supports input of float, double, cfloat and cdouble dtypes.
2681Also supports batches of matrices, and if :math:`A` is a batch of matrices
2682then the output has the same batch dimensions.
2683
2684Args:
2685    L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
2686        consisting of lower or upper triangular Cholesky decompositions of
2687        symmetric or Hermitian positive-definite matrices.
2688    upper (bool, optional): flag that indicates whether :math:`L` is lower triangular
2689        or upper triangular. Default: ``False``
2690
2691Keyword args:
2692    out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
2693
2694Example::
2695
2696    >>> A = torch.randn(3, 3)
2697    >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix
2698    >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition
2699    >>> torch.cholesky_inverse(L)
2700    tensor([[ 1.9314,  1.2251, -0.0889],
2701            [ 1.2251,  2.4439,  0.2122],
2702            [-0.0889,  0.2122,  0.1412]])
2703    >>> A.inverse()
2704    tensor([[ 1.9314,  1.2251, -0.0889],
2705            [ 1.2251,  2.4439,  0.2122],
2706            [-0.0889,  0.2122,  0.1412]])
2707
2708    >>> A = torch.randn(3, 2, 2, dtype=torch.complex64)
2709    >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices
2710    >>> L = torch.linalg.cholesky(A)
2711    >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L))
2712    tensor(5.6358e-7)
2713""",
2714)
2715
2716add_docstr(
2717    torch.clone,
2718    r"""
2719clone(input, *, memory_format=torch.preserve_format) -> Tensor
2720
2721Returns a copy of :attr:`input`.
2722
2723.. note::
2724
2725    This function is differentiable, so gradients will flow back from the
2726    result of this operation to :attr:`input`. To create a tensor without an
2727    autograd relationship to :attr:`input` see :meth:`~Tensor.detach`.
2728
2729Args:
2730    {input}
2731
2732Keyword args:
2733    {memory_format}
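
A minimal sketch of the autograd behavior described above (gradients flow back
through the copy to :attr:`input`):

Example::

    >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
    >>> b = torch.clone(a)
    >>> b.sum().backward()
    >>> a.grad
    tensor([1., 1., 1.])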
2734""".format(**common_args),
2735)
2736
2737add_docstr(
2738    torch.clamp,
2739    r"""
2740clamp(input, min=None, max=None, *, out=None) -> Tensor
2741
2742Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`.
2743Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns:
2744
2745.. math::
2746    y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i)
2747
If :attr:`min` is ``None``, there is no lower bound;
if :attr:`max` is ``None``, there is no upper bound.
2750"""
2751    + r"""
2752
2753.. note::
    If :attr:`min` is greater than :attr:`max`, :func:`torch.clamp(..., min, max) <torch.clamp>`
    sets all elements in :attr:`input` to the value of :attr:`max`.
2756
2757Args:
2758    {input}
2759    min (Number or Tensor, optional): lower-bound of the range to be clamped to
2760    max (Number or Tensor, optional): upper-bound of the range to be clamped to
2761
2762Keyword args:
2763    {out}
2764
2765Example::
2766
2767    >>> a = torch.randn(4)
2768    >>> a
2769    tensor([-1.7120,  0.1734, -0.0478, -0.0922])
2770    >>> torch.clamp(a, min=-0.5, max=0.5)
2771    tensor([-0.5000,  0.1734, -0.0478, -0.0922])
2772
2773    >>> min = torch.linspace(-1, 1, steps=4)
2774    >>> torch.clamp(a, min=min)
2775    tensor([-1.0000,  0.1734,  0.3333,  1.0000])
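
    >>> # When min > max, every element is set to max, per the note above (illustrative)
    >>> torch.clamp(torch.tensor([0., 5.]), min=1.0, max=0.5)
    tensor([0.5000, 0.5000])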
2776
2777""".format(**common_args),
2778)
2779
2780add_docstr(
2781    torch.clip,
2782    r"""
2783clip(input, min=None, max=None, *, out=None) -> Tensor
2784
2785Alias for :func:`torch.clamp`.
2786""",
2787)
2788
2789add_docstr(
2790    torch.column_stack,
2791    r"""
2792column_stack(tensors, *, out=None) -> Tensor
2793
2794Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`.
2795
2796Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t``
2797in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally.
2798
2799Args:
2800    tensors (sequence of Tensors): sequence of tensors to concatenate
2801
2802Keyword args:
2803    {out}
2804
2805Example::
2806
2807    >>> a = torch.tensor([1, 2, 3])
2808    >>> b = torch.tensor([4, 5, 6])
2809    >>> torch.column_stack((a, b))
2810    tensor([[1, 4],
            [2, 5],
            [3, 6]])
2813    >>> a = torch.arange(5)
2814    >>> b = torch.arange(10).reshape(5, 2)
2815    >>> torch.column_stack((a, b, b))
2816    tensor([[0, 0, 1, 0, 1],
2817            [1, 2, 3, 2, 3],
2818            [2, 4, 5, 4, 5],
2819            [3, 6, 7, 6, 7],
2820            [4, 8, 9, 8, 9]])
2821
2822""".format(**common_args),
2823)
2824
2825add_docstr(
2826    torch.complex,
2827    r"""
2828complex(real, imag, *, out=None) -> Tensor
2829
2830Constructs a complex tensor with its real part equal to :attr:`real` and its
2831imaginary part equal to :attr:`imag`.
2832
2833Args:
2834    real (Tensor): The real part of the complex tensor. Must be half, float or double.
2835    imag (Tensor): The imaginary part of the complex tensor. Must be same dtype
2836        as :attr:`real`.
2837
2838Keyword args:
2839    out (Tensor): If the inputs are ``torch.float32``, must be
2840        ``torch.complex64``. If the inputs are ``torch.float64``, must be
2841        ``torch.complex128``.
2842
2843Example::
2844
2845    >>> real = torch.tensor([1, 2], dtype=torch.float32)
2846    >>> imag = torch.tensor([3, 4], dtype=torch.float32)
2847    >>> z = torch.complex(real, imag)
2848    >>> z
2849    tensor([(1.+3.j), (2.+4.j)])
2850    >>> z.dtype
2851    torch.complex64
2852
2853""",
2854)
2855
2856add_docstr(
2857    torch.polar,
2858    r"""
2859polar(abs, angle, *, out=None) -> Tensor
2860
2861Constructs a complex tensor whose elements are Cartesian coordinates
2862corresponding to the polar coordinates with absolute value :attr:`abs` and angle
2863:attr:`angle`.
2864
2865.. math::
2866    \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j
2867
2868.. note::
2869    `torch.polar` is similar to
2870    `std::polar <https://en.cppreference.com/w/cpp/numeric/complex/polar>`_
2871    and does not compute the polar decomposition
2872    of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do.
2873    The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is
2874    infinite.
2875
2876"""
2877    + r"""
2878Args:
    abs (Tensor): The absolute value of the complex tensor. Must be float or double.
2880    angle (Tensor): The angle of the complex tensor. Must be same dtype as
2881        :attr:`abs`.
2882
2883Keyword args:
2884    out (Tensor): If the inputs are ``torch.float32``, must be
2885        ``torch.complex64``. If the inputs are ``torch.float64``, must be
2886        ``torch.complex128``.
2887
2888Example::
2889
2890    >>> import numpy as np
2891    >>> abs = torch.tensor([1, 2], dtype=torch.float64)
2892    >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64)
2893    >>> z = torch.polar(abs, angle)
2894    >>> z
2895    tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128)
2896""",
2897)
2898
2899add_docstr(
2900    torch.conj_physical,
2901    r"""
2902conj_physical(input, *, out=None) -> Tensor
2903
2904Computes the element-wise conjugate of the given :attr:`input` tensor.
2905If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`.
2906
2907.. note::
   This performs the conjugate operation regardless of whether the conjugate bit is set.
2909
2910.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of
2911             non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical`
2912             when :attr:`input` is of non-complex dtype to be compatible with this change.
2913
2914.. math::
    \text{out}_{i} = \text{conj}(\text{input}_{i})
2916"""
2917    + r"""
2918Args:
2919    {input}
2920
2921Keyword args:
2922    {out}
2923
2924Example::
2925
2926    >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
2927    tensor([-1 - 1j, -2 - 2j, 3 + 3j])
2928""".format(**common_args),
2929)
2930
2931add_docstr(
2932    torch.conj,
2933    r"""
2934conj(input) -> Tensor
2935
2936Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype,
2937this function just returns :attr:`input`.
2938
2939.. note::
2940    :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized
2941    at any time using :func:`torch.resolve_conj`.
2942
.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of
             non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj`
             when :attr:`input` is of non-complex dtype to be compatible with this change.
2946
2947Args:
2948    {input}
2949
2950Example::
2951
2952    >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
2953    >>> x.is_conj()
2954    False
2955    >>> y = torch.conj(x)
2956    >>> y.is_conj()
2957    True
2958""".format(**common_args),
2959)
2960
2961add_docstr(
2962    torch.resolve_conj,
2963    r"""
2964resolve_conj(input) -> Tensor
2965
2966Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`,
2967else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`.
2968
2969Args:
2970    {input}
2971
2972Example::
2973
2974    >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
2975    >>> y = x.conj()
2976    >>> y.is_conj()
2977    True
2978    >>> z = y.resolve_conj()
2979    >>> z
2980    tensor([-1 - 1j, -2 - 2j, 3 + 3j])
2981    >>> z.is_conj()
2982    False
2983""".format(**common_args),
2984)
2985
2986add_docstr(
2987    torch.resolve_neg,
2988    r"""
2989resolve_neg(input) -> Tensor
2990
2991Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`,
2992else returns :attr:`input`. The output tensor will always have its negative bit set to `False`.
2993
2994Args:
2995    {input}
2996
2997Example::
2998
2999    >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
3000    >>> y = x.conj()
3001    >>> z = y.imag
3002    >>> z.is_neg()
3003    True
3004    >>> out = z.resolve_neg()
3005    >>> out
3006    tensor([-1., -2., 3.])
3007    >>> out.is_neg()
3008    False
3009""".format(**common_args),
3010)
3011
3012add_docstr(
3013    torch.copysign,
3014    r"""
3015copysign(input, other, *, out=None) -> Tensor
3016
3017Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise.
3018
3019.. math::
3020    \text{out}_{i} = \begin{cases}
3021        -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\
3022         |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\
3023    \end{cases}
3024"""
3025    + r"""
3026
3027Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
3028and integer and float inputs.
3029
3030Args:
3031    input (Tensor): magnitudes.
3032    other (Tensor or Number): contains value(s) whose signbit(s) are
3033        applied to the magnitudes in :attr:`input`.
3034
3035Keyword args:
3036    {out}
3037
3038Example::
3039
3040    >>> a = torch.randn(5)
3041    >>> a
3042    tensor([-1.2557, -0.0026, -0.5387,  0.4740, -0.9244])
3043    >>> torch.copysign(a, 1)
3044    tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244])
3045    >>> a = torch.randn(4, 4)
3046    >>> a
3047    tensor([[ 0.7079,  0.2778, -1.0249,  0.5719],
3048            [-0.0059, -0.2600, -0.4475, -1.3948],
3049            [ 0.3667, -0.9567, -2.5757, -0.1751],
3050            [ 0.2046, -0.0742,  0.2998, -0.1054]])
    >>> b = torch.randn(4)
    >>> b
    tensor([ 0.2373,  0.3120,  0.3190, -1.1128])
3053    >>> torch.copysign(a, b)
3054    tensor([[ 0.7079,  0.2778,  1.0249, -0.5719],
3055            [ 0.0059,  0.2600,  0.4475, -1.3948],
3056            [ 0.3667,  0.9567,  2.5757, -0.1751],
3057            [ 0.2046,  0.0742,  0.2998, -0.1054]])
3058    >>> a = torch.tensor([1.])
3059    >>> b = torch.tensor([-0.])
3060    >>> torch.copysign(a, b)
3061    tensor([-1.])
3062
3063.. note::
    :func:`torch.copysign` handles signed zeros. If the other argument has a negative zero (-0),
    the corresponding output value will be negative.
3066
3067""".format(**common_args),
3068)
3069
3070add_docstr(
3071    torch.cos,
3072    r"""
3073cos(input, *, out=None) -> Tensor
3074
Returns a new tensor with the cosine of the elements of :attr:`input`.
3076
3077.. math::
3078    \text{out}_{i} = \cos(\text{input}_{i})
3079"""
3080    + r"""
3081Args:
3082    {input}
3083
3084Keyword args:
3085    {out}
3086
3087Example::
3088
3089    >>> a = torch.randn(4)
3090    >>> a
3091    tensor([ 1.4309,  1.2706, -0.8562,  0.9796])
3092    >>> torch.cos(a)
3093    tensor([ 0.1395,  0.2957,  0.6553,  0.5574])
3094""".format(**common_args),
3095)
3096
3097add_docstr(
3098    torch.cosh,
3099    r"""
3100cosh(input, *, out=None) -> Tensor
3101
Returns a new tensor with the hyperbolic cosine of the elements of
:attr:`input`.
3104
3105.. math::
3106    \text{out}_{i} = \cosh(\text{input}_{i})
3107"""
3108    + r"""
3109Args:
3110    {input}
3111
3112Keyword args:
3113    {out}
3114
3115Example::
3116
3117    >>> a = torch.randn(4)
3118    >>> a
3119    tensor([ 0.1632,  1.1835, -0.6979, -0.7325])
3120    >>> torch.cosh(a)
3121    tensor([ 1.0133,  1.7860,  1.2536,  1.2805])
3122
3123.. note::
   When :attr:`input` is on the CPU, the implementation of :func:`torch.cosh` may use
3125   the Sleef library, which rounds very large results to infinity or negative
3126   infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
3127""".format(**common_args),
3128)
3129
3130add_docstr(
3131    torch.cross,
3132    r"""
3133cross(input, other, dim=None, *, out=None) -> Tensor
3134
3135
3136Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input`
3137and :attr:`other`.
3138
3139Supports input of float, double, cfloat and cdouble dtypes. Also supports batches
3140of vectors, for which it computes the product along the dimension :attr:`dim`.
3141In this case, the output has the same batch dimensions as the inputs.
3142
3143.. warning::
3144    If :attr:`dim` is not given, it defaults to the first dimension found
3145    with the size 3. Note that this might be unexpected.
3146
3147    This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross`
3148    in a future release.
3149
3150.. seealso::
        :func:`torch.linalg.cross`, which has ``dim=-1`` as default.
3152
3153
3154Args:
3155    {input}
3156    other (Tensor): the second input tensor
3157    dim  (int, optional): the dimension to take the cross-product in.
3158
3159Keyword args:
3160    {out}
3161
3162Example::
3163
3164    >>> a = torch.randn(4, 3)
3165    >>> a
3166    tensor([[-0.3956,  1.1455,  1.6895],
3167            [-0.5849,  1.3672,  0.3599],
3168            [-1.1626,  0.7180, -0.0521],
3169            [-0.1339,  0.9902, -2.0225]])
3170    >>> b = torch.randn(4, 3)
3171    >>> b
3172    tensor([[-0.0257, -1.4725, -1.2251],
3173            [-1.1479, -0.7005, -1.9757],
3174            [-1.3904,  0.3726, -1.1836],
3175            [-0.9688, -0.7153,  0.2159]])
3176    >>> torch.cross(a, b, dim=1)
3177    tensor([[ 1.0844, -0.5281,  0.6120],
3178            [-2.4490, -1.5687,  1.9792],
3179            [-0.8304, -1.3037,  0.5650],
3180            [-1.2329,  1.9883,  1.0551]])
3181    >>> torch.cross(a, b)
3182    tensor([[ 1.0844, -0.5281,  0.6120],
3183            [-2.4490, -1.5687,  1.9792],
3184            [-0.8304, -1.3037,  0.5650],
3185            [-1.2329,  1.9883,  1.0551]])
3186""".format(**common_args),
3187)
3188
3189add_docstr(
3190    torch.logcumsumexp,
3191    r"""
logcumsumexp(input, dim, *, out=None) -> Tensor

Returns the logarithm of the cumulative summation of the exponentiation of
elements of :attr:`input` in the dimension :attr:`dim`.
3195
For summation index :math:`j` given by :attr:`dim` and other indices :math:`i`, the result is
3197
3198    .. math::
3199        \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}})
3200
3201Args:
3202    {input}
3203    dim  (int): the dimension to do the operation over
3204
3205Keyword args:
3206    {out}
3207
3208Example::
3209
3210    >>> a = torch.randn(10)
3211    >>> torch.logcumsumexp(a, dim=0)
3212    tensor([-0.42296738, -0.04462666,  0.86278635,  0.94622083,  1.05277811,
             1.39202815,  1.83525007,  1.84492621,  2.06084887,  2.06844475])
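
    >>> # A small deterministic check against the formula above (illustrative)
    >>> x = torch.tensor([0., 1., 2.])
    >>> torch.logcumsumexp(x, dim=0)
    tensor([0.0000, 1.3133, 2.4076])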
3214""".format(**reduceops_common_args),
3215)
3216
3217add_docstr(
3218    torch.cummax,
3219    r"""
cummax(input, dim, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each maximum value found in the dimension :attr:`dim`.
3224
3225.. math::
    y_i = \max(x_1, x_2, x_3, \dots, x_i)
3227
3228Args:
3229    {input}
3230    dim  (int): the dimension to do the operation over
3231
3232Keyword args:
3233    out (tuple, optional): the result tuple of two output tensors (values, indices)
3234
3235Example::
3236
3237    >>> a = torch.randn(10)
3238    >>> a
3239    tensor([-0.3449, -1.5447,  0.0685, -1.5104, -1.1706,  0.2259,  1.4696, -1.3284,
3240         1.9946, -0.8209])
3241    >>> torch.cummax(a, dim=0)
3242    torch.return_types.cummax(
3243        values=tensor([-0.3449, -0.3449,  0.0685,  0.0685,  0.0685,  0.2259,  1.4696,  1.4696,
3244         1.9946,  1.9946]),
3245        indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
3246""".format(**reduceops_common_args),
3247)
3248
3249add_docstr(
3250    torch.cummin,
3251    r"""
cummin(input, dim, *, out=None) -> (Tensor, LongTensor)

Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
location of each minimum value found in the dimension :attr:`dim`.
3256
3257.. math::
    y_i = \min(x_1, x_2, x_3, \dots, x_i)
3259
3260Args:
3261    {input}
3262    dim  (int): the dimension to do the operation over
3263
3264Keyword args:
3265    out (tuple, optional): the result tuple of two output tensors (values, indices)
3266
3267Example::
3268
3269    >>> a = torch.randn(10)
3270    >>> a
3271    tensor([-0.2284, -0.6628,  0.0975,  0.2680, -1.3298, -0.4220, -0.3885,  1.1762,
3272         0.9165,  1.6684])
3273    >>> torch.cummin(a, dim=0)
3274    torch.return_types.cummin(
3275        values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
3276        -1.3298, -1.3298]),
3277        indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
3278""".format(**reduceops_common_args),
3279)
3280
3281add_docstr(
3282    torch.cumprod,
3283    r"""
3284cumprod(input, dim, *, dtype=None, out=None) -> Tensor
3285
3286Returns the cumulative product of elements of :attr:`input` in the dimension
3287:attr:`dim`.
3288
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements:
3291
3292.. math::
3293    y_i = x_1 \times x_2\times x_3\times \dots \times x_i
3294
3295Args:
3296    {input}
3297    dim  (int): the dimension to do the operation over
3298
3299Keyword args:
3300    {dtype}
3301    {out}
3302
3303Example::
3304
3305    >>> a = torch.randn(10)
3306    >>> a
3307    tensor([ 0.6001,  0.2069, -0.1919,  0.9792,  0.6727,  1.0062,  0.4126,
3308            -0.2129, -0.4206,  0.1968])
3309    >>> torch.cumprod(a, dim=0)
3310    tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
3311             0.0014, -0.0006, -0.0001])
3312
3313    >>> a[5] = 0.0
3314    >>> torch.cumprod(a, dim=0)
3315    tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
3316             0.0000, -0.0000, -0.0000])
3317""".format(**reduceops_common_args),
3318)
3319
3320add_docstr(
3321    torch.cumsum,
3322    r"""
3323cumsum(input, dim, *, dtype=None, out=None) -> Tensor
3324
3325Returns the cumulative sum of elements of :attr:`input` in the dimension
3326:attr:`dim`.
3327
For example, if :attr:`input` is a vector of size N, the result will also be
a vector of size N, with elements:
3330
3331.. math::
3332    y_i = x_1 + x_2 + x_3 + \dots + x_i
3333
3334Args:
3335    {input}
3336    dim  (int): the dimension to do the operation over
3337
3338Keyword args:
3339    {dtype}
3340    {out}
3341
3342Example::
3343
3344    >>> a = torch.randint(1, 20, (10,))
3345    >>> a
3346    tensor([13,  7,  3, 10, 13,  3, 15, 10,  9, 10])
3347    >>> torch.cumsum(a, dim=0)
3348    tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93])
3349""".format(**reduceops_common_args),
3350)
3351
3352add_docstr(
3353    torch.count_nonzero,
3354    r"""
3355count_nonzero(input, dim=None) -> Tensor
3356
3357Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`.
3358If no dim is specified then all non-zeros in the tensor are counted.
3359
3360Args:
3361    {input}
3362    dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros.
3363
3364Example::
3365
3366    >>> x = torch.zeros(3,3)
3367    >>> x[torch.randn(3,3) > 0.5] = 1
3368    >>> x
3369    tensor([[0., 1., 1.],
3370            [0., 0., 0.],
3371            [0., 0., 1.]])
3372    >>> torch.count_nonzero(x)
3373    tensor(3)
3374    >>> torch.count_nonzero(x, dim=0)
3375    tensor([0, 1, 2])
3376""".format(**reduceops_common_args),
3377)
3378
3379add_docstr(
3380    torch.dequantize,
3381    r"""
3382dequantize(tensor) -> Tensor
3383
Returns an fp32 Tensor by dequantizing a quantized Tensor.
3385
3386Args:
3387    tensor (Tensor): A quantized Tensor
3388
3389.. function:: dequantize(tensors) -> sequence of Tensors
3390   :noindex:
3391
Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors.
3393
3394Args:
3395     tensors (sequence of Tensors): A list of quantized Tensors
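
A minimal round trip, assuming a per-tensor quantized input created with
:func:`torch.quantize_per_tensor`:

Example::

    >>> x = torch.tensor([0.0, 1.0, 2.0])
    >>> qx = torch.quantize_per_tensor(x, scale=0.1, zero_point=0, dtype=torch.quint8)
    >>> torch.dequantize(qx)
    tensor([0., 1., 2.])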
3396""",
3397)
3398
3399add_docstr(
3400    torch.diag,
3401    r"""
3402diag(input, diagonal=0, *, out=None) -> Tensor
3403
3404- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
3405  with the elements of :attr:`input` as the diagonal.
3406- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with
3407  the diagonal elements of :attr:`input`.
3408
3409The argument :attr:`diagonal` controls which diagonal to consider:
3410
3411- If :attr:`diagonal` = 0, it is the main diagonal.
3412- If :attr:`diagonal` > 0, it is above the main diagonal.
3413- If :attr:`diagonal` < 0, it is below the main diagonal.
3414
3415Args:
3416    {input}
3417    diagonal (int, optional): the diagonal to consider
3418
3419Keyword args:
3420    {out}
3421
3422.. seealso::
3423
3424        :func:`torch.diagonal` always returns the diagonal of its input.
3425
3426        :func:`torch.diagflat` always constructs a tensor with diagonal elements
3427        specified by the input.
3428
3429Examples:
3430
3431Get the square matrix where the input vector is the diagonal::
3432
3433    >>> a = torch.randn(3)
3434    >>> a
    tensor([ 0.5950, -0.0872,  2.3298])
    >>> torch.diag(a)
    tensor([[ 0.5950,  0.0000,  0.0000],
            [ 0.0000, -0.0872,  0.0000],
            [ 0.0000,  0.0000,  2.3298]])
    >>> torch.diag(a, 1)
    tensor([[ 0.0000,  0.5950,  0.0000,  0.0000],
            [ 0.0000,  0.0000, -0.0872,  0.0000],
            [ 0.0000,  0.0000,  0.0000,  2.3298],
            [ 0.0000,  0.0000,  0.0000,  0.0000]])
3445
3446Get the k-th diagonal of a given matrix::
3447
3448    >>> a = torch.randn(3, 3)
3449    >>> a
    tensor([[-0.4264,  0.0255, -0.1064],
            [ 0.8795, -0.2429,  0.1374],
            [ 0.1029, -0.6482, -1.6300]])
    >>> torch.diag(a, 0)
    tensor([-0.4264, -0.2429, -1.6300])
    >>> torch.diag(a, 1)
    tensor([ 0.0255,  0.1374])
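
A negative :attr:`diagonal` selects entries below the main diagonal, which is
easy to verify on a small integer matrix::

    >>> m = torch.tensor([[1, 2], [3, 4]])
    >>> torch.diag(m, -1)
    tensor([3])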
3457""".format(**common_args),
3458)
3459
3460add_docstr(
3461    torch.diag_embed,
3462    r"""
3463diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor
3464
3465Creates a tensor whose diagonals of certain 2D planes (specified by
3466:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`.
3467To facilitate creating batched diagonal matrices, the 2D planes formed by
3468the last two dimensions of the returned tensor are chosen by default.
3469
3470The argument :attr:`offset` controls which diagonal to consider:
3471
3472- If :attr:`offset` = 0, it is the main diagonal.
3473- If :attr:`offset` > 0, it is above the main diagonal.
3474- If :attr:`offset` < 0, it is below the main diagonal.
3475
The size of the new matrix is calculated so that the specified diagonal
has the length of the last dimension of :attr:`input`.
3478Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1`
3479and :attr:`dim2` matters. Exchanging them is equivalent to changing the
3480sign of :attr:`offset`.
3481
3482Applying :meth:`torch.diagonal` to the output of this function with
3483the same arguments yields a matrix identical to input. However,
3484:meth:`torch.diagonal` has different default dimensions, so those
3485need to be explicitly specified.
3486
3487Args:
3488    {input} Must be at least 1-dimensional.
3489    offset (int, optional): which diagonal to consider. Default: 0
3490        (main diagonal).
3491    dim1 (int, optional): first dimension with respect to which to
3492        take diagonal. Default: -2.
3493    dim2 (int, optional): second dimension with respect to which to
3494        take diagonal. Default: -1.
3495
3496Example::
3497
3498    >>> a = torch.randn(2, 3)
3499    >>> torch.diag_embed(a)
3500    tensor([[[ 1.5410,  0.0000,  0.0000],
3501             [ 0.0000, -0.2934,  0.0000],
3502             [ 0.0000,  0.0000, -2.1788]],
3503
3504            [[ 0.5684,  0.0000,  0.0000],
3505             [ 0.0000, -1.0845,  0.0000],
3506             [ 0.0000,  0.0000, -1.3986]]])
3507
3508    >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2)
3509    tensor([[[ 0.0000,  1.5410,  0.0000,  0.0000],
3510             [ 0.0000,  0.5684,  0.0000,  0.0000]],
3511
3512            [[ 0.0000,  0.0000, -0.2934,  0.0000],
3513             [ 0.0000,  0.0000, -1.0845,  0.0000]],
3514
3515            [[ 0.0000,  0.0000,  0.0000, -2.1788],
3516             [ 0.0000,  0.0000,  0.0000, -1.3986]],
3517
3518            [[ 0.0000,  0.0000,  0.0000,  0.0000],
3519             [ 0.0000,  0.0000,  0.0000,  0.0000]]])
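
As noted above, :func:`torch.diagonal` with matching ``dim1`` and ``dim2``
recovers the input; a small deterministic round trip::

    >>> v = torch.tensor([1., 2., 3.])
    >>> torch.diagonal(torch.diag_embed(v), dim1=-2, dim2=-1)
    tensor([1., 2., 3.])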
3520""".format(**common_args),
3521)
3522
3523
3524add_docstr(
3525    torch.diagflat,
3526    r"""
3527diagflat(input, offset=0) -> Tensor
3528
3529- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor
3530  with the elements of :attr:`input` as the diagonal.
3531- If :attr:`input` is a tensor with more than one dimension, then returns a
3532  2-D tensor with diagonal elements equal to a flattened :attr:`input`.
3533
3534The argument :attr:`offset` controls which diagonal to consider:
3535
3536- If :attr:`offset` = 0, it is the main diagonal.
3537- If :attr:`offset` > 0, it is above the main diagonal.
3538- If :attr:`offset` < 0, it is below the main diagonal.
3539
3540Args:
3541    {input}
3542    offset (int, optional): the diagonal to consider. Default: 0 (main
3543        diagonal).
3544
3545Examples::
3546
3547    >>> a = torch.randn(3)
3548    >>> a
3549    tensor([-0.2956, -0.9068,  0.1695])
3550    >>> torch.diagflat(a)
3551    tensor([[-0.2956,  0.0000,  0.0000],
3552            [ 0.0000, -0.9068,  0.0000],
3553            [ 0.0000,  0.0000,  0.1695]])
3554    >>> torch.diagflat(a, 1)
3555    tensor([[ 0.0000, -0.2956,  0.0000,  0.0000],
3556            [ 0.0000,  0.0000, -0.9068,  0.0000],
3557            [ 0.0000,  0.0000,  0.0000,  0.1695],
3558            [ 0.0000,  0.0000,  0.0000,  0.0000]])
3559
3560    >>> a = torch.randn(2, 2)
3561    >>> a
3562    tensor([[ 0.2094, -0.3018],
3563            [-0.1516,  1.9342]])
3564    >>> torch.diagflat(a)
3565    tensor([[ 0.2094,  0.0000,  0.0000,  0.0000],
3566            [ 0.0000, -0.3018,  0.0000,  0.0000],
3567            [ 0.0000,  0.0000, -0.1516,  0.0000],
3568            [ 0.0000,  0.0000,  0.0000,  1.9342]])
3569""".format(**common_args),
3570)
3571
3572add_docstr(
3573    torch.diagonal,
3574    r"""
3575diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor
3576
Returns a partial view of :attr:`input` with its diagonal elements
3578with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension
3579at the end of the shape.
3580
3581The argument :attr:`offset` controls which diagonal to consider:
3582
3583- If :attr:`offset` = 0, it is the main diagonal.
3584- If :attr:`offset` > 0, it is above the main diagonal.
3585- If :attr:`offset` < 0, it is below the main diagonal.
3586
3587Applying :meth:`torch.diag_embed` to the output of this function with
3588the same arguments yields a diagonal matrix with the diagonal entries
3589of the input. However, :meth:`torch.diag_embed` has different default
3590dimensions, so those need to be explicitly specified.
3591
3592Args:
3593    {input} Must be at least 2-dimensional.
3594    offset (int, optional): which diagonal to consider. Default: 0
3595        (main diagonal).
3596    dim1 (int, optional): first dimension with respect to which to
3597        take diagonal. Default: 0.
3598    dim2 (int, optional): second dimension with respect to which to
3599        take diagonal. Default: 1.
3600
.. note:: To take a batch diagonal, pass in ``dim1=-2, dim2=-1``.
3602
3603Examples::
3604
3605    >>> a = torch.randn(3, 3)
3606    >>> a
3607    tensor([[-1.0854,  1.1431, -0.1752],
3608            [ 0.8536, -0.0905,  0.0360],
3609            [ 0.6927, -0.3735, -0.4945]])
3610
3611
3612    >>> torch.diagonal(a, 0)
3613    tensor([-1.0854, -0.0905, -0.4945])
3614
3615
3616    >>> torch.diagonal(a, 1)
3617    tensor([ 1.1431,  0.0360])
3618
3619
3620    >>> x = torch.randn(2, 5, 4, 2)
3621    >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2)
3622    tensor([[[-1.2631,  0.3755, -1.5977, -1.8172],
3623             [-1.1065,  1.0401, -0.2235, -0.7938]],
3624
3625            [[-1.7325, -0.3081,  0.6166,  0.2335],
3626             [ 1.0500,  0.7336, -0.3836, -1.1015]]])
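
Because the result is a view, writing through it updates :attr:`input`; a
minimal sketch::

    >>> m = torch.zeros(2, 2)
    >>> torch.diagonal(m)[:] = 7.0
    >>> m
    tensor([[7., 0.],
            [0., 7.]])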
3627""".format(**common_args),
3628)
3629
3630add_docstr(
3631    torch.diagonal_scatter,
3632    r"""
3633diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor
3634
3635Embeds the values of the :attr:`src` tensor into :attr:`input` along
3636the diagonal elements of :attr:`input`, with respect to :attr:`dim1`
3637and :attr:`dim2`.
3638
3639This function returns a tensor with fresh storage; it does not
3640return a view.
3641
3642The argument :attr:`offset` controls which diagonal to consider:
3643
3644- If :attr:`offset` = 0, it is the main diagonal.
3645- If :attr:`offset` > 0, it is above the main diagonal.
3646- If :attr:`offset` < 0, it is below the main diagonal.
3647
3648Args:
3649    {input} Must be at least 2-dimensional.
3650    src (Tensor): the tensor to embed into :attr:`input`.
3651    offset (int, optional): which diagonal to consider. Default: 0
3652        (main diagonal).
3653    dim1 (int, optional): first dimension with respect to which to
3654        take diagonal. Default: 0.
3655    dim2 (int, optional): second dimension with respect to which to
3656        take diagonal. Default: 1.
3657
3658.. note::
3659
3660    :attr:`src` must be of the proper size in order to be embedded
3661    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.diagonal(input, offset, dim1, dim2)``.
3663
3664Examples::
3665
3666    >>> a = torch.zeros(3, 3)
3667    >>> a
3668    tensor([[0., 0., 0.],
3669            [0., 0., 0.],
3670            [0., 0., 0.]])
3671
3672    >>> torch.diagonal_scatter(a, torch.ones(3), 0)
3673    tensor([[1., 0., 0.],
3674            [0., 1., 0.],
3675            [0., 0., 1.]])
3676
3677    >>> torch.diagonal_scatter(a, torch.ones(2), 1)
3678    tensor([[0., 1., 0.],
3679            [0., 0., 1.],
3680            [0., 0., 0.]])
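
A negative :attr:`offset` embeds :attr:`src` below the main diagonal::

    >>> torch.diagonal_scatter(a, torch.ones(2), -1)
    tensor([[0., 0., 0.],
            [1., 0., 0.],
            [0., 1., 0.]])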
3681""".format(**common_args),
3682)
3683
3684add_docstr(
3685    torch.as_strided_scatter,
3686    r"""
3687as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor
3688
3689Embeds the values of the :attr:`src` tensor into :attr:`input` along
3690the elements corresponding to the result of calling
``input.as_strided(size, stride, storage_offset)``.
3692
3693This function returns a tensor with fresh storage; it does not
3694return a view.
3695
Args:
    {input}
    src (Tensor): the tensor to embed into :attr:`input`
    size (tuple of ints): the shape of the output tensor
    stride (tuple of ints): the stride of the output tensor
    storage_offset (int, optional): the offset in the underlying storage of the output tensor
3701
3702.. note::
3703
3704    :attr:`src` must be of the proper size in order to be embedded
3705    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.as_strided(input, size, stride, storage_offset)``.
3707
3708Example::
3709
3710    >>> a = torch.arange(4).reshape(2, 2) + 1
3711    >>> a
3712    tensor([[1, 2],
3713            [3, 4]])
3714    >>> b = torch.zeros(3, 3)
3715    >>> b
3716    tensor([[0., 0., 0.],
3717            [0., 0., 0.],
3718            [0., 0., 0.]])
3719    >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2))
3720    tensor([[1., 3., 2.],
3721            [4., 0., 0.],
3722            [0., 0., 0.]])
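
The region written by this function is exactly the one selected by
``input.as_strided(size, stride, storage_offset)``; on ``b`` above it is a
``(2, 2)`` strided view::

    >>> b.as_strided((2, 2), (1, 2))
    tensor([[0., 0.],
            [0., 0.]])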
3723
3724""".format(**common_args),
3725)
3726
3727add_docstr(
3728    torch.diff,
3729    r"""
3730diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor
3731
3732Computes the n-th forward difference along the given dimension.
3733
3734The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order
3735differences are calculated by using :func:`torch.diff` recursively.
3736
3737Args:
3738    input (Tensor): the tensor to compute the differences on
3739    n (int, optional): the number of times to recursively compute the difference
3740    dim (int, optional): the dimension to compute the difference along.
3741        Default is the last dimension.
3742    prepend, append (Tensor, optional): values to prepend or append to
3743        :attr:`input` along :attr:`dim` before computing the difference.
        Their number of dimensions must match that of :attr:`input`, and their
        shapes must match :attr:`input`'s shape except on :attr:`dim`.
3746
3747Keyword args:
3748    {out}
3749
3750Example::
3751
3752    >>> a = torch.tensor([1, 3, 2])
3753    >>> torch.diff(a)
3754    tensor([ 2, -1])
3755    >>> b = torch.tensor([4, 5])
3756    >>> torch.diff(a, append=b)
3757    tensor([ 2, -1,  2,  1])
3758    >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]])
3759    >>> torch.diff(c, dim=0)
3760    tensor([[2, 2, 2]])
3761    >>> torch.diff(c, dim=1)
3762    tensor([[1, 1],
3763            [1, 1]])
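
Higher-order differences are first differences applied repeatedly; for the
squares ``0, 1, 4, 9, 16`` the second difference is the constant 2::

    >>> torch.diff(torch.tensor([0, 1, 4, 9, 16]), n=2)
    tensor([2, 2, 2])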
3764""".format(**common_args),
3765)
3766
3767add_docstr(
3768    torch.digamma,
3769    r"""
3770digamma(input, *, out=None) -> Tensor
3771
3772Alias for :func:`torch.special.digamma`.
3773""",
3774)
3775
3776add_docstr(
3777    torch.dist,
3778    r"""
3779dist(input, other, p=2) -> Tensor
3780
Returns the p-norm of (:attr:`input` - :attr:`other`).
3782
3783The shapes of :attr:`input` and :attr:`other` must be
3784:ref:`broadcastable <broadcasting-semantics>`.
3785
3786Args:
3787    {input}
    other (Tensor): the right-hand-side input tensor
3789    p (float, optional): the norm to be computed
3790
3791Example::
3792
3793    >>> x = torch.randn(4)
3794    >>> x
3795    tensor([-1.5393, -0.8675,  0.5916,  1.6321])
3796    >>> y = torch.randn(4)
3797    >>> y
3798    tensor([ 0.0967, -1.0511,  0.6295,  0.8360])
3799    >>> torch.dist(x, y, 3.5)
3800    tensor(1.6727)
3801    >>> torch.dist(x, y, 3)
3802    tensor(1.6973)
3803    >>> torch.dist(x, y, 0)
3804    tensor(4.)
3805    >>> torch.dist(x, y, 1)
3806    tensor(2.6537)
3807""".format(**common_args),
3808)
3809
3810add_docstr(
3811    torch.div,
3812    r"""
3813div(input, other, *, rounding_mode=None, out=None) -> Tensor
3814
Divides each element of :attr:`input` by the corresponding element of
3816:attr:`other`.
3817
3818.. math::
3819    \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}}
3820
3821.. note::
3822    By default, this performs a "true" division like Python 3.
3823    See the :attr:`rounding_mode` argument for floor division.
3824
3825Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
3826:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
3827Always promotes integer types to the default scalar type.
3828
3829Args:
3830    input (Tensor): the dividend
3831    other (Tensor or Number): the divisor
3832
3833Keyword args:
3834    rounding_mode (str, optional): Type of rounding applied to the result:
3835
3836        * None - default behavior. Performs no rounding and, if both :attr:`input` and
3837          :attr:`other` are integer types, promotes the inputs to the default scalar type.
3838          Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``.
3839        * ``"trunc"`` - rounds the results of the division towards zero.
3840          Equivalent to C-style integer division.
3841        * ``"floor"`` - rounds the results of the division down.
3842          Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``.
3843
3844    {out}
3845
3846Examples::
3847
3848    >>> x = torch.tensor([ 0.3810,  1.2774, -0.2972, -0.3719,  0.4637])
3849    >>> torch.div(x, 0.5)
3850    tensor([ 0.7620,  2.5548, -0.5944, -0.7438,  0.9274])
3851
3852    >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
3853    ...                   [ 0.1815, -1.0111,  0.9805, -1.5923],
3854    ...                   [ 0.1062,  1.4581,  0.7759, -1.2344],
3855    ...                   [-0.1830, -0.0313,  1.1908, -1.4757]])
3856    >>> b = torch.tensor([ 0.8032,  0.2930, -0.8113, -0.2308])
3857    >>> torch.div(a, b)
3858    tensor([[-0.4620, -6.6051,  0.5676,  1.2639],
3859            [ 0.2260, -3.4509, -1.2086,  6.8990],
3860            [ 0.1322,  4.9764, -0.9564,  5.3484],
3861            [-0.2278, -0.1068, -1.4678,  6.3938]])
3862
3863    >>> torch.div(a, b, rounding_mode='trunc')
3864    tensor([[-0., -6.,  0.,  1.],
3865            [ 0., -3., -1.,  6.],
3866            [ 0.,  4., -0.,  5.],
3867            [-0., -0., -1.,  6.]])
3868
3869    >>> torch.div(a, b, rounding_mode='floor')
3870    tensor([[-1., -7.,  0.,  1.],
3871            [ 0., -4., -2.,  6.],
3872            [ 0.,  4., -1.,  5.],
3873            [-1., -1., -2.,  6.]])
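
    >>> # On negative quotients the two rounding modes differ by one:
    >>> torch.div(torch.tensor([-7]), 2, rounding_mode='trunc')
    tensor([-3])
    >>> torch.div(torch.tensor([-7]), 2, rounding_mode='floor')
    tensor([-4])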
3874
3875""".format(**common_args),
3876)
3877
3878add_docstr(
3879    torch.divide,
3880    r"""
3881divide(input, other, *, rounding_mode=None, out=None) -> Tensor
3882
3883Alias for :func:`torch.div`.
3884""",
3885)
3886
3887add_docstr(
3888    torch.dot,
3889    r"""
3890dot(input, tensor, *, out=None) -> Tensor
3891
3892Computes the dot product of two 1D tensors.
3893
3894.. note::
3895
3896    Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product
3897    of two 1D tensors with the same number of elements.
3898
3899Args:
3900    input (Tensor): first tensor in the dot product, must be 1D.
3901    tensor (Tensor): second tensor in the dot product, must be 1D.
3902
3903Keyword args:
3904    {out}
3905
3906Example::
3907
3908    >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1]))
3909    tensor(7)
3910
3911    >>> t1, t2 = torch.tensor([0, 1]), torch.tensor([2, 3])
3912    >>> torch.dot(t1, t2)
3913    tensor(3)
3914""".format(**common_args),
3915)
3916
3917add_docstr(
3918    torch.vdot,
3919    r"""
3920vdot(input, other, *, out=None) -> Tensor
3921
Computes the dot product of two 1D vectors.
3923
3924In symbols, this function computes
3925
3926.. math::
3927
    \sum_{i=1}^n \overline{x_i}y_i,
3929
3930where :math:`\overline{x_i}` denotes the conjugate for complex
3931vectors, and it is the identity for real vectors.
3932
3933.. note::
3934
3935    Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product
3936    of two 1D tensors with the same number of elements.
3937
3938.. seealso::
3939
3940        :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension.
3941
3942Args:
3943    input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex.
3944    other (Tensor): second tensor in the dot product, must be 1D.
3945
3946Keyword args:
3947"""
3948    + rf"""
3949.. note:: {common_args["out"]}
3950"""
3951    + r"""
3952
3953Example::
3954
3955    >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1]))
3956    tensor(7)
    >>> a = torch.tensor((1 + 2j, 3 - 1j))
    >>> b = torch.tensor((2 + 1j, 4 - 0j))
    >>> torch.vdot(a, b)
    tensor(16.+1.j)
    >>> torch.vdot(b, a)
    tensor(16.-1.j)
3963""",
3964)
3965
3966add_docstr(
3967    torch.eq,
3968    r"""
3969eq(input, other, *, out=None) -> Tensor
3970
Computes element-wise equality.
3972
3973The second argument can be a number or a tensor whose shape is
3974:ref:`broadcastable <broadcasting-semantics>` with the first argument.
3975
3976Args:
3977    input (Tensor): the tensor to compare
3978    other (Tensor or float): the tensor or value to compare
3979
3980Keyword args:
3981    {out}
3982
3983Returns:
3984    A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere
3985
3986Example::
3987
3988    >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[ True, False],
            [False,  True]])
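
The second argument may also be a plain number::

    >>> torch.eq(torch.tensor([1, 2, 3]), 2)
    tensor([False,  True, False])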
3991""".format(**common_args),
3992)
3993
3994add_docstr(
3995    torch.equal,
3996    r"""
3997equal(input, other) -> bool
3998
3999``True`` if two tensors have the same size and elements, ``False`` otherwise.
4000
4001Example::
4002
4003    >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2]))
4004    True
4005""",
4006)
4007
4008add_docstr(
4009    torch.erf,
4010    r"""
4011erf(input, *, out=None) -> Tensor
4012
4013Alias for :func:`torch.special.erf`.
4014""",
4015)
4016
4017add_docstr(
4018    torch.erfc,
4019    r"""
4020erfc(input, *, out=None) -> Tensor
4021
4022Alias for :func:`torch.special.erfc`.
4023""",
4024)
4025
4026add_docstr(
4027    torch.erfinv,
4028    r"""
4029erfinv(input, *, out=None) -> Tensor
4030
4031Alias for :func:`torch.special.erfinv`.
4032""",
4033)
4034
4035add_docstr(
4036    torch.exp,
4037    r"""
4038exp(input, *, out=None) -> Tensor
4039
4040Returns a new tensor with the exponential of the elements
4041of the input tensor :attr:`input`.
4042
4043.. math::
4044    y_{i} = e^{x_{i}}
4045"""
4046    + r"""
4047Args:
4048    {input}
4049
4050Keyword args:
4051    {out}
4052
4053Example::
4054
4055    >>> torch.exp(torch.tensor([0, math.log(2.)]))
4056    tensor([ 1.,  2.])
4057""".format(**common_args),
4058)
4059
4060add_docstr(
4061    torch.exp2,
4062    r"""
4063exp2(input, *, out=None) -> Tensor
4064
4065Alias for :func:`torch.special.exp2`.
4066""",
4067)
4068
4069add_docstr(
4070    torch.expm1,
4071    r"""
4072expm1(input, *, out=None) -> Tensor
4073
4074Alias for :func:`torch.special.expm1`.
4075""",
4076)
4077
4078add_docstr(
4079    torch.eye,
4080    r"""
4081eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
4082
4083Returns a 2-D tensor with ones on the diagonal and zeros elsewhere.
4084
4085Args:
4086    n (int): the number of rows
4087    m (int, optional): the number of columns with default being :attr:`n`
4088
4089Keyword arguments:
4090    {out}
4091    {dtype}
4092    {layout}
4093    {device}
4094    {requires_grad}
4095
4096Returns:
4097    Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere
4098
4099Example::
4100
4101    >>> torch.eye(3)
4102    tensor([[ 1.,  0.,  0.],
4103            [ 0.,  1.,  0.],
4104            [ 0.,  0.,  1.]])
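
Passing :attr:`m` produces a rectangular matrix::

    >>> torch.eye(2, 3)
    tensor([[1., 0., 0.],
            [0., 1., 0.]])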
4105""".format(**factory_common_args),
4106)
4107
4108add_docstr(
4109    torch.floor,
4110    r"""
4111floor(input, *, out=None) -> Tensor
4112
4113Returns a new tensor with the floor of the elements of :attr:`input`,
4114the largest integer less than or equal to each element.
4115
For integer inputs, follows the array API convention of returning a
4117copy of the input tensor.
4118
4119.. math::
4120    \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor
4121"""
4122    + r"""
4123Args:
4124    {input}
4125
4126Keyword args:
4127    {out}
4128
4129Example::
4130
4131    >>> a = torch.randn(4)
4132    >>> a
4133    tensor([-0.8166,  1.5308, -0.2530, -0.2091])
4134    >>> torch.floor(a)
4135    tensor([-1.,  1., -1., -1.])
4136""".format(**common_args),
4137)
4138
4139add_docstr(
4140    torch.floor_divide,
4141    r"""
4142floor_divide(input, other, *, out=None) -> Tensor
4143
4144.. note::
4145
4146    Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed
4147    truncation division. To restore the previous behavior use
4148    :func:`torch.div` with ``rounding_mode='trunc'``.
4149
4150Computes :attr:`input` divided by :attr:`other`, elementwise, and floors
4151the result.
4152
4153.. math::
4154    \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right)
4155
4156"""
4157    + r"""
4158
4159Supports broadcasting to a common shape, type promotion, and integer and float inputs.
4160
4161Args:
4162    input (Tensor or Number): the dividend
4163    other (Tensor or Number): the divisor
4164
4165Keyword args:
4166    {out}
4167
4168Example::
4169
4170    >>> a = torch.tensor([4.0, 3.0])
4171    >>> b = torch.tensor([2.0, 2.0])
4172    >>> torch.floor_divide(a, b)
4173    tensor([2.0, 1.0])
4174    >>> torch.floor_divide(a, 1.4)
4175    tensor([2.0, 2.0])
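
Unlike truncation, flooring moves negative quotients toward more negative
values::

    >>> torch.floor_divide(torch.tensor([-4.0, -3.0]), b)
    tensor([-2., -2.])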
4176""".format(**common_args),
4177)
4178
4179add_docstr(
4180    torch.fmod,
4181    r"""
4182fmod(input, other, *, out=None) -> Tensor
4183
4184Applies C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_ entrywise.
4185The result has the same sign as the dividend :attr:`input` and its absolute value
4186is less than that of :attr:`other`.
4187
4188This function may be defined in terms of :func:`torch.div` as
4189
4190.. code:: python
4191
4192    torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b
4193
4194Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
4195:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
4196
4197.. note::
4198
    When the divisor is zero, this function returns ``NaN`` for floating point
    dtypes on both CPU and GPU, and raises ``RuntimeError`` for integer division
    by zero on CPU; integer division by zero on GPU may return any value.
4202
4203.. note::
4204
4205   Complex inputs are not supported. In some cases, it is not mathematically
4206   possible to satisfy the definition of a modulo operation with complex numbers.
4207
4208.. seealso::
4209
    :func:`torch.remainder`, which implements Python's modulo operator.
    It is defined in terms of division that rounds the result down.
4212
4213Args:
4214    input (Tensor): the dividend
4215    other (Tensor or Scalar): the divisor
4216
4217Keyword args:
4218    {out}
4219
4220Example::
4221
4222    >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
4223    tensor([-1., -0., -1.,  1.,  0.,  1.])
4224    >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5)
4225    tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000])
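
    >>> # The identity with torch.div stated above can be checked directly:
    >>> a = torch.tensor([-3., -2, -1, 1, 2, 3])
    >>> torch.fmod(a, 2) == a - a.div(2, rounding_mode="trunc") * 2
    tensor([True, True, True, True, True, True])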
4226
4227""".format(**common_args),
4228)
4229
4230add_docstr(
4231    torch.frac,
4232    r"""
4233frac(input, *, out=None) -> Tensor
4234
4235Computes the fractional portion of each element in :attr:`input`.
4236
4237.. math::
4238    \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i})
4239
4240Example::
4241
4242    >>> torch.frac(torch.tensor([1, 2.5, -3.2]))
4243    tensor([ 0.0000,  0.5000, -0.2000])
4244""",
4245)
4246
4247add_docstr(
4248    torch.frexp,
4249    r"""
4250frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent)
4251
4252Decomposes :attr:`input` into mantissa and exponent tensors
4253such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`.
4254
4255The range of mantissa is the open interval (-1, 1).
4256
4257Supports float inputs.
4258
4259Args:
4260    input (Tensor): the input tensor
4261
Keyword args:
4264    out (tuple, optional): the output tensors
4265
4266Example::
4267
4268    >>> x = torch.arange(9.)
4269    >>> mantissa, exponent = torch.frexp(x)
4270    >>> mantissa
4271    tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000])
4272    >>> exponent
4273    tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32)
4274    >>> torch.ldexp(mantissa, exponent)
4275    tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.])
4276""",
4277)
4278
4279add_docstr(
4280    torch.from_numpy,
4281    r"""
4282from_numpy(ndarray) -> Tensor
4283
4284Creates a :class:`Tensor` from a :class:`numpy.ndarray`.
4285
4286The returned tensor and :attr:`ndarray` share the same memory. Modifications to
4287the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned
4288tensor is not resizable.
4289
4290It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``,
4291``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``,
4292``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``,
4293and ``bool``.
4294
4295.. warning::
4296    Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior.
4297
4298Example::
4299
4300    >>> a = numpy.array([1, 2, 3])
4301    >>> t = torch.from_numpy(a)
4302    >>> t
4303    tensor([ 1,  2,  3])
4304    >>> t[0] = -1
4305    >>> a
4306    array([-1,  2,  3])
4307""",
4308)
4309
4310add_docstr(
4311    torch.frombuffer,
4312    r"""
4313frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor
4314
4315Creates a 1-dimensional :class:`Tensor` from an object that implements
4316the Python buffer protocol.
4317
4318Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of
4319the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count`
4320elements.
4321
4322Note that either of the following must be true:
4323
1. :attr:`count` is a positive number, and the total number of bytes
in the buffer is at least :attr:`offset` plus :attr:`count` times the size
(in bytes) of :attr:`dtype`.

2. :attr:`count` is negative, and the length (number of bytes) of the buffer
minus :attr:`offset` is a multiple of the size (in bytes) of
:attr:`dtype`.
4331
4332The returned tensor and buffer share the same memory. Modifications to
4333the tensor will be reflected in the buffer and vice versa. The returned
4334tensor is not resizable.
4335
4336.. note::
4337    This function increments the reference count for the object that
4338    owns the shared memory. Therefore, such memory will not be deallocated
4339    before the returned tensor goes out of scope.
4340
4341.. warning::
4342    This function's behavior is undefined when passed an object implementing
4343    the buffer protocol whose data is not on the CPU. Doing so is likely to
4344    cause a segmentation fault.
4345
4346.. warning::
4347    This function does not try to infer the :attr:`dtype` (hence, it is not
4348    optional). Passing a different :attr:`dtype` than its source may result
4349    in unexpected behavior.
4350
4351Args:
4352    buffer (object): a Python object that exposes the buffer interface.
4353
4354Keyword args:
4355    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
4356    count (int, optional): the number of desired elements to be read.
4357        If negative, all the elements (until the end of the buffer) will be
4358        read. Default: -1.
4359    offset (int, optional): the number of bytes to skip at the start of
4360        the buffer. Default: 0.
4361    {requires_grad}
4362
4363Example::
4364
4365    >>> import array
4366    >>> a = array.array('i', [1, 2, 3])
4367    >>> t = torch.frombuffer(a, dtype=torch.int32)
4368    >>> t
4369    tensor([ 1,  2,  3])
4370    >>> t[0] = -1
4371    >>> a
4372    array([-1,  2,  3])
4373
4374    >>> # Interprets the signed char bytes as 32-bit integers.
4375    >>> # Each 4 signed char elements will be interpreted as
4376    >>> # 1 signed 32-bit integer.
4377    >>> import array
4378    >>> a = array.array('b', [-1, 0, 0, 0])
4379    >>> torch.frombuffer(a, dtype=torch.int32)
4380    tensor([255], dtype=torch.int32)
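
The :attr:`count` and :attr:`offset` arguments select a sub-range of the
buffer, here skipping the first ``int32`` (4 bytes on most platforms) and
reading two elements::

    >>> import array
    >>> a = array.array('i', [1, 2, 3, 4])
    >>> torch.frombuffer(a, dtype=torch.int32, count=2, offset=4)
    tensor([2, 3], dtype=torch.int32)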
4381""".format(**factory_common_args),
4382)
4383
4384add_docstr(
4385    torch.from_file,
4386    r"""
4387from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False)
4388
4389Creates a CPU tensor with a storage backed by a memory-mapped file.
4390
If ``shared`` is ``True``, then memory is shared between processes. All changes are written to the file.
If ``shared`` is ``False``, then changes to the tensor do not affect the file.

``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain
at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True``, the file will be created if needed.
4396
4397.. note::
4398    Only CPU tensors can be mapped to files.
4399
4400.. note::
4401    For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory.
4402
4403
4404Args:
4405    filename (str): file name to map
4406    shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
4407                    underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
4408    size (int): number of elements in the tensor
4409
4410Keyword args:
4411    {dtype}
4412    {layout}
4413    {device}
4414    {pin_memory}
4415
Example::

    >>> t = torch.randn(2, 5, dtype=torch.float64)
    >>> t.numpy().tofile('storage.pt')
    >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64)
""".format(**factory_common_args),
4421)
4422
4423add_docstr(
4424    torch.flatten,
4425    r"""
4426flatten(input, start_dim=0, end_dim=-1) -> Tensor
4427
4428Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim`
4429are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened.
4430The order of elements in :attr:`input` is unchanged.
4431
4432Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view,
or a copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can
4434be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the
4435flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned.
4436
4437.. note::
4438    Flattening a zero-dimensional tensor will return a one-dimensional view.
4439
4440Args:
4441    {input}
4442    start_dim (int): the first dim to flatten
4443    end_dim (int): the last dim to flatten
4444
4445Example::
4446
4447    >>> t = torch.tensor([[[1, 2],
4448    ...                    [3, 4]],
4449    ...                   [[5, 6],
4450    ...                    [7, 8]]])
4451    >>> torch.flatten(t)
4452    tensor([1, 2, 3, 4, 5, 6, 7, 8])
4453    >>> torch.flatten(t, start_dim=1)
4454    tensor([[1, 2, 3, 4],
4455            [5, 6, 7, 8]])
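
Negative dims count from the end, so ``end_dim=-2`` flattens every dimension
except the last::

    >>> torch.flatten(t, start_dim=0, end_dim=-2)
    tensor([[1, 2],
            [3, 4],
            [5, 6],
            [7, 8]])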
4456""".format(**common_args),
4457)
4458
4459add_docstr(
4460    torch.unflatten,
4461    r"""
4462unflatten(input, dim, sizes) -> Tensor
4463
4464Expands a dimension of the input tensor over multiple dimensions.
4465
4466.. seealso::
4467
    :func:`torch.flatten` is the inverse of this function. It coalesces several dimensions into one.
4469
4470Args:
4471    {input}
4472    dim (int): Dimension to be unflattened, specified as an index into
4473         ``input.shape``.
4474    sizes (Tuple[int]): New shape of the unflattened dimension.
4475         One of its elements can be `-1` in which case the corresponding output
4476         dimension is inferred. Otherwise, the product of ``sizes`` *must*
4477         equal ``input.shape[dim]``.
4478
4479Returns:
    A view of :attr:`input` with the specified dimension unflattened.
4481
Examples::

    >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape
4484    torch.Size([3, 2, 2, 1])
4485    >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape
4486    torch.Size([3, 2, 2, 1])
4487    >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape
4488    torch.Size([5, 2, 2, 3, 1, 1, 3])
4489""".format(**common_args),
4490)
4491
4492add_docstr(
4493    torch.gather,
4494    r"""
4495gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor
4496
4497Gathers values along an axis specified by `dim`.
4498
4499For a 3-D tensor the output is specified by::
4500
4501    out[i][j][k] = input[index[i][j][k]][j][k]  # if dim == 0
4502    out[i][j][k] = input[i][index[i][j][k]][k]  # if dim == 1
4503    out[i][j][k] = input[i][j][index[i][j][k]]  # if dim == 2
4504
4505:attr:`input` and :attr:`index` must have the same number of dimensions.
4506It is also required that ``index.size(d) <= input.size(d)`` for all
4507dimensions ``d != dim``.  :attr:`out` will have the same shape as :attr:`index`.
4508Note that ``input`` and ``index`` do not broadcast against each other.
4509
4510Args:
4511    input (Tensor): the source tensor
4512    dim (int): the axis along which to index
4513    index (LongTensor): the indices of elements to gather
4514
4515Keyword arguments:
4516    sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor.
4517    out (Tensor, optional): the destination tensor
4518
4519Example::
4520
4521    >>> t = torch.tensor([[1, 2], [3, 4]])
4522    >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]]))
4523    tensor([[ 1,  1],
4524            [ 4,  3]])
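
Tracing the ``dim == 1`` formula above, row ``i`` of ``index`` picks columns
from row ``i`` of ``input``::

    >>> t = torch.tensor([[10, 20, 30]])
    >>> torch.gather(t, 1, torch.tensor([[2, 0]]))
    tensor([[30, 10]])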
4525""",
4526)
4527
4528
4529add_docstr(
4530    torch.gcd,
4531    r"""
4532gcd(input, other, *, out=None) -> Tensor
4533
4534Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`.
4535
4536Both :attr:`input` and :attr:`other` must have integer types.
4537
4538.. note::
4539    This defines :math:`gcd(0, 0) = 0`.
4540
4541Args:
4542    {input}
4543    other (Tensor): the second input tensor
4544
4545Keyword arguments:
4546    {out}
4547
4548Example::
4549
4550    >>> a = torch.tensor([5, 10, 15])
4551    >>> b = torch.tensor([3, 4, 5])
4552    >>> torch.gcd(a, b)
4553    tensor([1, 2, 5])
4554    >>> c = torch.tensor([3])
4555    >>> torch.gcd(a, c)
4556    tensor([1, 1, 3])
4557""".format(**common_args),
4558)
4559
4560add_docstr(
4561    torch.ge,
4562    r"""
4563ge(input, other, *, out=None) -> Tensor
4564
4565Computes :math:`\text{input} \geq \text{other}` element-wise.
4566"""
4567    + r"""
4568
4569The second argument can be a number or a tensor whose shape is
4570:ref:`broadcastable <broadcasting-semantics>` with the first argument.
4571
4572Args:
4573    input (Tensor): the tensor to compare
4574    other (Tensor or float): the tensor or value to compare
4575
4576Keyword args:
4577    {out}
4578
4579Returns:
4580    A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere
4581
4582Example::
4583
4584    >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[ True,  True],
            [False,  True]])
4586""".format(**common_args),
4587)
4588
4589add_docstr(
4590    torch.greater_equal,
4591    r"""
4592greater_equal(input, other, *, out=None) -> Tensor
4593
4594Alias for :func:`torch.ge`.
4595""",
4596)
4597
4598add_docstr(
4599    torch.gradient,
4600    r"""
4601gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors
4602
4603Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in
4604one or more dimensions using the `second-order accurate central differences method
4605<https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ and
4606either first or second order estimates at the boundaries.
4607
4608The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not
4609specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates
4610to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional
4611:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and
:math:`g(1, 2, 3) = \text{input}[1, 2, 3]`.
4613
4614When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates.
4615This is detailed in the "Keyword Arguments" section below.
4616
4617The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is
4618accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be
4619improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative
4620is estimated using `Taylor's theorem with remainder <https://en.wikipedia.org/wiki/Taylor%27s_theorem>`_.
4621Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring
4622it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using:
4623
4624.. math::
4625    \begin{aligned}
4626        f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2  \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\
        f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2  \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x-h_l, x) \\
4628    \end{aligned}
4629
4630Using the fact that :math:`f \in C^3` and solving the linear system, we derive:
4631
4632.. math::
4633    f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l)
4634          + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} }
4635
4636.. note::
4637    We estimate the gradient of functions in complex domain
4638    :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way.
4639
4640The value of each partial derivative at the boundary points is computed differently. See edge_order below.
4641
4642Args:
4643    input (``Tensor``): the tensor that represents the values of the function
4644
4645Keyword args:
4646    spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify
4647        how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then
4648        the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the
4649        indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding
4650        indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9).
4651        Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for
4652        the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then
4653        the coordinates are (t0[1], t1[2], t2[3])
4654
    dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default
        the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of
        the :attr:`spacing` argument must correspond with the specified dims.
4658
4659    edge_order (``int``, optional): 1 or 2, for `first-order
4660        <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_ or
4661        `second-order <https://www.ams.org/journals/mcom/1988-51-184/S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_
4662        estimation of the boundary ("edge") values, respectively.
4663
4664Examples::
4665
    >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 1, 4]
4667    >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),)
4668    >>> values = torch.tensor([4., 1., 1., 16.], )
4669    >>> torch.gradient(values, spacing = coordinates)
4670    (tensor([-3., -2., 2., 5.]),)
4671
4672    >>> # Estimates the gradient of the R^2 -> R function whose samples are
4673    >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost
4674    >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates
4675    >>> # partial derivative for both dimensions.
4676    >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]])
4677    >>> torch.gradient(t)
4678    (tensor([[ 9., 18., 36., 72.],
4679             [ 9., 18., 36., 72.]]),
4680     tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
4681             [10.0000, 15.0000, 30.0000, 40.0000]]))
4682
4683    >>> # A scalar value for spacing modifies the relationship between tensor indices
4684    >>> # and input coordinates by multiplying the indices to find the
4685    >>> # coordinates. For example, below the indices of the innermost
4686    >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of
4687    >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2].
4688    >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1])
    (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
             [ 4.5000, 9.0000, 18.0000, 36.0000]]),
     tensor([[ 0.5000, 0.7500, 1.5000, 2.0000],
             [ 5.0000, 7.5000, 15.0000, 20.0000]]))
4693    >>> # doubling the spacing between samples halves the estimated partial gradients.
4694
4695    >>>
4696    >>> # Estimates only the partial derivative for dimension 1
4697    >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.)
4698    (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000],
4699             [10.0000, 15.0000, 30.0000, 40.0000]]),)
4700
4701    >>> # When spacing is a list of scalars, the relationship between the tensor
4702    >>> # indices and input coordinates changes based on dimension.
4703    >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate
4704    >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension
4705    >>> # 0, 1 translate to coordinates of [0, 2].
4706    >>> torch.gradient(t, spacing = [3., 2.])
4707    (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
4708             [ 4.5000, 9.0000, 18.0000, 36.0000]]),
4709     tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
4710             [ 3.3333, 5.0000, 10.0000, 13.3333]]))
4711
4712    >>> # The following example is a replication of the previous one with explicit
4713    >>> # coordinates.
4714    >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9]))
4715    >>> torch.gradient(t, spacing = coords)
4716    (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000],
4717             [ 4.5000, 9.0000, 18.0000, 36.0000]]),
4718     tensor([[ 0.3333, 0.5000, 1.0000, 1.3333],
4719             [ 3.3333, 5.0000, 10.0000, 13.3333]]))
4720
4721""",
4722)
4723
4724add_docstr(
4725    torch.geqrf,
4726    r"""
4727geqrf(input, *, out=None) -> (Tensor, Tensor)
4728
4729This is a low-level function for calling LAPACK's geqrf directly. This function
returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_.
4731
4732Computes a QR decomposition of :attr:`input`.
4733Both `Q` and `R` matrices are stored in the same output tensor `a`.
4734The elements of `R` are stored on and above the diagonal.
4735Elementary reflectors (or Householder vectors) implicitly defining matrix `Q`
4736are stored below the diagonal.
4737The results of this function can be used together with :func:`torch.linalg.householder_product`
4738to obtain the `Q` matrix or
4739with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix,
4740for an efficient matrix-matrix multiplication.
4741
4742See `LAPACK documentation for geqrf`_ for further details.
4743
4744.. note::
4745    See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq`
4746    with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition.
4747
4748Args:
4749    input (Tensor): the input matrix
4750
4751Keyword args:
4752    out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`.
4753
4754.. _LAPACK documentation for geqrf:
4755    http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html
4756
4757""",
4758)
4759
4760add_docstr(
4761    torch.inner,
4762    r"""
4763inner(input, other, *, out=None) -> Tensor
4764
4765Computes the dot product for 1D tensors. For higher dimensions, sums the product
4766of elements from :attr:`input` and :attr:`other` along their last dimension.
4767
4768.. note::
4769
4770    If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent
4771    to `torch.mul(input, other)`.
4772
4773    If both :attr:`input` and :attr:`other` are non-scalars, the size of their last
4774    dimension must match and the result is equivalent to `torch.tensordot(input,
4775    other, dims=([-1], [-1]))`
4776
4777Args:
4778    input (Tensor): First input tensor
4779    other (Tensor): Second input tensor
4780
4781Keyword args:
4782    out (Tensor, optional): Optional output tensor to write result into. The output
4783                            shape is `input.shape[:-1] + other.shape[:-1]`.
4784
4785Example::
4786
4787    # Dot product
4788    >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1]))
4789    tensor(7)
4790
4791    # Multidimensional input tensors
4792    >>> a = torch.randn(2, 3)
4793    >>> a
4794    tensor([[0.8173, 1.0874, 1.1784],
4795            [0.3279, 0.1234, 2.7894]])
4796    >>> b = torch.randn(2, 4, 3)
4797    >>> b
    tensor([[[-0.4682, -0.7159,  0.1506],
             [ 0.4034, -0.3657,  1.0387],
             [ 0.9892, -0.6684,  0.1774],
             [ 0.9482,  1.3261,  0.3917]],

            [[ 0.4537,  0.7493,  1.1724],
             [ 0.2291,  0.5749, -0.2267],
             [-0.7920,  0.3607, -0.3701],
             [ 1.3666, -0.5850, -1.7242]]])
    >>> torch.inner(a, b)
    tensor([[[-0.9837,  1.1560,  0.2907,  2.6785],
             [ 2.5671,  0.5452, -0.6912, -1.5509]],

            [[ 0.1782,  2.9843,  0.7366,  1.5672],
             [ 3.5115, -0.4864, -1.2476, -4.4337]]])
4813
4814    # Scalar input
4815    >>> torch.inner(a, torch.tensor(2))
4816    tensor([[1.6347, 2.1748, 2.3567],
4817            [0.6558, 0.2469, 5.5787]])
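
    # A hand-checkable case of the tensordot equivalence noted above
    >>> c = torch.tensor([[1., 2.], [3., 4.]])
    >>> torch.inner(c, c)
    tensor([[ 5., 11.],
            [11., 25.]])
    >>> torch.tensordot(c, c, dims=([-1], [-1]))
    tensor([[ 5., 11.],
            [11., 25.]])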
4818""",
4819)
4820
4821add_docstr(
4822    torch.outer,
4823    r"""
4824outer(input, vec2, *, out=None) -> Tensor
4825
4826Outer product of :attr:`input` and :attr:`vec2`.
4827If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of
4828size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`.
4829
4830.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
4831
4832Args:
4833    input (Tensor): 1-D input vector
4834    vec2 (Tensor): 1-D input vector
4835
4836Keyword args:
4837    out (Tensor, optional): optional output matrix
4838
4839Example::
4840
4841    >>> v1 = torch.arange(1., 5.)
4842    >>> v2 = torch.arange(1., 4.)
4843    >>> torch.outer(v1, v2)
4844    tensor([[  1.,   2.,   3.],
4845            [  2.,   4.,   6.],
4846            [  3.,   6.,   9.],
4847            [  4.,   8.,  12.]])
4848""",
4849)
4850
4851add_docstr(
4852    torch.ger,
4853    r"""
4854ger(input, vec2, *, out=None) -> Tensor
4855
4856Alias of :func:`torch.outer`.
4857
4858.. warning::
4859    This function is deprecated and will be removed in a future PyTorch release.
4860    Use :func:`torch.outer` instead.
4861""",
4862)
4863
4864add_docstr(
4865    torch.get_default_dtype,
4866    r"""
4867get_default_dtype() -> torch.dtype
4868
4869Get the current default floating point :class:`torch.dtype`.
4870
4871Example::
4872
4873    >>> torch.get_default_dtype()  # initial default for floating point is torch.float32
4874    torch.float32
4875    >>> torch.set_default_dtype(torch.float64)
4876    >>> torch.get_default_dtype()  # default is now changed to torch.float64
4877    torch.float64
4878
4879""",
4880)
4881
4882add_docstr(
4883    torch.get_num_threads,
4884    r"""
4885get_num_threads() -> int
4886
Returns the number of threads used for parallelizing CPU operations.
4888""",
4889)
4890
4891add_docstr(
4892    torch.get_num_interop_threads,
4893    r"""
4894get_num_interop_threads() -> int
4895
Returns the number of threads used for inter-op parallelism on CPU
(e.g., in the JIT interpreter).
4898""",
4899)
4900
4901add_docstr(
4902    torch.gt,
4903    r"""
4904gt(input, other, *, out=None) -> Tensor
4905
4906Computes :math:`\text{input} > \text{other}` element-wise.
4907"""
4908    + r"""
4909
4910The second argument can be a number or a tensor whose shape is
4911:ref:`broadcastable <broadcasting-semantics>` with the first argument.
4912
4913Args:
4914    input (Tensor): the tensor to compare
4915    other (Tensor or float): the tensor or value to compare
4916
4917Keyword args:
4918    {out}
4919
4920Returns:
4921    A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere
4922
4923Example::
4924
4925    >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
    tensor([[False,  True],
            [False, False]])
4927""".format(**common_args),
4928)
4929
4930add_docstr(
4931    torch.greater,
4932    r"""
4933greater(input, other, *, out=None) -> Tensor
4934
4935Alias for :func:`torch.gt`.
4936""",
4937)
4938
4939add_docstr(
4940    torch.histc,
4941    r"""
4942histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor
4943
4944Computes the histogram of a tensor.
4945
The elements are sorted into equal-width bins between :attr:`min` and
:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and
maximum values of the data are used.

Elements lower than :attr:`min`, elements higher than :attr:`max`, and ``NaN``
elements are ignored.
4951
4952Args:
4953    {input}
4954    bins (int): number of histogram bins
4955    min (Scalar): lower end of the range (inclusive)
4956    max (Scalar): upper end of the range (inclusive)
4957
4958Keyword args:
4959    {out}
4960
4961Returns:
4962    Tensor: Histogram represented as a tensor
4963
4964Example::
4965
4966    >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3)
4967    tensor([ 0.,  2.,  1.,  0.])
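
Values below :attr:`min` or above :attr:`max` fall into no bin::

    >>> torch.histc(torch.tensor([-1., 1., 4.]), bins=4, min=0, max=3)
    tensor([0., 1., 0., 0.])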
4968""".format(**common_args),
4969)
4970
4971add_docstr(
4972    torch.histogram,
4973    r"""
4974histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor)
4975
4976Computes a histogram of the values in a tensor.
4977
4978:attr:`bins` can be an integer or a 1D tensor.
4979
4980If :attr:`bins` is an int, it specifies the number of equal-width bins.
4981By default, the lower and upper range of the bins is determined by the
4982minimum and maximum elements of the input tensor. The :attr:`range`
4983argument can be provided to specify a range for the bins.
4984
4985If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges
4986including the rightmost edge. It should contain at least 2 elements
4987and its elements should be increasing.
4988
4989Args:
4990    {input}
4991    bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor,
4992          defines the sequence of bin edges including the rightmost edge.
4993
4994Keyword args:
4995    range (tuple of float): Defines the range of the bins.
4996    weight (Tensor): If provided, weight should have the same shape as input. Each value in
4997                     input contributes its associated weight towards its bin's result.
4998    density (bool): If False, the result will contain the count (or total weight) in each bin.
4999                    If True, the result is the value of the probability density function over the bins,
5000                    normalized such that the integral over the range of the bins is 1.
    out (tuple, optional): The result tuple of two output tensors (hist, bin_edges).
5002
5003Returns:
5004    hist (Tensor): 1D Tensor containing the values of the histogram.
5005    bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins.
5006
5007Example::
5008
5009    >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]))
5010    (tensor([ 0.,  5.,  2.,  0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
5011    >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True)
5012    (tensor([ 0.,  0.9524,  0.3810,  0.]), tensor([0., 0.75, 1.5, 2.25, 3.]))
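
Explicit bin edges may be given as a 1D tensor; note that the rightmost bin
is closed on the right, so ``2.`` lands in the last bin::

    >>> torch.histogram(torch.tensor([1., 2., 1.]), bins=torch.tensor([0., 1., 2.]))
    (tensor([0., 3.]), tensor([0., 1., 2.]))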
5013""".format(**common_args),
5014)
5015
5016add_docstr(
5017    torch.histogramdd,
5018    r"""
5019histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[])
5020
5021Computes a multi-dimensional histogram of the values in a tensor.
5022
5023Interprets the elements of an input tensor whose innermost dimension has size N
5024as a collection of N-dimensional points. Maps each of the points into a set of
5025N-dimensional bins and returns the number of points (or total weight) in each bin.
5026
5027:attr:`input` must be a tensor with at least 2 dimensions.
5028If input has shape (M, N), each of its M rows defines a point in N-dimensional space.
5029If input has three or more dimensions, all but the last dimension are flattened.
5030
5031Each dimension is independently associated with its own strictly increasing sequence
5032of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D
5033tensors. Alternatively, bin edges may be constructed automatically by passing a
5034sequence of integers specifying the number of equal-width bins in each dimension.
5035
For each N-dimensional point in input:
    - Each of its coordinates is binned independently among the bin edges
      corresponding to its dimension
    - Binning results are combined to identify the N-dimensional bin (if any)
      into which the point falls
    - If the point falls into a bin, the bin's count (or total weight) is incremented
    - Points which do not fall into any bin do not contribute to the output
5043
5044:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int.
5045
5046If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences
5047of bin edges. Each 1D tensor should contain a strictly increasing sequence with at
5048least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying
the left and right edges of all bins. Every bin is inclusive of its left edge and
exclusive of its right edge; only the rightmost bin is also inclusive of its right edge.
5051
5052If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins
5053in each dimension. By default, the leftmost and rightmost bin edges in each dimension
5054are determined by the minimum and maximum elements of the input tensor in the
5055corresponding dimension. The :attr:`range` argument can be provided to manually
5056specify the leftmost and rightmost bin edges in each dimension.
5057
5058If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions.
5059
5060.. note::
5061    See also :func:`torch.histogram`, which specifically computes 1D histograms.
5062    While :func:`torch.histogramdd` infers the dimensionality of its bins and
5063    binned values from the shape of :attr:`input`, :func:`torch.histogram`
5064    accepts and flattens :attr:`input` of any shape.
5065
5066Args:
5067    {input}
5068    bins: Tensor[], int[], or int.
5069            If Tensor[], defines the sequences of bin edges.
5070            If int[], defines the number of equal-width bins in each dimension.
5071            If int, defines the number of equal-width bins for all dimensions.
5072Keyword args:
5073    range (sequence of float): Defines the leftmost and rightmost bin edges
5074                                in each dimension.
5075    weight (Tensor): By default, each value in the input has weight 1. If a weight
5076                        tensor is passed, each N-dimensional coordinate in input
5077                        contributes its associated weight towards its bin's result.
5078                        The weight tensor should have the same shape as the :attr:`input`
5079                        tensor excluding its innermost dimension N.
5080    density (bool): If False (default), the result will contain the count (or total weight)
5081                    in each bin. If True, each count (weight) is divided by the total count
5082                    (total weight), then divided by the volume of its associated bin.
5083Returns:
5084    hist (Tensor): N-dimensional Tensor containing the values of the histogram.
5085    bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges.
5086
5087Example::
5088    >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3],
5089    ...                   weight=torch.tensor([1., 2., 4., 8.]))
5090        torch.return_types.histogramdd(
5091            hist=tensor([[0., 1., 0.],
5092                         [2., 0., 0.],
5093                         [4., 0., 8.]]),
5094            bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]),
5095                       tensor([0.0000, 0.6667, 1.3333, 2.0000])))
5096
5097    >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2],
5098    ...                   range=[0., 1., 0., 1.], density=True)
5099        torch.return_types.histogramdd(
5100           hist=tensor([[2., 0.],
5101                        [0., 2.]]),
5102           bin_edges=(tensor([0.0000, 0.5000, 1.0000]),
5103                      tensor([0.0000, 0.5000, 1.0000])))
5104
5105""".format(**common_args),
5106)
5107# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798
5108torch.histogramdd.__module__ = "torch"
5109
5110add_docstr(
5111    torch.hypot,
5112    r"""
5113hypot(input, other, *, out=None) -> Tensor
5114
5115Given the legs of a right triangle, return its hypotenuse.
5116
5117.. math::
5118    \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
5119
5120The shapes of ``input`` and ``other`` must be
5121:ref:`broadcastable <broadcasting-semantics>`.
5122"""
5123    + r"""
5124Args:
5125    input (Tensor): the first input tensor
5126    other (Tensor): the second input tensor
5127
5128Keyword args:
5129    {out}
5130
5131Example::
5132
5133    >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
5134    tensor([5.0000, 5.6569, 6.4031])
5135
5136""".format(**common_args),
5137)
5138
5139add_docstr(
5140    torch.i0,
5141    r"""
5142i0(input, *, out=None) -> Tensor
5143
5144Alias for :func:`torch.special.i0`.
5145""",
5146)
5147
5148add_docstr(
5149    torch.igamma,
5150    r"""
5151igamma(input, other, *, out=None) -> Tensor
5152
5153Alias for :func:`torch.special.gammainc`.
5154""",
5155)
5156
5157add_docstr(
5158    torch.igammac,
5159    r"""
5160igammac(input, other, *, out=None) -> Tensor
5161
5162Alias for :func:`torch.special.gammaincc`.
5163""",
5164)
5165
5166add_docstr(
5167    torch.index_select,
5168    r"""
5169index_select(input, dim, index, *, out=None) -> Tensor
5170
5171Returns a new tensor which indexes the :attr:`input` tensor along dimension
5172:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`.
5173
5174The returned tensor has the same number of dimensions as the original tensor
5175(:attr:`input`).  The :attr:`dim`\ th dimension has the same size as the length
5176of :attr:`index`; other dimensions have the same size as in the original tensor.
5177
5178.. note:: The returned tensor does **not** use the same storage as the original
5179          tensor.  If :attr:`out` has a different shape than expected, we
5180          silently change it to the correct shape, reallocating the underlying
5181          storage if necessary.
5182
5183Args:
5184    {input}
5185    dim (int): the dimension in which we index
5186    index (IntTensor or LongTensor): the 1-D tensor containing the indices to index
5187
5188Keyword args:
5189    {out}
5190
5191Example::
5192
5193    >>> x = torch.randn(3, 4)
5194    >>> x
5195    tensor([[ 0.1427,  0.0231, -0.5414, -1.0009],
5196            [-0.4664,  0.2647, -0.1228, -1.1068],
5197            [-1.1734, -0.6571,  0.7230, -0.6004]])
5198    >>> indices = torch.tensor([0, 2])
5199    >>> torch.index_select(x, 0, indices)
5200    tensor([[ 0.1427,  0.0231, -0.5414, -1.0009],
5201            [-1.1734, -0.6571,  0.7230, -0.6004]])
5202    >>> torch.index_select(x, 1, indices)
5203    tensor([[ 0.1427, -0.5414],
5204            [-0.4664, -0.1228],
5205            [-1.1734,  0.7230]])
5206""".format(**common_args),
5207)
5208
5209add_docstr(
5210    torch.inverse,
5211    r"""
5212inverse(input, *, out=None) -> Tensor
5213
5214Alias for :func:`torch.linalg.inv`
5215""",
5216)
5217
5218add_docstr(
5219    torch.isin,
5220    r"""
5221isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor
5222
5223Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns
5224a boolean tensor of the same shape as :attr:`elements` that is True for elements
5225in :attr:`test_elements` and False otherwise.
5226
5227.. note::
5228    One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both.
5229
5230Args:
5231    elements (Tensor or Scalar): Input elements
5232    test_elements (Tensor or Scalar): Values against which to test for each input element
5233    assume_unique (bool, optional): If True, assumes both :attr:`elements` and
5234        :attr:`test_elements` contain unique elements, which can speed up the
5235        calculation. Default: False
5236    invert (bool, optional): If True, inverts the boolean return tensor, resulting in True
5237        values for elements *not* in :attr:`test_elements`. Default: False
5238
5239Returns:
5240    A boolean tensor of the same shape as :attr:`elements` that is True for elements in
5241    :attr:`test_elements` and False otherwise
5242
5243Example:
5244    >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3]))
5245    tensor([[False,  True],
5246            [ True, False]])
5247""",
5248)
5249
5250add_docstr(
5251    torch.isinf,
5252    r"""
5253isinf(input) -> Tensor
5254
5255Tests if each element of :attr:`input` is infinite
5256(positive or negative infinity) or not.
5257
5258.. note::
5259    Complex values are infinite when their real or imaginary part is
5260    infinite.
5261
5262Args:
5263    {input}
5264
5265Returns:
5266    A boolean tensor that is True where :attr:`input` is infinite and False elsewhere
5267
5268Example::
5269
5270    >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
5271    tensor([False,  True,  False,  True,  False])
5272""".format(**common_args),
5273)
5274
5275add_docstr(
5276    torch.isposinf,
5277    r"""
5278isposinf(input, *, out=None) -> Tensor
5279Tests if each element of :attr:`input` is positive infinity or not.
5280
5281Args:
5282  {input}
5283
5284Keyword args:
5285  {out}
5286
5287Example::
5288
5289    >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
5290    >>> torch.isposinf(a)
5291    tensor([False,  True, False])
5292""".format(**common_args),
5293)
5294
5295add_docstr(
5296    torch.isneginf,
5297    r"""
5298isneginf(input, *, out=None) -> Tensor
5299Tests if each element of :attr:`input` is negative infinity or not.
5300
5301Args:
5302  {input}
5303
5304Keyword args:
5305  {out}
5306
5307Example::
5308
5309    >>> a = torch.tensor([-float('inf'), float('inf'), 1.2])
5310    >>> torch.isneginf(a)
5311    tensor([ True, False, False])
5312""".format(**common_args),
5313)
5314
5315add_docstr(
5316    torch.isclose,
5317    r"""
5318isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor
5319
5320Returns a new tensor with boolean elements representing if each element of
5321:attr:`input` is "close" to the corresponding element of :attr:`other`.
5322Closeness is defined as:
5323
5324.. math::
5325    \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert
5326"""
5327    + r"""
5328
5329where :attr:`input` and :attr:`other` are finite. Where :attr:`input`
5330and/or :attr:`other` are nonfinite they are close if and only if
5331they are equal, with NaNs being considered equal to each other when
5332:attr:`equal_nan` is True.
5333
5334Args:
5335    input (Tensor): first tensor to compare
5336    other (Tensor): second tensor to compare
5337    atol (float, optional): absolute tolerance. Default: 1e-08
5338    rtol (float, optional): relative tolerance. Default: 1e-05
5339    equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False``
5340
5341Examples::
5342
5343    >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4)))
5344    tensor([ True, False, False])
5345    >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5)
5346    tensor([True, True])
5347""",
5348)
5349
5350add_docstr(
5351    torch.isfinite,
5352    r"""
5353isfinite(input) -> Tensor
5354
5355Returns a new tensor with boolean elements representing if each element is `finite` or not.
5356
5357Real values are finite when they are not NaN, negative infinity, or infinity.
5358Complex values are finite when both their real and imaginary parts are finite.
5359
5360Args:
5361    {input}
5362
5363Returns:
5364    A boolean tensor that is True where :attr:`input` is finite and False elsewhere
5365
5366Example::
5367
5368    >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')]))
5369    tensor([True,  False,  True,  False,  False])
5370""".format(**common_args),
5371)
5372
5373add_docstr(
5374    torch.isnan,
5375    r"""
5376isnan(input) -> Tensor
5377
5378Returns a new tensor with boolean elements representing if each element of :attr:`input`
5379is NaN or not. Complex values are considered NaN when either their real
5380and/or imaginary part is NaN.
5381
5382Arguments:
5383    {input}
5384
5385Returns:
5386    A boolean tensor that is True where :attr:`input` is NaN and False elsewhere
5387
5388Example::
5389
5390    >>> torch.isnan(torch.tensor([1, float('nan'), 2]))
5391    tensor([False, True, False])
5392""".format(**common_args),
5393)
5394
5395add_docstr(
5396    torch.isreal,
5397    r"""
5398isreal(input) -> Tensor
5399
5400Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not.
5401All real-valued types are considered real. Complex values are considered real when their imaginary part is 0.
5402
5403Arguments:
5404    {input}
5405
5406Returns:
5407    A boolean tensor that is True where :attr:`input` is real and False elsewhere
5408
5409Example::
5410
5411    >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j]))
5412    tensor([True, False, True])
5413""".format(**common_args),
5414)
5415
5416add_docstr(
5417    torch.is_floating_point,
5418    r"""
5419is_floating_point(input) -> (bool)
5420
5421Returns True if the data type of :attr:`input` is a floating point data type i.e.,
5422one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``.
5423
5424Args:
5425    {input}
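
Example (illustrative; a floating point tensor reports ``True``, an integral one ``False``)::

    >>> torch.is_floating_point(torch.tensor([1.]))
    True
    >>> torch.is_floating_point(torch.tensor([1]))
    False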
5426""".format(**common_args),
5427)
5428
5429add_docstr(
5430    torch.is_complex,
5431    r"""
5432is_complex(input) -> (bool)
5433
5434Returns True if the data type of :attr:`input` is a complex data type i.e.,
5435one of ``torch.complex64``, and ``torch.complex128``.
5436
5437Args:
5438    {input}
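
Example (illustrative; a Python complex literal produces a complex tensor)::

    >>> torch.is_complex(torch.tensor([1 + 1j]))
    True
    >>> torch.is_complex(torch.tensor([1.]))
    False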
5439""".format(**common_args),
5440)
5441
5442add_docstr(
5443    torch.is_grad_enabled,
5444    r"""
5445is_grad_enabled() -> (bool)
5446
5447Returns True if grad mode is currently enabled.
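
Example (illustrative; grad mode is on by default and disabled under :class:`torch.no_grad`)::

    >>> torch.is_grad_enabled()
    True
    >>> with torch.no_grad():
    ...     torch.is_grad_enabled()
    False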
5448""".format(**common_args),
5449)
5450
5451add_docstr(
5452    torch.is_inference_mode_enabled,
5453    r"""
5454is_inference_mode_enabled() -> (bool)
5455
5456Returns True if inference mode is currently enabled.
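
Example (illustrative; inference mode is off by default and enabled under :class:`torch.inference_mode`)::

    >>> torch.is_inference_mode_enabled()
    False
    >>> with torch.inference_mode():
    ...     torch.is_inference_mode_enabled()
    True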
5457""".format(**common_args),
5458)
5459
5460add_docstr(
5461    torch.is_inference,
5462    r"""
5463is_inference(input) -> (bool)
5464
5465Returns True if :attr:`input` is an inference tensor.
5466
5467A non-view tensor is an inference tensor if and only if it was
5468allocated during inference mode. A view tensor is an inference
5469tensor if and only if the tensor it is a view of is an inference tensor.
5470
5471For details on inference mode please see
5472`Inference Mode <https://pytorch.org/cppdocs/notes/inference_mode.html>`_.
5473
5474Args:
5475    {input}
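
Example (illustrative; tensors allocated under :class:`torch.inference_mode` are inference tensors)::

    >>> with torch.inference_mode():
    ...     x = torch.ones(1)
    >>> torch.is_inference(x)
    True
    >>> torch.is_inference(torch.ones(1))
    False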
5476""".format(**common_args),
5477)
5478
5479add_docstr(
5480    torch.is_conj,
5481    r"""
5482is_conj(input) -> (bool)
5483
5484Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`.
5485
5486Args:
5487    {input}
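
Example (illustrative; :func:`torch.conj` on a complex tensor returns a view with the conjugate bit set)::

    >>> x = torch.tensor([1 + 1j])
    >>> torch.is_conj(x)
    False
    >>> torch.is_conj(torch.conj(x))
    True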
5488""".format(**common_args),
5489)
5490
5491add_docstr(
5492    torch.is_nonzero,
5493    r"""
5494is_nonzero(input) -> (bool)
5495
5496Returns True if the :attr:`input` is a single element tensor which is not equal to zero
5497after type conversions.
5498i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or
5499``torch.tensor([False])``.
5500Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case
5501of sparse tensors).
5502
5503Args:
5504    {input}
5505
5506Examples::
5507
5508    >>> torch.is_nonzero(torch.tensor([0.]))
5509    False
5510    >>> torch.is_nonzero(torch.tensor([1.5]))
5511    True
5512    >>> torch.is_nonzero(torch.tensor([False]))
5513    False
5514    >>> torch.is_nonzero(torch.tensor([3]))
5515    True
5516    >>> torch.is_nonzero(torch.tensor([1, 3, 5]))
5517    Traceback (most recent call last):
5518    ...
5519    RuntimeError: bool value of Tensor with more than one value is ambiguous
5520    >>> torch.is_nonzero(torch.tensor([]))
5521    Traceback (most recent call last):
5522    ...
5523    RuntimeError: bool value of Tensor with no values is ambiguous
5524""".format(**common_args),
5525)
5526
5527add_docstr(
5528    torch.kron,
5529    r"""
5530kron(input, other, *, out=None) -> Tensor
5531
5532Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`.
5533
5534If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a
5535:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a
5536:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries:
5537
5538.. math::
5539    (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} =
5540        \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n},
5541
5542where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`.
5543If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions.
5544
5545Supports real-valued and complex-valued inputs.
5546
5547.. note::
5548    This function generalizes the typical definition of the Kronecker product for two matrices to two tensors,
5549    as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a
5550    :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix:
5551
5552    .. math::
5553        \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix}
5554        a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\
5555        \vdots & \ddots & \vdots \\
5556        a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix}
5557
5558    where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`.
5559
5560Arguments:
5561    input (Tensor)
5562    other (Tensor)
5563
5564Keyword args:
5565    out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None``
5566
5567Examples::
5568
5569    >>> mat1 = torch.eye(2)
5570    >>> mat2 = torch.ones(2, 2)
5571    >>> torch.kron(mat1, mat2)
5572    tensor([[1., 1., 0., 0.],
5573            [1., 1., 0., 0.],
5574            [0., 0., 1., 1.],
5575            [0., 0., 1., 1.]])
5576
5577    >>> mat1 = torch.eye(2)
5578    >>> mat2 = torch.arange(1, 5).reshape(2, 2)
5579    >>> torch.kron(mat1, mat2)
5580    tensor([[1., 2., 0., 0.],
5581            [3., 4., 0., 0.],
5582            [0., 0., 1., 2.],
5583            [0., 0., 3., 4.]])
5584""",
5585)
5586
5587add_docstr(
5588    torch.kthvalue,
5589    r"""
5590kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor)
5591
5592Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th
5593smallest element of each row of the :attr:`input` tensor in the given dimension
5594:attr:`dim`. And ``indices`` is the index location of each element found.
5595
5596If :attr:`dim` is not given, the last dimension of the `input` is chosen.
5597
5598If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors
5599are the same size as :attr:`input`, except in the dimension :attr:`dim` where
5600they are of size 1. Otherwise, :attr:`dim` is squeezed
5601(see :func:`torch.squeeze`), resulting in both the :attr:`values` and
5602:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor.
5603
5604.. note::
5605    When :attr:`input` is a CUDA tensor and there are multiple valid
5606    :attr:`k` th values, this function may nondeterministically return
5607    :attr:`indices` for any of them.
5608
5609Args:
5610    {input}
5611    k (int): k for the k-th smallest element
5612    dim (int, optional): the dimension to find the kth value along
5613    {keepdim}
5614
5615Keyword args:
5616    out (tuple, optional): the output tuple of (Tensor, LongTensor)
5617                           can be optionally given to be used as output buffers
5618
5619Example::
5620
5621    >>> x = torch.arange(1., 6.)
5622    >>> x
5623    tensor([ 1.,  2.,  3.,  4.,  5.])
5624    >>> torch.kthvalue(x, 4)
5625    torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3))
5626
5627    >>> x=torch.arange(1.,7.).resize_(2,3)
5628    >>> x
5629    tensor([[ 1.,  2.,  3.],
5630            [ 4.,  5.,  6.]])
5631    >>> torch.kthvalue(x, 2, 0, True)
5632    torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]]))
5633""".format(**single_dim_common),
5634)
5635
5636add_docstr(
5637    torch.lcm,
5638    r"""
5639lcm(input, other, *, out=None) -> Tensor
5640
5641Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`.
5642
5643Both :attr:`input` and :attr:`other` must have integer types.
5644
5645.. note::
5646    This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`.
5647
5648Args:
5649    {input}
5650    other (Tensor): the second input tensor
5651
5652Keyword arguments:
5653    {out}
5654
5655Example::
5656
5657    >>> a = torch.tensor([5, 10, 15])
5658    >>> b = torch.tensor([3, 4, 5])
5659    >>> torch.lcm(a, b)
5660    tensor([15, 20, 15])
5661    >>> c = torch.tensor([3])
5662    >>> torch.lcm(a, c)
5663    tensor([15, 30, 15])
5664""".format(**common_args),
5665)
5666
5667add_docstr(
5668    torch.ldexp,
5669    r"""
5670ldexp(input, other, *, out=None) -> Tensor
5671
5672Multiplies :attr:`input` by 2 ** :attr:`other`.
5673
5674.. math::
5675    \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i
5676"""
5677    + r"""
5678
5679Typically this function is used to construct floating point numbers by multiplying
5680mantissas in :attr:`input` with integral powers of two created from the exponents
5681in :attr:`other`.
5682
5683Args:
5684    {input}
5685    other (Tensor): a tensor of exponents, typically integers.
5686
5687Keyword args:
5688    {out}
5689
5690Example::
5691
5692    >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
5693    tensor([2.])
5694    >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
5695    tensor([ 2.,  4.,  8., 16.])
5696
5697
5698""".format(**common_args),
5699)
5700
5701add_docstr(
5702    torch.le,
5703    r"""
5704le(input, other, *, out=None) -> Tensor
5705
5706Computes :math:`\text{input} \leq \text{other}` element-wise.
5707"""
5708    + r"""
5709
5710The second argument can be a number or a tensor whose shape is
5711:ref:`broadcastable <broadcasting-semantics>` with the first argument.
5712
5713Args:
5714    input (Tensor): the tensor to compare
5715    other (Tensor or Scalar): the tensor or value to compare
5716
5717Keyword args:
5718    {out}
5719
5720Returns:
5721    A boolean tensor that is True where :attr:`input` is less than or equal to
5722    :attr:`other` and False elsewhere
5723
5724Example::
5725
5726    >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
5727    tensor([[True, False], [True, True]])
5728""".format(**common_args),
5729)
5730
5731add_docstr(
5732    torch.less_equal,
5733    r"""
5734less_equal(input, other, *, out=None) -> Tensor
5735
5736Alias for :func:`torch.le`.
5737""",
5738)
5739
5740add_docstr(
5741    torch.lerp,
5742    r"""
5743lerp(input, end, weight, *, out=None)
5744
5745Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based
5746on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor.
5747
5748.. math::
5749    \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i)
5750"""
5751    + r"""
5752The shapes of :attr:`start` and :attr:`end` must be
5753:ref:`broadcastable <broadcasting-semantics>`. If :attr:`weight` is a tensor, then
5754the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable <broadcasting-semantics>`.
5755
5756Args:
5757    input (Tensor): the tensor with the starting points
5758    end (Tensor): the tensor with the ending points
5759    weight (float or tensor): the weight for the interpolation formula
5760
5761Keyword args:
5762    {out}
5763
5764Example::
5765
5766    >>> start = torch.arange(1., 5.)
5767    >>> end = torch.empty(4).fill_(10)
5768    >>> start
5769    tensor([ 1.,  2.,  3.,  4.])
5770    >>> end
5771    tensor([ 10.,  10.,  10.,  10.])
5772    >>> torch.lerp(start, end, 0.5)
5773    tensor([ 5.5000,  6.0000,  6.5000,  7.0000])
5774    >>> torch.lerp(start, end, torch.full_like(start, 0.5))
5775    tensor([ 5.5000,  6.0000,  6.5000,  7.0000])
5776""".format(**common_args),
5777)
5778
5779add_docstr(
5780    torch.lgamma,
5781    r"""
5782lgamma(input, *, out=None) -> Tensor
5783
5784Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`.
5785
5786.. math::
5787    \text{out}_{i} = \ln |\Gamma(\text{input}_{i})|
5788"""
5789    + """
5790Args:
5791    {input}
5792
5793Keyword args:
5794    {out}
5795
5796Example::
5797
5798    >>> a = torch.arange(0.5, 2, 0.5)
5799    >>> torch.lgamma(a)
5800    tensor([ 0.5724,  0.0000, -0.1208])
5801""".format(**common_args),
5802)
5803
5804add_docstr(
5805    torch.linspace,
5806    r"""
5807linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
5808
5809Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
5810spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are:
5811
5812.. math::
5813    (\text{start},
5814    \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1},
5815    \ldots,
5816    \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1},
5817    \text{end})
5818"""
5819    + """
5820
5821From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior.
5822
5823Args:
5824    start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
5825    end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
5826    steps (int): size of the constructed tensor
5827
5828Keyword arguments:
5829    {out}
5830    dtype (torch.dtype, optional): the data type to perform the computation in.
5831        Default: if None, uses the global default dtype (see torch.get_default_dtype())
5832        when both :attr:`start` and :attr:`end` are real,
5833        and corresponding complex dtype when either is complex.
5834    {layout}
5835    {device}
5836    {requires_grad}
5837
5838
5839Example::
5840
5841    >>> torch.linspace(3, 10, steps=5)
5842    tensor([  3.0000,   4.7500,   6.5000,   8.2500,  10.0000])
5843    >>> torch.linspace(-10, 10, steps=5)
5844    tensor([-10.,  -5.,   0.,   5.,  10.])
5845    >>> torch.linspace(start=-10, end=10, steps=5)
5846    tensor([-10.,  -5.,   0.,   5.,  10.])
5847    >>> torch.linspace(start=-10, end=10, steps=1)
5848    tensor([-10.])
5849""".format(**factory_common_args),
5850)
5851
5852add_docstr(
5853    torch.log,
5854    r"""
5855log(input, *, out=None) -> Tensor
5856
5857Returns a new tensor with the natural logarithm of the elements
5858of :attr:`input`.
5859
5860.. math::
5861    y_{i} = \log_{e} (x_{i})
5862"""
5863    + r"""
5864
5865Args:
5866    {input}
5867
5868Keyword args:
5869    {out}
5870
5871Example::
5872
5873    >>> a = torch.rand(5) * 5
5874    >>> a
5875    tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739])
5876    >>> torch.log(a)
5877    tensor([ 1.5637,  1.4640,  0.1952, -1.4226,  1.5204])
5878""".format(**common_args),
5879)
5880
5881add_docstr(
5882    torch.log10,
5883    r"""
5884log10(input, *, out=None) -> Tensor
5885
5886Returns a new tensor with the logarithm to the base 10 of the elements
5887of :attr:`input`.
5888
5889.. math::
5890    y_{i} = \log_{10} (x_{i})
5891"""
5892    + r"""
5893
5894Args:
5895    {input}
5896
5897Keyword args:
5898    {out}
5899
5900Example::
5901
5902    >>> a = torch.rand(5)
5903    >>> a
5904    tensor([ 0.5224,  0.9354,  0.7257,  0.1301,  0.2251])
5905
5906
5907    >>> torch.log10(a)
5908    tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476])
5909
5910""".format(**common_args),
5911)
5912
5913add_docstr(
5914    torch.log1p,
5915    r"""
5916log1p(input, *, out=None) -> Tensor
5917
5918Returns a new tensor with the natural logarithm of (1 + :attr:`input`).
5919
5920.. math::
5921    y_i = \log_{e} (x_i + 1)
5922"""
5923    + r"""
5924.. note:: This function is more accurate than :func:`torch.log` for small
5925          values of :attr:`input`
5926
5927Args:
5928    {input}
5929
5930Keyword args:
5931    {out}
5932
5933Example::
5934
5935    >>> a = torch.randn(5)
5936    >>> a
5937    tensor([-1.0090, -0.9923,  1.0249, -0.5372,  0.2492])
5938    >>> torch.log1p(a)
5939    tensor([    nan, -4.8653,  0.7055, -0.7705,  0.2225])
5940""".format(**common_args),
5941)
5942
5943add_docstr(
5944    torch.log2,
5945    r"""
5946log2(input, *, out=None) -> Tensor
5947
5948Returns a new tensor with the logarithm to the base 2 of the elements
5949of :attr:`input`.
5950
5951.. math::
5952    y_{i} = \log_{2} (x_{i})
5953"""
5954    + r"""
5955
5956Args:
5957    {input}
5958
5959Keyword args:
5960    {out}
5961
5962Example::
5963
5964    >>> a = torch.rand(5)
5965    >>> a
5966    tensor([ 0.8419,  0.8003,  0.9971,  0.5287,  0.0490])
5967
5968
5969    >>> torch.log2(a)
5970    tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504])
5971
5972""".format(**common_args),
5973)
5974
5975add_docstr(
5976    torch.logaddexp,
5977    r"""
5978logaddexp(input, other, *, out=None) -> Tensor
5979
5980Logarithm of the sum of exponentiations of the inputs.
5981
5982Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful
5983in statistics where the calculated probabilities of events may be so small as to
5984exceed the range of normal floating point numbers. In such cases the logarithm
5985of the calculated probability is stored. This function allows adding
5986probabilities stored in such a fashion.
5987
5988This op should be disambiguated with :func:`torch.logsumexp` which performs a
5989reduction on a single tensor.
5990
5991Args:
5992    {input}
5993    other (Tensor): the second input tensor
5994
5995Keyword arguments:
5996    {out}
5997
5998Example::
5999
6000    >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
6001    tensor([-0.3069, -0.6867, -0.8731])
6002    >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
6003    tensor([-1., -2., -3.])
6004    >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
6005    tensor([1.1269e+00, 2.0000e+03, 3.0000e+04])
6006""".format(**common_args),
6007)
6008
6009add_docstr(
6010    torch.logaddexp2,
6011    r"""
6012logaddexp2(input, other, *, out=None) -> Tensor
6013
6014Logarithm of the sum of exponentiations of the inputs in base-2.
6015
6016Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See
6017:func:`torch.logaddexp` for more details.
6018
6019Args:
6020    {input}
6021    other (Tensor): the second input tensor
6022
6023Keyword arguments:
6024    {out}
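
Example (illustrative, mirroring the :func:`torch.logaddexp` examples in base 2; outputs follow from the definition above)::

    >>> torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1.0]))
    tensor([0.])
    >>> torch.logaddexp2(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
    tensor([-1., -2., -3.])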
6025""".format(**common_args),
6026)
6027
6028add_docstr(
6029    torch.xlogy,
6030    r"""
6031xlogy(input, other, *, out=None) -> Tensor
6032
6033Alias for :func:`torch.special.xlogy`.
6034""",
6035)
6036
6037add_docstr(
6038    torch.logical_and,
6039    r"""
6040logical_and(input, other, *, out=None) -> Tensor
6041
6042Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are
6043treated as ``True``.
6044
6045Args:
6046    {input}
6047    other (Tensor): the tensor to compute AND with
6048
6049Keyword args:
6050    {out}
6051
6052Example::
6053
6054    >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
6055    tensor([ True, False, False])
6056    >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
6057    >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
6058    >>> torch.logical_and(a, b)
6059    tensor([False, False,  True, False])
6060    >>> torch.logical_and(a.double(), b.double())
6061    tensor([False, False,  True, False])
6062    >>> torch.logical_and(a.double(), b)
6063    tensor([False, False,  True, False])
6064    >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool))
6065    tensor([False, False,  True, False])
6066""".format(**common_args),
6067)
6068
6069add_docstr(
6070    torch.logical_not,
6071    r"""
6072logical_not(input, *, out=None) -> Tensor
6073
6074Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool
6075dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``.
6076
6077Args:
6078    {input}
6079
6080Keyword args:
6081    {out}
6082
6083Example::
6084
6085    >>> torch.logical_not(torch.tensor([True, False]))
6086    tensor([False,  True])
6087    >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
6088    tensor([ True, False, False])
6089    >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
6090    tensor([ True, False, False])
6091    >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
6092    tensor([1, 0, 0], dtype=torch.int16)
6093""".format(**common_args),
6094)
6095
6096add_docstr(
6097    torch.logical_or,
6098    r"""
6099logical_or(input, other, *, out=None) -> Tensor
6100
6101Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
6102treated as ``True``.
6103
6104Args:
6105    {input}
6106    other (Tensor): the tensor to compute OR with
6107
6108Keyword args:
6109    {out}
6110
6111Example::
6112
6113    >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
6114    tensor([ True, False,  True])
6115    >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
6116    >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
6117    >>> torch.logical_or(a, b)
6118    tensor([ True,  True,  True, False])
6119    >>> torch.logical_or(a.double(), b.double())
6120    tensor([ True,  True,  True, False])
6121    >>> torch.logical_or(a.double(), b)
6122    tensor([ True,  True,  True, False])
6123    >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool))
6124    tensor([ True,  True,  True, False])
6125""".format(**common_args),
6126)
6127
6128add_docstr(
6129    torch.logical_xor,
6130    r"""
6131logical_xor(input, other, *, out=None) -> Tensor
6132
6133Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are
6134treated as ``True``.
6135
6136Args:
6137    {input}
6138    other (Tensor): the tensor to compute XOR with
6139
6140Keyword args:
6141    {out}
6142
6143Example::
6144
6145    >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
6146    tensor([False, False,  True])
6147    >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
6148    >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
6149    >>> torch.logical_xor(a, b)
6150    tensor([ True,  True, False, False])
6151    >>> torch.logical_xor(a.double(), b.double())
6152    tensor([ True,  True, False, False])
6153    >>> torch.logical_xor(a.double(), b)
6154    tensor([ True,  True, False, False])
6155    >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool))
6156    tensor([ True,  True, False, False])
6157""".format(**common_args),
6158)
6159
6160add_docstr(
6161    torch.logspace,
6162    """
6163logspace(start, end, steps, base=10.0, *, \
6164         out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
6165"""
6166    + r"""
6167
6168Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly
6169spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to
6170:math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale
6171with base :attr:`base`. That is, the values are:
6172
6173.. math::
6174    (\text{base}^{\text{start}},
6175    \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
6176    \ldots,
6177    \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})},
6178    \text{base}^{\text{end}})
6179"""
6180    + """
6181
6182
6183From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior.
6184
6185Args:
6186    start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional
6187    end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional
6188    steps (int): size of the constructed tensor
6189    base (float, optional): base of the logarithm function. Default: ``10.0``.
6190
6191Keyword arguments:
6192    {out}
6193    dtype (torch.dtype, optional): the data type to perform the computation in.
6194        Default: if None, uses the global default dtype (see torch.get_default_dtype())
6195        when both :attr:`start` and :attr:`end` are real,
6196        and corresponding complex dtype when either is complex.
6197    {layout}
6198    {device}
6199    {requires_grad}
6200
6201Example::
6202
6203    >>> torch.logspace(start=-10, end=10, steps=5)
6204    tensor([ 1.0000e-10,  1.0000e-05,  1.0000e+00,  1.0000e+05,  1.0000e+10])
6205    >>> torch.logspace(start=0.1, end=1.0, steps=5)
6206    tensor([  1.2589,   2.1135,   3.5481,   5.9566,  10.0000])
6207    >>> torch.logspace(start=0.1, end=1.0, steps=1)
6208    tensor([1.2589])
6209    >>> torch.logspace(start=2, end=2, steps=1, base=2)
6210    tensor([4.0])
6211""".format(**factory_common_args),
6212)
6213
6214add_docstr(
6215    torch.logsumexp,
6216    r"""
6217logsumexp(input, dim, keepdim=False, *, out=None)
6218
6219Returns the log of summed exponentials of each row of the :attr:`input`
6220tensor in the given dimension :attr:`dim`. The computation is numerically
6221stabilized.
6222
6223For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is
6224
6225    .. math::
6226        \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}})
6227
6228{keepdim_details}
6229
6230Args:
6231    {input}
6232    {opt_dim}
6233    {keepdim}
6234
6235Keyword args:
6236    {out}
6237
6238Example::
6239
6240    >>> a = torch.randn(3, 3)
6241    >>> torch.logsumexp(a, 1)
6242    tensor([1.4907, 1.0593, 1.5696])
6243    >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1)))
6244    tensor(1.6859e-07)
6245""".format(**multi_dim_common),
6246)
6247
6248add_docstr(
6249    torch.lt,
6250    r"""
6251lt(input, other, *, out=None) -> Tensor
6252
6253Computes :math:`\text{input} < \text{other}` element-wise.
6254"""
6255    + r"""
6256
6257The second argument can be a number or a tensor whose shape is
6258:ref:`broadcastable <broadcasting-semantics>` with the first argument.
6259
6260Args:
6261    input (Tensor): the tensor to compare
6262    other (Tensor or float): the tensor or value to compare
6263
6264Keyword args:
6265    {out}
6266
6267Returns:
6268    A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere
6269
6270Example::
6271
6272    >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
6273    tensor([[False, False], [True, False]])
6274""".format(**common_args),
6275)
6276
6277add_docstr(
6278    torch.lu_unpack,
6279    r"""
6280lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor)
6281
6282Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices.
6283
6284.. seealso::
6285
6286    :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient
6287    than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`.
6288
6289Args:
6290    LU_data (Tensor): the packed LU factorization data
6291    LU_pivots (Tensor): the packed LU factorization pivots
6292    unpack_data (bool): flag indicating if the data should be unpacked.
6293                        If ``False``, then the returned ``L`` and ``U`` are empty tensors.
6294                        Default: ``True``
6295    unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``.
6296                          If ``False``, then the returned ``P`` is  an empty tensor.
6297                          Default: ``True``
6298
6299Keyword args:
6300    out (tuple, optional): output tuple of three tensors. Ignored if `None`.
6301
6302Returns:
6303    A namedtuple ``(P, L, U)``
6304
6305Examples::
6306
6307    >>> A = torch.randn(2, 3, 3)
6308    >>> LU, pivots = torch.linalg.lu_factor(A)
6309    >>> P, L, U = torch.lu_unpack(LU, pivots)
6310    >>> # We can recover A from the factorization
6311    >>> A_ = P @ L @ U
6312    >>> torch.allclose(A, A_)
6313    True
6314
6315    >>> # LU factorization of a rectangular matrix:
6316    >>> A = torch.randn(2, 3, 2)
6317    >>> LU, pivots = torch.linalg.lu_factor(A)
6318    >>> P, L, U = torch.lu_unpack(LU, pivots)
6319    >>> # P, L, U are the same as returned by linalg.lu
6320    >>> P_, L_, U_ = torch.linalg.lu(A)
6321    >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_)
6322    True
6323
6324""".format(**common_args),
6325)
6326
6327add_docstr(
6328    torch.less,
6329    r"""
6330less(input, other, *, out=None) -> Tensor
6331
6332Alias for :func:`torch.lt`.
6333""",
6334)
6335
6336add_docstr(
6337    torch.lu_solve,
6338    r"""
6339lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor
6340
6341Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted
6342LU factorization of A from :func:`~linalg.lu_factor`.
6343
6344This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`.
6345
6346.. warning::
6347
6348    :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`.
6349    :func:`torch.lu_solve` will be removed in a future PyTorch release.
6350    ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with
6351
6352    .. code:: python
6353
6354        X = linalg.lu_solve(LU, pivots, B)
6355
6356Arguments:
6357    b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*`
6358                is zero or more batch dimensions.
6359    LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`,
6360                       where :math:`*` is zero or more batch dimensions.
6361    LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`,
6362                           where :math:`*` is zero or more batch dimensions.
6363                           The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of
6364                           :attr:`LU_data`.
6365
6366Keyword args:
6367    {out}
6368
6369Example::
6370
6371    >>> A = torch.randn(2, 3, 3)
6372    >>> b = torch.randn(2, 3, 1)
6373    >>> LU, pivots = torch.linalg.lu_factor(A)
6374    >>> x = torch.lu_solve(b, LU, pivots)
6375    >>> torch.dist(A @ x, b)
6376    tensor(1.00000e-07 *
6377           2.8312)
6378""".format(**common_args),
6379)
6380
6381add_docstr(
6382    torch.masked_select,
6383    r"""
6384masked_select(input, mask, *, out=None) -> Tensor
6385
6386Returns a new 1-D tensor which indexes the :attr:`input` tensor according to
6387the boolean mask :attr:`mask` which is a `BoolTensor`.
6388
6389The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need
6390to match, but they must be :ref:`broadcastable <broadcasting-semantics>`.
6391
6392.. note:: The returned tensor does **not** use the same storage
6393          as the original tensor
6394
6395Args:
6396    {input}
6397    mask  (BoolTensor): the tensor containing the binary mask to index with
6398
6399Keyword args:
6400    {out}
6401
6402Example::
6403
6404    >>> x = torch.randn(3, 4)
6405    >>> x
6406    tensor([[ 0.3552, -2.3825, -0.8297,  0.3477],
6407            [-1.2035,  1.2252,  0.5002,  0.6248],
6408            [ 0.1307, -2.0608,  0.1244,  2.0139]])
6409    >>> mask = x.ge(0.5)
6410    >>> mask
6411    tensor([[False, False, False, False],
6412            [False, True, True, True],
6413            [False, False, False, True]])
6414    >>> torch.masked_select(x, mask)
6415    tensor([ 1.2252,  0.5002,  0.6248,  2.0139])
6416""".format(**common_args),
6417)
6418
6419add_docstr(
6420    torch.matrix_power,
6421    r"""
6422matrix_power(input, n, *, out=None) -> Tensor
6423
6424Alias for :func:`torch.linalg.matrix_power`
6425""",
6426)
6427
6428add_docstr(
6429    torch.matrix_exp,
6430    r"""
6431matrix_exp(A) -> Tensor
6432
6433Alias for :func:`torch.linalg.matrix_exp`.
6434""",
6435)
6436
6437add_docstr(
6438    torch.max,
6439    r"""
6440max(input) -> Tensor
6441
6442Returns the maximum value of all elements in the ``input`` tensor.
6443
6444.. warning::
6445    This function produces deterministic (sub)gradients unlike ``max(dim=0)``
6446
6447Args:
6448    {input}
6449
6450Example::
6451
6452    >>> a = torch.randn(1, 3)
6453    >>> a
6454    tensor([[ 0.6763,  0.7445, -2.2369]])
6455    >>> torch.max(a)
6456    tensor(0.7445)
6457
6458.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
6459   :noindex:
6460
6461Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum
6462value of each row of the :attr:`input` tensor in the given dimension
6463:attr:`dim`. And ``indices`` is the index location of each maximum value found
6464(argmax).
6465
6466If ``keepdim`` is ``True``, the output tensors are of the same size
6467as ``input`` except in the dimension ``dim`` where they are of size 1.
6468Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting
6469in the output tensors having 1 fewer dimension than ``input``.
6470
6471.. note:: If there are multiple maximal values in a reduced row then
6472          the indices of the first maximal value are returned.
6473
6474Args:
6475    {input}
6476    {dim}
6477    {keepdim} Default: ``False``.
6478
6479Keyword args:
6480    out (tuple, optional): the result tuple of two output tensors (max, max_indices)
6481
6482Example::
6483
6484    >>> a = torch.randn(4, 4)
6485    >>> a
6486    tensor([[-1.2360, -0.2942, -0.1222,  0.8475],
6487            [ 1.1949, -1.1127, -2.2379, -0.6702],
6488            [ 1.5717, -0.9207,  0.1297, -1.8768],
6489            [-0.6172,  1.0036, -0.6060, -0.2432]])
6490    >>> torch.max(a, 1)
6491    torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1]))
6492
6493.. function:: max(input, other, *, out=None) -> Tensor
6494   :noindex:
6495
6496See :func:`torch.maximum`.
6497
6498""".format(**single_dim_common),
6499)
6500
6501add_docstr(
6502    torch.maximum,
6503    r"""
6504maximum(input, other, *, out=None) -> Tensor
6505
6506Computes the element-wise maximum of :attr:`input` and :attr:`other`.
6507
6508.. note::
6509    If one of the elements being compared is a NaN, then that element is returned.
6510    :func:`maximum` is not supported for tensors with complex dtypes.
6511
6512Args:
6513    {input}
6514    other (Tensor): the second input tensor
6515
6516Keyword args:
6517    {out}
6518
6519Example::
6520
6521    >>> a = torch.tensor((1, 2, -1))
6522    >>> b = torch.tensor((3, 0, 4))
6523    >>> torch.maximum(a, b)
6524    tensor([3, 2, 4])
6525""".format(**common_args),
6526)
6527
6528add_docstr(
6529    torch.fmax,
6530    r"""
6531fmax(input, other, *, out=None) -> Tensor
6532
6533Computes the element-wise maximum of :attr:`input` and :attr:`other`.
6534
6535This is like :func:`torch.maximum` except it handles NaNs differently:
6536if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum.
6537Only if both elements are NaN is NaN propagated.
6538
6539This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function.
6540
6541Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
6542:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
6543
6544Args:
6545    {input}
6546    other (Tensor): the second input tensor
6547
6548Keyword args:
6549    {out}
6550
6551Example::
6552
6553    >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')])
6554    >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')])
6555    >>> torch.fmax(a, b)
6556    tensor([9.7000, 0.5000, 3.1000,    nan])
6557""".format(**common_args),
6558)
6559
6560add_docstr(
6561    torch.amax,
6562    r"""
6563amax(input, dim, keepdim=False, *, out=None) -> Tensor
6564
6565Returns the maximum value of each slice of the :attr:`input` tensor in the given
6566dimension(s) :attr:`dim`.
6567
6568.. note::
6569    The difference between ``max``/``min`` and ``amax``/``amin`` is:
6570        - ``amax``/``amin`` supports reducing on multiple dimensions,
6571        - ``amax``/``amin`` does not return indices,
6572        - ``amax``/``amin`` evenly distributes gradient between equal values,
6573          while ``max(dim)``/``min(dim)`` propagates gradient only to a single
6574          index in the source tensor.
6575
6576{keepdim_details}
6577
6578Args:
6579    {input}
6580    {dim}
6581    {keepdim}
6582
6583Keyword args:
6584  {out}
6585
6586Example::
6587
6588    >>> a = torch.randn(4, 4)
6589    >>> a
6590    tensor([[ 0.8177,  1.4878, -0.2491,  0.9130],
6591            [-0.7158,  1.1775,  2.0992,  0.4817],
6592            [-0.0053,  0.0164, -1.3738, -0.0507],
6593            [ 1.9700,  1.1106, -1.0318, -1.0816]])
6594    >>> torch.amax(a, 1)
6595    tensor([1.4878, 2.0992, 0.0164, 1.9700])
6596""".format(**multi_dim_common),
6597)
6598
6599add_docstr(
6600    torch.argmax,
6601    r"""
6602argmax(input) -> LongTensor
6603
6604Returns the indices of the maximum value of all elements in the :attr:`input` tensor.
6605
6606This is the second value returned by :meth:`torch.max`. See its
6607documentation for the exact semantics of this method.
6608
6609.. note:: If there are multiple maximal values then the indices of the first maximal value are returned.
6610
6611Args:
6612    {input}
6613
6614Example::
6615
6616    >>> a = torch.randn(4, 4)
6617    >>> a
6618    tensor([[ 1.3398,  0.2663, -0.2686,  0.2450],
6619            [-0.7401, -0.8805, -0.3402, -1.1936],
6620            [ 0.4907, -1.3948, -1.0691, -0.3132],
6621            [-1.6092,  0.5419, -0.2993,  0.3195]])
6622    >>> torch.argmax(a)
6623    tensor(0)
6624
6625.. function:: argmax(input, dim, keepdim=False) -> LongTensor
6626   :noindex:
6627
6628Returns the indices of the maximum values of a tensor across a dimension.
6629
6630This is the second value returned by :meth:`torch.max`. See its
6631documentation for the exact semantics of this method.
6632
6633Args:
6634    {input}
6635    {dim} If ``None``, the argmax of the flattened input is returned.
6636    {keepdim}
6637
6638Example::
6639
6640    >>> a = torch.randn(4, 4)
6641    >>> a
6642    tensor([[ 1.3398,  0.2663, -0.2686,  0.2450],
6643            [-0.7401, -0.8805, -0.3402, -1.1936],
6644            [ 0.4907, -1.3948, -1.0691, -0.3132],
6645            [-1.6092,  0.5419, -0.2993,  0.3195]])
6646    >>> torch.argmax(a, dim=1)
6647    tensor([ 0,  2,  0,  1])
6648""".format(**single_dim_common),
6649)
6650
6651add_docstr(
6652    torch.argwhere,
6653    r"""
6654argwhere(input) -> Tensor
6655
6656Returns a tensor containing the indices of all non-zero elements of
6657:attr:`input`.  Each row in the result contains the indices of a non-zero
6658element in :attr:`input`. The result is sorted lexicographically, with
6659the last index changing the fastest (C-style).
6660
6661If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
6662:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
6663non-zero elements in the :attr:`input` tensor.
6664
6665.. note::
6666    This function is similar to NumPy's `argwhere`.
6667
6668    When :attr:`input` is on CUDA, this function causes host-device synchronization.
6669
6670Args:
6671    {input}
6672
6673Example::
6674
6675    >>> t = torch.tensor([1, 0, 1])
6676    >>> torch.argwhere(t)
6677    tensor([[0],
6678            [2]])
6679    >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
6680    >>> torch.argwhere(t)
6681    tensor([[0, 0],
6682            [0, 2],
6683            [1, 1],
6684            [1, 2]])
6685""",
6686)
6687
6688add_docstr(
6689    torch.mean,
6690    r"""
6691mean(input, *, dtype=None) -> Tensor
6692
6693Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex.
6694
6695Args:
6696    input (Tensor):
6697      the input tensor, either of floating point or complex dtype
6698
6699Keyword args:
6700    {dtype}
6701
6702Example::
6703
6704    >>> a = torch.randn(1, 3)
6705    >>> a
6706    tensor([[ 0.2294, -0.5481,  1.3288]])
6707    >>> torch.mean(a)
6708    tensor(0.3367)
6709
6710.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor
6711   :noindex:
6712
6713Returns the mean value of each row of the :attr:`input` tensor in the given
6714dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
6715reduce over all of them.
6716
6717{keepdim_details}
6718
6719Args:
6720    {input}
6721    {dim}
6722    {keepdim}
6723
6724Keyword args:
6725    {dtype}
6726    {out}
6727
6728.. seealso::
6729
6730    :func:`torch.nanmean` computes the mean value of `non-NaN` elements.
6731
6732Example::
6733
6734    >>> a = torch.randn(4, 4)
6735    >>> a
6736    tensor([[-0.3841,  0.6320,  0.4254, -0.7384],
6737            [-0.9644,  1.0131, -0.6549, -1.4279],
6738            [-0.2951, -1.3350, -0.7694,  0.5600],
6739            [ 1.0842, -0.9580,  0.3623,  0.2343]])
6740    >>> torch.mean(a, 1)
6741    tensor([-0.0163, -0.5085, -0.4599,  0.1807])
6742    >>> torch.mean(a, 1, True)
6743    tensor([[-0.0163],
6744            [-0.5085],
6745            [-0.4599],
6746            [ 0.1807]])
6747""".format(**multi_dim_common),
6748)
6749
6750add_docstr(
6751    torch.nanmean,
6752    r"""
6753nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
6754
6755Computes the mean of all `non-NaN` elements along the specified dimensions.
6756Input must be floating point or complex.
6757
6758This function is identical to :func:`torch.mean` when there are no `NaN` values
6759in the :attr:`input` tensor. In the presence of `NaN`, :func:`torch.mean` will
6760propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the
6761`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`).
6762
6763{keepdim_details}
6764
6765Args:
6766    input (Tensor): the input tensor, either of floating point or complex dtype
6767    {opt_dim}
6768    {keepdim}
6769
6770Keyword args:
6771    {dtype}
6772    {out}
6773
6774.. seealso::
6775
6776    :func:`torch.mean` computes the mean value, propagating `NaN`.
6777
6778Example::
6779
6780    >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]])
6781    >>> x.mean()
6782    tensor(nan)
6783    >>> x.nanmean()
6784    tensor(1.8000)
6785    >>> x.mean(dim=0)
6786    tensor([   nan, 1.5000, 2.5000])
6787    >>> x.nanmean(dim=0)
6788    tensor([1.0000, 1.5000, 2.5000])
6789
6790    # If all elements in the reduced dimensions are NaN then the result is NaN
6791    >>> torch.tensor([torch.nan]).nanmean()
6792    tensor(nan)
6793""".format(**multi_dim_common),
6794)
6795
6796add_docstr(
6797    torch.median,
6798    r"""
6799median(input) -> Tensor
6800
6801Returns the median of the values in :attr:`input`.
6802
6803.. note::
6804    The median is not unique for :attr:`input` tensors with an even number
6805    of elements. In this case the lower of the two medians is returned. To
6806    compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead.
6807
6808.. warning::
6809    This function produces deterministic (sub)gradients unlike ``median(dim=0)``
6810
6811Args:
6812    {input}
6813
6814Example::
6815
6816    >>> a = torch.randn(1, 3)
6817    >>> a
6818    tensor([[ 1.5219, -1.5212,  0.2202]])
6819    >>> torch.median(a)
6820    tensor(0.2202)
6821
6822.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
6823   :noindex:
6824
6825Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
6826in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`.
6827
6828By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
6829
6830If :attr:`keepdim` is ``True``, the output tensors are of the same size
6831as :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
6832Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
6833the outputs tensor having 1 fewer dimension than :attr:`input`.
6834
6835.. note::
6836    The median is not unique for :attr:`input` tensors with an even number
6837    of elements in the dimension :attr:`dim`. In this case the lower of the
6838    two medians is returned. To compute the mean of both medians in
6839    :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead.
6840
6841.. warning::
6842    ``indices`` does not necessarily contain the first occurrence of each
6843    median value found, unless it is unique.
6844    The exact implementation details are device-specific.
6845    Do not expect the same result when run on CPU and GPU in general.
6846    For the same reason do not expect the gradients to be deterministic.
6847
6848Args:
6849    {input}
6850    {dim}
6851    {keepdim}
6852
6853Keyword args:
6854    out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
6855                                      tensor, which must have dtype long, with their indices in the dimension
6856                                      :attr:`dim` of :attr:`input`.
6857
6858Example::
6859
6860    >>> a = torch.randn(4, 5)
6861    >>> a
6862    tensor([[ 0.2505, -0.3982, -0.9948,  0.3518, -1.3131],
6863            [ 0.3180, -0.6993,  1.0436,  0.0438,  0.2270],
6864            [-0.2751,  0.7303,  0.2192,  0.3321,  0.2488],
6865            [ 1.0778, -1.9510,  0.7048,  0.4742, -0.7125]])
6866    >>> torch.median(a, 1)
6867    torch.return_types.median(values=tensor([-0.3982,  0.2270,  0.2488,  0.4742]), indices=tensor([1, 4, 4, 3]))
6868""".format(**single_dim_common),
6869)
6870
6871add_docstr(
6872    torch.nanmedian,
6873    r"""
6874nanmedian(input) -> Tensor
6875
6876Returns the median of the values in :attr:`input`, ignoring ``NaN`` values.
6877
6878This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`.
6879When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``,
6880while this function will return the median of the non-``NaN`` elements in :attr:`input`.
6881If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``.
6882
6883Args:
6884    {input}
6885
6886Example::
6887
6888    >>> a = torch.tensor([1, float('nan'), 3, 2])
6889    >>> a.median()
6890    tensor(nan)
6891    >>> a.nanmedian()
6892    tensor(2.)
6893
6894.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
6895   :noindex:
6896
6897Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input`
6898in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values
6899found in the dimension :attr:`dim`.
6900
6901This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has
6902one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the
6903median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too.
6904
6905Args:
6906    {input}
6907    {dim}
6908    {keepdim}
6909
6910Keyword args:
6911    out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second
6912                                      tensor, which must have dtype long, with their indices in the dimension
6913                                      :attr:`dim` of :attr:`input`.
6914
6915Example::
6916
6917    >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]])
6918    >>> a
6919    tensor([[2., 3., 1.],
6920            [nan, 1., nan]])
6921    >>> a.median(0)
6922    torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1]))
6923    >>> a.nanmedian(0)
6924    torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0]))
6925""".format(**single_dim_common),
6926)
6927
6928add_docstr(
6929    torch.quantile,
6930    r"""
6931quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
6932
6933Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`.
6934
To compute the quantile, we map q in [0, 1] to the range of indices [0, n - 1] to find the location
of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with
indices ``i`` and ``j`` in the sorted order, the result is computed according to the given
:attr:`interpolation` method as follows:
6939
6940- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index.
6941- ``lower``: ``a``.
6942- ``higher``: ``b``.
- ``nearest``: ``a`` or ``b``, whichever index is closer to the computed quantile index (rounding down for .5 fractions).
6944- ``midpoint``: ``(a + b) / 2``.
6945
If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size
equal to the size of :attr:`q`; the remaining dimensions are what remains after the reduction.
6948
6949.. note::
6950    By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation.
6951
6952Args:
6953    {input}
6954    q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1].
6955    {dim}
6956    {keepdim}
6957
6958Keyword arguments:
6959    interpolation (str): interpolation method to use when the desired quantile lies between two data points.
6960                            Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
6961                            Default is ``linear``.
6962    {out}
6963
6964Example::
6965
6966    >>> a = torch.randn(2, 3)
6967    >>> a
6968    tensor([[ 0.0795, -1.2117,  0.9765],
6969            [ 1.1707,  0.6706,  0.4884]])
6970    >>> q = torch.tensor([0.25, 0.5, 0.75])
6971    >>> torch.quantile(a, q, dim=1, keepdim=True)
6972    tensor([[[-0.5661],
6973            [ 0.5795]],
6974
6975            [[ 0.0795],
6976            [ 0.6706]],
6977
6978            [[ 0.5280],
6979            [ 0.9206]]])
6980    >>> torch.quantile(a, q, dim=1, keepdim=True).shape
6981    torch.Size([3, 2, 1])
6982    >>> a = torch.arange(4.)
6983    >>> a
6984    tensor([0., 1., 2., 3.])
6985    >>> torch.quantile(a, 0.6, interpolation='linear')
6986    tensor(1.8000)
6987    >>> torch.quantile(a, 0.6, interpolation='lower')
6988    tensor(1.)
6989    >>> torch.quantile(a, 0.6, interpolation='higher')
6990    tensor(2.)
6991    >>> torch.quantile(a, 0.6, interpolation='midpoint')
6992    tensor(1.5000)
6993    >>> torch.quantile(a, 0.6, interpolation='nearest')
6994    tensor(2.)
6995    >>> torch.quantile(a, 0.4, interpolation='nearest')
6996    tensor(1.)
6997""".format(**single_dim_common),
6998)
6999
7000add_docstr(
7001    torch.nanquantile,
7002    r"""
7003nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor
7004
7005This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values,
7006computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did
7007not exist. If all values in a reduced row are ``NaN`` then the quantiles for
7008that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`.
7009
7010Args:
7011    {input}
7012    q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1]
7013    {dim}
7014    {keepdim}
7015
7016Keyword arguments:
7017    interpolation (str): interpolation method to use when the desired quantile lies between two data points.
7018                            Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``.
7019                            Default is ``linear``.
7020    {out}
7021
7022Example::
7023
7024    >>> t = torch.tensor([float('nan'), 1, 2])
7025    >>> t.quantile(0.5)
7026    tensor(nan)
7027    >>> t.nanquantile(0.5)
7028    tensor(1.5000)
7029    >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]])
7030    >>> t
7031    tensor([[nan, nan],
7032            [1., 2.]])
7033    >>> t.nanquantile(0.5, dim=0)
7034    tensor([1., 2.])
7035    >>> t.nanquantile(0.5, dim=1)
7036    tensor([   nan, 1.5000])
7037""".format(**single_dim_common),
7038)
7039
7040add_docstr(
7041    torch.min,
7042    r"""
7043min(input) -> Tensor
7044
7045Returns the minimum value of all elements in the :attr:`input` tensor.
7046
7047.. warning::
7048    This function produces deterministic (sub)gradients unlike ``min(dim=0)``
7049
7050Args:
7051    {input}
7052
7053Example::
7054
7055    >>> a = torch.randn(1, 3)
7056    >>> a
7057    tensor([[ 0.6750,  1.0857,  1.7197]])
7058    >>> torch.min(a)
7059    tensor(0.6750)
7060
7061.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor)
7062   :noindex:
7063
7064Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum
7065value of each row of the :attr:`input` tensor in the given dimension
:attr:`dim`, and ``indices`` is the index location of each minimum value found
7067(argmin).
7068
7069If :attr:`keepdim` is ``True``, the output tensors are of the same size as
7070:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
7071Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
7072the output tensors having 1 fewer dimension than :attr:`input`.
7073
7074.. note:: If there are multiple minimal values in a reduced row then
7075          the indices of the first minimal value are returned.
7076
7077Args:
7078    {input}
7079    {dim}
7080    {keepdim}
7081
7082Keyword args:
7083    out (tuple, optional): the tuple of two output tensors (min, min_indices)
7084
7085Example::
7086
7087    >>> a = torch.randn(4, 4)
7088    >>> a
7089    tensor([[-0.6248,  1.1334, -1.1899, -0.2803],
7090            [-1.4644, -0.2635, -0.3651,  0.6134],
7091            [ 0.2457,  0.0384,  1.0128,  0.7015],
7092            [-0.1153,  2.9849,  2.1458,  0.5788]])
7093    >>> torch.min(a, 1)
7094    torch.return_types.min(values=tensor([-1.1899, -1.4644,  0.0384, -0.1153]), indices=tensor([2, 0, 1, 0]))
7095
7096.. function:: min(input, other, *, out=None) -> Tensor
7097   :noindex:
7098
7099See :func:`torch.minimum`.
7100""".format(**single_dim_common),
7101)
7102
7103add_docstr(
7104    torch.minimum,
7105    r"""
7106minimum(input, other, *, out=None) -> Tensor
7107
7108Computes the element-wise minimum of :attr:`input` and :attr:`other`.
7109
7110.. note::
7111    If one of the elements being compared is a NaN, then that element is returned.
7112    :func:`minimum` is not supported for tensors with complex dtypes.
7113
7114Args:
7115    {input}
7116    other (Tensor): the second input tensor
7117
7118Keyword args:
7119    {out}
7120
7121Example::
7122
7123    >>> a = torch.tensor((1, 2, -1))
7124    >>> b = torch.tensor((3, 0, 4))
7125    >>> torch.minimum(a, b)
7126    tensor([1, 0, -1])
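
    >>> # NaN propagation, per the note above (inputs here are illustrative)
    >>> torch.minimum(torch.tensor([1., float('nan')]), torch.tensor([2., 0.]))
    tensor([1., nan])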
7127""".format(**common_args),
7128)
7129
7130add_docstr(
7131    torch.fmin,
7132    r"""
7133fmin(input, other, *, out=None) -> Tensor
7134
7135Computes the element-wise minimum of :attr:`input` and :attr:`other`.
7136
7137This is like :func:`torch.minimum` except it handles NaNs differently:
7138if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum.
7139Only if both elements are NaN is NaN propagated.
7140
7141This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function.
7142
7143Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
7144:ref:`type promotion <type-promotion-doc>`, and integer and floating-point inputs.
7145
7146Args:
7147    {input}
7148    other (Tensor): the second input tensor
7149
7150Keyword args:
7151    {out}
7152
7153Example::
7154
7155    >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')])
7156    >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')])
7157    >>> torch.fmin(a, b)
7158    tensor([-9.3000, 0.1000, 2.1000,    nan])
7159""".format(**common_args),
7160)
7161
7162add_docstr(
7163    torch.amin,
7164    r"""
7165amin(input, dim, keepdim=False, *, out=None) -> Tensor
7166
7167Returns the minimum value of each slice of the :attr:`input` tensor in the given
7168dimension(s) :attr:`dim`.
7169
7170.. note::
7171    The difference between ``max``/``min`` and ``amax``/``amin`` is:
7172        - ``amax``/``amin`` supports reducing on multiple dimensions,
7173        - ``amax``/``amin`` does not return indices,
7174        - ``amax``/``amin`` evenly distributes gradient between equal values,
7175          while ``max(dim)``/``min(dim)`` propagates gradient only to a single
7176          index in the source tensor.
7177
7178{keepdim_details}
7179
7180Args:
7181    {input}
7182    {dim}
7183    {keepdim}
7184
7185Keyword args:
    {out}
7187
7188Example::
7189
7190    >>> a = torch.randn(4, 4)
7191    >>> a
7192    tensor([[ 0.6451, -0.4866,  0.2987, -1.3312],
7193            [-0.5744,  1.2980,  1.8397, -0.2713],
7194            [ 0.9128,  0.9214, -1.7268, -0.2995],
7195            [ 0.9023,  0.4853,  0.9075, -1.6165]])
7196    >>> torch.amin(a, 1)
7197    tensor([-1.3312, -0.5744, -1.7268, -1.6165])
7198""".format(**multi_dim_common),
7199)
7200
7201add_docstr(
7202    torch.aminmax,
7203    r"""
7204aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max)
7205
7206Computes the minimum and maximum values of the :attr:`input` tensor.
7207
7208Args:
7209    input (Tensor):
7210        The input tensor
7211
7212Keyword Args:
7213    dim (Optional[int]):
7214        The dimension along which to compute the values. If `None`,
7215        computes the values over the entire :attr:`input` tensor.
7216        Default is `None`.
7217    keepdim (bool):
7218        If `True`, the reduced dimensions will be kept in the output
7219        tensor as dimensions with size 1 for broadcasting, otherwise
        they will be removed, as if calling :func:`torch.squeeze`.
7221        Default is `False`.
7222    out (Optional[Tuple[Tensor, Tensor]]):
7223        Optional tensors on which to write the result. Must have the same
7224        shape and dtype as the expected output.
7225        Default is `None`.
7226
7227Returns:
7228    A named tuple `(min, max)` containing the minimum and maximum values.
7229
7230Raises:
7231    RuntimeError
7232        If any of the dimensions to compute the values over has size 0.
7233
7234.. note::
7235    NaN values are propagated to the output if at least one value is NaN.
7236
7237.. seealso::
    :func:`torch.amin` computes just the minimum value;
    :func:`torch.amax` computes just the maximum value.
7240
7241Example::
7242
7243    >>> torch.aminmax(torch.tensor([1, -3, 5]))
7244    torch.return_types.aminmax(
7245    min=tensor(-3),
7246    max=tensor(5))
7247
7248    >>> # aminmax propagates NaNs
7249    >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan]))
7250    torch.return_types.aminmax(
7251    min=tensor(nan),
7252    max=tensor(nan))
7253
7254    >>> t = torch.arange(10).view(2, 5)
7255    >>> t
7256    tensor([[0, 1, 2, 3, 4],
7257            [5, 6, 7, 8, 9]])
7258    >>> t.aminmax(dim=0, keepdim=True)
7259    torch.return_types.aminmax(
7260    min=tensor([[0, 1, 2, 3, 4]]),
7261    max=tensor([[5, 6, 7, 8, 9]]))
7262""",
7263)
7264
7265add_docstr(
7266    torch.argmin,
7267    r"""
7268argmin(input, dim=None, keepdim=False) -> LongTensor
7269
Returns the indices of the minimum value(s) of the flattened tensor or along a dimension.
7271
7272This is the second value returned by :meth:`torch.min`. See its
7273documentation for the exact semantics of this method.
7274
7275.. note:: If there are multiple minimal values then the indices of the first minimal value are returned.
7276
7277Args:
7278    {input}
7279    {dim} If ``None``, the argmin of the flattened input is returned.
7280    {keepdim}
7281
7282Example::
7283
7284    >>> a = torch.randn(4, 4)
7285    >>> a
7286    tensor([[ 0.1139,  0.2254, -0.1381,  0.3687],
7287            [ 1.0100, -1.1975, -0.0102, -0.4732],
7288            [-0.9240,  0.1207, -0.7506, -1.0213],
7289            [ 1.7809, -1.2960,  0.9384,  0.1438]])
7290    >>> torch.argmin(a)
7291    tensor(13)
7292    >>> torch.argmin(a, dim=1)
7293    tensor([ 2,  1,  3,  1])
7294    >>> torch.argmin(a, dim=1, keepdim=True)
7295    tensor([[2],
7296            [1],
7297            [3],
7298            [1]])
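
    >>> # Ties: as noted above, the index of the first minimal value is returned
    >>> torch.argmin(torch.tensor([1., 0., 0.]))
    tensor(1)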
7299""".format(**single_dim_common),
7300)
7301
7302add_docstr(
7303    torch.mm,
7304    r"""
7305mm(input, mat2, *, out=None) -> Tensor
7306
7307Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
7308
7309If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
7310:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
7311
7312.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
7313          For broadcasting matrix products, see :func:`torch.matmul`.
7314
Supports strided and sparse 2-D tensors as inputs, and autograd with
respect to strided inputs.
7317
7318This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`.
7319If :attr:`out` is provided its layout will be used. Otherwise, the result
7320layout will be deduced from that of :attr:`input`.
7321
7322{sparse_beta_warning}
7323
7324{tf32_note}
7325
7326{rocm_fp16_note}
7327
7328Args:
7329    input (Tensor): the first matrix to be matrix multiplied
7330    mat2 (Tensor): the second matrix to be matrix multiplied
7331
7332Keyword args:
7333    {out}
7334
7335Example::
7336
7337    >>> mat1 = torch.randn(2, 3)
7338    >>> mat2 = torch.randn(3, 3)
7339    >>> torch.mm(mat1, mat2)
7340    tensor([[ 0.4851,  0.5037, -0.3633],
7341            [-0.0760, -3.6705,  2.4784]])
7342""".format(**common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes),
7343)
7344
7345add_docstr(
7346    torch.hspmm,
7347    r"""
7348hspmm(mat1, mat2, *, out=None) -> Tensor
7349
7350Performs a matrix multiplication of a :ref:`sparse COO matrix
7351<sparse-coo-docs>` :attr:`mat1` and a strided matrix :attr:`mat2`. The
7352result is a (1 + 1)-dimensional :ref:`hybrid COO matrix
7353<sparse-hybrid-coo-docs>`.
7354
7355Args:
7356    mat1 (Tensor): the first sparse matrix to be matrix multiplied
7357    mat2 (Tensor): the second strided matrix to be matrix multiplied
7358
7359Keyword args:
7360    {out}
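
A minimal sketch of usage (an identity matrix is assumed as the sparse operand
so the densified result is easy to verify)::

    >>> mat1 = torch.eye(2).to_sparse()
    >>> mat2 = torch.ones(2, 3)
    >>> torch.hspmm(mat1, mat2).to_dense()
    tensor([[1., 1., 1.],
            [1., 1., 1.]])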
7361""".format(**common_args),
7362)
7363
7364add_docstr(
7365    torch.matmul,
7366    r"""
7367matmul(input, other, *, out=None) -> Tensor
7368
7369Matrix product of two tensors.
7370
7371The behavior depends on the dimensionality of the tensors as follows:
7372
7373- If both tensors are 1-dimensional, the dot product (scalar) is returned.
7374- If both arguments are 2-dimensional, the matrix-matrix product is returned.
7375- If the first argument is 1-dimensional and the second argument is 2-dimensional,
7376  a 1 is prepended to its dimension for the purpose of the matrix multiply.
7377  After the matrix multiply, the prepended dimension is removed.
7378- If the first argument is 2-dimensional and the second argument is 1-dimensional,
7379  the matrix-vector product is returned.
7380- If both arguments are at least 1-dimensional and at least one argument is
7381  N-dimensional (where N > 2), then a batched matrix multiply is returned.  If the first
7382  argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the
7383  batched matrix multiply and removed after.  If the second argument is 1-dimensional, a
  1 is appended to its dimension for the purpose of the batched matrix multiply and removed after.
7385  The non-matrix (i.e. batch) dimensions are :ref:`broadcasted <broadcasting-semantics>` (and thus
7386  must be broadcastable).  For example, if :attr:`input` is a
7387  :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)`
7388  tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor.
7389
7390  Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs
7391  are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a
7392  :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)`
7393  tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the
7394  matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor.
7395
This operation has support for arguments with :ref:`sparse layouts<sparse-docs>`. In particular, the
matrix-matrix product (both arguments 2-dimensional) supports sparse arguments with the same restrictions
as :func:`torch.mm`.
7399
7400{sparse_beta_warning}
7401
7402{tf32_note}
7403
7404{rocm_fp16_note}
7405
7406.. note::
7407
7408    The 1-dimensional dot product version of this function does not support an :attr:`out` parameter.
7409
7410Arguments:
7411    input (Tensor): the first tensor to be multiplied
7412    other (Tensor): the second tensor to be multiplied
7413
7414Keyword args:
7415    {out}
7416
7417Example::
7418
7419    >>> # vector x vector
7420    >>> tensor1 = torch.randn(3)
7421    >>> tensor2 = torch.randn(3)
7422    >>> torch.matmul(tensor1, tensor2).size()
7423    torch.Size([])
7424    >>> # matrix x vector
7425    >>> tensor1 = torch.randn(3, 4)
7426    >>> tensor2 = torch.randn(4)
7427    >>> torch.matmul(tensor1, tensor2).size()
7428    torch.Size([3])
7429    >>> # batched matrix x broadcasted vector
7430    >>> tensor1 = torch.randn(10, 3, 4)
7431    >>> tensor2 = torch.randn(4)
7432    >>> torch.matmul(tensor1, tensor2).size()
7433    torch.Size([10, 3])
7434    >>> # batched matrix x batched matrix
7435    >>> tensor1 = torch.randn(10, 3, 4)
7436    >>> tensor2 = torch.randn(10, 4, 5)
7437    >>> torch.matmul(tensor1, tensor2).size()
7438    torch.Size([10, 3, 5])
7439    >>> # batched matrix x broadcasted matrix
7440    >>> tensor1 = torch.randn(10, 3, 4)
7441    >>> tensor2 = torch.randn(4, 5)
7442    >>> torch.matmul(tensor1, tensor2).size()
7443    torch.Size([10, 3, 5])
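    >>> # batch-dimension broadcasting, per the note above (shapes are illustrative)
    >>> tensor1 = torch.randn(10, 1, 3, 4)
    >>> tensor2 = torch.randn(2, 4, 5)
    >>> torch.matmul(tensor1, tensor2).size()
    torch.Size([10, 2, 3, 5])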
7444
7445""".format(**common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes),
7446)
7447
7448add_docstr(
7449    torch.mode,
7450    r"""
7451mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
7452
7453Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
7454value of each row of the :attr:`input` tensor in the given dimension
7455:attr:`dim`, i.e. a value which appears most often
7456in that row, and ``indices`` is the index location of each mode value found.
7457
7458By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
7459
7460If :attr:`keepdim` is ``True``, the output tensors are of the same size as
7461:attr:`input` except in the dimension :attr:`dim` where they are of size 1.
7462Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
7463in the output tensors having 1 fewer dimension than :attr:`input`.
7464
7465.. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
7466
7467Args:
7468    {input}
7469    {dim}
7470    {keepdim}
7471
7472Keyword args:
7473    out (tuple, optional): the result tuple of two output tensors (values, indices)
7474
7475Example::
7476
7477    >>> b = torch.tensor([[0, 0, 0, 2, 0, 0, 2],
7478    ...                   [0, 3, 0, 0, 2, 0, 1],
7479    ...                   [2, 2, 2, 0, 0, 0, 3],
7480    ...                   [2, 2, 3, 0, 1, 1, 0],
7481    ...                   [1, 1, 0, 0, 2, 0, 2]])
7482    >>> torch.mode(b, 0)
7483    torch.return_types.mode(
7484    values=tensor([0, 2, 0, 0, 0, 0, 2]),
7485    indices=tensor([1, 3, 4, 4, 2, 4, 4]))
7486""".format(**single_dim_common),
7487)
7488
7489add_docstr(
7490    torch.mul,
7491    r"""
7492mul(input, other, *, out=None) -> Tensor
7493
7494Multiplies :attr:`input` by :attr:`other`.
7495
7496
7497.. math::
7498    \text{out}_i = \text{input}_i \times \text{other}_i
7499"""
7500    + r"""
7501
7502Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
7503:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
7504
7505Args:
7506    {input}
    other (Tensor or Number): the tensor or number to multiply :attr:`input` by.
7508
7509Keyword args:
7510    {out}
7511
7512Examples::
7513
7514    >>> a = torch.randn(3)
7515    >>> a
7516    tensor([ 0.2015, -0.4255,  2.6087])
7517    >>> torch.mul(a, 100)
7518    tensor([  20.1494,  -42.5491,  260.8663])
7519
7520    >>> b = torch.randn(4, 1)
7521    >>> b
7522    tensor([[ 1.1207],
7523            [-0.3137],
7524            [ 0.0700],
7525            [ 0.8378]])
7526    >>> c = torch.randn(1, 4)
7527    >>> c
7528    tensor([[ 0.5146,  0.1216, -0.5244,  2.2382]])
7529    >>> torch.mul(b, c)
7530    tensor([[ 0.5767,  0.1363, -0.5877,  2.5083],
7531            [-0.1614, -0.0382,  0.1645, -0.7021],
7532            [ 0.0360,  0.0085, -0.0367,  0.1567],
7533            [ 0.4312,  0.1019, -0.4394,  1.8753]])
7534""".format(**common_args),
7535)
7536
7537add_docstr(
7538    torch.multiply,
7539    r"""
7540multiply(input, other, *, out=None)
7541
7542Alias for :func:`torch.mul`.
7543""",
7544)
7545
7546add_docstr(
7547    torch.multinomial,
7548    r"""
7549multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor
7550
7551Returns a tensor where each row contains :attr:`num_samples` indices sampled
7552from the multinomial (a stricter definition would be multivariate,
7553refer to :class:`torch.distributions.multinomial.Multinomial` for more details)
7554probability distribution located in the corresponding row
7555of tensor :attr:`input`.
7556
7557.. note::
7558    The rows of :attr:`input` do not need to sum to one (in which case we use
7559    the values as weights), but must be non-negative, finite and have
7560    a non-zero sum.
7561
7562Indices are ordered from left to right according to when each was sampled
7563(first samples are placed in first column).
7564
7565If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`.
7566
If :attr:`input` is a matrix with `m` rows, :attr:`out` is a matrix of shape
7568:math:`(m \times \text{{num\_samples}})`.
7569
7570If replacement is ``True``, samples are drawn with replacement.
7571
7572If not, they are drawn without replacement, which means that when a
7573sample index is drawn for a row, it cannot be drawn again for that row.
7574
7575.. note::
    When drawn without replacement, :attr:`num_samples` must be lower than the
    number of non-zero elements in :attr:`input` (or the minimum number of
    non-zero elements in each row of :attr:`input` if it is a matrix).
7579
7580Args:
7581    input (Tensor): the input tensor containing probabilities
7582    num_samples (int): number of samples to draw
7583    replacement (bool, optional): whether to draw with replacement or not
7584
7585Keyword args:
7586    {generator}
7587    {out}
7588
7589Example::
7590
7591    >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights
7592    >>> torch.multinomial(weights, 2)
7593    tensor([1, 2])
7594    >>> torch.multinomial(weights, 5) # ERROR!
7595    RuntimeError: cannot sample n_sample > prob_dist.size(-1) samples without replacement
7596    >>> torch.multinomial(weights, 4, replacement=True)
7597    tensor([ 2,  1,  1,  1])
7598""".format(**common_args),
7599)
7600
7601add_docstr(
7602    torch.mv,
7603    r"""
7604mv(input, vec, *, out=None) -> Tensor
7605
7606Performs a matrix-vector product of the matrix :attr:`input` and the vector
7607:attr:`vec`.
7608
7609If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of
7610size :math:`m`, :attr:`out` will be 1-D of size :math:`n`.
7611
7612.. note:: This function does not :ref:`broadcast <broadcasting-semantics>`.
7613
7614Args:
7615    input (Tensor): matrix to be multiplied
7616    vec (Tensor): vector to be multiplied
7617
7618Keyword args:
7619    {out}
7620
7621Example::
7622
7623    >>> mat = torch.randn(2, 3)
7624    >>> vec = torch.randn(3)
7625    >>> torch.mv(mat, vec)
7626    tensor([ 1.0404, -0.6361])
7627""".format(**common_args),
7628)
7629
7630add_docstr(
7631    torch.mvlgamma,
7632    r"""
7633mvlgamma(input, p, *, out=None) -> Tensor
7634
7635Alias for :func:`torch.special.multigammaln`.
7636""",
7637)
7638
7639add_docstr(
7640    torch.movedim,
7641    r"""
7642movedim(input, source, destination) -> Tensor
7643
7644Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source`
7645to the position(s) in :attr:`destination`.
7646
7647Other dimensions of :attr:`input` that are not explicitly moved remain in
7648their original order and appear at the positions not specified in :attr:`destination`.
7649
7650Args:
7651    {input}
7652    source (int or tuple of ints): Original positions of the dims to move. These must be unique.
7653    destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique.
7654
7655Examples::
7656
7657    >>> t = torch.randn(3,2,1)
7658    >>> t
7659    tensor([[[-0.3362],
7660            [-0.8437]],
7661
7662            [[-0.9627],
7663            [ 0.1727]],
7664
7665            [[ 0.5173],
7666            [-0.1398]]])
7667    >>> torch.movedim(t, 1, 0).shape
7668    torch.Size([2, 3, 1])
7669    >>> torch.movedim(t, 1, 0)
7670    tensor([[[-0.3362],
7671            [-0.9627],
7672            [ 0.5173]],
7673
7674            [[-0.8437],
7675            [ 0.1727],
7676            [-0.1398]]])
7677    >>> torch.movedim(t, (1, 2), (0, 1)).shape
7678    torch.Size([2, 1, 3])
7679    >>> torch.movedim(t, (1, 2), (0, 1))
7680    tensor([[[-0.3362, -0.9627,  0.5173]],
7681
7682            [[-0.8437,  0.1727, -0.1398]]])
7683""".format(**common_args),
7684)
7685
7686add_docstr(
7687    torch.moveaxis,
7688    r"""
7689moveaxis(input, source, destination) -> Tensor
7690
7691Alias for :func:`torch.movedim`.
7692
7693This function is equivalent to NumPy's moveaxis function.
7694
7695Examples::
7696
7697    >>> t = torch.randn(3,2,1)
7698    >>> t
7699    tensor([[[-0.3362],
7700            [-0.8437]],
7701
7702            [[-0.9627],
7703            [ 0.1727]],
7704
7705            [[ 0.5173],
7706            [-0.1398]]])
7707    >>> torch.moveaxis(t, 1, 0).shape
7708    torch.Size([2, 3, 1])
7709    >>> torch.moveaxis(t, 1, 0)
7710    tensor([[[-0.3362],
7711            [-0.9627],
7712            [ 0.5173]],
7713
7714            [[-0.8437],
7715            [ 0.1727],
7716            [-0.1398]]])
7717    >>> torch.moveaxis(t, (1, 2), (0, 1)).shape
7718    torch.Size([2, 1, 3])
7719    >>> torch.moveaxis(t, (1, 2), (0, 1))
7720    tensor([[[-0.3362, -0.9627,  0.5173]],
7721
7722            [[-0.8437,  0.1727, -0.1398]]])
7723""".format(**common_args),
7724)
7725
7726add_docstr(
7727    torch.swapdims,
7728    r"""
7729swapdims(input, dim0, dim1) -> Tensor
7730
7731Alias for :func:`torch.transpose`.
7732
7733This function is equivalent to NumPy's swapaxes function.
7734
7735Examples::
7736
7737    >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
7738    >>> x
7739    tensor([[[0, 1],
7740            [2, 3]],
7741
7742            [[4, 5],
7743            [6, 7]]])
7744    >>> torch.swapdims(x, 0, 1)
7745    tensor([[[0, 1],
7746            [4, 5]],
7747
7748            [[2, 3],
7749            [6, 7]]])
7750    >>> torch.swapdims(x, 0, 2)
7751    tensor([[[0, 4],
7752            [2, 6]],
7753
7754            [[1, 5],
7755            [3, 7]]])
7756""".format(**common_args),
7757)
7758
7759add_docstr(
7760    torch.swapaxes,
7761    r"""
7762swapaxes(input, axis0, axis1) -> Tensor
7763
7764Alias for :func:`torch.transpose`.
7765
7766This function is equivalent to NumPy's swapaxes function.
7767
7768Examples::
7769
7770    >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
7771    >>> x
7772    tensor([[[0, 1],
7773            [2, 3]],
7774
7775            [[4, 5],
7776            [6, 7]]])
7777    >>> torch.swapaxes(x, 0, 1)
7778    tensor([[[0, 1],
7779            [4, 5]],
7780
7781            [[2, 3],
7782            [6, 7]]])
7783    >>> torch.swapaxes(x, 0, 2)
7784    tensor([[[0, 4],
7785            [2, 6]],
7786
7787            [[1, 5],
7788            [3, 7]]])
7789""".format(**common_args),
7790)
7791
7792add_docstr(
7793    torch.narrow,
7794    r"""
7795narrow(input, dim, start, length) -> Tensor
7796
Returns a new tensor that is a narrowed version of the :attr:`input` tensor. The
dimension :attr:`dim` ranges over the elements of :attr:`input` from :attr:`start`
up to, but not including, ``start + length``. The returned tensor and the
:attr:`input` tensor share the same underlying storage.
7800
7801Args:
7802    input (Tensor): the tensor to narrow
7803    dim (int): the dimension along which to narrow
7804    start (int or Tensor): index of the element to start the narrowed dimension
7805        from. Can be negative, which means indexing from the end of `dim`. If
        `Tensor`, it must be a 0-dim integral `Tensor` (bools not allowed)
    length (int): length of the narrowed dimension; must be non-negative
7808
7809Example::
7810
7811    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
7812    >>> torch.narrow(x, 0, 0, 2)
7813    tensor([[ 1,  2,  3],
7814            [ 4,  5,  6]])
7815    >>> torch.narrow(x, 1, 1, 2)
7816    tensor([[ 2,  3],
7817            [ 5,  6],
7818            [ 8,  9]])
7819    >>> torch.narrow(x, -1, torch.tensor(-1), 1)
7820    tensor([[3],
7821            [6],
7822            [9]])
7823""",
7824)
7825
7826add_docstr(
7827    torch.narrow_copy,
7828    r"""
7829narrow_copy(input, dim, start, length, *, out=None) -> Tensor
7830
7831Same as :meth:`Tensor.narrow` except this returns a copy rather
7832than shared storage. This is primarily for sparse tensors, which
7833do not have a shared-storage narrow method.
7834
7835Args:
7836    input (Tensor): the tensor to narrow
7837    dim (int): the dimension along which to narrow
7838    start (int): index of the element to start the narrowed dimension from. Can
7839        be negative, which means indexing from the end of `dim`
    length (int): length of the narrowed dimension; must be non-negative
7841
7842Keyword args:
7843    {out}
7844
7845Example::
7846
7847    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
7848    >>> torch.narrow_copy(x, 0, 0, 2)
7849    tensor([[ 1,  2,  3],
7850            [ 4,  5,  6]])
7851    >>> torch.narrow_copy(x, 1, 1, 2)
7852    tensor([[ 2,  3],
7853            [ 5,  6],
7854            [ 8,  9]])
7855    >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2)
7856    >>> torch.narrow_copy(s, 0, 0, 1)
7857    tensor(indices=tensor([[0, 0],
7858                           [0, 1]]),
7859           values=tensor([[[0, 1],
7860                           [2, 3]],
7861
7862                          [[4, 5],
7863                           [6, 7]]]),
7864           size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo)
7865
7866.. seealso::
7867
        :func:`torch.narrow` for a non-copy variant.
7869
7870""".format(**common_args),
7871)
7872
7873add_docstr(
7874    torch.nan_to_num,
7875    r"""
7876nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor
7877
7878Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input`
7879with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively.
7880By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the
7881greatest finite value representable by :attr:`input`'s dtype, and negative infinity
7882is replaced with the least finite value representable by :attr:`input`'s dtype.
7883
7884Args:
7885    {input}
7886    nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero.
7887    posinf (Number, optional): if a Number, the value to replace positive infinity values with.
7888        If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype.
7889        Default is None.
7890    neginf (Number, optional): if a Number, the value to replace negative infinity values with.
7891        If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype.
7892        Default is None.
7893
7894Keyword args:
7895    {out}
7896
7897Example::
7898
7899    >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
7900    >>> torch.nan_to_num(x)
7901    tensor([ 0.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
7902    >>> torch.nan_to_num(x, nan=2.0)
7903    tensor([ 2.0000e+00,  3.4028e+38, -3.4028e+38,  3.1400e+00])
7904    >>> torch.nan_to_num(x, nan=2.0, posinf=1.0)
7905    tensor([ 2.0000e+00,  1.0000e+00, -3.4028e+38,  3.1400e+00])
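    >>> # all three replacement values overridden (values here are illustrative)
    >>> torch.nan_to_num(x, nan=2.0, posinf=1.0, neginf=-1.0)
    tensor([ 2.0000,  1.0000, -1.0000,  3.1400])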
7906
7907""".format(**common_args),
7908)
7909
7910add_docstr(
7911    torch.ne,
7912    r"""
7913ne(input, other, *, out=None) -> Tensor
7914
7915Computes :math:`\text{input} \neq \text{other}` element-wise.
7916"""
7917    + r"""
7918
7919The second argument can be a number or a tensor whose shape is
7920:ref:`broadcastable <broadcasting-semantics>` with the first argument.
7921
7922Args:
7923    input (Tensor): the tensor to compare
7924    other (Tensor or float): the tensor or value to compare
7925
7926Keyword args:
7927    {out}
7928
7929Returns:
7930    A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere
7931
7932Example::
7933
7934    >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]]))
7935    tensor([[False, True], [True, False]])
7936""".format(**common_args),
7937)
7938
7939add_docstr(
7940    torch.not_equal,
7941    r"""
7942not_equal(input, other, *, out=None) -> Tensor
7943
7944Alias for :func:`torch.ne`.
7945""",
7946)
7947
7948add_docstr(
7949    torch.neg,
7950    r"""
7951neg(input, *, out=None) -> Tensor
7952
7953Returns a new tensor with the negative of the elements of :attr:`input`.
7954
7955.. math::
7956    \text{out} = -1 \times \text{input}
7957"""
7958    + r"""
7959Args:
7960    {input}
7961
7962Keyword args:
7963    {out}
7964
7965Example::
7966
7967    >>> a = torch.randn(5)
7968    >>> a
7969    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
7970    >>> torch.neg(a)
7971    tensor([-0.0090,  0.2262,  0.0682,  0.2866, -0.3940])
7972""".format(**common_args),
7973)
7974
7975add_docstr(
7976    torch.negative,
7977    r"""
7978negative(input, *, out=None) -> Tensor
7979
7980Alias for :func:`torch.neg`
7981""",
7982)
7983
7984add_docstr(
7985    torch.nextafter,
7986    r"""
7987nextafter(input, other, *, out=None) -> Tensor
7988
Returns the next floating-point value after :attr:`input` towards :attr:`other`, elementwise.
7990
7991The shapes of ``input`` and ``other`` must be
7992:ref:`broadcastable <broadcasting-semantics>`.
7993
7994Args:
7995    input (Tensor): the first input tensor
7996    other (Tensor): the second input tensor
7997
7998Keyword args:
7999    {out}
8000
8001Example::
8002
8003    >>> eps = torch.finfo(torch.float32).eps
8004    >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps])
8005    tensor([True, True])
8006
8007""".format(**common_args),
8008)
8009
8010add_docstr(
8011    torch.nonzero,
8012    r"""
8013nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors
8014
8015.. note::
8016    :func:`torch.nonzero(..., as_tuple=False) <torch.nonzero>` (default) returns a
8017    2-D tensor where each row is the index for a nonzero value.
8018
8019    :func:`torch.nonzero(..., as_tuple=True) <torch.nonzero>` returns a tuple of 1-D
8020    index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
8021    gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
8022    contains nonzero indices for a certain dimension.
8023
8024    See below for more details on the two behaviors.
8025
8026    When :attr:`input` is on CUDA, :func:`torch.nonzero() <torch.nonzero>` causes
8027    host-device synchronization.
8028
8029**When** :attr:`as_tuple` **is** ``False`` **(default)**:
8030
8031Returns a tensor containing the indices of all non-zero elements of
8032:attr:`input`.  Each row in the result contains the indices of a non-zero
8033element in :attr:`input`. The result is sorted lexicographically, with
8034the last index changing the fastest (C-style).
8035
8036If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
8037:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
8038non-zero elements in the :attr:`input` tensor.
8039
8040**When** :attr:`as_tuple` **is** ``True``:
8041
8042Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
8043each containing the indices (in that dimension) of all non-zero elements of
:attr:`input`.
8045
8046If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
8047tensors of size :math:`z`, where :math:`z` is the total number of
8048non-zero elements in the :attr:`input` tensor.
8049
8050As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
8051value, it is treated as a one-dimensional tensor with one element.
8052
8053Args:
8054    {input}
8055
8056Keyword args:
8057    out (LongTensor, optional): the output tensor containing indices
8058
8059Returns:
8060    LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output
8061    tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
8062    each dimension, containing the indices of each nonzero element along that
8063    dimension.
8064
8065Example::
8066
8067    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
8068    tensor([[ 0],
8069            [ 1],
8070            [ 2],
8071            [ 4]])
8072    >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
8073    ...                             [0.0, 0.4, 0.0, 0.0],
8074    ...                             [0.0, 0.0, 1.2, 0.0],
8075    ...                             [0.0, 0.0, 0.0,-0.4]]))
8076    tensor([[ 0,  0],
8077            [ 1,  1],
8078            [ 2,  2],
8079            [ 3,  3]])
8080    >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
8081    (tensor([0, 1, 2, 4]),)
8082    >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
8083    ...                             [0.0, 0.4, 0.0, 0.0],
8084    ...                             [0.0, 0.0, 1.2, 0.0],
8085    ...                             [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
8086    (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
8087    >>> torch.nonzero(torch.tensor(5), as_tuple=True)
8088    (tensor([0]),)
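
    >>> # as_tuple=True enables advanced indexing (see the note above);
    >>> # this small sketch recovers the nonzero values themselves
    >>> x = torch.tensor([[1, 0], [0, 2]])
    >>> x[x.nonzero(as_tuple=True)]
    tensor([1, 2])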
8089""".format(**common_args),
8090)
8091
8092add_docstr(
8093    torch.normal,
8094    r"""
8095normal(mean, std, *, generator=None, out=None) -> Tensor
8096
8097Returns a tensor of random numbers drawn from separate normal distributions
8098whose mean and standard deviation are given.
8099
The :attr:`mean` is a tensor with the mean of
each output element's normal distribution.

The :attr:`std` is a tensor with the standard deviation of
each output element's normal distribution.
8105
8106The shapes of :attr:`mean` and :attr:`std` don't need to match, but the
total number of elements in each tensor needs to be the same.
8108
8109.. note:: When the shapes do not match, the shape of :attr:`mean`
          is used as the shape for the returned output tensor.
8111
8112.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes
8113          its device with the CPU.
8114
8115Args:
8116    mean (Tensor): the tensor of per-element means
8117    std (Tensor): the tensor of per-element standard deviations
8118
8119Keyword args:
8120    {generator}
8121    {out}
8122
8123Example::
8124
8125    >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1))
8126    tensor([  1.0425,   3.5672,   2.7969,   4.2925,   4.7229,   6.2134,
8127              8.0505,   8.1408,   9.0563,  10.0566])
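
    >>> # mismatched shapes with equal numel: the output takes the shape of
    >>> # the mean, per the note above (an illustrative check)
    >>> torch.normal(mean=torch.zeros(2, 3), std=torch.ones(6)).shape
    torch.Size([2, 3])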
8128
8129.. function:: normal(mean=0.0, std, *, out=None) -> Tensor
8130   :noindex:
8131
Similar to the function above, but the mean is shared among all drawn
elements.
8134
8135Args:
8136    mean (float, optional): the mean for all distributions
8137    std (Tensor): the tensor of per-element standard deviations
8138
8139Keyword args:
8140    {out}
8141
8142Example::
8143
8144    >>> torch.normal(mean=0.5, std=torch.arange(1., 6.))
8145    tensor([-1.2793, -1.0732, -2.0687,  5.1177, -1.2303])
8146
8147.. function:: normal(mean, std=1.0, *, out=None) -> Tensor
8148   :noindex:
8149
Similar to the function above, but the standard deviation is shared among
all drawn elements.
8152
8153Args:
8154    mean (Tensor): the tensor of per-element means
8155    std (float, optional): the standard deviation for all distributions
8156
8157Keyword args:
    out (Tensor, optional): the output tensor.
8159
8160Example::
8161
8162    >>> torch.normal(mean=torch.arange(1., 6.))
8163    tensor([ 1.1552,  2.6148,  2.6535,  5.8318,  4.2361])
8164
8165.. function:: normal(mean, std, size, *, out=None) -> Tensor
8166   :noindex:
8167
8168Similar to the function above, but the means and standard deviations are shared
8169among all drawn elements. The resulting tensor has size given by :attr:`size`.
8170
8171Args:
8172    mean (float): the mean for all distributions
8173    std (float): the standard deviation for all distributions
8174    size (int...): a sequence of integers defining the shape of the output tensor.
8175
8176Keyword args:
8177    {out}
8178
8179Example::
8180
8181    >>> torch.normal(2, 3, size=(1, 4))
8182    tensor([[-1.3987, -1.9544,  3.6048,  0.7909]])
8183""".format(**common_args),
8184)
8185
8186add_docstr(
8187    torch.numel,
8188    r"""
8189numel(input) -> int
8190
8191Returns the total number of elements in the :attr:`input` tensor.
8192
8193Args:
8194    {input}
8195
8196Example::
8197
8198    >>> a = torch.randn(1, 2, 3, 4, 5)
8199    >>> torch.numel(a)
8200    120
8201    >>> a = torch.zeros(4,4)
8202    >>> torch.numel(a)
8203    16
8204
8205""".format(**common_args),
8206)
8207
8208add_docstr(
8209    torch.ones,
8210    r"""
8211ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
8212
8213Returns a tensor filled with the scalar value `1`, with the shape defined
8214by the variable argument :attr:`size`.
8215
8216Args:
8217    size (int...): a sequence of integers defining the shape of the output tensor.
8218        Can be a variable number of arguments or a collection like a list or tuple.
8219
8220Keyword arguments:
8221    {out}
8222    {dtype}
8223    {layout}
8224    {device}
8225    {requires_grad}
8226
8227Example::
8228
8229    >>> torch.ones(2, 3)
8230    tensor([[ 1.,  1.,  1.],
8231            [ 1.,  1.,  1.]])
8232
8233    >>> torch.ones(5)
8234    tensor([ 1.,  1.,  1.,  1.,  1.])
8235
8236""".format(**factory_common_args),
8237)
8238
8239add_docstr(
8240    torch.ones_like,
8241    r"""
8242ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
8243
8244Returns a tensor filled with the scalar value `1`, with the same size as
8245:attr:`input`. ``torch.ones_like(input)`` is equivalent to
8246``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
8247
8248.. warning::
8249    As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
8250    the old ``torch.ones_like(input, out=output)`` is equivalent to
8251    ``torch.ones(input.size(), out=output)``.
8252
8253Args:
8254    {input}
8255
8256Keyword arguments:
8257    {dtype}
8258    {layout}
8259    {device}
8260    {requires_grad}
8261    {memory_format}
8262
8263Example::
8264
8265    >>> input = torch.empty(2, 3)
8266    >>> torch.ones_like(input)
8267    tensor([[ 1.,  1.,  1.],
8268            [ 1.,  1.,  1.]])
8269""".format(**factory_like_common_args),
8270)
8271
8272add_docstr(
8273    torch.orgqr,
8274    r"""
8275orgqr(input, tau) -> Tensor
8276
8277Alias for :func:`torch.linalg.householder_product`.
8278""",
8279)
8280
8281add_docstr(
8282    torch.ormqr,
8283    r"""
8284ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor
8285
8286Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix.
8287
Multiplies an :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`,
8289where `Q` is represented using Householder reflectors `(input, tau)`.
8290See `Representation of Orthogonal or Unitary Matrices`_ for further details.
8291
8292If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`.
8293When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`.
8294It has size :math:`n \times n` otherwise.
8295If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op.
8296
8297Supports inputs of float, double, cfloat and cdouble dtypes.
8298Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions.
8299
8300.. seealso::
8301        :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q`
8302        from the QR decomposition.
8303
8304.. note::
        This function supports backward, but it is only fast when ``(input, tau)`` do not require gradients
        and/or ``tau.size(-1)`` is very small.
8308
8309Args:
8310    input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions
                    and `mn` equals `m` or `n` depending on :attr:`left`.
8312    tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions.
8313    other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
8314    left (bool): controls the order of multiplication.
8315    transpose (bool): controls whether the matrix `Q` is conjugate transposed or not.
8316
8317Keyword args:
8318    out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`.
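
A brief sketch of usage, assuming ``(input, tau)`` produced by :func:`torch.geqrf`
(the comparison against an explicitly materialized `Q` is illustrative)::

    >>> A = torch.randn(3, 3)
    >>> h, tau = torch.geqrf(A)
    >>> C = torch.randn(3, 2)
    >>> Q = torch.linalg.qr(A, mode='complete').Q
    >>> torch.allclose(torch.ormqr(h, tau, C), Q @ C)
    True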
8319
8320.. _Representation of Orthogonal or Unitary Matrices:
8321    https://www.netlib.org/lapack/lug/node128.html
8322""",
8323)
8324
8325add_docstr(
8326    torch.permute,
8327    r"""
8328permute(input, dims) -> Tensor
8329
8330Returns a view of the original tensor :attr:`input` with its dimensions permuted.
8331
8332Args:
8333    {input}
8334    dims (tuple of int): The desired ordering of dimensions
8335
8336Example:
8337    >>> x = torch.randn(2, 3, 5)
8338    >>> x.size()
8339    torch.Size([2, 3, 5])
8340    >>> torch.permute(x, (2, 0, 1)).size()
8341    torch.Size([5, 2, 3])
8342""".format(**common_args),
8343)
8344
8345add_docstr(
8346    torch.poisson,
8347    r"""
8348poisson(input, generator=None) -> Tensor
8349
8350Returns a tensor of the same size as :attr:`input` with each element
8351sampled from a Poisson distribution with rate parameter given by the corresponding
8352element in :attr:`input` i.e.,
8353
8354.. math::
8355    \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i)
8356
8357:attr:`input` must be non-negative.
8358
8359Args:
8360    input (Tensor): the input tensor containing the rates of the Poisson distribution
8361
8362Keyword args:
8363    {generator}
8364
8365Example::
8366
8367    >>> rates = torch.rand(4, 4) * 5  # rate parameter between 0 and 5
8368    >>> torch.poisson(rates)
8369    tensor([[9., 1., 3., 5.],
8370            [8., 6., 6., 0.],
8371            [0., 4., 5., 3.],
8372            [2., 1., 4., 2.]])
8373""".format(**common_args),
8374)
8375
8376add_docstr(
8377    torch.polygamma,
8378    r"""
8379polygamma(n, input, *, out=None) -> Tensor
8380
8381Alias for :func:`torch.special.polygamma`.
8382""",
8383)
8384
8385add_docstr(
8386    torch.positive,
8387    r"""
8388positive(input) -> Tensor
8389
8390Returns :attr:`input`.
8391Throws a runtime error if :attr:`input` is a bool tensor.
8392"""
8393    + r"""
8394Args:
8395    {input}
8396
8397Example::
8398
8399    >>> t = torch.randn(5)
8400    >>> t
8401    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
8402    >>> torch.positive(t)
8403    tensor([ 0.0090, -0.2262, -0.0682, -0.2866,  0.3940])
8404""".format(**common_args),
8405)
8406
8407add_docstr(
8408    torch.pow,
8409    r"""
8410pow(input, exponent, *, out=None) -> Tensor
8411
8412Takes the power of each element in :attr:`input` with :attr:`exponent` and
8413returns a tensor with the result.
8414
8415:attr:`exponent` can be either a single ``float`` number or a `Tensor`
8416with the same number of elements as :attr:`input`.
8417
8418When :attr:`exponent` is a scalar value, the operation applied is:
8419
8420.. math::
8421    \text{out}_i = x_i ^ \text{exponent}
8422
8423When :attr:`exponent` is a tensor, the operation applied is:
8424
8425.. math::
8426    \text{out}_i = x_i ^ {\text{exponent}_i}
8427"""
8428    + r"""
8429When :attr:`exponent` is a tensor, the shapes of :attr:`input`
8430and :attr:`exponent` must be :ref:`broadcastable <broadcasting-semantics>`.
8431
8432Args:
8433    {input}
8434    exponent (float or tensor): the exponent value
8435
8436Keyword args:
8437    {out}
8438
8439Example::
8440
8441    >>> a = torch.randn(4)
8442    >>> a
8443    tensor([ 0.4331,  1.2475,  0.6834, -0.2791])
8444    >>> torch.pow(a, 2)
8445    tensor([ 0.1875,  1.5561,  0.4670,  0.0779])
8446    >>> exp = torch.arange(1., 5.)
8447
8448    >>> a = torch.arange(1., 5.)
8449    >>> a
8450    tensor([ 1.,  2.,  3.,  4.])
8451    >>> exp
8452    tensor([ 1.,  2.,  3.,  4.])
8453    >>> torch.pow(a, exp)
8454    tensor([   1.,    4.,   27.,  256.])
8455
8456.. function:: pow(self, exponent, *, out=None) -> Tensor
8457   :noindex:
8458
8459:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor.
8460The returned tensor :attr:`out` is of the same shape as :attr:`exponent`
8461
8462The operation applied is:
8463
8464.. math::
8465    \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}}
8466
8467Args:
8468    self (float): the scalar base value for the power operation
8469    exponent (Tensor): the exponent tensor
8470
8471Keyword args:
8472    {out}
8473
8474Example::
8475
8476    >>> exp = torch.arange(1., 5.)
8477    >>> base = 2
8478    >>> torch.pow(base, exp)
8479    tensor([  2.,   4.,   8.,  16.])
8480""".format(**common_args),
8481)
8482
8483add_docstr(
8484    torch.float_power,
8485    r"""
8486float_power(input, exponent, *, out=None) -> Tensor
8487
8488Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision.
If neither input is complex, this returns a ``torch.float64`` tensor,
and if one or more inputs is complex, it returns a ``torch.complex128`` tensor.
8491
8492.. note::
8493    This function always computes in double precision, unlike :func:`torch.pow`,
8494    which implements more typical :ref:`type promotion <type-promotion-doc>`.
8495    This is useful when the computation needs to be performed in a wider or more precise dtype,
8496    or the results of the computation may contain fractional values not representable in the input dtypes,
8497    like when an integer base is raised to a negative integer exponent.
8498
8499Args:
8500    input (Tensor or Number): the base value(s)
8501    exponent (Tensor or Number): the exponent value(s)
8502
8503Keyword args:
8504    {out}
8505
8506Example::
8507
8508    >>> a = torch.randint(10, (4,))
8509    >>> a
8510    tensor([6, 4, 7, 1])
8511    >>> torch.float_power(a, 2)
8512    tensor([36., 16., 49.,  1.], dtype=torch.float64)
8513
8514    >>> a = torch.arange(1, 5)
8515    >>> a
8516    tensor([ 1,  2,  3,  4])
8517    >>> exp = torch.tensor([2, -3, 4, -5])
8518    >>> exp
8519    tensor([ 2, -3,  4, -5])
8520    >>> torch.float_power(a, exp)
8521    tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64)
8522""".format(**common_args),
8523)
8524
8525add_docstr(
8526    torch.prod,
8527    r"""
8528prod(input, *, dtype=None) -> Tensor
8529
8530Returns the product of all elements in the :attr:`input` tensor.
8531
8532Args:
8533    {input}
8534
8535Keyword args:
8536    {dtype}
8537
8538Example::
8539
8540    >>> a = torch.randn(1, 3)
8541    >>> a
8542    tensor([[-0.8020,  0.5428, -1.5854]])
8543    >>> torch.prod(a)
8544    tensor(0.6902)
8545
8546.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
8547   :noindex:
8548
8549Returns the product of each row of the :attr:`input` tensor in the given
8550dimension :attr:`dim`.
8551
8552{keepdim_details}
8553
8554Args:
8555    {input}
8556    {dim}
8557    {keepdim}
8558
8559Keyword args:
8560    {dtype}
8561
8562Example::
8563
8564    >>> a = torch.randn(4, 2)
8565    >>> a
8566    tensor([[ 0.5261, -0.3837],
8567            [ 1.1857, -0.2498],
8568            [-1.1646,  0.0705],
8569            [ 1.1131, -1.0629]])
8570    >>> torch.prod(a, 1)
8571    tensor([-0.2018, -0.2962, -0.0821, -1.1831])
8572""".format(**single_dim_common),
8573)
8574
8575add_docstr(
8576    torch.promote_types,
8577    r"""
8578promote_types(type1, type2) -> dtype
8579
Returns the :class:`torch.dtype` with the smallest size and scalar kind that is
neither smaller nor of lower kind than either `type1` or `type2`. See type promotion
8582:ref:`documentation <type-promotion-doc>` for more information on the type
8583promotion logic.
8584
8585Args:
8586    type1 (:class:`torch.dtype`)
8587    type2 (:class:`torch.dtype`)
8588
8589Example::
8590
8591    >>> torch.promote_types(torch.int32, torch.float32)
8592    torch.float32
8593    >>> torch.promote_types(torch.uint8, torch.long)
8594    torch.long
8595""",
8596)
8597
8598add_docstr(
8599    torch.qr,
8600    r"""
8601qr(input, some=True, *, out=None) -> (Tensor, Tensor)
8602
8603Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
8604and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
8605with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
8606:math:`R` being an upper triangular matrix or batch of upper triangular matrices.
8607
8608If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
8609Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
8610
8611.. warning::
8612
8613    :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
8614    and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
8615    replaced with a string parameter :attr:`mode`.
8616
8617    ``Q, R = torch.qr(A)`` should be replaced with
8618
8619    .. code:: python
8620
8621        Q, R = torch.linalg.qr(A)
8622
8623    ``Q, R = torch.qr(A, some=False)`` should be replaced with
8624
8625    .. code:: python
8626
8627        Q, R = torch.linalg.qr(A, mode="complete")
8628
8629.. warning::
8630          If you plan to backpropagate through QR, note that the current backward implementation
8631          is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
8632          columns of :attr:`input` are linearly independent.
8633          This behavior will probably change once QR supports pivoting.
8634
8635.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
8636          and may produce different (valid) decompositions on different device types
8637          or different platforms.
8638
8639Args:
8640    input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
8641                batch dimensions consisting of matrices of dimension :math:`m \times n`.
8642    some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
8643                complete QR decomposition. If `k = min(m, n)` then:
8644
                  * ``some=True``: returns `(Q, R)` with dimensions (m, k), (k, n) (default)

                  * ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
8648
8649Keyword args:
8650    out (tuple, optional): tuple of `Q` and `R` tensors.
8651                The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
8652
8653Example::
8654
8655    >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
8656    >>> q, r = torch.qr(a)
8657    >>> q
8658    tensor([[-0.8571,  0.3943,  0.3314],
8659            [-0.4286, -0.9029, -0.0343],
8660            [ 0.2857, -0.1714,  0.9429]])
8661    >>> r
8662    tensor([[ -14.0000,  -21.0000,   14.0000],
8663            [   0.0000, -175.0000,   70.0000],
8664            [   0.0000,    0.0000,  -35.0000]])
8665    >>> torch.mm(q, r).round()
8666    tensor([[  12.,  -51.,    4.],
8667            [   6.,  167.,  -68.],
8668            [  -4.,   24.,  -41.]])
8669    >>> torch.mm(q.t(), q).round()
8670    tensor([[ 1.,  0.,  0.],
8671            [ 0.,  1., -0.],
8672            [ 0., -0.,  1.]])
8673    >>> a = torch.randn(3, 4, 5)
8674    >>> q, r = torch.qr(a, some=False)
8675    >>> torch.allclose(torch.matmul(q, r), a)
8676    True
    >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
8678    True
8679""",
8680)
8681
8682add_docstr(
8683    torch.rad2deg,
8684    r"""
8685rad2deg(input, *, out=None) -> Tensor
8686
8687Returns a new tensor with each of the elements of :attr:`input`
8688converted from angles in radians to degrees.
8689
8690Args:
8691    {input}
8692
8693Keyword arguments:
8694    {out}
8695
8696Example::
8697
8698    >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
8699    >>> torch.rad2deg(a)
8700    tensor([[ 180.0233, -180.0233],
8701            [ 359.9894, -359.9894],
8702            [  89.9544,  -89.9544]])
8703
8704""".format(**common_args),
8705)
8706
8707add_docstr(
8708    torch.deg2rad,
8709    r"""
8710deg2rad(input, *, out=None) -> Tensor
8711
8712Returns a new tensor with each of the elements of :attr:`input`
8713converted from angles in degrees to radians.
8714
8715Args:
8716    {input}
8717
8718Keyword arguments:
8719    {out}
8720
8721Example::
8722
8723    >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
8724    >>> torch.deg2rad(a)
8725    tensor([[ 3.1416, -3.1416],
8726            [ 6.2832, -6.2832],
8727            [ 1.5708, -1.5708]])
8728
8729""".format(**common_args),
8730)
8731
8732add_docstr(
8733    torch.heaviside,
8734    r"""
8735heaviside(input, values, *, out=None) -> Tensor
8736
8737Computes the Heaviside step function for each element in :attr:`input`.
8738The Heaviside step function is defined as:
8739
8740.. math::
    \text{heaviside}(input, values) = \begin{cases}
8742        0, & \text{if input < 0}\\
8743        values, & \text{if input == 0}\\
8744        1, & \text{if input > 0}
8745    \end{cases}
8746"""
8747    + r"""
8748
8749Args:
8750    {input}
8751    values (Tensor): The values to use where :attr:`input` is zero.
8752
8753Keyword arguments:
8754    {out}
8755
8756Example::
8757
8758    >>> input = torch.tensor([-1.5, 0, 2.0])
8759    >>> values = torch.tensor([0.5])
8760    >>> torch.heaviside(input, values)
8761    tensor([0.0000, 0.5000, 1.0000])
8762    >>> values = torch.tensor([1.2, -2.0, 3.5])
8763    >>> torch.heaviside(input, values)
8764    tensor([0., -2., 1.])
8765
8766""".format(**common_args),
8767)
8768
8769add_docstr(
8770    torch.rand,
8771    """
8772rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \
8773requires_grad=False, pin_memory=False) -> Tensor
8774"""
8775    + r"""
8776Returns a tensor filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`.
8778
8779The shape of the tensor is defined by the variable argument :attr:`size`.
8780
8781Args:
8782    size (int...): a sequence of integers defining the shape of the output tensor.
8783        Can be a variable number of arguments or a collection like a list or tuple.
8784
8785Keyword args:
8786    {generator}
8787    {out}
8788    {dtype}
8789    {layout}
8790    {device}
8791    {requires_grad}
8792    {pin_memory}
8793
8794Example::
8795
8796    >>> torch.rand(4)
8797    tensor([ 0.5204,  0.2503,  0.3525,  0.5673])
8798    >>> torch.rand(2, 3)
8799    tensor([[ 0.8237,  0.5781,  0.6879],
8800            [ 0.3816,  0.7249,  0.0998]])
8801""".format(**factory_common_args),
8802)
8803
8804add_docstr(
8805    torch.rand_like,
8806    r"""
8807rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
8808
8809Returns a tensor with the same size as :attr:`input` that is filled with
8810random numbers from a uniform distribution on the interval :math:`[0, 1)`.
8811``torch.rand_like(input)`` is equivalent to
8812``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
8813
8814Args:
8815    {input}
8816
8817Keyword args:
8818    {dtype}
8819    {layout}
8820    {device}
8821    {requires_grad}
8822    {memory_format}
8823
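The following is an illustrative sketch; the values are drawn at random, so
only the shape of the result is deterministic.

Example::

    >>> x = torch.empty(2, 3)
    >>> torch.rand_like(x).shape
    torch.Size([2, 3])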
8824""".format(**factory_like_common_args),
8825)
8826
8827add_docstr(
8828    torch.randint,
8829    """
8830randint(low=0, high, size, \\*, generator=None, out=None, \
8831dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
8832
8833Returns a tensor filled with random integers generated uniformly
8834between :attr:`low` (inclusive) and :attr:`high` (exclusive).
8835
8836The shape of the tensor is defined by the variable argument :attr:`size`.
8837
8838.. note::
8839    With the global dtype default (``torch.float32``), this function returns
8840    a tensor with dtype ``torch.int64``.
8841
8842Args:
8843    low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
8844    high (int): One above the highest integer to be drawn from the distribution.
8845    size (tuple): a tuple defining the shape of the output tensor.
8846
8847Keyword args:
8848    {generator}
8849    {out}
    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. Default: if ``None``,
        this function returns a tensor with dtype ``torch.int64``.
8852    {layout}
8853    {device}
8854    {requires_grad}
8855
8856Example::
8857
8858    >>> torch.randint(3, 5, (3,))
8859    tensor([4, 3, 4])
8860
8861
8862    >>> torch.randint(10, (2, 2))
8863    tensor([[0, 2],
8864            [5, 5]])
8865
8866
8867    >>> torch.randint(3, 10, (2, 2))
8868    tensor([[4, 5],
8869            [6, 7]])
8870
8871
8872""".format(**factory_common_args),
8873)
8874
8875add_docstr(
8876    torch.randint_like,
8877    """
8878randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
8879memory_format=torch.preserve_format) -> Tensor
8880
8881Returns a tensor with the same shape as Tensor :attr:`input` filled with
8882random integers generated uniformly between :attr:`low` (inclusive) and
8883:attr:`high` (exclusive).
8884
.. note::
8886    With the global dtype default (``torch.float32``), this function returns
8887    a tensor with dtype ``torch.int64``.
8888
8889Args:
8890    {input}
8891    low (int, optional): Lowest integer to be drawn from the distribution. Default: 0.
8892    high (int): One above the highest integer to be drawn from the distribution.
8893
8894Keyword args:
8895    {dtype}
8896    {layout}
8897    {device}
8898    {requires_grad}
8899    {memory_format}
8900
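The following is an illustrative sketch; the drawn integers are random, so
only the shape and dtype are deterministic (this assumes the default
``torch.float32`` dtype for ``torch.zeros``).

Example::

    >>> x = torch.zeros(2, 2)
    >>> t = torch.randint_like(x, 10)
    >>> t.shape, t.dtype
    (torch.Size([2, 2]), torch.float32)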
8901""".format(**factory_like_common_args),
8902)
8903
8904add_docstr(
8905    torch.randn,
8906    """
8907randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
8908pin_memory=False) -> Tensor
8909"""
8910    + r"""
8911
8912Returns a tensor filled with random numbers from a normal distribution
8913with mean `0` and variance `1` (also called the standard normal
8914distribution).
8915
8916.. math::
8917    \text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1)
8918
For complex dtypes, the tensor elements are sampled i.i.d. from a `complex normal distribution`_ with zero mean and
8920unit variance as
8921
8922.. math::
8923    \text{{out}}_{{i}} \sim \mathcal{{CN}}(0, 1)
8924
8925This is equivalent to separately sampling the real :math:`(\operatorname{{Re}})` and imaginary
8926:math:`(\operatorname{{Im}})` part of :math:`\text{{out}}_i` as
8927
8928.. math::
8929    \operatorname{{Re}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}}),\quad
8930    \operatorname{{Im}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}})
8931
8932The shape of the tensor is defined by the variable argument :attr:`size`.
8933
8934
8935Args:
8936    size (int...): a sequence of integers defining the shape of the output tensor.
8937        Can be a variable number of arguments or a collection like a list or tuple.
8938
8939Keyword args:
8940    {generator}
8941    {out}
8942    {dtype}
8943    {layout}
8944    {device}
8945    {requires_grad}
8946    {pin_memory}
8947
8948Example::
8949
8950    >>> torch.randn(4)
8951    tensor([-2.1436,  0.9966,  2.3426, -0.6366])
8952    >>> torch.randn(2, 3)
8953    tensor([[ 1.5954,  2.8929, -1.0923],
8954            [ 1.1719, -0.4709, -0.1996]])
8955
8956.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution
8957""".format(**factory_common_args),
8958)
8959
8960add_docstr(
8961    torch.randn_like,
8962    r"""
8963randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
8964
8965Returns a tensor with the same size as :attr:`input` that is filled with
8966random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the
8967sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to
8968``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
8969
8970Args:
8971    {input}
8972
8973Keyword args:
8974    {dtype}
8975    {layout}
8976    {device}
8977    {requires_grad}
8978    {memory_format}
8979
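The following is an illustrative sketch; the samples are random, so only
the shape of the result is deterministic.

Example::

    >>> x = torch.empty(3)
    >>> torch.randn_like(x).shape
    torch.Size([3])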
8980""".format(**factory_like_common_args),
8981)
8982
8983add_docstr(
8984    torch.randperm,
8985    """
randperm(n, *, generator=None, out=None, dtype=torch.int64, layout=torch.strided, \
8987device=None, requires_grad=False, pin_memory=False) -> Tensor
8988"""
8989    + r"""
8990Returns a random permutation of integers from ``0`` to ``n - 1``.
8991
8992Args:
8993    n (int): the upper bound (exclusive)
8994
8995Keyword args:
8996    {generator}
8997    {out}
8998    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
8999        Default: ``torch.int64``.
9000    {layout}
9001    {device}
9002    {requires_grad}
9003    {pin_memory}
9004
9005Example::
9006
9007    >>> torch.randperm(4)
9008    tensor([2, 1, 0, 3])
9009""".format(**factory_common_args),
9010)
9011
9012add_docstr(
9013    torch.tensor,
9014    r"""
9015tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
9016
9017Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`.
9018
9019.. warning::
9020
9021    When working with tensors prefer using :func:`torch.Tensor.clone`,
9022    :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for
9023    readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to
9024    ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)``
9025    is equivalent to ``t.clone().detach().requires_grad_(True)``.
9026
9027.. seealso::
9028
9029    :func:`torch.as_tensor` preserves autograd history and avoids copies where possible.
9030    :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array.
9031
9032Args:
9033    {data}
9034
9035Keyword args:
9036    {dtype}
9037    device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor
9038        then the device of data is used. If None and data is not a tensor then
9039        the result tensor is constructed on the current device.
9040    {requires_grad}
9041    {pin_memory}
9042
9043
9044Example::
9045
9046    >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
9047    tensor([[ 0.1000,  1.2000],
9048            [ 2.2000,  3.1000],
9049            [ 4.9000,  5.2000]])
9050
9051    >>> torch.tensor([0, 1])  # Type inference on data
9052    tensor([ 0,  1])
9053
9054    >>> torch.tensor([[0.11111, 0.222222, 0.3333333]],
9055    ...              dtype=torch.float64,
9056    ...              device=torch.device('cuda:0'))  # creates a double tensor on a CUDA device
9057    tensor([[ 0.1111,  0.2222,  0.3333]], dtype=torch.float64, device='cuda:0')
9058
9059    >>> torch.tensor(3.14159)  # Create a zero-dimensional (scalar) tensor
9060    tensor(3.1416)
9061
9062    >>> torch.tensor([])  # Create an empty tensor (of size (0,))
9063    tensor([])
9064""".format(**factory_data_common_args),
9065)
9066
9067add_docstr(
9068    torch.range,
9069    r"""
9070range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
9071
9072Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
9073with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
9074the gap between two values in the tensor.
9075
9076.. math::
9077    \text{out}_{i+1} = \text{out}_i + \text{step}.
9078"""
9079    + r"""
9080.. warning::
9081    This function is deprecated and will be removed in a future release because its behavior is inconsistent with
9082    Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end).
9083
9084Args:
9085    start (float): the starting value for the set of points. Default: ``0``.
9086    end (float): the ending value for the set of points
9087    step (float): the gap between each pair of adjacent points. Default: ``1``.
9088
9089Keyword args:
9090    {out}
9091    {dtype} If `dtype` is not given, infer the data type from the other input
9092        arguments. If any of `start`, `end`, or `step` are floating-point, the
9093        `dtype` is inferred to be the default dtype, see
9094        :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
9095        be `torch.int64`.
9096    {layout}
9097    {device}
9098    {requires_grad}
9099
9100Example::
9101
9102    >>> torch.range(1, 4)
9103    tensor([ 1.,  2.,  3.,  4.])
9104    >>> torch.range(1, 4, 0.5)
9105    tensor([ 1.0000,  1.5000,  2.0000,  2.5000,  3.0000,  3.5000,  4.0000])
9106""".format(**factory_common_args),
9107)
9108
9109add_docstr(
9110    torch.arange,
9111    r"""
9112arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
9113
9114Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil`
9115with values from the interval ``[start, end)`` taken with common difference
9116:attr:`step` beginning from `start`.
9117
9118Note that non-integer :attr:`step` is subject to floating point rounding errors when
9119comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end`
9120in such cases.
9121
9122.. math::
    \text{out}_{i+1} = \text{out}_{i} + \text{step}
9124"""
9125    + r"""
9126Args:
9127    start (Number): the starting value for the set of points. Default: ``0``.
9128    end (Number): the ending value for the set of points
9129    step (Number): the gap between each pair of adjacent points. Default: ``1``.
9130
9131Keyword args:
9132    {out}
9133    {dtype} If `dtype` is not given, infer the data type from the other input
        arguments. If any of `start`, `end`, or `step` are floating-point, the
9135        `dtype` is inferred to be the default dtype, see
9136        :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to
9137        be `torch.int64`.
9138    {layout}
9139    {device}
9140    {requires_grad}
9141
9142Example::
9143
9144    >>> torch.arange(5)
9145    tensor([ 0,  1,  2,  3,  4])
9146    >>> torch.arange(1, 4)
9147    tensor([ 1,  2,  3])
9148    >>> torch.arange(1, 2.5, 0.5)
9149    tensor([ 1.0000,  1.5000,  2.0000])
9150""".format(**factory_common_args),
9151)
9152
9153add_docstr(
9154    torch.ravel,
9155    r"""
9156ravel(input) -> Tensor
9157
9158Return a contiguous flattened tensor. A copy is made only if needed.
9159
9160Args:
9161    {input}
9162
9163Example::
9164
9165    >>> t = torch.tensor([[[1, 2],
9166    ...                    [3, 4]],
9167    ...                   [[5, 6],
9168    ...                    [7, 8]]])
9169    >>> torch.ravel(t)
9170    tensor([1, 2, 3, 4, 5, 6, 7, 8])
9171""".format(**common_args),
9172)
9173
9174add_docstr(
9175    torch.remainder,
9176    r"""
9177remainder(input, other, *, out=None) -> Tensor
9178
9179Computes
9180`Python's modulus operation <https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations>`_
9181entrywise.  The result has the same sign as the divisor :attr:`other` and its absolute value
9182is less than that of :attr:`other`.
9183
9184It may also be defined in terms of :func:`torch.div` as
9185
9186.. code:: python
9187
9188    torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
9189
9190Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
9191:ref:`type promotion <type-promotion-doc>`, and integer and float inputs.
9192
9193.. note::
9194    Complex inputs are not supported. In some cases, it is not mathematically
9195    possible to satisfy the definition of a modulo operation with complex numbers.
9196    See :func:`torch.fmod` for how division by zero is handled.
9197
9198.. seealso::
9199
    :func:`torch.fmod`, which implements C++'s `std::fmod <https://en.cppreference.com/w/cpp/numeric/math/fmod>`_.
    It is defined in terms of division that rounds towards zero.
9202
9203Args:
9204    input (Tensor or Scalar): the dividend
9205    other (Tensor or Scalar): the divisor
9206
9207Keyword args:
9208    {out}
9209
9210Example::
9211
9212    >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
9213    tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
9214    >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5)
9215    tensor([ -0.5000, -1.0000,  0.0000, -0.5000, -1.0000 ])
9216""".format(**common_args),
9217)
9218
9219add_docstr(
9220    torch.renorm,
9221    r"""
9222renorm(input, p, dim, maxnorm, *, out=None) -> Tensor
9223
9224Returns a tensor where each sub-tensor of :attr:`input` along dimension
9225:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower
than the value :attr:`maxnorm`.

.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged.
9229
9230Args:
9231    {input}
9232    p (float): the power for the norm computation
9233    dim (int): the dimension to slice over to get the sub-tensors
9234    maxnorm (float): the maximum norm to keep each sub-tensor under
9235
9236Keyword args:
9237    {out}
9238
9239Example::
9240
9241    >>> x = torch.ones(3, 3)
9242    >>> x[1].fill_(2)
9243    tensor([ 2.,  2.,  2.])
9244    >>> x[2].fill_(3)
9245    tensor([ 3.,  3.,  3.])
9246    >>> x
9247    tensor([[ 1.,  1.,  1.],
9248            [ 2.,  2.,  2.],
9249            [ 3.,  3.,  3.]])
9250    >>> torch.renorm(x, 1, 0, 5)
9251    tensor([[ 1.0000,  1.0000,  1.0000],
9252            [ 1.6667,  1.6667,  1.6667],
9253            [ 1.6667,  1.6667,  1.6667]])
9254""".format(**common_args),
9255)
9256
9257add_docstr(
9258    torch.reshape,
9259    r"""
9260reshape(input, shape) -> Tensor
9261
9262Returns a tensor with the same data and number of elements as :attr:`input`,
9263but with the specified shape. When possible, the returned tensor will be a view
9264of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs
9265with compatible strides can be reshaped without copying, but you should not
9266depend on the copying vs. viewing behavior.
9267
9268See :meth:`torch.Tensor.view` on when it is possible to return a view.
9269
9270A single dimension may be -1, in which case it's inferred from the remaining
9271dimensions and the number of elements in :attr:`input`.
9272
9273Args:
9274    input (Tensor): the tensor to be reshaped
9275    shape (tuple of int): the new shape
9276
9277Example::
9278
9279    >>> a = torch.arange(4.)
9280    >>> torch.reshape(a, (2, 2))
9281    tensor([[ 0.,  1.],
9282            [ 2.,  3.]])
9283    >>> b = torch.tensor([[0, 1], [2, 3]])
9284    >>> torch.reshape(b, (-1,))
9285    tensor([ 0,  1,  2,  3])
9286""",
9287)
9288
9289
9290add_docstr(
9291    torch.result_type,
9292    r"""
9293result_type(tensor1, tensor2) -> dtype
9294
9295Returns the :class:`torch.dtype` that would result from performing an arithmetic
9296operation on the provided input tensors. See type promotion :ref:`documentation <type-promotion-doc>`
9297for more information on the type promotion logic.
9298
9299Args:
9300    tensor1 (Tensor or Number): an input tensor or number
9301    tensor2 (Tensor or Number): an input tensor or number
9302
9303Example::
9304
9305    >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0)
9306    torch.float32
9307    >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1))
9308    torch.uint8
9309""",
9310)
9311
9312add_docstr(
9313    torch.row_stack,
9314    r"""
9315row_stack(tensors, *, out=None) -> Tensor
9316
9317Alias of :func:`torch.vstack`.
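
A short illustration of the alias (see :func:`torch.vstack` for details)::

    >>> a = torch.tensor([1, 2, 3])
    >>> b = torch.tensor([4, 5, 6])
    >>> torch.row_stack((a, b))
    tensor([[1, 2, 3],
            [4, 5, 6]])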
9318""",
9319)
9320
9321add_docstr(
9322    torch.round,
9323    r"""
9324round(input, *, decimals=0, out=None) -> Tensor
9325
9326Rounds elements of :attr:`input` to the nearest integer.
9327
For integer inputs, this function follows the array-api convention of returning a
copy of the input tensor.
The output tensor has the same dtype as the input.
9331
9332.. note::
    This function implements the "round half to even" rule to
    break ties when a number is equidistant from two
    integers (e.g. `round(2.5)` is 2).

    When the :attr:`decimals` argument is specified, the
    algorithm used is similar to NumPy's `around`. This
    algorithm is fast but inexact, and it can easily
    overflow for low-precision dtypes.
    E.g. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`.
9342
9343.. seealso::
9344    :func:`torch.ceil`, which rounds up.
9345    :func:`torch.floor`, which rounds down.
9346    :func:`torch.trunc`, which rounds towards zero.
9347
9348Args:
9349    {input}
9350    decimals (int): Number of decimal places to round to (default: 0).
9351        If decimals is negative, it specifies the number of positions
9352        to the left of the decimal point.
9353
9354Keyword args:
9355    {out}
9356
9357Example::
9358
9359    >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7)))
9360    tensor([ 5.,  -2.,  9., -8.])
9361
    >>> # Values equidistant from two integers are rounded towards
    >>> #   the nearest even value (zero is treated as even)
9364    >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5]))
9365    tensor([-0., 0., 2., 2.])
9366
    >>> # A positive decimals argument rounds to that decimal place
9368    >>> torch.round(torch.tensor([0.1234567]), decimals=3)
9369    tensor([0.1230])
9370
9371    >>> # A negative decimals argument rounds to the left of the decimal
9372    >>> torch.round(torch.tensor([1200.1234567]), decimals=-3)
9373    tensor([1000.])
9374""".format(**common_args),
9375)
9376
9377add_docstr(
9378    torch.rsqrt,
9379    r"""
9380rsqrt(input, *, out=None) -> Tensor
9381
9382Returns a new tensor with the reciprocal of the square-root of each of
9383the elements of :attr:`input`.
9384
9385.. math::
9386    \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
9387"""
9388    + r"""
9389Args:
9390    {input}
9391
9392Keyword args:
9393    {out}
9394
9395Example::
9396
9397    >>> a = torch.randn(4)
9398    >>> a
9399    tensor([-0.0370,  0.2970,  1.5420, -0.9105])
9400    >>> torch.rsqrt(a)
9401    tensor([    nan,  1.8351,  0.8053,     nan])
9402""".format(**common_args),
9403)
9404
9405add_docstr(
9406    torch.scatter,
9407    r"""
9408scatter(input, dim, index, src) -> Tensor
9409
9410Out-of-place version of :meth:`torch.Tensor.scatter_`
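
A minimal illustration of the out-of-place call (see
:meth:`torch.Tensor.scatter_` for the full semantics)::

    >>> src = torch.arange(1., 7.).reshape(2, 3)
    >>> index = torch.tensor([[0, 1, 2]])
    >>> torch.scatter(torch.zeros(3, 3), 0, index, src)
    tensor([[1., 0., 0.],
            [0., 2., 0.],
            [0., 0., 3.]])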
9411""",
9412)
9413
9414add_docstr(
9415    torch.scatter_add,
9416    r"""
9417scatter_add(input, dim, index, src) -> Tensor
9418
9419Out-of-place version of :meth:`torch.Tensor.scatter_add_`
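
A minimal illustration; entries of :attr:`src` that share an index are
accumulated (see :meth:`torch.Tensor.scatter_add_` for the full semantics)::

    >>> index = torch.tensor([[0, 0, 1]])
    >>> src = torch.ones(1, 3)
    >>> torch.scatter_add(torch.zeros(2, 3), 1, index, src)
    tensor([[2., 1., 0.],
            [0., 0., 0.]])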
9420""",
9421)
9422
9423add_docstr(
9424    torch.scatter_reduce,
9425    r"""
9426scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
9427
9428Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
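
A minimal illustration using the ``"sum"`` reduction (see
:meth:`torch.Tensor.scatter_reduce_` for all supported reductions)::

    >>> src = torch.tensor([1., 2., 3., 4.])
    >>> index = torch.tensor([0, 1, 0, 1])
    >>> torch.scatter_reduce(torch.zeros(2), 0, index, src, reduce="sum")
    tensor([4., 6.])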
9429""",
9430)
9431
9432add_docstr(
9433    torch.select,
9434    r"""
9435select(input, dim, index) -> Tensor
9436
9437Slices the :attr:`input` tensor along the selected dimension at the given index.
9438This function returns a view of the original tensor with the given dimension removed.
9439
9440.. note:: If :attr:`input` is a sparse tensor and returning a view of
9441          the tensor is not possible, a RuntimeError exception is
          raised. In this case, consider using the
          :func:`torch.select_copy` function.
9444
9445Args:
9446    {input}
9447    dim (int): the dimension to slice
9448    index (int): the index to select with
9449
9450.. note::
9451
9452    :meth:`select` is equivalent to slicing. For example,
9453    ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and
9454    ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``.
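
A small example of the equivalence described above::

    >>> t = torch.arange(6).reshape(2, 3)
    >>> torch.select(t, 0, 1)  # same as t[1]
    tensor([3, 4, 5])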
9455""".format(**common_args),
9456)
9457
9458add_docstr(
9459    torch.select_scatter,
9460    r"""
9461select_scatter(input, src, dim, index) -> Tensor
9462
9463Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index.
9464This function returns a tensor with fresh storage; it does not create a view.
9465
9466
9467Args:
9468    {input}
9469    src (Tensor): The tensor to embed into :attr:`input`
9470    dim (int): the dimension to insert the slice into.
9471    index (int): the index to select with
9472
9473.. note::
9474
9475    :attr:`src` must be of the proper size in order to be embedded
9476    into :attr:`input`. Specifically, it should have the same shape as
    ``torch.select(input, dim, index)``.
9478
9479Example::
9480
9481    >>> a = torch.zeros(2, 2)
9482    >>> b = torch.ones(2)
9483    >>> a.select_scatter(b, 0, 0)
9484    tensor([[1., 1.],
9485            [0., 0.]])
9486""".format(**common_args),
9487)
9488
9489add_docstr(
9490    torch.slice_scatter,
9491    r"""
9492slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor
9493
9494Embeds the values of the :attr:`src` tensor into :attr:`input` at the given
9495dimension.
9496This function returns a tensor with fresh storage; it does not create a view.
9497
9498
9499Args:
9500    {input}
9501    src (Tensor): The tensor to embed into :attr:`input`
9502    dim (int): the dimension to insert the slice into
9503    start (Optional[int]): the start index of where to insert the slice
9504    end (Optional[int]): the end index of where to insert the slice
    step (int): the step between the elements of the slice
9506
9507Example::
9508
9509    >>> a = torch.zeros(8, 8)
9510    >>> b = torch.ones(2, 8)
9511    >>> a.slice_scatter(b, start=6)
9512    tensor([[0., 0., 0., 0., 0., 0., 0., 0.],
9513            [0., 0., 0., 0., 0., 0., 0., 0.],
9514            [0., 0., 0., 0., 0., 0., 0., 0.],
9515            [0., 0., 0., 0., 0., 0., 0., 0.],
9516            [0., 0., 0., 0., 0., 0., 0., 0.],
9517            [0., 0., 0., 0., 0., 0., 0., 0.],
9518            [1., 1., 1., 1., 1., 1., 1., 1.],
9519            [1., 1., 1., 1., 1., 1., 1., 1.]])
9520
9521    >>> b = torch.ones(8, 2)
9522    >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2)
9523    tensor([[0., 0., 1., 0., 1., 0., 0., 0.],
9524            [0., 0., 1., 0., 1., 0., 0., 0.],
9525            [0., 0., 1., 0., 1., 0., 0., 0.],
9526            [0., 0., 1., 0., 1., 0., 0., 0.],
9527            [0., 0., 1., 0., 1., 0., 0., 0.],
9528            [0., 0., 1., 0., 1., 0., 0., 0.],
9529            [0., 0., 1., 0., 1., 0., 0., 0.],
9530            [0., 0., 1., 0., 1., 0., 0., 0.]])
9531""".format(**common_args),
9532)
9533
9534add_docstr(
9535    torch.set_flush_denormal,
9536    r"""
9537set_flush_denormal(mode) -> bool
9538
Disables denormal floating-point numbers on CPU.

Returns ``True`` if your system supports flushing denormal numbers and it
successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal`
is supported on x86 architectures supporting SSE3, and on the AArch64 architecture.
9544
9545Args:
9546    mode (bool): Controls whether to enable flush denormal mode or not
9547
9548Example::
9549
9550    >>> torch.set_flush_denormal(True)
9551    True
9552    >>> torch.tensor([1e-323], dtype=torch.float64)
9553    tensor([ 0.], dtype=torch.float64)
9554    >>> torch.set_flush_denormal(False)
9555    True
9556    >>> torch.tensor([1e-323], dtype=torch.float64)
9557    tensor(9.88131e-324 *
9558           [ 1.0000], dtype=torch.float64)
9559""",
9560)
9561
9562add_docstr(
9563    torch.set_num_threads,
9564    r"""
9565set_num_threads(int)
9566
9567Sets the number of threads used for intraop parallelism on CPU.
9568
9569.. warning::
9570    To ensure that the correct number of threads is used, set_num_threads
9571    must be called before running eager, JIT or autograd code.
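
Example (illustrative; pick a thread count appropriate for your machine)::

    >>> torch.set_num_threads(2)
    >>> torch.get_num_threads()
    2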
9572""",
9573)
9574
9575add_docstr(
9576    torch.set_num_interop_threads,
9577    r"""
9578set_num_interop_threads(int)
9579
9580Sets the number of threads used for interop parallelism
9581(e.g. in JIT interpreter) on CPU.
9582
9583.. warning::
9584    Can only be called once and before any inter-op parallel work
9585    is started (e.g. JIT execution).
9586""",
9587)
9588
9589add_docstr(
9590    torch.sigmoid,
9591    r"""
9592sigmoid(input, *, out=None) -> Tensor
9593
9594Alias for :func:`torch.special.expit`.
9595""",
9596)
9597
9598add_docstr(
9599    torch.logit,
9600    r"""
9601logit(input, eps=None, *, out=None) -> Tensor
9602
9603Alias for :func:`torch.special.logit`.
9604""",
9605)
9606
9607add_docstr(
9608    torch.sign,
9609    r"""
9610sign(input, *, out=None) -> Tensor
9611
9612Returns a new tensor with the signs of the elements of :attr:`input`.
9613
9614.. math::
9615    \text{out}_{i} = \operatorname{sgn}(\text{input}_{i})
9616"""
9617    + r"""
9618Args:
9619    {input}
9620
9621Keyword args:
9622    {out}
9623
9624Example::
9625
9626    >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
9627    >>> a
9628    tensor([ 0.7000, -1.2000,  0.0000,  2.3000])
9629    >>> torch.sign(a)
9630    tensor([ 1., -1.,  0.,  1.])
9631""".format(**common_args),
9632)
9633
9634add_docstr(
9635    torch.signbit,
9636    r"""
9637signbit(input, *, out=None) -> Tensor
9638
9639Tests if each element of :attr:`input` has its sign bit set or not.
9640
9641Args:
9642  {input}
9643
9644Keyword args:
9645  {out}
9646
9647Example::
9648
9649    >>> a = torch.tensor([0.7, -1.2, 0., 2.3])
9650    >>> torch.signbit(a)
9651    tensor([ False, True,  False,  False])
9652    >>> a = torch.tensor([-0.0, 0.0])
9653    >>> torch.signbit(a)
9654    tensor([ True,  False])
9655
9656.. note::
9657    signbit handles signed zeros, so negative zero (-0) returns True.
9658
9659""".format(**common_args),
9660)
9661
9662add_docstr(
9663    torch.sgn,
9664    r"""
9665sgn(input, *, out=None) -> Tensor
9666
This function is an extension of :func:`torch.sign` to complex tensors.
For complex tensors, it computes a new tensor whose elements have the same
angles as the corresponding elements of :attr:`input` and absolute values
(i.e. magnitudes) of one. For non-complex tensors, it is equivalent to
:func:`torch.sign`.
9672
9673.. math::
9674    \text{out}_{i} = \begin{cases}
                    0 & |\text{input}_i| == 0 \\
                    \frac{\text{input}_i}{|\text{input}_i|} & \text{otherwise}
9677                    \end{cases}
9678
9679"""
9680    + r"""
9681Args:
9682    {input}
9683
9684Keyword args:
9685  {out}
9686
9687Example::
9688
9689    >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j])
9690    >>> t.sgn()
9691    tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j])
9692""".format(**common_args),
9693)
9694
9695add_docstr(
9696    torch.sin,
9697    r"""
9698sin(input, *, out=None) -> Tensor
9699
9700Returns a new tensor with the sine of the elements of :attr:`input`.
9701
9702.. math::
9703    \text{out}_{i} = \sin(\text{input}_{i})
9704"""
9705    + r"""
9706Args:
9707    {input}
9708
9709Keyword args:
9710    {out}
9711
9712Example::
9713
9714    >>> a = torch.randn(4)
9715    >>> a
9716    tensor([-0.5461,  0.1347, -2.7266, -0.2746])
9717    >>> torch.sin(a)
9718    tensor([-0.5194,  0.1343, -0.4032, -0.2711])
9719""".format(**common_args),
9720)
9721
9722add_docstr(
9723    torch.sinc,
9724    r"""
9725sinc(input, *, out=None) -> Tensor
9726
9727Alias for :func:`torch.special.sinc`.
9728""",
9729)
9730
9731add_docstr(
9732    torch.sinh,
9733    r"""
9734sinh(input, *, out=None) -> Tensor
9735
9736Returns a new tensor with the hyperbolic sine of the elements of
9737:attr:`input`.
9738
9739.. math::
9740    \text{out}_{i} = \sinh(\text{input}_{i})
9741"""
9742    + r"""
9743Args:
9744    {input}
9745
9746Keyword args:
9747    {out}
9748
9749Example::
9750
9751    >>> a = torch.randn(4)
9752    >>> a
9753    tensor([ 0.5380, -0.8632, -0.1265,  0.9399])
9754    >>> torch.sinh(a)
9755    tensor([ 0.5644, -0.9744, -0.1268,  1.0845])
9756
9757.. note::
9758   When :attr:`input` is on the CPU, the implementation of torch.sinh may use
9759   the Sleef library, which rounds very large results to infinity or negative
9760   infinity. See `here <https://sleef.org/purec.xhtml>`_ for details.
9761""".format(**common_args),
9762)
9763
9764add_docstr(
9765    torch.sort,
9766    r"""
9767sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor)
9768
9769Sorts the elements of the :attr:`input` tensor along a given dimension
9770in ascending order by value.
9771
9772If :attr:`dim` is not given, the last dimension of the `input` is chosen.
9773
9774If :attr:`descending` is ``True`` then the elements are sorted in descending
9775order by value.
9776
9777If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
9778the order of equivalent elements.
9779
9780A namedtuple of (values, indices) is returned, where the `values` are the
9781sorted values and `indices` are the indices of the elements in the original
9782`input` tensor.
9783
9784Args:
9785    {input}
9786    dim (int, optional): the dimension to sort along
9787    descending (bool, optional): controls the sorting order (ascending or descending)
9788    stable (bool, optional): makes the sorting routine stable, which guarantees that the order
9789       of equivalent elements is preserved.
9790
9791Keyword args:
9792    out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can
9793        be optionally given to be used as output buffers
9794
9795Example::
9796
9797    >>> x = torch.randn(3, 4)
9798    >>> sorted, indices = torch.sort(x)
9799    >>> sorted
9800    tensor([[-0.2162,  0.0608,  0.6719,  2.3332],
9801            [-0.5793,  0.0061,  0.6058,  0.9497],
9802            [-0.5071,  0.3343,  0.9553,  1.0960]])
9803    >>> indices
9804    tensor([[ 1,  0,  2,  3],
9805            [ 3,  1,  0,  2],
9806            [ 0,  3,  1,  2]])
9807
9808    >>> sorted, indices = torch.sort(x, 0)
9809    >>> sorted
9810    tensor([[-0.5071, -0.2162,  0.6719, -0.5793],
9811            [ 0.0608,  0.0061,  0.9497,  0.3343],
9812            [ 0.6058,  0.9553,  1.0960,  2.3332]])
9813    >>> indices
9814    tensor([[ 2,  0,  0,  1],
9815            [ 0,  1,  1,  2],
9816            [ 1,  2,  2,  0]])
9817    >>> x = torch.tensor([0, 1] * 9)
9818    >>> x.sort()
9819    torch.return_types.sort(
9820        values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
9821        indices=tensor([ 2, 16,  4,  6, 14,  8,  0, 10, 12,  9, 17, 15, 13, 11,  7,  5,  3,  1]))
9822    >>> x.sort(stable=True)
9823    torch.return_types.sort(
9824        values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
9825        indices=tensor([ 0,  2,  4,  6,  8, 10, 12, 14, 16,  1,  3,  5,  7,  9, 11, 13, 15, 17]))
9826""".format(**common_args),
9827)
9828
9829add_docstr(
9830    torch.argsort,
9831    r"""
9832argsort(input, dim=-1, descending=False, stable=False) -> Tensor
9833
9834Returns the indices that sort a tensor along a given dimension in ascending
9835order by value.
9836
9837This is the second value returned by :meth:`torch.sort`.  See its documentation
9838for the exact semantics of this method.
9839
9840If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving
9841the order of equivalent elements. If ``False``, the relative order of values
9842which compare equal is not guaranteed. ``True`` is slower.
9843
9844Args:
9845    {input}
9846    dim (int, optional): the dimension to sort along
9847    descending (bool, optional): controls the sorting order (ascending or descending)
9848    stable (bool, optional): controls the relative order of equivalent elements
9849
9850Example::
9851
9852    >>> a = torch.randn(4, 4)
9853    >>> a
9854    tensor([[ 0.0785,  1.5267, -0.8521,  0.4065],
9855            [ 0.1598,  0.0788, -0.0745, -1.2700],
9856            [ 1.2208,  1.0722, -0.7064,  1.2564],
9857            [ 0.0669, -0.2318, -0.8229, -0.9280]])
9858
9859
9860    >>> torch.argsort(a, dim=1)
9861    tensor([[2, 0, 3, 1],
9862            [3, 2, 1, 0],
9863            [2, 1, 0, 3],
9864            [3, 2, 1, 0]])
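
    >>> # With stable=True, values that compare equal keep their original order
    >>> torch.argsort(torch.tensor([1, 0, 1, 0]), stable=True)
    tensor([1, 3, 0, 2])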
9865""".format(**common_args),
9866)
9867
9868add_docstr(
9869    torch.msort,
9870    r"""
9871msort(input, *, out=None) -> Tensor
9872
9873Sorts the elements of the :attr:`input` tensor along its first dimension
9874in ascending order by value.
9875
9876.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`.
9877          See also :func:`torch.sort`.
9878
9879Args:
9880    {input}
9881
9882Keyword args:
9883    {out}
9884
9885Example::
9886
9887    >>> t = torch.randn(3, 4)
9888    >>> t
9889    tensor([[-0.1321,  0.4370, -1.2631, -1.1289],
9890            [-2.0527, -1.1250,  0.2275,  0.3077],
9891            [-0.0881, -0.1259, -0.5495,  1.0284]])
9892    >>> torch.msort(t)
9893    tensor([[-2.0527, -1.1250, -1.2631, -1.1289],
9894            [-0.1321, -0.1259, -0.5495,  0.3077],
9895            [-0.0881,  0.4370,  0.2275,  1.0284]])
9896""".format(**common_args),
9897)
9898
9899add_docstr(
9900    torch.sparse_compressed_tensor,
9901    r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """
9902    r"""*, dtype=None, layout=None, device=None, pin_memory=False, requires_grad=False, check_invariants=None) -> Tensor
9903
9904Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR,
9905CSC, BSR, or BSC - <sparse-compressed-docs>` with specified values at
9906the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse
9907matrix multiplication operations in Compressed Sparse format are
typically faster than those for sparse tensors in COO format. Make sure you
have a look at :ref:`the note on the data type of the indices
9910<sparse-compressed-docs>`.
9911
9912{sparse_factory_device_note}
9913
9914Args:
9915    compressed_indices (array_like): (B+1)-dimensional array of size
9916        ``(*batchsize, compressed_dim_size + 1)``.  The last element of
9917        each batch is the number of non-zero elements or blocks. This
9918        tensor encodes the index in ``values`` and ``plain_indices``
9919        depending on where the given compressed dimension (row or
9920        column) starts. Each successive number in the tensor
9921        subtracted by the number before it denotes the number of
9922        elements or blocks in a given compressed dimension.
9923    plain_indices (array_like): Plain dimension (column or row)
9924        co-ordinates of each element or block in values. (B+1)-dimensional
9925        tensor with the same length as values.
9926
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, or other types that
        represent a (1+K)-dimensional (for CSR and CSC layouts) or
9930        (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
9931        ``K`` is the number of dense dimensions.
9932    size (list, tuple, :class:`torch.Size`, optional): Size of the
9933        sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
9934        blocksize[1], *densesize)`` where ``blocksize[0] ==
9935        blocksize[1] == 1`` for CSR and CSC formats. If not provided,
9936        the size will be inferred as the minimum size big enough to
9937        hold all non-zero elements or blocks.
9938
9939Keyword args:
9940    dtype (:class:`torch.dtype`, optional): the desired data type of
9941        returned tensor.  Default: if None, infers data type from
9942        :attr:`values`.
9943    layout (:class:`torch.layout`, required): the desired layout of
9944        returned tensor: :attr:`torch.sparse_csr`,
9945        :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
9946        :attr:`torch.sparse_bsc`.
9947    device (:class:`torch.device`, optional): the desired device of
9948        returned tensor.  Default: if None, uses the current device
9949        for the default tensor type (see
9950        :func:`torch.set_default_device`). :attr:`device` will be
9951        the CPU for CPU tensor types and the current CUDA device for
9952        CUDA tensor types.
9953    {pin_memory}
9954    {requires_grad}
9955    {check_invariants}
9956
Example::

9958    >>> compressed_indices = [0, 2, 4]
9959    >>> plain_indices = [0, 1, 0, 1]
9960    >>> values = [1, 2, 3, 4]
9961    >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
9962    ...                                torch.tensor(plain_indices, dtype=torch.int64),
9963    ...                                torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
9964    tensor(crow_indices=tensor([0, 2, 4]),
9965           col_indices=tensor([0, 1, 0, 1]),
9966           values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
9967           dtype=torch.float64, layout=torch.sparse_csr)
9968""".format(**factory_common_args),
9969)
9970
9971add_docstr(
9972    torch.sparse_csr_tensor,
9973    r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """
9974    r"""*, dtype=None, device=None, pin_memory=False, requires_grad=False, check_invariants=None) -> Tensor
9975
9976Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) <sparse-csr-docs>` with specified
9977values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations
in CSR format are typically faster than those for sparse tensors in COO format. Make sure you have a look
9979at :ref:`the note on the data type of the indices <sparse-csr-docs>`.
9980
9981{sparse_factory_device_note}
9982
9983Args:
9984    crow_indices (array_like): (B+1)-dimensional array of size
9985        ``(*batchsize, nrows + 1)``.  The last element of each batch
9986        is the number of non-zeros. This tensor encodes the index in
9987        values and col_indices depending on where the given row
9988        starts. Each successive number in the tensor subtracted by the
9989        number before it denotes the number of elements in a given
9990        row.
9991    col_indices (array_like): Column co-ordinates of each element in
9992        values. (B+1)-dimensional tensor with the same length
9993        as values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, or other types that
        represent a (1+K)-dimensional tensor where ``K`` is the number
9997        of dense dimensions.
9998    size (list, tuple, :class:`torch.Size`, optional): Size of the
9999        sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
10000        not provided, the size will be inferred as the minimum size
10001        big enough to hold all non-zero elements.
10002
10003Keyword args:
10004    dtype (:class:`torch.dtype`, optional): the desired data type of
10005        returned tensor.  Default: if None, infers data type from
10006        :attr:`values`.
10007    device (:class:`torch.device`, optional): the desired device of
10008        returned tensor.  Default: if None, uses the current device
10009        for the default tensor type (see
10010        :func:`torch.set_default_device`). :attr:`device` will be
10011        the CPU for CPU tensor types and the current CUDA device for
10012        CUDA tensor types.
10013    {pin_memory}
10014    {requires_grad}
10015    {check_invariants}
10016
Example::

10018    >>> crow_indices = [0, 2, 4]
10019    >>> col_indices = [0, 1, 0, 1]
10020    >>> values = [1, 2, 3, 4]
10021    >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
10022    ...                         torch.tensor(col_indices, dtype=torch.int64),
10023    ...                         torch.tensor(values), dtype=torch.double)
10024    tensor(crow_indices=tensor([0, 2, 4]),
10025           col_indices=tensor([0, 1, 0, 1]),
10026           values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
10027           dtype=torch.float64, layout=torch.sparse_csr)
10028""".format(**factory_common_args),
10029)
10030
10031add_docstr(
10032    torch.sparse_csc_tensor,
10033    r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """
10034    r"""*, dtype=None, device=None, pin_memory=False, requires_grad=False, check_invariants=None) -> Tensor
10035
10036Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column)
10037<sparse-csc-docs>` with specified values at the given
10038:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in CSC format are typically faster than those
for sparse tensors in COO format. Make sure you have a look at :ref:`the
10041note on the data type of the indices <sparse-csc-docs>`.
10042
10043{sparse_factory_device_note}
10044
10045Args:
10046    ccol_indices (array_like): (B+1)-dimensional array of size
10047        ``(*batchsize, ncols + 1)``.  The last element of each batch
10048        is the number of non-zeros. This tensor encodes the index in
10049        values and row_indices depending on where the given column
10050        starts. Each successive number in the tensor subtracted by the
10051        number before it denotes the number of elements in a given
10052        column.
10053    row_indices (array_like): Row co-ordinates of each element in
10054        values. (B+1)-dimensional tensor with the same length as
10055        values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, or other types that
        represent a (1+K)-dimensional tensor where ``K`` is the number
10059        of dense dimensions.
10060    size (list, tuple, :class:`torch.Size`, optional): Size of the
10061        sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If
10062        not provided, the size will be inferred as the minimum size
10063        big enough to hold all non-zero elements.
10064
10065Keyword args:
10066    dtype (:class:`torch.dtype`, optional): the desired data type of
10067        returned tensor.  Default: if None, infers data type from
10068        :attr:`values`.
10069    device (:class:`torch.device`, optional): the desired device of
10070        returned tensor.  Default: if None, uses the current device
10071        for the default tensor type (see
10072        :func:`torch.set_default_device`). :attr:`device` will be
10073        the CPU for CPU tensor types and the current CUDA device for
10074        CUDA tensor types.
10075    {pin_memory}
10076    {requires_grad}
10077    {check_invariants}
10078
Example::

10080    >>> ccol_indices = [0, 2, 4]
10081    >>> row_indices = [0, 1, 0, 1]
10082    >>> values = [1, 2, 3, 4]
10083    >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
10084    ...                         torch.tensor(row_indices, dtype=torch.int64),
10085    ...                         torch.tensor(values), dtype=torch.double)
10086    tensor(ccol_indices=tensor([0, 2, 4]),
10087           row_indices=tensor([0, 1, 0, 1]),
10088           values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
10089           dtype=torch.float64, layout=torch.sparse_csc)
10090""".format(**factory_common_args),
10091)
10092
10093add_docstr(
10094    torch.sparse_bsr_tensor,
10095    r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """
10096    r"""*, dtype=None, device=None, pin_memory=False, requires_grad=False, check_invariants=None) -> Tensor
10097
Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)
<sparse-bsr-docs>` with specified 2-dimensional blocks at the given
:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix
multiplication operations in BSR format are typically faster than those
for sparse tensors in COO format. Make sure you have a look at :ref:`the
10103note on the data type of the indices <sparse-bsr-docs>`.
10104
10105{sparse_factory_device_note}
10106
10107Args:
10108    crow_indices (array_like): (B+1)-dimensional array of size
10109        ``(*batchsize, nrowblocks + 1)``.  The last element of each
10110        batch is the number of non-zeros. This tensor encodes the
10111        block index in values and col_indices depending on where the
10112        given row block starts. Each successive number in the tensor
10113        subtracted by the number before it denotes the number of
10114        blocks in a given row.
10115    col_indices (array_like): Column block co-ordinates of each block
10116        in values. (B+1)-dimensional tensor with the same length as
10117        values.
    values (array_like): Initial values for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, scalar, or other types that
        represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
10121        number of dense dimensions.
10122    size (list, tuple, :class:`torch.Size`, optional): Size of the
10123        sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
10124        blocksize[1], *densesize)`` where ``blocksize ==
10125        values.shape[1:3]``. If not provided, the size will be
10126        inferred as the minimum size big enough to hold all non-zero
10127        blocks.
10128
10129Keyword args:
10130    dtype (:class:`torch.dtype`, optional): the desired data type of
10131        returned tensor.  Default: if None, infers data type from
10132        :attr:`values`.
10133    device (:class:`torch.device`, optional): the desired device of
10134        returned tensor.  Default: if None, uses the current device
10135        for the default tensor type (see
10136        :func:`torch.set_default_device`). :attr:`device` will be
10137        the CPU for CPU tensor types and the current CUDA device for
10138        CUDA tensor types.
10139    {pin_memory}
10140    {requires_grad}
10141    {check_invariants}
10142
Example::

10144    >>> crow_indices = [0, 1, 2]
10145    >>> col_indices = [0, 1]
10146    >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
10147    >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64),
10148    ...                         torch.tensor(col_indices, dtype=torch.int64),
10149    ...                         torch.tensor(values), dtype=torch.double)
10150    tensor(crow_indices=tensor([0, 1, 2]),
10151           col_indices=tensor([0, 1]),
10152           values=tensor([[[1., 2.],
10153                           [3., 4.]],
10154                          [[5., 6.],
10155                           [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64,
10156           layout=torch.sparse_bsr)
10157""".format(**factory_common_args),
10158)
10159
10160add_docstr(
10161    torch.sparse_bsc_tensor,
10162    r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """
10163    r"""*, dtype=None, device=None, pin_memory=False, requires_grad=False, check_invariants=None) -> Tensor
10164
10165Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse
Column) <sparse-bsc-docs>` with specified 2-dimensional blocks at the
given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix
multiplication operations in BSC format are typically faster than those
for sparse tensors in COO format. Make sure you have a look at :ref:`the
10170note on the data type of the indices <sparse-bsc-docs>`.
10171
10172{sparse_factory_device_note}
10173
10174Args:
10175    ccol_indices (array_like): (B+1)-dimensional array of size
10176        ``(*batchsize, ncolblocks + 1)``. The last element of each
10177        batch is the number of non-zeros. This tensor encodes the
10178        index in values and row_indices depending on where the given
10179        column starts. Each successive number in the tensor subtracted
10180        by the number before it denotes the number of elements in a
10181        given column.
10182    row_indices (array_like): Row block co-ordinates of each block in
10183        values. (B+1)-dimensional tensor with the same length
10184        as values.
    values (array_like): Initial blocks for the tensor. Can be a list,
        tuple, NumPy ``ndarray``, or other types that
        represent a (1 + 2 + K)-dimensional tensor where ``K`` is the
10188        number of dense dimensions.
10189    size (list, tuple, :class:`torch.Size`, optional): Size of the
10190        sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
        blocksize[1], *densesize)`` where ``blocksize ==
        values.shape[1:3]``. If not provided, the size will be
        inferred as the minimum size big enough to hold all non-zero
        blocks.
10194
10195Keyword args:
10196    dtype (:class:`torch.dtype`, optional): the desired data type of
10197        returned tensor.  Default: if None, infers data type from
10198        :attr:`values`.
10199    device (:class:`torch.device`, optional): the desired device of
10200        returned tensor.  Default: if None, uses the current device
10201        for the default tensor type (see
10202        :func:`torch.set_default_device`). :attr:`device` will be
10203        the CPU for CPU tensor types and the current CUDA device for
10204        CUDA tensor types.
10205    {pin_memory}
10206    {requires_grad}
10207    {check_invariants}
10208
Example::

10210    >>> ccol_indices = [0, 1, 2]
10211    >>> row_indices = [0, 1]
10212    >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
10213    >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
10214    ...                         torch.tensor(row_indices, dtype=torch.int64),
10215    ...                         torch.tensor(values), dtype=torch.double)
10216    tensor(ccol_indices=tensor([0, 1, 2]),
10217           row_indices=tensor([0, 1]),
10218           values=tensor([[[1., 2.],
10219                           [3., 4.]],
10220                          [[5., 6.],
                           [7., 8.]]]), size=(4, 4), nnz=2, dtype=torch.float64,
10222           layout=torch.sparse_bsc)
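
The block structure can be inspected by converting the result back to a
strided tensor with :meth:`Tensor.to_dense` (an illustrative check of the
example above)::

    >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64),
    ...                         torch.tensor(row_indices, dtype=torch.int64),
    ...                         torch.tensor(values), dtype=torch.double).to_dense()
    tensor([[1., 2., 0., 0.],
            [3., 4., 0., 0.],
            [0., 0., 5., 6.],
            [0., 0., 7., 8.]], dtype=torch.float64)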
10223""".format(**factory_common_args),
10224)
10225
10226add_docstr(
10227    torch.sparse_coo_tensor,
10228    r"""sparse_coo_tensor(indices, values, size=None, """
10229    r"""*, dtype=None, device=None, pin_memory=False, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
10230
10231Constructs a :ref:`sparse tensor in COO(rdinate) format
10232<sparse-coo-docs>` with specified values at the given
10233:attr:`indices`.
10234
10235.. note::
10236
10237   This function returns an :ref:`uncoalesced tensor
10238   <sparse-uncoalesced-coo-docs>` when :attr:`is_coalesced` is
10239   unspecified or ``None``.
10240
10241{sparse_factory_device_note}
10242
10243Args:
10244    indices (array_like): Initial data for the tensor. Can be a list, tuple,
10245        NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
10246        internally. The indices are the coordinates of the non-zero values in the matrix, and thus
10247        should be two-dimensional where the first dimension is the number of tensor dimensions and
10248        the second dimension is the number of non-zero values.
10249    values (array_like): Initial values for the tensor. Can be a list, tuple,
10250        NumPy ``ndarray``, scalar, and other types.
10251    size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
10252        provided the size will be inferred as the minimum size big enough to hold all non-zero
10253        elements.
10254
10255Keyword args:
10256    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
10257        Default: if None, infers data type from :attr:`values`.
10258    device (:class:`torch.device`, optional): the desired device of returned tensor.
10259        Default: if None, uses the current device for the default tensor type
10260        (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
10261        for CPU tensor types and the current CUDA device for CUDA tensor types.
10262    {pin_memory}
10263    {requires_grad}
10264    {check_invariants}
    is_coalesced (bool, optional): When ``True``, the caller is
        responsible for providing tensor indices that correspond to a
        coalesced tensor.  If the :attr:`check_invariants` flag is
        ``False``, no error will be raised if the prerequisites are not
        met and this will lead to silently incorrect results. To force
        coalescence, please use :meth:`coalesce` on the resulting
        Tensor.
        Default: ``None``; except for trivial cases (e.g. ``nnz < 2``) the
        resulting Tensor has ``is_coalesced`` set to ``False``.
10274
10275Example::
10276
10277    >>> i = torch.tensor([[0, 1, 1],
10278    ...                   [2, 0, 2]])
10279    >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
10280    >>> torch.sparse_coo_tensor(i, v, [2, 4])
10281    tensor(indices=tensor([[0, 1, 1],
10282                           [2, 0, 2]]),
10283           values=tensor([3., 4., 5.]),
10284           size=(2, 4), nnz=3, layout=torch.sparse_coo)
10285
10286    >>> torch.sparse_coo_tensor(i, v)  # Shape inference
10287    tensor(indices=tensor([[0, 1, 1],
10288                           [2, 0, 2]]),
10289           values=tensor([3., 4., 5.]),
10290           size=(2, 3), nnz=3, layout=torch.sparse_coo)
10291
10292    >>> torch.sparse_coo_tensor(i, v, [2, 4],
10293    ...                         dtype=torch.float64,
10294    ...                         device=torch.device('cuda:0'))
10295    tensor(indices=tensor([[0, 1, 1],
10296                           [2, 0, 2]]),
10297           values=tensor([3., 4., 5.]),
10298           device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
10299           layout=torch.sparse_coo)
10300
10301    # Create an empty sparse tensor with the following invariants:
10302    #   1. sparse_dim + dense_dim = len(SparseTensor.shape)
10303    #   2. SparseTensor._indices().shape = (sparse_dim, nnz)
10304    #   3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:])
10305    #
10306    # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and
10307    # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0))
    >>> torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
10309    tensor(indices=tensor([], size=(1, 0)),
10310           values=tensor([], size=(0,)),
10311           size=(1,), nnz=0, layout=torch.sparse_coo)
10312
10313    # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and
10314    # sparse_dim = 1
    >>> torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2])
10316    tensor(indices=tensor([], size=(1, 0)),
10317           values=tensor([], size=(0, 2)),
10318           size=(1, 2), nnz=0, layout=torch.sparse_coo)
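
    # Duplicate coordinates are allowed in an uncoalesced tensor; as the
    # note above says, coalesce() sums the values at each repeated index
    # (an illustrative sketch).
    >>> i = torch.tensor([[0, 0, 1],
    ...                   [1, 1, 0]])
    >>> v = torch.tensor([1., 2., 3.])
    >>> torch.sparse_coo_tensor(i, v, [2, 2]).coalesce()
    tensor(indices=tensor([[0, 1],
                           [1, 0]]),
           values=tensor([3., 3.]),
           size=(2, 2), nnz=2, layout=torch.sparse_coo)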
10319
10320.. _torch.sparse: https://pytorch.org/docs/stable/sparse.html
10321""".format(**factory_common_args),
10322)
10323
10324add_docstr(
10325    torch.sqrt,
10326    r"""
10327sqrt(input, *, out=None) -> Tensor
10328
10329Returns a new tensor with the square-root of the elements of :attr:`input`.
10330
10331.. math::
10332    \text{out}_{i} = \sqrt{\text{input}_{i}}
10333"""
10334    + r"""
10335Args:
10336    {input}
10337
10338Keyword args:
10339    {out}
10340
10341Example::
10342
10343    >>> a = torch.randn(4)
10344    >>> a
10345    tensor([-2.0755,  1.0226,  0.0831,  0.4806])
10346    >>> torch.sqrt(a)
10347    tensor([    nan,  1.0112,  0.2883,  0.6933])
10348""".format(**common_args),
10349)
10350
10351add_docstr(
10352    torch.square,
10353    r"""
10354square(input, *, out=None) -> Tensor
10355
10356Returns a new tensor with the square of the elements of :attr:`input`.
10357
10358Args:
10359    {input}
10360
10361Keyword args:
10362    {out}
10363
10364Example::
10365
10366    >>> a = torch.randn(4)
10367    >>> a
10368    tensor([-2.0755,  1.0226,  0.0831,  0.4806])
10369    >>> torch.square(a)
10370    tensor([ 4.3077,  1.0457,  0.0069,  0.2310])
10371""".format(**common_args),
10372)
10373
10374add_docstr(
10375    torch.squeeze,
10376    r"""
10377squeeze(input, dim=None) -> Tensor
10378
10379Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed.
10380
10381For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then `input.squeeze()`
10383will be of shape: :math:`(A \times B \times C \times D)`.
10384
10385When :attr:`dim` is given, a squeeze operation is done only in the given
10386dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`,
10387``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
10388will squeeze the tensor to the shape :math:`(A \times B)`.
10389
10390.. note:: The returned tensor shares the storage with the input tensor,
10391          so changing the contents of one will change the contents of the other.
10392
10393.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)`
10394          will also remove the batch dimension, which can lead to unexpected
10395          errors. Consider specifying only the dims you wish to be squeezed.
10396
10397Args:
10398    {input}
10399    dim (int or tuple of ints, optional): if given, the input will be squeezed
10400           only in the specified dimensions.
10401
10402        .. versionchanged:: 2.0
10403           :attr:`dim` now accepts tuples of dimensions.
10404
10405Example::
10406
10407    >>> x = torch.zeros(2, 1, 2, 1, 2)
10408    >>> x.size()
10409    torch.Size([2, 1, 2, 1, 2])
10410    >>> y = torch.squeeze(x)
10411    >>> y.size()
10412    torch.Size([2, 2, 2])
10413    >>> y = torch.squeeze(x, 0)
10414    >>> y.size()
10415    torch.Size([2, 1, 2, 1, 2])
10416    >>> y = torch.squeeze(x, 1)
10417    >>> y.size()
10418    torch.Size([2, 2, 1, 2])
    >>> y = torch.squeeze(x, (1, 2, 3))
    >>> y.size()
    torch.Size([2, 2, 2])
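
As the warning above suggests, passing :attr:`dim` explicitly preserves a
size-1 batch dimension (an illustrative sketch)::

    >>> batch = torch.zeros(1, 3, 1)
    >>> torch.squeeze(batch).shape
    torch.Size([3])
    >>> torch.squeeze(batch, -1).shape
    torch.Size([1, 3])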
10421""".format(**common_args),
10422)
10423
10424add_docstr(
10425    torch.std,
10426    r"""
10427std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
10428
10429Calculates the standard deviation over the dimensions specified by :attr:`dim`.
10430:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
10431reduce over all dimensions.
10432
10433The standard deviation (:math:`\sigma`) is calculated as
10434
10435.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
10436
10437where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
10438sample mean, :math:`N` is the number of samples and :math:`\delta N` is
10439the :attr:`correction`.
10440"""
10441    + r"""
10442
10443{keepdim_details}
10444
10445Args:
10446    {input}
10447    {dim}
10448
10449Keyword args:
10450    correction (int): difference between the sample size and sample degrees of freedom.
10451        Defaults to `Bessel's correction`_, ``correction=1``.
10452
10453        .. versionchanged:: 2.0
10454            Previously this argument was called ``unbiased`` and was a boolean
10455            with ``True`` corresponding to ``correction=1`` and ``False`` being
10456            ``correction=0``.
10457    {keepdim}
10458    {out}
10459
Example::
10461
10462    >>> a = torch.tensor(
10463    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
10464    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
10465    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
10466    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
10467    >>> torch.std(a, dim=1, keepdim=True)
10468    tensor([[1.0311],
10469            [0.7477],
10470            [1.2204],
10471            [0.9087]])
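
    >>> # Setting correction=0 computes the population standard deviation
    >>> # over the same data (an illustrative follow-up).
    >>> torch.std(a, dim=1, keepdim=True, correction=0)
    tensor([[0.8929],
            [0.6475],
            [1.0569],
            [0.7870]])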
10472
10473.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
10474
10475""".format(**multi_dim_common),
10476)
10477
10478add_docstr(
10479    torch.std_mean,
10480    r"""
10481std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
10482
10483Calculates the standard deviation and mean over the dimensions specified by
10484:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or
10485``None`` to reduce over all dimensions.
10486
10487The standard deviation (:math:`\sigma`) is calculated as
10488
10489.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2}
10490
10491where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
10492sample mean, :math:`N` is the number of samples and :math:`\delta N` is
10493the :attr:`correction`.
10494
10495"""
10496    + r"""
10497
10498{keepdim_details}
10499
10500Args:
10501    {input}
10502    {opt_dim}
10503
10504Keyword args:
10505    correction (int): difference between the sample size and sample degrees of freedom.
10506        Defaults to `Bessel's correction`_, ``correction=1``.
10507
10508        .. versionchanged:: 2.0
10509            Previously this argument was called ``unbiased`` and was a boolean
10510            with ``True`` corresponding to ``correction=1`` and ``False`` being
10511            ``correction=0``.
10512    {keepdim}
10513    {out}
10514
10515Returns:
10516    A tuple (std, mean) containing the standard deviation and mean.
10517
Example::
10519
10520    >>> a = torch.tensor(
10521    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
10522    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
10523    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
10524    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
10525    >>> torch.std_mean(a, dim=0, keepdim=True)
10526    (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]),
10527     tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))
10528
10529.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
10530
10531""".format(**multi_dim_common),
10532)
10533
10534add_docstr(
10535    torch.sub,
10536    r"""
10537sub(input, other, *, alpha=1, out=None) -> Tensor
10538
10539Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`.
10540
10541.. math::
10542    \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i
10543"""
10544    + r"""
10545
10546Supports :ref:`broadcasting to a common shape <broadcasting-semantics>`,
10547:ref:`type promotion <type-promotion-doc>`, and integer, float, and complex inputs.
10548
10549Args:
10550    {input}
10551    other (Tensor or Number): the tensor or number to subtract from :attr:`input`.
10552
10553Keyword args:
10554    alpha (Number): the multiplier for :attr:`other`.
10555    {out}
10556
10557Example::
10558
10559    >>> a = torch.tensor((1, 2))
10560    >>> b = torch.tensor((0, 1))
10561    >>> torch.sub(a, b, alpha=2)
10562    tensor([1, 0])
10563""".format(**common_args),
10564)
10565
10566add_docstr(
10567    torch.subtract,
10568    r"""
10569subtract(input, other, *, alpha=1, out=None) -> Tensor
10570
10571Alias for :func:`torch.sub`.
10572""",
10573)
10574
10575add_docstr(
10576    torch.sum,
10577    r"""
10578sum(input, *, dtype=None) -> Tensor
10579
10580Returns the sum of all elements in the :attr:`input` tensor.
10581
10582Args:
10583    {input}
10584
10585Keyword args:
10586    {dtype}
10587
10588Example::
10589
10590    >>> a = torch.randn(1, 3)
10591    >>> a
10592    tensor([[ 0.1133, -0.9567,  0.2958]])
10593    >>> torch.sum(a)
10594    tensor(-0.5475)
10595
10596.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
10597   :noindex:
10598
10599Returns the sum of each row of the :attr:`input` tensor in the given
10600dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
10601reduce over all of them.
10602
10603{keepdim_details}
10604
10605Args:
10606    {input}
10607    {opt_dim}
10608    {keepdim}
10609
10610Keyword args:
10611    {dtype}
10612
10613Example::
10614
10615    >>> a = torch.randn(4, 4)
10616    >>> a
10617    tensor([[ 0.0569, -0.2475,  0.0737, -0.3429],
10618            [-0.2993,  0.9138,  0.9337, -1.6864],
10619            [ 0.1132,  0.7892, -0.1003,  0.5688],
10620            [ 0.3637, -0.9906, -0.4752, -1.5197]])
10621    >>> torch.sum(a, 1)
10622    tensor([-0.4598, -0.1381,  1.3708, -2.6217])
10623    >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6)
10624    >>> torch.sum(b, (2, 1))
10625    tensor([  435.,  1335.,  2235.,  3135.])
10626""".format(**multi_dim_common),
10627)
10628
10629add_docstr(
10630    torch.nansum,
10631    r"""
10632nansum(input, *, dtype=None) -> Tensor
10633
Returns the sum of all elements, treating Not a Number (NaN) values as zero.
10635
10636Args:
10637    {input}
10638
10639Keyword args:
10640    {dtype}
10641
10642Example::
10643
10644    >>> a = torch.tensor([1., 2., float('nan'), 4.])
10645    >>> torch.nansum(a)
10646    tensor(7.)
10647
10648.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor
10649   :noindex:
10650
10651Returns the sum of each row of the :attr:`input` tensor in the given
dimension :attr:`dim`, treating Not a Number (NaN) values as zero.
10653If :attr:`dim` is a list of dimensions, reduce over all of them.
10654
10655{keepdim_details}
10656
10657Args:
10658    {input}
10659    {opt_dim}
10660    {keepdim}
10661
10662Keyword args:
10663    {dtype}
10664
10665Example::
10666
10667    >>> torch.nansum(torch.tensor([1., float("nan")]))
10668    1.0
10669    >>> a = torch.tensor([[1, 2], [3., float("nan")]])
10670    >>> torch.nansum(a)
10671    tensor(6.)
10672    >>> torch.nansum(a, dim=0)
10673    tensor([4., 2.])
10674    >>> torch.nansum(a, dim=1)
10675    tensor([3., 3.])
10676""".format(**multi_dim_common),
10677)
10678
10679add_docstr(
10680    torch.svd,
10681    r"""
10682svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor)
10683
10684Computes the singular value decomposition of either a matrix or batch of
10685matrices :attr:`input`. The singular value decomposition is represented as a
namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`,
10687where :math:`V^{\text{H}}` is the transpose of `V` for real inputs,
10688and the conjugate transpose of `V` for complex inputs.
10689If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also
10690batched with the same batch dimensions as :attr:`input`.
10691
10692If :attr:`some` is `True` (default), the method returns the reduced singular
10693value decomposition. In this case, if the last two dimensions of :attr:`input` are
10694`m` and `n`, then the returned `U` and `V` matrices will contain only
10695`min(n, m)` orthonormal columns.
10696
10697If :attr:`compute_uv` is `False`, the returned `U` and `V` will be
10698zero-filled matrices of shape `(m, m)` and `(n, n)`
respectively, on the same device as :attr:`input`. The argument :attr:`some`
10700has no effect when :attr:`compute_uv` is `False`.
10701
10702Supports :attr:`input` of float, double, cfloat and cdouble data types.
10703The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will
10704always be real-valued, even if :attr:`input` is complex.
10705
10706.. warning::
10707
10708    :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd`
10709    and will be removed in a future PyTorch release.
10710
10711    ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with
10712
10713    .. code:: python
10714
10715        U, S, Vh = torch.linalg.svd(A, full_matrices=not some)
10716        V = Vh.mH
10717
10718    ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with
10719
10720    .. code:: python
10721
10722        S = torch.linalg.svdvals(A)
10723
10724.. note:: Differences with :func:`torch.linalg.svd`:
10725
10726             * :attr:`some` is the opposite of
10727               :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that
10728               default value for both is `True`, so the default behavior is
               the default value for both is `True`, so the default behavior is
10730             * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns
10731               `Vh`, that is, :math:`V^{\text{H}}`.
10732             * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled
10733               tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns
10734               empty tensors.
10735
10736.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices,
10737          then the singular values of each matrix in the batch are returned in descending order.
10738
10739.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`.
10740
10741.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]`
10742          and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors
10743          can be arbitrary bases of the corresponding subspaces.
10744
10745.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd`
10746          (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously,
10747          on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243
10748          and later, and MAGMA's routine `gesdd` on earlier versions of CUDA.
10749
10750.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will
10751          be represented as a column-major matrix (i.e. Fortran-contiguous).
10752
10753.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not
10754             have zero nor repeated singular values.
10755
10756.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to
             `U` and `V` will be numerically unstable, as they depend on
10758             :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix
             has small singular values, as these gradients also depend on :math:`S^{-1}`.
10760
10761.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique,
10762             as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column.
10763             The same happens when :attr:`input` has repeated singular values, where one may multiply
10764             the columns of the spanning subspace in `U` and `V` by a rotation matrix
10765             and `the resulting vectors will span the same subspace`_.
10766             Different platforms, like NumPy, or inputs on different device types,
10767             may produce different `U` and `V` tensors.
10768
10769Args:
10770    input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more
10771                    batch dimensions consisting of `(m, n)` matrices.
10772    some (bool, optional): controls whether to compute the reduced or full decomposition, and
10773                           consequently, the shape of returned `U` and `V`. Default: `True`.
10774    compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`.
10775
10776Keyword args:
10777    out (tuple, optional): the output tuple of tensors
10778
10779Example::
10780
10781    >>> a = torch.randn(5, 3)
10782    >>> a
10783    tensor([[ 0.2364, -0.7752,  0.6372],
10784            [ 1.7201,  0.7394, -0.0504],
10785            [-0.3371, -1.0584,  0.5296],
10786            [ 0.3550, -0.4022,  1.5569],
10787            [ 0.2445, -0.0158,  1.1414]])
10788    >>> u, s, v = torch.svd(a)
10789    >>> u
10790    tensor([[ 0.4027,  0.0287,  0.5434],
10791            [-0.1946,  0.8833,  0.3679],
10792            [ 0.4296, -0.2890,  0.5261],
10793            [ 0.6604,  0.2717, -0.2618],
10794            [ 0.4234,  0.2481, -0.4733]])
10795    >>> s
10796    tensor([2.3289, 2.0315, 0.7806])
10797    >>> v
10798    tensor([[-0.0199,  0.8766,  0.4809],
10799            [-0.5080,  0.4054, -0.7600],
10800            [ 0.8611,  0.2594, -0.4373]])
10801    >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t()))
10802    tensor(8.6531e-07)
10803    >>> a_big = torch.randn(7, 5, 3)
10804    >>> u, s, v = torch.svd(a_big)
10805    >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT))
10806    tensor(2.6503e-06)
10807
10808.. _the resulting vectors will span the same subspace:
       https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
10810""",
10811)
10812
10813
10814add_docstr(
10815    torch.t,
10816    r"""
10817t(input) -> Tensor
10818
Expects :attr:`input` to be a tensor with at most 2 dimensions, and
transposes dimensions 0 and 1.
10821
108220-D and 1-D tensors are returned as is. When input is a 2-D tensor this
10823is equivalent to ``transpose(input, 0, 1)``.
10824
10825Args:
10826    {input}
10827
10828Example::
10829
10830    >>> x = torch.randn(())
10831    >>> x
10832    tensor(0.1995)
10833    >>> torch.t(x)
10834    tensor(0.1995)
10835    >>> x = torch.randn(3)
10836    >>> x
10837    tensor([ 2.4320, -0.4608,  0.7702])
10838    >>> torch.t(x)
10839    tensor([ 2.4320, -0.4608,  0.7702])
10840    >>> x = torch.randn(2, 3)
10841    >>> x
10842    tensor([[ 0.4875,  0.9158, -0.5872],
10843            [ 0.3938, -0.6929,  0.6932]])
10844    >>> torch.t(x)
10845    tensor([[ 0.4875,  0.3938],
10846            [ 0.9158, -0.6929],
10847            [-0.5872,  0.6932]])
10848
10849See also :func:`torch.transpose`.
10850""".format(**common_args),
10851)
10852
10853add_docstr(
10854    torch.flip,
10855    r"""
10856flip(input, dims) -> Tensor
10857
Reverse the order of an n-D tensor along the given axes in :attr:`dims`.
10859
10860.. note::
10861    `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
10862    which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
10863    `torch.flip` is expected to be slower than `np.flip`.
10864
10865Args:
10866    {input}
    dims (a list or tuple): axes to flip on
10868
10869Example::
10870
10871    >>> x = torch.arange(8).view(2, 2, 2)
10872    >>> x
10873    tensor([[[ 0,  1],
10874             [ 2,  3]],
10875
10876            [[ 4,  5],
10877             [ 6,  7]]])
10878    >>> torch.flip(x, [0, 1])
10879    tensor([[[ 6,  7],
10880             [ 4,  5]],
10881
10882            [[ 2,  3],
10883             [ 0,  1]]])
10884""".format(**common_args),
10885)
10886
10887add_docstr(
10888    torch.fliplr,
10889    r"""
10890fliplr(input) -> Tensor
10891
10892Flip tensor in the left/right direction, returning a new tensor.
10893
10894Flip the entries in each row in the left/right direction.
10895Columns are preserved, but appear in a different order than before.
10896
10897Note:
10898    Requires the tensor to be at least 2-D.
10899
10900.. note::
10901    `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`,
10902    which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
10903    `torch.fliplr` is expected to be slower than `np.fliplr`.
10904
10905Args:
10906    input (Tensor): Must be at least 2-dimensional.
10907
10908Example::
10909
10910    >>> x = torch.arange(4).view(2, 2)
10911    >>> x
10912    tensor([[0, 1],
10913            [2, 3]])
10914    >>> torch.fliplr(x)
10915    tensor([[1, 0],
10916            [3, 2]])
10917""".format(**common_args),
10918)
10919
10920add_docstr(
10921    torch.flipud,
10922    r"""
10923flipud(input) -> Tensor
10924
10925Flip tensor in the up/down direction, returning a new tensor.
10926
10927Flip the entries in each column in the up/down direction.
10928Rows are preserved, but appear in a different order than before.
10929
10930Note:
10931    Requires the tensor to be at least 1-D.
10932
10933.. note::
10934    `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`,
10935    which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
10936    `torch.flipud` is expected to be slower than `np.flipud`.
10937
10938Args:
10939    input (Tensor): Must be at least 1-dimensional.
10940
10941Example::
10942
10943    >>> x = torch.arange(4).view(2, 2)
10944    >>> x
10945    tensor([[0, 1],
10946            [2, 3]])
10947    >>> torch.flipud(x)
10948    tensor([[2, 3],
10949            [0, 1]])
10950""".format(**common_args),
10951)
10952
10953add_docstr(
10954    torch.roll,
10955    r"""
10956roll(input, shifts, dims=None) -> Tensor
10957
10958Roll the tensor :attr:`input` along the given dimension(s). Elements that are
10959shifted beyond the last position are re-introduced at the first position. If
10960:attr:`dims` is `None`, the tensor will be flattened before rolling and then
10961restored to the original shape.
10962
10963Args:
10964    {input}
10965    shifts (int or tuple of ints): The number of places by which the elements
10966        of the tensor are shifted. If shifts is a tuple, dims must be a tuple of
10967        the same size, and each dimension will be rolled by the corresponding
10968        value
    dims (int or tuple of ints): Axis or axes along which to roll
10970
10971Example::
10972
10973    >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
10974    >>> x
10975    tensor([[1, 2],
10976            [3, 4],
10977            [5, 6],
10978            [7, 8]])
10979    >>> torch.roll(x, 1)
10980    tensor([[8, 1],
10981            [2, 3],
10982            [4, 5],
10983            [6, 7]])
10984    >>> torch.roll(x, 1, 0)
10985    tensor([[7, 8],
10986            [1, 2],
10987            [3, 4],
10988            [5, 6]])
10989    >>> torch.roll(x, -1, 0)
10990    tensor([[3, 4],
10991            [5, 6],
10992            [7, 8],
10993            [1, 2]])
10994    >>> torch.roll(x, shifts=(2, 1), dims=(0, 1))
10995    tensor([[6, 5],
10996            [8, 7],
10997            [2, 1],
10998            [4, 3]])
10999""".format(**common_args),
11000)
11001
11002add_docstr(
11003    torch.rot90,
11004    r"""
11005rot90(input, k=1, dims=(0, 1)) -> Tensor
11006
Rotate an n-D tensor by 90 degrees in the plane specified by the :attr:`dims` axes.
11008Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0.
11009
11010Args:
11011    {input}
11012    k (int): number of times to rotate. Default value is 1
    dims (a list or tuple): the two axes that define the plane of rotation. Default value is [0, 1]
11014
11015Example::
11016
11017    >>> x = torch.arange(4).view(2, 2)
11018    >>> x
11019    tensor([[0, 1],
11020            [2, 3]])
11021    >>> torch.rot90(x, 1, [0, 1])
11022    tensor([[1, 3],
11023            [0, 2]])
11024
11025    >>> x = torch.arange(8).view(2, 2, 2)
11026    >>> x
11027    tensor([[[0, 1],
11028             [2, 3]],
11029
11030            [[4, 5],
11031             [6, 7]]])
11032    >>> torch.rot90(x, 1, [1, 2])
11033    tensor([[[1, 3],
11034             [0, 2]],
11035
11036            [[5, 7],
11037             [4, 6]]])
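
Rotation with a negative :attr:`k` goes clockwise (an illustrative sketch)::

    >>> torch.rot90(torch.arange(4).view(2, 2), -1, [0, 1])
    tensor([[2, 0],
            [3, 1]])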
11038""".format(**common_args),
11039)
11040
11041add_docstr(
11042    torch.take,
11043    r"""
11044take(input, index) -> Tensor
11045
11046Returns a new tensor with the elements of :attr:`input` at the given indices.
11047The input tensor is treated as if it were viewed as a 1-D tensor. The result
11048takes the same shape as the indices.
11049
11050Args:
11051    {input}
11052    index (LongTensor): the indices into tensor
11053
11054Example::
11055
11056    >>> src = torch.tensor([[4, 3, 5],
11057    ...                     [6, 7, 8]])
11058    >>> torch.take(src, torch.tensor([0, 2, 5]))
11059    tensor([ 4,  5,  8])
11060""".format(**common_args),
11061)
11062
11063add_docstr(
11064    torch.take_along_dim,
11065    r"""
11066take_along_dim(input, indices, dim=None, *, out=None) -> Tensor
11067
11068Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`.
11069
11070If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d.
11071
11072Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`,
11073are designed to work with this function. See the examples below.
11074
11075.. note::
11076    This function is similar to NumPy's `take_along_axis`.
11077    See also :func:`torch.gather`.
11078
11079Args:
11080    {input}
11081    indices (tensor): the indices into :attr:`input`. Must have long dtype.
11082    dim (int, optional): dimension to select along.
11083
11084Keyword args:
11085    {out}
11086
11087Example::
11088
11089    >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]])
11090    >>> max_idx = torch.argmax(t)
11091    >>> torch.take_along_dim(t, max_idx)
11092    tensor([60])
11093    >>> sorted_idx = torch.argsort(t, dim=1)
11094    >>> torch.take_along_dim(t, sorted_idx, dim=1)
11095    tensor([[10, 20, 30],
11096            [40, 50, 60]])
11097""".format(**common_args),
11098)
11099
11100add_docstr(
11101    torch.tan,
11102    r"""
11103tan(input, *, out=None) -> Tensor
11104
11105Returns a new tensor with the tangent of the elements of :attr:`input`.
11106
11107.. math::
11108    \text{out}_{i} = \tan(\text{input}_{i})
11109"""
11110    + r"""
11111Args:
11112    {input}
11113
11114Keyword args:
11115    {out}
11116
11117Example::
11118
11119    >>> a = torch.randn(4)
11120    >>> a
11121    tensor([-1.2027, -1.7687,  0.4412, -1.3856])
11122    >>> torch.tan(a)
11123    tensor([-2.5930,  4.9859,  0.4722, -5.3366])
11124""".format(**common_args),
11125)
11126
11127add_docstr(
11128    torch.tanh,
11129    r"""
11130tanh(input, *, out=None) -> Tensor
11131
11132Returns a new tensor with the hyperbolic tangent of the elements
11133of :attr:`input`.
11134
11135.. math::
11136    \text{out}_{i} = \tanh(\text{input}_{i})
11137"""
11138    + r"""
11139Args:
11140    {input}
11141
11142Keyword args:
11143    {out}
11144
11145Example::
11146
11147    >>> a = torch.randn(4)
11148    >>> a
11149    tensor([ 0.8986, -0.7279,  1.1745,  0.2611])
11150    >>> torch.tanh(a)
11151    tensor([ 0.7156, -0.6218,  0.8257,  0.2553])
11152""".format(**common_args),
11153)
11154
11155add_docstr(
11156    # torch.softmax doc str. Point this to torch.nn.functional.softmax
11157    torch.softmax,
11158    r"""
11159softmax(input, dim, *, dtype=None) -> Tensor
11160
11161Alias for :func:`torch.nn.functional.softmax`.
11162""",
11163)
11164
11165add_docstr(
11166    torch.topk,
11167    r"""
11168topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor)
11169
11170Returns the :attr:`k` largest elements of the given :attr:`input` tensor along
11171a given dimension.
11172
11173If :attr:`dim` is not given, the last dimension of the `input` is chosen.
11174
11175If :attr:`largest` is ``False`` then the `k` smallest elements are returned.
11176
11177A namedtuple of `(values, indices)` is returned with the `values` and
11178`indices` of the largest `k` elements of each row of the `input` tensor in the
11179given dimension `dim`.
11180
If the boolean option :attr:`sorted` is ``True``, the returned
`k` elements are themselves sorted.
11183
11184Args:
11185    {input}
11186    k (int): the k in "top-k"
11187    dim (int, optional): the dimension to sort along
11188    largest (bool, optional): controls whether to return largest or
11189           smallest elements
11190    sorted (bool, optional): controls whether to return the elements
11191           in sorted order
11192
11193Keyword args:
11194    out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be
11195        optionally given to be used as output buffers
11196
11197Example::
11198
11199    >>> x = torch.arange(1., 6.)
11200    >>> x
11201    tensor([ 1.,  2.,  3.,  4.,  5.])
11202    >>> torch.topk(x, 3)
11203    torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2]))
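
    >>> # The smallest elements are obtained with largest=False
    >>> # (an illustrative follow-up).
    >>> torch.topk(x, 3, largest=False)
    torch.return_types.topk(values=tensor([1., 2., 3.]), indices=tensor([0, 1, 2]))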
11204""".format(**common_args),
11205)
11206
11207add_docstr(
11208    torch.trace,
11209    r"""
11210trace(input) -> Tensor
11211
11212Returns the sum of the elements of the diagonal of the input 2-D matrix.
11213
11214Example::
11215
11216    >>> x = torch.arange(1., 10.).view(3, 3)
11217    >>> x
11218    tensor([[ 1.,  2.,  3.],
11219            [ 4.,  5.,  6.],
11220            [ 7.,  8.,  9.]])
11221    >>> torch.trace(x)
11222    tensor(15.)
11223""",
11224)
11225
11226add_docstr(
11227    torch.transpose,
11228    r"""
11229transpose(input, dim0, dim1) -> Tensor
11230
11231Returns a tensor that is a transposed version of :attr:`input`.
11232The given dimensions :attr:`dim0` and :attr:`dim1` are swapped.
11233
11234If :attr:`input` is a strided tensor then the resulting :attr:`out`
11235tensor shares its underlying storage with the :attr:`input` tensor, so
11236changing the content of one would change the content of the other.
11237
11238If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` then the
11239resulting :attr:`out` tensor *does not* share the underlying storage
11240with the :attr:`input` tensor.
11241
11242If :attr:`input` is a :ref:`sparse tensor <sparse-docs>` with compressed
11243layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments
11244:attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must
11245both be sparse dimensions. The batch dimensions of a sparse tensor are the
11246dimensions preceding the sparse dimensions.
11247
11248.. note::
11249    Transpositions which interchange the sparse dimensions of a `SparseCSR`
11250    or `SparseCSC` layout tensor will result in the layout changing between
    the two options. Transposition of the sparse dimensions of a `SparseBSR`
11252    or `SparseBSC` layout tensor will likewise generate a result with the
11253    opposite layout.
11254
11255
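For instance (a brief sketch, assuming a build with sparse compressed
tensor support)::

    >>> sc = torch.tensor([[1., 0.], [0., 2.]]).to_sparse_csr()
    >>> torch.transpose(sc, 0, 1).layout
    torch.sparse_csc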
11256Args:
11257    {input}
11258    dim0 (int): the first dimension to be transposed
11259    dim1 (int): the second dimension to be transposed
11260
11261Example::
11262
11263    >>> x = torch.randn(2, 3)
11264    >>> x
11265    tensor([[ 1.0028, -0.9893,  0.5809],
11266            [-0.1669,  0.7299,  0.4942]])
11267    >>> torch.transpose(x, 0, 1)
11268    tensor([[ 1.0028, -0.1669],
11269            [-0.9893,  0.7299],
11270            [ 0.5809,  0.4942]])
11271
11272See also :func:`torch.t`.
11273""".format(**common_args),
11274)
11275
11276add_docstr(
11277    torch.triangular_solve,
11278    r"""
11279triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor)
11280
11281Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A`
11282and multiple right-hand sides :math:`b`.
11283
11284In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular
11285(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal.
11286
11287`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are
batches of 2D matrices. If the inputs are batches, then it returns
batched outputs `X`.
11290
11291If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and
11292:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned,
the result may contain `NaN` values.
11294
11295Supports input of float, double, cfloat and cdouble data types.
11296
11297.. warning::
11298
11299    :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular`
11300    and will be removed in a future PyTorch release.
11301    :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a
11302    copy of one of the inputs.
11303
11304    ``X = torch.triangular_solve(B, A).solution`` should be replaced with
11305
11306    .. code:: python
11307
11308        X = torch.linalg.solve_triangular(A, B)
11309
11310Args:
11311    b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where
                :math:`*` is zero or more batch dimensions
11313    A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)`
11314                where :math:`*` is zero or more batch dimensions
11315    upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``.
11316    transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``,
11317                                and `op(A) = A` if it is ``False``. Default: ``False``.
11318    unitriangular (bool, optional): whether :math:`A` is unit triangular.
11319        If True, the diagonal elements of :math:`A` are assumed to be
11320        1 and not referenced from :math:`A`. Default: ``False``.
11321
11322Keyword args:
11323    out ((Tensor, Tensor), optional): tuple of two tensors to write
11324        the output to. Ignored if `None`. Default: `None`.
11325
11326Returns:
11327    A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient`
11328    is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b`
    (or whichever variant of the system of equations was requested via the keyword arguments).
11330
11331Examples::
11332
11333    >>> A = torch.randn(2, 2).triu()
11334    >>> A
11335    tensor([[ 1.1527, -1.0753],
11336            [ 0.0000,  0.7986]])
11337    >>> b = torch.randn(2, 3)
11338    >>> b
11339    tensor([[-0.0210,  2.3513, -1.5492],
11340            [ 1.5429,  0.7403, -1.0243]])
11341    >>> torch.triangular_solve(b, A)
11342    torch.return_types.triangular_solve(
11343    solution=tensor([[ 1.7841,  2.9046, -2.5405],
11344            [ 1.9320,  0.9270, -1.2826]]),
11345    cloned_coefficient=tensor([[ 1.1527, -1.0753],
11346            [ 0.0000,  0.7986]]))
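
    >>> # Per the warning above, the same solution is obtained from the
    >>> # replacement API (an illustrative check).
    >>> X = torch.triangular_solve(b, A).solution
    >>> torch.allclose(X, torch.linalg.solve_triangular(A, b, upper=True))
    True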
11347""",
11348)
11349
11350add_docstr(
11351    torch.tril,
11352    r"""
11353tril(input, diagonal=0, *, out=None) -> Tensor
11354
11355Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices
:attr:`input`; the other elements of the result tensor :attr:`out` are set to 0.
11357
11358The lower triangular part of the matrix is defined as the elements on and
11359below the diagonal.
11360
11361The argument :attr:`diagonal` controls which diagonal to consider. If
11362:attr:`diagonal` = 0, all elements on and below the main diagonal are
11363retained. A positive value includes just as many diagonals above the main
11364diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
11366:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
11367:math:`d_{1}, d_{2}` are the dimensions of the matrix.
11368"""
11369    + r"""
11370Args:
11371    {input}
11372    diagonal (int, optional): the diagonal to consider
11373
11374Keyword args:
11375    {out}
11376
11377Example::
11378
11379    >>> a = torch.randn(3, 3)
11380    >>> a
11381    tensor([[-1.0813, -0.8619,  0.7105],
11382            [ 0.0935,  0.1380,  2.2112],
11383            [-0.3409, -0.9828,  0.0289]])
11384    >>> torch.tril(a)
11385    tensor([[-1.0813,  0.0000,  0.0000],
11386            [ 0.0935,  0.1380,  0.0000],
11387            [-0.3409, -0.9828,  0.0289]])
11388
11389    >>> b = torch.randn(4, 6)
11390    >>> b
11391    tensor([[ 1.2219,  0.5653, -0.2521, -0.2345,  1.2544,  0.3461],
11392            [ 0.4785, -0.4477,  0.6049,  0.6368,  0.8775,  0.7145],
11393            [ 1.1502,  3.2716, -1.1243, -0.5413,  0.3615,  0.6864],
11394            [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024,  0.0978]])
11395    >>> torch.tril(b, diagonal=1)
11396    tensor([[ 1.2219,  0.5653,  0.0000,  0.0000,  0.0000,  0.0000],
11397            [ 0.4785, -0.4477,  0.6049,  0.0000,  0.0000,  0.0000],
11398            [ 1.1502,  3.2716, -1.1243, -0.5413,  0.0000,  0.0000],
11399            [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024,  0.0000]])
11400    >>> torch.tril(b, diagonal=-1)
11401    tensor([[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
11402            [ 0.4785,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
11403            [ 1.1502,  3.2716,  0.0000,  0.0000,  0.0000,  0.0000],
11404            [-0.0614, -0.7344, -1.3164,  0.0000,  0.0000,  0.0000]])
11405""".format(**common_args),
11406)
11407
# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
11409# as common args.
11410add_docstr(
11411    torch.tril_indices,
11412    r"""
11413tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
11414
11415Returns the indices of the lower triangular part of a :attr:`row`-by-
11416:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
11417coordinates of all indices and the second row contains column coordinates.
11418Indices are ordered based on rows and then columns.
11419
11420The lower triangular part of the matrix is defined as the elements on and
11421below the diagonal.
11422
11423The argument :attr:`offset` controls which diagonal to consider. If
11424:attr:`offset` = 0, all elements on and below the main diagonal are
11425retained. A positive value includes just as many diagonals above the main
11426diagonal, and similarly a negative value excludes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
11428:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
11429where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
11430
11431.. note::
11432    When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
11433    prevent overflow during calculation.
11434"""
11435    + r"""
11436Args:
11437    row (``int``): number of rows in the 2-D matrix.
11438    col (``int``): number of columns in the 2-D matrix.
11439    offset (``int``): diagonal offset from the main diagonal.
11440        Default: if not provided, 0.
11441
11442Keyword args:
11443    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
11444        Default: if ``None``, ``torch.long``.
11445    {device}
    layout (:class:`torch.layout`, optional): currently only supports ``torch.strided``.
11447
11448Example::
11449
11450    >>> a = torch.tril_indices(3, 3)
11451    >>> a
11452    tensor([[0, 1, 1, 2, 2, 2],
11453            [0, 0, 1, 0, 1, 2]])
11454
11455    >>> a = torch.tril_indices(4, 3, -1)
11456    >>> a
11457    tensor([[1, 2, 2, 3, 3, 3],
11458            [0, 0, 1, 0, 1, 2]])
11459
11460    >>> a = torch.tril_indices(4, 3, 1)
11461    >>> a
11462    tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
11463            [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]])
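
    >>> # The returned coordinates can be used to gather the
    >>> # lower-triangular entries of a matrix (an illustrative sketch).
    >>> m = torch.arange(9.).view(3, 3)
    >>> idx = torch.tril_indices(3, 3)
    >>> m[idx[0], idx[1]]
    tensor([0., 3., 4., 6., 7., 8.])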
11464""".format(**factory_common_args),
11465)
11466
11467add_docstr(
11468    torch.triu,
11469    r"""
11470triu(input, diagonal=0, *, out=None) -> Tensor
11471
11472Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
:attr:`input`; the other elements of the result tensor :attr:`out` are set to 0.
11474
11475The upper triangular part of the matrix is defined as the elements on and
11476above the diagonal.
11477
11478The argument :attr:`diagonal` controls which diagonal to consider. If
11479:attr:`diagonal` = 0, all elements on and above the main diagonal are
11480retained. A positive value excludes just as many diagonals above the main
11481diagonal, and similarly a negative value includes just as many diagonals below
the main diagonal. The main diagonal is the set of indices
11483:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
11484:math:`d_{1}, d_{2}` are the dimensions of the matrix.
11485"""
11486    + r"""
11487Args:
11488    {input}
11489    diagonal (int, optional): the diagonal to consider
11490
11491Keyword args:
11492    {out}
11493
11494Example::
11495
11496    >>> a = torch.randn(3, 3)
11497    >>> a
11498    tensor([[ 0.2309,  0.5207,  2.0049],
11499            [ 0.2072, -1.0680,  0.6602],
11500            [ 0.3480, -0.5211, -0.4573]])
11501    >>> torch.triu(a)
11502    tensor([[ 0.2309,  0.5207,  2.0049],
11503            [ 0.0000, -1.0680,  0.6602],
11504            [ 0.0000,  0.0000, -0.4573]])
11505    >>> torch.triu(a, diagonal=1)
11506    tensor([[ 0.0000,  0.5207,  2.0049],
11507            [ 0.0000,  0.0000,  0.6602],
11508            [ 0.0000,  0.0000,  0.0000]])
11509    >>> torch.triu(a, diagonal=-1)
11510    tensor([[ 0.2309,  0.5207,  2.0049],
11511            [ 0.2072, -1.0680,  0.6602],
11512            [ 0.0000, -0.5211, -0.4573]])
11513
11514    >>> b = torch.randn(4, 6)
11515    >>> b
11516    tensor([[ 0.5876, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
11517            [-0.2447,  0.9556, -1.2919,  1.3378, -0.1768, -1.0857],
11518            [ 0.4333,  0.3146,  0.6576, -1.0432,  0.9348, -0.4410],
11519            [-0.9888,  1.0679, -1.3337, -1.6556,  0.4798,  0.2830]])
11520    >>> torch.triu(b, diagonal=1)
11521    tensor([[ 0.0000, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
11522            [ 0.0000,  0.0000, -1.2919,  1.3378, -0.1768, -1.0857],
11523            [ 0.0000,  0.0000,  0.0000, -1.0432,  0.9348, -0.4410],
11524            [ 0.0000,  0.0000,  0.0000,  0.0000,  0.4798,  0.2830]])
11525    >>> torch.triu(b, diagonal=-1)
11526    tensor([[ 0.5876, -0.0794, -1.8373,  0.6654,  0.2604,  1.5235],
11527            [-0.2447,  0.9556, -1.2919,  1.3378, -0.1768, -1.0857],
11528            [ 0.0000,  0.3146,  0.6576, -1.0432,  0.9348, -0.4410],
11529            [ 0.0000,  0.0000, -1.3337, -1.6556,  0.4798,  0.2830]])
11530""".format(**common_args),
11531)
11532
11533# docstr is split in two parts to avoid format mis-capturing :math: braces '{}'
11534# as common args.
11535add_docstr(
11536    torch.triu_indices,
11537    r"""
11538triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor
11539
11540Returns the indices of the upper triangular part of a :attr:`row` by
11541:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row
11542coordinates of all indices and the second row contains column coordinates.
11543Indices are ordered based on rows and then columns.
11544
11545The upper triangular part of the matrix is defined as the elements on and
11546above the diagonal.
11547
11548The argument :attr:`offset` controls which diagonal to consider. If
11549:attr:`offset` = 0, all elements on and above the main diagonal are
11550retained. A positive value excludes just as many diagonals above the main
11551diagonal, and similarly a negative value includes just as many diagonals below
11552the main diagonal. The main diagonal are the set of indices
11553:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]`
11554where :math:`d_{1}, d_{2}` are the dimensions of the matrix.
11555
11556.. note::
11557    When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to
11558    prevent overflow during calculation.
11559"""
11560    + r"""
11561Args:
11562    row (``int``): number of rows in the 2-D matrix.
11563    col (``int``): number of columns in the 2-D matrix.
11564    offset (``int``): diagonal offset from the main diagonal.
11565        Default: if not provided, 0.
11566
11567Keyword args:
11568    dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
11569        Default: if ``None``, ``torch.long``.
11570    {device}
    layout (:class:`torch.layout`, optional): currently only supports ``torch.strided``.
11572
11573Example::
11574
11575    >>> a = torch.triu_indices(3, 3)
11576    >>> a
11577    tensor([[0, 0, 0, 1, 1, 2],
11578            [0, 1, 2, 1, 2, 2]])
11579
11580    >>> a = torch.triu_indices(4, 3, -1)
11581    >>> a
11582    tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3],
11583            [0, 1, 2, 0, 1, 2, 1, 2, 2]])
11584
11585    >>> a = torch.triu_indices(4, 3, 1)
11586    >>> a
11587    tensor([[0, 0, 1],
11588            [1, 2, 2]])
11589""".format(**factory_common_args),
11590)
11591
11592add_docstr(
11593    torch.true_divide,
11594    r"""
11595true_divide(dividend, divisor, *, out) -> Tensor
11596
11597Alias for :func:`torch.div` with ``rounding_mode=None``.
11598""",
11599)
11600
11601add_docstr(
11602    torch.trunc,
11603    r"""
11604trunc(input, *, out=None) -> Tensor
11605
11606Returns a new tensor with the truncated integer values of
11607the elements of :attr:`input`.
11608
11609For integer inputs, follows the array-api convention of returning a
11610copy of the input tensor.
11611
11612Args:
11613    {input}
11614
11615Keyword args:
11616    {out}
11617
11618Example::
11619
11620    >>> a = torch.randn(4)
11621    >>> a
11622    tensor([ 3.4742,  0.5466, -0.8008, -0.9079])
11623    >>> torch.trunc(a)
11624    tensor([ 3.,  0., -0., -0.])
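
    >>> # Integer inputs are returned unchanged, as a copy (illustrative).
    >>> torch.trunc(torch.tensor([1, 2, 3]))
    tensor([1, 2, 3])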
11625""".format(**common_args),
11626)
11627
11628add_docstr(
11629    torch.fake_quantize_per_tensor_affine,
11630    r"""
11631fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor
11632
11633Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`,
11634:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`.
11635
11636.. math::
11637    \text{output} = (
11638        min(
11639            \text{quant\_max},
11640            max(
11641                \text{quant\_min},
11642                \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
11643            )
11644        ) - \text{zero\_point}
11645    ) \times \text{scale}
11646
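For intuition, the transformation amounts to the following sketch, where
``fq`` is a hypothetical helper rather than part of the API
(:func:`torch.round` rounds half to even, matching ``std::nearby_int``
under the default rounding mode)::

    def fq(x, scale, zero_point, quant_min, quant_max):
        q = torch.round(x / scale) + zero_point   # quantize
        q = q.clamp(quant_min, quant_max)         # clamp to the quantized domain
        return (q - zero_point) * scale           # dequantize
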
11647Args:
11648    input (Tensor): the input value(s), ``torch.float32`` tensor
11649    scale (double scalar or ``float32`` Tensor): quantization scale
11650    zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point
11651    quant_min (int64): lower bound of the quantized domain
11652    quant_max (int64): upper bound of the quantized domain
11653
11654Returns:
11655    Tensor: A newly fake_quantized ``torch.float32`` tensor
11656
11657Example::
11658
11659    >>> x = torch.randn(4)
11660    >>> x
11661    tensor([ 0.0552,  0.9730,  0.3973, -1.0780])
11662    >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
11663    tensor([0.1000, 1.0000, 0.4000, 0.0000])
11664    >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255)
11665    tensor([0.1000, 1.0000, 0.4000, 0.0000])
11666""",
11667)
11668
11669add_docstr(
11670    torch.fake_quantize_per_channel_affine,
11671    r"""
11672fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor
11673
11674Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`,
11675:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`.
11676
11677.. math::
11678    \text{output} = (
11679        min(
11680            \text{quant\_max},
11681            max(
11682                \text{quant\_min},
11683                \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point}
11684            )
11685        ) - \text{zero\_point}
11686    ) \times \text{scale}
11687
11688Args:
11689    input (Tensor): the input value(s), in ``torch.float32``
11690    scale (Tensor): quantization scale, per channel in ``torch.float32``
11691    zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32``
11692    axis (int32): channel axis
11693    quant_min (int64): lower bound of the quantized domain
11694    quant_max (int64): upper bound of the quantized domain
11695
11696Returns:
11697    Tensor: A newly fake_quantized per channel ``torch.float32`` tensor
11698
11699Example::
11700
11701    >>> x = torch.randn(2, 2, 2)
11702    >>> x
11703    tensor([[[-0.2525, -0.0466],
11704             [ 0.3491, -0.2168]],
11705
11706            [[-0.5906,  1.6258],
11707             [ 0.6444, -0.0542]]])
11708    >>> scales = (torch.randn(2) + 1) * 0.05
11709    >>> scales
11710    tensor([0.0475, 0.0486])
11711    >>> zero_points = torch.zeros(2).to(torch.int32)
11712    >>> zero_points
11713    tensor([0, 0])
11714    >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
11715    tensor([[[0.0000, 0.0000],
11716             [0.3405, 0.0000]],
11717
11718            [[0.0000, 1.6134],
             [0.6323, 0.0000]]])
11720""",
11721)
11722
11723add_docstr(
11724    torch.fix,
11725    r"""
11726fix(input, *, out=None) -> Tensor
11727
11728Alias for :func:`torch.trunc`
11729""",
11730)
11731
11732add_docstr(
11733    torch.unsqueeze,
11734    r"""
11735unsqueeze(input, dim) -> Tensor
11736
11737Returns a new tensor with a dimension of size one inserted at the
11738specified position.
11739
11740The returned tensor shares the same underlying data with this tensor.
11741
11742A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)``
11743can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
11744applied at :attr:`dim` = ``dim + input.dim() + 1``.
11745
11746Args:
11747    {input}
11748    dim (int): the index at which to insert the singleton dimension
11749
11750Example::
11751
11752    >>> x = torch.tensor([1, 2, 3, 4])
11753    >>> torch.unsqueeze(x, 0)
11754    tensor([[ 1,  2,  3,  4]])
11755    >>> torch.unsqueeze(x, 1)
11756    tensor([[ 1],
11757            [ 2],
11758            [ 3],
11759            [ 4]])
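    >>> # a negative dim counts from the end: here -1 is equivalent to dim = 1
    >>> torch.unsqueeze(x, -1)
    tensor([[ 1],
            [ 2],
            [ 3],
            [ 4]])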
11760""".format(**common_args),
11761)
11762
11763add_docstr(
11764    torch.var,
11765    r"""
11766var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor
11767
11768Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim`
11769can be a single dimension, list of dimensions, or ``None`` to reduce over all
11770dimensions.
11771
11772The variance (:math:`\sigma^2`) is calculated as
11773
11774.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
11775
11776where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
11777sample mean, :math:`N` is the number of samples and :math:`\delta N` is
11778the :attr:`correction`.
11779"""
11780    + r"""
11781
11782{keepdim_details}
11783
11784Args:
11785    {input}
11786    {opt_dim}
11787
11788Keyword args:
11789    correction (int): difference between the sample size and sample degrees of freedom.
11790        Defaults to `Bessel's correction`_, ``correction=1``.
11791
11792        .. versionchanged:: 2.0
11793            Previously this argument was called ``unbiased`` and was a boolean
11794            with ``True`` corresponding to ``correction=1`` and ``False`` being
11795            ``correction=0``.
11796    {keepdim}
11797    {out}
11798
11799Example::
11800
11801    >>> a = torch.tensor(
11802    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
11803    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
11804    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
11805    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
11806    >>> torch.var(a, dim=1, keepdim=True)
11807    tensor([[1.0631],
11808            [0.5590],
11809            [1.4893],
11810            [0.8258]])
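
    >>> # correction=0 applies no Bessel correction and yields the
    >>> # biased (population) variance instead
    >>> torch.var(a, dim=1, keepdim=True, correction=0)
    tensor([[0.7973],
            [0.4193],
            [1.1170],
            [0.6193]])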
11811
11812.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
11813
11814""".format(**multi_dim_common),
11815)
11816
11817add_docstr(
11818    torch.var_mean,
11819    r"""
11820var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor)
11821
11822Calculates the variance and mean over the dimensions specified by :attr:`dim`.
11823:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to
11824reduce over all dimensions.
11825
11826The variance (:math:`\sigma^2`) is calculated as
11827
11828.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2
11829
11830where :math:`x` is the sample set of elements, :math:`\bar{x}` is the
11831sample mean, :math:`N` is the number of samples and :math:`\delta N` is
11832the :attr:`correction`.
11833"""
11834    + r"""
11835
11836{keepdim_details}
11837
11838Args:
11839    {input}
11840    {opt_dim}
11841
11842Keyword args:
11843    correction (int): difference between the sample size and sample degrees of freedom.
11844        Defaults to `Bessel's correction`_, ``correction=1``.
11845
11846        .. versionchanged:: 2.0
11847            Previously this argument was called ``unbiased`` and was a boolean
11848            with ``True`` corresponding to ``correction=1`` and ``False`` being
11849            ``correction=0``.
11850    {keepdim}
11851    {out}
11852
11853Returns:
11854    A tuple (var, mean) containing the variance and mean.
11855
11856Example::
11857
11858    >>> a = torch.tensor(
11859    ...     [[ 0.2035,  1.2959,  1.8101, -0.4644],
11860    ...      [ 1.5027, -0.3270,  0.5905,  0.6538],
11861    ...      [-1.5745,  1.3330, -0.5596, -0.6548],
11862    ...      [ 0.1264, -0.5080,  1.6420,  0.1992]])
11863    >>> torch.var_mean(a, dim=0, keepdim=True)
11864    (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]),
11865     tensor([[ 0.0645,  0.4485,  0.8707, -0.0665]]))
11866
11867.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction
11868
11869""".format(**multi_dim_common),
11870)
11871
11872add_docstr(
11873    torch.zeros,
11874    r"""
11875zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
11876
11877Returns a tensor filled with the scalar value `0`, with the shape defined
11878by the variable argument :attr:`size`.
11879
11880Args:
11881    size (int...): a sequence of integers defining the shape of the output tensor.
11882        Can be a variable number of arguments or a collection like a list or tuple.
11883
11884Keyword args:
11885    {out}
11886    {dtype}
11887    {layout}
11888    {device}
11889    {requires_grad}
11890
11891Example::
11892
11893    >>> torch.zeros(2, 3)
11894    tensor([[ 0.,  0.,  0.],
11895            [ 0.,  0.,  0.]])
11896
11897    >>> torch.zeros(5)
11898    tensor([ 0.,  0.,  0.,  0.,  0.])
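
    >>> # size may also be given as a single collection, combined with keyword args
    >>> torch.zeros((2, 3), dtype=torch.int32)
    tensor([[0, 0, 0],
            [0, 0, 0]], dtype=torch.int32)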
11899""".format(**factory_common_args),
11900)
11901
11902add_docstr(
11903    torch.zeros_like,
11904    r"""
11905zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
11906
11907Returns a tensor filled with the scalar value `0`, with the same size as
11908:attr:`input`. ``torch.zeros_like(input)`` is equivalent to
11909``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
11910
11911.. warning::
11912    As of 0.4, this function does not support an :attr:`out` keyword. As an alternative,
11913    the old ``torch.zeros_like(input, out=output)`` is equivalent to
11914    ``torch.zeros(input.size(), out=output)``.
11915
11916Args:
11917    {input}
11918
11919Keyword args:
11920    {dtype}
11921    {layout}
11922    {device}
11923    {requires_grad}
11924    {memory_format}
11925
11926Example::
11927
11928    >>> input = torch.empty(2, 3)
11929    >>> torch.zeros_like(input)
11930    tensor([[ 0.,  0.,  0.],
11931            [ 0.,  0.,  0.]])
11932""".format(**factory_like_common_args),
11933)
11934
11935add_docstr(
11936    torch.empty,
11937    """
11938empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \
11939memory_format=torch.contiguous_format) -> Tensor
11940
11941Returns a tensor filled with uninitialized data. The shape of the tensor is
11942defined by the variable argument :attr:`size`.
11943
11944.. note::
11945    If :func:`torch.use_deterministic_algorithms()` and
11946    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
11947    ``True``, the output tensor is initialized to prevent any possible
11948    nondeterministic behavior from using the data as an input to an operation.
11949    Floating point and complex tensors are filled with NaN, and integer tensors
11950    are filled with the maximum value.
11951
11952Args:
11953    size (int...): a sequence of integers defining the shape of the output tensor.
11954        Can be a variable number of arguments or a collection like a list or tuple.
11955
11956Keyword args:
11957    {out}
11958    {dtype}
11959    {layout}
11960    {device}
11961    {requires_grad}
11962    {pin_memory}
11963    {memory_format}
11964
11965Example::
11966
11967    >>> torch.empty((2, 3), dtype=torch.int64)  # the values are uninitialized and arbitrary
11968    tensor([[  94064512430080,               28,   93493411205120],
11969            [7575117094106112, 7142847821712128, 7595550992175104]])
11970""".format(**factory_common_args),
11971)
11972
11973add_docstr(
11974    torch.empty_like,
11975    r"""
11976empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor
11977
11978Returns an uninitialized tensor with the same size as :attr:`input`.
11979``torch.empty_like(input)`` is equivalent to
11980``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``.
11981
11982.. note::
11983    If :func:`torch.use_deterministic_algorithms()` and
11984    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
11985    ``True``, the output tensor is initialized to prevent any possible
11986    nondeterministic behavior from using the data as an input to an operation.
11987    Floating point and complex tensors are filled with NaN, and integer tensors
11988    are filled with the maximum value.
11989
11990Args:
11991    {input}
11992
11993Keyword args:
11994    {dtype}
11995    {layout}
11996    {device}
11997    {requires_grad}
11998    {memory_format}
11999
12000Example::
12001
12002    >>> a = torch.empty((2, 3), dtype=torch.int32, device='cuda')
12003    >>> torch.empty_like(a)
12004    tensor([[0, 0, 0],
12005            [0, 0, 0]], device='cuda:0', dtype=torch.int32)
12006""".format(**factory_like_common_args),
12007)
12008
12009add_docstr(
12010    torch.empty_strided,
12011    r"""
12012empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
12013
12014Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data.
12015
12016.. warning::
12017    If the constructed tensor is "overlapped" (with multiple indices referring to the same element
12018    in memory) its behavior is undefined.
12019
12020.. note::
12021    If :func:`torch.use_deterministic_algorithms()` and
12022    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
12023    ``True``, the output tensor is initialized to prevent any possible
12024    nondeterministic behavior from using the data as an input to an operation.
12025    Floating point and complex tensors are filled with NaN, and integer tensors
12026    are filled with the maximum value.
12027
12028Args:
12029    size (tuple of int): the shape of the output tensor
12030    stride (tuple of int): the strides of the output tensor
12031
12032Keyword args:
12033    {dtype}
12034    {layout}
12035    {device}
12036    {requires_grad}
12037    {pin_memory}
12038
12039Example::
12040
12041    >>> a = torch.empty_strided((2, 3), (1, 2))
12042    >>> a
12043    tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07],
12044            [0.0000e+00, 0.0000e+00, 3.0705e-41]])
12045    >>> a.stride()
12046    (1, 2)
12047    >>> a.size()
12048    torch.Size([2, 3])
12049""".format(**factory_common_args),
12050)
12051
12052add_docstr(
12053    torch.empty_permuted,
12054    r"""
12055empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor
12056
12057Creates an uninitialized, non-overlapping and dense tensor with the
12058specified :attr:`size`, with :attr:`physical_layout` specifying how the
12059dimensions are physically laid out in memory (each logical dimension is listed
12060from outermost to innermost).  :attr:`physical_layout` is a generalization
12061of NCHW/NHWC notation: if each dimension is assigned a number according to
12062what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)``
12063while NHWC is ``(0, 2, 3, 1)``.  Equivalently, the strides of the output
12064tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]``
12065(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``).
12066
12067Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense
12068tensor with no overlaps.  If possible, prefer using this function over
12069:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`.
12070
12071.. note::
12072    If :func:`torch.use_deterministic_algorithms()` and
12073    :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to
12074    ``True``, the output tensor is initialized to prevent any possible
12075    nondeterministic behavior from using the data as an input to an operation.
12076    Floating point and complex tensors are filled with NaN, and integer tensors
12077    are filled with the maximum value.
12078
12079Args:
12080    size (tuple of int): the shape of the output tensor
12081    physical_layout (tuple of int): the ordering of dimensions physically in memory
12082
12083Keyword args:
12084    {dtype}
12085    {layout}
12086    {device}
12087    {requires_grad}
12088    {pin_memory}
12089
12090Examples::
12091
12092    >>> torch.empty((2, 3, 5, 7)).stride()
12093    (105, 35, 7, 1)
12094    >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride()
12095    (105, 35, 7, 1)
12096    >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
12097    (105, 1, 21, 3)
12098    >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride()
12099    (105, 1, 21, 3)
12100    >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order()
12101    (0, 2, 3, 1)
12102""".format(**factory_common_args),
12103)
12104
12105add_docstr(
12106    torch.full,
12107    r"""
12108full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
12109
12110Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The
12111tensor's dtype is inferred from :attr:`fill_value`.
12112
12113Args:
12114    size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
12115        shape of the output tensor.
12116    fill_value (Scalar): the value to fill the output tensor with.
12117
12118Keyword args:
12119    {out}
12120    {dtype}
12121    {layout}
12122    {device}
12123    {requires_grad}
12124
12125Example::
12126
12127    >>> torch.full((2, 3), 3.141592)
12128    tensor([[ 3.1416,  3.1416,  3.1416],
12129            [ 3.1416,  3.1416,  3.1416]])
12130""".format(**factory_common_args),
12131)
12132
12133add_docstr(
12134    torch.full_like,
12135    """
12136full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \
12137memory_format=torch.preserve_format) -> Tensor
12138
12139Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`.
12140``torch.full_like(input, fill_value)`` is equivalent to
12141``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``.
12142
12143Args:
12144    {input}
12145    fill_value: the number to fill the output tensor with.
12146
12147Keyword args:
12148    {dtype}
12149    {layout}
12150    {device}
12151    {requires_grad}
12152    {memory_format}
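
Example::

    >>> # a minimal illustration; the dtype follows the input tensor
    >>> torch.full_like(torch.ones(2, 3), 3.1)
    tensor([[3.1000, 3.1000, 3.1000],
            [3.1000, 3.1000, 3.1000]])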
12153""".format(**factory_like_common_args),
12154)
12155
12156add_docstr(
12157    torch.det,
12158    r"""
12159det(input) -> Tensor
12160
12161Alias for :func:`torch.linalg.det`
12162""",
12163)
12164
12165add_docstr(
12166    torch.where,
12167    r"""
12168where(condition, input, other, *, out=None) -> Tensor
12169
12170Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`.
12171
12172The operation is defined as:
12173
12174.. math::
12175    \text{out}_i = \begin{cases}
12176        \text{input}_i & \text{if } \text{condition}_i \\
12177        \text{other}_i & \text{otherwise} \\
12178    \end{cases}
12179"""
12180    + r"""
12181.. note::
12182    The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable <broadcasting-semantics>`.
12183
12184Arguments:
12185    condition (BoolTensor): When True (nonzero), yield input, otherwise yield other
12186    input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices
12187                          where :attr:`condition` is ``True``
12188    other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices
12189                          where :attr:`condition` is ``False``
12190
12191Keyword args:
12192    {out}
12193
12194Returns:
12195    Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other`
12196
12197Example::
12198
12199    >>> x = torch.randn(3, 2)
12200    >>> y = torch.ones(3, 2)
12201    >>> x
12202    tensor([[-0.4620,  0.3139],
12203            [ 0.3898, -0.7197],
12204            [ 0.0478, -0.1657]])
12205    >>> torch.where(x > 0, 1.0, 0.0)
12206    tensor([[0., 1.],
12207            [1., 0.],
12208            [1., 0.]])
12209    >>> torch.where(x > 0, x, y)
12210    tensor([[ 1.0000,  0.3139],
12211            [ 0.3898,  1.0000],
12212            [ 0.0478,  1.0000]])
12213    >>> x = torch.randn(2, 2, dtype=torch.double)
12214    >>> x
12215    tensor([[ 1.0779,  0.0383],
12216            [-0.8785, -1.1089]], dtype=torch.float64)
12217    >>> torch.where(x > 0, x, 0.)
12218    tensor([[1.0779, 0.0383],
12219            [0.0000, 0.0000]], dtype=torch.float64)
12220
12221.. function:: where(condition) -> tuple of LongTensor
12222   :noindex:
12223
12224``torch.where(condition)`` is identical to
12225``torch.nonzero(condition, as_tuple=True)``.
12226
12227.. note::
12228    See also :func:`torch.nonzero`.
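
Example::

    >>> # equivalent to torch.nonzero(condition, as_tuple=True)
    >>> torch.where(torch.tensor([[True, False], [False, True]]))
    (tensor([0, 1]), tensor([0, 1]))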
12229""".format(**common_args),
12230)
12231
12232add_docstr(
12233    torch.logdet,
12234    r"""
12235logdet(input) -> Tensor
12236
12237Calculates log determinant of a square matrix or batches of square matrices.
12238
12239It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has
12240a negative determinant.
12241
12242.. note::
12243    Backward through :meth:`logdet` internally uses SVD results when :attr:`input`
12244    is not invertible. In this case, double backward through :meth:`logdet` will
12245    be unstable when :attr:`input` doesn't have distinct singular values. See
12246    :func:`torch.linalg.svd` for details.
12247
12248.. seealso::
12249
12250        :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the
12251        absolute value of the determinant of real-valued (resp. complex) square matrices.
12252
12253Arguments:
12254    input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more
12255                batch dimensions.
12256
12257Example::
12258
12259    >>> A = torch.randn(3, 3)
12260    >>> torch.det(A)
12261    tensor(0.2611)
12262    >>> torch.logdet(A)
12263    tensor(-1.3430)
12264    >>> A = torch.randn(3, 2, 2)
    >>> A
12265    tensor([[[ 0.9254, -0.6213],
12266             [-0.5787,  1.6843]],
12267
12268            [[ 0.3242, -0.9665],
12269             [ 0.4539, -0.0887]],
12270
12271            [[ 1.1336, -0.4025],
12272             [-0.7089,  0.9032]]])
12273    >>> A.det()
12274    tensor([1.1990, 0.4099, 0.7386])
12275    >>> A.det().log()
12276    tensor([ 0.1815, -0.8917, -0.3031])
12277""",
12278)
12279
12280add_docstr(
12281    torch.slogdet,
12282    r"""
12283slogdet(input) -> (Tensor, Tensor)
12284
12285Alias for :func:`torch.linalg.slogdet`
12286""",
12287)
12288
12289add_docstr(
12290    torch.pinverse,
12291    r"""
12292pinverse(input, rcond=1e-15) -> Tensor
12293
12294Alias for :func:`torch.linalg.pinv`
12295""",
12296)
12297
12298add_docstr(
12299    torch.hann_window,
12300    """
12301hann_window(window_length, periodic=True, *, dtype=None, \
12302layout=torch.strided, device=None, requires_grad=False) -> Tensor
12303"""
12304    + r"""
12305Hann window function.
12306
12307.. math::
12308    w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] =
12309            \sin^2 \left( \frac{\pi n}{N - 1} \right),
12310
12311where :math:`N` is the full window size.
12312
12313The input :attr:`window_length` is a positive integer controlling the
12314returned window size. The :attr:`periodic` flag determines whether the returned
12315window trims off the last duplicate value from the symmetric window and is
12316ready to be used as a periodic window with functions like
12317:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
12318above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
12319``torch.hann_window(L, periodic=True)`` equal to
12320``torch.hann_window(L + 1, periodic=False)[:-1]``.
12321
12322.. note::
12323    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
12324"""
12325    + r"""
12326Arguments:
12327    window_length (int): the size of returned window
12328    periodic (bool, optional): If True, returns a window to be used as periodic
12329        function. If False, return a symmetric window.
12330
12331Keyword args:
12332    {dtype} Only floating point types are supported.
12333    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
12334          ``torch.strided`` (dense layout) is supported.
12335    {device}
12336    {requires_grad}
12337
12338Returns:
12339    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
12340
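Example::

    >>> # an illustrative check of the formula above with the default periodic=True (N = 5)
    >>> torch.hann_window(4)
    tensor([0.0000, 0.5000, 1.0000, 0.5000])
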
12341""".format(**factory_common_args),
12342)
12343
12344
12345add_docstr(
12346    torch.hamming_window,
12347    """
12348hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \
12349layout=torch.strided, device=None, requires_grad=False) -> Tensor
12350"""
12351    + r"""
12352Hamming window function.
12353
12354.. math::
12355    w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right),
12356
12357where :math:`N` is the full window size.
12358
12359The input :attr:`window_length` is a positive integer controlling the
12360returned window size. The :attr:`periodic` flag determines whether the returned
12361window trims off the last duplicate value from the symmetric window and is
12362ready to be used as a periodic window with functions like
12363:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
12364above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
12365``torch.hamming_window(L, periodic=True)`` equal to
12366``torch.hamming_window(L + 1, periodic=False)[:-1]``.
12367
12368.. note::
12369    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
12370
12371.. note::
12372    This is a generalized version of :meth:`torch.hann_window`.
12373"""
12374    + r"""
12375Arguments:
12376    window_length (int): the size of returned window
12377    periodic (bool, optional): If True, returns a window to be used as periodic
12378        function. If False, return a symmetric window.
12379    alpha (float, optional): The coefficient :math:`\alpha` in the equation above
12380    beta (float, optional): The coefficient :math:`\beta` in the equation above
12381
12382Keyword args:
12383    {dtype} Only floating point types are supported.
12384    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
12385          ``torch.strided`` (dense layout) is supported.
12386    {device}
12387    {requires_grad}
12388
12389Returns:
12390    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window.
12391
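Example::

    >>> # with the defaults alpha=0.54, beta=0.46 and periodic=True (N = 5)
    >>> torch.hamming_window(4)
    tensor([0.0800, 0.5400, 1.0000, 0.5400])
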
12392""".format(**factory_common_args),
12393)
12394
12395
12396add_docstr(
12397    torch.bartlett_window,
12398    """
12399bartlett_window(window_length, periodic=True, *, dtype=None, \
12400layout=torch.strided, device=None, requires_grad=False) -> Tensor
12401"""
12402    + r"""
12403Bartlett window function.
12404
12405.. math::
12406    w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases}
12407        \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\
12408        2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\
12409    \end{cases},
12410
12411where :math:`N` is the full window size.
12412
12413The input :attr:`window_length` is a positive integer controlling the
12414returned window size. The :attr:`periodic` flag determines whether the returned
12415window trims off the last duplicate value from the symmetric window and is
12416ready to be used as a periodic window with functions like
12417:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
12418above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
12419``torch.bartlett_window(L, periodic=True)`` equal to
12420``torch.bartlett_window(L + 1, periodic=False)[:-1]``.
12421
12422.. note::
12423    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
12424"""
12425    + r"""
12426Arguments:
12427    window_length (int): the size of returned window
12428    periodic (bool, optional): If True, returns a window to be used as periodic
12429        function. If False, return a symmetric window.
12430
12431Keyword args:
12432    {dtype} Only floating point types are supported.
12433    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
12434          ``torch.strided`` (dense layout) is supported.
12435    {device}
12436    {requires_grad}
12437
12438Returns:
12439    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
12440
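Example::

    >>> # an illustrative check of the formula above with the default periodic=True (N = 5)
    >>> torch.bartlett_window(4)
    tensor([0.0000, 0.5000, 1.0000, 0.5000])
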
12441""".format(**factory_common_args),
12442)
12443
12444
12445add_docstr(
12446    torch.blackman_window,
12447    """
12448blackman_window(window_length, periodic=True, *, dtype=None, \
12449layout=torch.strided, device=None, requires_grad=False) -> Tensor
12450"""
12451    + r"""
12452Blackman window function.
12453
12454.. math::
12455    w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right)
12456
12457where :math:`N` is the full window size.
12458
12459The input :attr:`window_length` is a positive integer controlling the
12460returned window size. The :attr:`periodic` flag determines whether the returned
12461window trims off the last duplicate value from the symmetric window and is
12462ready to be used as a periodic window with functions like
12463:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in
12464above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have
12465``torch.blackman_window(L, periodic=True)`` equal to
12466``torch.blackman_window(L + 1, periodic=False)[:-1]``.
12467
12468.. note::
12469    If :attr:`window_length` :math:`=1`, the returned window contains a single value 1.
12470"""
12471    + r"""
12472Arguments:
12473    window_length (int): the size of returned window
12474    periodic (bool, optional): If True, returns a window to be used as periodic
12475        function. If False, return a symmetric window.
12476
12477Keyword args:
12478    {dtype} Only floating point types are supported.
12479    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
12480          ``torch.strided`` (dense layout) is supported.
12481    {device}
12482    {requires_grad}
12483
12484Returns:
12485    Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window
12486
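Example::

    >>> # illustrating the periodic/symmetric relationship stated above
    >>> torch.equal(torch.blackman_window(6, periodic=True),
    ...             torch.blackman_window(7, periodic=False)[:-1])
    True
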
12487""".format(**factory_common_args),
12488)
12489
12490
12491add_docstr(
12492    torch.kaiser_window,
12493    """
12494kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \
12495layout=torch.strided, device=None, requires_grad=False) -> Tensor
12496"""
12497    + r"""
12498Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
12499
12500Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
12501``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
12502where ``L`` is the :attr:`window_length`. This function computes:
12503
12504.. math::
12505    out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
12506
12507Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
12508``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
12509The :attr:`periodic` argument is intended as a helpful shorthand
12510to produce a periodic window as input to functions like :func:`torch.stft`.
12511
12512.. note::
12513    If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
12514
12515"""
12516    + r"""
12517Args:
12518    window_length (int): length of the window.
12519    periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
12520        If False, returns a symmetric window suitable for use in filter design.
12521    beta (float, optional): shape parameter for the window.
12522
12523Keyword args:
12524    {dtype}
12525    layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
12526          ``torch.strided`` (dense layout) is supported.
12527    {device}
12528    {requires_grad}
12529
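Example::

    >>> # with beta=0.0, I_0 is constant and the Kaiser window reduces to a rectangular window
    >>> torch.kaiser_window(5, periodic=False, beta=0.0)
    tensor([1., 1., 1., 1., 1.])
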
12530""".format(**factory_common_args),
12531)
12532
12533
12534add_docstr(
12535    torch.vander,
12536    """
12537vander(x, N=None, increasing=False) -> Tensor
12538"""
12539    + r"""
12540Generates a Vandermonde matrix.
12541
12542The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`.
12543If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a
12544matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde.
12545
12546Arguments:
12547    x (Tensor): 1-D input tensor.
12548    N (int, optional): Number of columns in the output. If N is not specified,
12549        a square array is returned :math:`(N = len(x))`.
12550    increasing (bool, optional): Order of the powers of the columns. If True,
12551        the powers increase from left to right, if False (the default) they are reversed.
12552
12553Returns:
12554    Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`,
12555    the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns
12556    are :math:`x^0, x^1, ..., x^{{(N-1)}}`.
12557
12558Example::
12559
12560    >>> x = torch.tensor([1, 2, 3, 5])
12561    >>> torch.vander(x)
12562    tensor([[  1,   1,   1,   1],
12563            [  8,   4,   2,   1],
12564            [ 27,   9,   3,   1],
12565            [125,  25,   5,   1]])
12566    >>> torch.vander(x, N=3)
12567    tensor([[ 1,  1,  1],
12568            [ 4,  2,  1],
12569            [ 9,  3,  1],
12570            [25,  5,  1]])
12571    >>> torch.vander(x, N=3, increasing=True)
12572    tensor([[ 1,  1,  1],
12573            [ 1,  2,  4],
12574            [ 1,  3,  9],
12575            [ 1,  5, 25]])
12576
12577""".format(**factory_common_args),
12578)
12579
12580
12581add_docstr(
12582    torch.unbind,
12583    r"""
12584unbind(input, dim=0) -> seq
12585
12586Removes a tensor dimension.
12587
12588Returns a tuple of all slices along a given dimension, already without it.
12589
12590Arguments:
12591    input (Tensor): the tensor to unbind
12592    dim (int): dimension to remove
12593
12594Example::
12595
12596    >>> torch.unbind(torch.tensor([[1, 2, 3],
12597    ...                            [4, 5, 6],
12598    ...                            [7, 8, 9]]))
12599    (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9]))
12600""",
12601)
12602
12603
12604add_docstr(
12605    torch.combinations,
12606    r"""
12607combinations(input, r=2, with_replacement=False) -> seq
12608
12609Compute combinations of length :math:`r` of the given tensor. The behavior is similar to
12610python's `itertools.combinations` when `with_replacement` is set to `False`, and
12611`itertools.combinations_with_replacement` when `with_replacement` is set to `True`.
12612
12613Arguments:
12614    input (Tensor): 1D vector.
12615    r (int, optional): number of elements to combine
12616    with_replacement (bool, optional): whether to allow duplication in combination
12617
12618Returns:
12619    Tensor: A tensor equivalent to converting all the input tensors into lists, doing
12620    `itertools.combinations` or `itertools.combinations_with_replacement` on these
12621    lists, and finally converting the resulting list into a tensor.
12622
12623Example::
12624
12625    >>> import itertools
    >>> a = [1, 2, 3]
12626    >>> list(itertools.combinations(a, r=2))
12627    [(1, 2), (1, 3), (2, 3)]
12628    >>> list(itertools.combinations(a, r=3))
12629    [(1, 2, 3)]
12630    >>> list(itertools.combinations_with_replacement(a, r=2))
12631    [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)]
12632    >>> tensor_a = torch.tensor(a)
12633    >>> torch.combinations(tensor_a)
12634    tensor([[1, 2],
12635            [1, 3],
12636            [2, 3]])
12637    >>> torch.combinations(tensor_a, r=3)
12638    tensor([[1, 2, 3]])
12639    >>> torch.combinations(tensor_a, with_replacement=True)
12640    tensor([[1, 1],
12641            [1, 2],
12642            [1, 3],
12643            [2, 2],
12644            [2, 3],
12645            [3, 3]])
12646
12647""",
12648)
12649
12650add_docstr(
12651    torch.trapezoid,
12652    r"""
12653trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
12654
12655Computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_ along
12656:attr:`dim`. By default the spacing between elements is assumed to be 1, but
12657:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
12658used to specify arbitrary spacing along :attr:`dim`.
12659
12660
12661Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`,
12662the default computation is
12663
12664.. math::
12665    \begin{aligned}
12666        \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1})
12667    \end{aligned}
12668
12669When :attr:`dx` is specified the computation becomes
12670
12671.. math::
12672    \begin{aligned}
12673        \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1})
12674    \end{aligned}
12675
12676effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified,
12677assuming :attr:`x` is also a one-dimensional tensor with
12678elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes
12679
12680.. math::
12681    \begin{aligned}
12682        \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1})
12683    \end{aligned}
12684
12685When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed.
12686The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x`
12687and :attr:`y`, the function computes the differences between consecutive elements along
12688dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
12689the same shape as the original tensors except that their lengths along :attr:`dim` are reduced by 1.
12690After that, the two tensors are broadcast together to compute the final output as part of the trapezoidal rule.
12691See the examples below for details.
12692
12693.. note::
12694    The trapezoidal rule is a technique for approximating the definite integral of a function
12695    by averaging its left and right Riemann sums. The approximation becomes more accurate as
12696    the resolution of the partition increases.
12697
12698Arguments:
12699    y (Tensor): Values to use when computing the trapezoidal rule.
12700    x (Tensor): If specified, defines spacing between values as specified above.
12701
12702Keyword arguments:
12703    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
12704        is specified, this defaults to 1. Effectively multiplies the result by its value.
12705    dim (int): The dimension along which to compute the trapezoidal rule.
12706        The last (inner-most) dimension by default.
12707
12708Examples::
12709
12710    >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
12711    >>> y = torch.tensor([1, 5, 10])
12712    >>> torch.trapezoid(y)
12713    tensor(10.5)
12714
12715    >>> # Computes the same trapezoidal rule directly to verify
12716    >>> (1 + 10 + 10) / 2
12717    10.5
12718
12719    >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
12720    >>> # NOTE: the result is the same as before, but multiplied by 2
12721    >>> torch.trapezoid(y, dx=2)
12722    tensor(21.0)
12723
12724    >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
12725    >>> x = torch.tensor([1, 3, 6])
12726    >>> torch.trapezoid(y, x)
12727    tensor(28.5)
12728
12729    >>> # Computes the same trapezoidal rule directly to verify
12730    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
12731    28.5
12732
12733    >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
12734    >>> y = torch.arange(9).reshape(3, 3)
    >>> y
12735    tensor([[0, 1, 2],
12736            [3, 4, 5],
12737            [6, 7, 8]])
12738    >>> torch.trapezoid(y)
12739    tensor([ 2., 8., 14.])
12740
12741    >>> # Computes the trapezoidal rule for each column of the matrix
12742    >>> torch.trapezoid(y, dim=0)
12743    tensor([ 6., 8., 10.])
12744
12745    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
12746    >>> #   with the same arbitrary spacing
12747    >>> y = torch.ones(3, 3)
12748    >>> x = torch.tensor([1, 3, 6])
12749    >>> torch.trapezoid(y, x)
12750    tensor([5., 5., 5.])
12751
12752    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
12753    >>> #   with different arbitrary spacing per row
12754    >>> y = torch.ones(3, 3)
12755    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
12756    >>> torch.trapezoid(y, x)
12757    tensor([2., 4., 6.])
12758""",
12759)
12760
12761add_docstr(
12762    torch.trapz,
12763    r"""
12764trapz(y, x, *, dim=-1) -> Tensor
12765
12766Alias for :func:`torch.trapezoid`.
12767""",
12768)
12769
12770add_docstr(
12771    torch.cumulative_trapezoid,
12772    r"""
12773cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
12774
12775Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
12776along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
12777:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
12778used to specify arbitrary spacing along :attr:`dim`.
12779
12780For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
12781and this function is that, :func:`torch.trapezoid` returns a value for each integration,
12782where as this function returns a cumulative value for every spacing within the integration. This
12783is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum.
12784
12785Arguments:
12786    y (Tensor): Values to use when computing the trapezoidal rule.
12787    x (Tensor): If specified, defines spacing between values as specified above.
12788
12789Keyword arguments:
12790    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
12791        is specified, this defaults to 1. Effectively multiplies the result by its value.
12792    dim (int): The dimension along which to compute the trapezoidal rule.
12793        The last (inner-most) dimension by default.
12794
12795Examples::
12796
12797    >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1.
12798    >>> y = torch.tensor([1, 5, 10])
12799    >>> torch.cumulative_trapezoid(y)
12800    tensor([3., 10.5])
12801
12802    >>> # Computes the same trapezoidal rule directly up to each element to verify
12803    >>> (1 + 5) / 2
12804    3.0
12805    >>> (1 + 10 + 10) / 2
12806    10.5
12807
12808    >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2
12809    >>> # NOTE: the result is the same as before, but multiplied by 2
12810    >>> torch.cumulative_trapezoid(y, dx=2)
12811    tensor([6., 21.])
12812
12813    >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing
12814    >>> x = torch.tensor([1, 3, 6])
12815    >>> torch.cumulative_trapezoid(y, x)
12816    tensor([6., 28.5])
12817
12818    >>> # Computes the same trapezoidal rule directly up to each element to verify
12819    >>> ((3 - 1) * (1 + 5)) / 2
12820    6.0
12821    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
12822    28.5
12823
12824    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix
12825    >>> y = torch.arange(9).reshape(3, 3)
    >>> y
12826    tensor([[0, 1, 2],
12827            [3, 4, 5],
12828            [6, 7, 8]])
12829    >>> torch.cumulative_trapezoid(y)
12830    tensor([[ 0.5,  2.],
12831            [ 3.5,  8.],
12832            [ 6.5, 14.]])
12833
12834    >>> # Cumulatively computes the trapezoidal rule for each column of the matrix
12835    >>> torch.cumulative_trapezoid(y, dim=0)
12836    tensor([[ 1.5,  2.5,  3.5],
12837            [ 6.0,  8.0, 10.0]])
12838
12839    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
12840    >>> #   with the same arbitrary spacing
12841    >>> y = torch.ones(3, 3)
12842    >>> x = torch.tensor([1, 3, 6])
12843    >>> torch.cumulative_trapezoid(y, x)
12844    tensor([[2., 5.],
12845            [2., 5.],
12846            [2., 5.]])
12847
12848    >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix
12849    >>> #   with different arbitrary spacing per row
12850    >>> y = torch.ones(3, 3)
12851    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
12852    >>> torch.cumulative_trapezoid(y, x)
12853    tensor([[1., 2.],
12854            [2., 4.],
12855            [3., 6.]])
12856""",
12857)
12858
12859add_docstr(
12860    torch.repeat_interleave,
12861    r"""
12862repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor
12863
12864Repeat elements of a tensor.
12865
12866.. warning::
12867
12868    This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``.
12869
12870Args:
12871    {input}
12872    repeats (Tensor or int): The number of repetitions for each element.
12873        repeats is broadcasted to fit the shape of the given axis.
12874    dim (int, optional): The dimension along which to repeat values.
12875        By default, use the flattened input array, and return a flat output
12876        array.
12877
12878Keyword args:
12879    output_size (int, optional): Total output size for the given axis
12880        (e.g. sum of repeats). If given, it will avoid the stream synchronization
12881        needed to calculate the output shape of the tensor.
12882
12883Returns:
12884    Tensor: Repeated tensor which has the same shape as input, except along the given axis.
12885
12886Example::
12887
12888    >>> x = torch.tensor([1, 2, 3])
12889    >>> x.repeat_interleave(2)
12890    tensor([1, 1, 2, 2, 3, 3])
12891    >>> y = torch.tensor([[1, 2], [3, 4]])
12892    >>> torch.repeat_interleave(y, 2)
12893    tensor([1, 1, 2, 2, 3, 3, 4, 4])
12894    >>> torch.repeat_interleave(y, 3, dim=1)
12895    tensor([[1, 1, 1, 2, 2, 2],
12896            [3, 3, 3, 4, 4, 4]])
12897    >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0)
12898    tensor([[1, 2],
12899            [3, 4],
12900            [3, 4]])
12901    >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3)
12902    tensor([[1, 2],
12903            [3, 4],
12904            [3, 4]])
12905
12906If `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be
12907`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times,
12908`1` appears `n2` times, `2` appears `n3` times, etc.
12909
12910.. function:: repeat_interleave(repeats, *) -> Tensor
12911   :noindex:
12912
12913Repeats 0 `repeats[0]` times, 1 `repeats[1]` times, 2 `repeats[2]` times, etc.
12914
12915Args:
12916    repeats (Tensor): The number of repetitions for each element.
12917
12918Returns:
12919    Tensor: Repeated tensor of size `sum(repeats)`.
12920
12921Example::
12922
12923    >>> torch.repeat_interleave(torch.tensor([1, 2, 3]))
12924    tensor([0, 1, 1, 2, 2, 2])
12925
12926""".format(**common_args),
12927)
12928
12929add_docstr(
12930    torch.tile,
12931    r"""
12932tile(input, dims) -> Tensor
12933
12934Constructs a tensor by repeating the elements of :attr:`input`.
12935The :attr:`dims` argument specifies the number of repetitions
12936in each dimension.
12937
12938If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then
12939ones are prepended to :attr:`dims` until all dimensions are specified.
12940For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims`
12941is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2).
12942
12943Analogously, if :attr:`input` has fewer dimensions than :attr:`dims`
12944specifies, then :attr:`input` is treated as if it were unsqueezed at
12945dimension zero until it has as many dimensions as :attr:`dims` specifies.
12946For example, if :attr:`input` has shape (4, 2) and :attr:`dims`
12947is (3, 3, 2, 2), then :attr:`input` is treated as if it had the
12948shape (1, 1, 4, 2).
12949
12950.. note::
12951
12952    This function is similar to NumPy's tile function.
12953
12954Args:
12955    input (Tensor): the tensor whose elements to repeat.
12956    dims (tuple): the number of repetitions per dimension.
12957
12958Example::
12959
12960    >>> x = torch.tensor([1, 2, 3])
12961    >>> x.tile((2,))
12962    tensor([1, 2, 3, 1, 2, 3])
12963    >>> y = torch.tensor([[1, 2], [3, 4]])
12964    >>> torch.tile(y, (2, 2))
12965    tensor([[1, 2, 1, 2],
12966            [3, 4, 3, 4],
12967            [1, 2, 1, 2],
12968            [3, 4, 3, 4]])
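    >>> # dims with fewer entries than input.dim(): (2,) is treated as (1, 2)
    >>> torch.tile(y, (2,))
    tensor([[1, 2, 1, 2],
            [3, 4, 3, 4]])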
12969""",
12970)
12971
12972add_docstr(
12973    torch.quantize_per_tensor,
12974    r"""
12975quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor
12976
12977Converts a float tensor to a quantized tensor with given scale and zero point.
12978
12979Arguments:
12980    input (Tensor): float tensor or list of tensors to quantize
12981    scale (float or Tensor): scale to apply in quantization formula
12982    zero_point (int or Tensor): integer offset that maps to float zero
12983    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
12984        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
12985
12986Returns:
12987    Tensor: A newly quantized tensor or list of quantized tensors.
12988
12989Example::
12990
12991    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
12992    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
12993           quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10)
12994    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr()
12995    tensor([ 0, 10, 20, 30], dtype=torch.uint8)
12996    >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])],
12997    ...     torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8)
12998    (tensor([-1.,  0.], size=(2,), dtype=torch.quint8,
12999        quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10),
13000        tensor([-2.,  2.], size=(2,), dtype=torch.quint8,
13001        quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20))
13002    >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8)
13003    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
13004       quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10)
13005""",
13006)
13007
13008add_docstr(
13009    torch.quantize_per_tensor_dynamic,
13010    r"""
13011quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor
13012
13013Converts a float tensor to a quantized tensor with scale and zero_point calculated
13014dynamically based on the input.
13015
13016Arguments:
13017    input (Tensor): float tensor or list of tensors to quantize
13018    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
13019        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``
13020    reduce_range (bool): a flag to indicate whether to reduce the range of quantized
13021        data by 1 bit. This is required to avoid instruction overflow on some hardware.
13022
13023Returns:
13024    Tensor: A newly (dynamically) quantized tensor
13025
13026Example::
13027
13028    >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False)
13029    >>> print(t)
13030    tensor([-1.,  0.,  1.,  2.], size=(4,), dtype=torch.quint8,
13031           quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941,
13032           zero_point=85)
13033    >>> t.int_repr()
13034    tensor([  0,  85, 170, 255], dtype=torch.uint8)
13035""",
13036)
13037
13038add_docstr(
13039    torch.quantize_per_channel,
13040    r"""
13041quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor
13042
13043Converts a float tensor to a per-channel quantized tensor with given scales and zero points.
13044
13045Arguments:
13046    input (Tensor): float tensor to quantize
13047    scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)``
13048    zero_points (Tensor): integer 1D tensor of offsets to use, size should match ``input.size(axis)``
13049    axis (int): dimension on which to apply per-channel quantization
13050    dtype (:class:`torch.dtype`): the desired data type of returned tensor.
13051        Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32``
13052
13053Returns:
13054    Tensor: A newly quantized tensor
13055
13056Example::
13057
13058    >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
13059    >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
13060    tensor([[-1.,  0.],
13061            [ 1.,  2.]], size=(2, 2), dtype=torch.quint8,
13062           quantization_scheme=torch.per_channel_affine,
13063           scale=tensor([0.1000, 0.0100], dtype=torch.float64),
13064           zero_point=tensor([10,  0]), axis=0)
13065    >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr()
13066    tensor([[  0,  10],
13067            [100, 200]], dtype=torch.uint8)
13068""",
13069)
13070
13071
13072add_docstr(
13073    torch.quantized_batch_norm,
13074    r"""
13075quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor
13076
13077Applies batch normalization on a 4D (NCHW) quantized tensor.
13078
13079.. math::
13080
13081        y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta
13082
13083Arguments:
13084    input (Tensor): quantized tensor
13085    weight (Tensor): float tensor that corresponds to the gamma, size C
13086    bias (Tensor):  float tensor that corresponds to the beta, size C
13087    mean (Tensor): float mean value in batch normalization, size C
13088    var (Tensor): float tensor for variance, size C
13089    eps (float): a value added to the denominator for numerical stability.
13090    output_scale (float): output quantized tensor scale
13091    output_zero_point (int): output quantized tensor zero_point
13092
13093Returns:
13094    Tensor: A quantized tensor with batch normalization applied.
13095
13096Example::
13097
13098    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
13099    >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2)
13100    tensor([[[[-0.2000, -0.2000],
13101              [ 1.6000, -0.2000]],

13103             [[-0.4000, -0.4000],
13104              [-0.4000,  0.6000]]],


13107            [[[-0.2000, -0.2000],
13108              [-0.2000, -0.2000]],

13110             [[ 0.6000, -0.4000],
13111              [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8,
13112           quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2)
13113""",
13114)
13115
13116
13117add_docstr(
13118    torch.quantized_max_pool1d,
13119    r"""
13120quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
13121
13122Applies a 1D max pooling over an input quantized tensor composed of several input planes.
13123
13124Arguments:
13125    input (Tensor): quantized tensor
13126    kernel_size (``list of int``): the size of the sliding window
13127    stride (``list of int``, optional): the stride of the sliding window
13128    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
13129    dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
13130    ceil_mode (bool, optional):  If True, will use ceil instead of floor to compute the output shape.
13131        Defaults to False.
13132
13133
13134Returns:
13135    Tensor: A quantized tensor with max_pool1d applied.
13136
13137Example::
13138
13139    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8)
13140    >>> torch.quantized_max_pool1d(qx, [2])
13141    tensor([[0.0000],
13142            [1.5000]], size=(2, 1), dtype=torch.quint8,
13143        quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
13144""",
13145)
13146
13147
13148add_docstr(
13149    torch.quantized_max_pool2d,
13150    r"""
13151quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor
13152
13153Applies a 2D max pooling over an input quantized tensor composed of several input planes.
13154
13155Arguments:
13156    input (Tensor): quantized tensor
13157    kernel_size (``list of int``): the size of the sliding window
13158    stride (``list of int``, optional): the stride of the sliding window
13159    padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2
13160    dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1
13161    ceil_mode (bool, optional):  If True, will use ceil instead of floor to compute the output shape.
13162        Defaults to False.
13163
13164
13165Returns:
13166    Tensor: A quantized tensor with max_pool2d applied.
13167
13168Example::
13169
13170    >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8)
13171    >>> torch.quantized_max_pool2d(qx, [2,2])
13172    tensor([[[[1.5000]],
13173
13174            [[1.5000]]],
13175
13176
13177            [[[0.0000]],
13178
13179            [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8,
13180        quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3)
13181""",
13182)
13183
13184
13185add_docstr(
13186    torch.Stream,
13187    r"""
13188Stream(device, *, priority) -> Stream
13189
13190An in-order queue that executes its tasks asynchronously, in first-in-first-out (FIFO) order.
13191It can control or synchronize the execution of other Streams, or block the current host thread to
13192ensure correct task sequencing.

13194See the in-depth description of CUDA behavior at :ref:`cuda-semantics` for details
13195on the exact semantics that apply to all devices.
13196
13197Arguments:
13198    device (:class:`torch.device`, optional): the desired device for the Stream.
13199        If not given, the current :ref:`accelerator<accelerators>` type will be used.
13200    priority (int, optional): priority of the stream, should be 0 or negative, where negative
13201        numbers indicate higher priority. By default, streams have priority 0.
13202
13203Returns:
13204    Stream: A torch.Stream object.
13205
13206Example::
13207
13208    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13209    >>> s_cuda = torch.Stream(device='cuda')
13210""",
13211)
13212
13213
13214add_docstr(
13215    torch.Stream.query,
13216    r"""
13217Stream.query() -> bool
13218
13219Check if all the work submitted has been completed.
13220
13221Returns:
13222    bool: A boolean indicating if all kernels in this stream are completed.
13223
13224Example::
13225
13226    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13227    >>> s_cuda = torch.Stream(device='cuda')
13228    >>> s_cuda.query()
13229    True
13230""",
13231)
13232
13233
13234add_docstr(
13235    torch.Stream.record_event,
13236    r"""
13237Stream.record_event(event) -> Event
13238
13239Record an event, enqueuing it in the Stream to allow further synchronization from the current point in the FIFO queue.
13240
13241Arguments:
13242    event (:class:`torch.Event`, optional): event to record. If not given, a new one will be allocated.
13243
13244Returns:
13245    Event: Recorded event.
13246
13247Example::
13248
13249    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13250    >>> s_cuda = torch.Stream(device='cuda')
13251    >>> e_cuda = s_cuda.record_event()
13252""",
13253)
13254
13255
13256add_docstr(
13257    torch.Stream.synchronize,
13258    r"""
13259Stream.synchronize() -> None
13260
13261Wait for all the kernels in this stream to complete.
13262
13263Example::
13264
13265    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13266    >>> s_cuda = torch.Stream(device='cuda')
13267    >>> s_cuda.synchronize()
13268""",
13269)
13270
13271
13272add_docstr(
13273    torch.Stream.wait_event,
13274    r"""
13275Stream.wait_event(event) -> None
13276
13277Make all future work submitted to the stream wait for an event.
13278
13279Arguments:
13280    event (:class:`torch.Event`): an event to wait for.
13281
13282Example::
13283
13284    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13285    >>> s1_cuda = torch.Stream(device='cuda')
13286    >>> s2_cuda = torch.Stream(device='cuda')
13287    >>> e_cuda = s1_cuda.record_event()
13288    >>> s2_cuda.wait_event(e_cuda)
13289""",
13290)
13291
13292
13293add_docstr(
13294    torch.Stream.wait_stream,
13295    r"""
13296Stream.wait_stream(stream) -> None
13297
13298Synchronize with another stream. All future work submitted to this stream will wait until all kernels
13299already submitted to the given stream are completed.
13300
13301Arguments:
13302    stream (:class:`torch.Stream`): a stream to synchronize.
13303
13304Example::
13305
13306    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13307    >>> s1_cuda = torch.Stream(device='cuda')
13308    >>> s2_cuda = torch.Stream(device='cuda')
13309    >>> s2_cuda.wait_stream(s1_cuda)
13310""",
13311)
13312
13313
13314add_docstr(
13315    torch.Event,
13316    r"""
13317Event(device, *, enable_timing) -> Event
13318
Query and record Stream status to identify or control dependencies across Streams and to measure timing.
13320
13321Arguments:
13322    device (:class:`torch.device`, optional): the desired device for the Event.
13323        If not given, the current :ref:`accelerator<accelerators>` type will be used.
13324    enable_timing (bool, optional): indicates if the event should measure time (default: ``False``).
13325
13326Returns:
    Event: A torch.Event object.
13328
13329Example::
13330
13331    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13332    >>> e_cuda = torch.Event(device='cuda')
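    >>> # with timing enabled, pairs of recorded events can measure elapsed time
    >>> e_timed = torch.Event(device='cuda', enable_timing=True)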
13333""",
13334)
13335
13336
13337add_docstr(
13338    torch.Event.elapsed_time,
13339    r"""
13340Event.elapsed_time(end_event) -> float
13341
Returns the elapsed time in milliseconds between the points at which this event and
:attr:`end_event` were each recorded via :func:`torch.Stream.record_event`.
13344
13345Arguments:
    end_event (:class:`torch.Event`): The ending event, which must have been recorded.
13347
13348Returns:
13349    float: Time between starting and ending event in milliseconds.
13350
13351Example::
13352
13353    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13354    >>> s_cuda = torch.Stream(device='cuda')
13355    >>> e1_cuda = s_cuda.record_event()
13356    >>> e2_cuda = s_cuda.record_event()
13357    >>> ms = e1_cuda.elapsed_time(e2_cuda)
13358""",
13359)
13360
13361
13362add_docstr(
13363    torch.Event.query,
13364    r"""
13365Event.query() -> bool
13366
Check if the stream where this event was recorded has already moved past the point where the event was recorded.
Always returns ``True`` if the Event has not been recorded.
13369
13370Returns:
13371    bool: A boolean indicating if all work currently captured by event has completed.
13372
13373Example::
13374
13375    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13376    >>> s_cuda = torch.Stream(device='cuda')
13377    >>> e_cuda = s_cuda.record_event()
13378    >>> e_cuda.query()
13379    True
13380""",
13381)
13382
13383
13384add_docstr(
13385    torch.Event.record,
13386    r"""
13387Event.record(stream) -> None
13388
13389Record the event in a given stream. The stream's device must match the event's device.
13390This function is equivalent to ``stream.record_event(self)``.
13391
13392Arguments:
    stream (:class:`torch.Stream`, optional): The stream in which to record the event.
        If not given, the current stream will be used.
13395
13396Example::
13397
13398    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13399    >>> e_cuda = torch.Event(device='cuda')
13400    >>> e_cuda.record()
13401""",
13402)
13403
13404
13405add_docstr(
13406    torch.Event.synchronize,
13407    r"""
13408Event.synchronize() -> None
13409
13410Wait for the event to complete. This prevents the CPU thread from proceeding until the event completes.
13411
13412Example::
13413
13414    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13415    >>> s_cuda = torch.Stream(device='cuda')
13416    >>> e_cuda = s_cuda.record_event()
13417    >>> e_cuda.synchronize()
13418""",
13419)
13420
13421
13422add_docstr(
13423    torch.Event.wait,
13424    r"""
13425Event.wait(stream) -> None
13426
13427Make all future work submitted to the given stream wait for this event.
13428
13429Arguments:
    stream (:class:`torch.Stream`, optional): The stream that will wait for this event.
        If not given, the current stream will be used.
13432
13433Example::
13434
13435    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13436    >>> s1_cuda = torch.Stream(device='cuda')
13437    >>> s2_cuda = torch.Stream(device='cuda')
    >>> e_cuda = s1_cuda.record_event()
    >>> e_cuda.wait(s2_cuda)
13440""",
13441)
13442
13443
13444add_docstr(
13445    torch.Generator,
13446    r"""
13447Generator(device='cpu') -> Generator
13448
Creates and returns a generator object that manages the state of the algorithm that
produces pseudo-random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling`
functions.
13452
13453Arguments:
13454    device (:class:`torch.device`, optional): the desired device for the generator.
13455
13456Returns:
    Generator: A torch.Generator object.
13458
13459Example::
13460
13461    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
13462    >>> g_cpu = torch.Generator()
13463    >>> g_cuda = torch.Generator(device='cuda')
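    >>> # typical use: pass the generator to a sampling op for reproducible draws
    >>> x = torch.randn(2, 2, generator=g_cpu.manual_seed(42))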
13464""",
13465)
13466
13467
13468add_docstr(
13469    torch.Generator.set_state,
13470    r"""
Generator.set_state(new_state) -> None
13472
13473Sets the Generator state.
13474
13475Arguments:
13476    new_state (torch.ByteTensor): The desired state.
13477
13478Example::
13479
13480    >>> g_cpu = torch.Generator()
13481    >>> g_cpu_other = torch.Generator()
13482    >>> g_cpu.set_state(g_cpu_other.get_state())
13483""",
13484)
13485
13486
13487add_docstr(
13488    torch.Generator.get_state,
13489    r"""
13490Generator.get_state() -> Tensor
13491
13492Returns the Generator state as a ``torch.ByteTensor``.
13493
13494Returns:
13495    Tensor: A ``torch.ByteTensor`` which contains all the necessary bits
13496    to restore a Generator to a specific point in time.
13497
13498Example::
13499
13500    >>> g_cpu = torch.Generator()
13501    >>> g_cpu.get_state()
13502""",
13503)
13504
13505add_docstr(
13506    torch.Generator.graphsafe_set_state,
13507    r"""
13508Generator.graphsafe_set_state(state) -> None
13509
13510Sets the state of the generator to the specified state in a manner that is safe for use in graph capture.
13511This method is crucial for ensuring that the generator's state can be captured in the CUDA graph.
13512
13513Arguments:
    state (torch.Generator): A Generator pointing to the new state for the generator, typically obtained from `graphsafe_get_state`.
13515
13516Example:
13517    >>> g_cuda = torch.Generator(device='cuda')
13518    >>> g_cuda_other = torch.Generator(device='cuda')
13519    >>> current_state = g_cuda_other.graphsafe_get_state()
13520    >>> g_cuda.graphsafe_set_state(current_state)
13521""",
13522)
13523
13524add_docstr(
13525    torch.Generator.graphsafe_get_state,
13526    r"""
13527Generator.graphsafe_get_state() -> torch.Generator
13528
13529Retrieves the current state of the generator in a manner that is safe for graph capture.
13530This method is crucial for ensuring that the generator's state can be captured in the CUDA graph.
13531
13532Returns:
    torch.Generator: A Generator pointing to the current state of the generator.
13534
13535Example:
13536    >>> g_cuda = torch.Generator(device='cuda')
13537    >>> current_state = g_cuda.graphsafe_get_state()
13538""",
13539)
13540
13541add_docstr(
13542    torch.Generator.clone_state,
13543    r"""
13544Generator.clone_state() -> torch.Generator
13545
13546Clones the current state of the generator and returns a new generator pointing to this cloned state.
13547This method is beneficial for preserving a particular state of a generator to restore at a later point.
13548
13549Returns:
13550    torch.Generator: A Generator pointing to the newly cloned state.
13551
13552Example:
13553    >>> g_cuda = torch.Generator(device='cuda')
13554    >>> cloned_state = g_cuda.clone_state()
13555""",
13556)
13557
13558add_docstr(
13559    torch.Generator.manual_seed,
13560    r"""
13561Generator.manual_seed(seed) -> Generator
13562
13563Sets the seed for generating random numbers. Returns a `torch.Generator` object. Any 32-bit integer is a valid seed.
13564
13565Arguments:
13566    seed (int): The desired seed. Value must be within the inclusive range
13567        `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
13568        is raised. Negative inputs are remapped to positive values with the formula
13569        `0xffff_ffff_ffff_ffff + seed`.
13570
13571Returns:
    Generator: A torch.Generator object.
13573
13574Example::
13575
13576    >>> g_cpu = torch.Generator()
13577    >>> g_cpu.manual_seed(2147483647)
13578""",
13579)
13580
13581
13582add_docstr(
13583    torch.Generator.initial_seed,
13584    r"""
13585Generator.initial_seed() -> int
13586
13587Returns the initial seed for generating random numbers.
13588
13589Example::
13590
13591    >>> g_cpu = torch.Generator()
13592    >>> g_cpu.initial_seed()
13593    2147483647
13594""",
13595)
13596
13597
13598add_docstr(
13599    torch.Generator.seed,
13600    r"""
13601Generator.seed() -> int
13602
Gets a non-deterministic random number from std::random_device or the current
time and uses it to seed the Generator.
13605
13606Example::
13607
13608    >>> g_cpu = torch.Generator()
13609    >>> g_cpu.seed()
13610    1516516984916
13611""",
13612)
13613
13614
13615add_docstr(
13616    torch.Generator.device,
13617    r"""
13618Generator.device -> device
13619
13620Gets the current device of the generator.
13621
13622Example::
13623
13624    >>> g_cpu = torch.Generator()
13625    >>> g_cpu.device
13626    device(type='cpu')
13627""",
13628)
13629
13630add_docstr(
13631    torch._assert_async,
13632    r"""
_assert_async(tensor) -> None
13634
13635Asynchronously assert that the contents of tensor are nonzero.  For CPU tensors,
13636this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for
13637CUDA tensors, we DO NOT synchronize and you may only find out the assertion
13638failed at a later CUDA kernel launch.  Asynchronous assertion can be helpful for
13639testing invariants in CUDA tensors without giving up performance.  This function
13640is NOT intended to be used for regular error checking, as it will trash your CUDA
context if the assert fails (forcing you to restart your PyTorch process).
13642
13643Args:
13644    tensor (Tensor): a one element tensor to test to see if it is nonzero.  Zero
13645        elements (including False for boolean tensors) cause an assertion failure
13646        to be raised.
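
Example::

    >>> # a passing assertion; a failing one on CUDA would corrupt the context
    >>> torch._assert_async(torch.tensor(1.0))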
13647""",
13648)
13649
13650add_docstr(
13651    torch.searchsorted,
13652    r"""
13653searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
13654
13655Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
13656corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
13657of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
13658Return a new tensor with the same size as :attr:`values`. More formally,
13659the returned index satisfies the following rules:
13660
13661.. list-table::
13662   :widths: 12 10 78
13663   :header-rows: 1
13664
13665   * - :attr:`sorted_sequence`
13666     - :attr:`right`
13667     - *returned index satisfies*
13668   * - 1-D
13669     - False
13670     - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
13671   * - 1-D
13672     - True
13673     - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
13674   * - N-D
13675     - False
13676     - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
13677   * - N-D
13678     - True
13679     - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
13680
13681Args:
13682    sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
13683                              dimension unless :attr:`sorter` is provided, in which case the sequence does not
13684                              need to be sorted
13685    values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
13686
13687Keyword args:
13688    out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
13689                                Default value is False, i.e. default output data type is torch.int64.
13690    right (bool, optional): if False, return the first suitable location that is found. If True, return the
                            last such index. If no suitable index is found, return 0 for non-numerical values
                            (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
                            (one past the last index of the *innermost* dimension). In other words, if False,
13694                            gets the lower bound index for each value in :attr:`values` on the corresponding
13695                            *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
13696                            bound index instead. Default value is False. :attr:`side` does the same and is
13697                            preferred. It will error if :attr:`side` is set to "left" while this is True.
13698    side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
13699                            and "right" corresponds to True for :attr:`right`. It will error if this is set to
13700                            "left" while :attr:`right` is True. Default value is None.
13701    out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
13702    sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted
13703                            :attr:`sorted_sequence` containing a sequence of indices that sort it in the
13704                            ascending order on the innermost dimension
13705
13706
13707Example::
13708
13709    >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
13710    >>> sorted_sequence
13711    tensor([[ 1,  3,  5,  7,  9],
13712            [ 2,  4,  6,  8, 10]])
13713    >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]])
13714    >>> values
13715    tensor([[3, 6, 9],
13716            [3, 6, 9]])
13717    >>> torch.searchsorted(sorted_sequence, values)
13718    tensor([[1, 3, 4],
13719            [1, 2, 4]])
13720    >>> torch.searchsorted(sorted_sequence, values, side='right')
13721    tensor([[2, 3, 5],
13722            [1, 3, 4]])
13723
13724    >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
13725    >>> sorted_sequence_1d
13726    tensor([1, 3, 5, 7, 9])
13727    >>> torch.searchsorted(sorted_sequence_1d, values)
13728    tensor([[1, 3, 4],
13729            [1, 3, 4]])
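
    >>> # if the sequence is unsorted, supply `sorter` with the sorting indices
    >>> unsorted_1d = torch.tensor([5, 1, 9, 3, 7])
    >>> torch.searchsorted(unsorted_1d, values, sorter=unsorted_1d.argsort())
    tensor([[1, 3, 4],
            [1, 3, 4]])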
13730""",
13731)
13732
13733add_docstr(
13734    torch.bucketize,
13735    r"""
13736bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor
13737
13738Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the
13739boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size
13740as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that
13741this behavior is opposite the behavior of
13742`numpy.digitize <https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html>`_.
13743More formally, the returned index satisfies the following rules:
13744
13745.. list-table::
13746   :widths: 15 85
13747   :header-rows: 1
13748
13749   * - :attr:`right`
13750     - *returned index satisfies*
13751   * - False
13752     - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]``
13753   * - True
13754     - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]``
13755
13756Args:
13757    input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
13758    boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined.
13759
13760Keyword args:
13761    out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
13762                                Default value is False, i.e. default output data type is torch.int64.
13763    right (bool, optional): if False, return the first suitable location that is found. If True, return the
                            last such index. If no suitable index is found, return 0 for non-numerical values
                            (e.g. nan, inf) or the size of :attr:`boundaries` (one past the last index).
13766                            In other words, if False, gets the lower bound index for each value in :attr:`input`
13767                            from :attr:`boundaries`. If True, gets the upper bound index instead.
13768                            Default value is False.
13769    out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided.
13770
13771
13772Example::
13773
13774    >>> boundaries = torch.tensor([1, 3, 5, 7, 9])
13775    >>> boundaries
13776    tensor([1, 3, 5, 7, 9])
13777    >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]])
13778    >>> v
13779    tensor([[3, 6, 9],
13780            [3, 6, 9]])
13781    >>> torch.bucketize(v, boundaries)
13782    tensor([[1, 3, 4],
13783            [1, 3, 4]])
13784    >>> torch.bucketize(v, boundaries, right=True)
13785    tensor([[2, 3, 5],
13786            [2, 3, 5]])
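
    >>> # with a 1-D `boundaries`, this agrees with torch.searchsorted(boundaries, v)
    >>> torch.searchsorted(boundaries, v)
    tensor([[1, 3, 4],
            [1, 3, 4]])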
13787""",
13788)
13789
13790add_docstr(
13791    torch.view_as_real_copy,
13792    r"""
13793Performs the same operation as :func:`torch.view_as_real`, but all output tensors
13794are freshly created instead of aliasing the input.
13795""",
13796)
13797
13798add_docstr(
13799    torch.view_as_complex_copy,
13800    r"""
13801Performs the same operation as :func:`torch.view_as_complex`, but all output tensors
13802are freshly created instead of aliasing the input.
13803""",
13804)
13805
13806add_docstr(
13807    torch.as_strided_copy,
13808    r"""
13809Performs the same operation as :func:`torch.as_strided`, but all output tensors
13810are freshly created instead of aliasing the input.
13811""",
13812)
13813
13814add_docstr(
13815    torch.diagonal_copy,
13816    r"""
13817Performs the same operation as :func:`torch.diagonal`, but all output tensors
13818are freshly created instead of aliasing the input.
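
Example::

    >>> x = torch.eye(3)
    >>> d = torch.diagonal_copy(x)  # a fresh tensor, not a view into x
    >>> d += 1  # mutating the copy leaves x unchanged, unlike torch.diagonal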
13819""",
13820)
13821
13822add_docstr(
13823    torch.expand_copy,
13824    r"""
13825Performs the same operation as :func:`torch.expand`, but all output tensors
13826are freshly created instead of aliasing the input.
13827""",
13828)
13829
13830add_docstr(
13831    torch.permute_copy,
13832    r"""
13833Performs the same operation as :func:`torch.permute`, but all output tensors
13834are freshly created instead of aliasing the input.
13835""",
13836)
13837
13838add_docstr(
13839    torch.select_copy,
13840    r"""
13841Performs the same operation as :func:`torch.select`, but all output tensors
13842are freshly created instead of aliasing the input.
13843""",
13844)
13845
13846add_docstr(
13847    torch.detach_copy,
13848    r"""
13849Performs the same operation as :func:`torch.detach`, but all output tensors
13850are freshly created instead of aliasing the input.
13851""",
13852)
13853
13854add_docstr(
13855    torch.slice_copy,
13856    r"""
13857Performs the same operation as :func:`torch.slice`, but all output tensors
13858are freshly created instead of aliasing the input.
13859""",
13860)
13861
13862add_docstr(
13863    torch.split_copy,
13864    r"""
13865Performs the same operation as :func:`torch.split`, but all output tensors
13866are freshly created instead of aliasing the input.
13867""",
13868)
13869
13870add_docstr(
13871    torch.split_with_sizes_copy,
13872    r"""
13873Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors
13874are freshly created instead of aliasing the input.
13875""",
13876)
13877
13878add_docstr(
13879    torch.squeeze_copy,
13880    r"""
13881Performs the same operation as :func:`torch.squeeze`, but all output tensors
13882are freshly created instead of aliasing the input.
13883""",
13884)
13885
13886add_docstr(
13887    torch.t_copy,
13888    r"""
13889Performs the same operation as :func:`torch.t`, but all output tensors
13890are freshly created instead of aliasing the input.
13891""",
13892)
13893
13894add_docstr(
13895    torch.transpose_copy,
13896    r"""
13897Performs the same operation as :func:`torch.transpose`, but all output tensors
13898are freshly created instead of aliasing the input.
13899""",
13900)
13901
13902add_docstr(
13903    torch.unsqueeze_copy,
13904    r"""
13905Performs the same operation as :func:`torch.unsqueeze`, but all output tensors
13906are freshly created instead of aliasing the input.
13907""",
13908)
13909
13910add_docstr(
13911    torch.indices_copy,
13912    r"""
13913Performs the same operation as :func:`torch.indices`, but all output tensors
13914are freshly created instead of aliasing the input.
13915""",
13916)
13917
13918add_docstr(
13919    torch.values_copy,
13920    r"""
13921Performs the same operation as :func:`torch.values`, but all output tensors
13922are freshly created instead of aliasing the input.
13923""",
13924)
13925
13926add_docstr(
13927    torch.crow_indices_copy,
13928    r"""
13929Performs the same operation as :func:`torch.crow_indices`, but all output tensors
13930are freshly created instead of aliasing the input.
13931""",
13932)
13933
13934add_docstr(
13935    torch.col_indices_copy,
13936    r"""
13937Performs the same operation as :func:`torch.col_indices`, but all output tensors
13938are freshly created instead of aliasing the input.
13939""",
13940)
13941
13942add_docstr(
13943    torch.unbind_copy,
13944    r"""
13945Performs the same operation as :func:`torch.unbind`, but all output tensors
13946are freshly created instead of aliasing the input.
13947""",
13948)
13949
13950add_docstr(
13951    torch.view_copy,
13952    r"""
13953Performs the same operation as :func:`torch.view`, but all output tensors
13954are freshly created instead of aliasing the input.
13955""",
13956)
13957
13958add_docstr(
13959    torch.unfold_copy,
13960    r"""
13961Performs the same operation as :func:`torch.unfold`, but all output tensors
13962are freshly created instead of aliasing the input.
13963""",
13964)
13965
13966add_docstr(
13967    torch.alias_copy,
13968    r"""
13969Performs the same operation as :func:`torch.alias`, but all output tensors
13970are freshly created instead of aliasing the input.
13971""",
13972)
13973
13974for unary_base_func_name in (
13975    "exp",
13976    "sqrt",
13977    "abs",
13978    "acos",
13979    "asin",
13980    "atan",
13981    "ceil",
13982    "cos",
13983    "cosh",
13984    "erf",
13985    "erfc",
13986    "expm1",
13987    "floor",
13988    "log",
13989    "log10",
13990    "log1p",
13991    "log2",
13992    "neg",
13993    "tan",
13994    "tanh",
13995    "sin",
13996    "sinh",
13997    "round",
13998    "lgamma",
13999    "frac",
14000    "reciprocal",
14001    "sigmoid",
14002    "trunc",
14003    "zero",
14004):
14005    unary_foreach_func_name = f"_foreach_{unary_base_func_name}"
14006    if hasattr(torch, unary_foreach_func_name):
14007        add_docstr(
14008            getattr(torch, unary_foreach_func_name),
14009            rf"""
14010{unary_foreach_func_name}(self: List[Tensor]) -> List[Tensor]
14011
14012Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list.
14013            """,
14014        )
14015    unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_"
14016    if hasattr(torch, unary_inplace_foreach_func_name):
14017        add_docstr(
14018            getattr(torch, unary_inplace_foreach_func_name),
14019            rf"""
14020{unary_inplace_foreach_func_name}(self: List[Tensor]) -> None
14021
Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list, in place.
14023        """,
14024        )
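

# A minimal usage sketch (illustration only, not consumed by the docstring
# generation above): the _foreach_* ops apply a unary op across a list of
# tensors in a single call.
def _demo_foreach_exp():
    tensors = [torch.rand(2), torch.rand(3)]
    out = torch._foreach_exp(tensors)  # out-of-place: returns a list of new tensors
    torch._foreach_exp_(tensors)  # in-place variant: mutates each tensor, returns None
    return out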
14025