
Searched full:mps (Results 1 – 25 of 911) sorted by relevance


/aosp_15_r20/external/pytorch/test/
test_mps.py
1 # Owner(s): ["module: mps"]
29 import torch.backends.mps
171 …ss for `sort` both are used (values and indices), thus resulting in a mismatch between CPU and MPS.
190 …ss for `sort` both are used (values and indices), thus resulting in a mismatch between CPU and MPS.
196 # Failures due to lack of implementation of downstream functions on MPS backend
200 # Exception: Caused by sample input at index 3 on MPS
225 if key in MACOS_12_3_XFAILLIST_GRAD and (not torch.backends.mps.is_macos13_or_newer()):
230 …if key in MACOS_BEFORE_13_3_XFAILLIST_GRAD and (torch.backends.mps.is_macos13_or_newer() and produ…
601 # inconsistency errors between cpu and mps, max seen atol is 2
616 # - MPS output: tensor([2546, 6917, 3181, ..., 7128, 30, 5133], device='mps:0')
[all …]
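The xfail bookkeeping above is keyed on the host macOS version. A minimal sketch of that gating pattern, using `torch.backends.mps.is_macos13_or_newer()` as in the test file; the one-entry table here is a hypothetical stand-in for the much larger lists in test_mps.py:

```python
import torch

# Hypothetical xfail table: op name -> dtypes whose gradients are known
# to mismatch on macOS 12.x (the real lists in test_mps.py are far larger).
MACOS_12_3_XFAILLIST_GRAD = {"sort": [torch.float32]}

def grad_check_expected_to_fail(key: str) -> bool:
    # Mirrors the version gate in the snippet: an entry only applies
    # when running on macOS older than 13.
    return (key in MACOS_12_3_XFAILLIST_GRAD
            and not torch.backends.mps.is_macos13_or_newer())
```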
/aosp_15_r20/external/openthread/third_party/mbedtls/repo/tests/suites/
test_suite_mps.data
1 MPS Reader: Single step, single round, pausing disabled
4 MPS Reader: Single step, single round, pausing enabled but unused
7 MPS Reader: Single step, multiple rounds, pausing disabled
10 MPS Reader: Single step, multiple rounds, pausing enabled but unused
13 MPS Reader: Multiple steps, single round, pausing disabled
16 MPS Reader: Multiple steps, single round, pausing enabled but unused
19 MPS Reader: Multiple steps, multiple rounds, pausing disabled
22 MPS Reader: Multiple steps, multiple rounds, pausing enabled but unused
25 MPS Reader: Pausing needed but disabled
28 MPS Reader: Pausing needed + enabled, but buffer too small
[all …]
/aosp_15_r20/external/mbedtls/tests/suites/
test_suite_mps.data
1 MPS Reader: Single step, single round, pausing disabled
4 MPS Reader: Single step, single round, pausing enabled but unused
7 MPS Reader: Single step, multiple rounds, pausing disabled
10 MPS Reader: Single step, multiple rounds, pausing enabled but unused
13 MPS Reader: Multiple steps, single round, pausing disabled
16 MPS Reader: Multiple steps, single round, pausing enabled but unused
19 MPS Reader: Multiple steps, multiple rounds, pausing disabled
22 MPS Reader: Multiple steps, multiple rounds, pausing enabled but unused
25 MPS Reader: Pausing needed but disabled
28 MPS Reader: Pausing needed + enabled, but buffer too small
[all …]
/aosp_15_r20/external/executorch/backends/apple/mps/
setup.md
1 # Building and Running ExecuTorch with MPS Backend
3 In this tutorial we will walk you through the process of getting set up to build the MPS backend for…
5 MPS backend device maps machine learning computational graphs and primitives on the [MPS Graph](ht…
10 … [MobileNet V3](https://pytorch.org/vision/main/models/mobilenetv3.html) model to the MPS delegate.
11 * You will also learn how to compile and deploy the ExecuTorch runtime with the MPS delegate on mac…
26 In order to be able to successfully build and run a model using the MPS backend for ExecuTorch, you…
45 ***Step 2.*** Install dependencies needed to lower MPS delegate:
48 ./backends/apple/mps/install_requirements.sh
55 **Compiling model for MPS delegate**:
56 …obileNetV3 model to the MPS delegate. You'll then pass this Program (the `.pte` file) during the r…
[all …]
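The tutorial's export step, sketched in Python. This is a hedged sketch, not the tutorial's verbatim code: the `MPSPartitioner` import path and the `to_edge_transform_and_lower` entry point are assumptions that vary across ExecuTorch releases, so prefer the `mps_example` script shown in the README result further down:

```python
import torch
import torchvision.models as models

# Assumed import paths; they differ between ExecuTorch releases.
from executorch.backends.apple.mps.partition import MPSPartitioner  # assumption
from executorch.exir import to_edge_transform_and_lower  # assumption

model = models.mobilenet_v3_small(weights=None).eval()
example_inputs = (torch.randn(1, 3, 224, 224),)

# Export the eager model, lower MPS-supported subgraphs to the delegate,
# and serialize the resulting program as a .pte file.
exported = torch.export.export(model, example_inputs)
program = to_edge_transform_and_lower(
    exported, partitioner=[MPSPartitioner([])]
).to_executorch()

with open("mv3_mps.pte", "wb") as f:
    f.write(program.buffer)
```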
/aosp_15_r20/external/pytorch/aten/src/ATen/mps/
MPSHooks.mm
3 #include <ATen/mps/MPSAllocatorInterface.h>
4 #include <ATen/mps/MPSDevice.h>
5 #include <ATen/mps/MPSGeneratorImpl.h>
6 #include <ATen/mps/MPSHooks.h>
7 #include <ATen/mps/MPSProfiler.h>
8 #include <ATen/mps/MPSStream.h>
11 namespace at::mps {
14 C10_LOG_API_USAGE_ONCE("aten.init.mps");
15 // TODO: initialize MPS devices and streams here
19 return at::mps::is_available();
[all …]
MPSGuardImpl.h
8 #include <ATen/mps/MPSStream.h>
9 #include <ATen/mps/MPSEvent.h>
27 namespace at::mps {
34 static constexpr c10::DeviceType static_type = c10::DeviceType::MPS;
39 TORCH_INTERNAL_ASSERT(t == c10::DeviceType::MPS); in MPSGuardImpl()
44 return c10::DeviceType::MPS; in type()
48 return Device(c10::DeviceType::MPS, 0); in exchangeDevice()
52 return Device(c10::DeviceType::MPS, 0); in getDevice()
56 return Device(c10::DeviceType::MPS, 0); in uncheckedGetDevice()
68 return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0)); in getStream()
[all …]
EmptyTensor.cpp
7 #include <ATen/mps/EmptyTensor.h>
8 #include <ATen/mps/MPSDevice.h>
11 #include <ATen/native/mps/Copy.h>
13 #define MPS_ERROR_NOT_COMPILED "PyTorch code is not compiled with MPS enabled"
15 "The MPS backend is supported on MacOS 12.3+.", \
17 #define MPS_ERROR_DOUBLE_NOT_SUPPORTED "Cannot convert a MPS Tensor to float64 dtype " \
18 "as the MPS framework doesn't support float64. Please use float32 instead."
32 TORCH_INTERNAL_ASSERT_DEBUG_ONLY(device.type() == DeviceType::MPS); in empty_mps()
36 "only strided tensors are supported on MPS"); in empty_mps()
38 …TORCH_CHECK(size.size() <= 16, "MPS supports tensors with dimensions <= 16, but got ", size.size()… in empty_mps()
[all …]
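The error strings above encode two hard limits of the backend: float64 tensors are rejected outright, and tensors may have at most 16 dimensions. A quick illustration from the Python side, assuming an MPS-capable machine:

```python
import torch

if torch.backends.mps.is_available():
    # float32 is the supported floating-point dtype on MPS.
    x = torch.ones(3, device="mps", dtype=torch.float32)

    # Converting to float64 trips MPS_ERROR_DOUBLE_NOT_SUPPORTED above.
    try:
        x.to(torch.float64)
    except (TypeError, RuntimeError) as e:
        print(e)  # "Cannot convert a MPS Tensor to float64 dtype ..."
    # Likewise, allocating a tensor with more than 16 dimensions fails
    # the size.size() <= 16 check in empty_mps().
```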
/aosp_15_r20/external/pytorch/aten/src/ATen/native/mps/operations/
BinaryOps.mm
6 #include <ATen/native/mps/OperationUtils.h>
7 #include <ATen/native/mps/operations/BinaryKernel.h>
44 namespace mps {
54 …MPSGraphTensor*(mps::BinaryOpCachedGraph * graph, MPSGraphTensor * primary, MPSGraphTensor * secon…
75 "MPS: ",
78 TORCH_CHECK_TYPE(!isComplexType(self.scalar_type()) || mps::supportsComplex(),
206 …TORCH_CHECK(self.scalar_type() != ScalarType::Half, "MPS: does not support trunc_divide op with fl…
312 } // namespace mps
316 mps::binaryOp##other_type( \
325 … WithPrimaryTensor:mps::castMPSTensor(mpsGraph, primaryCastTensor, ScalarType::Bool) \
[all …]
UnaryOps.mm
3 #include <ATen/native/mps/Copy.h>
4 #include <ATen/native/mps/MPSGraphSonomaOps.h>
5 #include <ATen/native/mps/MPSGraphVenturaOps.h>
6 #include <ATen/native/mps/OperationUtils.h>
67 namespace mps {
102 mps::mps_copy_(self_, self, false);
167 } // namespace mps
170 mps::unary_op(self, output, "trunc_out_mps", ^MPSGraphTensor*(MPSGraph* mpsGraph, MPSGraphTensor* …
171 return mps::trunc_tensor(mpsGraph, inputTensor);
176 mps::unary_op(self, output, "signbit_out_mps", ^MPSGraphTensor*(MPSGraph* mpsGraph, MPSGraphTensor…
[all …]
LinearAlgebra.mm
4 #include <ATen/mps/MPSProfiler.h>
8 #include <ATen/native/mps/MPSGraphSequoiaOps.h>
9 #include <ATen/native/mps/MPSGraphSonomaOps.h>
10 #include <ATen/native/mps/OperationUtils.h>
31 namespace mps {
87 …auto matmulPSO = lib.getPipelineStateForFunc("naive_matmul_" + mps::scalarToMetalTypeString(output…
139 using namespace mps;
142 "linalg.lu_factor(): MPS doesn't support complex types.");
143 TORCH_CHECK(pivot, "linalg.lu_factor(): MPS doesn't allow pivot == False.");
249 using namespace mps;
[all …]
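The `TORCH_CHECK`s above surface as Python-level errors: on MPS, `torch.linalg.lu_factor` requires a real dtype and partial pivoting. A small example of the supported path on a recent PyTorch build, assuming MPS is available:

```python
import torch

if torch.backends.mps.is_available():
    A = torch.randn(4, 4, device="mps", dtype=torch.float32)
    LU, pivots = torch.linalg.lu_factor(A)  # pivot=True is the default
    # torch.linalg.lu_factor(A, pivot=False) raises, per the check above,
    # as do complex inputs such as torch.complex64.
```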
Distributions.mm
4 #include <ATen/mps/MPSGeneratorImpl.h>
8 #include <ATen/native/mps/MPSGraphVenturaOps.h>
9 #include <ATen/native/mps/OperationUtils.h>
31 namespace mps {
44 #define RandomOpFn(graph, randomTensor) MPSGraphTensor*(mps::RandomCachedGraph * graph, MPSGraphTen…
61 …auto mps_gen = get_generator_or_default<MPSGeneratorImpl>(gen, at::mps::detail::getDefaultMPSGener…
69 … mpsGraphRankedPlaceHolder(mpsGraph, MPSDataTypeInt32, @[ @(at::mps::detail::PHILOX_STATE_N) ]);
71 // FP16, FP32 and Int32 are the only data types supported for distributions on MPS backend.
103 // results will be cast if self's scalar type isn't directly supported by MPS backend.
109 …[MPSNDArrayDescriptor descriptorWithDataType:MPSDataTypeInt32 shape:@[ @(at::mps::detail::PHILOX_S…
[all …]
Indexing.mm
3 #include <ATen/native/mps/OperationUtils.h>
11 #include <ATen/mps/MPSAllocatorInterface.h>
12 #include <ATen/mps/MPSProfiler.h>
18 #include <ATen/native/mps/MPSGraphVenturaOps.h>
19 #include <ATen/native/mps/operations/Indexing.h>
48 namespace mps {
80 using namespace mps;
116 // this function call is a no-op if MPS Profiler is not enabled
154 using namespace mps;
157 …TORCH_CHECK(num_indices <= 16, "Current limit allows up to 16 indices to be used in MPS indexing k…
[all …]
Convolution.mm
4 #include <ATen/native/mps/MPSGraphVenturaOps.h>
5 #include <ATen/native/mps/OperationUtils.h>
135 "Conv3D is only supported on MPS for MacOS_13_2 or newer");
140 using namespace at::native::mps;
168 // TODO: MPS convolution kernel currently does not support output channels > 2^16
172 "Output channels > 65536 not supported at the MPS device. ",
175 "on MPS.");
189 auto stream = at::mps::getCurrentMPSStream();
221 mps::getTensorsStringKey({input_t, weight_t}) + ":" + std::to_string(bias_defined) + ":" + bias_sh…
227 mps::getTensorsStringKey({input_t, weight_t}) + ":" + std::to_string(bias_defined) + ":" + bias_sh…
[all …]
Pooling.mm
4 #include <ATen/native/mps/OperationUtils.h>
21 namespace mps {
33 #define PoolingOpFn(graph, desc) MPSGraphTensor*(mps::PoolingCachedGraph & graph, MPSGraphPooling2D…
137 // workaround for issue #103039644: mismatching MPS vs. CPU results
190 …// MPS TODO: Using strided API causes invalid indices to be generated if the original format is NH…
263 TORCH_WARN_ONCE("MPS: passing divisor to Average Pooling op with int64 input is ",
264 "not supported on MPS backend. ",
282 mps::PoolingOpBlock pooling_op_block = ^PoolingOpFn(cachedGraph, desc) {
288 // workaround for issue #103039644: mismatching MPS vs. CPU results
313 // workaround: custom divisor isn't supported by MPS backend, so we scale manually
[all …]
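The divisor handling above corresponds to `divisor_override` in the Python pooling API: it is rejected for int64 inputs, and for supported dtypes the backend applies the custom divisor by rescaling the averaged output. A minimal call that exercises this path, assuming MPS is available:

```python
import torch
import torch.nn.functional as F

if torch.backends.mps.is_available():
    x = torch.randn(1, 1, 4, 4, device="mps")
    # Served by the manual-scaling workaround noted in the snippet above.
    y = F.avg_pool2d(x, kernel_size=2, divisor_override=3)
```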
Pad.mm
3 #include <ATen/native/mps/OperationUtils.h>
25 namespace mps {
317 } // namespace mps
322 mps::pad_out_template(const_cast<Tensor&>(output),
334 mps::pad_out_template(const_cast<Tensor&>(grad_input),
345 mps::pad_out_template(const_cast<Tensor&>(output),
357 mps::pad_out_template(const_cast<Tensor&>(grad_input),
368 …return mps::pad_out_template(output, input, padding, std::nullopt, MPSGraphPaddingModeReflect, 0.0…
373 …return mps::pad_out_template(output, input, padding, std::nullopt, MPSGraphPaddingModeReflect, 0.0…
381 …return mps::pad_out_template(grad_input, input, padding, grad_output, MPSGraphPaddingModeReflect, …
[all …]
UpSample.mm
4 #include <ATen/native/mps/MPSGraphVenturaOps.h>
5 #include <ATen/native/mps/OperationUtils.h>
37 namespace mps {
219 } // namespace mps
223 mps::upsample_out_template(input, output_size, std::nullopt, std::nullopt, scale, output, false, "…
232 mps::upsample_out_template(grad_output, output_size, input_size, std::nullopt, scale, grad_input, …
237 mps::upsample_out_template(input, output_size, std::nullopt, std::nullopt, scale, output, false, "…
246 mps::upsample_out_template(
256 mps::upsample_out_template(input, output_size, std::nullopt, scales_h, scales_w, output, false, "n…
266 mps::upsample_out_template(grad_output, output_size, input_size, scales_h, scales_w, grad_input, f…
[all …]
/aosp_15_r20/external/executorch/examples/apple/mps/
README.md
3 # MPS Backend
5 [MPS](https://developer.apple.com/documentation/metalperformanceshaders) is a framework of highly o…
6 **MPS** backend takes advantage of [MPSGraph](https://developer.apple.com/documentation/metalperfor…
12 - [Setting up MPS backend](../../../backends/apple/mps/setup.md).
14 ## Delegation to MPS backend
16 The following command will lower the EdgeIR to MPS delegate:
20 python3 -m examples.apple.mps.scripts.mps_example --model_name="mv2" --bundled
22 To see all the options when exporting a model to MPS delegate, use the following command:
24 python3 -m examples.apple.mps.scripts.mps_example --help
41 rm -rf cmake-out/examples/apple/mps
[all …]
/aosp_15_r20/external/pytorch/docs/source/notes/
mps.rst
3 MPS backend
6 :mod:`mps` device enables high-performance
12 The new MPS backend extends the PyTorch ecosystem and provides existing scripts
15 To get started, simply move your Tensor and Module to the ``mps`` device:
19 # Check that MPS is available
20 if not torch.backends.mps.is_available():
21 if not torch.backends.mps.is_built():
22 print("MPS not available because the current PyTorch install was not "
23 "built with MPS enabled.")
25 print("MPS not available because the current MacOS version is not 12.3+ "
[all …]
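The snippet is cut off mid-example. The availability check from the MPS notes follows the shape below; the trailing message and the tensor/module usage are completed here from the published docs, with `torch.nn.Linear` as a stand-in model:

```python
import torch

if not torch.backends.mps.is_available():
    if not torch.backends.mps.is_built():
        print("MPS not available because the current PyTorch install was not "
              "built with MPS enabled.")
    else:
        print("MPS not available because the current MacOS version is not 12.3+ "
              "and/or you do not have an MPS-enabled device on this machine.")
else:
    mps_device = torch.device("mps")
    x = torch.ones(5, device=mps_device)          # create a tensor directly on MPS
    model = torch.nn.Linear(5, 1).to(mps_device)  # move a Module like any device
    pred = model(x)                               # runs on the GPU
```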
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
native_functions.yaml
358 MPS: abs_out_mps
416 CPU, CUDA, MPS, Meta: view_as_real
421 CPU, CUDA, MPS, Meta: view_as_complex
446 MPS: sgn_out_mps
485 MPS: conj_physical_out_mps
526 MPS: acos_out_mps
583 MPS: add_out_mps
641 MPS: addmv_out_mps
649 MPS: addr_mps
660 MPS: addr_out_mps
[all …]
/aosp_15_r20/packages/modules/Bluetooth/system/stack/l2cap/
l2c_ble.cc
239 uint16_t lcid = 0, rcid = 0, mtu = 0, mps = 0, initial_credit = 0; in l2cble_process_sig_cmd() local
352 STREAM_TO_UINT16(mps, p); in l2cble_process_sig_cmd()
366 "Recv L2CAP_CMD_CREDIT_BASED_CONN_REQ with mtu = {}, mps = {}, " in l2cble_process_sig_cmd()
368 mtu, mps, initial_credit, num_of_channels); in l2cble_process_sig_cmd()
399 if (mtu < L2CAP_CREDIT_BASED_MIN_MTU || mps < L2CAP_CREDIT_BASED_MIN_MPS || in l2cble_process_sig_cmd()
400 mps > L2CAP_LE_MAX_MPS) { in l2cble_process_sig_cmd()
435 temp_p_ccb->peer_conn_cfg.mps = mps; in l2cble_process_sig_cmd()
438 temp_p_ccb->tx_mps = mps; in l2cble_process_sig_cmd()
451 p_ccb->local_conn_cfg.mps = in l2cble_process_sig_cmd()
498 STREAM_TO_UINT16(mps, p); in l2cble_process_sig_cmd()
[all …]
/aosp_15_r20/external/pytorch/torch/mps/
__init__.py
3 This package enables an interface for accessing MPS (Metal Performance Shaders) backend in Python.
4 Metal is Apple's API for programming metal GPU (graphics processor unit). Using MPS means that incr…
27 r"""Returns the number of available MPS devices."""
32 r"""Waits for all kernels in all streams on a MPS device to complete."""
36 def get_rng_state(device: Union[int, str, torch.device] = "mps") -> Tensor:
41 Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
47 new_state: Tensor, device: Union[int, str, torch.device] = "mps"
54 Default: ``'mps'`` (i.e., ``torch.device('mps')``, the current MPS device).
66 # the torch.mps.manual_seed() can be called from the global
68 # sure mps is available (otherwise we just return without
[all …]
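The docstrings above cover the package's device and RNG helpers. Typical usage, assuming an MPS build (`torch.mps.device_count()` returns 0 otherwise):

```python
import torch

if torch.mps.device_count() > 0:
    torch.mps.manual_seed(42)          # seed the MPS Philox generator
    state = torch.mps.get_rng_state()  # snapshot RNG state as a Tensor
    a = torch.randn(3, device="mps")
    torch.mps.set_rng_state(state)     # restore, so the draw repeats
    b = torch.randn(3, device="mps")
    torch.mps.synchronize()            # wait for all MPS kernels to finish
    assert torch.equal(a.cpu(), b.cpu())
```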
/aosp_15_r20/external/pigweed/pw_bluetooth_sapphire/host/l2cap/
le_dynamic_channel_test.cc
41 LowerBits(payload.mps), in LeConnReq()
42 UpperBits(payload.mps), in LeConnReq()
52 LowerBits(payload.mps), in LeConnRsp()
53 UpperBits(payload.mps), in LeConnRsp()
284 .mps = kMaxInboundPduPayloadSize, in TEST_F()
290 .mps = 0x0032, in TEST_F()
311 kLeConnRspPayload.mps, in TEST_F()
343 .mps = kMaxInboundPduPayloadSize, in TEST_F()
349 .mps = 0x0032, in TEST_F()
370 kLeConnRspPayload.mps, in TEST_F()
[all …]
/aosp_15_r20/external/coreboot/payloads/libpayload/drivers/udc/
dwc2.c
26 uint16_t mps = 0; in get_mps() local
30 switch (depctl.mps) { in get_mps()
32 mps = 64; in get_mps()
35 mps = 32; in get_mps()
38 mps = 16; in get_mps()
41 mps = 8; in get_mps()
44 usb_debug("get mps error\n"); in get_mps()
47 mps = depctl.mps; in get_mps()
50 return mps; in get_mps()
58 uint16_t mps; in dwc2_process_ep() local
[all …]
/aosp_15_r20/external/mbedtls/library/
mps_common.h
9 * \brief Common functions and macros used by MPS
20 * \name SECTION: MPS Configuration
25 /*! This flag controls whether the MPS-internal components
29 * Context: All MPS API functions impose assumptions/preconditions on the
32 * calls to the MPS API which satisfy their preconditions and either succeed,
38 * In addition to state integrity, all MPS structures have a more refined
68 /*! This flag enables/disables assertions on the internal state of MPS.
70 * Assertions are sanity checks that should never trigger when MPS
83 /*! This flag controls whether tracing for MPS should be enabled. */
127 /* \} name SECTION: MPS Configuration */
[all …]
/aosp_15_r20/external/openthread/third_party/mbedtls/repo/library/
mps_common.h
9 * \brief Common functions and macros used by MPS
20 * \name SECTION: MPS Configuration
25 /*! This flag controls whether the MPS-internal components
29 * Context: All MPS API functions impose assumptions/preconditions on the
32 * calls to the MPS API which satisfy their preconditions and either succeed,
38 * In addition to state integrity, all MPS structures have a more refined
68 /*! This flag enables/disables assertions on the internal state of MPS.
70 * Assertions are sanity checks that should never trigger when MPS
83 /*! This flag controls whether tracing for MPS should be enabled. */
127 /* \} name SECTION: MPS Configuration */
[all …]
