Searched full:cumsum (Results 1 – 25 of 346) sorted by relevance


/aosp_15_r20/external/tensorflow/tensorflow/lite/delegates/gpu/common/tasks/
cumsum.h  29 class Cumsum : public GPUOperation {
31 Cumsum() = default;
32 explicit Cumsum(const OperationDef& definition, Axis axis) in Cumsum() function
37 Cumsum(Cumsum&& operation);
38 Cumsum& operator=(Cumsum&& operation);
39 Cumsum(const Cumsum&) = delete;
40 Cumsum& operator=(const Cumsum&) = delete;
47 Cumsum CreateCumsum(const OperationDef& definition,
cumsum.cc  15 #include "tensorflow/lite/delegates/gpu/common/tasks/cumsum.h"
26 void Cumsum::GetCumsumCode(const OperationDef& op_def) { in GetCumsumCode()
87 int3 Cumsum::GetGridSize() const { in GetGridSize()
99 Cumsum::Cumsum(Cumsum&& operation) in Cumsum() function in tflite::gpu::Cumsum
102 Cumsum& Cumsum::operator=(Cumsum&& operation) { in operator =()
110 Cumsum CreateCumsum(const OperationDef& definition, in CreateCumsum()
112 Cumsum op(definition, attr.axis); in CreateCumsum()
/aosp_15_r20/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_Cumsum.pbtxt  2 graph_op_name: "Cumsum"
21 If `True`, perform exclusive cumsum.
32 By default, this op performs an inclusive cumsum, which means that the first
36 tf.cumsum([a, b, c]) # => [a, a + b, a + b + c]
39 By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
43 tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b]
46 By setting the `reverse` kwarg to `True`, the cumsum is performed in the
50 tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c]
58 tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0]
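The inclusive, exclusive, and reverse behaviours documented above can be reproduced with plain NumPy; the sketch below only assumes the semantics stated in the pbtxt text, not any TensorFlow internals.

    import numpy as np

    def cumsum(x, exclusive=False, reverse=False):
        # Mirror the tf.cumsum semantics described in api_def_Cumsum.pbtxt.
        x = np.asarray(x)
        if reverse:
            x = x[::-1]
        out = np.cumsum(x)
        if exclusive:
            # Shift right and start from 0: [a, a+b, a+b+c] -> [0, a, a+b]
            out = np.concatenate(([0], out[:-1]))
        if reverse:
            out = out[::-1]
        return out

    a, b, c = 1, 2, 4
    assert list(cumsum([a, b, c])) == [a, a + b, a + b + c]
    assert list(cumsum([a, b, c], exclusive=True)) == [0, a, a + b]
    assert list(cumsum([a, b, c], reverse=True)) == [a + b + c, b + c, c]
    assert list(cumsum([a, b, c], exclusive=True, reverse=True)) == [b + c, c, 0]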
/aosp_15_r20/external/tensorflow/tensorflow/lite/kernels/
cumsum.cc  27 namespace cumsum { namespace
73 optimized_ops::CumSum(GetTensorData<int>(input), GetTensorShape(input), in Eval()
79 optimized_ops::CumSum(GetTensorData<int64_t>(input), in Eval()
85 optimized_ops::CumSum(GetTensorData<float>(input), GetTensorShape(input), in Eval()
93 "Unsupported input type, cumsum only supports int32 & float32."); in Eval()
101 } // namespace cumsum
104 static TfLiteRegistration r = {nullptr, nullptr, cumsum::Prepare, in Register_CUMSUM()
105 cumsum::Eval}; in Register_CUMSUM()
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/expansions/
cumsum_spmd_expander.cc  37 // Extract `axis` tensor from Cumsum op and return its positive value, since
40 auto cumsum = llvm::dyn_cast<mlir::TF::CumsumOp>(op); in GetAxisDimension() local
41 if (cumsum == nullptr) { in GetAxisDimension()
43 absl::StrCat("Expected Cumsum op but got : ", OpName(op)).c_str()); in GetAxisDimension()
46 ExtractConstIntFromValue(cumsum.axis())); in GetAxisDimension()
47 int64_t tensor_rank = ValueRank(cumsum.x()); in GetAxisDimension()
80 "input layout of Cumsum op must be known before SPMD " in ExpandOp()
/aosp_15_r20/external/tensorflow/tensorflow/core/lib/histogram/
histogram.cc  132 double cumsum = cumsum_prev + buckets_[i]; in Percentile() local
134 // Find the first bucket whose cumsum >= threshold in Percentile()
135 if (cumsum >= threshold) { in Percentile()
136 // Prevent divide by 0 in remap which happens if cumsum == cumsum_prev in Percentile()
137 // This should only get hit when p == 0, cumsum == 0, and cumsum_prev == 0 in Percentile()
138 if (cumsum == cumsum_prev) { in Percentile()
150 double weight = Remap(threshold, cumsum_prev, cumsum, lhs, rhs); in Percentile()
154 cumsum_prev = cumsum; in Percentile()
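The Percentile() logic above walks the bucket counts with a running cumulative sum until it crosses the requested threshold, then interpolates within that bucket. A rough NumPy sketch of the same idea (the bucket-limit convention and the linear remap are assumptions, not TensorFlow's exact code):

    import numpy as np

    def percentile(p, bucket_limits, bucket_counts):
        # p in [0, 100]; bucket_counts[i] counts samples falling at or below bucket_limits[i].
        counts = np.asarray(bucket_counts, dtype=float)
        threshold = counts.sum() * p / 100.0
        cumsum_prev = 0.0
        for i, count in enumerate(counts):
            cumsum = cumsum_prev + count
            # Find the first bucket whose cumsum >= threshold.
            if cumsum >= threshold:
                if cumsum == cumsum_prev:   # avoid divide-by-zero (p == 0 case)
                    return bucket_limits[i]
                lhs = bucket_limits[i - 1] if i > 0 else bucket_limits[0]
                rhs = bucket_limits[i]
                # Linearly remap threshold from [cumsum_prev, cumsum] onto [lhs, rhs].
                weight = (threshold - cumsum_prev) / (cumsum - cumsum_prev)
                return lhs + weight * (rhs - lhs)
            cumsum_prev = cumsum
        return bucket_limits[-1]

    print(percentile(50, [1.0, 2.0, 4.0, 8.0], [0, 10, 10, 0]))  # 2.0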
/aosp_15_r20/external/tensorflow/tensorflow/compiler/tests/
scan_ops_test.py  56 if func == np.cumsum:
79 np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
82 tf_out = math_ops.cumsum(p, axis, exclusive, reverse).eval(
105 math_ops.cumsum(p, axis).eval(feed_dict={p: x})
133 y = math_ops.cumsum(
147 math_ops.cumsum(input_tensor, -3).eval()
151 math_ops.cumsum(input_tensor, 2).eval()
155 math_ops.cumsum(input_tensor, [0]).eval()
/aosp_15_r20/external/eigen/unsupported/test/
cxx11_tensor_scan.cpp  23 Tensor<Type, 1, DataLayout> result = tensor.cumsum(0, Exclusive); in test_1d_scan()
60 result = tensor.cumsum(0); in test_4d_scan()
66 result = tensor.cumsum(1); in test_4d_scan()
72 result = tensor.cumsum(2); in test_4d_scan()
78 result = tensor.cumsum(3); in test_4d_scan()
92 Tensor<int, 1, DataLayout> result = tensor_map.cumsum(0); in test_tensor_maps()
/aosp_15_r20/external/tensorflow/tensorflow/python/kernel_tests/control_flow/
scan_ops_test.py  54 if func == np.cumsum:
77 np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
79 tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()
102 tf_out = math_ops.cumsum(x, axis).eval()
155 math_ops.cumsum(input_tensor, -3).eval()
159 math_ops.cumsum(input_tensor, 2).eval()
163 math_ops.cumsum(input_tensor, [0]).eval()
169 result = math_ops.cumsum(t, axis, exclusive, reverse)
/aosp_15_r20/external/aac/libSBRenc/src/
sbrenc_freq_sca.cpp  122 static void cumSum(INT start_value, INT *diff, INT length, UCHAR *start_adress);
472 cumSum(k0, diff0, num_bands0, v_k_master); /* cumsum */ in FDKsbrEnc_UpdateFreqScale()
482 cumSum(k1, diff1, num_bands1, &v_k_master[num_bands0]); in FDKsbrEnc_UpdateFreqScale()
500 cumSum(k0, diff0, num_bands0, v_k_master); /* cumsum */ in FDKsbrEnc_UpdateFreqScale()
540 cumSum(k0, diff_tot, num_bands0, v_k_master); /* cumsum */ in FDKsbrEnc_UpdateFreqScale()
582 static void cumSum(INT start_value, INT *diff, INT length, in cumSum() function
588 } /* End cumSum */ in cumSum()
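From the signature and the /* cumsum */ call sites, cumSum appears to turn a starting band index plus a list of per-band differences into absolute band borders. The Python sketch below is only a guess at those semantics, not the libSBRenc implementation, and the function and variable names are hypothetical.

    def cum_sum(start_value, diff):
        # Presumed behaviour: out[0] = start_value, out[i] = out[i-1] + diff[i-1].
        out = [start_value]
        for d in diff:
            out.append(out[-1] + d)
        return out

    # e.g. start band k0 = 8 with band widths [2, 3, 4] -> borders [8, 10, 13, 17]
    print(cum_sum(8, [2, 3, 4]))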
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/ops/
cumsum.cpp  87 Tensor cumsum( in cumsum() function
93 "Vulkan cumsum expects 1 <= input dimension <= 4, Tensor input dimensions ", in cumsum()
98 "cumsum dim input was ", in cumsum()
194 m.impl(TORCH_SELECTIVE_NAME("aten::cumsum"), TORCH_FN(cumsum)); in TORCH_LIBRARY_IMPL()
/aosp_15_r20/external/fonttools/Lib/fontTools/cffLib/
width.py  24 def cumSum(f, op=add, start=0, decreasing=False): function
114 cumFrqU = cumSum(widths, op=add)
115 cumMaxU = cumSum(widths, op=max)
116 cumFrqD = cumSum(widths, op=add, decreasing=True)
117 cumMaxD = cumSum(widths, op=max, decreasing=True)
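width.py's cumSum generalizes the running sum to an arbitrary binary op (add for cumulative frequencies, max for running maxima) with an optional decreasing direction. A simplified, list-based sketch of that idea using itertools.accumulate; it is not the fontTools implementation.

    from itertools import accumulate
    from operator import add

    def cum_reduce(values, op=add, start=0, decreasing=False):
        # Generalized running reduction: op=add gives a cumulative sum,
        # op=max gives a running maximum; decreasing accumulates from the end.
        seq = list(values)[::-1] if decreasing else list(values)
        out = list(accumulate(seq, op, initial=start))[1:]
        return out[::-1] if decreasing else out

    freqs = [3, 1, 4, 1, 5]
    print(cum_reduce(freqs, add))                   # [3, 4, 8, 9, 14]
    print(cum_reduce(freqs, max))                   # [3, 3, 4, 4, 5]
    print(cum_reduce(freqs, add, decreasing=True))  # [14, 11, 10, 6, 5]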
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
Repeat.h  30 Tensor cumsum = repeats.cumsum(0); in repeat_interleave_common() local
35 total = cumsum[-1].item<int64_t>(); in repeat_interleave_common()
42 const int64_t* cumsum_ptr = cumsum.const_data_ptr<int64_t>(); in repeat_interleave_common()
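Repeat.h uses the cumulative sum of the repeats tensor to obtain both the total output length (the last cumsum entry) and the end offset of each input element's run of copies. A NumPy sketch of that construction, not ATen's kernel:

    import numpy as np

    def repeat_interleave(values, repeats):
        values = np.asarray(values)
        repeats = np.asarray(repeats)
        ends = np.cumsum(repeats)     # end offset of each element's run
        total = int(ends[-1])         # total output length, as in Repeat.h
        out = np.empty(total, dtype=values.dtype)
        starts = ends - repeats       # start offset of each run
        for v, s, e in zip(values, starts, ends):
            out[s:e] = v
        return out

    print(repeat_interleave([10, 20, 30], [1, 0, 2]))  # [10 30 30]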
ReduceOps.cpp  63 #include <ATen/ops/cumsum.h>
266 TORCH_META_FUNC(cumsum) in TORCH_META_FUNC() argument
268 meta_func_cum_ops(*this, "cumsum", self, dim, dtype); in TORCH_META_FUNC()
508 return w.flip(dim).cumsum(dim).flip(dim); in reversed_cumsum()
634 // We do a cumsum of the zeros along the dimension. in cumprod_backward()
636 // we would have cumsum = [0, 1, 1, 2, 2] in cumprod_backward()
639 // cumsum == 0 in cumprod_backward()
642 // indices = (cumsum == 1).max(dim, keepdim=True).indices in cumprod_backward()
644 // zeros_like(indices).scatter_(dim, indices, 1.) & cumsum == 1 in cumprod_backward()
645 // Note that the logic_and with cumsum == 1 accounts in cumprod_backward()
[all …]
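The cumprod_backward comments above locate the first zero along the dimension by taking a cumulative sum of the zero mask and looking at where that cumsum equals 1. A 1-D NumPy illustration of the same trick (the gradient math itself is omitted):

    import numpy as np

    x = np.array([2.0, 0.0, 3.0, 0.0, 4.0])
    zero_mask = (x == 0).astype(np.int64)
    csum = np.cumsum(zero_mask)             # [0, 1, 1, 2, 2], as in the comment
    before_first_zero = csum == 0           # positions strictly before the first zero
    first_zero = int(np.argmax(csum == 1))  # index of the first zero: 1
    print(csum, before_first_zero, first_zero)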
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/
scan_ops.cc  120 // Register Cumsum kernels
123 Name("Cumsum") \
129 Name("Cumsum") \
140 Name("Cumsum") \
147 Name("Cumsum") \
scan_ops_test.cc  31 test::graph::Cumsum(g, test::graph::Constant(g, data), in LargeOneDCumsum()
42 test::graph::Cumsum(g, test::graph::Constant(g, data), in ColCumsum()
53 test::graph::Cumsum(g, test::graph::Constant(g, data), in RowCumsum()
64 test::graph::Cumsum(g, test::graph::Constant(g, data), in ThreeDYCumsum()
/aosp_15_r20/external/tensorflow/tensorflow/lite/testing/op_tests/
cumsum.py  15 """Test configs for cumsum."""
24 """Make a set of tests to do cumsum."""
35 """Build the cumsum op testing graph."""
38 out = tf.math.cumsum(
/aosp_15_r20/external/tensorflow/tensorflow/lite/experimental/mlir/testing/op_tests/
cumsum.py  15 """Test configs for cumsum."""
28 """Make a set of tests to do cumsum."""
39 """Build the cumsum op testing graph."""
42 out = tf.math.cumsum(
/aosp_15_r20/external/tensorflow/tensorflow/core/ops/compat/ops_history_v1/
Cumsum.pbtxt  2 name: "Cumsum"
66 name: "Cumsum"
132 name: "Cumsum"
199 name: "Cumsum"
/aosp_15_r20/external/tensorflow/tensorflow/core/ops/compat/ops_history_v2/
Cumsum.pbtxt  2 name: "Cumsum"
66 name: "Cumsum"
132 name: "Cumsum"
199 name: "Cumsum"
/aosp_15_r20/external/aac/libSBRdec/src/
sbrdec_freq_sca.cpp  122 static void cumSum(UCHAR start_value, UCHAR *diff, UCHAR length,
242 cumSum(stopMin, diff0, 13, diff1); in getStopBand()
379 cumSum(k0, diff0, num_bands0, v_k_master); in sbrdecUpdateFreqScale()
391 cumSum(k1, diff1, num_bands1, &v_k_master[num_bands0]); in sbrdecUpdateFreqScale()
407 cumSum(k0, diff0, num_bands0, v_k_master); in sbrdecUpdateFreqScale()
451 cumSum(k0, diff_tot, num_bands0, v_k_master); /* cumsum */ in sbrdecUpdateFreqScale()
614 static void cumSum(UCHAR start_value, UCHAR *diff, UCHAR length, in cumSum() function
/aosp_15_r20/external/pytorch/test/dynamo/
test_higher_order_ops.py  2965 cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
2966 getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
3014 cumsum_1: "i64[1]" = tensor_1.cumsum(dim = 0); tensor_1 = None
3097 cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
3098 getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
3148 cumsum_1: "i64[1]" = tensor_1.cumsum(dim = 0); tensor_1 = None
3274 cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
3275 getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
3354 cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
3355 getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/utils/
metrics_utils.py  307 which is the reversed cumulative sum in tf.cumsum().
341 true_positive = tf.math.cumsum(tp_bucket_value, reverse=True)
440 math_ops.cumsum(tp_bucket_v, reverse=True, axis=1))
442 math_ops.cumsum(fp_bucket_v, reverse=True, axis=1))
450 tp = math_ops.cumsum(tp_bucket_v, reverse=True)
451 fp = math_ops.cumsum(fp_bucket_v, reverse=True)
648 # for Brella release to pick up the new op tf.math.cumsum with float32.
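metrics_utils accumulates per-bucket true/false positive counts and then takes a reversed cumulative sum, so that bucket i ends up holding the count at or above threshold bucket i. The same bookkeeping in NumPy; the bucket values here are made up for illustration:

    import numpy as np

    # tp_bucket[i]: true positives whose score falls in threshold bucket i.
    tp_bucket = np.array([0, 2, 1, 3, 0])

    # Reversed cumulative sum: tp[i] = true positives with score >= bucket i,
    # the role played by tf.math.cumsum(..., reverse=True) in metrics_utils.py.
    tp_at_or_above = np.cumsum(tp_bucket[::-1])[::-1]
    print(tp_at_or_above)  # [6 6 4 3 0]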
/aosp_15_r20/external/pytorch/torch/masked/
_ops.py  170 cumsum=(("dim__as_int",), ("dtype=None", "mask=None")),
249 cumsum="""\
251 of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
255 of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
279 cumsum="cumulative_sum",
417 if op_name in {"sum", "cumsum"}:
788 torch.cumsum(torch.diff(crow_indices) != 0, 0),
972 "cumsum",
1169 def cumsum( function
1181 return torch.cumsum(mask_input, dim_, dtype=dtype).to(dtype=dtype)
[all …]
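Per the snippet, torch.masked's cumsum ends up calling torch.cumsum on a mask-filled input; for a sum-like op the natural fill value is 0, so masked-out elements leave the running total unchanged. The NumPy sketch below assumes that zero-fill convention rather than quoting the masked implementation.

    import numpy as np

    x    = np.array([1.0, 2.0, 3.0, 4.0])
    mask = np.array([True, False, True, True])   # False = element is masked out

    # Assumed behaviour: masked-out elements are treated as 0 (the identity of +),
    # so they contribute nothing to the cumulative sum.
    filled = np.where(mask, x, 0.0)
    print(np.cumsum(filled))   # [1. 1. 4. 8.]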
/aosp_15_r20/external/tensorflow/tensorflow/python/ops/ragged/
ragged_math_ops.py  243 math_ops.cumsum(output_row_lengths)
1181 """Calculate flat_values for math_ops.cumsum when axis==ragged_rank."""
1190 new_flat_values = math_ops.cumsum(flat_values, exclusive=True, reverse=True)
1198 new_flat_values = math_ops.cumsum(flat_values, exclusive=True)
1204 @dispatch.dispatch_for_api(math_ops.cumsum)
1210 """Calculate math_ops.cumsum for a RaggedTensor.
1215 dense_result=tf.math.cumsum(rt.to_tensor(), axis=axis, exclusive=exclusive,
1241 math_ops.cumsum, axis=new_axis, exclusive=exclusive, reverse=reverse)
1245 result = math_ops.cumsum(
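ragged_math_ops dispatches tf.math.cumsum for RaggedTensors, and its docstring defines the result via the dense equivalent rt.to_tensor(). For the common innermost-axis case that is just a per-row cumulative sum, as this plain-Python/NumPy sketch illustrates; it models the documented semantics, not the ragged implementation.

    import numpy as np

    ragged = [[1, 2, 3], [4], [], [5, 6]]   # rows of different lengths

    # Cumulative sum along the innermost (ragged) axis: cumsum per row.
    print([list(np.cumsum(row)) for row in ragged])
    # [[1, 3, 6], [4], [], [5, 11]]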
