// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT
// This file was machine generated by github.com/tensorflow/tensorflow/tensorflow/go/genop/internal
//
// WARNING: The generation of wrapper functions for TensorFlow ops is in an
// experimental state. The generated API can change without notice.

package op

import tf "github.com/tensorflow/tensorflow/tensorflow/go"

// optionalAttr is an intentionally un-exported type to hide
// details of how optional attributes to operations are implemented.
type optionalAttr map[string]interface{}

func makeOutputList(op *tf.Operation, start int, output string) ([]tf.Output, int, error) {
	size, err := op.OutputListSize(output)
	if err != nil {
		return nil, start, err
	}
	list := make([]tf.Output, size)
	for i := 0; i < size; i++ {
		list[i] = op.Output(start + i)
	}
	return list, start + size, nil
}
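
// Illustrative sketch (editor's addition, not generated code): wrappers for
// ops with list-valued outputs use makeOutputList to slice consecutive
// outputs into a []tf.Output. Here "values" is a hypothetical list output
// name, and `op` is an already-added *tf.Operation:
//
// ```go
// outs, next, err := makeOutputList(op, 0, "values")
// if err != nil {
// 	scope.UpdateErr("SomeOp", err)
// 	return
// }
// _ = outs // `next` indexes the first output after the list
// ```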

// AbortAttr is an optional argument to Abort.
type AbortAttr func(optionalAttr)

// AbortErrorMsg sets the optional error_msg attribute to value.
//
// value: A string which is the message associated with the exception.
// If not specified, defaults to ""
func AbortErrorMsg(value string) AbortAttr {
	return func(m optionalAttr) {
		m["error_msg"] = value
	}
}

// AbortExitWithoutError sets the optional exit_without_error attribute to value.
// If not specified, defaults to false
func AbortExitWithoutError(value bool) AbortAttr {
	return func(m optionalAttr) {
		m["exit_without_error"] = value
	}
}

// Raise an exception to abort the process when called.
//
// If exit_without_error is true, the process will exit normally;
// otherwise it will exit with a SIGABRT signal.
//
// Returns nothing but an exception.
//
// Returns the created operation.
func Abort(scope *Scope, optional ...AbortAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Abort",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
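
// Illustrative usage of the functional-options pattern above (editor's
// sketch, not generated code). Optional attributes are passed as trailing
// AbortAttr values:
//
// ```go
// s := NewScope()
// abort := Abort(s, AbortErrorMsg("unrecoverable state"), AbortExitWithoutError(true))
// _ = abort // pass as a target to tf.Session.Run to trigger the abort
// ```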

// Computes the absolute value of a tensor.
//
// Given a tensor `x`, this operation returns a tensor containing the absolute
// value of each element in `x`. For example, if x is an input element and y is
// an output element, this operation computes \\(y = |x|\\).
func Abs(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Abs",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns the element-wise sum of a list of tensors.
//
// `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
// wait for all of its inputs to be ready before beginning to sum. This can
// save memory if inputs are ready at different times, since minimum temporary
// storage is proportional to the output size rather than the combined input
// size.
//
// Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.
//
// Returns a `Tensor` of same shape and type as the elements of `inputs`.
//
// Arguments:
//
//	inputs: A list of `Tensor` objects, each with same shape and type.
//	shape: Shape of elements of `inputs`.
func AccumulateNV2(scope *Scope, inputs []tf.Output, shape tf.Shape) (sum tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "AccumulateNV2",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
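
// A minimal sketch of calling AccumulateNV2 (editor's addition). The shape
// argument must describe the shape of every element of `inputs`; scalars here:
//
// ```go
// s := NewScope()
// a := Const(s.SubScope("a"), float32(1))
// b := Const(s.SubScope("b"), float32(2))
// total := AccumulateNV2(s, []tf.Output{a, b}, tf.ScalarShape())
// _ = total // evaluates to 3 when run in a session
// ```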

// Computes acos of x element-wise.
//
//	Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.
//
//	Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
func Acos(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Acos",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes inverse hyperbolic cosine of x element-wise.
//
// Given an input tensor, the function computes inverse hyperbolic cosine of every element.
// Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.
//
// ```python
// x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
// tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
// ```
func Acosh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Acosh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns x + y element-wise.
//
// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
//
// Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor.
//
// Both input and output have a range `(-inf, inf)`.
func Add(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Add",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
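
// End-to-end sketch for Add (editor's addition, not generated code): build a
// graph, then evaluate it with a session. Error handling is abbreviated.
//
// ```go
// s := NewScope()
// x := Const(s.SubScope("x"), []float32{1, 2})
// y := Const(s.SubScope("y"), []float32{3, 4})
// z := Add(s, x, y)
// graph, err := s.Finalize()
// if err != nil {
// 	panic(err)
// }
// sess, err := tf.NewSession(graph, nil)
// if err != nil {
// 	panic(err)
// }
// out, err := sess.Run(nil, []tf.Output{z}, nil)
// _ = err
// // out[0].Value() is []float32{4, 6}
// ```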

// AddManySparseToTensorsMapAttr is an optional argument to AddManySparseToTensorsMap.
type AddManySparseToTensorsMapAttr func(optionalAttr)

// AddManySparseToTensorsMapContainer sets the optional container attribute to value.
//
// value: The container name for the `SparseTensorsMap` created by this op.
// If not specified, defaults to ""
func AddManySparseToTensorsMapContainer(value string) AddManySparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// AddManySparseToTensorsMapSharedName sets the optional shared_name attribute to value.
//
// value: The shared name for the `SparseTensorsMap` created by this op.
// If blank, the new Operation's unique name is used.
// If not specified, defaults to ""
func AddManySparseToTensorsMapSharedName(value string) AddManySparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.
//
// A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
// `sparse_values`, and `sparse_shape`, where
//
// ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```
//
// An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
// having a first `sparse_indices` column taking values between `[0, N)`, where
// the minibatch size `N == sparse_shape[0]`.
//
// The input `SparseTensor` must have rank `R` greater than 1, and the first
// dimension is treated as the minibatch dimension.  Elements of the `SparseTensor`
// must be sorted in increasing order of this first dimension.  The stored
// `SparseTensor` objects pointed to by each row of the output `sparse_handles`
// will have rank `R-1`.
//
// The `SparseTensor` values can then be read out as part of a minibatch by passing
// the given keys as vector elements to `TakeManySparseFromTensorsMap`.  To ensure
// the correct `SparseTensorsMap` is accessed, ensure that the same
// `container` and `shared_name` are passed to that Op.  If no `shared_name`
// is provided here, instead use the *name* of the Operation created by calling
// `AddManySparseToTensorsMap` as the `shared_name` passed to
// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
//
// Arguments:
//
//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
//
// `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
//
//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
//
// The minibatch size `N == sparse_shape[0]`.
//
// Returns 1-D.  The handles of the `SparseTensor` now stored in the
// `SparseTensorsMap`.  Shape: `[N]`.
func AddManySparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddManySparseToTensorsMapAttr) (sparse_handles tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AddManySparseToTensorsMap",
		Input: []tf.Input{
			sparse_indices, sparse_values, sparse_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
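
// A hedged sketch of the handle round trip described above (editor's
// addition). `indices`, `values`, and `shape` are assumed, pre-built
// tf.Output values describing the minibatch SparseTensor:
//
// ```go
// s := NewScope()
// handles := AddManySparseToTensorsMap(s, indices, values, shape,
// 	AddManySparseToTensorsMapSharedName("my_map"))
// // Later, with the same container/shared_name and colocated ops:
// idx, vals, shp := TakeManySparseFromTensorsMap(s, handles, tf.Float,
// 	TakeManySparseFromTensorsMapSharedName("my_map"))
// _, _, _ = idx, vals, shp
// ```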

// Add all input tensors element-wise.
//
//	Inputs must be of same size and shape.
//
//	```python
//	x = [9, 7, 10]
//	tf.math.add_n(x) ==> 26
//	```
func AddN(scope *Scope, inputs []tf.Output) (sum tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AddN",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
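
// A short Go counterpart to the Python example above (editor's sketch):
//
// ```go
// s := NewScope()
// xs := []tf.Output{
// 	Const(s.SubScope("a"), int32(9)),
// 	Const(s.SubScope("b"), int32(7)),
// 	Const(s.SubScope("c"), int32(10)),
// }
// total := AddN(s, xs) // evaluates to 26 when run in a session
// _ = total
// ```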

// AddSparseToTensorsMapAttr is an optional argument to AddSparseToTensorsMap.
type AddSparseToTensorsMapAttr func(optionalAttr)

// AddSparseToTensorsMapContainer sets the optional container attribute to value.
//
// value: The container name for the `SparseTensorsMap` created by this op.
// If not specified, defaults to ""
func AddSparseToTensorsMapContainer(value string) AddSparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// AddSparseToTensorsMapSharedName sets the optional shared_name attribute to value.
//
// value: The shared name for the `SparseTensorsMap` created by this op.
// If blank, the new Operation's unique name is used.
// If not specified, defaults to ""
func AddSparseToTensorsMapSharedName(value string) AddSparseToTensorsMapAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Add a `SparseTensor` to a `SparseTensorsMap`, return its handle.
//
// A `SparseTensor` is represented by three tensors: `sparse_indices`,
// `sparse_values`, and `sparse_shape`.
//
// This operator takes the given `SparseTensor` and adds it to a container
// object (a `SparseTensorsMap`).  A unique key within this container is generated
// in the form of an `int64`, and this is the value that is returned.
//
// The `SparseTensor` can then be read out as part of a minibatch by passing
// the key as a vector element to `TakeManySparseFromTensorsMap`.  To ensure
// the correct `SparseTensorsMap` is accessed, ensure that the same
// `container` and `shared_name` are passed to that Op.  If no `shared_name`
// is provided here, instead use the *name* of the Operation created by calling
// `AddSparseToTensorsMap` as the `shared_name` passed to
// `TakeManySparseFromTensorsMap`.  Ensure the Operations are colocated.
//
// Arguments:
//
//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
//
// Returns 0-D.  The handle of the `SparseTensor` now stored in the
// `SparseTensorsMap`.
func AddSparseToTensorsMap(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...AddSparseToTensorsMapAttr) (sparse_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AddSparseToTensorsMap",
		Input: []tf.Input{
			sparse_indices, sparse_values, sparse_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns x + y element-wise.
//
// *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AddV2",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Deprecated. Disallowed in GraphDef version >= 2.
//
// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead
func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustContrast",
		Input: []tf.Input{
			images, contrast_factor, min_value, max_value,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adjust the contrast of one or more images.
//
// `images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
// interpreted as `[height, width, channels]`.  The other dimensions only
// represent a collection of images, such as `[batch, height, width, channels]`.
//
// Contrast is adjusted independently for each channel of each image.
//
// For each channel, the Op first computes the mean of the image pixels in the
// channel and then adjusts each component of each pixel to
// `(x - mean) * contrast_factor + mean`.
//
// Arguments:
//
//	images: Images to adjust.  At least 3-D.
//	contrast_factor: A float multiplier for adjusting contrast.
//
// Returns The contrast-adjusted image or images.
func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustContrastv2",
		Input: []tf.Input{
			images, contrast_factor,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adjust the hue of one or more images.
//
// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
//
// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV. A delta is then applied to all the hue
// values, and the result is mapped back to RGB colorspace.
//
// Arguments:
//
//	images: Images to adjust.  At least 3-D.
//	delta: A float delta to add to the hue.
//
// Returns The hue-adjusted image or images.
func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustHue",
		Input: []tf.Input{
			images, delta,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adjust the saturation of one or more images.
//
// `images` is a tensor of at least 3 dimensions.  The last dimension is
// interpreted as channels, and must be three.
//
// The input image is considered in the RGB colorspace. Conceptually, the RGB
// colors are first mapped into HSV. A scale is then applied to all the
// saturation values, and the result is mapped back to RGB colorspace.
//
// Arguments:
//
//	images: Images to adjust.  At least 3-D.
//	scale: A float scale to apply to the saturation.
//
// Returns The saturation-adjusted image or images.
func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AdjustSaturation",
		Input: []tf.Input{
			images, scale,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AllAttr is an optional argument to All.
type AllAttr func(optionalAttr)

// AllKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func AllKeepDims(value bool) AllAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the "logical and" of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
//
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func All(scope *Scope, input tf.Output, axis tf.Output, optional ...AllAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "All",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
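
// A minimal reduction sketch using the optional attribute above (editor's
// addition):
//
// ```go
// s := NewScope()
// t := Const(s.SubScope("t"), [][]bool{{true, true}, {false, true}})
// axis := Const(s.SubScope("axis"), int32(1))
// rowAnd := All(s, t, axis, AllKeepDims(true)) // shape [2, 1] when run
// _ = rowAnd
// ```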

// AllCandidateSamplerAttr is an optional argument to AllCandidateSampler.
type AllCandidateSamplerAttr func(optionalAttr)

// AllCandidateSamplerSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed.  Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func AllCandidateSamplerSeed(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed"] = value
	}
}

// AllCandidateSamplerSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func AllCandidateSamplerSeed2(value int64) AllCandidateSamplerAttr {
	return func(m optionalAttr) {
		m["seed2"] = value
	}
}

// Generates labels for candidate sampling with a learned unigram distribution.
//
// See explanations of candidate sampling and the data formats at
// go/candidate-sampling.
//
// For each batch, this op picks a single set of sampled candidate labels.
//
// The advantages of sampling candidates per-batch are simplicity and the
// possibility of efficient dense matrix multiplication. The disadvantage is that
// the sampled candidates must be chosen independently of the context and of the
// true labels.
//
// Arguments:
//
//	true_classes: A batch_size * num_true matrix, in which each row contains the
//
// IDs of the num_true target_classes in the corresponding original label.
//
//	num_true: Number of true labels per context.
//	num_sampled: Number of candidates to produce.
//	unique: If unique is true, we sample with rejection, so that all sampled
//
// candidates in a batch are unique. This requires some approximation to
// estimate the post-rejection sampling probabilities.
//
// Returns:
//
//	sampled_candidates: A vector of length num_sampled, in which each element is
//
// the ID of a sampled candidate.
//
//	true_expected_count: A batch_size * num_true matrix, representing
//
// the number of times each candidate is expected to occur in a batch
// of sampled candidates. If unique=true, then this is a probability.
//
//	sampled_expected_count: A vector of length num_sampled, for each sampled
//
// candidate representing the number of times the candidate is expected
// to occur in a batch of sampled candidates.  If unique=true, then this is a
// probability.
func AllCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, optional ...AllCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AllCandidateSampler",
		Input: []tf.Input{
			true_classes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// An Op to exchange data across TPU replicas.
//
// On each replica, the input is split into `split_count` blocks along
// `split_dimension` and sent to the other replicas given `group_assignment`. After
// receiving `split_count` - 1 blocks from other replicas, we concatenate the
// blocks along `concat_dimension` as the output.
//
// For example, suppose there are 2 TPU replicas:
// replica 0 receives input: `[[A, B]]`
// replica 1 receives input: `[[C, D]]`
//
// group_assignment=`[[0, 1]]`
// concat_dimension=0
// split_dimension=1
// split_count=2
//
// replica 0's output: `[[A], [C]]`
// replica 1's output: `[[B], [D]]`
//
// Arguments:
//
//	input: The local input to the sum.
//	group_assignment: An int32 tensor with shape
//
// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
// replica ids in the ith subgroup.
//
//	concat_dimension: The dimension number to concatenate.
//	split_dimension: The dimension number to split.
//	split_count: The number of splits; this number must equal the sub-group
//
// size (group_assignment.get_shape()[1]).
//
// Returns The exchanged result.
func AllToAll(scope *Scope, input tf.Output, group_assignment tf.Output, concat_dimension int64, split_dimension int64, split_count int64) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"concat_dimension": concat_dimension, "split_dimension": split_dimension, "split_count": split_count}
	opspec := tf.OpSpec{
		Type: "AllToAll",
		Input: []tf.Input{
			input, group_assignment,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AngleAttr is an optional argument to Angle.
type AngleAttr func(optionalAttr)

// AngleTout sets the optional Tout attribute to value.
// If not specified, defaults to DT_FLOAT
func AngleTout(value tf.DataType) AngleAttr {
	return func(m optionalAttr) {
		m["Tout"] = value
	}
}

// Returns the argument of a complex number.
//
// Given a tensor `input` of complex numbers, this operation returns a tensor of
// type `float` that is the argument of each element in `input`. All elements in
// `input` must be complex numbers of the form \\(a + bj\\), where *a*
// is the real part and *b* is the imaginary part.
//
// The argument returned by this operation is of the form \\(atan2(b, a)\\).
//
// For example:
//
// ```
// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
// tf.angle(input) ==> [2.0132, 1.056]
// ```
//
// @compatibility(numpy)
// Equivalent to np.angle.
// @end_compatibility
func Angle(scope *Scope, input tf.Output, optional ...AngleAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Angle",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates an uninitialized anonymous hash table.
//
// This op creates a new anonymous hash table (as a resource) every time
// it is executed, with the specified dtype of its keys and values,
// returning the resource handle.  Before using the table you will have
// to initialize it.  After initialization the table will be
// immutable. The table is anonymous in the sense that it can only be
// accessed by the returned resource handle (e.g. it cannot be looked up
// by a name in a resource manager). The table will be automatically
// deleted when all resource handles pointing to it are gone.
//
// Arguments:
//
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns The resource handle to the newly created hash-table resource.
func AnonymousHashTable(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	opspec := tf.OpSpec{
		Type: "AnonymousHashTable",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A container for an iterator resource.
//
// Returns A handle to the iterator that can be passed to a "MakeIterator" or
// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
// resource sharing by name, and does not keep a reference to the resource
// container.
func AnonymousIterator(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousIterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A container for an iterator resource.
//
// Returns:
//
//	handle: A handle to the iterator that can be passed to a "MakeIterator" or
//
// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
// resource sharing by name, and does not keep a reference to the resource
// container.
//
//	deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousIteratorV2(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousIteratorV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// A container for an iterator resource.
//
// Returns A handle to the iterator that can be passed to a "MakeIterator" or
// "IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
// resource sharing by name, and does not keep a reference to the resource
// container.
func AnonymousIteratorV3(scope *Scope, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousIteratorV3",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A container for a multi device iterator resource.
//
// Returns:
//
//	handle: A handle to a multi device iterator that can be passed to a
//
// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
// AnonymousMultiDeviceIterator prevents resource sharing by name, and does not
// keep a reference to the resource container.
//
//	deleter: A variant deleter that should be passed into the op that deletes the iterator.
func AnonymousMultiDeviceIterator(scope *Scope, devices []string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output, deleter tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"devices": devices, "output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousMultiDeviceIterator",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// A container for a multi device iterator resource.
//
// Returns A handle to a multi device iterator that can be passed to a
// "MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
// AnonymousMultiDeviceIteratorV3 prevents resource sharing by name, and does not
// keep a reference to the resource container.
func AnonymousMultiDeviceIteratorV3(scope *Scope, devices []string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"devices": devices, "output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AnonymousMultiDeviceIteratorV3",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AnonymousMutableDenseHashTableAttr is an optional argument to AnonymousMutableDenseHashTable.
type AnonymousMutableDenseHashTableAttr func(optionalAttr)

// AnonymousMutableDenseHashTableValueShape sets the optional value_shape attribute to value.
//
// value: The shape of each value.
// If not specified, defaults to {}
func AnonymousMutableDenseHashTableValueShape(value tf.Shape) AnonymousMutableDenseHashTableAttr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// AnonymousMutableDenseHashTableInitialNumBuckets sets the optional initial_num_buckets attribute to value.
//
// value: The initial number of hash table buckets. Must be a power
// of 2.
// If not specified, defaults to 131072
func AnonymousMutableDenseHashTableInitialNumBuckets(value int64) AnonymousMutableDenseHashTableAttr {
	return func(m optionalAttr) {
		m["initial_num_buckets"] = value
	}
}

// AnonymousMutableDenseHashTableMaxLoadFactor sets the optional max_load_factor attribute to value.
//
// value: The maximum ratio between number of entries and number of
// buckets before growing the table. Must be between 0 and 1.
// If not specified, defaults to 0.8
func AnonymousMutableDenseHashTableMaxLoadFactor(value float32) AnonymousMutableDenseHashTableAttr {
	return func(m optionalAttr) {
		m["max_load_factor"] = value
	}
}

// Creates an empty anonymous mutable hash table that uses tensors as the backing store.
//
// This op creates a new anonymous mutable hash table (as a resource) every time
// it is executed, with the specified dtype of its keys and values,
// returning the resource handle. Each value must be a scalar.
// Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
//
// It uses "open addressing" with quadratic reprobing to resolve
// collisions.
//
// The table is anonymous in the sense that it can only be
// accessed by the returned resource handle (e.g. it cannot be looked up
// by a name in a resource manager). The table will be automatically
// deleted when all resource handles pointing to it are gone.
//
// Arguments:
//
//	empty_key: The key used to represent empty key buckets internally. Must not
//
// be used in insert or lookup operations.
//
//	value_dtype: Type of the table values.
//
// Returns The resource handle to the newly created hash-table resource.
func AnonymousMutableDenseHashTable(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...AnonymousMutableDenseHashTableAttr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AnonymousMutableDenseHashTable",
		Input: []tf.Input{
			empty_key, deleted_key,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates an empty anonymous mutable hash table.
//
// This op creates a new anonymous mutable hash table (as a resource) every time
// it is executed, with the specified dtype of its keys and values,
// returning the resource handle. Each value must be a scalar.
// Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
// The table is anonymous in the sense that it can only be
// accessed by the returned resource handle (e.g. it cannot be looked up
// by a name in a resource manager). The table will be automatically
// deleted when all resource handles pointing to it are gone.
//
// Arguments:
//
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns The resource handle to the newly created hash-table resource.
func AnonymousMutableHashTable(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	opspec := tf.OpSpec{
		Type: "AnonymousMutableHashTable",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AnonymousMutableHashTableOfTensorsAttr is an optional argument to AnonymousMutableHashTableOfTensors.
type AnonymousMutableHashTableOfTensorsAttr func(optionalAttr)

// AnonymousMutableHashTableOfTensorsValueShape sets the optional value_shape attribute to value.
// If not specified, defaults to {}
func AnonymousMutableHashTableOfTensorsValueShape(value tf.Shape) AnonymousMutableHashTableOfTensorsAttr {
	return func(m optionalAttr) {
		m["value_shape"] = value
	}
}

// Creates an empty anonymous mutable hash table of vector values.
//
// This op creates a new anonymous mutable hash table (as a resource) every time
// it is executed, with the specified dtype of its keys and values,
// returning the resource handle. Each value must be a vector.
// Data can be inserted into the table using
// the insert operations. It does not support the initialization operation.
// The table is anonymous in the sense that it can only be
// accessed by the returned resource handle (e.g. it cannot be looked up
// by a name in a resource manager). The table will be automatically
// deleted when all resource handles pointing to it are gone.
//
// Arguments:
//
//	key_dtype: Type of the table keys.
//	value_dtype: Type of the table values.
//
// Returns The resource handle to the newly created hash-table resource.
func AnonymousMutableHashTableOfTensors(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...AnonymousMutableHashTableOfTensorsAttr) (table_handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AnonymousMutableHashTableOfTensors",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AnyAttr is an optional argument to Any.
type AnyAttr func(optionalAttr)

// AnyKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func AnyKeepDims(value bool) AnyAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the "logical or" of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
//
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Any(scope *Scope, input tf.Output, axis tf.Output, optional ...AnyAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Any",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ApproxTopKAttr is an optional argument to ApproxTopK.
type ApproxTopKAttr func(optionalAttr)

// ApproxTopKReductionDimension sets the optional reduction_dimension attribute to value.
//
// value: Integer dimension along which to search. Default: -1.
// If not specified, defaults to -1
func ApproxTopKReductionDimension(value int64) ApproxTopKAttr {
	return func(m optionalAttr) {
		m["reduction_dimension"] = value
	}
}

// ApproxTopKRecallTarget sets the optional recall_target attribute to value.
//
// value: Recall target for the approximation. Range in (0,1]
// If not specified, defaults to 0.95
func ApproxTopKRecallTarget(value float32) ApproxTopKAttr {
	return func(m optionalAttr) {
		m["recall_target"] = value
	}
}

// ApproxTopKIsMaxK sets the optional is_max_k attribute to value.
//
// value: When true, computes max-k; otherwise computes min-k.
// If not specified, defaults to true
func ApproxTopKIsMaxK(value bool) ApproxTopKAttr {
	return func(m optionalAttr) {
		m["is_max_k"] = value
	}
}

// ApproxTopKReductionInputSizeOverride sets the optional reduction_input_size_override attribute to value.
//
// value: When set to a positive value, it overrides the size determined by
// `input[reduction_dim]` for evaluating the recall. This option is useful when
// the given `input` is only a subset of the overall computation in SPMD or
// distributed pipelines, where the true input size cannot be inferred from the
// `input` shape.
// If not specified, defaults to -1
func ApproxTopKReductionInputSizeOverride(value int64) ApproxTopKAttr {
	return func(m optionalAttr) {
		m["reduction_input_size_override"] = value
	}
}

// ApproxTopKAggregateToTopk sets the optional aggregate_to_topk attribute to value.
//
// value: When true, aggregates approximate results to top-k. When false, returns the
// approximate results. The number of the approximate results is
// implementation-defined and is greater than or equal to the specified `k`.
// If not specified, defaults to true
func ApproxTopKAggregateToTopk(value bool) ApproxTopKAttr {
	return func(m optionalAttr) {
		m["aggregate_to_topk"] = value
	}
}

// Returns min/max k values and their indices of the input operand in an approximate manner.
//
// See https://arxiv.org/abs/2206.14286 for the algorithm details.
// This op is only optimized on TPU currently.
//
// Arguments:
//
//	input: Array to search. Must be at least 1-D of a floating-point type.
//	k: Specifies the number of min/max-k values to return.
//
// Returns:
//
//	values: The min/max k values along the `reduction_dimension` of the `input` operand.
//
// The dimensions are the same as the `input` operand except for the
// `reduction_dimension`: when `aggregate_to_topk` is true, the reduction
// dimension is `k`; otherwise, it is greater than or equal to `k`, where the
// size is implementation-defined.
//
//	indices: The indices of `values` along the `reduction_dimension` of the `input` operand.
func ApproxTopK(scope *Scope, input tf.Output, k int64, optional ...ApproxTopKAttr) (values tf.Output, indices tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"k": k}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ApproxTopK",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
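
// A hedged usage sketch (editor's addition); the input shape and attribute
// values are illustrative only:
//
// ```go
// s := NewScope()
// scores := Placeholder(s, tf.Float) // e.g. shape [batch, n]
// vals, idxs := ApproxTopK(s, scores, 10,
// 	ApproxTopKRecallTarget(0.9),
// 	ApproxTopKAggregateToTopk(true))
// _, _ = vals, idxs
// ```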

// ApproximateEqualAttr is an optional argument to ApproximateEqual.
type ApproximateEqualAttr func(optionalAttr)

// ApproximateEqualTolerance sets the optional tolerance attribute to value.
// If not specified, defaults to 1e-05
func ApproximateEqualTolerance(value float32) ApproximateEqualAttr {
	return func(m optionalAttr) {
		m["tolerance"] = value
	}
}

// Returns the truth value of abs(x-y) < tolerance element-wise.
func ApproximateEqual(scope *Scope, x tf.Output, y tf.Output, optional ...ApproximateEqualAttr) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ApproximateEqual",
		Input: []tf.Input{
			x, y,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ArgMaxAttr is an optional argument to ArgMax.
type ArgMaxAttr func(optionalAttr)

// ArgMaxOutputType sets the optional output_type attribute to value.
// If not specified, defaults to DT_INT64
func ArgMaxOutputType(value tf.DataType) ArgMaxAttr {
	return func(m optionalAttr) {
		m["output_type"] = value
	}
}

// Returns the index with the largest value across dimensions of a tensor.
//
// Note that in case of ties the identity of the return value is not guaranteed.
//
// Usage:
//
//	```python
//	import tensorflow as tf
//	a = [1, 10, 26.9, 2.8, 166.32, 62.3]
//	b = tf.math.argmax(input = a)
//	c = tf.keras.backend.eval(b)
//	# c = 4
//	# here a[4] = 166.32 which is the largest element of a across axis 0
//	```
//
// Arguments:
//
//	dimension: int16, int32 or int64, must be in the range `[-rank(input), rank(input))`.
//
// Describes which dimension of the input Tensor to reduce across. For vectors,
// use dimension = 0.
func ArgMax(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ArgMax",
		Input: []tf.Input{
			input, dimension,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
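
// Go counterpart to the Python usage above (editor's sketch):
//
// ```go
// s := NewScope()
// a := Const(s.SubScope("a"), []float32{1, 10, 26.9, 2.8, 166.32, 62.3})
// dim := Const(s.SubScope("dim"), int32(0))
// idx := ArgMax(s, a, dim, ArgMaxOutputType(tf.Int32)) // 4 when evaluated
// _ = idx
// ```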

// ArgMinAttr is an optional argument to ArgMin.
type ArgMinAttr func(optionalAttr)

// ArgMinOutputType sets the optional output_type attribute to value.
// If not specified, defaults to DT_INT64
func ArgMinOutputType(value tf.DataType) ArgMinAttr {
	return func(m optionalAttr) {
		m["output_type"] = value
	}
}

// Returns the index with the smallest value across dimensions of a tensor.
//
// Note that in case of ties the identity of the return value is not guaranteed.
//
// Usage:
//
//	```python
//	import tensorflow as tf
//	a = [1, 10, 26.9, 2.8, 166.32, 62.3]
//	b = tf.math.argmin(input = a)
//	c = tf.keras.backend.eval(b)
//	# c = 0
//	# here a[0] = 1 which is the smallest element of a across axis 0
//	```
//
// Arguments:
//
//	dimension: int32 or int64, must be in the range `[-rank(input), rank(input))`.
//
// Describes which dimension of the input Tensor to reduce across. For vectors,
// use dimension = 0.
func ArgMin(scope *Scope, input tf.Output, dimension tf.Output, optional ...ArgMinAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ArgMin",
		Input: []tf.Input{
			input, dimension,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AsStringAttr is an optional argument to AsString.
type AsStringAttr func(optionalAttr)

// AsStringPrecision sets the optional precision attribute to value.
//
// value: The post-decimal precision to use for floating point numbers.
// Only used if precision > -1.
// If not specified, defaults to -1
func AsStringPrecision(value int64) AsStringAttr {
	return func(m optionalAttr) {
		m["precision"] = value
	}
}

// AsStringScientific sets the optional scientific attribute to value.
//
// value: Use scientific notation for floating point numbers.
// If not specified, defaults to false
func AsStringScientific(value bool) AsStringAttr {
	return func(m optionalAttr) {
		m["scientific"] = value
	}
}

// AsStringShortest sets the optional shortest attribute to value.
//
// value: Use shortest representation (either scientific or standard) for
// floating point numbers.
// If not specified, defaults to false
func AsStringShortest(value bool) AsStringAttr {
	return func(m optionalAttr) {
		m["shortest"] = value
	}
}

// AsStringWidth sets the optional width attribute to value.
//
// value: Pad pre-decimal numbers to this width.
// Applies to both floating point and integer numbers.
// Only used if width > -1.
// If not specified, defaults to -1
func AsStringWidth(value int64) AsStringAttr {
	return func(m optionalAttr) {
		m["width"] = value
	}
}

// AsStringFill sets the optional fill attribute to value.
//
// value: The value to pad if width > -1.  If empty, pads with spaces.
// Another typical value is '0'.  String cannot be longer than 1 character.
// If not specified, defaults to ""
func AsStringFill(value string) AsStringAttr {
	return func(m optionalAttr) {
		m["fill"] = value
	}
}

// Converts each entry in the given tensor to strings.
//
// Supports many numeric types and boolean.
//
// For Unicode, see the
// [Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
// tutorial.
//
// Examples:
//
// >>> tf.strings.as_string([3, 2])
// <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>
// >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
// array([b'3.14', b'2.72'], dtype=object)
func AsString(scope *Scope, input tf.Output, optional ...AsStringAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AsString",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
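
// Go counterpart to the examples above (editor's sketch):
//
// ```go
// s := NewScope()
// x := Const(s.SubScope("x"), []float32{3.1415926, 2.71828})
// str := AsString(s, x, AsStringPrecision(2)) // ["3.14", "2.72"] when run
// _ = str
// ```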

// Computes the trigonometric inverse sine of x element-wise.
//
// The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
// if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`.
//
// **Note**: The output of `tf.math.asin` will lie within the invertible range
// of sine, i.e. [-pi/2, pi/2].
//
// For example:
//
// ```python
// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
// x = tf.constant([1.047, 0.785])
// y = tf.math.sin(x) # [0.8659266, 0.7068252]
//
// tf.math.asin(y) # [1.047, 0.785] = x
// ```
func Asin(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Asin",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes inverse hyperbolic sine of x element-wise.
//
//	Given an input tensor, this function computes inverse hyperbolic sine
//	for every element in the tensor. Both input and output have a range of
//	`[-inf, inf]`.
//
//	```python
//	x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
//	tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
//	```
func Asinh(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Asinh",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// AssertAttr is an optional argument to Assert.
type AssertAttr func(optionalAttr)

// AssertSummarize sets the optional summarize attribute to value.
//
// value: Print this many entries of each tensor.
// If not specified, defaults to 3
func AssertSummarize(value int64) AssertAttr {
	return func(m optionalAttr) {
		m["summarize"] = value
	}
}

// Asserts that the given condition is true.
//
// If `condition` evaluates to false, print the list of tensors in `data`.
// `summarize` determines how many entries of the tensors to print.
//
// Arguments:
//
//	condition: The condition to evaluate.
//	data: The tensors to print out when condition is false.
//
// Returns the created operation.
func Assert(scope *Scope, condition tf.Output, data []tf.Output, optional ...AssertAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Assert",
		Input: []tf.Input{
			condition, tf.OutputList(data),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
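
// A sketch of gating another op on Assert with a control dependency
// (editor's addition); Scope.WithControlDependencies is part of this package:
//
// ```go
// s := NewScope()
// cond := Const(s.SubScope("cond"), true)
// msg := Const(s.SubScope("msg"), "check failed")
// assertOp := Assert(s, cond, []tf.Output{msg}, AssertSummarize(1))
// guarded := s.WithControlDependencies(assertOp)
// y := Abs(guarded, Const(guarded.SubScope("x"), float32(-1)))
// _ = y // Abs runs only after the assertion succeeds
// ```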

// A transformation that asserts which transformations happen next.
//
// This transformation checks whether the camel-case names (i.e. "FlatMap", not
// "flat_map") of the transformations following this transformation match the list
// of names in the `transformations` argument. If there is a mismatch, the
// transformation raises an exception.
//
// The check occurs when iterating over the contents of the dataset, which
// means that the check happens *after* any static optimizations are applied
// to the dataset graph.
//
// Arguments:
//
//	input_dataset: A variant tensor representing the input dataset.
//
// `AssertNextDataset` passes through the outputs of its input dataset.
//
//	transformations: A `tf.string` vector `tf.Tensor` identifying the transformations that are
//
// expected to happen next.
func AssertNextDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AssertNextDataset",
		Input: []tf.Input{
			input_dataset, transformations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A transformation that asserts which transformations happened previously.
//
// This transformation checks the names and, optionally, the attribute name-value
// pairs in the `transformations` argument against those of the transformations
// that preceded this transformation.  If there is a mismatch, the transformation
// raises an exception.
//
// The check occurs when iterating over the contents of the dataset, which
// means that the check happens *after* any static optimizations are applied
// to the dataset graph.
//
// Arguments:
//
//	input_dataset: A variant tensor representing the input dataset.
//
// `AssertPrevDataset` passes through the outputs of its input dataset.
//
//	transformations: A `tf.string` vector `tf.Tensor` identifying the transformations, with optional
//
// attribute name-value pairs, that are expected to have happened previously.
func AssertPrevDataset(scope *Scope, input_dataset tf.Output, transformations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "AssertPrevDataset",
		Input: []tf.Input{
			input_dataset, transformations,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Adds a value to the current value of a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to
// see the incremented value or a subsequent newer one.
//
// Arguments:
//
//	resource: handle to the resource in which to store the variable.
//	value: the value by which the variable will be incremented.
//
// Returns the created operation.
func AssignAddVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AssignAddVariableOp",
		Input: []tf.Input{
			resource, value,
		},
	}
	return scope.AddOperation(opspec)
}

// Subtracts a value from the current value of a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to
// see the decremented value or a subsequent newer one.
//
// Arguments:
//
//	resource: handle to the resource in which to store the variable.
//	value: the value by which the variable will be decremented.
//
// Returns the created operation.
func AssignSubVariableOp(scope *Scope, resource tf.Output, value tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "AssignSubVariableOp",
		Input: []tf.Input{
			resource, value,
		},
	}
	return scope.AddOperation(opspec)
}

// AssignVariableOpAttr is an optional argument to AssignVariableOp.
type AssignVariableOpAttr func(optionalAttr)

// AssignVariableOpValidateShape sets the optional validate_shape attribute to value.
// If not specified, defaults to false
func AssignVariableOpValidateShape(value bool) AssignVariableOpAttr {
	return func(m optionalAttr) {
		m["validate_shape"] = value
	}
}

// Assigns a new value to a variable.
//
// Any ReadVariableOp with a control dependency on this op is guaranteed to return
// this value or a subsequent newer value of the variable.
//
// Arguments:
//
//	resource: handle to the resource in which to store the variable.
//	value: the value to set the new tensor to use.
//
// Returns the created operation.
func AssignVariableOp(scope *Scope, resource tf.Output, value tf.Output, optional ...AssignVariableOpAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "AssignVariableOp",
		Input: []tf.Input{
			resource, value,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
1650
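// The following is an editor-added sketch, not machine generated, showing how
// the resource-variable ops above compose. It creates a scalar float32
// variable, initializes it, increments it with AssignAddVariableOp, and reads
// it back under a control dependency so the read is guaranteed to observe the
// increment. It assumes the VarHandleOp, ReadVariableOp, and Const wrappers
// defined elsewhere in this package.
func exampleResourceVariableOps(s *Scope) tf.Output {
	// Handle to a scalar float32 resource variable.
	handle := VarHandleOp(s, tf.Float, tf.ScalarShape())
	// Initialize the variable to 1.0.
	init := AssignVariableOp(s, handle, Const(s.SubScope("init"), float32(1)))
	// Add 2.0, sequenced after the initialization.
	addScope := s.WithControlDependencies(init)
	add := AssignAddVariableOp(addScope, handle, Const(addScope.SubScope("delta"), float32(2)))
	// A read with a control dependency on `add` sees the incremented value.
	readScope := s.WithControlDependencies(add)
	return ReadVariableOp(readScope, handle, tf.Float)
}
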
1651// AssignVariableXlaConcatNDAttr is an optional argument to AssignVariableXlaConcatND.
1652type AssignVariableXlaConcatNDAttr func(optionalAttr)
1653
1654// AssignVariableXlaConcatNDPaddings sets the optional paddings attribute to value.
1655//
1656// value: Optional list of right paddings per dimension to strip from the final merged
1657// tensor. These paddings must not exceed the dimension size of the merged result
1658// prior to stripping paddings.
1659// If not specified, defaults to {}
1660func AssignVariableXlaConcatNDPaddings(value []int64) AssignVariableXlaConcatNDAttr {
1661	return func(m optionalAttr) {
1662		m["paddings"] = value
1663	}
1664}
1665
// Concats input tensors across all dimensions.
//
// An op which merges slices of the input tensor based on the given num_concats
// attribute, optionally strips paddings, and writes the merged tensor without
// paddings to the resource variable.
1671//
1672// This op may be generated via the TPU bridge.
1673//
1674// For example, with `input` tensor:
1675// ```
1676// [[0, 1],
1677//
1678//	[4, 5]]
1679//
1680// [[2, 3],
1681//
1682//	[6, 7]]
1683//
1684// [[8, 9],
1685//
1686//	[12, 13]]
1687//
1688// [[10, 11],
1689//
1690//	[14, 15]]
1691//
1692// ```
// `num_concats`:
1694// ```
1695// [2, 2]
1696// ```
1697// and `paddings`:
1698// ```
1699// [1, 1]
1700// ```
// the expected merged result is:
1702// ```
1703// [[0, 1, 2],
1704//
1705//	[4, 5, 6],
1706//	[8, 9, 10]]
1707//
1708// ```
1709//
// Arguments:
//
//	resource: Resource variable for concatenated input tensors across all dimensions.
//	inputs: Input tensor slices in row-major order to merge across all dimensions. All
//
// inputs must have the same shape.
//
//	num_concats: Number of ways to merge per dimension.
1729//
1730// Returns the created operation.
1731func AssignVariableXlaConcatND(scope *Scope, resource tf.Output, inputs []tf.Output, num_concats []int64, optional ...AssignVariableXlaConcatNDAttr) (o *tf.Operation) {
1732	if scope.Err() != nil {
1733		return
1734	}
1735	attrs := map[string]interface{}{"num_concats": num_concats}
1736	for _, a := range optional {
1737		a(attrs)
1738	}
1739	opspec := tf.OpSpec{
1740		Type: "AssignVariableXlaConcatND",
1741		Input: []tf.Input{
1742			resource, tf.OutputList(inputs),
1743		},
1744		Attrs: attrs,
1745	}
1746	return scope.AddOperation(opspec)
1747}
1748
// Computes the trigonometric inverse tangent of x element-wise.
1750//
1751// The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
1752// if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`.
1753//
1754// **Note**: The output of `tf.math.atan` will lie within the invertible range
// of tan, i.e., (-pi/2, pi/2).
1756//
1757// For example:
1758//
1759// ```python
1760// # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
1761// x = tf.constant([1.047, 0.785])
1762// y = tf.math.tan(x) # [1.731261, 0.99920404]
1763//
1764// tf.math.atan(y) # [1.047, 0.785] = x
1765// ```
1766func Atan(scope *Scope, x tf.Output) (y tf.Output) {
1767	if scope.Err() != nil {
1768		return
1769	}
1770	opspec := tf.OpSpec{
1771		Type: "Atan",
1772		Input: []tf.Input{
1773			x,
1774		},
1775	}
1776	op := scope.AddOperation(opspec)
1777	return op.Output(0)
1778}
1779
1780// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
1781//
1782// This is the angle \\( \theta \in [-\pi, \pi] \\) such that
1783// \\[ x = r \cos(\theta) \\]
1784// and
1785// \\[ y = r \sin(\theta) \\]
1786// where \\(r = \sqrt{x^2 + y^2} \\).
1787//
1788// For example:
1789//
1790// >>> x = [1., 1.]
1791// >>> y = [1., -1.]
1792// >>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())
1793// [ 45. -45.]
1794func Atan2(scope *Scope, y tf.Output, x tf.Output) (z tf.Output) {
1795	if scope.Err() != nil {
1796		return
1797	}
1798	opspec := tf.OpSpec{
1799		Type: "Atan2",
1800		Input: []tf.Input{
1801			y, x,
1802		},
1803	}
1804	op := scope.AddOperation(opspec)
1805	return op.Output(0)
1806}
1807
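// Editor-added sketch, not machine generated: the Go equivalent of the Python
// doctest above. It computes the angles of the points (1, 1) and (1, -1);
// evaluated in a session the result is approximately [0.7854, -0.7854]
// radians, i.e. +45 and -45 degrees.
func exampleAtan2(s *Scope) tf.Output {
	y := Const(s.SubScope("y"), []float32{1, -1})
	x := Const(s.SubScope("x"), []float32{1, 1})
	return Atan2(s, y, x)
}
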
1808// Computes inverse hyperbolic tangent of x element-wise.
1809//
1810//	Given an input tensor, this function computes inverse hyperbolic tangent
1811//	for every element in the tensor. Input range is `[-1,1]` and output range is
1812//	`[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
1813//	input is `1`, output will be `inf`. Values outside the range will have
1814//	`nan` as output.
1815//
1816//	```python
1817//	x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
1818//	tf.math.atanh(x) ==> [nan -inf -0.54930615 inf  0. 0.54930615 nan nan]
1819//	```
1820func Atanh(scope *Scope, x tf.Output) (y tf.Output) {
1821	if scope.Err() != nil {
1822		return
1823	}
1824	opspec := tf.OpSpec{
1825		Type: "Atanh",
1826		Input: []tf.Input{
1827			x,
1828		},
1829	}
1830	op := scope.AddOperation(opspec)
1831	return op.Output(0)
1832}
1833
1834// AudioSpectrogramAttr is an optional argument to AudioSpectrogram.
1835type AudioSpectrogramAttr func(optionalAttr)
1836
1837// AudioSpectrogramMagnitudeSquared sets the optional magnitude_squared attribute to value.
1838//
1839// value: Whether to return the squared magnitude or just the
1840// magnitude. Using squared magnitude can avoid extra calculations.
1841// If not specified, defaults to false
1842func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {
1843	return func(m optionalAttr) {
1844		m["magnitude_squared"] = value
1845	}
1846}
1847
1848// Produces a visualization of audio data over time.
1849//
1850// Spectrograms are a standard way of representing audio information as a series of
1851// slices of frequency information, one slice for each window of time. By joining
1852// these together into a sequence, they form a distinctive fingerprint of the sound
1853// over time.
1854//
1855// This op expects to receive audio data as an input, stored as floats in the range
1856// -1 to 1, together with a window width in samples, and a stride specifying how
1857// far to move the window between slices. From this it generates a three
1858// dimensional output. The first dimension is for the channels in the input, so a
1859// stereo audio input would have two here for example. The second dimension is time,
1860// with successive frequency slices. The third dimension has an amplitude value for
1861// each frequency during that time slice.
1862//
1863// This means the layout when converted and saved as an image is rotated 90 degrees
1864// clockwise from a typical spectrogram. Time is descending down the Y axis, and
1865// the frequency decreases from left to right.
1866//
// Each value in the result represents the square root of the sum of the squares
// of the real and imaginary parts of an FFT on the current window of samples.
// In this way, the
1869// lowest dimension represents the power of each frequency in the current window,
1870// and adjacent windows are concatenated in the next dimension.
1871//
1872// To get a more intuitive and visual look at what this operation does, you can run
1873// tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the
1874// resulting spectrogram as a PNG image.
1875//
1876// Arguments:
1877//
1878//	input: Float representation of audio data.
1879//	window_size: How wide the input window is in samples. For the highest efficiency
1880//
1881// this should be a power of two, but other values are accepted.
1882//
1883//	stride: How widely apart the center of adjacent sample windows should be.
1884//
1885// Returns 3D representation of the audio frequencies as an image.
1886func AudioSpectrogram(scope *Scope, input tf.Output, window_size int64, stride int64, optional ...AudioSpectrogramAttr) (spectrogram tf.Output) {
1887	if scope.Err() != nil {
1888		return
1889	}
1890	attrs := map[string]interface{}{"window_size": window_size, "stride": stride}
1891	for _, a := range optional {
1892		a(attrs)
1893	}
1894	opspec := tf.OpSpec{
1895		Type: "AudioSpectrogram",
1896		Input: []tf.Input{
1897			input,
1898		},
1899		Attrs: attrs,
1900	}
1901	op := scope.AddOperation(opspec)
1902	return op.Output(0)
1903}
1904
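// Editor-added sketch, not machine generated: a minimal pipeline that decodes
// a WAV file and turns it into a spectrogram, assuming the ReadFile and
// DecodeWav wrappers defined elsewhere in this package. The window_size and
// stride values are illustrative; a power-of-two window is the most efficient.
func exampleAudioSpectrogram(s *Scope, wavPath string) tf.Output {
	contents := ReadFile(s, Const(s.SubScope("path"), wavPath))
	// DecodeWav returns float32 samples in [-1, 1] plus the sample rate.
	audio, _ := DecodeWav(s.SubScope("decode"), contents)
	return AudioSpectrogram(s, audio, 1024, 512, AudioSpectrogramMagnitudeSquared(true))
}
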
1905// AudioSummaryAttr is an optional argument to AudioSummary.
1906type AudioSummaryAttr func(optionalAttr)
1907
1908// AudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
1909//
1910// value: Max number of batch elements to generate audio for.
1911// If not specified, defaults to 3
1912//
1913// REQUIRES: value >= 1
1914func AudioSummaryMaxOutputs(value int64) AudioSummaryAttr {
1915	return func(m optionalAttr) {
1916		m["max_outputs"] = value
1917	}
1918}
1919
1920// Outputs a `Summary` protocol buffer with audio.
1921//
1922// DEPRECATED at GraphDef version 15: Use AudioSummaryV2.
1923//
1924// The summary has up to `max_outputs` summary values containing audio. The
1925// audio is built from `tensor` which must be 3-D with shape `[batch_size,
1926// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
1927// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
1928//
1929// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
1930// build the `tag` of the summary values:
1931//
1932//   - If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
1933//   - If `max_outputs` is greater than 1, the summary value tags are
1934//     generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
1935//
1936// Arguments:
1937//
1938//	tag: Scalar. Used to build the `tag` attribute of the summary values.
1939//	tensor: 2-D of shape `[batch_size, frames]`.
1940//	sample_rate: The sample rate of the signal in hertz.
1941//
1942// Returns Scalar. Serialized `Summary` protocol buffer.
1943func AudioSummary(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate float32, optional ...AudioSummaryAttr) (summary tf.Output) {
1944	if scope.Err() != nil {
1945		return
1946	}
1947	attrs := map[string]interface{}{"sample_rate": sample_rate}
1948	for _, a := range optional {
1949		a(attrs)
1950	}
1951	opspec := tf.OpSpec{
1952		Type: "AudioSummary",
1953		Input: []tf.Input{
1954			tag, tensor,
1955		},
1956		Attrs: attrs,
1957	}
1958	op := scope.AddOperation(opspec)
1959	return op.Output(0)
1960}
1961
1962// AudioSummaryV2Attr is an optional argument to AudioSummaryV2.
1963type AudioSummaryV2Attr func(optionalAttr)
1964
1965// AudioSummaryV2MaxOutputs sets the optional max_outputs attribute to value.
1966//
1967// value: Max number of batch elements to generate audio for.
1968// If not specified, defaults to 3
1969//
1970// REQUIRES: value >= 1
1971func AudioSummaryV2MaxOutputs(value int64) AudioSummaryV2Attr {
1972	return func(m optionalAttr) {
1973		m["max_outputs"] = value
1974	}
1975}
1976
1977// Outputs a `Summary` protocol buffer with audio.
1978//
1979// The summary has up to `max_outputs` summary values containing audio. The
1980// audio is built from `tensor` which must be 3-D with shape `[batch_size,
1981// frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
1982// assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.
1983//
1984// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
1985// build the `tag` of the summary values:
1986//
1987//   - If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
1988//   - If `max_outputs` is greater than 1, the summary value tags are
1989//     generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
1990//
1991// Arguments:
1992//
1993//	tag: Scalar. Used to build the `tag` attribute of the summary values.
1994//	tensor: 2-D of shape `[batch_size, frames]`.
1995//	sample_rate: The sample rate of the signal in hertz.
1996//
1997// Returns Scalar. Serialized `Summary` protocol buffer.
1998func AudioSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...AudioSummaryV2Attr) (summary tf.Output) {
1999	if scope.Err() != nil {
2000		return
2001	}
2002	attrs := map[string]interface{}{}
2003	for _, a := range optional {
2004		a(attrs)
2005	}
2006	opspec := tf.OpSpec{
2007		Type: "AudioSummaryV2",
2008		Input: []tf.Input{
2009			tag, tensor, sample_rate,
2010		},
2011		Attrs: attrs,
2012	}
2013	op := scope.AddOperation(opspec)
2014	return op.Output(0)
2015}
2016
2017// AutoShardDatasetAttr is an optional argument to AutoShardDataset.
2018type AutoShardDatasetAttr func(optionalAttr)
2019
2020// AutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value.
2021// If not specified, defaults to 0
2022func AutoShardDatasetAutoShardPolicy(value int64) AutoShardDatasetAttr {
2023	return func(m optionalAttr) {
2024		m["auto_shard_policy"] = value
2025	}
2026}
2027
2028// AutoShardDatasetNumReplicas sets the optional num_replicas attribute to value.
2029// If not specified, defaults to 0
2030func AutoShardDatasetNumReplicas(value int64) AutoShardDatasetAttr {
2031	return func(m optionalAttr) {
2032		m["num_replicas"] = value
2033	}
2034}
2035
2036// Creates a dataset that shards the input dataset.
2037//
2038// Creates a dataset that shards the input dataset by num_workers, returning a
2039// sharded dataset for the index-th worker. This attempts to automatically shard
2040// a dataset by examining the Dataset graph and inserting a shard op before the
2041// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
2042//
2043// This dataset will throw a NotFound error if we cannot shard the dataset
2044// automatically.
2045//
2046// Arguments:
2047//
2048//	input_dataset: A variant tensor representing the input dataset.
2049//	num_workers: A scalar representing the number of workers to distribute this dataset across.
2050//	index: A scalar representing the index of the current worker out of num_workers.
2051func AutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...AutoShardDatasetAttr) (handle tf.Output) {
2052	if scope.Err() != nil {
2053		return
2054	}
2055	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
2056	for _, a := range optional {
2057		a(attrs)
2058	}
2059	opspec := tf.OpSpec{
2060		Type: "AutoShardDataset",
2061		Input: []tf.Input{
2062			input_dataset, num_workers, index,
2063		},
2064		Attrs: attrs,
2065	}
2066	op := scope.AddOperation(opspec)
2067	return op.Output(0)
2068}
2069
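// Editor-added sketch, not machine generated: shards an existing dataset
// variant across four workers and returns the shard for worker index 1. The
// policy value 0 corresponds to AUTO in tf.data's AutoShardPolicy enum.
func exampleAutoShardDataset(s *Scope, dataset tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	numWorkers := Const(s.SubScope("num_workers"), int64(4))
	index := Const(s.SubScope("index"), int64(1))
	return AutoShardDataset(s, dataset, numWorkers, index, types, shapes,
		AutoShardDatasetAutoShardPolicy(0))
}
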
2070// AvgPoolAttr is an optional argument to AvgPool.
2071type AvgPoolAttr func(optionalAttr)
2072
2073// AvgPoolDataFormat sets the optional data_format attribute to value.
2074//
2075// value: Specify the data format of the input and output data. With the
2076// default format "NHWC", the data is stored in the order of:
2077//
2078//	[batch, in_height, in_width, in_channels].
2079//
2080// Alternatively, the format could be "NCHW", the data storage order of:
2081//
2082//	[batch, in_channels, in_height, in_width].
2083//
2084// If not specified, defaults to "NHWC"
2085func AvgPoolDataFormat(value string) AvgPoolAttr {
2086	return func(m optionalAttr) {
2087		m["data_format"] = value
2088	}
2089}
2090
2091// Performs average pooling on the input.
2092//
2093// Each entry in `output` is the mean of the corresponding size `ksize`
2094// window in `value`.
2095//
2096// Arguments:
2097//
2098//	value: 4-D with shape `[batch, height, width, channels]`.
2099//	ksize: The size of the sliding window for each dimension of `value`.
2100//	strides: The stride of the sliding window for each dimension of `value`.
2101//	padding: The type of padding algorithm to use.
2102//
2103// Returns The average pooled output tensor.
2104func AvgPool(scope *Scope, value tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolAttr) (output tf.Output) {
2105	if scope.Err() != nil {
2106		return
2107	}
2108	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
2109	for _, a := range optional {
2110		a(attrs)
2111	}
2112	opspec := tf.OpSpec{
2113		Type: "AvgPool",
2114		Input: []tf.Input{
2115			value,
2116		},
2117		Attrs: attrs,
2118	}
2119	op := scope.AddOperation(opspec)
2120	return op.Output(0)
2121}
2122
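// Editor-added sketch, not machine generated: average-pools a 4-D NHWC input
// with a 2x2 window and stride 2, the common "halve the spatial dimensions"
// case. ksize and strides are ordered [batch, height, width, channels], so
// pooling acts only on height and width here.
func exampleAvgPool(s *Scope, images tf.Output) tf.Output {
	return AvgPool(s, images,
		[]int64{1, 2, 2, 1}, // ksize: 2x2 spatial windows
		[]int64{1, 2, 2, 1}, // strides: move the window by 2 in each spatial dim
		"VALID")
}
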
2123// AvgPool3DAttr is an optional argument to AvgPool3D.
2124type AvgPool3DAttr func(optionalAttr)
2125
2126// AvgPool3DDataFormat sets the optional data_format attribute to value.
2127//
2128// value: The data format of the input and output data. With the
2129// default format "NDHWC", the data is stored in the order of:
2130//
2131//	[batch, in_depth, in_height, in_width, in_channels].
2132//
2133// Alternatively, the format could be "NCDHW", the data storage order is:
2134//
2135//	[batch, in_channels, in_depth, in_height, in_width].
2136//
2137// If not specified, defaults to "NDHWC"
2138func AvgPool3DDataFormat(value string) AvgPool3DAttr {
2139	return func(m optionalAttr) {
2140		m["data_format"] = value
2141	}
2142}
2143
2144// Performs 3D average pooling on the input.
2145//
2146// Each entry in `output` is the mean of the corresponding size `ksize` window in
2147// `value`.
2148//
2149// Arguments:
2150//
2151//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
2152//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
2153//
2154// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
2155//
2156//	strides: 1-D tensor of length 5. The stride of the sliding window for each
2157//
2158// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
2159//
2160//	padding: The type of padding algorithm to use.
2161//
2162// Returns The average pooled output tensor.
2163func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) {
2164	if scope.Err() != nil {
2165		return
2166	}
2167	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
2168	for _, a := range optional {
2169		a(attrs)
2170	}
2171	opspec := tf.OpSpec{
2172		Type: "AvgPool3D",
2173		Input: []tf.Input{
2174			input,
2175		},
2176		Attrs: attrs,
2177	}
2178	op := scope.AddOperation(opspec)
2179	return op.Output(0)
2180}
2181
2182// AvgPool3DGradAttr is an optional argument to AvgPool3DGrad.
2183type AvgPool3DGradAttr func(optionalAttr)
2184
2185// AvgPool3DGradDataFormat sets the optional data_format attribute to value.
2186//
2187// value: The data format of the input and output data. With the
2188// default format "NDHWC", the data is stored in the order of:
2189//
2190//	[batch, in_depth, in_height, in_width, in_channels].
2191//
2192// Alternatively, the format could be "NCDHW", the data storage order is:
2193//
2194//	[batch, in_channels, in_depth, in_height, in_width].
2195//
2196// If not specified, defaults to "NDHWC"
2197func AvgPool3DGradDataFormat(value string) AvgPool3DGradAttr {
2198	return func(m optionalAttr) {
2199		m["data_format"] = value
2200	}
2201}
2202
2203// Computes gradients of average pooling function.
2204//
2205// Arguments:
2206//
2207//	orig_input_shape: The original input dimensions.
2208//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
2209//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
2210//
2211// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
2212//
2213//	strides: 1-D tensor of length 5. The stride of the sliding window for each
2214//
2215// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
2216//
2217//	padding: The type of padding algorithm to use.
2218//
2219// Returns The backprop for input.
2220func AvgPool3DGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DGradAttr) (output tf.Output) {
2221	if scope.Err() != nil {
2222		return
2223	}
2224	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
2225	for _, a := range optional {
2226		a(attrs)
2227	}
2228	opspec := tf.OpSpec{
2229		Type: "AvgPool3DGrad",
2230		Input: []tf.Input{
2231			orig_input_shape, grad,
2232		},
2233		Attrs: attrs,
2234	}
2235	op := scope.AddOperation(opspec)
2236	return op.Output(0)
2237}
2238
2239// AvgPoolGradAttr is an optional argument to AvgPoolGrad.
2240type AvgPoolGradAttr func(optionalAttr)
2241
2242// AvgPoolGradDataFormat sets the optional data_format attribute to value.
2243//
2244// value: Specify the data format of the input and output data. With the
2245// default format "NHWC", the data is stored in the order of:
2246//
2247//	[batch, in_height, in_width, in_channels].
2248//
2249// Alternatively, the format could be "NCHW", the data storage order of:
2250//
2251//	[batch, in_channels, in_height, in_width].
2252//
2253// If not specified, defaults to "NHWC"
2254func AvgPoolGradDataFormat(value string) AvgPoolGradAttr {
2255	return func(m optionalAttr) {
2256		m["data_format"] = value
2257	}
2258}
2259
2260// Computes gradients of the average pooling function.
2261//
2262// Arguments:
2263//
2264//	orig_input_shape: 1-D.  Shape of the original input to `avg_pool`.
2265//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
2266//
2267// the output of `avg_pool`.
2268//
2269//	ksize: The size of the sliding window for each dimension of the input.
2270//	strides: The stride of the sliding window for each dimension of the input.
2271//	padding: The type of padding algorithm to use.
2272//
2273// Returns 4-D.  Gradients w.r.t. the input of `avg_pool`.
2274func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) {
2275	if scope.Err() != nil {
2276		return
2277	}
2278	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
2279	for _, a := range optional {
2280		a(attrs)
2281	}
2282	opspec := tf.OpSpec{
2283		Type: "AvgPoolGrad",
2284		Input: []tf.Input{
2285			orig_input_shape, grad,
2286		},
2287		Attrs: attrs,
2288	}
2289	op := scope.AddOperation(opspec)
2290	return op.Output(0)
2291}
2292
2293// BatchAttr is an optional argument to Batch.
2294type BatchAttr func(optionalAttr)
2295
2296// BatchMaxEnqueuedBatches sets the optional max_enqueued_batches attribute to value.
2297// If not specified, defaults to 10
2298func BatchMaxEnqueuedBatches(value int64) BatchAttr {
2299	return func(m optionalAttr) {
2300		m["max_enqueued_batches"] = value
2301	}
2302}
2303
2304// BatchAllowedBatchSizes sets the optional allowed_batch_sizes attribute to value.
2305// If not specified, defaults to {}
2306func BatchAllowedBatchSizes(value []int64) BatchAttr {
2307	return func(m optionalAttr) {
2308		m["allowed_batch_sizes"] = value
2309	}
2310}
2311
2312// BatchContainer sets the optional container attribute to value.
2313// If not specified, defaults to ""
2314func BatchContainer(value string) BatchAttr {
2315	return func(m optionalAttr) {
2316		m["container"] = value
2317	}
2318}
2319
2320// BatchSharedName sets the optional shared_name attribute to value.
2321// If not specified, defaults to ""
2322func BatchSharedName(value string) BatchAttr {
2323	return func(m optionalAttr) {
2324		m["shared_name"] = value
2325	}
2326}
2327
2328// BatchBatchingQueue sets the optional batching_queue attribute to value.
2329// If not specified, defaults to ""
2330func BatchBatchingQueue(value string) BatchAttr {
2331	return func(m optionalAttr) {
2332		m["batching_queue"] = value
2333	}
2334}
2335
2336// Batches all input tensors nondeterministically.
2337//
2338// When many instances of this Op are being run concurrently with the same
2339// container/shared_name in the same device, some will output zero-shaped Tensors
2340// and others will output Tensors of size up to max_batch_size.
2341//
2342// All Tensors in in_tensors are batched together (so, for example, labels and
// features should be batched with a single instance of this operation).
2344//
2345// Each invocation of batch emits an `id` scalar which will be used to identify
2346// this particular invocation when doing unbatch or its gradient.
2347//
2348// Each op which emits a non-empty batch will also emit a non-empty batch_index
// Tensor, which is a [K, 3] matrix where each row contains the invocation's id,
2350// start, and length of elements of each set of Tensors present in batched_tensors.
2351//
2352// Batched tensors are concatenated along the first dimension, and all tensors in
2353// in_tensors must have the first dimension of the same size.
2354//
2355// in_tensors: The tensors to be batched.
2356// num_batch_threads: Number of scheduling threads for processing batches of work.
2357//
2358//	Determines the number of batches processed in parallel.
2359//
2360// max_batch_size: Batch sizes will never be bigger than this.
2361// batch_timeout_micros: Maximum number of microseconds to wait before outputting
2362//
2363//	an incomplete batch.
2364//
2365// allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does
2366//
2367//	nothing. Otherwise, supplies a list of batch sizes, causing the op to pad
2368//	batches up to one of those sizes. The entries must increase monotonically, and
2369//	the final entry must equal max_batch_size.
2370//
2371// grad_timeout_micros: The timeout to use for the gradient. See Unbatch.
2372// batched_tensors: Either empty tensors or a batch of concatenated Tensors.
2373// batch_index: If out_tensors is non-empty, has information to invert it.
2374// container: Controls the scope of sharing of this batch.
2375// id: always contains a scalar with a unique ID for this invocation of Batch.
2376// shared_name: Concurrently running instances of batch in the same device with the
2377//
2378//	same container and shared_name will batch their elements together. If left
2379//	empty, the op name will be used as the shared name.
2380//
2381// T: the types of tensors to be batched.
2382func Batch(scope *Scope, in_tensors []tf.Output, num_batch_threads int64, max_batch_size int64, batch_timeout_micros int64, grad_timeout_micros int64, optional ...BatchAttr) (batched_tensors []tf.Output, batch_index tf.Output, id tf.Output) {
2383	if scope.Err() != nil {
2384		return
2385	}
2386	attrs := map[string]interface{}{"num_batch_threads": num_batch_threads, "max_batch_size": max_batch_size, "batch_timeout_micros": batch_timeout_micros, "grad_timeout_micros": grad_timeout_micros}
2387	for _, a := range optional {
2388		a(attrs)
2389	}
2390	opspec := tf.OpSpec{
2391		Type: "Batch",
2392		Input: []tf.Input{
2393			tf.OutputList(in_tensors),
2394		},
2395		Attrs: attrs,
2396	}
2397	op := scope.AddOperation(opspec)
2398	if scope.Err() != nil {
2399		return
2400	}
2401	var idx int
2402	var err error
2403	if batched_tensors, idx, err = makeOutputList(op, idx, "batched_tensors"); err != nil {
2404		scope.UpdateErr("Batch", err)
2405		return
2406	}
	batch_index = op.Output(idx)
	id = op.Output(idx + 1)
2409	return batched_tensors, batch_index, id
2410}
2411
2412// BatchDatasetAttr is an optional argument to BatchDataset.
2413type BatchDatasetAttr func(optionalAttr)
2414
2415// BatchDatasetMetadata sets the optional metadata attribute to value.
2416// If not specified, defaults to ""
2417func BatchDatasetMetadata(value string) BatchDatasetAttr {
2418	return func(m optionalAttr) {
2419		m["metadata"] = value
2420	}
2421}
2422
2423// Creates a dataset that batches `batch_size` elements from `input_dataset`.
2424//
2425// Arguments:
2426//
2427//	batch_size: A scalar representing the number of elements to accumulate in a
2428//
2429// batch.
2430func BatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...BatchDatasetAttr) (handle tf.Output) {
2431	if scope.Err() != nil {
2432		return
2433	}
2434	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
2435	for _, a := range optional {
2436		a(attrs)
2437	}
2438	opspec := tf.OpSpec{
2439		Type: "BatchDataset",
2440		Input: []tf.Input{
2441			input_dataset, batch_size,
2442		},
2443		Attrs: attrs,
2444	}
2445	op := scope.AddOperation(opspec)
2446	return op.Output(0)
2447}
2448
2449// BatchDatasetV2Attr is an optional argument to BatchDatasetV2.
2450type BatchDatasetV2Attr func(optionalAttr)
2451
2452// BatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
2453// If not specified, defaults to false
2454func BatchDatasetV2ParallelCopy(value bool) BatchDatasetV2Attr {
2455	return func(m optionalAttr) {
2456		m["parallel_copy"] = value
2457	}
2458}
2459
2460// BatchDatasetV2Metadata sets the optional metadata attribute to value.
2461// If not specified, defaults to ""
2462func BatchDatasetV2Metadata(value string) BatchDatasetV2Attr {
2463	return func(m optionalAttr) {
2464		m["metadata"] = value
2465	}
2466}
2467
2468// Creates a dataset that batches `batch_size` elements from `input_dataset`.
2469//
2470// Arguments:
2471//
2472//	batch_size: A scalar representing the number of elements to accumulate in a batch.
2473//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
2474//
2475// is smaller than desired.
2476func BatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...BatchDatasetV2Attr) (handle tf.Output) {
2477	if scope.Err() != nil {
2478		return
2479	}
2480	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
2481	for _, a := range optional {
2482		a(attrs)
2483	}
2484	opspec := tf.OpSpec{
2485		Type: "BatchDatasetV2",
2486		Input: []tf.Input{
2487			input_dataset, batch_size, drop_remainder,
2488		},
2489		Attrs: attrs,
2490	}
2491	op := scope.AddOperation(opspec)
2492	return op.Output(0)
2493}
2494
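// Editor-added sketch, not machine generated: batches an existing dataset
// variant into batches of 32 and drops the final short batch. The caller
// supplies the dataset handle and its element types and shapes.
func exampleBatchDatasetV2(s *Scope, dataset tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	batchSize := Const(s.SubScope("batch_size"), int64(32))
	dropRemainder := Const(s.SubScope("drop_remainder"), true)
	return BatchDatasetV2(s, dataset, batchSize, dropRemainder, types, shapes,
		BatchDatasetV2ParallelCopy(true))
}
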
2495// BatchMatMulAttr is an optional argument to BatchMatMul.
2496type BatchMatMulAttr func(optionalAttr)
2497
2498// BatchMatMulAdjX sets the optional adj_x attribute to value.
2499//
2500// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
2501// If not specified, defaults to false
2502func BatchMatMulAdjX(value bool) BatchMatMulAttr {
2503	return func(m optionalAttr) {
2504		m["adj_x"] = value
2505	}
2506}
2507
2508// BatchMatMulAdjY sets the optional adj_y attribute to value.
2509//
2510// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
2511// If not specified, defaults to false
2512func BatchMatMulAdjY(value bool) BatchMatMulAttr {
2513	return func(m optionalAttr) {
2514		m["adj_y"] = value
2515	}
2516}
2517
2518// Multiplies slices of two tensors in batches.
2519//
2520// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
2521// viewed as an element of a batch), and arranges the individual results
2522// in a single output tensor of the same batch size. Each of the
2523// individual slices can optionally be adjointed (to adjoint a matrix
2524// means to transpose and conjugate it) before multiplication by setting
2525// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
2526//
2527// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
2528// and `[..., r_y, c_y]`.
2529//
2530// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
2531//
2532//	r_o = c_x if adj_x else r_x
2533//	c_o = r_y if adj_y else c_y
2534//
2535// It is computed as:
2536//
2537//	output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
2538//
2539// Arguments:
2540//
2541//	x: 2-D or higher with shape `[..., r_x, c_x]`.
2542//	y: 2-D or higher with shape `[..., r_y, c_y]`.
2543//
2544// Returns 3-D or higher with shape `[..., r_o, c_o]`
2545func BatchMatMul(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulAttr) (output tf.Output) {
2546	if scope.Err() != nil {
2547		return
2548	}
2549	attrs := map[string]interface{}{}
2550	for _, a := range optional {
2551		a(attrs)
2552	}
2553	opspec := tf.OpSpec{
2554		Type: "BatchMatMul",
2555		Input: []tf.Input{
2556			x, y,
2557		},
2558		Attrs: attrs,
2559	}
2560	op := scope.AddOperation(opspec)
2561	return op.Output(0)
2562}
2563
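// Editor-added sketch, not machine generated: multiplies a batch of two 2x2
// matrices. With adj_x and adj_y left at their defaults the output shape is
// [..., r_x, c_y], here [2, 2, 2].
func exampleBatchMatMul(s *Scope) tf.Output {
	x := Const(s.SubScope("x"), [][][]float32{
		{{1, 2}, {3, 4}},
		{{5, 6}, {7, 8}},
	})
	y := Const(s.SubScope("y"), [][][]float32{
		{{1, 0}, {0, 1}}, // identity: first result slice equals x[0]
		{{0, 1}, {1, 0}}, // column swap: second result slice is {{6, 5}, {8, 7}}
	})
	return BatchMatMul(s, x, y)
}
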
2564// BatchMatMulV2Attr is an optional argument to BatchMatMulV2.
2565type BatchMatMulV2Attr func(optionalAttr)
2566
2567// BatchMatMulV2AdjX sets the optional adj_x attribute to value.
2568//
2569// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
2570// If not specified, defaults to false
2571func BatchMatMulV2AdjX(value bool) BatchMatMulV2Attr {
2572	return func(m optionalAttr) {
2573		m["adj_x"] = value
2574	}
2575}
2576
2577// BatchMatMulV2AdjY sets the optional adj_y attribute to value.
2578//
2579// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
2580// If not specified, defaults to false
2581func BatchMatMulV2AdjY(value bool) BatchMatMulV2Attr {
2582	return func(m optionalAttr) {
2583		m["adj_y"] = value
2584	}
2585}
2586
2587// Multiplies slices of two tensors in batches.
2588//
2589// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
2590// viewed as an element of a batch), and arranges the individual results
2591// in a single output tensor of the same batch size. Each of the
2592// individual slices can optionally be adjointed (to adjoint a matrix
2593// means to transpose and conjugate it) before multiplication by setting
2594// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
2595//
2596// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
2597// and `[..., r_y, c_y]`.
2598//
2599// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
2600//
2601//	r_o = c_x if adj_x else r_x
2602//	c_o = r_y if adj_y else c_y
2603//
2604// It is computed as:
2605//
2606//	output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
2607//
2608// *NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
2609// about broadcasting
2610// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
2611//
2612// Arguments:
2613//
2614//	x: 2-D or higher with shape `[..., r_x, c_x]`.
2615//	y: 2-D or higher with shape `[..., r_y, c_y]`.
2616//
2617// Returns 3-D or higher with shape `[..., r_o, c_o]`
2618func BatchMatMulV2(scope *Scope, x tf.Output, y tf.Output, optional ...BatchMatMulV2Attr) (output tf.Output) {
2619	if scope.Err() != nil {
2620		return
2621	}
2622	attrs := map[string]interface{}{}
2623	for _, a := range optional {
2624		a(attrs)
2625	}
2626	opspec := tf.OpSpec{
2627		Type: "BatchMatMulV2",
2628		Input: []tf.Input{
2629			x, y,
2630		},
2631		Attrs: attrs,
2632	}
2633	op := scope.AddOperation(opspec)
2634	return op.Output(0)
2635}
2636
2637// BatchMatMulV3Attr is an optional argument to BatchMatMulV3.
2638type BatchMatMulV3Attr func(optionalAttr)
2639
2640// BatchMatMulV3AdjX sets the optional adj_x attribute to value.
2641//
2642// value: If `True`, adjoint the slices of `x`. Defaults to `False`.
2643// If not specified, defaults to false
2644func BatchMatMulV3AdjX(value bool) BatchMatMulV3Attr {
2645	return func(m optionalAttr) {
2646		m["adj_x"] = value
2647	}
2648}
2649
2650// BatchMatMulV3AdjY sets the optional adj_y attribute to value.
2651//
2652// value: If `True`, adjoint the slices of `y`. Defaults to `False`.
2653// If not specified, defaults to false
2654func BatchMatMulV3AdjY(value bool) BatchMatMulV3Attr {
2655	return func(m optionalAttr) {
2656		m["adj_y"] = value
2657	}
2658}
2659
2660// Multiplies slices of two tensors in batches.
2661//
2662// Multiplies all slices of `Tensor` `x` and `y` (each slice can be
2663// viewed as an element of a batch), and arranges the individual results
2664// in a single output tensor of the same batch size. Each of the
2665// individual slices can optionally be adjointed (to adjoint a matrix
2666// means to transpose and conjugate it) before multiplication by setting
2667// the `adj_x` or `adj_y` flag to `True`, which are by default `False`.
2668//
2669// The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
2670// and `[..., r_y, c_y]`.
2671//
2672// The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:
2673//
2674//	r_o = c_x if adj_x else r_x
2675//	c_o = r_y if adj_y else c_y
2676//
2677// It is computed as:
2678//
2679//	output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
2680//
2681// *NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More
2682// about broadcasting
2683// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
2684//
2685// Arguments:
2686//
2687//	x: 2-D or higher with shape `[..., r_x, c_x]`.
2688//	y: 2-D or higher with shape `[..., r_y, c_y]`.
//	Tout: If not specified, Tout is the same type as the input type.
2690//
2691// Returns 3-D or higher with shape `[..., r_o, c_o]`
2692func BatchMatMulV3(scope *Scope, x tf.Output, y tf.Output, Tout tf.DataType, optional ...BatchMatMulV3Attr) (output tf.Output) {
2693	if scope.Err() != nil {
2694		return
2695	}
2696	attrs := map[string]interface{}{"Tout": Tout}
2697	for _, a := range optional {
2698		a(attrs)
2699	}
2700	opspec := tf.OpSpec{
2701		Type: "BatchMatMulV3",
2702		Input: []tf.Input{
2703			x, y,
2704		},
2705		Attrs: attrs,
2706	}
2707	op := scope.AddOperation(opspec)
2708	return op.Output(0)
2709}
2710
2711// Batch normalization.
2712//
2713// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
2714//
2715// This op is deprecated. Prefer `tf.nn.batch_normalization`.
2716//
2717// Arguments:
2718//
2719//	t: A 4D input Tensor.
2720//	m: A 1D mean Tensor with size matching the last dimension of t.
2721//
2722// This is the first output from tf.nn.moments,
2723// or a saved moving average thereof.
2724//
2725//	v: A 1D variance Tensor with size matching the last dimension of t.
2726//
2727// This is the second output from tf.nn.moments,
2728// or a saved moving average thereof.
2729//
2730//	beta: A 1D beta Tensor with size matching the last dimension of t.
2731//
2732// An offset to be added to the normalized tensor.
2733//
2734//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
2735//
2736// If "scale_after_normalization" is true, this tensor will be multiplied
2737// with the normalized tensor.
2738//
2739//	variance_epsilon: A small float number to avoid dividing by 0.
2740//	scale_after_normalization: A bool indicating whether the resulted tensor
2741//
2742// needs to be multiplied with gamma.
2743func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) {
2744	if scope.Err() != nil {
2745		return
2746	}
2747	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
2748	opspec := tf.OpSpec{
2749		Type: "BatchNormWithGlobalNormalization",
2750		Input: []tf.Input{
2751			t, m, v, beta, gamma,
2752		},
2753		Attrs: attrs,
2754	}
2755	op := scope.AddOperation(opspec)
2756	return op.Output(0)
2757}
2758
2759// Gradients for batch normalization.
2760//
2761// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization()
2762//
2763// This op is deprecated. See `tf.nn.batch_normalization`.
2764//
2765// Arguments:
2766//
2767//	t: A 4D input Tensor.
2768//	m: A 1D mean Tensor with size matching the last dimension of t.
2769//
2770// This is the first output from tf.nn.moments,
2771// or a saved moving average thereof.
2772//
2773//	v: A 1D variance Tensor with size matching the last dimension of t.
2774//
2775// This is the second output from tf.nn.moments,
2776// or a saved moving average thereof.
2777//
2778//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
2779//
2780// If "scale_after_normalization" is true, this Tensor will be multiplied
2781// with the normalized Tensor.
2782//
2783//	backprop: 4D backprop Tensor.
2784//	variance_epsilon: A small float number to avoid dividing by 0.
2785//	scale_after_normalization: A bool indicating whether the resulted tensor
2786//
2787// needs to be multiplied with gamma.
2788//
2789// Returns:
2790//
2791//	dx: 4D backprop tensor for input.
2792//	dm: 1D backprop tensor for mean.
2793//	dv: 1D backprop tensor for variance.
2794//	db: 1D backprop tensor for beta.
2795//	dg: 1D backprop tensor for gamma.
2796func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) {
2797	if scope.Err() != nil {
2798		return
2799	}
2800	attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
2801	opspec := tf.OpSpec{
2802		Type: "BatchNormWithGlobalNormalizationGrad",
2803		Input: []tf.Input{
2804			t, m, v, gamma, backprop,
2805		},
2806		Attrs: attrs,
2807	}
2808	op := scope.AddOperation(opspec)
2809	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
2810}
2811
2812// BatchToSpace for 4-D tensors of type T.
2813//
2814// This is a legacy version of the more general BatchToSpaceND.
2815//
2816// Rearranges (permutes) data from batch into blocks of spatial data, followed by
2817// cropping. This is the reverse transformation of SpaceToBatch. More specifically,
2818// this op outputs a copy of the input tensor where values from the `batch`
2819// dimension are moved in spatial blocks to the `height` and `width` dimensions,
2820// followed by cropping along the `height` and `width` dimensions.
2821//
2822// Arguments:
2823//
2824//	input: 4-D tensor with shape
2825//
2826// `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
2827//
2828//	depth]`. Note that the batch size of the input tensor must be divisible by
2829//
2830// `block_size * block_size`.
2831//
2832//	crops: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
2833//
2834// how many elements to crop from the intermediate result across the spatial
2835// dimensions as follows:
2836//
2837//	crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
2838//
2839// Returns 4-D with shape `[batch, height, width, depth]`, where:
2840//
2841//	height = height_pad - crop_top - crop_bottom
2842//	width = width_pad - crop_left - crop_right
2843//
2844// The attr `block_size` must be greater than one. It indicates the block size.
2845//
2846// Some examples:
2847//
2848// (1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
2849//
2850// ```
2851// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
2852// ```
2853//
2854// The output tensor has shape `[1, 2, 2, 1]` and value:
2855//
2856// ```
2857// x = [[[[1], [2]], [[3], [4]]]]
2858// ```
2859//
2860// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
2861//
2862// ```
2863// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
2864// ```
2865//
2866// The output tensor has shape `[1, 2, 2, 3]` and value:
2867//
2868// ```
2869// x = [[[[1, 2, 3], [4, 5, 6]],
2870//
2871//	[[7, 8, 9], [10, 11, 12]]]]
2872//
2873// ```
2874//
2875// (3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
2876//
2877// ```
2878// x = [[[[1], [3]], [[9], [11]]],
2879//
2880//	[[[2], [4]], [[10], [12]]],
2881//	[[[5], [7]], [[13], [15]]],
2882//	[[[6], [8]], [[14], [16]]]]
2883//
2884// ```
2885//
2886// The output tensor has shape `[1, 4, 4, 1]` and value:
2887//
2888// ```
2889// x = [[[[1],   [2],  [3],  [4]],
2890//
2891//	[[5],   [6],  [7],  [8]],
2892//	[[9],  [10], [11],  [12]],
2893//	[[13], [14], [15],  [16]]]]
2894//
2895// ```
2896//
2897// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
2898//
2899// ```
2900// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
2901//
2902//	[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
2903//
2904// ```
2905//
2906// The output tensor has shape `[2, 2, 4, 1]` and value:
2907//
2908// ```
2909// x = [[[[1], [3]], [[5], [7]]],
2910//
2911//	[[[2], [4]], [[10], [12]]],
2912//	[[[5], [7]], [[13], [15]]],
2913//	[[[6], [8]], [[14], [16]]]]
2914//
2915// ```
2916func BatchToSpace(scope *Scope, input tf.Output, crops tf.Output, block_size int64) (output tf.Output) {
2917	if scope.Err() != nil {
2918		return
2919	}
2920	attrs := map[string]interface{}{"block_size": block_size}
2921	opspec := tf.OpSpec{
2922		Type: "BatchToSpace",
2923		Input: []tf.Input{
2924			input, crops,
2925		},
2926		Attrs: attrs,
2927	}
2928	op := scope.AddOperation(opspec)
2929	return op.Output(0)
2930}
2931
2932// BatchToSpace for N-D tensors of type T.
2933//
2934// This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
2935// `block_shape + [batch]`, interleaves these blocks back into the grid defined by
2936// the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
2937// the input.  The spatial dimensions of this intermediate result are then
2938// optionally cropped according to `crops` to produce the output.  This is the
2939// reverse of SpaceToBatch.  See below for a precise description.
2940//
2941// Arguments:
2942//
2943//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
2944//
2945// where spatial_shape has M dimensions.
2946//
//	block_shape: 1-D with shape `[M]`, all values must be >= 1.
//	crops: 2-D with shape `[M, 2]`, all values must be >= 0.
//
// `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
// dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
// required that
// `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
2953//
2954// This operation is equivalent to the following steps:
2955//
2956//  1. Reshape `input` to `reshaped` of shape:
2957//     [block_shape[0], ..., block_shape[M-1],
2958//     batch / prod(block_shape),
2959//     input_shape[1], ..., input_shape[N-1]]
2960//
2961//  2. Permute dimensions of `reshaped` to produce `permuted` of shape
2962//     [batch / prod(block_shape),
2963//
2964//     input_shape[1], block_shape[0],
2965//     ...,
2966//     input_shape[M], block_shape[M-1],
2967//
2968//     input_shape[M+1], ..., input_shape[N-1]]
2969//
2970//  3. Reshape `permuted` to produce `reshaped_permuted` of shape
2971//     [batch / prod(block_shape),
2972//
2973//     input_shape[1] * block_shape[0],
2974//     ...,
2975//     input_shape[M] * block_shape[M-1],
2976//
2977//     input_shape[M+1],
2978//     ...,
2979//     input_shape[N-1]]
2980//
2981//  4. Crop the start and end of dimensions `[1, ..., M]` of
2982//     `reshaped_permuted` according to `crops` to produce the output of shape:
2983//     [batch / prod(block_shape),
2984//
2985//     input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
2986//     ...,
2987//     input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
2988//
2989//     input_shape[M+1], ..., input_shape[N-1]]
2990//
2991// Some examples:
2992//
2993// (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
2994//
2995//	`crops = [[0, 0], [0, 0]]`:
2996//
2997// ```
2998// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
2999// ```
3000//
3001// The output tensor has shape `[1, 2, 2, 1]` and value:
3002//
3003// ```
3004// x = [[[[1], [2]], [[3], [4]]]]
3005// ```
3006//
3007// (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
3008//
3009//	`crops = [[0, 0], [0, 0]]`:
3010//
3011// ```
3012// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
3013// ```
3014//
3015// The output tensor has shape `[1, 2, 2, 3]` and value:
3016//
3017// ```
3018// x = [[[[1, 2, 3], [4, 5, 6]],
3019//
3020//	[[7, 8, 9], [10, 11, 12]]]]
3021//
3022// ```
3023//
3024// (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
3025//
3026//	`crops = [[0, 0], [0, 0]]`:
3027//
3028// ```
3029// x = [[[[1], [3]], [[9], [11]]],
3030//
3031//	[[[2], [4]], [[10], [12]]],
3032//	[[[5], [7]], [[13], [15]]],
3033//	[[[6], [8]], [[14], [16]]]]
3034//
3035// ```
3036//
3037// The output tensor has shape `[1, 4, 4, 1]` and value:
3038//
3039// ```
3040// x = [[[[1],   [2],  [3],  [4]],
3041//
3042//	[[5],   [6],  [7],  [8]],
3043//	[[9],  [10], [11],  [12]],
3044//	[[13], [14], [15],  [16]]]]
3045//
3046// ```
3047//
3048// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
3049//
3050//	`crops = [[0, 0], [2, 0]]`:
3051//
3052// ```
3053// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
3054//
3055//	[[[0], [2], [4]]], [[[0], [10], [12]]],
3056//	[[[0], [5], [7]]], [[[0], [13], [15]]],
3057//	[[[0], [6], [8]]], [[[0], [14], [16]]]]
3058//
3059// ```
3060//
3061// The output tensor has shape `[2, 2, 4, 1]` and value:
3062//
3063// ```
3064// x = [[[[1],   [2],  [3],  [4]],
3065//
3066//	 [[5],   [6],  [7],  [8]]],
3067//	[[[9],  [10], [11],  [12]],
3068//	 [[13], [14], [15],  [16]]]]
3069//
3070// ```
3071func BatchToSpaceND(scope *Scope, input tf.Output, block_shape tf.Output, crops tf.Output) (output tf.Output) {
3072	if scope.Err() != nil {
3073		return
3074	}
3075	opspec := tf.OpSpec{
3076		Type: "BatchToSpaceND",
3077		Input: []tf.Input{
3078			input, block_shape, crops,
3079		},
3080	}
3081	op := scope.AddOperation(opspec)
3082	return op.Output(0)
3083}
3084
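// Editor-added sketch, not machine generated: reproduces example (1) above.
// Four 1x1x1 batch entries are interleaved into one 2x2 spatial grid with no
// cropping, so the result has shape [1, 2, 2, 1] and value [[[[1], [2]], [[3], [4]]]].
func exampleBatchToSpaceND(s *Scope) tf.Output {
	input := Const(s.SubScope("input"), [][][][]float32{
		{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}},
	})
	blockShape := Const(s.SubScope("block_shape"), []int64{2, 2})
	crops := Const(s.SubScope("crops"), [][]int64{{0, 0}, {0, 0}})
	return BatchToSpaceND(s, input, blockShape, crops)
}
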
3085// Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
3086//
3087// The regularized incomplete beta integral is defined as:
3088//
3089// \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)
3090//
3091// where
3092//
3093// \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)
3094//
3095// is the incomplete beta function and \\(B(a, b)\\) is the *complete*
3096// beta function.
3097func Betainc(scope *Scope, a tf.Output, b tf.Output, x tf.Output) (z tf.Output) {
3098	if scope.Err() != nil {
3099		return
3100	}
3101	opspec := tf.OpSpec{
3102		Type: "Betainc",
3103		Input: []tf.Input{
3104			a, b, x,
3105		},
3106	}
3107	op := scope.AddOperation(opspec)
3108	return op.Output(0)
3109}
3110
3111// BiasAddAttr is an optional argument to BiasAdd.
3112type BiasAddAttr func(optionalAttr)
3113
3114// BiasAddDataFormat sets the optional data_format attribute to value.
3115//
3116// value: Specify the data format of the input and output data. With the
3117// default format "NHWC", the bias tensor will be added to the last dimension
3118// of the value tensor.
3119// Alternatively, the format could be "NCHW", the data storage order of:
3120//
3121//	[batch, in_channels, in_height, in_width].
3122//
// The tensor will be added to "in_channels", the third-to-the-last
// dimension.
3126//
3127// If not specified, defaults to "NHWC"
3128func BiasAddDataFormat(value string) BiasAddAttr {
3129	return func(m optionalAttr) {
3130		m["data_format"] = value
3131	}
3132}
3133
3134// Adds `bias` to `value`.
3135//
3136// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
3137// Broadcasting is supported, so `value` may have any number of dimensions.
3138//
3139// Arguments:
3140//
3141//	value: Any number of dimensions.
3142//	bias: 1-D with size the last dimension of `value`.
3143//
3144// Returns Broadcasted sum of `value` and `bias`.
3145func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) {
3146	if scope.Err() != nil {
3147		return
3148	}
3149	attrs := map[string]interface{}{}
3150	for _, a := range optional {
3151		a(attrs)
3152	}
3153	opspec := tf.OpSpec{
3154		Type: "BiasAdd",
3155		Input: []tf.Input{
3156			value, bias,
3157		},
3158		Attrs: attrs,
3159	}
3160	op := scope.AddOperation(opspec)
3161	return op.Output(0)
3162}
3163
3164// BiasAddGradAttr is an optional argument to BiasAddGrad.
3165type BiasAddGradAttr func(optionalAttr)
3166
3167// BiasAddGradDataFormat sets the optional data_format attribute to value.
3168//
3169// value: Specify the data format of the input and output data. With the
3170// default format "NHWC", the bias tensor will be added to the last dimension
3171// of the value tensor.
3172// Alternatively, the format could be "NCHW", the data storage order of:
3173//
3174//	[batch, in_channels, in_height, in_width].
3175//
// The tensor will be added to "in_channels", the third-to-the-last
// dimension.
3179//
3180// If not specified, defaults to "NHWC"
3181func BiasAddGradDataFormat(value string) BiasAddGradAttr {
3182	return func(m optionalAttr) {
3183		m["data_format"] = value
3184	}
3185}
3186
3187// The backward operation for "BiasAdd" on the "bias" tensor.
3188//
3189// It accumulates all the values from out_backprop into the feature dimension.
3190// For NHWC data format, the feature dimension is the last. For NCHW data format,
3191// the feature dimension is the third-to-last.
3192//
3193// Arguments:
3194//
3195//	out_backprop: Any number of dimensions.
3196//
3197// Returns 1-D with size the feature dimension of `out_backprop`.
3198func BiasAddGrad(scope *Scope, out_backprop tf.Output, optional ...BiasAddGradAttr) (output tf.Output) {
3199	if scope.Err() != nil {
3200		return
3201	}
3202	attrs := map[string]interface{}{}
3203	for _, a := range optional {
3204		a(attrs)
3205	}
3206	opspec := tf.OpSpec{
3207		Type: "BiasAddGrad",
3208		Input: []tf.Input{
3209			out_backprop,
3210		},
3211		Attrs: attrs,
3212	}
3213	op := scope.AddOperation(opspec)
3214	return op.Output(0)
3215}
3216
3217// Adds `bias` to `value`.
3218//
3219// This is a deprecated version of BiasAdd and will be soon removed.
3220//
3221// This is a special case of `tf.add` where `bias` is restricted to be 1-D.
3222// Broadcasting is supported, so `value` may have any number of dimensions.
3223//
3224// Arguments:
3225//
3226//	value: Any number of dimensions.
3227//	bias: 1-D with size the last dimension of `value`.
3228//
3229// Returns Broadcasted sum of `value` and `bias`.
3230func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) {
3231	if scope.Err() != nil {
3232		return
3233	}
3234	opspec := tf.OpSpec{
3235		Type: "BiasAddV1",
3236		Input: []tf.Input{
3237			value, bias,
3238		},
3239	}
3240	op := scope.AddOperation(opspec)
3241	return op.Output(0)
3242}
3243
3244// Counts the number of occurrences of each value in an integer array.
3245//
3246// Outputs a vector with length `size` and the same dtype as `weights`. If
3247// `weights` are empty, then index `i` stores the number of times the value `i` is
3248// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
3249// the value in `weights` at each index where the corresponding value in `arr` is
3250// `i`.
3251//
3252// Values in `arr` outside of the range [0, size) are ignored.
3253//
3254// Arguments:
3255//
3256//	arr: int32 `Tensor`.
3257//	size: non-negative int32 scalar `Tensor`.
//	weights: an int32, int64, float32, or float64 `Tensor` with the same
3259//
3260// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
3261// equal to 1.
3262//
3263// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for
3264// each value in the range [0, size).
3265func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) {
3266	if scope.Err() != nil {
3267		return
3268	}
3269	opspec := tf.OpSpec{
3270		Type: "Bincount",
3271		Input: []tf.Input{
3272			arr, size, weights,
3273		},
3274	}
3275	op := scope.AddOperation(opspec)
3276	return op.Output(0)
3277}
3278
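// Editor-added sketch, not machine generated: counts occurrences of each value
// in [1, 1, 2, 3, 3, 3]. Unit weights make this plain counting, and the output
// dtype follows weights, so the result is the float32 tensor [0, 2, 1, 3].
func exampleBincount(s *Scope) tf.Output {
	arr := Const(s.SubScope("arr"), []int32{1, 1, 2, 3, 3, 3})
	size := Const(s.SubScope("size"), int32(4))
	weights := Const(s.SubScope("weights"), []float32{1, 1, 1, 1, 1, 1})
	return Bincount(s, arr, size, weights)
}
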
3279// Bitcasts a tensor from one type to another without copying data.
3280//
3281// Given a tensor `input`, this operation returns a tensor that has the same buffer
3282// data as `input` with datatype `type`.
3283//
3284// If the input datatype `T` is larger than the output datatype `type` then the
3285// shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
3286//
3287// If `T` is smaller than `type`, the operator requires that the rightmost
3288// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
3289// [..., sizeof(`type`)/sizeof(`T`)] to [...].
3290//
3291	// tf.bitcast() and tf.cast() work differently when a real dtype is cast to a complex dtype
3292	// (e.g. tf.complex64 or tf.complex128): tf.cast() sets the imaginary part to 0, while
3293	// tf.bitcast() raises an error.
3294// For example,
3295//
3296// Example 1:
3297//
3298// >>> a = [1., 2., 3.]
3299// >>> equality_bitcast = tf.bitcast(a, tf.complex128)
3300// Traceback (most recent call last):
3301// ...
3302// InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
3303// >>> equality_cast = tf.cast(a, tf.complex128)
3304// >>> print(equality_cast)
3305// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
3306//
3307// Example 2:
3308//
3309// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
3310// <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
3311//
3312// Example 3:
3313//
3314// >>> x = [1., 2., 3.]
3315// >>> y = [0., 2., 3.]
3316	// >>> equality = tf.equal(x, y)
3317// >>> equality_cast = tf.cast(equality,tf.float32)
3318// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
3319// >>> print(equality)
3320// tf.Tensor([False True True], shape=(3,), dtype=bool)
3321// >>> print(equality_cast)
3322// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
3323// >>> print(equality_bitcast)
3324// tf.Tensor(
3325//
3326//	[[  0   0   0   0]
3327//	 [  0   0 128  63]
3328//	 [  0   0 128  63]], shape=(3, 4), dtype=uint8)
3329//
3330// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
3331// endian orderings will give different results.
3332func Bitcast(scope *Scope, input tf.Output, type_ tf.DataType) (output tf.Output) {
3333	if scope.Err() != nil {
3334		return
3335	}
3336	attrs := map[string]interface{}{"type": type_}
3337	opspec := tf.OpSpec{
3338		Type: "Bitcast",
3339		Input: []tf.Input{
3340			input,
3341		},
3342		Attrs: attrs,
3343	}
3344	op := scope.AddOperation(opspec)
3345	return op.Output(0)
3346}
3347
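// Example (editor's sketch): bitcasting a float32 scalar to uint8 exposes its
// four raw bytes as a new trailing dimension, mirroring Example 3 above. The
// byte order depends on host endianness; names are illustrative.
//
//	s := op.NewScope()
//	x := op.Const(s, float32(1.0))
//	raw := op.Bitcast(s, x, tf.Uint8) // shape (4,); [0 0 128 63] on little-endian hosts
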
3348// Elementwise computes the bitwise AND of `x` and `y`.
3349//
3350	// The result will have those bits set that are set in both `x` and `y`. The
3351// computation is performed on the underlying representations of `x` and `y`.
3352//
3353// For example:
3354//
3355// ```python
3356// import tensorflow as tf
3357// from tensorflow.python.ops import bitwise_ops
3358// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
3359//
3360//	tf.uint8, tf.uint16, tf.uint32, tf.uint64]
3361//
3362// for dtype in dtype_list:
3363//
3364//	lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
3365//	rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
3366//	exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
3367//
3368//	res = bitwise_ops.bitwise_and(lhs, rhs)
3369//	tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
3370//
3371// ```
3372func BitwiseAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
3373	if scope.Err() != nil {
3374		return
3375	}
3376	opspec := tf.OpSpec{
3377		Type: "BitwiseAnd",
3378		Input: []tf.Input{
3379			x, y,
3380		},
3381	}
3382	op := scope.AddOperation(opspec)
3383	return op.Output(0)
3384}
3385
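// Example (editor's sketch): the Go counterpart of the Python snippet above
// for a single dtype; BitwiseOr and BitwiseXor below are called identically.
//
//	s := op.NewScope()
//	lhs := op.Const(s, []int32{0, 5, 3, 14})
//	rhs := op.Const(s, []int32{5, 0, 7, 11})
//	z := op.BitwiseAnd(s, lhs, rhs) // [0 0 3 10]
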
3386// Elementwise computes the bitwise OR of `x` and `y`.
3387//
3388	// The result will have those bits set that are set in `x`, `y`, or both. The
3389// computation is performed on the underlying representations of `x` and `y`.
3390//
3391// For example:
3392//
3393// ```python
3394// import tensorflow as tf
3395// from tensorflow.python.ops import bitwise_ops
3396// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
3397//
3398//	tf.uint8, tf.uint16, tf.uint32, tf.uint64]
3399//
3400// for dtype in dtype_list:
3401//
3402//	lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
3403//	rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
3404//	exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
3405//
3406//	res = bitwise_ops.bitwise_or(lhs, rhs)
3407//	tf.assert_equal(tf.cast(res,  tf.float32), exp)  # TRUE
3408//
3409// ```
3410func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
3411	if scope.Err() != nil {
3412		return
3413	}
3414	opspec := tf.OpSpec{
3415		Type: "BitwiseOr",
3416		Input: []tf.Input{
3417			x, y,
3418		},
3419	}
3420	op := scope.AddOperation(opspec)
3421	return op.Output(0)
3422}
3423
3424// Elementwise computes the bitwise XOR of `x` and `y`.
3425//
3426	// The result will have those bits set that differ between `x` and `y`. The
3427// computation is performed on the underlying representations of `x` and `y`.
3428//
3429// For example:
3430//
3431// ```python
3432// import tensorflow as tf
3433// from tensorflow.python.ops import bitwise_ops
3434// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
3435//
3436//	tf.uint8, tf.uint16, tf.uint32, tf.uint64]
3437//
3438// for dtype in dtype_list:
3439//
3440//	lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
3441//	rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
3442//	exp = tf.constant([5, 5, 4, 5],  dtype=tf.float32)
3443//
3444//	res = bitwise_ops.bitwise_xor(lhs, rhs)
3445//	tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
3446//
3447// ```
3448func BitwiseXor(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
3449	if scope.Err() != nil {
3450		return
3451	}
3452	opspec := tf.OpSpec{
3453		Type: "BitwiseXor",
3454		Input: []tf.Input{
3455			x, y,
3456		},
3457	}
3458	op := scope.AddOperation(opspec)
3459	return op.Output(0)
3460}
3461
3462// BlockLSTMAttr is an optional argument to BlockLSTM.
3463type BlockLSTMAttr func(optionalAttr)
3464
3465// BlockLSTMForgetBias sets the optional forget_bias attribute to value.
3466//
3467// value: The forget gate bias.
3468// If not specified, defaults to 1
3469func BlockLSTMForgetBias(value float32) BlockLSTMAttr {
3470	return func(m optionalAttr) {
3471		m["forget_bias"] = value
3472	}
3473}
3474
3475// BlockLSTMCellClip sets the optional cell_clip attribute to value.
3476//
3477// value: Value to clip the 'cs' value to.
3478// If not specified, defaults to 3
3479func BlockLSTMCellClip(value float32) BlockLSTMAttr {
3480	return func(m optionalAttr) {
3481		m["cell_clip"] = value
3482	}
3483}
3484
3485// BlockLSTMUsePeephole sets the optional use_peephole attribute to value.
3486//
3487// value: Whether to use peephole weights.
3488// If not specified, defaults to false
3489func BlockLSTMUsePeephole(value bool) BlockLSTMAttr {
3490	return func(m optionalAttr) {
3491		m["use_peephole"] = value
3492	}
3493}
3494
3495// Computes the LSTM cell forward propagation for all the time steps.
3496//
3497// This is equivalent to applying LSTMBlockCell in a loop, like so:
3498//
3499// ```python
3500// for x1 in unpack(x):
3501//
3502//	i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
3503//	  x1, cs_prev, h_prev, w, wci, wcf, wco, b)
3504//	cs_prev = cs1
3505//	h_prev = h1
3506//	i.append(i1)
3507//	cs.append(cs1)
3508//	f.append(f1)
3509//	o.append(o1)
3510//	ci.append(ci1)
3511//	co.append(co1)
3512//	h.append(h1)
3513	// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
3514// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(ch), pack(h)
3515// ```
3516//
3517// Arguments:
3518//
3519//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
3520//
3521// with zeros beyond this length.
3522//
3523//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
3524//	cs_prev: Value of the initial cell state.
3525//	h_prev: Initial output of cell (to be used for peephole).
3526//	w: The weight matrix.
3527//	wci: The weight matrix for input gate peephole connection.
3528//	wcf: The weight matrix for forget gate peephole connection.
3529//	wco: The weight matrix for output gate peephole connection.
3530//	b: The bias vector.
3531//
3532// Returns:
3533//
3534//	i: The input gate over the whole time sequence.
3535//	cs: The cell state before the tanh over the whole time sequence.
3536//	f: The forget gate over the whole time sequence.
3537//	o: The output gate over the whole time sequence.
3538//	ci: The cell input over the whole time sequence.
3539//	co: The cell after the tanh over the whole time sequence.
3540//	h: The output h vector over the whole time sequence.
3541func BlockLSTM(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
3542	if scope.Err() != nil {
3543		return
3544	}
3545	attrs := map[string]interface{}{}
3546	for _, a := range optional {
3547		a(attrs)
3548	}
3549	opspec := tf.OpSpec{
3550		Type: "BlockLSTM",
3551		Input: []tf.Input{
3552			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
3553		},
3554		Attrs: attrs,
3555	}
3556	op := scope.AddOperation(opspec)
3557	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
3558}
3559
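// Example (editor's sketch): optional attributes such as forget_bias use the
// functional-options pattern seen throughout this package. The Placeholder
// inputs only illustrate the call shape; real tensors must have the
// documented shapes (e.g. x is (timelen, batch_size, num_inputs)).
//
//	s := op.NewScope()
//	seqLenMax := op.Const(s, int64(8))
//	x := op.Placeholder(s, tf.Float)
//	csPrev, hPrev := op.Placeholder(s, tf.Float), op.Placeholder(s, tf.Float)
//	w, b := op.Placeholder(s, tf.Float), op.Placeholder(s, tf.Float)
//	wci, wcf, wco := op.Placeholder(s, tf.Float), op.Placeholder(s, tf.Float), op.Placeholder(s, tf.Float)
//	i, cs, f, o, ci, co, h := op.BlockLSTM(s, seqLenMax, x, csPrev, hPrev, w, wci, wcf, wco, b,
//		op.BlockLSTMForgetBias(1.0), op.BlockLSTMUsePeephole(false))
//	_, _, _, _, _, _, _ = i, cs, f, o, ci, co, h
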
3560// Computes the LSTM cell backward propagation for the entire time sequence.
3561//
3562	// This implementation is to be used in conjunction with LSTMBlock.
3563//
3564// Arguments:
3565//
3566//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
3567//
3568// with zeros beyond this length.
3569//
3570//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
3571//	cs_prev: Value of the initial cell state.
3572//	h_prev: Initial output of cell (to be used for peephole).
3573//	w: The weight matrix.
3574//	wci: The weight matrix for input gate peephole connection.
3575//	wcf: The weight matrix for forget gate peephole connection.
3576//	wco: The weight matrix for output gate peephole connection.
3577//	b: The bias vector.
3578//	i: The input gate over the whole time sequence.
3579//	cs: The cell state before the tanh over the whole time sequence.
3580//	f: The forget gate over the whole time sequence.
3581//	o: The output gate over the whole time sequence.
3582//	ci: The cell input over the whole time sequence.
3583//	co: The cell after the tanh over the whole time sequence.
3584//	h: The output h vector over the whole time sequence.
3585//	cs_grad: The current gradient of cs.
3586//	h_grad: The gradient of h vector.
3587//	use_peephole: Whether to use peephole weights.
3588//
3589// Returns:
3590//
3591//	x_grad: The gradient of x to be back-propped.
3592//	cs_prev_grad: The gradient of cs_prev to be back-propped.
3593//	h_prev_grad: The gradient of h_prev to be back-propped.
3594//	w_grad: The gradient for w to be back-propped.
3595//	wci_grad: The gradient for wci to be back-propped.
3596//	wcf_grad: The gradient for wcf to be back-propped.
3597//	wco_grad: The gradient for wco to be back-propped.
3598	//	b_grad: The gradient for b to be back-propped.
3599func BlockLSTMGrad(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output) {
3600	if scope.Err() != nil {
3601		return
3602	}
3603	attrs := map[string]interface{}{"use_peephole": use_peephole}
3604	opspec := tf.OpSpec{
3605		Type: "BlockLSTMGrad",
3606		Input: []tf.Input{
3607			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
3608		},
3609		Attrs: attrs,
3610	}
3611	op := scope.AddOperation(opspec)
3612	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
3613}
3614
3615// Computes the LSTM cell backward propagation for the entire time sequence.
3616//
3617	// This implementation is to be used in conjunction with BlockLSTMV2.
3618//
3619// Arguments:
3620//
3621//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
3622//
3623// with zeros beyond this length.
3624//
3625//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
3626//	cs_prev: Value of the initial cell state.
3627//	h_prev: Initial output of cell (to be used for peephole).
3628//	w: The weight matrix.
3629//	wci: The weight matrix for input gate peephole connection.
3630//	wcf: The weight matrix for forget gate peephole connection.
3631//	wco: The weight matrix for output gate peephole connection.
3632//	b: The bias vector.
3633//	i: The input gate over the whole time sequence.
3634//	cs: The cell state before the tanh over the whole time sequence.
3635//	f: The forget gate over the whole time sequence.
3636//	o: The output gate over the whole time sequence.
3637//	ci: The cell input over the whole time sequence.
3638//	co: The cell after the tanh over the whole time sequence.
3639//	h: The output h vector over the whole time sequence.
3640//	cs_grad: The current gradient of cs.
3641//	h_grad: The gradient of h vector.
3642//	use_peephole: Whether to use peephole weights.
3643//
3644// Returns:
3645//
3646//	x_grad: The gradient of x to be back-propped.
3647//	cs_prev_grad: The gradient of cs_prev to be back-propped.
3648//	h_prev_grad: The gradient of h_prev to be back-propped.
3649//	w_grad: The gradient for w to be back-propped.
3650//	wci_grad: The gradient for wci to be back-propped.
3651//	wcf_grad: The gradient for wcf to be back-propped.
3652//	wco_grad: The gradient for wco to be back-propped.
3653	//	b_grad: The gradient for b to be back-propped.
3654func BlockLSTMGradV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (x_grad tf.Output, cs_prev_grad tf.Output, h_prev_grad tf.Output, w_grad tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output, b_grad tf.Output) {
3655	if scope.Err() != nil {
3656		return
3657	}
3658	attrs := map[string]interface{}{"use_peephole": use_peephole}
3659	opspec := tf.OpSpec{
3660		Type: "BlockLSTMGradV2",
3661		Input: []tf.Input{
3662			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, h, cs_grad, h_grad,
3663		},
3664		Attrs: attrs,
3665	}
3666	op := scope.AddOperation(opspec)
3667	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
3668}
3669
3670// BlockLSTMV2Attr is an optional argument to BlockLSTMV2.
3671type BlockLSTMV2Attr func(optionalAttr)
3672
3673// BlockLSTMV2CellClip sets the optional cell_clip attribute to value.
3674//
3675// value: Value to clip the 'cs' value to.
3676// If not specified, defaults to 0
3677func BlockLSTMV2CellClip(value float32) BlockLSTMV2Attr {
3678	return func(m optionalAttr) {
3679		m["cell_clip"] = value
3680	}
3681}
3682
3683// BlockLSTMV2UsePeephole sets the optional use_peephole attribute to value.
3684//
3685// value: Whether to use peephole weights.
3686// If not specified, defaults to false
3687func BlockLSTMV2UsePeephole(value bool) BlockLSTMV2Attr {
3688	return func(m optionalAttr) {
3689		m["use_peephole"] = value
3690	}
3691}
3692
3693// Computes the LSTM cell forward propagation for all the time steps.
3694//
3695// This is equivalent to applying LSTMBlockCell in a loop, like so:
3696//
3697// ```python
3698// for x1 in unpack(x):
3699//
3700//	i1, cs1, f1, o1, ci1, co1, h1 = LSTMBlock(
3701//	  x1, cs_prev, h_prev, w, wci, wcf, wco, b)
3702//	cs_prev = cs1
3703//	h_prev = h1
3704//	i.append(i1)
3705//	cs.append(cs1)
3706//	f.append(f1)
3707//	o.append(o1)
3708//	ci.append(ci1)
3709//	co.append(co1)
3710//	h.append(h1)
3711//
3712	// return pack(i), pack(cs), pack(f), pack(o), pack(ci), pack(co), pack(h)
3713	// ```
3714	//
3715	// Note that unlike LSTMBlockCell (and BlockLSTM), which use the ICFO gate layout,
3716	// this op uses IFCO. So for the snippet above to be equivalent, all gate-related
3717	// outputs would have to be reordered.
3718//
3719// Arguments:
3720//
3721//	seq_len_max: Maximum time length actually used by this input. Outputs are padded
3722//
3723// with zeros beyond this length.
3724//
3725//	x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs).
3726//	cs_prev: Value of the initial cell state.
3727//	h_prev: Initial output of cell (to be used for peephole).
3728//	w: The weight matrix.
3729//	wci: The weight matrix for input gate peephole connection.
3730//	wcf: The weight matrix for forget gate peephole connection.
3731//	wco: The weight matrix for output gate peephole connection.
3732//	b: The bias vector.
3733//
3734// Returns:
3735//
3736//	i: The input gate over the whole time sequence.
3737//	cs: The cell state before the tanh over the whole time sequence.
3738//	f: The forget gate over the whole time sequence.
3739//	o: The output gate over the whole time sequence.
3740//	ci: The cell input over the whole time sequence.
3741//	co: The cell after the tanh over the whole time sequence.
3742//	h: The output h vector over the whole time sequence.
3743func BlockLSTMV2(scope *Scope, seq_len_max tf.Output, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...BlockLSTMV2Attr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
3744	if scope.Err() != nil {
3745		return
3746	}
3747	attrs := map[string]interface{}{}
3748	for _, a := range optional {
3749		a(attrs)
3750	}
3751	opspec := tf.OpSpec{
3752		Type: "BlockLSTMV2",
3753		Input: []tf.Input{
3754			seq_len_max, x, cs_prev, h_prev, w, wci, wcf, wco, b,
3755		},
3756		Attrs: attrs,
3757	}
3758	op := scope.AddOperation(opspec)
3759	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
3760}
3761
3762// Aggregates the summary of accumulated stats for the batch.
3763//
3764	// The summary stats contain gradients and hessians accumulated for each node, feature dimension id and bucket.
3765//
3766// Arguments:
3767//
3768//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
3769//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
3770//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
3771//	feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).
3772//	max_splits: int; the maximum number of splits possible in the whole tree.
3773	//	num_buckets: int; equal to the maximum possible value of the bucketized feature.
3774//
3775// Returns output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])
3776// containing accumulated stats for each node, feature dimension and bucket.
3777func BoostedTreesAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
3778	if scope.Err() != nil {
3779		return
3780	}
3781	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
3782	opspec := tf.OpSpec{
3783		Type: "BoostedTreesAggregateStats",
3784		Input: []tf.Input{
3785			node_ids, gradients, hessians, feature,
3786		},
3787		Attrs: attrs,
3788	}
3789	op := scope.AddOperation(opspec)
3790	return op.Output(0)
3791}
3792
3793// Bucketize each feature based on bucket boundaries.
3794//
3795// An op that returns a list of float tensors, where each tensor represents the
3796// bucketized values for a single feature.
3797//
3798// Arguments:
3799//
3800	//	float_values: float; List of Rank 1 Tensors, each containing float values for a single feature.
3801//	bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a single
3802//
3803// feature.
3804//
3805// Returns int; List of Rank 1 Tensors each containing the bucketized values for a single feature.
3806func BoostedTreesBucketize(scope *Scope, float_values []tf.Output, bucket_boundaries []tf.Output) (buckets []tf.Output) {
3807	if scope.Err() != nil {
3808		return
3809	}
3810	opspec := tf.OpSpec{
3811		Type: "BoostedTreesBucketize",
3812		Input: []tf.Input{
3813			tf.OutputList(float_values), tf.OutputList(bucket_boundaries),
3814		},
3815	}
3816	op := scope.AddOperation(opspec)
3817	if scope.Err() != nil {
3818		return
3819	}
3820	var idx int
3821	var err error
3822	if buckets, idx, err = makeOutputList(op, idx, "buckets"); err != nil {
3823		scope.UpdateErr("BoostedTreesBucketize", err)
3824		return
3825	}
3826	return buckets
3827}
3828
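// Example (editor's sketch): each feature carries its own boundaries tensor,
// and tf.OutputList (used internally above) packs the Go slices into the
// op's input lists. Values and boundaries are illustrative.
//
//	s := op.NewScope()
//	values := []tf.Output{op.Const(s, []float32{1, 5, 10})}
//	boundaries := []tf.Output{op.Const(s, []float32{3, 7})}
//	buckets := op.BoostedTreesBucketize(s, values, boundaries)
//	// buckets[0] holds int32 bucket ids, here [0 1 2].
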
3829// BoostedTreesCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesCalculateBestFeatureSplit.
3830type BoostedTreesCalculateBestFeatureSplitAttr func(optionalAttr)
3831
3832// BoostedTreesCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
3833//
3834// value: A string indicating if this Op should perform inequality split or equality split.
3835// If not specified, defaults to "inequality"
3836func BoostedTreesCalculateBestFeatureSplitSplitType(value string) BoostedTreesCalculateBestFeatureSplitAttr {
3837	return func(m optionalAttr) {
3838		m["split_type"] = value
3839	}
3840}
3841
3842// Calculates gains for each feature and returns the best possible split information for the feature.
3843//
3844// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
3845//
3846// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
3847//
3848	// In this manner, the output is the best split per feature and per node, and it needs to be combined later to produce the best split for each node (among all possible features).
3849	//
3850	// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
3851//
3852// Arguments:
3853//
3854	//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, like `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index, node_id_range[1], is exclusive).
3855//	stats_summary: A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.
3856//
3857// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
3858//
3859//	l1: l1 regularization factor on leaf weights, per instance based.
3860//	l2: l2 regularization factor on leaf weights, per instance based.
3861//	tree_complexity: adjustment to the gain, per leaf based.
3862	//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
3863//	logits_dimension: The dimension of logit, i.e., number of classes.
3864//
3865// Returns:
3866//
3867	//	node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
3868	//	gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
3869	//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
3870	//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for the split in each node. See above for details like shapes and sizes.
3871	//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding it to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
3872	//	right_node_contribs: A Rank 2 tensor with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
3873	//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes.
3874//
3875// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
3876func BoostedTreesCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
3877	if scope.Err() != nil {
3878		return
3879	}
3880	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
3881	for _, a := range optional {
3882		a(attrs)
3883	}
3884	opspec := tf.OpSpec{
3885		Type: "BoostedTreesCalculateBestFeatureSplit",
3886		Input: []tf.Input{
3887			node_id_range, stats_summary, l1, l2, tree_complexity, min_node_weight,
3888		},
3889		Attrs: attrs,
3890	}
3891	op := scope.AddOperation(opspec)
3892	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
3893}
3894
3895// Calculates gains for each feature and returns the best possible split information for each node. However, if no split is found, then no split information is returned for that node.
3896//
3897// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
3898//
3899// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
3900//
3901	// In this manner, the output is the best split per feature and per node, and it needs to be combined later to produce the best split for each node (among all possible features).
3902	//
3903	// The output shapes are compatible in a way that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
3904//
3905// Arguments:
3906//
3907	//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, like `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index, node_id_range[1], is exclusive).
3908//	stats_summaries_list: A list of Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.
3909//
3910// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
3911//
3912//	split_types: A Rank 1 tensor indicating if this Op should perform inequality split or equality split per feature.
3913//	candidate_feature_ids: Rank 1 tensor with ids for each feature. This is the real id of the feature.
3914//	l1: l1 regularization factor on leaf weights, per instance based.
3915//	l2: l2 regularization factor on leaf weights, per instance based.
3916	//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
3917//	min_node_weight: minimum avg of hessians in a node before required for the node to be considered for splitting.
3918//	logits_dimension: The dimension of logit, i.e., number of classes.
3919//
3920// Returns:
3921//
3922	//	node_ids: A Rank 1 tensor indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has a different size as each feature provides different possible nodes. See above for details like shapes and sizes.
3923	//	gains: A Rank 1 tensor indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
3924	//	feature_ids: A Rank 1 tensor indicating the best feature id for each node. See above for details like shapes and sizes.
3925	//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimensional. See above for details like shapes and sizes.
3926	//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for the split in each node. See above for details like shapes and sizes.
3927	//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding it to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
3928	//	right_node_contribs: A Rank 2 tensor with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
3929	//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes.
3930//
3931// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
3932func BoostedTreesCalculateBestFeatureSplitV2(scope *Scope, node_id_range tf.Output, stats_summaries_list []tf.Output, split_types tf.Output, candidate_feature_ids tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64) (node_ids tf.Output, gains tf.Output, feature_ids tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
3933	if scope.Err() != nil {
3934		return
3935	}
3936	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
3937	opspec := tf.OpSpec{
3938		Type: "BoostedTreesCalculateBestFeatureSplitV2",
3939		Input: []tf.Input{
3940			node_id_range, tf.OutputList(stats_summaries_list), split_types, candidate_feature_ids, l1, l2, tree_complexity, min_node_weight,
3941		},
3942		Attrs: attrs,
3943	}
3944	op := scope.AddOperation(opspec)
3945	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6), op.Output(7)
3946}
3947
3948// Calculates gains for each feature and returns the best possible split information for the feature.
3949//
3950// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
3951//
3952// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
3953//
3954	// In this manner, the output is the best split per feature and per node, and it needs to be combined later to produce the best split for each node (among all possible features).
3955	//
3956	// The output lists all have the same length, `num_features`.
3957	// The output shapes are compatible in a way that the first dimension of all tensors in all lists is the same and equal to the number of possible split nodes for each feature.
3958//
3959// Arguments:
3960//
3961	//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, like `for node_id in range(node_id_range[0], node_id_range[1])` (note that the last index, node_id_range[1], is exclusive).
3962//	stats_summary_list: A list of Rank 3 tensor (#shape=[max_splits, bucket, 2]) for accumulated stats summary (gradient/hessian) per node per buckets for each feature. The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
3963//	l1: l1 regularization factor on leaf weights, per instance based.
3964//	l2: l2 regularization factor on leaf weights, per instance based.
3965//	tree_complexity: adjustment to the gain, per leaf based.
3966	//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
3967//	max_splits: the number of nodes that can be split in the whole tree. Used as a dimension of output tensors.
3968//
3969// Returns:
3970//
3971//	node_ids_list: An output list of Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
3972//	gains_list: An output list of Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
3973//	thresholds_list: An output list of Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
3974	//	left_node_contribs_list: A list of Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding it to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
3975	//	right_node_contribs_list: A list of Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but the value is for the right node.
3976func BoostedTreesCalculateBestGainsPerFeature(scope *Scope, node_id_range tf.Output, stats_summary_list []tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, max_splits int64) (node_ids_list []tf.Output, gains_list []tf.Output, thresholds_list []tf.Output, left_node_contribs_list []tf.Output, right_node_contribs_list []tf.Output) {
3977	if scope.Err() != nil {
3978		return
3979	}
3980	attrs := map[string]interface{}{"max_splits": max_splits}
3981	opspec := tf.OpSpec{
3982		Type: "BoostedTreesCalculateBestGainsPerFeature",
3983		Input: []tf.Input{
3984			node_id_range, tf.OutputList(stats_summary_list), l1, l2, tree_complexity, min_node_weight,
3985		},
3986		Attrs: attrs,
3987	}
3988	op := scope.AddOperation(opspec)
3989	if scope.Err() != nil {
3990		return
3991	}
3992	var idx int
3993	var err error
3994	if node_ids_list, idx, err = makeOutputList(op, idx, "node_ids_list"); err != nil {
3995		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
3996		return
3997	}
3998	if gains_list, idx, err = makeOutputList(op, idx, "gains_list"); err != nil {
3999		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
4000		return
4001	}
4002	if thresholds_list, idx, err = makeOutputList(op, idx, "thresholds_list"); err != nil {
4003		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
4004		return
4005	}
4006	if left_node_contribs_list, idx, err = makeOutputList(op, idx, "left_node_contribs_list"); err != nil {
4007		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
4008		return
4009	}
4010	if right_node_contribs_list, idx, err = makeOutputList(op, idx, "right_node_contribs_list"); err != nil {
4011		scope.UpdateErr("BoostedTreesCalculateBestGainsPerFeature", err)
4012		return
4013	}
4014	return node_ids_list, gains_list, thresholds_list, left_node_contribs_list, right_node_contribs_list
4015}
4016
4017// Calculates the prior from the training data (the bias) and fills in the first node with the logits' prior. Returns a boolean indicating whether to continue centering.
4018//
4019// Arguments:
4020//
4021//	tree_ensemble_handle: Handle to the tree ensemble.
4022	//	mean_gradients: A tensor with shape=[logits_dimension] with the mean of gradients for the first node.
4023	//	mean_hessians: A tensor with shape=[logits_dimension] with the mean of hessians for the first node.
4024//	l1: l1 regularization factor on leaf weights, per instance based.
4025//	l2: l2 regularization factor on leaf weights, per instance based.
4026//
4027// Returns Bool, whether to continue bias centering.
4028func BoostedTreesCenterBias(scope *Scope, tree_ensemble_handle tf.Output, mean_gradients tf.Output, mean_hessians tf.Output, l1 tf.Output, l2 tf.Output) (continue_centering tf.Output) {
4029	if scope.Err() != nil {
4030		return
4031	}
4032	opspec := tf.OpSpec{
4033		Type: "BoostedTreesCenterBias",
4034		Input: []tf.Input{
4035			tree_ensemble_handle, mean_gradients, mean_hessians, l1, l2,
4036		},
4037	}
4038	op := scope.AddOperation(opspec)
4039	return op.Output(0)
4040}
4041
4042// Creates a tree ensemble model and returns a handle to it.
4043//
4044// Arguments:
4045//
4046//	tree_ensemble_handle: Handle to the tree ensemble resource to be created.
4047//	stamp_token: Token to use as the initial value of the resource stamp.
4048//	tree_ensemble_serialized: Serialized proto of the tree ensemble.
4049//
4050// Returns the created operation.
4051func BoostedTreesCreateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
4052	if scope.Err() != nil {
4053		return
4054	}
4055	opspec := tf.OpSpec{
4056		Type: "BoostedTreesCreateEnsemble",
4057		Input: []tf.Input{
4058			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
4059		},
4060	}
4061	return scope.AddOperation(opspec)
4062}
4063
4064// BoostedTreesCreateQuantileStreamResourceAttr is an optional argument to BoostedTreesCreateQuantileStreamResource.
4065type BoostedTreesCreateQuantileStreamResourceAttr func(optionalAttr)
4066
4067// BoostedTreesCreateQuantileStreamResourceMaxElements sets the optional max_elements attribute to value.
4068//
4069// value: int; The maximum number of data points that can be fed to the stream.
4070// If not specified, defaults to 1099511627776
4071func BoostedTreesCreateQuantileStreamResourceMaxElements(value int64) BoostedTreesCreateQuantileStreamResourceAttr {
4072	return func(m optionalAttr) {
4073		m["max_elements"] = value
4074	}
4075}
4076
4077// Create the Resource for Quantile Streams.
4078//
4079// Arguments:
4080//
4081//	quantile_stream_resource_handle: resource; Handle to quantile stream resource.
4082//	epsilon: float; The required approximation error of the stream resource.
4083//	num_streams: int; The number of streams managed by the resource that shares the same epsilon.
4084//
4085// Returns the created operation.
4086func BoostedTreesCreateQuantileStreamResource(scope *Scope, quantile_stream_resource_handle tf.Output, epsilon tf.Output, num_streams tf.Output, optional ...BoostedTreesCreateQuantileStreamResourceAttr) (o *tf.Operation) {
4087	if scope.Err() != nil {
4088		return
4089	}
4090	attrs := map[string]interface{}{}
4091	for _, a := range optional {
4092		a(attrs)
4093	}
4094	opspec := tf.OpSpec{
4095		Type: "BoostedTreesCreateQuantileStreamResource",
4096		Input: []tf.Input{
4097			quantile_stream_resource_handle, epsilon, num_streams,
4098		},
4099		Attrs: attrs,
4100	}
4101	return scope.AddOperation(opspec)
4102}
4103
4104// Deserializes a serialized tree ensemble config and replaces current tree
4105//
4106// ensemble.
4107//
4108// Arguments:
4109//
4110//	tree_ensemble_handle: Handle to the tree ensemble.
4111//	stamp_token: Token to use as the new value of the resource stamp.
4112//	tree_ensemble_serialized: Serialized proto of the ensemble.
4113//
4114// Returns the created operation.
4115func BoostedTreesDeserializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output, stamp_token tf.Output, tree_ensemble_serialized tf.Output) (o *tf.Operation) {
4116	if scope.Err() != nil {
4117		return
4118	}
4119	opspec := tf.OpSpec{
4120		Type: "BoostedTreesDeserializeEnsemble",
4121		Input: []tf.Input{
4122			tree_ensemble_handle, stamp_token, tree_ensemble_serialized,
4123		},
4124	}
4125	return scope.AddOperation(opspec)
4126}
4127
4128// BoostedTreesEnsembleResourceHandleOpAttr is an optional argument to BoostedTreesEnsembleResourceHandleOp.
4129type BoostedTreesEnsembleResourceHandleOpAttr func(optionalAttr)
4130
4131// BoostedTreesEnsembleResourceHandleOpContainer sets the optional container attribute to value.
4132// If not specified, defaults to ""
4133func BoostedTreesEnsembleResourceHandleOpContainer(value string) BoostedTreesEnsembleResourceHandleOpAttr {
4134	return func(m optionalAttr) {
4135		m["container"] = value
4136	}
4137}
4138
4139// BoostedTreesEnsembleResourceHandleOpSharedName sets the optional shared_name attribute to value.
4140// If not specified, defaults to ""
4141func BoostedTreesEnsembleResourceHandleOpSharedName(value string) BoostedTreesEnsembleResourceHandleOpAttr {
4142	return func(m optionalAttr) {
4143		m["shared_name"] = value
4144	}
4145}
4146
4147// Creates a handle to a BoostedTreesEnsembleResource
4148func BoostedTreesEnsembleResourceHandleOp(scope *Scope, optional ...BoostedTreesEnsembleResourceHandleOpAttr) (resource tf.Output) {
4149	if scope.Err() != nil {
4150		return
4151	}
4152	attrs := map[string]interface{}{}
4153	for _, a := range optional {
4154		a(attrs)
4155	}
4156	opspec := tf.OpSpec{
4157		Type: "BoostedTreesEnsembleResourceHandleOp",
4158
4159		Attrs: attrs,
4160	}
4161	op := scope.AddOperation(opspec)
4162	return op.Output(0)
4163}
4164
4165// Debugging/model interpretability outputs for each example.
4166//
4167// It traverses all the trees and computes debug metrics for individual examples,
4168// such as getting split feature ids and logits after each split along the decision
4169// path used to compute directional feature contributions.
4170//
4171// Arguments:
4172//
4173//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
4174//
4175// feature.
4176//
4177//	logits_dimension: scalar, dimension of the logits, to be used for constructing the protos in
4178//
4179// examples_debug_outputs_serialized.
4180//
4181// Returns Output rank 1 Tensor containing a proto serialized as a string for each example.
4182func BoostedTreesExampleDebugOutputs(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (examples_debug_outputs_serialized tf.Output) {
4183	if scope.Err() != nil {
4184		return
4185	}
4186	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
4187	opspec := tf.OpSpec{
4188		Type: "BoostedTreesExampleDebugOutputs",
4189		Input: []tf.Input{
4190			tree_ensemble_handle, tf.OutputList(bucketized_features),
4191		},
4192		Attrs: attrs,
4193	}
4194	op := scope.AddOperation(opspec)
4195	return op.Output(0)
4196}
4197
4198// Flush the quantile summaries from each quantile stream resource.
4199//
4200// An op that outputs a list of quantile summaries of a quantile stream resource.
4201// Each summary Tensor is rank 2, containing summaries (value, weight, min_rank,
4202// max_rank) for a single feature.
4203//
4204// Arguments:
4205//
4206//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
4207func BoostedTreesFlushQuantileSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (summaries []tf.Output) {
4208	if scope.Err() != nil {
4209		return
4210	}
4211	attrs := map[string]interface{}{"num_features": num_features}
4212	opspec := tf.OpSpec{
4213		Type: "BoostedTreesFlushQuantileSummaries",
4214		Input: []tf.Input{
4215			quantile_stream_resource_handle,
4216		},
4217		Attrs: attrs,
4218	}
4219	op := scope.AddOperation(opspec)
4220	if scope.Err() != nil {
4221		return
4222	}
4223	var idx int
4224	var err error
4225	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
4226		scope.UpdateErr("BoostedTreesFlushQuantileSummaries", err)
4227		return
4228	}
4229	return summaries
4230}
4231
4232// Retrieves the tree ensemble resource stamp token, number of trees and growing statistics.
4233//
4234// Arguments:
4235//
4236//	tree_ensemble_handle: Handle to the tree ensemble.
4237//
4238// Returns:
4239//
4240//	stamp_token: Stamp token of the tree ensemble resource.
4241//	num_trees: The number of trees in the tree ensemble resource.
4242//	num_finalized_trees: The number of trees that were finished successfully.
4243//	num_attempted_layers: The number of layers we attempted to build (but not necessarily succeeded).
4244	//	last_layer_nodes_range: Rank 1 tensor of size 2 that contains the start and end ids of the nodes in the latest
4245//
4246// layer.
4247func BoostedTreesGetEnsembleStates(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, num_trees tf.Output, num_finalized_trees tf.Output, num_attempted_layers tf.Output, last_layer_nodes_range tf.Output) {
4248	if scope.Err() != nil {
4249		return
4250	}
4251	opspec := tf.OpSpec{
4252		Type: "BoostedTreesGetEnsembleStates",
4253		Input: []tf.Input{
4254			tree_ensemble_handle,
4255		},
4256	}
4257	op := scope.AddOperation(opspec)
4258	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
4259}
4260
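// Example (editor's sketch): a minimal ensemble-resource round-trip. The
// create op must run before the states are fetched, which the two separate
// Session.Run calls enforce; the stamp token and empty serialized proto are
// illustrative, and error handling is elided.
//
//	s := op.NewScope()
//	handle := op.BoostedTreesEnsembleResourceHandleOp(s)
//	create := op.BoostedTreesCreateEnsemble(s, handle, op.Const(s, int64(0)), op.Const(s, ""))
//	stamp, numTrees, _, _, _ := op.BoostedTreesGetEnsembleStates(s, handle)
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	sess.Run(nil, nil, []*tf.Operation{create})
//	res, _ := sess.Run(nil, []tf.Output{stamp, numTrees}, nil)
//	_ = res
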
4261// Makes the summary of quantiles for the batch.
4262//
4263// An op that takes a list of tensors (one tensor per feature) and outputs the
4264// quantile summaries for each tensor.
4265//
4266// Arguments:
4267//
4268//	float_values: float; List of Rank 1 Tensors each containing values for a single feature.
4269//	example_weights: float; Rank 1 Tensor with weights per instance.
4270//	epsilon: float; The required maximum approximation error.
4271//
4272// Returns float; List of Rank 2 Tensors each containing the quantile summary
4273// (value, weight, min_rank, max_rank) of a single feature.
4274func BoostedTreesMakeQuantileSummaries(scope *Scope, float_values []tf.Output, example_weights tf.Output, epsilon tf.Output) (summaries []tf.Output) {
4275	if scope.Err() != nil {
4276		return
4277	}
4278	opspec := tf.OpSpec{
4279		Type: "BoostedTreesMakeQuantileSummaries",
4280		Input: []tf.Input{
4281			tf.OutputList(float_values), example_weights, epsilon,
4282		},
4283	}
4284	op := scope.AddOperation(opspec)
4285	if scope.Err() != nil {
4286		return
4287	}
4288	var idx int
4289	var err error
4290	if summaries, idx, err = makeOutputList(op, idx, "summaries"); err != nil {
4291		scope.UpdateErr("BoostedTreesMakeQuantileSummaries", err)
4292		return
4293	}
4294	return summaries
4295}
4296
4297// Makes the summary of accumulated stats for the batch.
4298//
4299	// The summary stats contain gradients and hessians accumulated into the corresponding node and bucket for each example.
4300//
4301// Arguments:
4302//
4303	//	node_ids: int32 Rank 1 Tensor containing the node ids that each example falls into for the requested layer.
4304//	gradients: float32; Rank 2 Tensor (shape=[#examples, 1]) for gradients.
4305//	hessians: float32; Rank 2 Tensor (shape=[#examples, 1]) for hessians.
4306//	bucketized_features_list: int32 list of Rank 1 Tensors, each containing the bucketized feature (for each feature column).
4307//	max_splits: int; the maximum number of splits possible in the whole tree.
4308	//	num_buckets: int; equal to the maximum possible value of the bucketized feature.
4309//
4310	// Returns output Rank 4 Tensor (shape=[#features, #splits, #buckets, 2]) containing accumulated stats put into the corresponding node and bucket. The first index of the 4th dimension refers to gradients, and the second to hessians.
4311func BoostedTreesMakeStatsSummary(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, bucketized_features_list []tf.Output, max_splits int64, num_buckets int64) (stats_summary tf.Output) {
4312	if scope.Err() != nil {
4313		return
4314	}
4315	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
4316	opspec := tf.OpSpec{
4317		Type: "BoostedTreesMakeStatsSummary",
4318		Input: []tf.Input{
4319			node_ids, gradients, hessians, tf.OutputList(bucketized_features_list),
4320		},
4321		Attrs: attrs,
4322	}
4323	op := scope.AddOperation(opspec)
4324	return op.Output(0)
4325}
4326
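// Example (editor's sketch): two examples landing in node 0 of a tree with a
// single bucketized feature column. With max_splits=1 and num_buckets=2 the
// result has shape [1, 1, 2, 2] (feature, node, bucket, gradient/hessian);
// all numbers are illustrative.
//
//	s := op.NewScope()
//	nodeIDs := op.Const(s, []int32{0, 0})
//	grads := op.Const(s, [][]float32{{0.1}, {0.2}})
//	hess := op.Const(s, [][]float32{{1.0}, {1.0}})
//	features := []tf.Output{op.Const(s, []int32{0, 1})}
//	summary := op.BoostedTreesMakeStatsSummary(s, nodeIDs, grads, hess, features, 1, 2)
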
4327// Runs multiple additive regression ensemble predictors on input instances and
4328//
4329// computes the logits. It is designed to be used during prediction.
4330// It traverses all the trees and calculates the final score for each instance.
4331//
4332// Arguments:
4333//
4334//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
4335//
4336// feature.
4337//
4338//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
4339//
4340// shape.
4341//
4342// Returns Output rank 2 Tensor containing logits for each example.
4343func BoostedTreesPredict(scope *Scope, tree_ensemble_handle tf.Output, bucketized_features []tf.Output, logits_dimension int64) (logits tf.Output) {
4344	if scope.Err() != nil {
4345		return
4346	}
4347	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
4348	opspec := tf.OpSpec{
4349		Type: "BoostedTreesPredict",
4350		Input: []tf.Input{
4351			tree_ensemble_handle, tf.OutputList(bucketized_features),
4352		},
4353		Attrs: attrs,
4354	}
4355	op := scope.AddOperation(opspec)
4356	return op.Output(0)
4357}
4358
4359// Add the quantile summaries to each quantile stream resource.
4360//
4361// An op that adds a list of quantile summaries to a quantile stream resource. Each
4362// summary Tensor is rank 2, containing summaries (value, weight, min_rank, max_rank)
4363// for a single feature.
4364//
4365// Arguments:
4366//
4367//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
4368	//	summaries: string; List of Rank 2 Tensors, each containing the summaries for a single feature.
4369//
4370// Returns the created operation.
4371func BoostedTreesQuantileStreamResourceAddSummaries(scope *Scope, quantile_stream_resource_handle tf.Output, summaries []tf.Output) (o *tf.Operation) {
4372	if scope.Err() != nil {
4373		return
4374	}
4375	opspec := tf.OpSpec{
4376		Type: "BoostedTreesQuantileStreamResourceAddSummaries",
4377		Input: []tf.Input{
4378			quantile_stream_resource_handle, tf.OutputList(summaries),
4379		},
4380	}
4381	return scope.AddOperation(opspec)
4382}
4383
4384// Deserialize bucket boundaries and ready flag into current QuantileAccumulator.
4385//
4386	// An op that deserializes bucket boundaries and the boundaries-ready flag into the current QuantileAccumulator.
4387//
4388// Arguments:
4389//
4390//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
4391//	bucket_boundaries: float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
4392//
4393// Returns the created operation.
4394func BoostedTreesQuantileStreamResourceDeserialize(scope *Scope, quantile_stream_resource_handle tf.Output, bucket_boundaries []tf.Output) (o *tf.Operation) {
4395	if scope.Err() != nil {
4396		return
4397	}
4398	opspec := tf.OpSpec{
4399		Type: "BoostedTreesQuantileStreamResourceDeserialize",
4400		Input: []tf.Input{
4401			quantile_stream_resource_handle, tf.OutputList(bucket_boundaries),
4402		},
4403	}
4404	return scope.AddOperation(opspec)
4405}
4406
4407// BoostedTreesQuantileStreamResourceFlushAttr is an optional argument to BoostedTreesQuantileStreamResourceFlush.
4408type BoostedTreesQuantileStreamResourceFlushAttr func(optionalAttr)
4409
4410// BoostedTreesQuantileStreamResourceFlushGenerateQuantiles sets the optional generate_quantiles attribute to value.
4411//
4412	// value: bool; If True, the output will be the num_quantiles for each stream, where the ith
4413	// entry is the ith quantile of the input with an approximation error of epsilon.
4414	// Duplicate values may be present.
4415	// If False, the output will be the points in the histogram that were collected, which roughly
4416	// translates to 1/epsilon boundaries, without any duplicates.
4418// If not specified, defaults to false
4419func BoostedTreesQuantileStreamResourceFlushGenerateQuantiles(value bool) BoostedTreesQuantileStreamResourceFlushAttr {
4420	return func(m optionalAttr) {
4421		m["generate_quantiles"] = value
4422	}
4423}
4424
4425// Flush the summaries for a quantile stream resource.
4426//
4427// An op that flushes the summaries for a quantile stream resource.
4428//
4429// Arguments:
4430//
4431//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
4432//	num_buckets: int; approximate number of buckets unless using generate_quantiles.
4433//
4434// Returns the created operation.
4435func BoostedTreesQuantileStreamResourceFlush(scope *Scope, quantile_stream_resource_handle tf.Output, num_buckets tf.Output, optional ...BoostedTreesQuantileStreamResourceFlushAttr) (o *tf.Operation) {
4436	if scope.Err() != nil {
4437		return
4438	}
4439	attrs := map[string]interface{}{}
4440	for _, a := range optional {
4441		a(attrs)
4442	}
4443	opspec := tf.OpSpec{
4444		Type: "BoostedTreesQuantileStreamResourceFlush",
4445		Input: []tf.Input{
4446			quantile_stream_resource_handle, num_buckets,
4447		},
4448		Attrs: attrs,
4449	}
4450	return scope.AddOperation(opspec)
4451}
4452
4453// Generate the bucket boundaries for each feature based on accumulated summaries.
4454//
4455// An op that returns a list of float tensors for a quantile stream resource. Each
4456// tensor is Rank 1 containing bucket boundaries for a single feature.
4457//
4458// Arguments:
4459//
4460//	quantile_stream_resource_handle: resource handle referring to a QuantileStreamResource.
4461//	num_features: inferred int; number of features to get bucket boundaries for.
4462//
4463// Returns float; List of Rank 1 Tensors each containing the bucket boundaries for a feature.
4464func BoostedTreesQuantileStreamResourceGetBucketBoundaries(scope *Scope, quantile_stream_resource_handle tf.Output, num_features int64) (bucket_boundaries []tf.Output) {
4465	if scope.Err() != nil {
4466		return
4467	}
4468	attrs := map[string]interface{}{"num_features": num_features}
4469	opspec := tf.OpSpec{
4470		Type: "BoostedTreesQuantileStreamResourceGetBucketBoundaries",
4471		Input: []tf.Input{
4472			quantile_stream_resource_handle,
4473		},
4474		Attrs: attrs,
4475	}
4476	op := scope.AddOperation(opspec)
4477	if scope.Err() != nil {
4478		return
4479	}
4480	var idx int
4481	var err error
4482	if bucket_boundaries, idx, err = makeOutputList(op, idx, "bucket_boundaries"); err != nil {
4483		scope.UpdateErr("BoostedTreesQuantileStreamResourceGetBucketBoundaries", err)
4484		return
4485	}
4486	return bucket_boundaries
4487}
4488
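// Example (editor's sketch): the quantile-stream ops above compose into a
// small workflow: create the resource, add summaries, flush, then read the
// bucket boundaries. Ordering is enforced with separate Session.Run calls;
// epsilon, the data, and the bucket count are illustrative, and error
// handling is elided.
//
//	s := op.NewScope()
//	handle := op.BoostedTreesQuantileStreamResourceHandleOp(s)
//	eps := op.Const(s, float32(0.01))
//	create := op.BoostedTreesCreateQuantileStreamResource(s, handle, eps, op.Const(s, int64(1)))
//	values := []tf.Output{op.Const(s, []float32{1, 2, 3, 4})}
//	weights := op.Const(s, []float32{1, 1, 1, 1})
//	summaries := op.BoostedTreesMakeQuantileSummaries(s, values, weights, eps)
//	add := op.BoostedTreesQuantileStreamResourceAddSummaries(s, handle, summaries)
//	flush := op.BoostedTreesQuantileStreamResourceFlush(s, handle, op.Const(s, int64(2)))
//	boundaries := op.BoostedTreesQuantileStreamResourceGetBucketBoundaries(s, handle, 1)
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	for _, t := range []*tf.Operation{create, add, flush} {
//		sess.Run(nil, nil, []*tf.Operation{t})
//	}
//	res, _ := sess.Run(nil, []tf.Output{boundaries[0]}, nil)
//	_ = res
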
4489// BoostedTreesQuantileStreamResourceHandleOpAttr is an optional argument to BoostedTreesQuantileStreamResourceHandleOp.
4490type BoostedTreesQuantileStreamResourceHandleOpAttr func(optionalAttr)
4491
4492// BoostedTreesQuantileStreamResourceHandleOpContainer sets the optional container attribute to value.
4493// If not specified, defaults to ""
4494func BoostedTreesQuantileStreamResourceHandleOpContainer(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
4495	return func(m optionalAttr) {
4496		m["container"] = value
4497	}
4498}
4499
4500// BoostedTreesQuantileStreamResourceHandleOpSharedName sets the optional shared_name attribute to value.
4501// If not specified, defaults to ""
4502func BoostedTreesQuantileStreamResourceHandleOpSharedName(value string) BoostedTreesQuantileStreamResourceHandleOpAttr {
4503	return func(m optionalAttr) {
4504		m["shared_name"] = value
4505	}
4506}
4507
4508// Creates a handle to a BoostedTreesQuantileStreamResource.
4509func BoostedTreesQuantileStreamResourceHandleOp(scope *Scope, optional ...BoostedTreesQuantileStreamResourceHandleOpAttr) (resource tf.Output) {
4510	if scope.Err() != nil {
4511		return
4512	}
4513	attrs := map[string]interface{}{}
4514	for _, a := range optional {
4515		a(attrs)
4516	}
4517	opspec := tf.OpSpec{
4518		Type: "BoostedTreesQuantileStreamResourceHandleOp",
4519
4520		Attrs: attrs,
4521	}
4522	op := scope.AddOperation(opspec)
4523	return op.Output(0)
4524}
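
// A short sketch (not generated code) of the functional-options pattern used
// for optional attributes; `root` is a local assumption:
//
//	root := NewScope()
//	// shared_name lets multiple graphs refer to the same underlying resource.
//	handle := BoostedTreesQuantileStreamResourceHandleOp(root,
//		BoostedTreesQuantileStreamResourceHandleOpSharedName("quantile_stream"))
//	_ = handle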
4525
4526// Serializes the tree ensemble to a proto.
4527//
4528// Arguments:
4529//
4530//	tree_ensemble_handle: Handle to the tree ensemble.
4531//
4532// Returns:
4533//
4534//	stamp_token: Stamp token of the tree ensemble resource.
4535//	tree_ensemble_serialized: Serialized proto of the ensemble.
4536func BoostedTreesSerializeEnsemble(scope *Scope, tree_ensemble_handle tf.Output) (stamp_token tf.Output, tree_ensemble_serialized tf.Output) {
4537	if scope.Err() != nil {
4538		return
4539	}
4540	opspec := tf.OpSpec{
4541		Type: "BoostedTreesSerializeEnsemble",
4542		Input: []tf.Input{
4543			tree_ensemble_handle,
4544		},
4545	}
4546	op := scope.AddOperation(opspec)
4547	return op.Output(0), op.Output(1)
4548}
4549
4550// Aggregates the summary of accumulated stats for the batch.
4551//
4552// The summary stats contain gradients and hessians accumulated for each node, bucket and dimension id.
4553//
4554// Arguments:
4555//
4556//	node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
4557//	gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
4558//	hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
4559//	feature_indices: int32; Rank 2 indices of feature sparse Tensors (shape=[number of sparse entries, 2]).
4560//
4561// Number of sparse entries across all instances in the batch. The first value is
4562// the index of the instance, the second is the dimension of the feature. The second
4563// axis can only have 2 values; i.e., the dense version of the input Tensor must be a matrix.
4564//
4565//	feature_values: int32; Rank 1 values of feature sparse Tensors (shape=[number of sparse entries]).
4566//
4567// Number of sparse entries across all instances in the batch; one value per
4568// sparse entry, aligned with `feature_indices`.
4569//
4570//	feature_shape: int32; Rank 1 dense shape of feature sparse Tensors (shape=[2]).
4571//
4572// The first axis can only have 2 values, [batch_size, feature_dimension].
4573//
4574//	max_splits: int; the maximum number of splits possible in the whole tree.
4575//	num_buckets: int; equals to the maximum possible value of bucketized feature + 1.
4576//
4577// Returns:
4578//
4579//	stats_summary_indices: int32; Rank 2 indices of summary sparse Tensors (shape=[number of non zero statistics, 4])
4580//
4581// The second axis has size 4, comprising node id, feature dimension, bucket id, and statistics_dimension.
4582// statistics_dimension = logits_dimension + hessian_dimension.
4583//
4584//	stats_summary_values: output Rank 1 Tensor (shape=[number of non zero statistics])
4585//	stats_summary_shape: output Rank 1 Tensor (shape=[4])
4586//
4587// The tensor has the following 4 values: [max_splits, feature_dimension, num_buckets, statistics_dimension],
4588// where statistics_dimension = gradient_dimension + hessian_dimension. gradient_dimension
4589// is the same as label_dimension, i.e., the output space. hessian_dimension can be the same
4590// as logits dimension when diagonal hessian is used, or label_dimension^2 when full
4591// hessian is used.
4592func BoostedTreesSparseAggregateStats(scope *Scope, node_ids tf.Output, gradients tf.Output, hessians tf.Output, feature_indices tf.Output, feature_values tf.Output, feature_shape tf.Output, max_splits int64, num_buckets int64) (stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output) {
4593	if scope.Err() != nil {
4594		return
4595	}
4596	attrs := map[string]interface{}{"max_splits": max_splits, "num_buckets": num_buckets}
4597	opspec := tf.OpSpec{
4598		Type: "BoostedTreesSparseAggregateStats",
4599		Input: []tf.Input{
4600			node_ids, gradients, hessians, feature_indices, feature_values, feature_shape,
4601		},
4602		Attrs: attrs,
4603	}
4604	op := scope.AddOperation(opspec)
4605	return op.Output(0), op.Output(1), op.Output(2)
4606}
4607
4608// BoostedTreesSparseCalculateBestFeatureSplitAttr is an optional argument to BoostedTreesSparseCalculateBestFeatureSplit.
4609type BoostedTreesSparseCalculateBestFeatureSplitAttr func(optionalAttr)
4610
4611// BoostedTreesSparseCalculateBestFeatureSplitSplitType sets the optional split_type attribute to value.
4612//
4613// value: A string indicating if this Op should perform inequality split or equality split.
4614// If not specified, defaults to "inequality"
4615func BoostedTreesSparseCalculateBestFeatureSplitSplitType(value string) BoostedTreesSparseCalculateBestFeatureSplitAttr {
4616	return func(m optionalAttr) {
4617		m["split_type"] = value
4618	}
4619}
4620
4621// Calculates gains for each feature and returns the best possible split information for the feature.
4622//
4623// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
4624//
4625// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
4626//
4627// In this manner, the output is the best split per feature and per node, so it needs to be combined later to produce the best split for each node (among all possible features).
4628//
4629// The output shapes are compatible in that the first dimension of all tensors is the same and equal to the number of possible split nodes for each feature.
4630//
4631// Arguments:
4632//
4633//	node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as in `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
4634//	stats_summary_indices: A Rank 2 int64 tensor of dense shape [N, 4] (N specifies the number of non-zero values) for accumulated stats summary (gradient/hessian) per node per bucket for each feature. The second dimension contains node id, feature dimension, bucket id, and stats dim.
4635//
4636// stats dim is the sum of the logits dimension and the hessian dimension; the hessian dimension is the logits dimension if a diagonal hessian is used, or logits dimension^2 if a full hessian is used.
4637//
4638//	stats_summary_values: A Rank 1 float tensor of dense shape [N] (N specifies the number of non-zero values), which supplies the values for each element in summary_indices.
4639//	stats_summary_shape: A Rank 1 float tensor of dense shape [4], which specifies the dense shape of the sparse tensor, which is [num tree nodes, feature dimensions, num buckets, stats dim].
4640//	l1: l1 regularization factor on leaf weights, per instance based.
4641//	l2: l2 regularization factor on leaf weights, per instance based.
4642//	tree_complexity: adjustment to the gain, per leaf based.
4643//	min_node_weight: minimum average of hessians in a node required before the node is considered for splitting.
4644//	logits_dimension: The dimension of logit, i.e., number of classes.
4645//
4646// Returns:
4647//
4648//	node_ids: A Rank 1 tensor indicating possible node ids that can be split.
4649//	gains: A Rank 1 tensor indicating the best gains to split each node.
4650//	feature_dimensions: A Rank 1 tensor indicating the best feature dimension for each feature to split for each node.
4651//	thresholds: A Rank 1 tensor indicating the bucket id to compare with (as a threshold) for split in each node.
4652//	left_node_contribs: A Rank 2 tensor indicating the contribution of the left nodes when branching from parent nodes to the left direction by the given threshold for each feature.
4653//
4654// This value will be used to make the left node value by adding to the parent node value. Second dimension size is logits dimension.
4655//
4656//	right_node_contribs: A Rank 2 tensor with the same shape/conditions as left_node_contribs, but containing the values for the right node.
4657//	split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing.
4658//
4659// Inequality with default left returns 0, inequality with default right returns 1, equality with default right returns 2.
4660func BoostedTreesSparseCalculateBestFeatureSplit(scope *Scope, node_id_range tf.Output, stats_summary_indices tf.Output, stats_summary_values tf.Output, stats_summary_shape tf.Output, l1 tf.Output, l2 tf.Output, tree_complexity tf.Output, min_node_weight tf.Output, logits_dimension int64, optional ...BoostedTreesSparseCalculateBestFeatureSplitAttr) (node_ids tf.Output, gains tf.Output, feature_dimensions tf.Output, thresholds tf.Output, left_node_contribs tf.Output, right_node_contribs tf.Output, split_with_default_directions tf.Output) {
4661	if scope.Err() != nil {
4662		return
4663	}
4664	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
4665	for _, a := range optional {
4666		a(attrs)
4667	}
4668	opspec := tf.OpSpec{
4669		Type: "BoostedTreesSparseCalculateBestFeatureSplit",
4670		Input: []tf.Input{
4671			node_id_range, stats_summary_indices, stats_summary_values, stats_summary_shape, l1, l2, tree_complexity, min_node_weight,
4672		},
4673		Attrs: attrs,
4674	}
4675	op := scope.AddOperation(opspec)
4676	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
4677}
4678
4679// Runs multiple additive regression ensemble predictors on input instances and
4680//
4681// computes the update to cached logits. It is designed to be used during training.
4682// It traverses the trees starting from cached tree id and cached node id and
4683// calculates the updates to be pushed to the cache.
4684//
4685// Arguments:
4686//
4687//	cached_tree_ids: Rank 1 Tensor containing cached tree ids which is the starting
4688//
4689// tree of prediction.
4690//
4691//	cached_node_ids: Rank 1 Tensor containing cached node id which is the starting
4692//
4693// node of prediction.
4694//
4695//	bucketized_features: A list of rank 1 Tensors containing bucket id for each
4696//
4697// feature.
4698//
4699//	logits_dimension: scalar, dimension of the logits, to be used for partial logits
4700//
4701// shape.
4702//
4703// Returns:
4704//
4705//	partial_logits: Rank 2 Tensor containing logits update (with respect to cached
4706//
4707// values stored) for each example.
4708//
4709//	tree_ids: Rank 1 Tensor containing new tree ids for each example.
4710//	node_ids: Rank 1 Tensor containing new node ids in the new tree_ids.
4711func BoostedTreesTrainingPredict(scope *Scope, tree_ensemble_handle tf.Output, cached_tree_ids tf.Output, cached_node_ids tf.Output, bucketized_features []tf.Output, logits_dimension int64) (partial_logits tf.Output, tree_ids tf.Output, node_ids tf.Output) {
4712	if scope.Err() != nil {
4713		return
4714	}
4715	attrs := map[string]interface{}{"logits_dimension": logits_dimension}
4716	opspec := tf.OpSpec{
4717		Type: "BoostedTreesTrainingPredict",
4718		Input: []tf.Input{
4719			tree_ensemble_handle, cached_tree_ids, cached_node_ids, tf.OutputList(bucketized_features),
4720		},
4721		Attrs: attrs,
4722	}
4723	op := scope.AddOperation(opspec)
4724	return op.Output(0), op.Output(1), op.Output(2)
4725}
4726
4727// Updates the tree ensemble by either adding a layer to the last tree being grown
4728//
4729// or by starting a new tree.
4730//
4731// Arguments:
4732//
4733//	tree_ensemble_handle: Handle to the ensemble variable.
4734//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
4735//
4736// the feature that will be used in the split.
4737//
4738//	node_ids: List of rank 1 tensors representing the nodes for which this feature
4739//
4740// has a split.
4741//
4742//	gains: List of rank 1 tensors representing the gains for each of the feature's
4743//
4744// split.
4745//
4746//	thresholds: List of rank 1 tensors representing the thresholds for each of the
4747//
4748// feature's split.
4749//
4750//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
4751//
4752// the feature's splits. Will be added to the previous node values to constitute
4753// the values of the left nodes.
4754//
4755//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
4756//
4757// of the feature's splits. Will be added to the previous node values to constitute
4758// the values of the right nodes.
4759//
4760//	max_depth: Max depth of the tree to build.
4761//	learning_rate: shrinkage const for each new tree.
4762//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
4763//
4764// Returns the created operation.
4765func BoostedTreesUpdateEnsemble(scope *Scope, tree_ensemble_handle tf.Output, feature_ids tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode int64) (o *tf.Operation) {
4766	if scope.Err() != nil {
4767		return
4768	}
4769	attrs := map[string]interface{}{"pruning_mode": pruning_mode}
4770	opspec := tf.OpSpec{
4771		Type: "BoostedTreesUpdateEnsemble",
4772		Input: []tf.Input{
4773			tree_ensemble_handle, feature_ids, tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), max_depth, learning_rate,
4774		},
4775		Attrs: attrs,
4776	}
4777	return scope.AddOperation(opspec)
4778}
4779
4780// BoostedTreesUpdateEnsembleV2Attr is an optional argument to BoostedTreesUpdateEnsembleV2.
4781type BoostedTreesUpdateEnsembleV2Attr func(optionalAttr)
4782
4783// BoostedTreesUpdateEnsembleV2LogitsDimension sets the optional logits_dimension attribute to value.
4784//
4785// value: scalar, dimension of the logits
4786// If not specified, defaults to 1
4787func BoostedTreesUpdateEnsembleV2LogitsDimension(value int64) BoostedTreesUpdateEnsembleV2Attr {
4788	return func(m optionalAttr) {
4789		m["logits_dimension"] = value
4790	}
4791}
4792
4793// Updates the tree ensemble by adding a layer to the last tree being grown
4794//
4795// or by starting a new tree.
4796//
4797// Arguments:
4798//
4799//	tree_ensemble_handle: Handle to the ensemble variable.
4800//	feature_ids: Rank 1 tensor with ids for each feature. This is the real id of
4801//
4802// the feature that will be used in the split.
4803//
4804//	dimension_ids: List of rank 1 tensors representing the dimension in each feature.
4805//	node_ids: List of rank 1 tensors representing the nodes for which this feature
4806//
4807// has a split.
4808//
4809//	gains: List of rank 1 tensors representing the gains for each of the feature's
4810//
4811// split.
4812//
4813//	thresholds: List of rank 1 tensors representing the thresholds for each of the
4814//
4815// feature's split.
4816//
4817//	left_node_contribs: List of rank 2 tensors with left leaf contribs for each of
4818//
4819// the feature's splits. Will be added to the previous node values to constitute
4820// the values of the left nodes.
4821//
4822//	right_node_contribs: List of rank 2 tensors with right leaf contribs for each
4823//
4824// of the feature's splits. Will be added to the previous node values to constitute
4825// the values of the right nodes.
4826//
4827//	split_types: List of rank 1 tensors representing the split type for each feature.
4828//	max_depth: Max depth of the tree to build.
4829//	learning_rate: shrinkage const for each new tree.
4830//	pruning_mode: 0-No pruning, 1-Pre-pruning, 2-Post-pruning.
4831//
4832// Returns the created operation.
4833func BoostedTreesUpdateEnsembleV2(scope *Scope, tree_ensemble_handle tf.Output, feature_ids []tf.Output, dimension_ids []tf.Output, node_ids []tf.Output, gains []tf.Output, thresholds []tf.Output, left_node_contribs []tf.Output, right_node_contribs []tf.Output, split_types []tf.Output, max_depth tf.Output, learning_rate tf.Output, pruning_mode tf.Output, optional ...BoostedTreesUpdateEnsembleV2Attr) (o *tf.Operation) {
4834	if scope.Err() != nil {
4835		return
4836	}
4837	attrs := map[string]interface{}{}
4838	for _, a := range optional {
4839		a(attrs)
4840	}
4841	opspec := tf.OpSpec{
4842		Type: "BoostedTreesUpdateEnsembleV2",
4843		Input: []tf.Input{
4844			tree_ensemble_handle, tf.OutputList(feature_ids), tf.OutputList(dimension_ids), tf.OutputList(node_ids), tf.OutputList(gains), tf.OutputList(thresholds), tf.OutputList(left_node_contribs), tf.OutputList(right_node_contribs), tf.OutputList(split_types), max_depth, learning_rate, pruning_mode,
4845		},
4846		Attrs: attrs,
4847	}
4848	return scope.AddOperation(opspec)
4849}
4850
4851// Return the shape of s0 op s1 with broadcast.
4852//
4853// Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
4854// broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
4855func BroadcastArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output) {
4856	if scope.Err() != nil {
4857		return
4858	}
4859	opspec := tf.OpSpec{
4860		Type: "BroadcastArgs",
4861		Input: []tf.Input{
4862			s0, s1,
4863		},
4864	}
4865	op := scope.AddOperation(opspec)
4866	return op.Output(0)
4867}
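
// A small sketch (not generated code): the broadcast of shapes [2, 1] and [3]
// is [2, 3]; `root` is a local assumption:
//
//	root := NewScope()
//	r0 := BroadcastArgs(root, Const(root, []int32{2, 1}), Const(root, []int32{3}))
//	_ = r0 // evaluates to [2, 3] when the graph is run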
4868
4869// Return the reduction indices for computing gradients of s0 op s1 with broadcast.
4870//
4871// This is typically used by gradient computations for a broadcasting operation.
4872func BroadcastGradientArgs(scope *Scope, s0 tf.Output, s1 tf.Output) (r0 tf.Output, r1 tf.Output) {
4873	if scope.Err() != nil {
4874		return
4875	}
4876	opspec := tf.OpSpec{
4877		Type: "BroadcastGradientArgs",
4878		Input: []tf.Input{
4879			s0, s1,
4880		},
4881	}
4882	op := scope.AddOperation(opspec)
4883	return op.Output(0), op.Output(1)
4884}
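
// A small sketch (not generated code): for s0=[2, 1] and s1=[3] (broadcast
// shape [2, 3]), r0 and r1 hold the axes a broadcasted gradient must be summed
// over to recover each input's shape (here r0=[1], r1=[0]):
//
//	root := NewScope()
//	r0, r1 := BroadcastGradientArgs(root, Const(root, []int32{2, 1}), Const(root, []int32{3}))
//	_, _ = r0, r1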
4885
4886// Broadcast an array for a compatible shape.
4887//
4888// Broadcasting is the process of making arrays have compatible shapes
4889// for arithmetic operations. Two shapes are compatible if for each
4890// dimension pair they are either equal or one of them is one.
4891//
4892// For example:
4893//
4894// >>> x = tf.constant([[1, 2, 3]])   # Shape (1, 3,)
4895// >>> y = tf.broadcast_to(x, [2, 3])
4896// >>> print(y)
4897// tf.Tensor(
4898//
4899//	[[1 2 3]
4900//	 [1 2 3]], shape=(2, 3), dtype=int32)
4901//
4902// In the above example, the input Tensor with shape `[1, 3]`
4903// is broadcast to an output Tensor with shape `[2, 3]`.
4904//
4905// When broadcasting, if a tensor has fewer axes than necessary its shape is
4906// padded on the left with ones. So this gives the same result as the previous
4907// example:
4908//
4909// >>> x = tf.constant([1, 2, 3])   # Shape (3,)
4910// >>> y = tf.broadcast_to(x, [2, 3])
4911//
4912// When doing broadcasted operations such as multiplying a tensor
4913// by a scalar, broadcasting (usually) confers some time or space
4914// benefit, as the broadcasted tensor is never materialized.
4915//
4916// However, `broadcast_to` does not carry with it any such benefits.
4917// The newly-created tensor takes the full memory of the broadcasted
4918// shape. (In a graph context, `broadcast_to` might be fused into a
4919// subsequent operation and then optimized away, however.)
4920//
4921// Arguments:
4922//
4923//	input: A Tensor to broadcast.
4924//	shape: A 1-D `int` Tensor. The shape of the desired output.
4925//
4926// Returns A Tensor.
4927func BroadcastTo(scope *Scope, input tf.Output, shape tf.Output) (output tf.Output) {
4928	if scope.Err() != nil {
4929		return
4930	}
4931	opspec := tf.OpSpec{
4932		Type: "BroadcastTo",
4933		Input: []tf.Input{
4934			input, shape,
4935		},
4936	}
4937	op := scope.AddOperation(opspec)
4938	return op.Output(0)
4939}
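
// The Python example above, sketched with this package's API (illustrative,
// not generated code; error handling elided):
//
//	root := NewScope()
//	x := Const(root, [][]int32{{1, 2, 3}}) // shape (1, 3)
//	y := BroadcastTo(root, x, Const(root, []int32{2, 3}))
//	graph, _ := root.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	out, _ := sess.Run(nil, []tf.Output{y}, nil)
//	_ = out // out[0].Value() is [[1 2 3] [1 2 3]]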
4940
4941// Bucketizes 'input' based on 'boundaries'.
4942//
4943// For example, if the inputs are
4944//
4945//	boundaries = [0, 10, 100]
4946//	input = [[-5, 10000]
4947//	         [150,   10]
4948//	         [5,    100]]
4949//
4950// then the output will be
4951//
4952//	output = [[0, 3]
4953//	          [3, 2]
4954//	          [1, 3]]
4955//
4956// Arguments:
4957//
4958//	input: A Tensor of any shape containing int or float values.
4959//	boundaries: A sorted list of floats giving the boundaries of the buckets.
4960//
4961// Returns a Tensor with the same shape as 'input', with each value replaced by its bucket index.
4962//
4963// @compatibility(numpy)
4964// Equivalent to np.digitize.
4965// @end_compatibility
4966func Bucketize(scope *Scope, input tf.Output, boundaries []float32) (output tf.Output) {
4967	if scope.Err() != nil {
4968		return
4969	}
4970	attrs := map[string]interface{}{"boundaries": boundaries}
4971	opspec := tf.OpSpec{
4972		Type: "Bucketize",
4973		Input: []tf.Input{
4974			input,
4975		},
4976		Attrs: attrs,
4977	}
4978	op := scope.AddOperation(opspec)
4979	return op.Output(0)
4980}
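
// The example above, sketched in Go (illustrative, not generated code):
//
//	root := NewScope()
//	input := Const(root, [][]int32{{-5, 10000}, {150, 10}, {5, 100}})
//	out := Bucketize(root, input, []float32{0, 10, 100})
//	_ = out // evaluates to [[0 3] [3 2] [1 3]] when the graph is run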
4981
4982// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
4983func BytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
4984	if scope.Err() != nil {
4985		return
4986	}
4987	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
4988	opspec := tf.OpSpec{
4989		Type: "BytesProducedStatsDataset",
4990		Input: []tf.Input{
4991			input_dataset, tag,
4992		},
4993		Attrs: attrs,
4994	}
4995	op := scope.AddOperation(opspec)
4996	return op.Output(0)
4997}
4998
4999// Reads out the CSR components at batch `index`.
5000//
5001// This op is meant only for debugging / testing, and its interface is not expected
5002// to be stable.
5003//
5004// Arguments:
5005//
5006//	csr_sparse_matrix: A batched CSRSparseMatrix.
5007//	index: The index in `csr_sparse_matrix`'s batch.
5008//
5009// Returns:
5010//
5011//	row_ptrs: An array containing CSR matrix row pointers.
5012//	col_inds: An array containing CSR matrix column indices.
5013//	values: An array containing CSR matrix nonzero values.
5014func CSRSparseMatrixComponents(scope *Scope, csr_sparse_matrix tf.Output, index tf.Output, type_ tf.DataType) (row_ptrs tf.Output, col_inds tf.Output, values tf.Output) {
5015	if scope.Err() != nil {
5016		return
5017	}
5018	attrs := map[string]interface{}{"type": type_}
5019	opspec := tf.OpSpec{
5020		Type: "CSRSparseMatrixComponents",
5021		Input: []tf.Input{
5022			csr_sparse_matrix, index,
5023		},
5024		Attrs: attrs,
5025	}
5026	op := scope.AddOperation(opspec)
5027	return op.Output(0), op.Output(1), op.Output(2)
5028}
5029
5030// Convert a (possibly batched) CSRSparseMatrix to dense.
5031//
5032// Arguments:
5033//
5034//	sparse_input: A batched CSRSparseMatrix.
5035//
5036// Returns A dense tensor.
5037func CSRSparseMatrixToDense(scope *Scope, sparse_input tf.Output, type_ tf.DataType) (dense_output tf.Output) {
5038	if scope.Err() != nil {
5039		return
5040	}
5041	attrs := map[string]interface{}{"type": type_}
5042	opspec := tf.OpSpec{
5043		Type: "CSRSparseMatrixToDense",
5044		Input: []tf.Input{
5045			sparse_input,
5046		},
5047		Attrs: attrs,
5048	}
5049	op := scope.AddOperation(opspec)
5050	return op.Output(0)
5051}
5052
5053// Converts a (possibly batched) CSRSparseMatrix to a SparseTensor.
5054//
5055// Arguments:
5056//
5057//	sparse_matrix: A (possibly batched) CSRSparseMatrix.
5058//
5059// Returns:
5060//
5061//	indices: SparseTensor indices.
5062//	values: SparseTensor values.
5063//	dense_shape: SparseTensor dense shape.
5064func CSRSparseMatrixToSparseTensor(scope *Scope, sparse_matrix tf.Output, type_ tf.DataType) (indices tf.Output, values tf.Output, dense_shape tf.Output) {
5065	if scope.Err() != nil {
5066		return
5067	}
5068	attrs := map[string]interface{}{"type": type_}
5069	opspec := tf.OpSpec{
5070		Type: "CSRSparseMatrixToSparseTensor",
5071		Input: []tf.Input{
5072			sparse_matrix,
5073		},
5074		Attrs: attrs,
5075	}
5076	op := scope.AddOperation(opspec)
5077	return op.Output(0), op.Output(1), op.Output(2)
5078}
5079
5080// CTCBeamSearchDecoderAttr is an optional argument to CTCBeamSearchDecoder.
5081type CTCBeamSearchDecoderAttr func(optionalAttr)
5082
5083// CTCBeamSearchDecoderMergeRepeated sets the optional merge_repeated attribute to value.
5084//
5085// value: If true, merge repeated classes in output.
5086// If not specified, defaults to true
5087func CTCBeamSearchDecoderMergeRepeated(value bool) CTCBeamSearchDecoderAttr {
5088	return func(m optionalAttr) {
5089		m["merge_repeated"] = value
5090	}
5091}
5092
5093// Performs beam search decoding on the logits given in input.
5094//
5095// A note about the attribute merge_repeated: For the beam search decoder,
5096// this means that if consecutive entries in a beam are the same, only
5097// the first of these is emitted.  That is, when the top path is "A B B B B",
5098// "A B" is returned if merge_repeated = True but "A B B B B" is
5099// returned if merge_repeated = False.
5100//
5101// Arguments:
5102//
5103//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
5104//	sequence_length: A vector containing sequence lengths, size `(batch)`.
5105//	beam_width: A scalar >= 0 (beam search beam width).
5106//	top_paths: A scalar >= 0, <= beam_width (controls output size).
5107//
5108// Returns:
5109//
5110//	decoded_indices: A list (length: top_paths) of indices matrices.  Matrix j,
5111//
5112// size `(total_decoded_outputs[j] x 2)`, has indices of a
5113// `SparseTensor<int64, 2>`.  The rows store: [batch, time].
5114//
5115//	decoded_values: A list (length: top_paths) of values vectors.  Vector j,
5116//
5117// size `(length total_decoded_outputs[j])`, has the values of a
5118// `SparseTensor<int64, 2>`.  The vector stores the decoded classes for beam j.
5119//
5120//	decoded_shape: A list (length: top_paths) of shape vectors.  Vector j,
5121//
5122// size `(2)`, stores the shape of the decoded `SparseTensor[j]`.
5123// Its values are: `[batch_size, max_decoded_length[j]]`.
5124//
5125//	log_probability: A matrix, shaped: `(batch_size x top_paths)`.  The
5126//
5127// sequence log-probabilities.
5128func CTCBeamSearchDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, beam_width int64, top_paths int64, optional ...CTCBeamSearchDecoderAttr) (decoded_indices []tf.Output, decoded_values []tf.Output, decoded_shape []tf.Output, log_probability tf.Output) {
5129	if scope.Err() != nil {
5130		return
5131	}
5132	attrs := map[string]interface{}{"beam_width": beam_width, "top_paths": top_paths}
5133	for _, a := range optional {
5134		a(attrs)
5135	}
5136	opspec := tf.OpSpec{
5137		Type: "CTCBeamSearchDecoder",
5138		Input: []tf.Input{
5139			inputs, sequence_length,
5140		},
5141		Attrs: attrs,
5142	}
5143	op := scope.AddOperation(opspec)
5144	if scope.Err() != nil {
5145		return
5146	}
5147	var idx int
5148	var err error
5149	if decoded_indices, idx, err = makeOutputList(op, idx, "decoded_indices"); err != nil {
5150		scope.UpdateErr("CTCBeamSearchDecoder", err)
5151		return
5152	}
5153	if decoded_values, idx, err = makeOutputList(op, idx, "decoded_values"); err != nil {
5154		scope.UpdateErr("CTCBeamSearchDecoder", err)
5155		return
5156	}
5157	if decoded_shape, idx, err = makeOutputList(op, idx, "decoded_shape"); err != nil {
5158		scope.UpdateErr("CTCBeamSearchDecoder", err)
5159		return
5160	}
5161	log_probability = op.Output(idx)
5162	return decoded_indices, decoded_values, decoded_shape, log_probability
5163}
5164
5165// CTCGreedyDecoderAttr is an optional argument to CTCGreedyDecoder.
5166type CTCGreedyDecoderAttr func(optionalAttr)
5167
5168// CTCGreedyDecoderMergeRepeated sets the optional merge_repeated attribute to value.
5169//
5170// value: If True, merge repeated classes in output.
5171// If not specified, defaults to false
5172func CTCGreedyDecoderMergeRepeated(value bool) CTCGreedyDecoderAttr {
5173	return func(m optionalAttr) {
5174		m["merge_repeated"] = value
5175	}
5176}
5177
5178// CTCGreedyDecoderBlankIndex sets the optional blank_index attribute to value.
5179// If not specified, defaults to -1
5180func CTCGreedyDecoderBlankIndex(value int64) CTCGreedyDecoderAttr {
5181	return func(m optionalAttr) {
5182		m["blank_index"] = value
5183	}
5184}
5185
5186// Performs greedy decoding on the logits given in inputs.
5187//
5188// A note about the attribute merge_repeated: if enabled, when
5189// consecutive logits' maximum indices are the same, only the first of
5190// these is emitted.  Labeling the blank '*', the sequence "A B B * B B"
5191// becomes "A B B" if merge_repeated = True and "A B B B B" if
5192// merge_repeated = False.
5193//
5194// Regardless of the value of merge_repeated, if the maximum index of a given
5195// time and batch corresponds to the blank, index `(num_classes - 1)`, no new
5196// element is emitted.
5197//
5198// Arguments:
5199//
5200//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
5201//	sequence_length: A vector containing sequence lengths, size `(batch_size)`.
5202//
5203// Returns:
5204//
5205//	decoded_indices: Indices matrix, size `(total_decoded_outputs x 2)`,
5206//
5207// of a `SparseTensor<int64, 2>`.  The rows store: [batch, time].
5208//
5209//	decoded_values: Values vector, size: `(total_decoded_outputs)`,
5210//
5211// of a `SparseTensor<int64, 2>`.  The vector stores the decoded classes.
5212//
5213//	decoded_shape: Shape vector, size `(2)`, of the decoded SparseTensor.
5214//
5215// Values are: `[batch_size, max_decoded_length]`.
5216//
5217//	log_probability: Matrix, size `(batch_size x 1)`, containing sequence
5218//
5219// log-probabilities.
5220func CTCGreedyDecoder(scope *Scope, inputs tf.Output, sequence_length tf.Output, optional ...CTCGreedyDecoderAttr) (decoded_indices tf.Output, decoded_values tf.Output, decoded_shape tf.Output, log_probability tf.Output) {
5221	if scope.Err() != nil {
5222		return
5223	}
5224	attrs := map[string]interface{}{}
5225	for _, a := range optional {
5226		a(attrs)
5227	}
5228	opspec := tf.OpSpec{
5229		Type: "CTCGreedyDecoder",
5230		Input: []tf.Input{
5231			inputs, sequence_length,
5232		},
5233		Attrs: attrs,
5234	}
5235	op := scope.AddOperation(opspec)
5236	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
5237}
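
// A minimal sketch (not generated code) with max_time=2, batch_size=1 and
// num_classes=3, where class index 2 (num_classes - 1) is the blank label:
//
//	root := NewScope()
//	logits := Const(root, [][][]float32{
//		{{0.1, 0.8, 0.1}}, // t = 0: argmax is class 1
//		{{0.7, 0.2, 0.1}}, // t = 1: argmax is class 0
//	})
//	seqLen := Const(root, []int32{2})
//	indices, values, shape, logProb := CTCGreedyDecoder(root, logits, seqLen,
//		CTCGreedyDecoderMergeRepeated(true))
//	_, _, _, _ = indices, values, shape, logProb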
5238
5239// CTCLossAttr is an optional argument to CTCLoss.
5240type CTCLossAttr func(optionalAttr)
5241
5242// CTCLossPreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
5243//
5244// value: Scalar, if true then repeated labels are
5245// collapsed prior to the CTC calculation.
5246// If not specified, defaults to false
5247func CTCLossPreprocessCollapseRepeated(value bool) CTCLossAttr {
5248	return func(m optionalAttr) {
5249		m["preprocess_collapse_repeated"] = value
5250	}
5251}
5252
5253// CTCLossCtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
5254//
5255// value: Scalar.  If set to false, *during* CTC calculation
5256// repeated non-blank labels will not be merged and are interpreted as
5257// individual labels.  This is a simplified version of CTC.
5258// If not specified, defaults to true
5259func CTCLossCtcMergeRepeated(value bool) CTCLossAttr {
5260	return func(m optionalAttr) {
5261		m["ctc_merge_repeated"] = value
5262	}
5263}
5264
5265// CTCLossIgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
5266//
5267// value: Scalar. If set to true, during CTC
5268// calculation, items that have longer output sequences than input sequences
5269// are skipped: they don't contribute to the loss term and have zero-gradient.
5270// If not specified, defaults to false
5271func CTCLossIgnoreLongerOutputsThanInputs(value bool) CTCLossAttr {
5272	return func(m optionalAttr) {
5273		m["ignore_longer_outputs_than_inputs"] = value
5274	}
5275}
5276
5277// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
5278//
5279// the gradient.  This op performs the softmax operation for you, so inputs
5280// should be e.g. linear projections of outputs by an LSTM.
5281//
5282// Arguments:
5283//
5284//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits.
5285//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
5286//
5287// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
5288// `(batch b, time t)`.
5289//
5290//	labels_values: The values (labels) associated with the given batch and time.
5291//	sequence_length: A vector containing sequence lengths (batch).
5292//
5293// Returns:
5294//
5295//	loss: A vector (batch) containing log-probabilities.
5296//	gradient: The gradient of `loss`.  3-D, shape:
5297//
5298// `(max_time x batch_size x num_classes)`.
5299func CTCLoss(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossAttr) (loss tf.Output, gradient tf.Output) {
5300	if scope.Err() != nil {
5301		return
5302	}
5303	attrs := map[string]interface{}{}
5304	for _, a := range optional {
5305		a(attrs)
5306	}
5307	opspec := tf.OpSpec{
5308		Type: "CTCLoss",
5309		Input: []tf.Input{
5310			inputs, labels_indices, labels_values, sequence_length,
5311		},
5312		Attrs: attrs,
5313	}
5314	op := scope.AddOperation(opspec)
5315	return op.Output(0), op.Output(1)
5316}
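
// A sketch (not generated code) of passing optional CTCLoss attributes; the
// tensor inputs (logits, label indices/values, sequence lengths) are assumed
// to be built elsewhere in the graph:
//
//	loss, gradient := CTCLoss(root, logits, labelsIndices, labelsValues, seqLen,
//		CTCLossCtcMergeRepeated(false),
//		CTCLossIgnoreLongerOutputsThanInputs(true))
//	_, _ = loss, gradient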
5317
5318// CTCLossV2Attr is an optional argument to CTCLossV2.
5319type CTCLossV2Attr func(optionalAttr)
5320
5321// CTCLossV2PreprocessCollapseRepeated sets the optional preprocess_collapse_repeated attribute to value.
5322//
5323// value: Scalar, if true then repeated labels are
5324// collapsed prior to the CTC calculation.
5325// If not specified, defaults to false
5326func CTCLossV2PreprocessCollapseRepeated(value bool) CTCLossV2Attr {
5327	return func(m optionalAttr) {
5328		m["preprocess_collapse_repeated"] = value
5329	}
5330}
5331
5332// CTCLossV2CtcMergeRepeated sets the optional ctc_merge_repeated attribute to value.
5333//
5334// value: Scalar.  If set to false, *during* CTC calculation
5335// repeated non-blank labels will not be merged and are interpreted as
5336// individual labels.  This is a simplified version of CTC.
5337// If not specified, defaults to true
5338func CTCLossV2CtcMergeRepeated(value bool) CTCLossV2Attr {
5339	return func(m optionalAttr) {
5340		m["ctc_merge_repeated"] = value
5341	}
5342}
5343
5344// CTCLossV2IgnoreLongerOutputsThanInputs sets the optional ignore_longer_outputs_than_inputs attribute to value.
5345//
5346// value: Scalar. If set to true, during CTC
5347// calculation, items that have longer output sequences than input sequences
5348// are skipped: they don't contribute to the loss term and have zero-gradient.
5349// If not specified, defaults to false
5350func CTCLossV2IgnoreLongerOutputsThanInputs(value bool) CTCLossV2Attr {
5351	return func(m optionalAttr) {
5352		m["ignore_longer_outputs_than_inputs"] = value
5353	}
5354}
5355
5356// Calculates the CTC Loss (log probability) for each batch entry.  Also calculates
5357//
5358// the gradient.  This op performs the softmax operation for you, so inputs
5359// should be e.g. linear projections of outputs by an LSTM.
5360//
5361// Arguments:
5362//
5363//	inputs: 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. Default blank
5364//
5365// label is 0 rather than num_classes - 1.
5366//
5367//	labels_indices: The indices of a `SparseTensor<int32, 2>`.
5368//
5369// `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for
5370// `(batch b, time t)`.
5371//
5372//	labels_values: The values (labels) associated with the given batch and time.
5373//	sequence_length: A vector containing sequence lengths (batch).
5374//
5375// Returns:
5376//
5377//	loss: A vector (batch) containing log-probabilities.
5378//	gradient: The gradient of `loss`.  3-D, shape:
5379//
5380// `(max_time x batch_size x num_classes)`.
5381func CTCLossV2(scope *Scope, inputs tf.Output, labels_indices tf.Output, labels_values tf.Output, sequence_length tf.Output, optional ...CTCLossV2Attr) (loss tf.Output, gradient tf.Output) {
5382	if scope.Err() != nil {
5383		return
5384	}
5385	attrs := map[string]interface{}{}
5386	for _, a := range optional {
5387		a(attrs)
5388	}
5389	opspec := tf.OpSpec{
5390		Type: "CTCLossV2",
5391		Input: []tf.Input{
5392			inputs, labels_indices, labels_values, sequence_length,
5393		},
5394		Attrs: attrs,
5395	}
5396	op := scope.AddOperation(opspec)
5397	return op.Output(0), op.Output(1)
5398}
5399
5400// CacheDatasetAttr is an optional argument to CacheDataset.
5401type CacheDatasetAttr func(optionalAttr)
5402
5403// CacheDatasetMetadata sets the optional metadata attribute to value.
5404// If not specified, defaults to ""
5405func CacheDatasetMetadata(value string) CacheDatasetAttr {
5406	return func(m optionalAttr) {
5407		m["metadata"] = value
5408	}
5409}
5410
5411// Creates a dataset that caches elements from `input_dataset`.
5412//
5413// A CacheDataset will iterate over the input_dataset, and store tensors. If the
5414// cache already exists, the cache will be used. If the cache is inappropriate
5415// (e.g. cannot be opened, contains tensors of the wrong shape / size), an error
5416// will be returned when used.
5417//
5418// Arguments:
5419//
5420//	filename: A path on the filesystem where we should cache the dataset. Note: this
5421//
5422// will be a directory.
5423func CacheDataset(scope *Scope, input_dataset tf.Output, filename tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...CacheDatasetAttr) (handle tf.Output) {
5424	if scope.Err() != nil {
5425		return
5426	}
5427	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
5428	for _, a := range optional {
5429		a(attrs)
5430	}
5431	opspec := tf.OpSpec{
5432		Type: "CacheDataset",
5433		Input: []tf.Input{
5434			input_dataset, filename,
5435		},
5436		Attrs: attrs,
5437	}
5438	op := scope.AddOperation(opspec)
5439	return op.Output(0)
5440}
5441
5442// CastAttr is an optional argument to Cast.
5443type CastAttr func(optionalAttr)
5444
5445// CastTruncate sets the optional Truncate attribute to value.
5446// If not specified, defaults to false
5447func CastTruncate(value bool) CastAttr {
5448	return func(m optionalAttr) {
5449		m["Truncate"] = value
5450	}
5451}
5452
5453// Cast x of type SrcT to y of DstT.
5454func Cast(scope *Scope, x tf.Output, DstT tf.DataType, optional ...CastAttr) (y tf.Output) {
5455	if scope.Err() != nil {
5456		return
5457	}
5458	attrs := map[string]interface{}{"DstT": DstT}
5459	for _, a := range optional {
5460		a(attrs)
5461	}
5462	opspec := tf.OpSpec{
5463		Type: "Cast",
5464		Input: []tf.Input{
5465			x,
5466		},
5467		Attrs: attrs,
5468	}
5469	op := scope.AddOperation(opspec)
5470	return op.Output(0)
5471}
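
// Illustrative sketch (not generated code): casting float32 to int32. The
// CastTruncate option is shown only to demonstrate how the optional Truncate
// attribute is passed:
//
//	root := NewScope()
//	x := Const(root, []float32{1.9, -2.7})
//	y := Cast(root, x, tf.Int32, CastTruncate(true))
//	_ = y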
5472
5473// Returns element-wise smallest integer not less than x.
5474func Ceil(scope *Scope, x tf.Output) (y tf.Output) {
5475	if scope.Err() != nil {
5476		return
5477	}
5478	opspec := tf.OpSpec{
5479		Type: "Ceil",
5480		Input: []tf.Input{
5481			x,
5482		},
5483	}
5484	op := scope.AddOperation(opspec)
5485	return op.Output(0)
5486}
5487
5488// Checks a tensor for NaN and Inf values.
5489//
5490// When run, reports an `InvalidArgument` error if `tensor` has any values
5491// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
5492// tensor.
5493//
5494// Example usage:
5495//
5496// ``` python
5497// a = tf.Variable(1.0)
5498// tf.debugging.check_numerics(a, message='')
5499//
5500// b = tf.Variable(np.nan)
5501// try:
5502//
5503//	tf.debugging.check_numerics(b, message='Checking b')
5504//
5505// except Exception as e:
5506//
5507//	assert "Checking b : Tensor had NaN values" in e.message
5508//
5509// c = tf.Variable(np.inf)
5510// try:
5511//
5512//	tf.debugging.check_numerics(c, message='Checking c')
5513//
5514// except Exception as e:
5515//
5516//	assert "Checking c : Tensor had Inf values" in e.message
5517//
5518// ```
5519//
5520// Arguments:
5521//
5522//	message: Prefix of the error message.
5523func CheckNumerics(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
5524	if scope.Err() != nil {
5525		return
5526	}
5527	attrs := map[string]interface{}{"message": message}
5528	opspec := tf.OpSpec{
5529		Type: "CheckNumerics",
5530		Input: []tf.Input{
5531			tensor,
5532		},
5533		Attrs: attrs,
5534	}
5535	op := scope.AddOperation(opspec)
5536	return op.Output(0)
5537}
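
// A Go counterpart of the Python example (illustrative, not generated code):
//
//	root := NewScope()
//	a := Const(root, []float32{1.0, 2.0})
//	checked := CheckNumerics(root, a, "Checking a")
//	_ = checked // running the graph raises InvalidArgument if `a` had NaN/Inf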
5538
5539// Checks a tensor for NaN, -Inf and +Inf values.
5540//
5541// When run, reports an `InvalidArgument` error if `tensor` has any values
5542// that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
5543// tensor. Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf
5544// in the errors it throws.
5545//
5546// Arguments:
5547//
5548//	message: Prefix of the error message.
5549func CheckNumericsV2(scope *Scope, tensor tf.Output, message string) (output tf.Output) {
5550	if scope.Err() != nil {
5551		return
5552	}
5553	attrs := map[string]interface{}{"message": message}
5554	opspec := tf.OpSpec{
5555		Type: "CheckNumericsV2",
5556		Input: []tf.Input{
5557			tensor,
5558		},
5559		Attrs: attrs,
5560	}
5561	op := scope.AddOperation(opspec)
5562	return op.Output(0)
5563}
5564
5565// Computes the Cholesky decomposition of one or more square matrices.
5566//
5567// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
5568// form square matrices.
5569//
5570// The input has to be symmetric and positive definite. Only the lower-triangular
5571// part of the input will be used for this operation. The upper-triangular part
5572// will not be read.
5573//
5574// The output is a tensor of the same shape as the input
5575// containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
5576//
5577// **Note**: The gradient computation on GPU is faster for large matrices but
5578// not for large batch dimensions when the submatrices are small. In this
5579// case it might be faster to use the CPU.
5580//
5581// Arguments:
5582//
5583//	input: Shape is `[..., M, M]`.
5584//
5585// Returns Shape is `[..., M, M]`.
5586func Cholesky(scope *Scope, input tf.Output) (output tf.Output) {
5587	if scope.Err() != nil {
5588		return
5589	}
5590	opspec := tf.OpSpec{
5591		Type: "Cholesky",
5592		Input: []tf.Input{
5593			input,
5594		},
5595	}
5596	op := scope.AddOperation(opspec)
5597	return op.Output(0)
5598}
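
// A small sketch (not generated code) decomposing one 2x2 symmetric
// positive-definite matrix; `root` is a local assumption:
//
//	root := NewScope()
//	a := Const(root, [][]float32{{4, 2}, {2, 3}})
//	l := Cholesky(root, a)
//	_ = l // lower-triangular factor L with L * L^T == a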
5599
5600// Computes the reverse mode backpropagated gradient of the Cholesky algorithm.
5601//
5602// For an explanation see "Differentiation of the Cholesky algorithm" by
5603// Iain Murray http://arxiv.org/abs/1602.07527.
5604//
5605// Arguments:
5606//
5607//	l: Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
5608//
5609// Algorithm depends only on lower triangular part of the innermost matrices of
5610// this tensor.
5611//
5612//	grad: df/dl where f is some scalar function. Shape is `[..., M, M]`.
5613//
5614// Algorithm depends only on lower triangular part of the innermost matrices of
5615// this tensor.
5616//
5617// Returns Symmetrized version of df/dA. Shape is `[..., M, M]`
5618func CholeskyGrad(scope *Scope, l tf.Output, grad tf.Output) (output tf.Output) {
5619	if scope.Err() != nil {
5620		return
5621	}
5622	opspec := tf.OpSpec{
5623		Type: "CholeskyGrad",
5624		Input: []tf.Input{
5625			l, grad,
5626		},
5627	}
5628	op := scope.AddOperation(opspec)
5629	return op.Output(0)
5630}
5631
5632// Clips tensor values to a specified min and max.
5633//
5634// Given a tensor `t`, this operation returns a tensor of the same type and
5635// shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
5636// Any values less than `clip_value_min` are set to `clip_value_min`. Any values
5637// greater than `clip_value_max` are set to `clip_value_max`.
5638//
5639// Arguments:
5640//
5641//	t: A `Tensor`.
5642//	clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
5643//
5644// as `t`. The minimum value to clip by.
5645//
5646//	clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
5647//
5648// as `t`. The maximum value to clip by.
5649//
5650// Returns A clipped `Tensor` with the same shape as input 't'.
5651func ClipByValue(scope *Scope, t tf.Output, clip_value_min tf.Output, clip_value_max tf.Output) (output tf.Output) {
5652	if scope.Err() != nil {
5653		return
5654	}
5655	opspec := tf.OpSpec{
5656		Type: "ClipByValue",
5657		Input: []tf.Input{
5658			t, clip_value_min, clip_value_max,
5659		},
5660	}
5661	op := scope.AddOperation(opspec)
5662	return op.Output(0)
5663}
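
// Illustrative sketch (not generated code); the clip bounds are scalar tensors
// of the same dtype as `t`:
//
//	root := NewScope()
//	t := Const(root, []float32{-3, 0.5, 7})
//	clipped := ClipByValue(root, t, Const(root, float32(0)), Const(root, float32(1)))
//	_ = clipped // evaluates to [0 0.5 1] when the graph is run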
5664
5665// An op that merges the string-encoded memory config protos from all hosts.
5666//
5667// Arguments:
5668//
5669//	memory_configs: String-encoded memory config protos containing metadata about
5670//
5671// the memory allocations reserved for TPUEmbedding across all hosts.
5672func CollateTPUEmbeddingMemory(scope *Scope, memory_configs []tf.Output) (merged_memory_config tf.Output) {
5673	if scope.Err() != nil {
5674		return
5675	}
5676	opspec := tf.OpSpec{
5677		Type: "CollateTPUEmbeddingMemory",
5678		Input: []tf.Input{
5679			tf.OutputList(memory_configs),
5680		},
5681	}
5682	op := scope.AddOperation(opspec)
5683	return op.Output(0)
5684}
5685
5686// CollectiveAllToAllV3Attr is an optional argument to CollectiveAllToAllV3.
5687type CollectiveAllToAllV3Attr func(optionalAttr)
5688
5689// CollectiveAllToAllV3TimeoutSeconds sets the optional timeout_seconds attribute to value.
5690// If not specified, defaults to 0
5691func CollectiveAllToAllV3TimeoutSeconds(value float32) CollectiveAllToAllV3Attr {
5692	return func(m optionalAttr) {
5693		m["timeout_seconds"] = value
5694	}
5695}
5696
5697// Mutually exchanges multiple tensors of identical type and shape.
5698func CollectiveAllToAllV3(scope *Scope, input tf.Output, communicator tf.Output, group_assignment tf.Output, optional ...CollectiveAllToAllV3Attr) (data tf.Output) {
5699	if scope.Err() != nil {
5700		return
5701	}
5702	attrs := map[string]interface{}{}
5703	for _, a := range optional {
5704		a(attrs)
5705	}
5706	opspec := tf.OpSpec{
5707		Type: "CollectiveAllToAllV3",
5708		Input: []tf.Input{
5709			input, communicator, group_assignment,
5710		},
5711		Attrs: attrs,
5712	}
5713	op := scope.AddOperation(opspec)
5714	return op.Output(0)
5715}
5716
5717// Assign group keys based on group assignment.
5718func CollectiveAssignGroupV2(scope *Scope, group_assignment tf.Output, device_index tf.Output, base_key tf.Output) (group_size tf.Output, group_key tf.Output) {
5719	if scope.Err() != nil {
5720		return
5721	}
5722	opspec := tf.OpSpec{
5723		Type: "CollectiveAssignGroupV2",
5724		Input: []tf.Input{
5725			group_assignment, device_index, base_key,
5726		},
5727	}
5728	op := scope.AddOperation(opspec)
5729	return op.Output(0), op.Output(1)
5730}
5731
5732// CollectiveBcastRecvAttr is an optional argument to CollectiveBcastRecv.
5733type CollectiveBcastRecvAttr func(optionalAttr)
5734
5735// CollectiveBcastRecvCommunicationHint sets the optional communication_hint attribute to value.
5736// If not specified, defaults to "auto"
5737func CollectiveBcastRecvCommunicationHint(value string) CollectiveBcastRecvAttr {
5738	return func(m optionalAttr) {
5739		m["communication_hint"] = value
5740	}
5741}
5742
5743// CollectiveBcastRecvTimeoutSeconds sets the optional timeout_seconds attribute to value.
5744// If not specified, defaults to 0
5745func CollectiveBcastRecvTimeoutSeconds(value float32) CollectiveBcastRecvAttr {
5746	return func(m optionalAttr) {
5747		m["timeout_seconds"] = value
5748	}
5749}
5750
5751// Receives a tensor value broadcast from another device.
5752func CollectiveBcastRecv(scope *Scope, T tf.DataType, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastRecvAttr) (data tf.Output) {
5753	if scope.Err() != nil {
5754		return
5755	}
5756	attrs := map[string]interface{}{"T": T, "group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
5757	for _, a := range optional {
5758		a(attrs)
5759	}
5760	opspec := tf.OpSpec{
5761		Type: "CollectiveBcastRecv",
5762
5763		Attrs: attrs,
5764	}
5765	op := scope.AddOperation(opspec)
5766	return op.Output(0)
5767}
5768
5769// CollectiveBcastRecvV2Attr is an optional argument to CollectiveBcastRecvV2.
5770type CollectiveBcastRecvV2Attr func(optionalAttr)
5771
5772// CollectiveBcastRecvV2CommunicationHint sets the optional communication_hint attribute to value.
5773// If not specified, defaults to "auto"
5774func CollectiveBcastRecvV2CommunicationHint(value string) CollectiveBcastRecvV2Attr {
5775	return func(m optionalAttr) {
5776		m["communication_hint"] = value
5777	}
5778}
5779
5780// CollectiveBcastRecvV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
5781// If not specified, defaults to 0
5782func CollectiveBcastRecvV2TimeoutSeconds(value float32) CollectiveBcastRecvV2Attr {
5783	return func(m optionalAttr) {
5784		m["timeout_seconds"] = value
5785	}
5786}
5787
5788// Receives a tensor value broadcast from another device.
5789func CollectiveBcastRecvV2(scope *Scope, group_size tf.Output, group_key tf.Output, instance_key tf.Output, shape tf.Output, T tf.DataType, optional ...CollectiveBcastRecvV2Attr) (data tf.Output) {
5790	if scope.Err() != nil {
5791		return
5792	}
5793	attrs := map[string]interface{}{"T": T}
5794	for _, a := range optional {
5795		a(attrs)
5796	}
5797	opspec := tf.OpSpec{
5798		Type: "CollectiveBcastRecvV2",
5799		Input: []tf.Input{
5800			group_size, group_key, instance_key, shape,
5801		},
5802		Attrs: attrs,
5803	}
5804	op := scope.AddOperation(opspec)
5805	return op.Output(0)
5806}
5807
5808// CollectiveBcastSendAttr is an optional argument to CollectiveBcastSend.
5809type CollectiveBcastSendAttr func(optionalAttr)
5810
5811// CollectiveBcastSendCommunicationHint sets the optional communication_hint attribute to value.
5812// If not specified, defaults to "auto"
5813func CollectiveBcastSendCommunicationHint(value string) CollectiveBcastSendAttr {
5814	return func(m optionalAttr) {
5815		m["communication_hint"] = value
5816	}
5817}
5818
5819// CollectiveBcastSendTimeoutSeconds sets the optional timeout_seconds attribute to value.
5820// If not specified, defaults to 0
5821func CollectiveBcastSendTimeoutSeconds(value float32) CollectiveBcastSendAttr {
5822	return func(m optionalAttr) {
5823		m["timeout_seconds"] = value
5824	}
5825}
5826
5827// Broadcasts a tensor value to one or more other devices.
5828func CollectiveBcastSend(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveBcastSendAttr) (data tf.Output) {
5829	if scope.Err() != nil {
5830		return
5831	}
5832	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
5833	for _, a := range optional {
5834		a(attrs)
5835	}
5836	opspec := tf.OpSpec{
5837		Type: "CollectiveBcastSend",
5838		Input: []tf.Input{
5839			input,
5840		},
5841		Attrs: attrs,
5842	}
5843	op := scope.AddOperation(opspec)
5844	return op.Output(0)
5845}
5846
5847// CollectiveBcastSendV2Attr is an optional argument to CollectiveBcastSendV2.
5848type CollectiveBcastSendV2Attr func(optionalAttr)
5849
5850// CollectiveBcastSendV2CommunicationHint sets the optional communication_hint attribute to value.
5851// If not specified, defaults to "auto"
5852func CollectiveBcastSendV2CommunicationHint(value string) CollectiveBcastSendV2Attr {
5853	return func(m optionalAttr) {
5854		m["communication_hint"] = value
5855	}
5856}
5857
5858// CollectiveBcastSendV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
5859// If not specified, defaults to 0
5860func CollectiveBcastSendV2TimeoutSeconds(value float32) CollectiveBcastSendV2Attr {
5861	return func(m optionalAttr) {
5862		m["timeout_seconds"] = value
5863	}
5864}
5865
5866// Broadcasts a tensor value to one or more other devices.
5867func CollectiveBcastSendV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, optional ...CollectiveBcastSendV2Attr) (data tf.Output) {
5868	if scope.Err() != nil {
5869		return
5870	}
5871	attrs := map[string]interface{}{}
5872	for _, a := range optional {
5873		a(attrs)
5874	}
5875	opspec := tf.OpSpec{
5876		Type: "CollectiveBcastSendV2",
5877		Input: []tf.Input{
5878			input, group_size, group_key, instance_key,
5879		},
5880		Attrs: attrs,
5881	}
5882	op := scope.AddOperation(opspec)
5883	return op.Output(0)
5884}
5885
5886// CollectiveGatherAttr is an optional argument to CollectiveGather.
5887type CollectiveGatherAttr func(optionalAttr)
5888
5889// CollectiveGatherCommunicationHint sets the optional communication_hint attribute to value.
5890// If not specified, defaults to "auto"
5891func CollectiveGatherCommunicationHint(value string) CollectiveGatherAttr {
5892	return func(m optionalAttr) {
5893		m["communication_hint"] = value
5894	}
5895}
5896
5897// CollectiveGatherTimeoutSeconds sets the optional timeout_seconds attribute to value.
5898// If not specified, defaults to 0
5899func CollectiveGatherTimeoutSeconds(value float32) CollectiveGatherAttr {
5900	return func(m optionalAttr) {
5901		m["timeout_seconds"] = value
5902	}
5903}
5904
5905// Mutually accumulates multiple tensors of identical type and shape.
5906func CollectiveGather(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, shape tf.Shape, optional ...CollectiveGatherAttr) (data tf.Output) {
5907	if scope.Err() != nil {
5908		return
5909	}
5910	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "shape": shape}
5911	for _, a := range optional {
5912		a(attrs)
5913	}
5914	opspec := tf.OpSpec{
5915		Type: "CollectiveGather",
5916		Input: []tf.Input{
5917			input,
5918		},
5919		Attrs: attrs,
5920	}
5921	op := scope.AddOperation(opspec)
5922	return op.Output(0)
5923}
5924
5925// CollectiveGatherV2Attr is an optional argument to CollectiveGatherV2.
5926type CollectiveGatherV2Attr func(optionalAttr)
5927
5928// CollectiveGatherV2CommunicationHint sets the optional communication_hint attribute to value.
5929// If not specified, defaults to "auto"
5930func CollectiveGatherV2CommunicationHint(value string) CollectiveGatherV2Attr {
5931	return func(m optionalAttr) {
5932		m["communication_hint"] = value
5933	}
5934}
5935
5936// CollectiveGatherV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
5937// If not specified, defaults to 0
5938func CollectiveGatherV2TimeoutSeconds(value float32) CollectiveGatherV2Attr {
5939	return func(m optionalAttr) {
5940		m["timeout_seconds"] = value
5941	}
5942}
5943
5944// Mutually accumulates multiple tensors of identical type and shape.
5945func CollectiveGatherV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, optional ...CollectiveGatherV2Attr) (data tf.Output) {
5946	if scope.Err() != nil {
5947		return
5948	}
5949	attrs := map[string]interface{}{}
5950	for _, a := range optional {
5951		a(attrs)
5952	}
5953	opspec := tf.OpSpec{
5954		Type: "CollectiveGatherV2",
5955		Input: []tf.Input{
5956			input, group_size, group_key, instance_key, tf.OutputList(ordering_token),
5957		},
5958		Attrs: attrs,
5959	}
5960	op := scope.AddOperation(opspec)
5961	return op.Output(0)
5962}
5963
5964// CollectiveInitializeCommunicatorAttr is an optional argument to CollectiveInitializeCommunicator.
5965type CollectiveInitializeCommunicatorAttr func(optionalAttr)
5966
5967// CollectiveInitializeCommunicatorCommunicationHint sets the optional communication_hint attribute to value.
5968// If not specified, defaults to "auto"
5969func CollectiveInitializeCommunicatorCommunicationHint(value string) CollectiveInitializeCommunicatorAttr {
5970	return func(m optionalAttr) {
5971		m["communication_hint"] = value
5972	}
5973}
5974
5975// CollectiveInitializeCommunicatorTimeoutSeconds sets the optional timeout_seconds attribute to value.
5976// If not specified, defaults to 0
5977func CollectiveInitializeCommunicatorTimeoutSeconds(value float32) CollectiveInitializeCommunicatorAttr {
5978	return func(m optionalAttr) {
5979		m["timeout_seconds"] = value
5980	}
5981}
5982
5983// Initializes a group for collective operations.
5984func CollectiveInitializeCommunicator(scope *Scope, group_key tf.Output, rank tf.Output, group_size tf.Output, optional ...CollectiveInitializeCommunicatorAttr) (communicator tf.Output) {
5985	if scope.Err() != nil {
5986		return
5987	}
5988	attrs := map[string]interface{}{}
5989	for _, a := range optional {
5990		a(attrs)
5991	}
5992	opspec := tf.OpSpec{
5993		Type: "CollectiveInitializeCommunicator",
5994		Input: []tf.Input{
5995			group_key, rank, group_size,
5996		},
5997		Attrs: attrs,
5998	}
5999	op := scope.AddOperation(opspec)
6000	return op.Output(0)
6001}
6002
6003// An Op to permute tensors across replicated TPU instances.
6004//
6005// Each instance supplies its own input.
6006//
6007// For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
6008// source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:
6009// `[D, A, B, C]`.
6010//
6011// Arguments:
6012//
6013//	input: The local input to be permuted. Currently only supports float and
6014//
6015// bfloat16.
6016//
6017//	source_target_pairs: A tensor with shape [num_pairs, 2].
6018//
6019// Returns The permuted input.
6020func CollectivePermute(scope *Scope, input tf.Output, source_target_pairs tf.Output) (output tf.Output) {
6021	if scope.Err() != nil {
6022		return
6023	}
6024	opspec := tf.OpSpec{
6025		Type: "CollectivePermute",
6026		Input: []tf.Input{
6027			input, source_target_pairs,
6028		},
6029	}
6030	op := scope.AddOperation(opspec)
6031	return op.Output(0)
6032}
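
// A minimal client-side usage sketch (illustrative, not generated code; the
// variable names are hypothetical). With four replicas, the pair list
// [[0,1],[1,2],[2,3],[3,0]] rotates each replica's tensor to the next
// instance, as in the example above:
//
//	s := op.NewScope()
//	x := op.Placeholder(s, tf.Float)
//	pairs := op.Const(s, [][]int32{{0, 1}, {1, 2}, {2, 3}, {3, 0}})
//	y := op.CollectivePermute(s, x, pairs)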
6033
6034// CollectiveReduceAttr is an optional argument to CollectiveReduce.
6035type CollectiveReduceAttr func(optionalAttr)
6036
6037// CollectiveReduceWaitFor sets the optional wait_for attribute to value.
6038// If not specified, defaults to {}
6039func CollectiveReduceWaitFor(value []int64) CollectiveReduceAttr {
6040	return func(m optionalAttr) {
6041		m["wait_for"] = value
6042	}
6043}
6044
6045// CollectiveReduceCommunicationHint sets the optional communication_hint attribute to value.
6046// If not specified, defaults to "auto"
6047func CollectiveReduceCommunicationHint(value string) CollectiveReduceAttr {
6048	return func(m optionalAttr) {
6049		m["communication_hint"] = value
6050	}
6051}
6052
6053// CollectiveReduceTimeoutSeconds sets the optional timeout_seconds attribute to value.
6054// If not specified, defaults to 0
6055func CollectiveReduceTimeoutSeconds(value float32) CollectiveReduceAttr {
6056	return func(m optionalAttr) {
6057		m["timeout_seconds"] = value
6058	}
6059}
6060
6061// Mutually reduces multiple tensors of identical type and shape.
6062func CollectiveReduce(scope *Scope, input tf.Output, group_size int64, group_key int64, instance_key int64, merge_op string, final_op string, subdiv_offsets []int64, optional ...CollectiveReduceAttr) (data tf.Output) {
6063	if scope.Err() != nil {
6064		return
6065	}
6066	attrs := map[string]interface{}{"group_size": group_size, "group_key": group_key, "instance_key": instance_key, "merge_op": merge_op, "final_op": final_op, "subdiv_offsets": subdiv_offsets}
6067	for _, a := range optional {
6068		a(attrs)
6069	}
6070	opspec := tf.OpSpec{
6071		Type: "CollectiveReduce",
6072		Input: []tf.Input{
6073			input,
6074		},
6075		Attrs: attrs,
6076	}
6077	op := scope.AddOperation(opspec)
6078	return op.Output(0)
6079}
6080
6081// CollectiveReduceV2Attr is an optional argument to CollectiveReduceV2.
6082type CollectiveReduceV2Attr func(optionalAttr)
6083
6084// CollectiveReduceV2CommunicationHint sets the optional communication_hint attribute to value.
6085// If not specified, defaults to "auto"
6086func CollectiveReduceV2CommunicationHint(value string) CollectiveReduceV2Attr {
6087	return func(m optionalAttr) {
6088		m["communication_hint"] = value
6089	}
6090}
6091
6092// CollectiveReduceV2TimeoutSeconds sets the optional timeout_seconds attribute to value.
6093// If not specified, defaults to 0
6094func CollectiveReduceV2TimeoutSeconds(value float32) CollectiveReduceV2Attr {
6095	return func(m optionalAttr) {
6096		m["timeout_seconds"] = value
6097	}
6098}
6099
6100// CollectiveReduceV2MaxSubdivsPerDevice sets the optional max_subdivs_per_device attribute to value.
6101// If not specified, defaults to -1
6102func CollectiveReduceV2MaxSubdivsPerDevice(value int64) CollectiveReduceV2Attr {
6103	return func(m optionalAttr) {
6104		m["max_subdivs_per_device"] = value
6105	}
6106}
6107
6108// Mutually reduces multiple tensors of identical type and shape.
6109func CollectiveReduceV2(scope *Scope, input tf.Output, group_size tf.Output, group_key tf.Output, instance_key tf.Output, ordering_token []tf.Output, merge_op string, final_op string, optional ...CollectiveReduceV2Attr) (data tf.Output) {
6110	if scope.Err() != nil {
6111		return
6112	}
6113	attrs := map[string]interface{}{"merge_op": merge_op, "final_op": final_op}
6114	for _, a := range optional {
6115		a(attrs)
6116	}
6117	opspec := tf.OpSpec{
6118		Type: "CollectiveReduceV2",
6119		Input: []tf.Input{
6120			input, group_size, group_key, instance_key, tf.OutputList(ordering_token),
6121		},
6122		Attrs: attrs,
6123	}
6124	op := scope.AddOperation(opspec)
6125	return op.Output(0)
6126}
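
// A minimal usage sketch (illustrative, not generated code; the scalar keys
// and the "Add"/"Id" choices for merge_op/final_op are assumptions). Each
// participant builds the same graph, and the op completes once group_size
// members with matching keys have contributed their tensors:
//
//	s := op.NewScope()
//	x := op.Placeholder(s, tf.Float)
//	sum := op.CollectiveReduceV2(s, x,
//		op.Const(s, int32(2)), // group_size
//		op.Const(s, int32(0)), // group_key
//		op.Const(s, int32(1)), // instance_key
//		nil,                   // no ordering tokens
//		"Add", "Id",
//		op.CollectiveReduceV2TimeoutSeconds(30))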
6127
6128// CollectiveReduceV3Attr is an optional argument to CollectiveReduceV3.
6129type CollectiveReduceV3Attr func(optionalAttr)
6130
6131// CollectiveReduceV3TimeoutSeconds sets the optional timeout_seconds attribute to value.
6132// If not specified, defaults to 0
6133func CollectiveReduceV3TimeoutSeconds(value float32) CollectiveReduceV3Attr {
6134	return func(m optionalAttr) {
6135		m["timeout_seconds"] = value
6136	}
6137}
6138
6139// Mutually reduces multiple tensors of identical type and shape.
6140func CollectiveReduceV3(scope *Scope, input tf.Output, communicator tf.Output, group_assignment tf.Output, reduction string, optional ...CollectiveReduceV3Attr) (data tf.Output) {
6141	if scope.Err() != nil {
6142		return
6143	}
6144	attrs := map[string]interface{}{"reduction": reduction}
6145	for _, a := range optional {
6146		a(attrs)
6147	}
6148	opspec := tf.OpSpec{
6149		Type: "CollectiveReduceV3",
6150		Input: []tf.Input{
6151			input, communicator, group_assignment,
6152		},
6153		Attrs: attrs,
6154	}
6155	op := scope.AddOperation(opspec)
6156	return op.Output(0)
6157}
6158
6159// CombinedNonMaxSuppressionAttr is an optional argument to CombinedNonMaxSuppression.
6160type CombinedNonMaxSuppressionAttr func(optionalAttr)
6161
6162// CombinedNonMaxSuppressionPadPerClass sets the optional pad_per_class attribute to value.
6163//
6164// value: If false, the output nmsed boxes, scores and classes
6165// are padded/clipped to `max_total_size`. If true, the
6166// output nmsed boxes, scores and classes are padded to be of length
6167// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
6168// which case it is clipped to `max_total_size`. Defaults to false.
6169// If not specified, defaults to false
6170func CombinedNonMaxSuppressionPadPerClass(value bool) CombinedNonMaxSuppressionAttr {
6171	return func(m optionalAttr) {
6172		m["pad_per_class"] = value
6173	}
6174}
6175
6176// CombinedNonMaxSuppressionClipBoxes sets the optional clip_boxes attribute to value.
6177//
6178// value: If true, assume the box coordinates are between [0, 1] and clip the output boxes
6179// if they fall beyond [0, 1]. If false, do not clip and output the box
6180// coordinates as they are.
6181// If not specified, defaults to true
6182func CombinedNonMaxSuppressionClipBoxes(value bool) CombinedNonMaxSuppressionAttr {
6183	return func(m optionalAttr) {
6184		m["clip_boxes"] = value
6185	}
6186}
6187
6188// Greedily selects a subset of bounding boxes in descending order of score.
6189//
6190// This operation performs non_max_suppression on the inputs per batch, across
6191// all classes.
6192// Prunes away boxes that have high intersection-over-union (IOU) overlap
6193// with previously selected boxes.  Bounding boxes are supplied as
6194// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
6195// diagonal pair of box corners and the coordinates can be provided as normalized
6196// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
6197// is agnostic to where the origin is in the coordinate system. Also note that
6198// this algorithm is invariant to orthogonal transformations and translations
6199// of the coordinate system; thus translating or reflections of the coordinate
6200// system result in the same boxes being selected by the algorithm.
6201// The output of this operation is the final boxes, scores and classes tensor
6202// returned after performing non_max_suppression.
6203//
6204// Arguments:
6205//
6206//	boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then
6207//
6208// same boxes are used for all classes otherwise, if `q` is equal to number of
6209// classes, class-specific boxes are used.
6210//
6211//	scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
6212//
6213// representing a single score corresponding to each box (each row of boxes).
6214//
6215//	max_output_size_per_class: A scalar integer tensor representing the maximum number of
6216//
6217// boxes to be selected by non max suppression per class
6218//
6219//	max_total_size: An int32 scalar representing the maximum number of boxes retained over all
6220//
6221// classes. Note that setting this value to a large number may result in an OOM error
6222// depending on the system workload.
6223//
6224//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
6225//
6226// boxes overlap too much with respect to IOU.
6227//
6228//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
6229//
6230// boxes based on score.
6231//
6232// Returns:
6233//
6234//	nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
6235//
6236// containing the non-max suppressed boxes.
6237//
6238//	nmsed_scores: A [batch_size, max_detections] float32 tensor
6239//
6240// containing the scores for the boxes.
6241//
6242//	nmsed_classes: A [batch_size, max_detections] float32 tensor
6243//
6244// containing the classes for the boxes.
6245//
6246//	valid_detections: A [batch_size] int32 tensor indicating the number of
6247//
6248// valid detections per batch item. Only the top valid_detections[i] entries in
6249// nmsed_boxes[i], nmsed_scores[i] and nmsed_classes[i] are valid. The rest of the
6250// entries are zero paddings.
6251func CombinedNonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size_per_class tf.Output, max_total_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...CombinedNonMaxSuppressionAttr) (nmsed_boxes tf.Output, nmsed_scores tf.Output, nmsed_classes tf.Output, valid_detections tf.Output) {
6252	if scope.Err() != nil {
6253		return
6254	}
6255	attrs := map[string]interface{}{}
6256	for _, a := range optional {
6257		a(attrs)
6258	}
6259	opspec := tf.OpSpec{
6260		Type: "CombinedNonMaxSuppression",
6261		Input: []tf.Input{
6262			boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold,
6263		},
6264		Attrs: attrs,
6265	}
6266	op := scope.AddOperation(opspec)
6267	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
6268}
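
// A minimal usage sketch (illustrative, not generated code; the thresholds
// and size limits are hypothetical). The op consumes batched boxes and
// scores and returns padded detections plus the per-batch count of valid
// entries:
//
//	s := op.NewScope()
//	boxes := op.Placeholder(s, tf.Float)  // [batch, num_boxes, q, 4]
//	scores := op.Placeholder(s, tf.Float) // [batch, num_boxes, num_classes]
//	nb, ns, nc, valid := op.CombinedNonMaxSuppression(s, boxes, scores,
//		op.Const(s, int32(100)),    // max_output_size_per_class
//		op.Const(s, int32(300)),    // max_total_size
//		op.Const(s, float32(0.5)),  // iou_threshold
//		op.Const(s, float32(0.05)), // score_threshold
//		op.CombinedNonMaxSuppressionClipBoxes(true))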
6269
6270// ComplexAttr is an optional argument to Complex.
6271type ComplexAttr func(optionalAttr)
6272
6273// ComplexTout sets the optional Tout attribute to value.
6274// If not specified, defaults to DT_COMPLEX64
6275func ComplexTout(value tf.DataType) ComplexAttr {
6276	return func(m optionalAttr) {
6277		m["Tout"] = value
6278	}
6279}
6280
6281// Converts two real numbers to a complex number.
6282//
6283// Given a tensor `real` representing the real part of a complex number, and a
6284// tensor `imag` representing the imaginary part of a complex number, this
6285// operation returns complex numbers elementwise of the form \\(a + bj\\), where
6286// *a* represents the `real` part and *b* represents the `imag` part.
6287//
6288// The input tensors `real` and `imag` must have the same shape.
6289//
6290// For example:
6291//
6292// ```
6293// # tensor 'real' is [2.25, 3.25]
6294// # tensor `imag` is [4.75, 5.75]
6295// tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
6296// ```
6297func Complex(scope *Scope, real tf.Output, imag tf.Output, optional ...ComplexAttr) (out tf.Output) {
6298	if scope.Err() != nil {
6299		return
6300	}
6301	attrs := map[string]interface{}{}
6302	for _, a := range optional {
6303		a(attrs)
6304	}
6305	opspec := tf.OpSpec{
6306		Type: "Complex",
6307		Input: []tf.Input{
6308			real, imag,
6309		},
6310		Attrs: attrs,
6311	}
6312	op := scope.AddOperation(opspec)
6313	return op.Output(0)
6314}
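
// A minimal usage sketch (illustrative, not generated code) mirroring the
// example above; Tout defaults to DT_COMPLEX64 unless overridden with
// ComplexTout:
//
//	s := op.NewScope()
//	re := op.Const(s, []float32{2.25, 3.25})
//	im := op.Const(s, []float32{4.75, 5.75})
//	z := op.Complex(s, re, im) // [2.25+4.75i, 3.25+5.75i]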
6315
6316// ComplexAbsAttr is an optional argument to ComplexAbs.
6317type ComplexAbsAttr func(optionalAttr)
6318
6319// ComplexAbsTout sets the optional Tout attribute to value.
6320// If not specified, defaults to DT_FLOAT
6321func ComplexAbsTout(value tf.DataType) ComplexAbsAttr {
6322	return func(m optionalAttr) {
6323		m["Tout"] = value
6324	}
6325}
6326
6327// Computes the complex absolute value of a tensor.
6328//
6329// Given a tensor `x` of complex numbers, this operation returns a tensor of type
6330// `float` or `double` that is the absolute value of each element in `x`. All
6331// elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
6332// value is computed as \\( \sqrt{a^2 + b^2}\\).
6333//
6334// For example:
6335//
6336// >>> x = tf.complex(3.0, 4.0)
6337// >>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy())
6338// 5.0
6339func ComplexAbs(scope *Scope, x tf.Output, optional ...ComplexAbsAttr) (y tf.Output) {
6340	if scope.Err() != nil {
6341		return
6342	}
6343	attrs := map[string]interface{}{}
6344	for _, a := range optional {
6345		a(attrs)
6346	}
6347	opspec := tf.OpSpec{
6348		Type: "ComplexAbs",
6349		Input: []tf.Input{
6350			x,
6351		},
6352		Attrs: attrs,
6353	}
6354	op := scope.AddOperation(opspec)
6355	return op.Output(0)
6356}
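
// A minimal usage sketch (illustrative, not generated code): for a complex64
// input the default Tout of DT_FLOAT is already correct; DT_DOUBLE pairs with
// complex128 input instead.
//
//	s := op.NewScope()
//	z := op.Const(s, []complex64{complex(3, 4)})
//	mag := op.ComplexAbs(s, z, op.ComplexAbsTout(tf.Float)) // [5.0]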
6357
6358// Encodes an `ExtensionType` value into a `variant` scalar Tensor.
6359//
6360// Returns a scalar variant tensor containing a single `CompositeTensorVariant`
6361// with the specified Tensor components and TypeSpec.
6362//
6363// Arguments:
6364//
6365//	components: The component tensors for the extension type value.
6366//	metadata: String serialization for the TypeSpec.  (Note: the encoding for the TypeSpec
6367//
6368// may change in future versions of TensorFlow.)
6369//
6370// Returns A `variant` Tensor containing the encoded value.
6371func CompositeTensorVariantFromComponents(scope *Scope, components []tf.Output, metadata string) (encoded tf.Output) {
6372	if scope.Err() != nil {
6373		return
6374	}
6375	attrs := map[string]interface{}{"metadata": metadata}
6376	opspec := tf.OpSpec{
6377		Type: "CompositeTensorVariantFromComponents",
6378		Input: []tf.Input{
6379			tf.OutputList(components),
6380		},
6381		Attrs: attrs,
6382	}
6383	op := scope.AddOperation(opspec)
6384	return op.Output(0)
6385}
6386
6387// Decodes a `variant` scalar Tensor into an `ExtensionType` value.
6388//
6389// Returns the Tensor components encoded in a `CompositeTensorVariant`.
6390//
6391// Raises an error if `metadata` doesn't match the TypeSpec
6392// in `encoded`.
6393//
6394// Arguments:
6395//
6396//	encoded: A scalar `variant` Tensor containing an encoded ExtensionType value.
6397//	metadata: String serialization for the TypeSpec.  Must be compatible with the
6398//
6399// `TypeSpec` contained in `encoded`.  (Note: the encoding for the TypeSpec
6400// may change in future versions of TensorFlow.)
6401//
6402//	Tcomponents: Expected dtypes for components.
6403//
6404// Returns The component tensors for the ExtensionType value in `encoded`.
6405func CompositeTensorVariantToComponents(scope *Scope, encoded tf.Output, metadata string, Tcomponents []tf.DataType) (components []tf.Output) {
6406	if scope.Err() != nil {
6407		return
6408	}
6409	attrs := map[string]interface{}{"metadata": metadata, "Tcomponents": Tcomponents}
6410	opspec := tf.OpSpec{
6411		Type: "CompositeTensorVariantToComponents",
6412		Input: []tf.Input{
6413			encoded,
6414		},
6415		Attrs: attrs,
6416	}
6417	op := scope.AddOperation(opspec)
6418	if scope.Err() != nil {
6419		return
6420	}
6421	var idx int
6422	var err error
6423	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
6424		scope.UpdateErr("CompositeTensorVariantToComponents", err)
6425		return
6426	}
6427	return components
6428}
6429
6430// Compresses a dataset element.
6431func CompressElement(scope *Scope, components []tf.Output) (compressed tf.Output) {
6432	if scope.Err() != nil {
6433		return
6434	}
6435	opspec := tf.OpSpec{
6436		Type: "CompressElement",
6437		Input: []tf.Input{
6438			tf.OutputList(components),
6439		},
6440	}
6441	op := scope.AddOperation(opspec)
6442	return op.Output(0)
6443}
6444
6445// ComputeAccidentalHitsAttr is an optional argument to ComputeAccidentalHits.
6446type ComputeAccidentalHitsAttr func(optionalAttr)
6447
6448// ComputeAccidentalHitsSeed sets the optional seed attribute to value.
6449//
6450// value: If either seed or seed2 are set to be non-zero, the random number
6451// generator is seeded by the given seed.  Otherwise, it is seeded by a
6452// random seed.
6453// If not specified, defaults to 0
6454func ComputeAccidentalHitsSeed(value int64) ComputeAccidentalHitsAttr {
6455	return func(m optionalAttr) {
6456		m["seed"] = value
6457	}
6458}
6459
6460// ComputeAccidentalHitsSeed2 sets the optional seed2 attribute to value.
6461//
6462// value: A second seed to avoid seed collision.
6463// If not specified, defaults to 0
6464func ComputeAccidentalHitsSeed2(value int64) ComputeAccidentalHitsAttr {
6465	return func(m optionalAttr) {
6466		m["seed2"] = value
6467	}
6468}
6469
6470// Computes the ids of the positions in sampled_candidates that match true_labels.
6471//
6472// When doing log-odds NCE, the result of this op should be passed through a
6473// SparseToDense op, then added to the logits of the sampled candidates. This has
6474// the effect of 'removing' the sampled labels that match the true labels by
6475// making the classifier sure that they are sampled labels.
6476//
6477// Arguments:
6478//
6479//	true_classes: The true_classes output of UnpackSparseLabels.
6480//	sampled_candidates: The sampled_candidates output of CandidateSampler.
6481//	num_true: Number of true labels per context.
6482//
6483// Returns:
6484//
6485//	indices: A vector of indices corresponding to rows of true_candidates.
6486//	ids: A vector of IDs of positions in sampled_candidates that match a true_label
6487//
6488// for the row with the corresponding index in indices.
6489//
6490//	weights: A vector of the same length as indices and ids, in which each element
6491//
6492// is -FLOAT_MAX.
6493func ComputeAccidentalHits(scope *Scope, true_classes tf.Output, sampled_candidates tf.Output, num_true int64, optional ...ComputeAccidentalHitsAttr) (indices tf.Output, ids tf.Output, weights tf.Output) {
6494	if scope.Err() != nil {
6495		return
6496	}
6497	attrs := map[string]interface{}{"num_true": num_true}
6498	for _, a := range optional {
6499		a(attrs)
6500	}
6501	opspec := tf.OpSpec{
6502		Type: "ComputeAccidentalHits",
6503		Input: []tf.Input{
6504			true_classes, sampled_candidates,
6505		},
6506		Attrs: attrs,
6507	}
6508	op := scope.AddOperation(opspec)
6509	return op.Output(0), op.Output(1), op.Output(2)
6510}
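
// A minimal usage sketch (illustrative, not generated code; the placeholder
// dtypes and num_true value are assumptions). The sparse (indices, ids,
// weights) triple is typically turned dense via SparseToDense and added to
// the sampled logits, as described above:
//
//	s := op.NewScope()
//	trueClasses := op.Placeholder(s, tf.Int64) // [batch_size, num_true]
//	sampled := op.Placeholder(s, tf.Int64)     // [num_sampled]
//	indices, ids, weights := op.ComputeAccidentalHits(s, trueClasses, sampled, 1,
//		op.ComputeAccidentalHitsSeed(42))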
6511
6512// Computes the static batch size of a dataset sans partial batches.
6513func ComputeBatchSize(scope *Scope, input_dataset tf.Output) (batch_size tf.Output) {
6514	if scope.Err() != nil {
6515		return
6516	}
6517	opspec := tf.OpSpec{
6518		Type: "ComputeBatchSize",
6519		Input: []tf.Input{
6520			input_dataset,
6521		},
6522	}
6523	op := scope.AddOperation(opspec)
6524	return op.Output(0)
6525}
6526
6527// Concatenates tensors along one dimension.
6528//
6529// Arguments:
6530//
6531//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
6532//
6533// range [0, rank(values)).
6534//
6535//	values: The `N` Tensors to concatenate. Their ranks and types must match,
6536//
6537// and their sizes must match in all dimensions except `concat_dim`.
6538//
6539// Returns A `Tensor` with the concatenation of values stacked along the
6540// `concat_dim` dimension.  This tensor's shape matches that of `values` except
6541// in `concat_dim` where it has the sum of the sizes.
6542func Concat(scope *Scope, concat_dim tf.Output, values []tf.Output) (output tf.Output) {
6543	if scope.Err() != nil {
6544		return
6545	}
6546	opspec := tf.OpSpec{
6547		Type: "Concat",
6548		Input: []tf.Input{
6549			concat_dim, tf.OutputList(values),
6550		},
6551	}
6552	op := scope.AddOperation(opspec)
6553	return op.Output(0)
6554}
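
// A minimal usage sketch (illustrative, not generated code): concatenating
// two 2x2 constants along dimension 0 yields a tensor of shape [4, 2].
//
//	s := op.NewScope()
//	a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
//	b := op.Const(s, [][]float32{{5, 6}, {7, 8}})
//	out := op.Concat(s, op.Const(s, int32(0)), []tf.Output{a, b})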
6555
6556// Computes offsets of concat inputs within its output.
6557//
6558// For example:
6559//
6560// ```
6561// # 'x' is [2, 2, 7]
6562// # 'y' is [2, 3, 7]
6563// # 'z' is [2, 5, 7]
6564// concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
6565// ```
6566//
6567// This is typically used by gradient computations for a concat operation.
6568//
6569// Arguments:
6570//
6571//	concat_dim: The dimension along which to concatenate.
6572//	shape: The `N` int32 vectors representing shape of tensors being concatenated.
6573//
6574// Returns The `N` int32 vectors representing the starting offset
6575// of input tensors within the concatenated output.
6576func ConcatOffset(scope *Scope, concat_dim tf.Output, shape []tf.Output) (offset []tf.Output) {
6577	if scope.Err() != nil {
6578		return
6579	}
6580	opspec := tf.OpSpec{
6581		Type: "ConcatOffset",
6582		Input: []tf.Input{
6583			concat_dim, tf.OutputList(shape),
6584		},
6585	}
6586	op := scope.AddOperation(opspec)
6587	if scope.Err() != nil {
6588		return
6589	}
6590	var idx int
6591	var err error
6592	if offset, idx, err = makeOutputList(op, idx, "offset"); err != nil {
6593		scope.UpdateErr("ConcatOffset", err)
6594		return
6595	}
6596	return offset
6597}
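
// A minimal usage sketch (illustrative, not generated code) reproducing the
// example above; the offsets accumulate only along the concat dimension:
//
//	s := op.NewScope()
//	x := op.Const(s, []int32{2, 2, 7})
//	y := op.Const(s, []int32{2, 3, 7})
//	z := op.Const(s, []int32{2, 5, 7})
//	offs := op.ConcatOffset(s, op.Const(s, int32(1)), []tf.Output{x, y, z})
//	// offs[0] => [0 0 0], offs[1] => [0 2 0], offs[2] => [0 5 0]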
6598
6599// Concatenates tensors along one dimension.
6600//
6601// Arguments:
6602//
6603//	values: List of `N` Tensors to concatenate. Their ranks and types must match,
6604//
6605// and their sizes must match in all dimensions except `concat_dim`.
6606//
6607//	axis: 0-D.  The dimension along which to concatenate.  Must be in the
6608//
6609// range [-rank(values), rank(values)).
6610//
6611// Returns A `Tensor` with the concatenation of values stacked along the
6612// `axis` dimension.  This tensor's shape matches that of `values` except
6613// in `axis` where it has the sum of the sizes.
6614func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {
6615	if scope.Err() != nil {
6616		return
6617	}
6618	opspec := tf.OpSpec{
6619		Type: "ConcatV2",
6620		Input: []tf.Input{
6621			tf.OutputList(values), axis,
6622		},
6623	}
6624	op := scope.AddOperation(opspec)
6625	return op.Output(0)
6626}
6627
6628// ConcatenateDatasetAttr is an optional argument to ConcatenateDataset.
6629type ConcatenateDatasetAttr func(optionalAttr)
6630
6631// ConcatenateDatasetMetadata sets the optional metadata attribute to value.
6632// If not specified, defaults to ""
6633func ConcatenateDatasetMetadata(value string) ConcatenateDatasetAttr {
6634	return func(m optionalAttr) {
6635		m["metadata"] = value
6636	}
6637}
6638
6639// Creates a dataset that concatenates `input_dataset` with `another_dataset`.
6640func ConcatenateDataset(scope *Scope, input_dataset tf.Output, another_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ConcatenateDatasetAttr) (handle tf.Output) {
6641	if scope.Err() != nil {
6642		return
6643	}
6644	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
6645	for _, a := range optional {
6646		a(attrs)
6647	}
6648	opspec := tf.OpSpec{
6649		Type: "ConcatenateDataset",
6650		Input: []tf.Input{
6651			input_dataset, another_dataset,
6652		},
6653		Attrs: attrs,
6654	}
6655	op := scope.AddOperation(opspec)
6656	return op.Output(0)
6657}
6658
6659// An op that sets up the centralized structures for a distributed TPU system.
6660//
6661// Returns A vector containing the global TPU id of each TPU on the host.
6662func ConfigureAndInitializeGlobalTPU(scope *Scope) (output tf.Output) {
6663	if scope.Err() != nil {
6664		return
6665	}
6666	opspec := tf.OpSpec{
6667		Type: "ConfigureAndInitializeGlobalTPU",
6668	}
6669	op := scope.AddOperation(opspec)
6670	return op.Output(0)
6671}
6672
6673// ConfigureDistributedTPUAttr is an optional argument to ConfigureDistributedTPU.
6674type ConfigureDistributedTPUAttr func(optionalAttr)
6675
6676// ConfigureDistributedTPUEmbeddingConfig sets the optional embedding_config attribute to value.
6677//
6678// value: Reserved. Do not use.
6679// If not specified, defaults to ""
6680func ConfigureDistributedTPUEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
6681	return func(m optionalAttr) {
6682		m["embedding_config"] = value
6683	}
6684}
6685
6686// ConfigureDistributedTPUTpuEmbeddingConfig sets the optional tpu_embedding_config attribute to value.
6687//
6688// value: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
6689// describes the embedding lookups of the program.
6690// If not specified, defaults to ""
6691func ConfigureDistributedTPUTpuEmbeddingConfig(value string) ConfigureDistributedTPUAttr {
6692	return func(m optionalAttr) {
6693		m["tpu_embedding_config"] = value
6694	}
6695}
6696
6697// ConfigureDistributedTPUIsGlobalInit sets the optional is_global_init attribute to value.
6698//
6699// value: Reserved. Do not use.
6700// If not specified, defaults to false
6701func ConfigureDistributedTPUIsGlobalInit(value bool) ConfigureDistributedTPUAttr {
6702	return func(m optionalAttr) {
6703		m["is_global_init"] = value
6704	}
6705}
6706
6707// ConfigureDistributedTPUEnableWholeMeshCompilations sets the optional enable_whole_mesh_compilations attribute to value.
6708// If not specified, defaults to false
6709func ConfigureDistributedTPUEnableWholeMeshCompilations(value bool) ConfigureDistributedTPUAttr {
6710	return func(m optionalAttr) {
6711		m["enable_whole_mesh_compilations"] = value
6712	}
6713}
6714
6715// ConfigureDistributedTPUCompilationFailureClosesChips sets the optional compilation_failure_closes_chips attribute to value.
6716// If not specified, defaults to true
6717func ConfigureDistributedTPUCompilationFailureClosesChips(value bool) ConfigureDistributedTPUAttr {
6718	return func(m optionalAttr) {
6719		m["compilation_failure_closes_chips"] = value
6720	}
6721}
6722
6723// ConfigureDistributedTPUTpuCancellationClosesChips sets the optional tpu_cancellation_closes_chips attribute to value.
6724// If not specified, defaults to 0
6725func ConfigureDistributedTPUTpuCancellationClosesChips(value int64) ConfigureDistributedTPUAttr {
6726	return func(m optionalAttr) {
6727		m["tpu_cancellation_closes_chips"] = value
6728	}
6729}
6730
6731// Sets up the centralized structures for a distributed TPU system.
6732//
6733// Returns A serialized tensorflow.tpu.TopologyProto that describes the TPU
6734// topology.
6735func ConfigureDistributedTPU(scope *Scope, optional ...ConfigureDistributedTPUAttr) (topology tf.Output) {
6736	if scope.Err() != nil {
6737		return
6738	}
6739	attrs := map[string]interface{}{}
6740	for _, a := range optional {
6741		a(attrs)
6742	}
6743	opspec := tf.OpSpec{
6744		Type: "ConfigureDistributedTPU",
6745
6746		Attrs: attrs,
6747	}
6748	op := scope.AddOperation(opspec)
6749	return op.Output(0)
6750}
6751
6752// Sets up TPUEmbedding in a distributed TPU system.
6753//
6754// Arguments:
6755//
6756//	config: Serialized tensorflow.tpu.TPUEmbeddingConfiguration that
6757//
6758// describes the embedding lookups of the program.
6759//
6760// Returns the created operation.
6761func ConfigureTPUEmbedding(scope *Scope, config string) (o *tf.Operation) {
6762	if scope.Err() != nil {
6763		return
6764	}
6765	attrs := map[string]interface{}{"config": config}
6766	opspec := tf.OpSpec{
6767		Type: "ConfigureTPUEmbedding",
6768
6769		Attrs: attrs,
6770	}
6771	return scope.AddOperation(opspec)
6772}
6773
6774// An op that configures the TPUEmbedding software on a host.
6775//
6776// Arguments:
6777//
6778//	common_config: A string-encoded common configuration proto containing metadata
6779//
6780// about the TPUEmbedding partitioner output.
6781//
6782//	memory_config: A string-encoded memory config proto containing metadata about
6783//
6784// the memory allocations reserved for TPUEmbedding.
6785//
6786//	config: A TPUEmbeddingConfiguration proto serialized to a string,
6787//
6788// describing the desired TPUEmbedding configuration.
6789//
6790// Returns A string containing metadata about the hostname and RPC port
6791// used for communication with this host.
6792func ConfigureTPUEmbeddingHost(scope *Scope, common_config tf.Output, memory_config tf.Output, config string) (network_config tf.Output) {
6793	if scope.Err() != nil {
6794		return
6795	}
6796	attrs := map[string]interface{}{"config": config}
6797	opspec := tf.OpSpec{
6798		Type: "ConfigureTPUEmbeddingHost",
6799		Input: []tf.Input{
6800			common_config, memory_config,
6801		},
6802		Attrs: attrs,
6803	}
6804	op := scope.AddOperation(opspec)
6805	return op.Output(0)
6806}
6807
6808// An op that configures the TPUEmbedding software on a host.
6809//
6810// Arguments:
6811//
6812//	common_config: A string-encoded CommonConfiguration proto containing metadata
6813//
6814// about the TPUEmbedding partitioner output and the HBM size (in bytes) required
6815// for operation.
6816//
6817// Returns A string-encoded memory configuration containing metadata about
6818// the memory allocations reserved for TPUEmbedding.
6819func ConfigureTPUEmbeddingMemory(scope *Scope, common_config tf.Output) (memory_config tf.Output) {
6820	if scope.Err() != nil {
6821		return
6822	}
6823	opspec := tf.OpSpec{
6824		Type: "ConfigureTPUEmbeddingMemory",
6825		Input: []tf.Input{
6826			common_config,
6827		},
6828	}
6829	op := scope.AddOperation(opspec)
6830	return op.Output(0)
6831}
6832
6833// Returns the complex conjugate of a complex number.
6834//
6835// Given a tensor `input` of complex numbers, this operation returns a tensor of
6836// complex numbers that are the complex conjugate of each element in `input`. The
6837// complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
6838// real part and *b* is the imaginary part.
6839//
6840// The complex conjugate returned by this operation is of the form \\(a - bj\\).
6841//
6842// For example:
6843//
6844// ```
6845// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
6846// tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
6847// ```
6848func Conj(scope *Scope, input tf.Output) (output tf.Output) {
6849	if scope.Err() != nil {
6850		return
6851	}
6852	opspec := tf.OpSpec{
6853		Type: "Conj",
6854		Input: []tf.Input{
6855			input,
6856		},
6857	}
6858	op := scope.AddOperation(opspec)
6859	return op.Output(0)
6860}
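
// A minimal usage sketch (illustrative, not generated code) mirroring the
// example above:
//
//	s := op.NewScope()
//	in := op.Const(s, []complex64{complex(-2.25, 4.75), complex(3.25, 5.75)})
//	out := op.Conj(s, in) // [-2.25-4.75i, 3.25-5.75i]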
6861
6862// Shuffle dimensions of x according to a permutation and conjugate the result.
6863//
6864// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
6865//
6866//	`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
6867//	`y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
6868func ConjugateTranspose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
6869	if scope.Err() != nil {
6870		return
6871	}
6872	opspec := tf.OpSpec{
6873		Type: "ConjugateTranspose",
6874		Input: []tf.Input{
6875			x, perm,
6876		},
6877	}
6878	op := scope.AddOperation(opspec)
6879	return op.Output(0)
6880}
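
// A minimal usage sketch (illustrative, not generated code): for a 2-D input,
// perm = [1, 0] gives the conjugate (Hermitian) transpose.
//
//	s := op.NewScope()
//	m := op.Const(s, [][]complex64{{complex(1, 2), complex(3, 4)}})
//	mh := op.ConjugateTranspose(s, m, op.Const(s, []int32{1, 0})) // shape [2, 1]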
6881
6882// An op that sets up communication between TPUEmbedding host software instances
6883//
6884// after ConfigureTPUEmbeddingHost has been called on each host.
6885//
6886// Arguments:
6887//
6888//	network_configs: Strings containing metadata about the hostname and RPC port
6889//
6890// used for communication with all hosts.
6891//
6892// Returns the created operation.
6893func ConnectTPUEmbeddingHosts(scope *Scope, network_configs []tf.Output) (o *tf.Operation) {
6894	if scope.Err() != nil {
6895		return
6896	}
6897	opspec := tf.OpSpec{
6898		Type: "ConnectTPUEmbeddingHosts",
6899		Input: []tf.Input{
6900			tf.OutputList(network_configs),
6901		},
6902	}
6903	return scope.AddOperation(opspec)
6904}
6905
6906// This op consumes a lock created by `MutexLock`.
6907//
6908// This op exists to consume a tensor created by `MutexLock` (other than
6909// direct control dependencies).  It should be the only that consumes the tensor,
6910// and will raise an error if it is not.  Its only purpose is to keep the
6911// mutex lock tensor alive until it is consumed by this op.
6912//
6913// **NOTE**: This operation must run on the same device as its input.  This may
6914// be enforced via the `colocate_with` mechanism.
6915//
6916// Arguments:
6917//
6918//	mutex_lock: A tensor returned by `MutexLock`.
6919//
6920// Returns the created operation.
6921func ConsumeMutexLock(scope *Scope, mutex_lock tf.Output) (o *tf.Operation) {
6922	if scope.Err() != nil {
6923		return
6924	}
6925	opspec := tf.OpSpec{
6926		Type: "ConsumeMutexLock",
6927		Input: []tf.Input{
6928			mutex_lock,
6929		},
6930	}
6931	return scope.AddOperation(opspec)
6932}
6933
6934// Does nothing. Serves as a control trigger for scheduling.
6935//
6936// Only useful as a placeholder for control edges.
6937//
6938// Returns the created operation.
6939func ControlTrigger(scope *Scope) (o *tf.Operation) {
6940	if scope.Err() != nil {
6941		return
6942	}
6943	opspec := tf.OpSpec{
6944		Type: "ControlTrigger",
6945	}
6946	return scope.AddOperation(opspec)
6947}
6948
6949// Conv2DAttr is an optional argument to Conv2D.
6950type Conv2DAttr func(optionalAttr)
6951
6952// Conv2DUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
6953// If not specified, defaults to true
6954func Conv2DUseCudnnOnGpu(value bool) Conv2DAttr {
6955	return func(m optionalAttr) {
6956		m["use_cudnn_on_gpu"] = value
6957	}
6958}
6959
6960// Conv2DExplicitPaddings sets the optional explicit_paddings attribute to value.
6961//
6962// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
6963// dimension, the amount of padding inserted before and after the dimension is
6964// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
6965// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
6966// If not specified, defaults to {}
6967func Conv2DExplicitPaddings(value []int64) Conv2DAttr {
6968	return func(m optionalAttr) {
6969		m["explicit_paddings"] = value
6970	}
6971}
6972
6973// Conv2DDataFormat sets the optional data_format attribute to value.
6974//
6975// value: Specify the data format of the input and output data. With the
6976// default format "NHWC", the data is stored in the order of:
6977//
6978//	[batch, height, width, channels].
6979//
6980// Alternatively, the format could be "NCHW", the data storage order of:
6981//
6982//	[batch, channels, height, width].
6983//
6984// If not specified, defaults to "NHWC"
6985func Conv2DDataFormat(value string) Conv2DAttr {
6986	return func(m optionalAttr) {
6987		m["data_format"] = value
6988	}
6989}
6990
6991// Conv2DDilations sets the optional dilations attribute to value.
6992//
6993// value: 1-D tensor of length 4.  The dilation factor for each dimension of
6994// `input`. If set to k > 1, there will be k-1 skipped cells between each
6995// filter element on that dimension. The dimension order is determined by the
6996// value of `data_format`, see above for details. Dilations in the batch and
6997// depth dimensions must be 1.
6998// If not specified, defaults to {i:1 i:1 i:1 i:1}
6999func Conv2DDilations(value []int64) Conv2DAttr {
7000	return func(m optionalAttr) {
7001		m["dilations"] = value
7002	}
7003}
7004
7005// Computes a 2-D convolution given 4-D `input` and `filter` tensors.
7006//
7007// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
7008// and a filter / kernel tensor of shape
7009// `[filter_height, filter_width, in_channels, out_channels]`, this op
7010// performs the following:
7011//
7012//  1. Flattens the filter to a 2-D matrix with shape
7013//     `[filter_height * filter_width * in_channels, output_channels]`.
7014//  2. Extracts image patches from the input tensor to form a *virtual*
7015//     tensor of shape `[batch, out_height, out_width,
7016//     filter_height * filter_width * in_channels]`.
7017//  3. For each patch, right-multiplies the filter matrix and the image patch
7018//     vector.
7019//
7020// In detail, with the default NHWC format,
7021//
7022//	output[b, i, j, k] =
7023//	    sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
7024//	                    filter[di, dj, q, k]
7025//
7026// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
7027// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
7028//
7029// Arguments:
7030//
7031//	input: A 4-D tensor. The dimension order is interpreted according to the value
7032//
7033// of `data_format`, see below for details.
7034//
7035//	filter: A 4-D tensor of shape
7036//
7037// `[filter_height, filter_width, in_channels, out_channels]`
7038//
7039//	strides: 1-D tensor of length 4.  The stride of the sliding window for each
7040//
7041// dimension of `input`. The dimension order is determined by the value of
7042// `data_format`, see below for details.
7043//
7044//	padding: The type of padding algorithm to use.
7045//
7046// Returns A 4-D tensor. The dimension order is determined by the value of
7047// `data_format`, see below for details.
7048func Conv2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv2DAttr) (output tf.Output) {
7049	if scope.Err() != nil {
7050		return
7051	}
7052	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7053	for _, a := range optional {
7054		a(attrs)
7055	}
7056	opspec := tf.OpSpec{
7057		Type: "Conv2D",
7058		Input: []tf.Input{
7059			input, filter,
7060		},
7061		Attrs: attrs,
7062	}
7063	op := scope.AddOperation(opspec)
7064	return op.Output(0)
7065}
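
// A minimal usage sketch (illustrative, not generated code; the input and
// filter shapes are hypothetical): a stride-1 "SAME" convolution over NHWC
// data, with the batch and depth strides fixed at 1 as required above.
//
//	s := op.NewScope()
//	img := op.Placeholder(s, tf.Float,
//		op.PlaceholderShape(tf.MakeShape(1, 28, 28, 1)))
//	kernel := op.Placeholder(s, tf.Float,
//		op.PlaceholderShape(tf.MakeShape(3, 3, 1, 8))) // [fh, fw, in, out]
//	out := op.Conv2D(s, img, kernel, []int64{1, 1, 1, 1}, "SAME",
//		op.Conv2DDataFormat("NHWC"))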
7066
7067// Conv2DBackpropFilterAttr is an optional argument to Conv2DBackpropFilter.
7068type Conv2DBackpropFilterAttr func(optionalAttr)
7069
7070// Conv2DBackpropFilterUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
7071// If not specified, defaults to true
7072func Conv2DBackpropFilterUseCudnnOnGpu(value bool) Conv2DBackpropFilterAttr {
7073	return func(m optionalAttr) {
7074		m["use_cudnn_on_gpu"] = value
7075	}
7076}
7077
7078// Conv2DBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
7079//
7080// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
7081// dimension, the amount of padding inserted before and after the dimension is
7082// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
7083// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
7084// If not specified, defaults to {}
7085func Conv2DBackpropFilterExplicitPaddings(value []int64) Conv2DBackpropFilterAttr {
7086	return func(m optionalAttr) {
7087		m["explicit_paddings"] = value
7088	}
7089}
7090
7091// Conv2DBackpropFilterDataFormat sets the optional data_format attribute to value.
7092//
7093// value: Specify the data format of the input and output data. With the
7094// default format "NHWC", the data is stored in the order of:
7095//
7096//	[batch, in_height, in_width, in_channels].
7097//
7098// Alternatively, the format could be "NCHW", the data storage order of:
7099//
7100//	[batch, in_channels, in_height, in_width].
7101//
7102// If not specified, defaults to "NHWC"
7103func Conv2DBackpropFilterDataFormat(value string) Conv2DBackpropFilterAttr {
7104	return func(m optionalAttr) {
7105		m["data_format"] = value
7106	}
7107}
7108
7109// Conv2DBackpropFilterDilations sets the optional dilations attribute to value.
7110//
7111// value: 1-D tensor of length 4.  The dilation factor for each dimension of
7112// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
7113// element on that dimension. The dimension order is determined by the value of
7114// `data_format`, see above for details. Dilations in the batch and depth
7115// dimensions must be 1.
7116// If not specified, defaults to {i:1 i:1 i:1 i:1}
7117func Conv2DBackpropFilterDilations(value []int64) Conv2DBackpropFilterAttr {
7118	return func(m optionalAttr) {
7119		m["dilations"] = value
7120	}
7121}
7122
7123// Computes the gradients of convolution with respect to the filter.
7124//
7125// Arguments:
7126//
7127//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
7128//	filter_sizes: An integer vector representing the tensor shape of `filter`,
7129//
7130// where `filter` is a 4-D
7131// `[filter_height, filter_width, in_channels, out_channels]` tensor.
7132//
7133//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
7134//
7135// Gradients w.r.t. the output of the convolution.
7136//
7137//	strides: The stride of the sliding window for each dimension of the input
7138//
7139// of the convolution. Must be in the same order as the dimension specified with
7140// format.
7141//
7142//	padding: The type of padding algorithm to use.
7143//
7144// Returns 4-D with shape
7145// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
7146// the `filter` input of the convolution.
7147func Conv2DBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropFilterAttr) (output tf.Output) {
7148	if scope.Err() != nil {
7149		return
7150	}
7151	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7152	for _, a := range optional {
7153		a(attrs)
7154	}
7155	opspec := tf.OpSpec{
7156		Type: "Conv2DBackpropFilter",
7157		Input: []tf.Input{
7158			input, filter_sizes, out_backprop,
7159		},
7160		Attrs: attrs,
7161	}
7162	op := scope.AddOperation(opspec)
7163	return op.Output(0)
7164}
7165
7166// Conv2DBackpropInputAttr is an optional argument to Conv2DBackpropInput.
7167type Conv2DBackpropInputAttr func(optionalAttr)
7168
7169// Conv2DBackpropInputUseCudnnOnGpu sets the optional use_cudnn_on_gpu attribute to value.
7170// If not specified, defaults to true
7171func Conv2DBackpropInputUseCudnnOnGpu(value bool) Conv2DBackpropInputAttr {
7172	return func(m optionalAttr) {
7173		m["use_cudnn_on_gpu"] = value
7174	}
7175}
7176
7177// Conv2DBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
7178//
7179// value: If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. For the ith
7180// dimension, the amount of padding inserted before and after the dimension is
7181// `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If
7182// `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty.
7183// If not specified, defaults to {}
7184func Conv2DBackpropInputExplicitPaddings(value []int64) Conv2DBackpropInputAttr {
7185	return func(m optionalAttr) {
7186		m["explicit_paddings"] = value
7187	}
7188}
7189
7190// Conv2DBackpropInputDataFormat sets the optional data_format attribute to value.
7191//
7192// value: Specify the data format of the input and output data. With the
7193// default format "NHWC", the data is stored in the order of:
7194//
7195//	[batch, in_height, in_width, in_channels].
7196//
7197// Alternatively, the format could be "NCHW", the data storage order of:
7198//
7199//	[batch, in_channels, in_height, in_width].
7200//
7201// If not specified, defaults to "NHWC"
7202func Conv2DBackpropInputDataFormat(value string) Conv2DBackpropInputAttr {
7203	return func(m optionalAttr) {
7204		m["data_format"] = value
7205	}
7206}
7207
7208// Conv2DBackpropInputDilations sets the optional dilations attribute to value.
7209//
7210// value: 1-D tensor of length 4.  The dilation factor for each dimension of
7211// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
7212// element on that dimension. The dimension order is determined by the value of
7213// `data_format`, see above for details. Dilations in the batch and depth
7214// dimensions must be 1.
7215// If not specified, defaults to {i:1 i:1 i:1 i:1}
7216func Conv2DBackpropInputDilations(value []int64) Conv2DBackpropInputAttr {
7217	return func(m optionalAttr) {
7218		m["dilations"] = value
7219	}
7220}
7221
7222// Computes the gradients of convolution with respect to the input.
7223//
7224// Arguments:
7225//
7226//	input_sizes: An integer vector representing the shape of `input`,
7227//
7228// where `input` is a 4-D `[batch, height, width, channels]` tensor.
7229//
7230//	filter: 4-D with shape
7231//
7232// `[filter_height, filter_width, in_channels, out_channels]`.
7233//
7234//	out_backprop: 4-D with shape `[batch, out_height, out_width, out_channels]`.
7235//
7236// Gradients w.r.t. the output of the convolution.
7237//
7238//	strides: The stride of the sliding window for each dimension of the input
7239//
7240// of the convolution. Must be in the same order as the dimension specified with
7241// format.
7242//
7243//	padding: The type of padding algorithm to use.
7244//
7245// Returns 4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
7246// w.r.t. the input of the convolution.
7247func Conv2DBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv2DBackpropInputAttr) (output tf.Output) {
7248	if scope.Err() != nil {
7249		return
7250	}
7251	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7252	for _, a := range optional {
7253		a(attrs)
7254	}
7255	opspec := tf.OpSpec{
7256		Type: "Conv2DBackpropInput",
7257		Input: []tf.Input{
7258			input_sizes, filter, out_backprop,
7259		},
7260		Attrs: attrs,
7261	}
7262	op := scope.AddOperation(opspec)
7263	return op.Output(0)
7264}
7265
7266// Conv3DAttr is an optional argument to Conv3D.
7267type Conv3DAttr func(optionalAttr)
7268
7269// Conv3DDataFormat sets the optional data_format attribute to value.
7270//
7271// value: The data format of the input and output data. With the
7272// default format "NDHWC", the data is stored in the order of:
7273//
7274//	[batch, in_depth, in_height, in_width, in_channels].
7275//
7276// Alternatively, the format could be "NCDHW", the data storage order is:
7277//
7278//	[batch, in_channels, in_depth, in_height, in_width].
7279//
7280// If not specified, defaults to "NDHWC"
7281func Conv3DDataFormat(value string) Conv3DAttr {
7282	return func(m optionalAttr) {
7283		m["data_format"] = value
7284	}
7285}
7286
7287// Conv3DDilations sets the optional dilations attribute to value.
7288//
7289// value: 1-D tensor of length 5.  The dilation factor for each dimension of
7290// `input`. If set to k > 1, there will be k-1 skipped cells between each
7291// filter element on that dimension. The dimension order is determined by the
7292// value of `data_format`, see above for details. Dilations in the batch and
7293// depth dimensions must be 1.
7294// If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
7295func Conv3DDilations(value []int64) Conv3DAttr {
7296	return func(m optionalAttr) {
7297		m["dilations"] = value
7298	}
7299}
7300
7301// Computes a 3-D convolution given 5-D `input` and `filter` tensors.
7302//
7303// In signal processing, cross-correlation is a measure of similarity of
7304// two waveforms as a function of a time-lag applied to one of them. This
7305// is also known as a sliding dot product or sliding inner-product.
7306//
7307// Our Conv3D implements a form of cross-correlation.
7308//
7309// Arguments:
7310//
7311//	input: Shape `[batch, in_depth, in_height, in_width, in_channels]`.
7312//	filter: Shape `[filter_depth, filter_height, filter_width, in_channels,
7313//
7314// out_channels]`. `in_channels` must match between `input` and `filter`.
7315//
7316//	strides: 1-D tensor of length 5. The stride of the sliding window for each
7317//
7318// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
7319//
7320//	padding: The type of padding algorithm to use.
7321func Conv3D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...Conv3DAttr) (output tf.Output) {
7322	if scope.Err() != nil {
7323		return
7324	}
7325	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7326	for _, a := range optional {
7327		a(attrs)
7328	}
7329	opspec := tf.OpSpec{
7330		Type: "Conv3D",
7331		Input: []tf.Input{
7332			input, filter,
7333		},
7334		Attrs: attrs,
7335	}
7336	op := scope.AddOperation(opspec)
7337	return op.Output(0)
7338}
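
// A minimal usage sketch (illustrative, not generated code; shapes are
// hypothetical): the 5-D analogue of Conv2D over NDHWC data, with
// strides[0] = strides[4] = 1 as required.
//
//	s := op.NewScope()
//	vol := op.Placeholder(s, tf.Float,
//		op.PlaceholderShape(tf.MakeShape(1, 16, 64, 64, 1)))
//	k := op.Placeholder(s, tf.Float,
//		op.PlaceholderShape(tf.MakeShape(3, 3, 3, 1, 8)))
//	out := op.Conv3D(s, vol, k, []int64{1, 1, 1, 1, 1}, "SAME")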
7339
7340// Conv3DBackpropFilterAttr is an optional argument to Conv3DBackpropFilter.
7341type Conv3DBackpropFilterAttr func(optionalAttr)
7342
7343// Conv3DBackpropFilterDilations sets the optional dilations attribute to value.
7344// If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
7345func Conv3DBackpropFilterDilations(value []int64) Conv3DBackpropFilterAttr {
7346	return func(m optionalAttr) {
7347		m["dilations"] = value
7348	}
7349}
7350
7351// Computes the gradients of 3-D convolution with respect to the filter.
7352//
7353// DEPRECATED at GraphDef version 10: Use Conv3DBackpropFilterV2
7354//
7355// Arguments:
7356//
7357//	input: Shape `[batch, depth, rows, cols, in_channels]`.
7358//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
7359//
7360// `in_channels` must match between `input` and `filter`.
7361//
7362//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
7363//
7364// out_channels]`.
7365//
7366//	strides: 1-D tensor of length 5. The stride of the sliding window for each
7367//
7368// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
7369//
7370//	padding: The type of padding algorithm to use.
7371func Conv3DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterAttr) (output tf.Output) {
7372	if scope.Err() != nil {
7373		return
7374	}
7375	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7376	for _, a := range optional {
7377		a(attrs)
7378	}
7379	opspec := tf.OpSpec{
7380		Type: "Conv3DBackpropFilter",
7381		Input: []tf.Input{
7382			input, filter, out_backprop,
7383		},
7384		Attrs: attrs,
7385	}
7386	op := scope.AddOperation(opspec)
7387	return op.Output(0)
7388}
7389
7390// Conv3DBackpropFilterV2Attr is an optional argument to Conv3DBackpropFilterV2.
7391type Conv3DBackpropFilterV2Attr func(optionalAttr)
7392
7393// Conv3DBackpropFilterV2DataFormat sets the optional data_format attribute to value.
7394//
7395// value: The data format of the input and output data. With the
7396// default format "NDHWC", the data is stored in the order of:
7397//
7398//	[batch, in_depth, in_height, in_width, in_channels].
7399//
7400// Alternatively, the format could be "NCDHW", the data storage order is:
7401//
7402//	[batch, in_channels, in_depth, in_height, in_width].
7403//
7404// If not specified, defaults to "NDHWC"
7405func Conv3DBackpropFilterV2DataFormat(value string) Conv3DBackpropFilterV2Attr {
7406	return func(m optionalAttr) {
7407		m["data_format"] = value
7408	}
7409}
7410
7411// Conv3DBackpropFilterV2Dilations sets the optional dilations attribute to value.
7412//
7413// value: 1-D tensor of length 5.  The dilation factor for each dimension of
7414// `input`. If set to k > 1, there will be k-1 skipped cells between each
7415// filter element on that dimension. The dimension order is determined by the
7416// value of `data_format`, see above for details. Dilations in the batch and
7417// depth dimensions must be 1.
7418// If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
7419func Conv3DBackpropFilterV2Dilations(value []int64) Conv3DBackpropFilterV2Attr {
7420	return func(m optionalAttr) {
7421		m["dilations"] = value
7422	}
7423}
7424
7425// Computes the gradients of 3-D convolution with respect to the filter.
7426//
7427// Arguments:
7428//
7429//	input: Shape `[batch, depth, rows, cols, in_channels]`.
7430//	filter_sizes: An integer vector representing the tensor shape of `filter`,
7431//
7432// where `filter` is a 5-D
7433// `[filter_depth, filter_height, filter_width, in_channels, out_channels]`
7434// tensor.
7435//
7436//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
7437//
7438// out_channels]`.
7439//
7440//	strides: 1-D tensor of length 5. The stride of the sliding window for each
7441//
7442// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
7443//
7444//	padding: The type of padding algorithm to use.
7445func Conv3DBackpropFilterV2(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropFilterV2Attr) (output tf.Output) {
7446	if scope.Err() != nil {
7447		return
7448	}
7449	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7450	for _, a := range optional {
7451		a(attrs)
7452	}
7453	opspec := tf.OpSpec{
7454		Type: "Conv3DBackpropFilterV2",
7455		Input: []tf.Input{
7456			input, filter_sizes, out_backprop,
7457		},
7458		Attrs: attrs,
7459	}
7460	op := scope.AddOperation(opspec)
7461	return op.Output(0)
7462}
7463
7464// Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput.
7465type Conv3DBackpropInputAttr func(optionalAttr)
7466
7467// Conv3DBackpropInputDilations sets the optional dilations attribute to value.
7468// If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
7469func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr {
7470	return func(m optionalAttr) {
7471		m["dilations"] = value
7472	}
7473}
7474
7475// Computes the gradients of 3-D convolution with respect to the input.
7476//
7477// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2
7478//
7479// Arguments:
7480//
7481//	input: Shape `[batch, depth, rows, cols, in_channels]`.
7482//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
7483//
7484// `in_channels` must match between `input` and `filter`.
7485//
7486//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
7487//
7488// out_channels]`.
7489//
7490//	strides: 1-D tensor of length 5. The stride of the sliding window for each
7491//
7492// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
7493//
7494//	padding: The type of padding algorithm to use.
7495func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output) {
7496	if scope.Err() != nil {
7497		return
7498	}
7499	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7500	for _, a := range optional {
7501		a(attrs)
7502	}
7503	opspec := tf.OpSpec{
7504		Type: "Conv3DBackpropInput",
7505		Input: []tf.Input{
7506			input, filter, out_backprop,
7507		},
7508		Attrs: attrs,
7509	}
7510	op := scope.AddOperation(opspec)
7511	return op.Output(0)
7512}
7513
7514// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2.
7515type Conv3DBackpropInputV2Attr func(optionalAttr)
7516
7517// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value.
7518//
7519// value: The data format of the input and output data. With the
7520// default format "NDHWC", the data is stored in the order of:
7521//
7522//	[batch, in_depth, in_height, in_width, in_channels].
7523//
7524// Alternatively, the format could be "NCDHW", the data storage order is:
7525//
7526//	[batch, in_channels, in_depth, in_height, in_width].
7527//
7528// If not specified, defaults to "NDHWC"
7529func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr {
7530	return func(m optionalAttr) {
7531		m["data_format"] = value
7532	}
7533}
7534
7535// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value.
7536//
7537// value: 1-D tensor of length 5.  The dilation factor for each dimension of
7538// `input`. If set to k > 1, there will be k-1 skipped cells between each
7539// filter element on that dimension. The dimension order is determined by the
7540// value of `data_format`, see above for details. Dilations in the batch and
7541// depth dimensions must be 1.
7542// If not specified, defaults to {i:1 i:1 i:1 i:1 i:1}
7543func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr {
7544	return func(m optionalAttr) {
7545		m["dilations"] = value
7546	}
7547}
7548
7549// Computes the gradients of 3-D convolution with respect to the input.
7550//
7551// Arguments:
7552//
7553//	input_sizes: An integer vector representing the tensor shape of `input`,
7554//
7555// where `input` is a 5-D
7556// `[batch, depth, rows, cols, in_channels]` tensor.
7557//
7558//	filter: Shape `[depth, rows, cols, in_channels, out_channels]`.
7559//
7560// `in_channels` must match between `input` and `filter`.
7561//
7562//	out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
7563//
7564// out_channels]`.
7565//
7566//	strides: 1-D tensor of length 5. The stride of the sliding window for each
7567//
7568// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
7569//
7570//	padding: The type of padding algorithm to use.
7571func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) {
7572	if scope.Err() != nil {
7573		return
7574	}
7575	attrs := map[string]interface{}{"strides": strides, "padding": padding}
7576	for _, a := range optional {
7577		a(attrs)
7578	}
7579	opspec := tf.OpSpec{
7580		Type: "Conv3DBackpropInputV2",
7581		Input: []tf.Input{
7582			input_sizes, filter, out_backprop,
7583		},
7584		Attrs: attrs,
7585	}
7586	op := scope.AddOperation(opspec)
7587	return op.Output(0)
7588}
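
// The sketch below is hand-written (not machine generated) and shows one way
// Conv3DBackpropInputV2 might be wired up to recover the gradient with respect
// to a convolution's input. The shapes, strides, and padding are illustrative
// assumptions, not requirements beyond those documented above.
func exampleConv3DBackpropInputV2(scope *Scope, filter, outBackprop tf.Output) tf.Output {
	// input_sizes describes the 5-D input whose gradient is wanted:
	// [batch, depth, rows, cols, in_channels].
	inputSizes := Const(scope.SubScope("input_sizes"), []int32{1, 8, 16, 16, 3})
	// strides[0] and strides[4] must be 1, per the documentation above.
	strides := []int64{1, 1, 1, 1, 1}
	return Conv3DBackpropInputV2(scope, inputSizes, filter, outBackprop,
		strides, "SAME", Conv3DBackpropInputV2DataFormat("NDHWC"))
}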
7589
7590// CopyAttr is an optional argument to Copy.
7591type CopyAttr func(optionalAttr)
7592
7593// CopyTensorName sets the optional tensor_name attribute to value.
7594//
7595// value: The name of the input tensor.
7596// If not specified, defaults to ""
7597func CopyTensorName(value string) CopyAttr {
7598	return func(m optionalAttr) {
7599		m["tensor_name"] = value
7600	}
7601}
7602
7603// CopyDebugOpsSpec sets the optional debug_ops_spec attribute to value.
7604//
7605// value: A list of debug op specs (op, url, gated_grpc) for attached debug
7606// ops. Each element of the list has the format
7607// <debug_op>;<grpc_url>;<gated_grpc>, where gated_grpc is a boolean represented
7608// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
7609// "DebugIdentity;file:///tmp/tfdbg_1;0".
7610// If not specified, defaults to {}
7611func CopyDebugOpsSpec(value []string) CopyAttr {
7612	return func(m optionalAttr) {
7613		m["debug_ops_spec"] = value
7614	}
7615}
7616
7617// Copy a tensor from CPU-to-CPU or GPU-to-GPU.
7618//
7619// Performs CPU-to-CPU or GPU-to-GPU deep-copying of a tensor, depending on the
7620// device on which the tensor is allocated.
7621// N.B.: If all the downstream attached debug ops are disabled given the current
7622// gRPC gating status, the output will simply forward the input tensor without
7623// deep-copying. See the documentation of Debug* ops for more details.
7624//
7625// Unlike the CopyHost Op, this op does not have a HostMemory constraint on its
7626// input or output.
7627//
7628// Arguments:
7629//
7630//	input: Input tensor.
7631func Copy(scope *Scope, input tf.Output, optional ...CopyAttr) (output tf.Output) {
7632	if scope.Err() != nil {
7633		return
7634	}
7635	attrs := map[string]interface{}{}
7636	for _, a := range optional {
7637		a(attrs)
7638	}
7639	opspec := tf.OpSpec{
7640		Type: "Copy",
7641		Input: []tf.Input{
7642			input,
7643		},
7644		Attrs: attrs,
7645	}
7646	op := scope.AddOperation(opspec)
7647	return op.Output(0)
7648}
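
// A minimal hand-written sketch (not generated) of attaching the optional
// attributes to Copy. The tensor name and the debug spec string are
// hypothetical; the spec follows the <debug_op>;<grpc_url>;<gated_grpc>
// format described above.
func exampleCopy(scope *Scope, x tf.Output) tf.Output {
	return Copy(scope, x,
		CopyTensorName("my_tensor"),
		CopyDebugOpsSpec([]string{"DebugIdentity;grpc://localhost:3333;1"}))
}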
7649
7650// CopyHostAttr is an optional argument to CopyHost.
7651type CopyHostAttr func(optionalAttr)
7652
7653// CopyHostTensorName sets the optional tensor_name attribute to value.
7654//
7655// value: The name of the input tensor.
7656// If not specified, defaults to ""
7657func CopyHostTensorName(value string) CopyHostAttr {
7658	return func(m optionalAttr) {
7659		m["tensor_name"] = value
7660	}
7661}
7662
7663// CopyHostDebugOpsSpec sets the optional debug_ops_spec attribute to value.
7664//
7665// value: A list of debug op specs (op, url, gated_grpc) for attached debug
7666// ops. Each element of the list has the format
7667// <debug_op>;<grpc_url>;<gated_grpc>, where gated_grpc is a boolean represented
7668// as 0/1. E.g., "DebugIdentity;grpc://foo:3333;1",
7669// "DebugIdentity;file:///tmp/tfdbg_1;0".
7670// If not specified, defaults to {}
7671func CopyHostDebugOpsSpec(value []string) CopyHostAttr {
7672	return func(m optionalAttr) {
7673		m["debug_ops_spec"] = value
7674	}
7675}
7676
7677// Copy a tensor to host.
7678//
7679// Performs CPU-to-CPU deep-copying of a tensor.
7680// N.B.: If all the downstream attached debug ops are disabled given the current
7681// gRPC gating status, the output will simply forward the input tensor without
7682// deep-copying. See the documentation of Debug* ops for more details.
7683//
7684// Unlike the Copy Op, this op has a HostMemory constraint on its input or output.
7685//
7686// Arguments:
7687//
7688//	input: Input tensor.
7689func CopyHost(scope *Scope, input tf.Output, optional ...CopyHostAttr) (output tf.Output) {
7690	if scope.Err() != nil {
7691		return
7692	}
7693	attrs := map[string]interface{}{}
7694	for _, a := range optional {
7695		a(attrs)
7696	}
7697	opspec := tf.OpSpec{
7698		Type: "CopyHost",
7699		Input: []tf.Input{
7700			input,
7701		},
7702		Attrs: attrs,
7703	}
7704	op := scope.AddOperation(opspec)
7705	return op.Output(0)
7706}
7707
7708// Computes cos of x element-wise.
7709//
7710//	Given an input tensor, this function computes the cosine of every
7711//	element in the tensor. The input range is `(-inf, inf)` and the
7712//	output range is `[-1,1]`. Inputs outside this range (i.e. `±inf`)
7713//	yield `nan`.
7714//
7715//	```python
7716//	x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
7717//	tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
7718//	```
7719func Cos(scope *Scope, x tf.Output) (y tf.Output) {
7720	if scope.Err() != nil {
7721		return
7722	}
7723	opspec := tf.OpSpec{
7724		Type: "Cos",
7725		Input: []tf.Input{
7726			x,
7727		},
7728	}
7729	op := scope.AddOperation(opspec)
7730	return op.Output(0)
7731}
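
// For parity with the Python snippet above, a hand-written Go sketch (not
// part of the generated API) that builds and evaluates Cos end to end. Error
// handling is compressed for brevity.
func exampleCos() ([]*tf.Tensor, error) {
	s := NewScope()
	x := Const(s.SubScope("x"), []float32{-9, -0.5, 1, 1.2})
	y := Cos(s, x)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	// The single fetched tensor holds the element-wise cosines.
	return sess.Run(nil, []tf.Output{y}, nil)
}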
7732
7733// Computes hyperbolic cosine of x element-wise.
7734//
7735//	Given an input tensor, this function computes the hyperbolic cosine of every
7736//	element in the tensor. The input range is `[-inf, inf]` and the output range
7737//	is `[1, inf]`.
7738//
7739//	```python
7740//	x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
7741//	tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
7742//	```
7743func Cosh(scope *Scope, x tf.Output) (y tf.Output) {
7744	if scope.Err() != nil {
7745		return
7746	}
7747	opspec := tf.OpSpec{
7748		Type: "Cosh",
7749		Input: []tf.Input{
7750			x,
7751		},
7752	}
7753	op := scope.AddOperation(opspec)
7754	return op.Output(0)
7755}
7756
7757// CropAndResizeAttr is an optional argument to CropAndResize.
7758type CropAndResizeAttr func(optionalAttr)
7759
7760// CropAndResizeMethod sets the optional method attribute to value.
7761//
7762// value: A string specifying the sampling method for resizing. It can be either
7763// `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling
7764// methods are supported: Bilinear and Nearest Neighbor.
7765// If not specified, defaults to "bilinear"
7766func CropAndResizeMethod(value string) CropAndResizeAttr {
7767	return func(m optionalAttr) {
7768		m["method"] = value
7769	}
7770}
7771
7772// CropAndResizeExtrapolationValue sets the optional extrapolation_value attribute to value.
7773//
7774// value: Value used for extrapolation, when applicable.
7775// If not specified, defaults to 0
7776func CropAndResizeExtrapolationValue(value float32) CropAndResizeAttr {
7777	return func(m optionalAttr) {
7778		m["extrapolation_value"] = value
7779	}
7780}
7781
7782// Extracts crops from the input image tensor and resizes them.
7783//
7784// Extracts crops from the input image tensor and resizes them using bilinear
7785// sampling or nearest neighbor sampling (possibly with aspect ratio change) to a
7786// common output size specified by `crop_size`. This is more general than the
7787// `crop_to_bounding_box` op which extracts a fixed size slice from the input image
7788// and does not allow resizing or aspect ratio change.
7789//
7790// Returns a tensor with `crops` from the input `image` at positions defined at the
7791// bounding box locations in `boxes`. The cropped boxes are all resized (with
7792// bilinear or nearest neighbor interpolation) to a fixed
7793// `size = [crop_height, crop_width]`. The result is a 4-D tensor
7794// `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned.
7795// In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical
7796// results to using `tf.image.resize_bilinear()` or
7797// `tf.image.resize_nearest_neighbor()` (depending on the `method` argument) with
7798// `align_corners=True`.
7799//
7800// Arguments:
7801//
7802//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
7803//
7804// Both `image_height` and `image_width` need to be positive.
7805//
7806//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
7807//
7808// specifies the coordinates of a box in the `box_ind[i]` image and is specified
7809// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
7810// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the
7811// `[0, 1]` interval of normalized image height is mapped to
7812// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
7813// which case the sampled crop is an up-down flipped version of the original
7814// image. The width dimension is treated similarly. Normalized coordinates
7815// outside the `[0, 1]` range are allowed, in which case we use
7816// `extrapolation_value` to extrapolate the input image values.
7817//
7818//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
7819//
7820// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
7821//
7822//	crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All
7823//
7824// cropped image patches are resized to this size. The aspect ratio of the image
7825// content is not preserved. Both `crop_height` and `crop_width` need to be
7826// positive.
7827//
7828// Returns A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
7829func CropAndResize(scope *Scope, image tf.Output, boxes tf.Output, box_ind tf.Output, crop_size tf.Output, optional ...CropAndResizeAttr) (crops tf.Output) {
7830	if scope.Err() != nil {
7831		return
7832	}
7833	attrs := map[string]interface{}{}
7834	for _, a := range optional {
7835		a(attrs)
7836	}
7837	opspec := tf.OpSpec{
7838		Type: "CropAndResize",
7839		Input: []tf.Input{
7840			image, boxes, box_ind, crop_size,
7841		},
7842		Attrs: attrs,
7843	}
7844	op := scope.AddOperation(opspec)
7845	return op.Output(0)
7846}
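
// A hand-written illustrative sketch (not generated): cropping two boxes out
// of the first image in the batch and resizing both to 24x24. The box
// coordinates are normalized [y1, x1, y2, x2] values, as documented above.
func exampleCropAndResize(scope *Scope, image tf.Output) tf.Output {
	boxes := Const(scope.SubScope("boxes"), [][]float32{
		{0, 0, 1, 1},             // the whole image
		{0.25, 0.25, 0.75, 0.75}, // a centered crop
	})
	boxInd := Const(scope.SubScope("box_ind"), []int32{0, 0}) // both boxes refer to image 0
	cropSize := Const(scope.SubScope("crop_size"), []int32{24, 24})
	return CropAndResize(scope, image, boxes, boxInd, cropSize,
		CropAndResizeMethod("bilinear"),
		CropAndResizeExtrapolationValue(0))
}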
7847
7848// CropAndResizeGradBoxesAttr is an optional argument to CropAndResizeGradBoxes.
7849type CropAndResizeGradBoxesAttr func(optionalAttr)
7850
7851// CropAndResizeGradBoxesMethod sets the optional method attribute to value.
7852//
7853// value: A string specifying the interpolation method. Only 'bilinear' is
7854// supported for now.
7855// If not specified, defaults to "bilinear"
7856func CropAndResizeGradBoxesMethod(value string) CropAndResizeGradBoxesAttr {
7857	return func(m optionalAttr) {
7858		m["method"] = value
7859	}
7860}
7861
7862// Computes the gradient of the crop_and_resize op wrt the input boxes tensor.
7863//
7864// Arguments:
7865//
7866//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
7867//	image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
7868//
7869// Both `image_height` and `image_width` need to be positive.
7870//
7871//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
7872//
7873// specifies the coordinates of a box in the `box_ind[i]` image and is specified
7874// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
7875// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the
7876// `[0, 1]` interval of normalized image height is mapped to
7877// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
7878// which case the sampled crop is an up-down flipped version of the original
7879// image. The width dimension is treated similarly. Normalized coordinates
7880// outside the `[0, 1]` range are allowed, in which case we use
7881// `extrapolation_value` to extrapolate the input image values.
7882//
7883//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
7884//
7885// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
7886//
7887// Returns A 2-D tensor of shape `[num_boxes, 4]`.
7888func CropAndResizeGradBoxes(scope *Scope, grads tf.Output, image tf.Output, boxes tf.Output, box_ind tf.Output, optional ...CropAndResizeGradBoxesAttr) (output tf.Output) {
7889	if scope.Err() != nil {
7890		return
7891	}
7892	attrs := map[string]interface{}{}
7893	for _, a := range optional {
7894		a(attrs)
7895	}
7896	opspec := tf.OpSpec{
7897		Type: "CropAndResizeGradBoxes",
7898		Input: []tf.Input{
7899			grads, image, boxes, box_ind,
7900		},
7901		Attrs: attrs,
7902	}
7903	op := scope.AddOperation(opspec)
7904	return op.Output(0)
7905}
7906
7907// CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage.
7908type CropAndResizeGradImageAttr func(optionalAttr)
7909
7910// CropAndResizeGradImageMethod sets the optional method attribute to value.
7911//
7912// value: A string specifying the interpolation method. Only 'bilinear' is
7913// supported for now.
7914// If not specified, defaults to "bilinear"
7915func CropAndResizeGradImageMethod(value string) CropAndResizeGradImageAttr {
7916	return func(m optionalAttr) {
7917		m["method"] = value
7918	}
7919}
7920
7921// Computes the gradient of the crop_and_resize op wrt the input image tensor.
7922//
7923// Arguments:
7924//
7925//	grads: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`.
7926//	boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor
7927//
7928// specifies the coordinates of a box in the `box_ind[i]` image and is specified
7929// in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of
7930// `y` is mapped to the image coordinate at `y * (image_height - 1)`, so that the
7931// `[0, 1]` interval of normalized image height is mapped to
7932// `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in
7933// which case the sampled crop is an up-down flipped version of the original
7934// image. The width dimension is treated similarly. Normalized coordinates
7935// outside the `[0, 1]` range are allowed, in which case we use
7936// `extrapolation_value` to extrapolate the input image values.
7937//
7938//	box_ind: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.
7939//
7940// The value of `box_ind[i]` specifies the image that the `i`-th box refers to.
7941//
7942//	image_size: A 1-D tensor with value `[batch, image_height, image_width, depth]`
7943//
7944// containing the original image size. Both `image_height` and `image_width` need
7945// to be positive.
7946//
7947// Returns A 4-D tensor of shape `[batch, image_height, image_width, depth]`.
7948func CropAndResizeGradImage(scope *Scope, grads tf.Output, boxes tf.Output, box_ind tf.Output, image_size tf.Output, T tf.DataType, optional ...CropAndResizeGradImageAttr) (output tf.Output) {
7949	if scope.Err() != nil {
7950		return
7951	}
7952	attrs := map[string]interface{}{"T": T}
7953	for _, a := range optional {
7954		a(attrs)
7955	}
7956	opspec := tf.OpSpec{
7957		Type: "CropAndResizeGradImage",
7958		Input: []tf.Input{
7959			grads, boxes, box_ind, image_size,
7960		},
7961		Attrs: attrs,
7962	}
7963	op := scope.AddOperation(opspec)
7964	return op.Output(0)
7965}
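
// A hand-written sketch (not generated) of the image-gradient op. Note the
// extra T argument, which gives the dtype of the returned gradient; the
// 1x32x32x3 original image size is an illustrative assumption.
func exampleCropAndResizeGradImage(scope *Scope, grads, boxes, boxInd tf.Output) tf.Output {
	imageSize := Const(scope.SubScope("image_size"), []int32{1, 32, 32, 3})
	return CropAndResizeGradImage(scope, grads, boxes, boxInd, imageSize,
		tf.Float, CropAndResizeGradImageMethod("bilinear"))
}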
7966
7967// Compute the pairwise cross product.
7968//
7969// `a` and `b` must be the same shape; they can either be simple 3-element vectors,
7970// or any shape where the innermost dimension is 3. In the latter case, each pair
7971// of corresponding 3-element vectors is cross-multiplied independently.
7972//
7973// Arguments:
7974//
7975//	a: A tensor containing 3-element vectors.
7976//	b: Another tensor, of same type and shape as `a`.
7977//
7978// Returns Pairwise cross product of the vectors in `a` and `b`.
7979func Cross(scope *Scope, a tf.Output, b tf.Output) (product tf.Output) {
7980	if scope.Err() != nil {
7981		return
7982	}
7983	opspec := tf.OpSpec{
7984		Type: "Cross",
7985		Input: []tf.Input{
7986			a, b,
7987		},
7988	}
7989	op := scope.AddOperation(opspec)
7990	return op.Output(0)
7991}
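
// A small hand-written sketch (not generated): the cross product of two
// constant 3-element vectors; cross([1,0,0], [0,1,0]) yields [0,0,1].
func exampleCross(scope *Scope) tf.Output {
	a := Const(scope.SubScope("a"), []float32{1, 0, 0})
	b := Const(scope.SubScope("b"), []float32{0, 1, 0})
	return Cross(scope, a, b)
}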
7992
7993// An Op to sum inputs across replicated TPU instances.
7994//
7995// Each instance supplies its own input.
7996//
7997// For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
7998// Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
7999// and `B, D, F, H` as group 1. Thus we get the outputs:
8000// `[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
8001//
8002// Arguments:
8003//
8004//	input: The local input to the sum.
8005//	group_assignment: An int32 tensor with shape
8006//
8007// [num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
8008// replica ids in the ith subgroup.
8009//
8010// Returns The sum of all the distributed inputs.
8011func CrossReplicaSum(scope *Scope, input tf.Output, group_assignment tf.Output) (output tf.Output) {
8012	if scope.Err() != nil {
8013		return
8014	}
8015	opspec := tf.OpSpec{
8016		Type: "CrossReplicaSum",
8017		Input: []tf.Input{
8018			input, group_assignment,
8019		},
8020	}
8021	op := scope.AddOperation(opspec)
8022	return op.Output(0)
8023}
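
// A hand-written sketch (not generated) matching the 8-replica example above:
// replicas {0,2,4,6} and {1,3,5,7} form two independent all-reduce groups.
func exampleCrossReplicaSum(scope *Scope, input tf.Output) tf.Output {
	groups := Const(scope.SubScope("group_assignment"),
		[][]int32{{0, 2, 4, 6}, {1, 3, 5, 7}})
	return CrossReplicaSum(scope, input, groups)
}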
8024
8025// CudnnRNNAttr is an optional argument to CudnnRNN.
8026type CudnnRNNAttr func(optionalAttr)
8027
8028// CudnnRNNRnnMode sets the optional rnn_mode attribute to value.
8029// If not specified, defaults to "lstm"
8030func CudnnRNNRnnMode(value string) CudnnRNNAttr {
8031	return func(m optionalAttr) {
8032		m["rnn_mode"] = value
8033	}
8034}
8035
8036// CudnnRNNInputMode sets the optional input_mode attribute to value.
8037// If not specified, defaults to "linear_input"
8038func CudnnRNNInputMode(value string) CudnnRNNAttr {
8039	return func(m optionalAttr) {
8040		m["input_mode"] = value
8041	}
8042}
8043
8044// CudnnRNNDirection sets the optional direction attribute to value.
8045// If not specified, defaults to "unidirectional"
8046func CudnnRNNDirection(value string) CudnnRNNAttr {
8047	return func(m optionalAttr) {
8048		m["direction"] = value
8049	}
8050}
8051
8052// CudnnRNNDropout sets the optional dropout attribute to value.
8053// If not specified, defaults to 0
8054func CudnnRNNDropout(value float32) CudnnRNNAttr {
8055	return func(m optionalAttr) {
8056		m["dropout"] = value
8057	}
8058}
8059
8060// CudnnRNNSeed sets the optional seed attribute to value.
8061// If not specified, defaults to 0
8062func CudnnRNNSeed(value int64) CudnnRNNAttr {
8063	return func(m optionalAttr) {
8064		m["seed"] = value
8065	}
8066}
8067
8068// CudnnRNNSeed2 sets the optional seed2 attribute to value.
8069// If not specified, defaults to 0
8070func CudnnRNNSeed2(value int64) CudnnRNNAttr {
8071	return func(m optionalAttr) {
8072		m["seed2"] = value
8073	}
8074}
8075
8076// CudnnRNNIsTraining sets the optional is_training attribute to value.
8077// If not specified, defaults to true
8078func CudnnRNNIsTraining(value bool) CudnnRNNAttr {
8079	return func(m optionalAttr) {
8080		m["is_training"] = value
8081	}
8082}
8083
8084// An RNN backed by cuDNN.
8085//
8086// Computes the RNN from the input and initial states, with respect to the params
8087// buffer.
8088//
8089// rnn_mode: Indicates the type of the RNN model.
8090// input_mode: Indicates whether there is a linear projection between the input and
8091//
8092//	the actual computation before the first layer. 'skip_input' is only allowed
8093//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8094//	input_size == num_units; otherwise, it implies 'linear_input'.
8095//
8096// direction: Indicates whether a bidirectional model will be used. Should be
8097//
8098//	"unidirectional" or "bidirectional".
8099//
8100// dropout: Dropout probability. When set to 0., dropout is disabled.
8101// seed: The 1st part of a seed to initialize dropout.
8102// seed2: The 2nd part of a seed to initialize dropout.
8103// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
8104// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
8105//
8106//	num_units].
8107//
8108// input_c: For LSTM, a 3-D tensor with the shape of
8109//
8110//	[num_layer * dir, batch, num_units]. For other models, it is ignored.
8111//
8112// params: A 1-D tensor that contains the weights and biases in an opaque layout.
8113//
8114//	The size must be created through CudnnRNNParamsSize, and initialized
8115//	separately. Note that they might not be compatible across different
8116//	generations. So it is a good idea to save and restore them in canonical form.
8117//
8118// output: A 3-D tensor with the shape of [seq_length, batch_size,
8119//
8120//	dir * num_units].
8121//
8122// output_h: The same shape as input_h.
8123// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
8124// is_training: Indicates whether this operation is used for inference or
8125//
8126//	training.
8127//
8128// reserve_space: An opaque tensor that can be used in backprop calculation. It
8129//
8130//	is only produced if is_training is true.
8131func CudnnRNN(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNAttr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output) {
8132	if scope.Err() != nil {
8133		return
8134	}
8135	attrs := map[string]interface{}{}
8136	for _, a := range optional {
8137		a(attrs)
8138	}
8139	opspec := tf.OpSpec{
8140		Type: "CudnnRNN",
8141		Input: []tf.Input{
8142			input, input_h, input_c, params,
8143		},
8144		Attrs: attrs,
8145	}
8146	op := scope.AddOperation(opspec)
8147	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
8148}
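
// A hand-written sketch (not generated) of invoking CudnnRNN with a few of
// the optional attributes. The four tensor arguments are assumed to have been
// built elsewhere with the shapes documented above, and params sized via
// CudnnRNNParamsSize.
func exampleCudnnRNN(scope *Scope, input, inputH, inputC, params tf.Output) (output, outputH, outputC, reserveSpace tf.Output) {
	return CudnnRNN(scope, input, inputH, inputC, params,
		CudnnRNNRnnMode("lstm"),
		CudnnRNNDirection("unidirectional"),
		CudnnRNNDropout(0.1),
		// reserve_space is only produced when training.
		CudnnRNNIsTraining(true))
}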
8149
8150// CudnnRNNBackpropAttr is an optional argument to CudnnRNNBackprop.
8151type CudnnRNNBackpropAttr func(optionalAttr)
8152
8153// CudnnRNNBackpropRnnMode sets the optional rnn_mode attribute to value.
8154// If not specified, defaults to "lstm"
8155func CudnnRNNBackpropRnnMode(value string) CudnnRNNBackpropAttr {
8156	return func(m optionalAttr) {
8157		m["rnn_mode"] = value
8158	}
8159}
8160
8161// CudnnRNNBackpropInputMode sets the optional input_mode attribute to value.
8162// If not specified, defaults to "linear_input"
8163func CudnnRNNBackpropInputMode(value string) CudnnRNNBackpropAttr {
8164	return func(m optionalAttr) {
8165		m["input_mode"] = value
8166	}
8167}
8168
8169// CudnnRNNBackpropDirection sets the optional direction attribute to value.
8170// If not specified, defaults to "unidirectional"
8171func CudnnRNNBackpropDirection(value string) CudnnRNNBackpropAttr {
8172	return func(m optionalAttr) {
8173		m["direction"] = value
8174	}
8175}
8176
8177// CudnnRNNBackpropDropout sets the optional dropout attribute to value.
8178// If not specified, defaults to 0
8179func CudnnRNNBackpropDropout(value float32) CudnnRNNBackpropAttr {
8180	return func(m optionalAttr) {
8181		m["dropout"] = value
8182	}
8183}
8184
8185// CudnnRNNBackpropSeed sets the optional seed attribute to value.
8186// If not specified, defaults to 0
8187func CudnnRNNBackpropSeed(value int64) CudnnRNNBackpropAttr {
8188	return func(m optionalAttr) {
8189		m["seed"] = value
8190	}
8191}
8192
8193// CudnnRNNBackpropSeed2 sets the optional seed2 attribute to value.
8194// If not specified, defaults to 0
8195func CudnnRNNBackpropSeed2(value int64) CudnnRNNBackpropAttr {
8196	return func(m optionalAttr) {
8197		m["seed2"] = value
8198	}
8199}
8200
8201// Backprop step of CudnnRNN.
8202//
8203// Computes the backprop of both data and weights in an RNN.
8204//
8205// rnn_mode: Indicates the type of the RNN model.
8206// input_mode: Indicates whether there is a linear projection between the input and
8207//
8208//	the actual computation before the first layer. 'skip_input' is only allowed
8209//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8210//	input_size == num_units; otherwise, it implies 'linear_input'.
8211//
8212// direction: Indicates whether a bidirectional model will be used. Should be
8213//
8214//	"unidirectional" or "bidirectional".
8215//
8216// dropout: Dropout probability. When set to 0., dropout is disabled.
8217// seed: The 1st part of a seed to initialize dropout.
8218// seed2: The 2nd part of a seed to initialize dropout.
8219// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
8220// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
8221//
8222//	num_units].
8223//
8224// input_c: For LSTM, a 3-D tensor with the shape of
8225//
8226//	[num_layer * dir, batch, num_units]. For other models, it is ignored.
8227//
8228// params: A 1-D tensor that contains the weights and biases in an opaque layout.
8229//
8230//	The size must be created through CudnnRNNParamsSize, and initialized
8231//	separately. Note that they might not be compatible across different
8232//	generations. So it is a good idea to save and restore them in canonical form.
8233//
8234// output: A 3-D tensor with the shape of [seq_length, batch_size,
8235//
8236//	dir * num_units].
8237//
8238// output_h: The same shape as input_h.
8239// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
8240// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
8241// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
8242//
8243//	pass.
8244//
8245// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
8246//
8247//	pass.
8248//
8249// reserve_space: The same reserve_space produced in the forward operation.
8250// input_backprop: The backprop to input in the forward pass. Has the same shape
8251//
8252//	as input.
8253//
8254// input_h_backprop: The backprop to input_h in the forward pass. Has the same
8255//
8256//	shape as input_h.
8257//
8258// input_c_backprop: The backprop to input_c in the forward pass. Has the same
8259//
8260//	shape as input_c.
8261//
8262// params_backprop: The backprop to the params buffer in the forward pass. Has the
8263//
8264//	same shape as params.
8265func CudnnRNNBackprop(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, optional ...CudnnRNNBackpropAttr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
8266	if scope.Err() != nil {
8267		return
8268	}
8269	attrs := map[string]interface{}{}
8270	for _, a := range optional {
8271		a(attrs)
8272	}
8273	opspec := tf.OpSpec{
8274		Type: "CudnnRNNBackprop",
8275		Input: []tf.Input{
8276			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space,
8277		},
8278		Attrs: attrs,
8279	}
8280	op := scope.AddOperation(opspec)
8281	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
8282}
8283
8284// CudnnRNNBackpropV2Attr is an optional argument to CudnnRNNBackpropV2.
8285type CudnnRNNBackpropV2Attr func(optionalAttr)
8286
8287// CudnnRNNBackpropV2RnnMode sets the optional rnn_mode attribute to value.
8288// If not specified, defaults to "lstm"
8289func CudnnRNNBackpropV2RnnMode(value string) CudnnRNNBackpropV2Attr {
8290	return func(m optionalAttr) {
8291		m["rnn_mode"] = value
8292	}
8293}
8294
8295// CudnnRNNBackpropV2InputMode sets the optional input_mode attribute to value.
8296// If not specified, defaults to "linear_input"
8297func CudnnRNNBackpropV2InputMode(value string) CudnnRNNBackpropV2Attr {
8298	return func(m optionalAttr) {
8299		m["input_mode"] = value
8300	}
8301}
8302
8303// CudnnRNNBackpropV2Direction sets the optional direction attribute to value.
8304// If not specified, defaults to "unidirectional"
8305func CudnnRNNBackpropV2Direction(value string) CudnnRNNBackpropV2Attr {
8306	return func(m optionalAttr) {
8307		m["direction"] = value
8308	}
8309}
8310
8311// CudnnRNNBackpropV2Dropout sets the optional dropout attribute to value.
8312// If not specified, defaults to 0
8313func CudnnRNNBackpropV2Dropout(value float32) CudnnRNNBackpropV2Attr {
8314	return func(m optionalAttr) {
8315		m["dropout"] = value
8316	}
8317}
8318
8319// CudnnRNNBackpropV2Seed sets the optional seed attribute to value.
8320// If not specified, defaults to 0
8321func CudnnRNNBackpropV2Seed(value int64) CudnnRNNBackpropV2Attr {
8322	return func(m optionalAttr) {
8323		m["seed"] = value
8324	}
8325}
8326
8327// CudnnRNNBackpropV2Seed2 sets the optional seed2 attribute to value.
8328// If not specified, defaults to 0
8329func CudnnRNNBackpropV2Seed2(value int64) CudnnRNNBackpropV2Attr {
8330	return func(m optionalAttr) {
8331		m["seed2"] = value
8332	}
8333}
8334
8335// Backprop step of CudnnRNN.
8336//
8337// Computes the backprop of both data and weights in an RNN. Takes an extra
8338//
8339//	"host_reserved" input compared to CudnnRNNBackprop, which is used to
8340//	determine the RNN's cudnnRNNAlgo_t and cudnnMathType_t.
8341//
8342// rnn_mode: Indicates the type of the RNN model.
8343// input_mode: Indicates whether there is a linear projection between the input and
8344//
8345//	the actual computation before the first layer. 'skip_input' is only allowed
8346//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8347//	input_size == num_units; otherwise, it implies 'linear_input'.
8348//
8349// direction: Indicates whether a bidirectional model will be used. Should be
8350//
8351//	"unidirectional" or "bidirectional".
8352//
8353// dropout: Dropout probability. When set to 0., dropout is disabled.
8354// seed: The 1st part of a seed to initialize dropout.
8355// seed2: The 2nd part of a seed to initialize dropout.
8356// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
8357// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
8358//
8359//	num_units].
8360//
8361// input_c: For LSTM, a 3-D tensor with the shape of
8362//
8363//	[num_layer * dir, batch, num_units]. For other models, it is ignored.
8364//
8365// params: A 1-D tensor that contains the weights and biases in an opaque layout.
8366//
8367//	The size must be created through CudnnRNNParamsSize, and initialized
8368//	separately. Note that they might not be compatible across different
8369//	generations. So it is a good idea to save and restore them in canonical form.
8370//
8371// output: A 3-D tensor with the shape of [seq_length, batch_size,
8372//
8373//	dir * num_units].
8374//
8375// output_h: The same shape as input_h.
8376// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
8377// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
8378// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
8379//
8380//	pass.
8381//
8382// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
8383//
8384//	pass.
8385//
8386// reserve_space: The same reserve_space produced in the forward operation.
8387// host_reserved: The same host_reserved produced in the forward operation.
8388// input_backprop: The backprop to input in the forward pass. Has the same shape
8389//
8390//	as input.
8391//
8392// input_h_backprop: The backprop to input_h in the forward pass. Has the same
8393//
8394//	shape as input_h.
8395//
8396// input_c_backprop: The backprop to input_c in the forward pass. Has the same
8397//
8398//	shape as input_c.
8399//
8400// params_backprop: The backprop to the params buffer in the forward pass. Has the
8401//
8402//	same shape as params.
8403func CudnnRNNBackpropV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV2Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
8404	if scope.Err() != nil {
8405		return
8406	}
8407	attrs := map[string]interface{}{}
8408	for _, a := range optional {
8409		a(attrs)
8410	}
8411	opspec := tf.OpSpec{
8412		Type: "CudnnRNNBackpropV2",
8413		Input: []tf.Input{
8414			input, input_h, input_c, params, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
8415		},
8416		Attrs: attrs,
8417	}
8418	op := scope.AddOperation(opspec)
8419	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
8420}
8421
8422// CudnnRNNBackpropV3Attr is an optional argument to CudnnRNNBackpropV3.
8423type CudnnRNNBackpropV3Attr func(optionalAttr)
8424
8425// CudnnRNNBackpropV3RnnMode sets the optional rnn_mode attribute to value.
8426// If not specified, defaults to "lstm"
8427func CudnnRNNBackpropV3RnnMode(value string) CudnnRNNBackpropV3Attr {
8428	return func(m optionalAttr) {
8429		m["rnn_mode"] = value
8430	}
8431}
8432
8433// CudnnRNNBackpropV3InputMode sets the optional input_mode attribute to value.
8434// If not specified, defaults to "linear_input"
8435func CudnnRNNBackpropV3InputMode(value string) CudnnRNNBackpropV3Attr {
8436	return func(m optionalAttr) {
8437		m["input_mode"] = value
8438	}
8439}
8440
8441// CudnnRNNBackpropV3Direction sets the optional direction attribute to value.
8442// If not specified, defaults to "unidirectional"
8443func CudnnRNNBackpropV3Direction(value string) CudnnRNNBackpropV3Attr {
8444	return func(m optionalAttr) {
8445		m["direction"] = value
8446	}
8447}
8448
8449// CudnnRNNBackpropV3Dropout sets the optional dropout attribute to value.
8450// If not specified, defaults to 0
8451func CudnnRNNBackpropV3Dropout(value float32) CudnnRNNBackpropV3Attr {
8452	return func(m optionalAttr) {
8453		m["dropout"] = value
8454	}
8455}
8456
8457// CudnnRNNBackpropV3Seed sets the optional seed attribute to value.
8458// If not specified, defaults to 0
8459func CudnnRNNBackpropV3Seed(value int64) CudnnRNNBackpropV3Attr {
8460	return func(m optionalAttr) {
8461		m["seed"] = value
8462	}
8463}
8464
8465// CudnnRNNBackpropV3Seed2 sets the optional seed2 attribute to value.
8466// If not specified, defaults to 0
8467func CudnnRNNBackpropV3Seed2(value int64) CudnnRNNBackpropV3Attr {
8468	return func(m optionalAttr) {
8469		m["seed2"] = value
8470	}
8471}
8472
8473// CudnnRNNBackpropV3NumProj sets the optional num_proj attribute to value.
8474// If not specified, defaults to 0
8475func CudnnRNNBackpropV3NumProj(value int64) CudnnRNNBackpropV3Attr {
8476	return func(m optionalAttr) {
8477		m["num_proj"] = value
8478	}
8479}
8480
8481// CudnnRNNBackpropV3TimeMajor sets the optional time_major attribute to value.
8482// If not specified, defaults to true
8483func CudnnRNNBackpropV3TimeMajor(value bool) CudnnRNNBackpropV3Attr {
8484	return func(m optionalAttr) {
8485		m["time_major"] = value
8486	}
8487}
8488
8489// Backprop step of CudnnRNNV3.
8490//
8491// Computes the backprop of both data and weights in an RNN. Takes an extra
8492//
8493//	"sequence_lengths" input compared to CudnnRNNBackprop.
8494//
8495// rnn_mode: Indicates the type of the RNN model.
8496// input_mode: Indicates whether there is a linear projection between the input and
8497//
8498//	the actual computation before the first layer. 'skip_input' is only allowed
8499//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8500//	input_size == num_units; otherwise, it implies 'linear_input'.
8501//
8502// direction: Indicates whether a bidirectional model will be used. Should be
8503//
8504//	"unidirectional" or "bidirectional".
8505//
8506// dropout: Dropout probability. When set to 0., dropout is disabled.
8507// seed: The 1st part of a seed to initialize dropout.
8508// seed2: The 2nd part of a seed to initialize dropout.
8509// input: If time_major is true, this is a 3-D tensor with the shape of
8510//
8511//	[seq_length, batch_size, input_size]. If time_major is false, the shape is
8512//	[batch_size, seq_length, input_size].
8513//
8514// input_h: If time_major is true, this is a 3-D tensor with the shape of
8515//
8516//	[num_layer * dir, batch_size, num_units]. If time_major is false, the shape
8517//	is [batch_size, num_layer * dir, num_units].
8518//
8519// input_c: For LSTM, a 3-D tensor with the shape of
8520//
8521//	[num_layer * dir, batch, num_units]. For other models, it is ignored.
8522//
8523// params: A 1-D tensor that contains the weights and biases in an opaque layout.
8524//
8525//	The size must be created through CudnnRNNParamsSize, and initialized
8526//	separately. Note that they might not be compatible across different
8527//	generations. So it is a good idea to save and restore them in canonical form.
8528//
8529// sequence_lengths: a vector of lengths of each input sequence.
8530// output: If time_major is true, this is a 3-D tensor with the shape of
8531//
8532//	[seq_length, batch_size, dir * num_units]. If time_major is false, the
8533//	shape is [batch_size, seq_length, dir * num_units].
8534//
8535// output_h: The same shape as input_h.
8536// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
8537// output_backprop: A 3-D tensor with the same shape as output in the forward pass.
8538// output_h_backprop: A 3-D tensor with the same shape as output_h in the forward
8539//
8540//	pass.
8541//
8542// output_c_backprop: A 3-D tensor with the same shape as output_c in the forward
8543//
8544//	pass.
8545//
8546// time_major: Indicates whether the input/output format is time major or batch
8547//
8548//	major.
8549//
8550// reserve_space: The same reserve_space produced in the forward operation.
8551// input_backprop: The backprop to input in the forward pass. Has the same shape
8552//
8553//	as input.
8554//
8555// input_h_backprop: The backprop to input_h in the forward pass. Has the same
8556//
8557//	shape as input_h.
8558//
8559// input_c_backprop: The backprop to input_c in the forward pass. Has the same
8560//
8561//	shape as input_c.
8562//
8563// params_backprop: The backprop to the params buffer in the forward pass. Has the
8564//
8565//	same shape as params.
8566func CudnnRNNBackpropV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, output tf.Output, output_h tf.Output, output_c tf.Output, output_backprop tf.Output, output_h_backprop tf.Output, output_c_backprop tf.Output, reserve_space tf.Output, host_reserved tf.Output, optional ...CudnnRNNBackpropV3Attr) (input_backprop tf.Output, input_h_backprop tf.Output, input_c_backprop tf.Output, params_backprop tf.Output) {
8567	if scope.Err() != nil {
8568		return
8569	}
8570	attrs := map[string]interface{}{}
8571	for _, a := range optional {
8572		a(attrs)
8573	}
8574	opspec := tf.OpSpec{
8575		Type: "CudnnRNNBackpropV3",
8576		Input: []tf.Input{
8577			input, input_h, input_c, params, sequence_lengths, output, output_h, output_c, output_backprop, output_h_backprop, output_c_backprop, reserve_space, host_reserved,
8578		},
8579		Attrs: attrs,
8580	}
8581	op := scope.AddOperation(opspec)
8582	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
8583}
8584
8585// CudnnRNNCanonicalToParamsAttr is an optional argument to CudnnRNNCanonicalToParams.
8586type CudnnRNNCanonicalToParamsAttr func(optionalAttr)
8587
8588// CudnnRNNCanonicalToParamsRnnMode sets the optional rnn_mode attribute to value.
8589// If not specified, defaults to "lstm"
8590func CudnnRNNCanonicalToParamsRnnMode(value string) CudnnRNNCanonicalToParamsAttr {
8591	return func(m optionalAttr) {
8592		m["rnn_mode"] = value
8593	}
8594}
8595
8596// CudnnRNNCanonicalToParamsInputMode sets the optional input_mode attribute to value.
8597// If not specified, defaults to "linear_input"
8598func CudnnRNNCanonicalToParamsInputMode(value string) CudnnRNNCanonicalToParamsAttr {
8599	return func(m optionalAttr) {
8600		m["input_mode"] = value
8601	}
8602}
8603
8604// CudnnRNNCanonicalToParamsDirection sets the optional direction attribute to value.
8605// If not specified, defaults to "unidirectional"
8606func CudnnRNNCanonicalToParamsDirection(value string) CudnnRNNCanonicalToParamsAttr {
8607	return func(m optionalAttr) {
8608		m["direction"] = value
8609	}
8610}
8611
8612// CudnnRNNCanonicalToParamsDropout sets the optional dropout attribute to value.
8613// If not specified, defaults to 0
8614func CudnnRNNCanonicalToParamsDropout(value float32) CudnnRNNCanonicalToParamsAttr {
8615	return func(m optionalAttr) {
8616		m["dropout"] = value
8617	}
8618}
8619
8620// CudnnRNNCanonicalToParamsSeed sets the optional seed attribute to value.
8621// If not specified, defaults to 0
8622func CudnnRNNCanonicalToParamsSeed(value int64) CudnnRNNCanonicalToParamsAttr {
8623	return func(m optionalAttr) {
8624		m["seed"] = value
8625	}
8626}
8627
8628// CudnnRNNCanonicalToParamsSeed2 sets the optional seed2 attribute to value.
8629// If not specified, defaults to 0
8630func CudnnRNNCanonicalToParamsSeed2(value int64) CudnnRNNCanonicalToParamsAttr {
8631	return func(m optionalAttr) {
8632		m["seed2"] = value
8633	}
8634}
8635
8636// Converts CudnnRNN params from canonical form to usable form.
8637//
8638// Writes a set of weights into the opaque params buffer so they can be used in
8639// upcoming training or inference.
8640//
8641// Note that the params buffer may not be compatible across different GPUs. So any
8642// save and restoration should be converted to and from the canonical weights and
8643// biases.
8644//
8645// num_layers: Specifies the number of layers in the RNN model.
8646// num_units: Specifies the size of the hidden state.
8647// input_size: Specifies the size of the input state.
8648// weights: the canonical form of weights that can be used for saving
8649//
8650//	and restoration. They are more likely to be compatible across different
8651//	generations.
8652//
8653// biases: the canonical form of biases that can be used for saving
8654//
8655//	and restoration. They are more likely to be compatible across different
8656//	generations.
8657//
8658// num_params: number of parameter sets for all layers.
8659//
8660//	Each layer may contain multiple parameter sets, with each set consisting of
8661//	a weight matrix and a bias vector.
8662//
8663// rnn_mode: Indicates the type of the RNN model.
8664// input_mode: Indicates whether there is a linear projection between the input and
8665//
8666//	the actual computation before the first layer. 'skip_input' is only allowed
8667//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8668//	input_size == num_units; otherwise, it implies 'linear_input'.
8669//
8670// direction: Indicates whether a bidirectional model will be used.
8671//
8672//	dir = (direction == bidirectional) ? 2 : 1
8673//
8674// dropout: dropout probability. When set to 0., dropout is disabled.
8675// seed: the 1st part of a seed to initialize dropout.
8676// seed2: the 2nd part of a seed to initialize dropout.
8677func CudnnRNNCanonicalToParams(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsAttr) (params tf.Output) {
8678	if scope.Err() != nil {
8679		return
8680	}
8681	attrs := map[string]interface{}{}
8682	for _, a := range optional {
8683		a(attrs)
8684	}
8685	opspec := tf.OpSpec{
8686		Type: "CudnnRNNCanonicalToParams",
8687		Input: []tf.Input{
8688			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
8689		},
8690		Attrs: attrs,
8691	}
8692	op := scope.AddOperation(opspec)
8693	return op.Output(0)
8694}
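
// The packing direction, as a hand-written sketch (not generated): writing
// previously saved canonical weights and biases back into an opaque params
// buffer before running the RNN. The hyper-parameter constants are
// illustrative; weights and biases are assumed to hold the canonical tensors
// in the order they were originally retrieved.
func exampleCudnnRNNCanonicalToParams(scope *Scope, weights, biases []tf.Output) tf.Output {
	numLayers := Const(scope.SubScope("num_layers"), int32(1))
	numUnits := Const(scope.SubScope("num_units"), int32(128))
	inputSize := Const(scope.SubScope("input_size"), int32(64))
	return CudnnRNNCanonicalToParams(scope, numLayers, numUnits, inputSize,
		weights, biases, CudnnRNNCanonicalToParamsRnnMode("lstm"))
}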
8695
8696// CudnnRNNCanonicalToParamsV2Attr is an optional argument to CudnnRNNCanonicalToParamsV2.
8697type CudnnRNNCanonicalToParamsV2Attr func(optionalAttr)
8698
8699// CudnnRNNCanonicalToParamsV2RnnMode sets the optional rnn_mode attribute to value.
8700// If not specified, defaults to "lstm"
8701func CudnnRNNCanonicalToParamsV2RnnMode(value string) CudnnRNNCanonicalToParamsV2Attr {
8702	return func(m optionalAttr) {
8703		m["rnn_mode"] = value
8704	}
8705}
8706
8707// CudnnRNNCanonicalToParamsV2InputMode sets the optional input_mode attribute to value.
8708// If not specified, defaults to "linear_input"
8709func CudnnRNNCanonicalToParamsV2InputMode(value string) CudnnRNNCanonicalToParamsV2Attr {
8710	return func(m optionalAttr) {
8711		m["input_mode"] = value
8712	}
8713}
8714
8715// CudnnRNNCanonicalToParamsV2Direction sets the optional direction attribute to value.
8716// If not specified, defaults to "unidirectional"
8717func CudnnRNNCanonicalToParamsV2Direction(value string) CudnnRNNCanonicalToParamsV2Attr {
8718	return func(m optionalAttr) {
8719		m["direction"] = value
8720	}
8721}
8722
8723// CudnnRNNCanonicalToParamsV2Dropout sets the optional dropout attribute to value.
8724// If not specified, defaults to 0
8725func CudnnRNNCanonicalToParamsV2Dropout(value float32) CudnnRNNCanonicalToParamsV2Attr {
8726	return func(m optionalAttr) {
8727		m["dropout"] = value
8728	}
8729}
8730
8731// CudnnRNNCanonicalToParamsV2Seed sets the optional seed attribute to value.
8732// If not specified, defaults to 0
8733func CudnnRNNCanonicalToParamsV2Seed(value int64) CudnnRNNCanonicalToParamsV2Attr {
8734	return func(m optionalAttr) {
8735		m["seed"] = value
8736	}
8737}
8738
8739// CudnnRNNCanonicalToParamsV2Seed2 sets the optional seed2 attribute to value.
8740// If not specified, defaults to 0
8741func CudnnRNNCanonicalToParamsV2Seed2(value int64) CudnnRNNCanonicalToParamsV2Attr {
8742	return func(m optionalAttr) {
8743		m["seed2"] = value
8744	}
8745}
8746
8747// CudnnRNNCanonicalToParamsV2NumProj sets the optional num_proj attribute to value.
8748// If not specified, defaults to 0
8749func CudnnRNNCanonicalToParamsV2NumProj(value int64) CudnnRNNCanonicalToParamsV2Attr {
8750	return func(m optionalAttr) {
8751		m["num_proj"] = value
8752	}
8753}
8754
8755// Converts CudnnRNN params from canonical form to usable form. It supports the projection in LSTM.
8756//
8757// Writes a set of weights into the opaque params buffer so they can be used in
8758// upcoming training or inference.
8759//
8760// Note that the params buffer may not be compatible across different GPUs. So any
8761// save and restoration should be converted to and from the canonical weights and
8762// biases.
8763//
8764// num_layers: Specifies the number of layers in the RNN model.
8765// num_units: Specifies the size of the hidden state.
8766// input_size: Specifies the size of the input state.
8767// weights: the canonical form of weights that can be used for saving
8768//
8769//	and restoration. They are more likely to be compatible across different
8770//	generations.
8771//
8772// biases: the canonical form of biases that can be used for saving
8773//
8774//	and restoration. They are more likely to be compatible across different
8775//	generations.
8776//
8777// num_params_weights: number of weight parameter matrices for all layers.
8778// num_params_biases: number of bias parameter vectors for all layers.
8779// rnn_mode: Indicates the type of the RNN model.
8780// input_mode: Indicates whether there is a linear projection between the input and
8781//
8782//	the actual computation before the first layer. 'skip_input' is only allowed
8783//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8784//	input_size == num_units; otherwise, it implies 'linear_input'.
8785//
8786// direction: Indicates whether a bidirectional model will be used.
8787//
8788//	dir = (direction == bidirectional) ? 2 : 1
8789//
8790// dropout: dropout probability. When set to 0., dropout is disabled.
8791// seed: the 1st part of a seed to initialize dropout.
8792// seed2: the 2nd part of a seed to initialize dropout.
8793// num_proj: The output dimensionality for the projection matrices. If None or 0,
8794//
8795//	no projection is performed.
8796func CudnnRNNCanonicalToParamsV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, weights []tf.Output, biases []tf.Output, optional ...CudnnRNNCanonicalToParamsV2Attr) (params tf.Output) {
8797	if scope.Err() != nil {
8798		return
8799	}
8800	attrs := map[string]interface{}{}
8801	for _, a := range optional {
8802		a(attrs)
8803	}
8804	opspec := tf.OpSpec{
8805		Type: "CudnnRNNCanonicalToParamsV2",
8806		Input: []tf.Input{
8807			num_layers, num_units, input_size, tf.OutputList(weights), tf.OutputList(biases),
8808		},
8809		Attrs: attrs,
8810	}
8811	op := scope.AddOperation(opspec)
8812	return op.Output(0)
8813}
8814
8815// CudnnRNNParamsSizeAttr is an optional argument to CudnnRNNParamsSize.
8816type CudnnRNNParamsSizeAttr func(optionalAttr)
8817
8818// CudnnRNNParamsSizeRnnMode sets the optional rnn_mode attribute to value.
8819// If not specified, defaults to "lstm"
8820func CudnnRNNParamsSizeRnnMode(value string) CudnnRNNParamsSizeAttr {
8821	return func(m optionalAttr) {
8822		m["rnn_mode"] = value
8823	}
8824}
8825
8826// CudnnRNNParamsSizeInputMode sets the optional input_mode attribute to value.
8827// If not specified, defaults to "linear_input"
8828func CudnnRNNParamsSizeInputMode(value string) CudnnRNNParamsSizeAttr {
8829	return func(m optionalAttr) {
8830		m["input_mode"] = value
8831	}
8832}
8833
8834// CudnnRNNParamsSizeDirection sets the optional direction attribute to value.
8835// If not specified, defaults to "unidirectional"
8836func CudnnRNNParamsSizeDirection(value string) CudnnRNNParamsSizeAttr {
8837	return func(m optionalAttr) {
8838		m["direction"] = value
8839	}
8840}
8841
8842// CudnnRNNParamsSizeDropout sets the optional dropout attribute to value.
8843// If not specified, defaults to 0
8844func CudnnRNNParamsSizeDropout(value float32) CudnnRNNParamsSizeAttr {
8845	return func(m optionalAttr) {
8846		m["dropout"] = value
8847	}
8848}
8849
8850// CudnnRNNParamsSizeSeed sets the optional seed attribute to value.
8851// If not specified, defaults to 0
8852func CudnnRNNParamsSizeSeed(value int64) CudnnRNNParamsSizeAttr {
8853	return func(m optionalAttr) {
8854		m["seed"] = value
8855	}
8856}
8857
8858// CudnnRNNParamsSizeSeed2 sets the optional seed2 attribute to value.
8859// If not specified, defaults to 0
8860func CudnnRNNParamsSizeSeed2(value int64) CudnnRNNParamsSizeAttr {
8861	return func(m optionalAttr) {
8862		m["seed2"] = value
8863	}
8864}
8865
8866// CudnnRNNParamsSizeNumProj sets the optional num_proj attribute to value.
8867// If not specified, defaults to 0
8868func CudnnRNNParamsSizeNumProj(value int64) CudnnRNNParamsSizeAttr {
8869	return func(m optionalAttr) {
8870		m["num_proj"] = value
8871	}
8872}
8873
8874// Computes the size of the weights that can be used by a Cudnn RNN model.
8875//
8876// Returns the params size that can be used by the Cudnn RNN model. Subsequent
8877// weight allocation and initialization should use this size.
8878//
8879// num_layers: Specifies the number of layers in the RNN model.
8880// num_units: Specifies the size of the hidden state.
8881// input_size: Specifies the size of the input state.
8882// rnn_mode: Indicates the type of the RNN model.
8883// input_mode: Indicates whether there is a linear projection between the input and
8884//
8885//	the actual computation before the first layer. 'skip_input' is only allowed
8886//	when input_size == num_units; 'auto_select' implies 'skip_input' when
8887//	input_size == num_units; otherwise, it implies 'linear_input'.
8888//
8889// direction: Indicates whether a bidirectional model will be used.
8890//
8891//	dir = (direction == bidirectional) ? 2 : 1
8892//
8893// dropout: dropout probability. When set to 0., dropout is disabled.
8894// seed: the 1st part of a seed to initialize dropout.
8895// seed2: the 2nd part of a seed to initialize dropout.
8896// params_size: The size of the params buffer that should be allocated and
8897//
8898//	initialized for this RNN model. Note that this params buffer may not be
8899//	compatible across GPUs. Please use CudnnRNNParamsWeights and
8900//	CudnnRNNParamsBiases to save and restore them in a way that is compatible
8901//	across different runs.
8902func CudnnRNNParamsSize(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, T tf.DataType, S tf.DataType, optional ...CudnnRNNParamsSizeAttr) (params_size tf.Output) {
8903	if scope.Err() != nil {
8904		return
8905	}
8906	attrs := map[string]interface{}{"T": T, "S": S}
8907	for _, a := range optional {
8908		a(attrs)
8909	}
8910	opspec := tf.OpSpec{
8911		Type: "CudnnRNNParamsSize",
8912		Input: []tf.Input{
8913			num_layers, num_units, input_size,
8914		},
8915		Attrs: attrs,
8916	}
8917	op := scope.AddOperation(opspec)
8918	return op.Output(0)
8919}
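
// A hand-written sketch (not generated) of the typical workflow: query the
// opaque params size first, then allocate and initialize a buffer of that
// size for the forward op. T is the dtype of the RNN computation and S the
// dtype of the returned size; the hyper-parameter constants are illustrative.
func exampleCudnnRNNParamsSize(scope *Scope) tf.Output {
	numLayers := Const(scope.SubScope("num_layers"), int32(2))
	numUnits := Const(scope.SubScope("num_units"), int32(128))
	inputSize := Const(scope.SubScope("input_size"), int32(64))
	return CudnnRNNParamsSize(scope, numLayers, numUnits, inputSize,
		tf.Float, tf.Int32, CudnnRNNParamsSizeRnnMode("lstm"))
}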
8920
8921// CudnnRNNParamsToCanonicalAttr is an optional argument to CudnnRNNParamsToCanonical.
8922type CudnnRNNParamsToCanonicalAttr func(optionalAttr)
8923
8924// CudnnRNNParamsToCanonicalRnnMode sets the optional rnn_mode attribute to value.
8925// If not specified, defaults to "lstm"
8926func CudnnRNNParamsToCanonicalRnnMode(value string) CudnnRNNParamsToCanonicalAttr {
8927	return func(m optionalAttr) {
8928		m["rnn_mode"] = value
8929	}
8930}
8931
8932// CudnnRNNParamsToCanonicalInputMode sets the optional input_mode attribute to value.
8933// If not specified, defaults to "linear_input"
8934func CudnnRNNParamsToCanonicalInputMode(value string) CudnnRNNParamsToCanonicalAttr {
8935	return func(m optionalAttr) {
8936		m["input_mode"] = value
8937	}
8938}
8939
8940// CudnnRNNParamsToCanonicalDirection sets the optional direction attribute to value.
8941// If not specified, defaults to "unidirectional"
8942func CudnnRNNParamsToCanonicalDirection(value string) CudnnRNNParamsToCanonicalAttr {
8943	return func(m optionalAttr) {
8944		m["direction"] = value
8945	}
8946}
8947
8948// CudnnRNNParamsToCanonicalDropout sets the optional dropout attribute to value.
8949// If not specified, defaults to 0
8950func CudnnRNNParamsToCanonicalDropout(value float32) CudnnRNNParamsToCanonicalAttr {
8951	return func(m optionalAttr) {
8952		m["dropout"] = value
8953	}
8954}
8955
8956// CudnnRNNParamsToCanonicalSeed sets the optional seed attribute to value.
8957// If not specified, defaults to 0
8958func CudnnRNNParamsToCanonicalSeed(value int64) CudnnRNNParamsToCanonicalAttr {
8959	return func(m optionalAttr) {
8960		m["seed"] = value
8961	}
8962}
8963
8964// CudnnRNNParamsToCanonicalSeed2 sets the optional seed2 attribute to value.
8965// If not specified, defaults to 0
8966func CudnnRNNParamsToCanonicalSeed2(value int64) CudnnRNNParamsToCanonicalAttr {
8967	return func(m optionalAttr) {
8968		m["seed2"] = value
8969	}
8970}
8971
8972// Retrieves CudnnRNN params in canonical form.
8973//
8974// Retrieves a set of weights from the opaque params buffer that can be saved and
8975// restored in a way compatible with future runs.
8976//
8977// Note that the params buffer may not be compatible across different GPUs. So any
8978// save and restoration should be converted to and from the canonical weights and
8979// biases.
8980//
8981// num_layers: Specifies the number of layers in the RNN model.
8982// num_units: Specifies the size of the hidden state.
8983// input_size: Specifies the size of the input state.
8984// num_params: number of parameter sets for all layers.
8985//
8986//	Each layer may contain multiple parameter sets, with each set consisting of
8987//	a weight matrix and a bias vector.
8988//
8989// weights: the canonical form of weights that can be used for saving
8990//
8991//	and restoration. They are more likely to be compatible across different
8992//	generations.
8993//
8994// biases: the canonical form of biases that can be used for saving
8995//
8996//	and restoration. They are more likely to be compatible across different
8997//	generations.
8998//
8999// rnn_mode: Indicates the type of the RNN model.
9000// input_mode: Indicates whether there is a linear projection between the input and
9001//
9002//	the actual computation before the first layer. 'skip_input' is only allowed
9003//	when input_size == num_units; 'auto_select' implies 'skip_input' when
9004//	input_size == num_units; otherwise, it implies 'linear_input'.
9005//
9006// direction: Indicates whether a bidirectional model will be used.
9007//
9008//	dir = (direction == bidirectional) ? 2 : 1
9009//
9010// dropout: dropout probability. When set to 0., dropout is disabled.
9011// seed: the 1st part of a seed to initialize dropout.
9012// seed2: the 2nd part of a seed to initialize dropout.
9013func CudnnRNNParamsToCanonical(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params int64, optional ...CudnnRNNParamsToCanonicalAttr) (weights []tf.Output, biases []tf.Output) {
9014	if scope.Err() != nil {
9015		return
9016	}
9017	attrs := map[string]interface{}{"num_params": num_params}
9018	for _, a := range optional {
9019		a(attrs)
9020	}
9021	opspec := tf.OpSpec{
9022		Type: "CudnnRNNParamsToCanonical",
9023		Input: []tf.Input{
9024			num_layers, num_units, input_size, params,
9025		},
9026		Attrs: attrs,
9027	}
9028	op := scope.AddOperation(opspec)
9029	if scope.Err() != nil {
9030		return
9031	}
9032	var idx int
9033	var err error
9034	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
9035		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
9036		return
9037	}
9038	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
9039		scope.UpdateErr("CudnnRNNParamsToCanonical", err)
9040		return
9041	}
9042	return weights, biases
9043}
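
// Editorial example (not machine generated): a minimal graph-construction
// sketch for CudnnRNNParamsToCanonical. NewScope, Const, and Placeholder are
// this package's helpers; the exampleCudnnRNNParamsToCanonical name and the
// num_params value of 8 (one weight/bias set per cuDNN LSTM gate region for a
// single unidirectional layer) are illustrative assumptions.
func exampleCudnnRNNParamsToCanonical() (weights, biases []tf.Output) {
	s := NewScope()
	numLayers := Const(s, int32(1))  // one RNN layer
	numUnits := Const(s, int32(128)) // hidden state size
	inputSize := Const(s, int32(64)) // input feature size
	// The opaque params buffer is fed at run time, e.g. after being restored.
	params := Placeholder(s, tf.Float)
	return CudnnRNNParamsToCanonical(s, numLayers, numUnits, inputSize, params, 8,
		CudnnRNNParamsToCanonicalRnnMode("lstm"))
}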
9044
9045// CudnnRNNParamsToCanonicalV2Attr is an optional argument to CudnnRNNParamsToCanonicalV2.
9046type CudnnRNNParamsToCanonicalV2Attr func(optionalAttr)
9047
9048// CudnnRNNParamsToCanonicalV2RnnMode sets the optional rnn_mode attribute to value.
9049// If not specified, defaults to "lstm"
9050func CudnnRNNParamsToCanonicalV2RnnMode(value string) CudnnRNNParamsToCanonicalV2Attr {
9051	return func(m optionalAttr) {
9052		m["rnn_mode"] = value
9053	}
9054}
9055
9056// CudnnRNNParamsToCanonicalV2InputMode sets the optional input_mode attribute to value.
9057// If not specified, defaults to "linear_input"
9058func CudnnRNNParamsToCanonicalV2InputMode(value string) CudnnRNNParamsToCanonicalV2Attr {
9059	return func(m optionalAttr) {
9060		m["input_mode"] = value
9061	}
9062}
9063
9064// CudnnRNNParamsToCanonicalV2Direction sets the optional direction attribute to value.
9065// If not specified, defaults to "unidirectional"
9066func CudnnRNNParamsToCanonicalV2Direction(value string) CudnnRNNParamsToCanonicalV2Attr {
9067	return func(m optionalAttr) {
9068		m["direction"] = value
9069	}
9070}
9071
9072// CudnnRNNParamsToCanonicalV2Dropout sets the optional dropout attribute to value.
9073// If not specified, defaults to 0
9074func CudnnRNNParamsToCanonicalV2Dropout(value float32) CudnnRNNParamsToCanonicalV2Attr {
9075	return func(m optionalAttr) {
9076		m["dropout"] = value
9077	}
9078}
9079
9080// CudnnRNNParamsToCanonicalV2Seed sets the optional seed attribute to value.
9081// If not specified, defaults to 0
9082func CudnnRNNParamsToCanonicalV2Seed(value int64) CudnnRNNParamsToCanonicalV2Attr {
9083	return func(m optionalAttr) {
9084		m["seed"] = value
9085	}
9086}
9087
9088// CudnnRNNParamsToCanonicalV2Seed2 sets the optional seed2 attribute to value.
9089// If not specified, defaults to 0
9090func CudnnRNNParamsToCanonicalV2Seed2(value int64) CudnnRNNParamsToCanonicalV2Attr {
9091	return func(m optionalAttr) {
9092		m["seed2"] = value
9093	}
9094}
9095
9096// CudnnRNNParamsToCanonicalV2NumProj sets the optional num_proj attribute to value.
9097// If not specified, defaults to 0
9098func CudnnRNNParamsToCanonicalV2NumProj(value int64) CudnnRNNParamsToCanonicalV2Attr {
9099	return func(m optionalAttr) {
9100		m["num_proj"] = value
9101	}
9102}
9103
9104// Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM.
9105//
9106// Retrieves a set of weights from the opaque params buffer that can be saved and
9107// restored in a way compatible with future runs.
9108//
9109// Note that the params buffer may not be compatible across different GPUs. So any
9110// save and restoration should be converted to and from the canonical weights and
9111// biases.
9112//
9113// num_layers: Specifies the number of layers in the RNN model.
9114// num_units: Specifies the size of the hidden state.
9115// input_size: Specifies the size of the input state.
9116// num_params_weights: number of weight parameter matrices for all layers.
9117// num_params_biases: number of bias parameter vectors for all layers.
9118// weights: the canonical form of weights that can be used for saving
9119//
9120//	and restoration. They are more likely to be compatible across different
9121//	generations.
9122//
9123// biases: the canonical form of biases that can be used for saving
9124//
9125//	and restoration. They are more likely to be compatible across different
9126//	generations.
9127//
9128// rnn_mode: Indicates the type of the RNN model.
9129// input_mode: Indicates whether there is a linear projection between the input and
9130//
9131//	the actual computation before the first layer. 'skip_input' is only allowed
9132//	when input_size == num_units; 'auto_select' implies 'skip_input' when
9133//	input_size == num_units; otherwise, it implies 'linear_input'.
9134//
9135// direction: Indicates whether a bidirectional model will be used.
9136//
9137//	dir = (direction == bidirectional) ? 2 : 1
9138//
9139// dropout: dropout probability. When set to 0., dropout is disabled.
9140// seed: the 1st part of a seed to initialize dropout.
9141// seed2: the 2nd part of a seed to initialize dropout.
9142// num_proj: The output dimensionality for the projection matrices. If None or 0,
9143//
9144//	no projection is performed.
9145func CudnnRNNParamsToCanonicalV2(scope *Scope, num_layers tf.Output, num_units tf.Output, input_size tf.Output, params tf.Output, num_params_weights int64, num_params_biases int64, optional ...CudnnRNNParamsToCanonicalV2Attr) (weights []tf.Output, biases []tf.Output) {
9146	if scope.Err() != nil {
9147		return
9148	}
9149	attrs := map[string]interface{}{"num_params_weights": num_params_weights, "num_params_biases": num_params_biases}
9150	for _, a := range optional {
9151		a(attrs)
9152	}
9153	opspec := tf.OpSpec{
9154		Type: "CudnnRNNParamsToCanonicalV2",
9155		Input: []tf.Input{
9156			num_layers, num_units, input_size, params,
9157		},
9158		Attrs: attrs,
9159	}
9160	op := scope.AddOperation(opspec)
9161	if scope.Err() != nil {
9162		return
9163	}
9164	var idx int
9165	var err error
9166	if weights, idx, err = makeOutputList(op, idx, "weights"); err != nil {
9167		scope.UpdateErr("CudnnRNNParamsToCanonicalV2", err)
9168		return
9169	}
9170	if biases, idx, err = makeOutputList(op, idx, "biases"); err != nil {
9171		scope.UpdateErr("CudnnRNNParamsToCanonicalV2", err)
9172		return
9173	}
9174	return weights, biases
9175}
9176
9177// CudnnRNNV2Attr is an optional argument to CudnnRNNV2.
9178type CudnnRNNV2Attr func(optionalAttr)
9179
9180// CudnnRNNV2RnnMode sets the optional rnn_mode attribute to value.
9181// If not specified, defaults to "lstm"
9182func CudnnRNNV2RnnMode(value string) CudnnRNNV2Attr {
9183	return func(m optionalAttr) {
9184		m["rnn_mode"] = value
9185	}
9186}
9187
9188// CudnnRNNV2InputMode sets the optional input_mode attribute to value.
9189// If not specified, defaults to "linear_input"
9190func CudnnRNNV2InputMode(value string) CudnnRNNV2Attr {
9191	return func(m optionalAttr) {
9192		m["input_mode"] = value
9193	}
9194}
9195
9196// CudnnRNNV2Direction sets the optional direction attribute to value.
9197// If not specified, defaults to "unidirectional"
9198func CudnnRNNV2Direction(value string) CudnnRNNV2Attr {
9199	return func(m optionalAttr) {
9200		m["direction"] = value
9201	}
9202}
9203
9204// CudnnRNNV2Dropout sets the optional dropout attribute to value.
9205// If not specified, defaults to 0
9206func CudnnRNNV2Dropout(value float32) CudnnRNNV2Attr {
9207	return func(m optionalAttr) {
9208		m["dropout"] = value
9209	}
9210}
9211
9212// CudnnRNNV2Seed sets the optional seed attribute to value.
9213// If not specified, defaults to 0
9214func CudnnRNNV2Seed(value int64) CudnnRNNV2Attr {
9215	return func(m optionalAttr) {
9216		m["seed"] = value
9217	}
9218}
9219
9220// CudnnRNNV2Seed2 sets the optional seed2 attribute to value.
9221// If not specified, defaults to 0
9222func CudnnRNNV2Seed2(value int64) CudnnRNNV2Attr {
9223	return func(m optionalAttr) {
9224		m["seed2"] = value
9225	}
9226}
9227
9228// CudnnRNNV2IsTraining sets the optional is_training attribute to value.
9229// If not specified, defaults to true
9230func CudnnRNNV2IsTraining(value bool) CudnnRNNV2Attr {
9231	return func(m optionalAttr) {
9232		m["is_training"] = value
9233	}
9234}
9235
9236// An RNN backed by cuDNN.
9237//
9238// Computes the RNN from the input and initial states, with respect to the params
9239// buffer. Produces one extra output "host_reserved" than CudnnRNN.
9240//
9241// rnn_mode: Indicates the type of the RNN model.
9242// input_mode: Indicates whether there is a linear projection between the input and
9243//
9244//	the actual computation before the first layer. 'skip_input' is only allowed
9245//	when input_size == num_units; 'auto_select' implies 'skip_input' when
9246//	input_size == num_units; otherwise, it implies 'linear_input'.
9247//
9248// direction: Indicates whether a bidirectional model will be used. Should be
9249//
9250//	"unidirectional" or "bidirectional".
9251//
9252// dropout: Dropout probability. When set to 0., dropout is disabled.
9253// seed: The 1st part of a seed to initialize dropout.
9254// seed2: The 2nd part of a seed to initialize dropout.
9255// input: A 3-D tensor with the shape of [seq_length, batch_size, input_size].
9256// input_h: A 3-D tensor with the shape of [num_layer * dir, batch_size,
9257//
9258//	num_units].
9259//
9260// input_c: For LSTM, a 3-D tensor with the shape of
9261//
9262//	[num_layer * dir, batch, num_units]. For other models, it is ignored.
9263//
9264// params: A 1-D tensor that contains the weights and biases in an opaque layout.
9265//
9266//	The size must be created through CudnnRNNParamsSize, and initialized
9267//	separately. Note that they might not be compatible across different
9268//	generations. So it is a good idea to save and restore them in the canonical form.
9269//
9270// output: A 3-D tensor with the shape of [seq_length, batch_size,
9271//
9272//	dir * num_units].
9273//
9274// output_h: The same shape as input_h.
9275// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
9276// is_training: Indicates whether this operation is used for inference or
9277//
9278//	training.
9279//
9280// reserve_space: An opaque tensor that can be used in backprop calculation. It
9281//
9282//	is only produced if is_training is true.
9283//
9284// host_reserved: An opaque tensor that can be used in backprop calculation. It is
9285//
9286//	only produced if is_training is true. It is output on host memory rather than
9287//	device memory.
9288func CudnnRNNV2(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, optional ...CudnnRNNV2Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
9289	if scope.Err() != nil {
9290		return
9291	}
9292	attrs := map[string]interface{}{}
9293	for _, a := range optional {
9294		a(attrs)
9295	}
9296	opspec := tf.OpSpec{
9297		Type: "CudnnRNNV2",
9298		Input: []tf.Input{
9299			input, input_h, input_c, params,
9300		},
9301		Attrs: attrs,
9302	}
9303	op := scope.AddOperation(opspec)
9304	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
9305}
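
// Editorial example (not machine generated): a minimal inference wiring of
// CudnnRNNV2 under the shape conventions documented above. The placeholders
// are fed at run time; exampleCudnnRNNV2 is a hypothetical name.
func exampleCudnnRNNV2() (output, outputH, outputC tf.Output) {
	s := NewScope()
	input := Placeholder(s, tf.Float)  // [seq_length, batch_size, input_size]
	inputH := Placeholder(s, tf.Float) // [num_layer * dir, batch_size, num_units]
	inputC := Placeholder(s, tf.Float) // LSTM cell state; ignored by other modes
	params := Placeholder(s, tf.Float) // opaque buffer sized via CudnnRNNParamsSize
	// Reserve-space outputs are only produced during training, so they are
	// discarded here.
	out, h, c, _, _ := CudnnRNNV2(s, input, inputH, inputC, params,
		CudnnRNNV2IsTraining(false))
	return out, h, c
}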
9306
9307// CudnnRNNV3Attr is an optional argument to CudnnRNNV3.
9308type CudnnRNNV3Attr func(optionalAttr)
9309
9310// CudnnRNNV3RnnMode sets the optional rnn_mode attribute to value.
9311// If not specified, defaults to "lstm"
9312func CudnnRNNV3RnnMode(value string) CudnnRNNV3Attr {
9313	return func(m optionalAttr) {
9314		m["rnn_mode"] = value
9315	}
9316}
9317
9318// CudnnRNNV3InputMode sets the optional input_mode attribute to value.
9319// If not specified, defaults to "linear_input"
9320func CudnnRNNV3InputMode(value string) CudnnRNNV3Attr {
9321	return func(m optionalAttr) {
9322		m["input_mode"] = value
9323	}
9324}
9325
9326// CudnnRNNV3Direction sets the optional direction attribute to value.
9327// If not specified, defaults to "unidirectional"
9328func CudnnRNNV3Direction(value string) CudnnRNNV3Attr {
9329	return func(m optionalAttr) {
9330		m["direction"] = value
9331	}
9332}
9333
9334// CudnnRNNV3Dropout sets the optional dropout attribute to value.
9335// If not specified, defaults to 0
9336func CudnnRNNV3Dropout(value float32) CudnnRNNV3Attr {
9337	return func(m optionalAttr) {
9338		m["dropout"] = value
9339	}
9340}
9341
9342// CudnnRNNV3Seed sets the optional seed attribute to value.
9343// If not specified, defaults to 0
9344func CudnnRNNV3Seed(value int64) CudnnRNNV3Attr {
9345	return func(m optionalAttr) {
9346		m["seed"] = value
9347	}
9348}
9349
9350// CudnnRNNV3Seed2 sets the optional seed2 attribute to value.
9351// If not specified, defaults to 0
9352func CudnnRNNV3Seed2(value int64) CudnnRNNV3Attr {
9353	return func(m optionalAttr) {
9354		m["seed2"] = value
9355	}
9356}
9357
9358// CudnnRNNV3NumProj sets the optional num_proj attribute to value.
9359// If not specified, defaults to 0
9360func CudnnRNNV3NumProj(value int64) CudnnRNNV3Attr {
9361	return func(m optionalAttr) {
9362		m["num_proj"] = value
9363	}
9364}
9365
9366// CudnnRNNV3IsTraining sets the optional is_training attribute to value.
9367// If not specified, defaults to true
9368func CudnnRNNV3IsTraining(value bool) CudnnRNNV3Attr {
9369	return func(m optionalAttr) {
9370		m["is_training"] = value
9371	}
9372}
9373
9374// CudnnRNNV3TimeMajor sets the optional time_major attribute to value.
9375// If not specified, defaults to true
9376func CudnnRNNV3TimeMajor(value bool) CudnnRNNV3Attr {
9377	return func(m optionalAttr) {
9378		m["time_major"] = value
9379	}
9380}
9381
9382// An RNN backed by cuDNN.
9383//
9384// Computes the RNN from the input and initial states, with respect to the params
9385// buffer. Accepts one extra input, "sequence_lengths", compared to CudnnRNN.
9386//
9387// rnn_mode: Indicates the type of the RNN model.
9388// input_mode: Indicates whether there is a linear projection between the input and
9389//
9390//	the actual computation before the first layer. 'skip_input' is only allowed
9391//	when input_size == num_units; 'auto_select' implies 'skip_input' when
9392//	input_size == num_units; otherwise, it implies 'linear_input'.
9393//
9394// direction: Indicates whether a bidirectional model will be used. Should be
9395//
9396//	"unidirectional" or "bidirectional".
9397//
9398// dropout: Dropout probability. When set to 0., dropout is disabled.
9399// seed: The 1st part of a seed to initialize dropout.
9400// seed2: The 2nd part of a seed to initialize dropout.
9401// input: If time_major is true, this is a 3-D tensor with the shape of
9402//
9403//	[seq_length, batch_size, input_size]. If time_major is false, the shape is
9404//	[batch_size, seq_length, input_size].
9405//
9406// input_h: If time_major is true, this is a 3-D tensor with the shape of
9407//
9408//	[num_layer * dir, batch_size, num_units]. If time_major is false, the shape
9409//	is [batch_size, num_layer * dir, num_units].
9410//
9411// input_c: For LSTM, a 3-D tensor with the shape of
9412//
9413//	[num_layer * dir, batch, num_units]. For other models, it is ignored.
9414//
9415// params: A 1-D tensor that contains the weights and biases in an opaque layout.
9416//
9417//	The size must be created through CudnnRNNParamsSize, and initialized
9418//	separately. Note that they might not be compatible across different
9419//	generations. So it is a good idea to save and restore them in the canonical form.
9420//
9421// sequence_lengths: a vector of lengths of each input sequence.
9422// output: If time_major is true, this is a 3-D tensor with the shape of
9423//
9424//	[seq_length, batch_size, dir * num_units]. If time_major is false, the
9425//	shape is [batch_size, seq_length, dir * num_units].
9426//
9427// output_h: The same shape as input_h.
9428// output_c: The same shape as input_c for LSTM. An empty tensor for other models.
9429// is_training: Indicates whether this operation is used for inference or
9430//
9431//	training.
9432//
9433// time_major: Indicates whether the input/output format is time major or batch
9434//
9435//	major.
9436//
9437// reserve_space: An opaque tensor that can be used in backprop calculation. It
9438//
9439//	is only produced if is_training is true.
9440func CudnnRNNV3(scope *Scope, input tf.Output, input_h tf.Output, input_c tf.Output, params tf.Output, sequence_lengths tf.Output, optional ...CudnnRNNV3Attr) (output tf.Output, output_h tf.Output, output_c tf.Output, reserve_space tf.Output, host_reserved tf.Output) {
9441	if scope.Err() != nil {
9442		return
9443	}
9444	attrs := map[string]interface{}{}
9445	for _, a := range optional {
9446		a(attrs)
9447	}
9448	opspec := tf.OpSpec{
9449		Type: "CudnnRNNV3",
9450		Input: []tf.Input{
9451			input, input_h, input_c, params, sequence_lengths,
9452		},
9453		Attrs: attrs,
9454	}
9455	op := scope.AddOperation(opspec)
9456	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
9457}
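
// Editorial example (not machine generated): a sketch highlighting the two
// features CudnnRNNV3 adds over CudnnRNNV2, batch-major layout and
// per-example sequence lengths. The lengths and the exampleCudnnRNNV3 name
// are illustrative.
func exampleCudnnRNNV3() tf.Output {
	s := NewScope()
	input := Placeholder(s, tf.Float)  // [batch_size, seq_length, input_size] with time_major=false
	inputH := Placeholder(s, tf.Float)
	inputC := Placeholder(s, tf.Float)
	params := Placeholder(s, tf.Float)
	seqLens := Const(s, []int32{5, 3, 7}) // one length per batch entry
	out, _, _, _, _ := CudnnRNNV3(s, input, inputH, inputC, params, seqLens,
		CudnnRNNV3TimeMajor(false))
	return out
}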
9458
9459// CumprodAttr is an optional argument to Cumprod.
9460type CumprodAttr func(optionalAttr)
9461
9462// CumprodExclusive sets the optional exclusive attribute to value.
9463//
9464// value: If `True`, perform exclusive cumprod.
9465// If not specified, defaults to false
9466func CumprodExclusive(value bool) CumprodAttr {
9467	return func(m optionalAttr) {
9468		m["exclusive"] = value
9469	}
9470}
9471
9472// CumprodReverse sets the optional reverse attribute to value.
9473//
9474// value: A `bool` (default: False).
9475// If not specified, defaults to false
9476func CumprodReverse(value bool) CumprodAttr {
9477	return func(m optionalAttr) {
9478		m["reverse"] = value
9479	}
9480}
9481
9482// Compute the cumulative product of the tensor `x` along `axis`.
9483//
9484// By default, this op performs an inclusive cumprod, which means that the first
9485// element of the input is identical to the first element of the output:
9486//
9487// ```python
9488// tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
9489// ```
9490//
9491// By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
9492// performed instead:
9493//
9494// ```python
9495// tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
9496// ```
9497//
9498// By setting the `reverse` kwarg to `True`, the cumprod is performed in the
9499// opposite direction:
9500//
9501// ```python
9502// tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
9503// ```
9504//
9505// This is more efficient than using separate `tf.reverse` ops.
9506//
9507// The `reverse` and `exclusive` kwargs can also be combined:
9508//
9509// ```python
9510// tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
9511// ```
9512//
9513// Arguments:
9514//
9515//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
9516//
9517// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
9518// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
9519//
9520//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
9521//
9522// `[-rank(x), rank(x))`.
9523func Cumprod(scope *Scope, x tf.Output, axis tf.Output, optional ...CumprodAttr) (out tf.Output) {
9524	if scope.Err() != nil {
9525		return
9526	}
9527	attrs := map[string]interface{}{}
9528	for _, a := range optional {
9529		a(attrs)
9530	}
9531	opspec := tf.OpSpec{
9532		Type: "Cumprod",
9533		Input: []tf.Input{
9534			x, axis,
9535		},
9536		Attrs: attrs,
9537	}
9538	op := scope.AddOperation(opspec)
9539	return op.Output(0)
9540}
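
// Editorial example (not machine generated): the Go counterpart of the Python
// snippets above, with CumprodAttr options standing in for the kwargs.
// exampleCumprod is a hypothetical name.
func exampleCumprod() tf.Output {
	s := NewScope()
	x := Const(s, []float32{2, 3, 4})
	axis := Const(s, int32(0))
	// Inclusive by default: [2, 6, 24]. With exclusive+reverse: [12, 4, 1].
	return Cumprod(s, x, axis, CumprodExclusive(true), CumprodReverse(true))
}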
9541
9542// CumsumAttr is an optional argument to Cumsum.
9543type CumsumAttr func(optionalAttr)
9544
9545// CumsumExclusive sets the optional exclusive attribute to value.
9546//
9547// value: If `True`, perform exclusive cumsum.
9548// If not specified, defaults to false
9549func CumsumExclusive(value bool) CumsumAttr {
9550	return func(m optionalAttr) {
9551		m["exclusive"] = value
9552	}
9553}
9554
9555// CumsumReverse sets the optional reverse attribute to value.
9556//
9557// value: A `bool` (default: False).
9558// If not specified, defaults to false
9559func CumsumReverse(value bool) CumsumAttr {
9560	return func(m optionalAttr) {
9561		m["reverse"] = value
9562	}
9563}
9564
9565// Compute the cumulative sum of the tensor `x` along `axis`.
9566//
9567// By default, this op performs an inclusive cumsum, which means that the first
9568// element of the input is identical to the first element of the output:
9569//
9570// ```python
9571// tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
9572// ```
9573//
9574// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
9575// performed instead:
9576//
9577// ```python
9578// tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
9579// ```
9580//
9581// By setting the `reverse` kwarg to `True`, the cumsum is performed in the
9582// opposite direction:
9583//
9584// ```python
9585// tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
9586// ```
9587//
9588// This is more efficient than using separate `tf.reverse` ops.
9589//
9590// The `reverse` and `exclusive` kwargs can also be combined:
9591//
9592// ```python
9593// tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
9594// ```
9595//
9596// Arguments:
9597//
9598//	x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
9599//
9600// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
9601// `complex128`, `qint8`, `quint8`, `qint32`, `half`.
9602//
9603//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
9604//
9605// `[-rank(x), rank(x))`.
9606func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) {
9607	if scope.Err() != nil {
9608		return
9609	}
9610	attrs := map[string]interface{}{}
9611	for _, a := range optional {
9612		a(attrs)
9613	}
9614	opspec := tf.OpSpec{
9615		Type: "Cumsum",
9616		Input: []tf.Input{
9617			x, axis,
9618		},
9619		Attrs: attrs,
9620	}
9621	op := scope.AddOperation(opspec)
9622	return op.Output(0)
9623}
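
// Editorial example (not machine generated): building and running an
// exclusive cumsum end to end. Graph finalization and session execution come
// from the core github.com/tensorflow/tensorflow/tensorflow/go package;
// exampleRunCumsum is a hypothetical name.
func exampleRunCumsum() ([]float32, error) {
	s := NewScope()
	x := Const(s, []float32{1, 2, 3})
	axis := Const(s, int32(0))
	out := Cumsum(s, x, axis, CumsumExclusive(true)) // => [0, 1, 3]
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	fetched, err := sess.Run(nil, []tf.Output{out}, nil)
	if err != nil {
		return nil, err
	}
	return fetched[0].Value().([]float32), nil
}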
9624
9625// CumulativeLogsumexpAttr is an optional argument to CumulativeLogsumexp.
9626type CumulativeLogsumexpAttr func(optionalAttr)
9627
9628// CumulativeLogsumexpExclusive sets the optional exclusive attribute to value.
9629//
9630// value: If `True`, perform exclusive cumulative log-sum-exp.
9631// If not specified, defaults to false
9632func CumulativeLogsumexpExclusive(value bool) CumulativeLogsumexpAttr {
9633	return func(m optionalAttr) {
9634		m["exclusive"] = value
9635	}
9636}
9637
9638// CumulativeLogsumexpReverse sets the optional reverse attribute to value.
9639//
9640// value: A `bool` (default: False).
9641// If not specified, defaults to false
9642func CumulativeLogsumexpReverse(value bool) CumulativeLogsumexpAttr {
9643	return func(m optionalAttr) {
9644		m["reverse"] = value
9645	}
9646}
9647
9648// Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
9649//
9650// By default, this op performs an inclusive cumulative log-sum-exp,
9651// which means that the first
9652// element of the input is identical to the first element of the output:
9653// ```python
9654// tf.math.cumulative_logsumexp([a, b, c])  # => [a, log(exp(a) + exp(b)), log(exp(a) + exp(b) + exp(c))]
9655// ```
9656//
9657// By setting the `exclusive` kwarg to `True`, an exclusive cumulative log-sum-exp is
9658// performed instead:
9659// ```python
9660// tf.math.cumulative_logsumexp([a, b, c], exclusive=True)  # => [-inf, a, log(exp(a) + exp(b))]
9661// ```
9662// Note that the neutral element of the log-sum-exp operation is `-inf`;
9663// however, for performance reasons, the minimum value representable by the
9664// floating-point type is used instead.
9665//
9666// By setting the `reverse` kwarg to `True`, the cumulative log-sum-exp is performed in the
9667// opposite direction.
9668//
9669// Arguments:
9670//
9671//	x: A `Tensor`. Must be one of the following types: `float16`, `float32`, `float64`.
9672//	axis: A `Tensor` of type `int32` (default: 0). Must be in the range
9673//
9674// `[-rank(x), rank(x))`.
9675func CumulativeLogsumexp(scope *Scope, x tf.Output, axis tf.Output, optional ...CumulativeLogsumexpAttr) (out tf.Output) {
9676	if scope.Err() != nil {
9677		return
9678	}
9679	attrs := map[string]interface{}{}
9680	for _, a := range optional {
9681		a(attrs)
9682	}
9683	opspec := tf.OpSpec{
9684		Type: "CumulativeLogsumexp",
9685		Input: []tf.Input{
9686			x, axis,
9687		},
9688		Attrs: attrs,
9689	}
9690	op := scope.AddOperation(opspec)
9691	return op.Output(0)
9692}
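
// Editorial example (not machine generated): a reversed cumulative
// log-sum-exp over a float32 vector, mirroring the Python snippets above.
// exampleCumulativeLogsumexp is a hypothetical name.
func exampleCumulativeLogsumexp() tf.Output {
	s := NewScope()
	x := Const(s, []float32{0.5, 1.5, 2.5})
	axis := Const(s, int32(0))
	return CumulativeLogsumexp(s, x, axis, CumulativeLogsumexpReverse(true))
}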
9693
9694// An op that informs a host of the global IDs of all of the TPUs in the system.
9695//
9696// Arguments:
9697//
9698//	topology: A serialized tensorflow.tpu.TopologyProto that describes the TPU topology.
9699//
9700// Returns the created operation.
9701func DTensorSetGlobalTPUArray(scope *Scope, topology tf.Output) (o *tf.Operation) {
9702	if scope.Err() != nil {
9703		return
9704	}
9705	opspec := tf.OpSpec{
9706		Type: "DTensorSetGlobalTPUArray",
9707		Input: []tf.Input{
9708			topology,
9709		},
9710	}
9711	return scope.AddOperation(opspec)
9712}
9713
9714// DataFormatDimMapAttr is an optional argument to DataFormatDimMap.
9715type DataFormatDimMapAttr func(optionalAttr)
9716
9717// DataFormatDimMapSrcFormat sets the optional src_format attribute to value.
9718//
9719// value: source data format.
9720// If not specified, defaults to "NHWC"
9721func DataFormatDimMapSrcFormat(value string) DataFormatDimMapAttr {
9722	return func(m optionalAttr) {
9723		m["src_format"] = value
9724	}
9725}
9726
9727// DataFormatDimMapDstFormat sets the optional dst_format attribute to value.
9728//
9729// value: destination data format.
9730// If not specified, defaults to "NCHW"
9731func DataFormatDimMapDstFormat(value string) DataFormatDimMapAttr {
9732	return func(m optionalAttr) {
9733		m["dst_format"] = value
9734	}
9735}
9736
9737// Returns the dimension index in the destination data format given the one in
9738//
9739// the source data format.
9740//
9741// Arguments:
9742//
9743//	x: A Tensor with each element as a dimension index in source data format.
9744//
9745// Must be in the range [-4, 4).
9746//
9747// Returns A Tensor with each element as a dimension index in destination data format.
9748func DataFormatDimMap(scope *Scope, x tf.Output, optional ...DataFormatDimMapAttr) (y tf.Output) {
9749	if scope.Err() != nil {
9750		return
9751	}
9752	attrs := map[string]interface{}{}
9753	for _, a := range optional {
9754		a(attrs)
9755	}
9756	opspec := tf.OpSpec{
9757		Type: "DataFormatDimMap",
9758		Input: []tf.Input{
9759			x,
9760		},
9761		Attrs: attrs,
9762	}
9763	op := scope.AddOperation(opspec)
9764	return op.Output(0)
9765}
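
// Editorial example (not machine generated): mapping dimension indices from
// the default NHWC source format to NCHW. exampleDataFormatDimMap is a
// hypothetical name.
func exampleDataFormatDimMap() tf.Output {
	s := NewScope()
	dims := Const(s, []int32{0, 1, 2, 3}) // N, H, W, C positions in NHWC
	// With the default src/dst formats the result is [0, 2, 3, 1]:
	// N stays first, C moves to position 1, and H and W shift right.
	return DataFormatDimMap(s, dims)
}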
9766
9767// DataFormatVecPermuteAttr is an optional argument to DataFormatVecPermute.
9768type DataFormatVecPermuteAttr func(optionalAttr)
9769
9770// DataFormatVecPermuteSrcFormat sets the optional src_format attribute to value.
9771//
9772// value: source data format.
9773// If not specified, defaults to "NHWC"
9774func DataFormatVecPermuteSrcFormat(value string) DataFormatVecPermuteAttr {
9775	return func(m optionalAttr) {
9776		m["src_format"] = value
9777	}
9778}
9779
9780// DataFormatVecPermuteDstFormat sets the optional dst_format attribute to value.
9781//
9782// value: destination data format.
9783// If not specified, defaults to "NCHW"
9784func DataFormatVecPermuteDstFormat(value string) DataFormatVecPermuteAttr {
9785	return func(m optionalAttr) {
9786		m["dst_format"] = value
9787	}
9788}
9789
9790// Permute input tensor from `src_format` to `dst_format`.
9791//
9792// Given source and destination format strings of length n=4 or 5, the input
9793// tensor must be a vector of size n or n-2, or a 2D tensor of shape
9794// (n, 2) or (n-2, 2).
9795//
9796// If the first dimension of the input tensor is n-2, it is assumed that
9797// non-spatial dimensions are omitted (i.e., `N`, `C`).
9798//
9799// For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and input:
9800// ```
9801// [1, 2, 3, 4]
9802// ```
9803// the output will be:
9804// ```
9805// [1, 4, 2, 3]
9806// ```
9807// With `src_format` of `NDHWC`, `dst_format` of `NCDHW`, and input:
9808// ```
9809// [[1, 6], [2, 7], [3, 8], [4, 9], [5, 10]]
9810// ```
9811// the output will be:
9812// ```
9813// [[1, 6], [5, 10], [2, 7], [3, 8], [4, 9]]
9814// ```
9815// With `src_format` of `NHWC`, `dst_format` of `NCHW`, and input:
9816// ```
9817// [1, 2]
9818// ```
9819// the output will be:
9820// ```
9821// [1, 2]
9822// ```
9823//
9824// Arguments:
9825//
9826//	x: Tensor of rank 1 or 2 in source data format.
9827//
9828// Returns Tensor of rank 1 or 2 in destination data format.
9829func DataFormatVecPermute(scope *Scope, x tf.Output, optional ...DataFormatVecPermuteAttr) (y tf.Output) {
9830	if scope.Err() != nil {
9831		return
9832	}
9833	attrs := map[string]interface{}{}
9834	for _, a := range optional {
9835		a(attrs)
9836	}
9837	opspec := tf.OpSpec{
9838		Type: "DataFormatVecPermute",
9839		Input: []tf.Input{
9840			x,
9841		},
9842		Attrs: attrs,
9843	}
9844	op := scope.AddOperation(opspec)
9845	return op.Output(0)
9846}
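
// Editorial example (not machine generated): permuting a full shape vector
// from NHWC to NCHW, mirroring the first worked example above.
// exampleDataFormatVecPermute is a hypothetical name.
func exampleDataFormatVecPermute() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{1, 224, 224, 3}) // a typical NHWC image shape
	// With the default src/dst formats the result is [1, 3, 224, 224].
	return DataFormatVecPermute(s, shape)
}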
9847
9848// DataServiceDatasetAttr is an optional argument to DataServiceDataset.
9849type DataServiceDatasetAttr func(optionalAttr)
9850
9851// DataServiceDatasetTaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value.
9852// If not specified, defaults to -1
9853func DataServiceDatasetTaskRefreshIntervalHintMs(value int64) DataServiceDatasetAttr {
9854	return func(m optionalAttr) {
9855		m["task_refresh_interval_hint_ms"] = value
9856	}
9857}
9858
9859// DataServiceDatasetDataTransferProtocol sets the optional data_transfer_protocol attribute to value.
9860// If not specified, defaults to ""
9861func DataServiceDatasetDataTransferProtocol(value string) DataServiceDatasetAttr {
9862	return func(m optionalAttr) {
9863		m["data_transfer_protocol"] = value
9864	}
9865}
9866
9867// DataServiceDatasetTargetWorkers sets the optional target_workers attribute to value.
9868// If not specified, defaults to "AUTO"
9869func DataServiceDatasetTargetWorkers(value string) DataServiceDatasetAttr {
9870	return func(m optionalAttr) {
9871		m["target_workers"] = value
9872	}
9873}
9874
9875// DataServiceDatasetCrossTrainerCacheOptions sets the optional cross_trainer_cache_options attribute to value.
9876// If not specified, defaults to ""
9877func DataServiceDatasetCrossTrainerCacheOptions(value string) DataServiceDatasetAttr {
9878	return func(m optionalAttr) {
9879		m["cross_trainer_cache_options"] = value
9880	}
9881}
9882
9883// Creates a dataset that reads data from the tf.data service.
9884func DataServiceDataset(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetAttr) (handle tf.Output) {
9885	if scope.Err() != nil {
9886		return
9887	}
9888	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
9889	for _, a := range optional {
9890		a(attrs)
9891	}
9892	opspec := tf.OpSpec{
9893		Type: "DataServiceDataset",
9894		Input: []tf.Input{
9895			dataset_id, processing_mode, address, protocol, job_name, max_outstanding_requests, iteration_counter,
9896		},
9897		Attrs: attrs,
9898	}
9899	op := scope.AddOperation(opspec)
9900	return op.Output(0)
9901}
9902
9903// DataServiceDatasetV2Attr is an optional argument to DataServiceDatasetV2.
9904type DataServiceDatasetV2Attr func(optionalAttr)
9905
9906// DataServiceDatasetV2TaskRefreshIntervalHintMs sets the optional task_refresh_interval_hint_ms attribute to value.
9907// If not specified, defaults to -1
9908func DataServiceDatasetV2TaskRefreshIntervalHintMs(value int64) DataServiceDatasetV2Attr {
9909	return func(m optionalAttr) {
9910		m["task_refresh_interval_hint_ms"] = value
9911	}
9912}
9913
9914// DataServiceDatasetV2DataTransferProtocol sets the optional data_transfer_protocol attribute to value.
9915// If not specified, defaults to ""
9916func DataServiceDatasetV2DataTransferProtocol(value string) DataServiceDatasetV2Attr {
9917	return func(m optionalAttr) {
9918		m["data_transfer_protocol"] = value
9919	}
9920}
9921
9922// DataServiceDatasetV2TargetWorkers sets the optional target_workers attribute to value.
9923// If not specified, defaults to "AUTO"
9924func DataServiceDatasetV2TargetWorkers(value string) DataServiceDatasetV2Attr {
9925	return func(m optionalAttr) {
9926		m["target_workers"] = value
9927	}
9928}
9929
9930// DataServiceDatasetV2CrossTrainerCacheOptions sets the optional cross_trainer_cache_options attribute to value.
9931// If not specified, defaults to ""
9932func DataServiceDatasetV2CrossTrainerCacheOptions(value string) DataServiceDatasetV2Attr {
9933	return func(m optionalAttr) {
9934		m["cross_trainer_cache_options"] = value
9935	}
9936}
9937
9938// Creates a dataset that reads data from the tf.data service.
9939func DataServiceDatasetV2(scope *Scope, dataset_id tf.Output, processing_mode tf.Output, address tf.Output, protocol tf.Output, job_name tf.Output, consumer_index tf.Output, num_consumers tf.Output, max_outstanding_requests tf.Output, iteration_counter tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DataServiceDatasetV2Attr) (handle tf.Output) {
9940	if scope.Err() != nil {
9941		return
9942	}
9943	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
9944	for _, a := range optional {
9945		a(attrs)
9946	}
9947	opspec := tf.OpSpec{
9948		Type: "DataServiceDatasetV2",
9949		Input: []tf.Input{
9950			dataset_id, processing_mode, address, protocol, job_name, consumer_index, num_consumers, max_outstanding_requests, iteration_counter,
9951		},
9952		Attrs: attrs,
9953	}
9954	op := scope.AddOperation(opspec)
9955	return op.Output(0)
9956}
9957
9958// Returns the cardinality of `input_dataset`.
9961//
9962// Arguments:
9963//
9964//	input_dataset: A variant tensor representing the dataset to return cardinality for.
9965//
9966// Returns The cardinality of `input_dataset`. Named constants are used to represent
9967// infinite and unknown cardinality.
9968func DatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
9969	if scope.Err() != nil {
9970		return
9971	}
9972	opspec := tf.OpSpec{
9973		Type: "DatasetCardinality",
9974		Input: []tf.Input{
9975			input_dataset,
9976		},
9977	}
9978	op := scope.AddOperation(opspec)
9979	return op.Output(0)
9980}
9981
9982// Creates a dataset from the given `graph_def`.
9983//
9984// Creates a dataset from the provided `graph_def`.
9985//
9986// Arguments:
9987//
9988//	graph_def: The graph representation of the dataset (as serialized GraphDef).
9989//
9990// Returns A variant tensor representing the dataset.
9991func DatasetFromGraph(scope *Scope, graph_def tf.Output) (handle tf.Output) {
9992	if scope.Err() != nil {
9993		return
9994	}
9995	opspec := tf.OpSpec{
9996		Type: "DatasetFromGraph",
9997		Input: []tf.Input{
9998			graph_def,
9999		},
10000	}
10001	op := scope.AddOperation(opspec)
10002	return op.Output(0)
10003}
10004
10005// DatasetToGraphAttr is an optional argument to DatasetToGraph.
10006type DatasetToGraphAttr func(optionalAttr)
10007
10008// DatasetToGraphStatefulWhitelist sets the optional stateful_whitelist attribute to value.
10009// If not specified, defaults to {}
10010//
10011// REQUIRES: len(value) >= 0
10012func DatasetToGraphStatefulWhitelist(value []string) DatasetToGraphAttr {
10013	return func(m optionalAttr) {
10014		m["stateful_whitelist"] = value
10015	}
10016}
10017
10018// DatasetToGraphAllowStateful sets the optional allow_stateful attribute to value.
10019// If not specified, defaults to false
10020func DatasetToGraphAllowStateful(value bool) DatasetToGraphAttr {
10021	return func(m optionalAttr) {
10022		m["allow_stateful"] = value
10023	}
10024}
10025
10026// DatasetToGraphStripDeviceAssignment sets the optional strip_device_assignment attribute to value.
10027// If not specified, defaults to false
10028func DatasetToGraphStripDeviceAssignment(value bool) DatasetToGraphAttr {
10029	return func(m optionalAttr) {
10030		m["strip_device_assignment"] = value
10031	}
10032}
10033
10034// Returns a serialized GraphDef representing `input_dataset`.
10035//
10036// Returns a graph representation for `input_dataset`.
10037//
10038// Arguments:
10039//
10040//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
10041//
10042// Returns The graph representation of the dataset (as serialized GraphDef).
10043func DatasetToGraph(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphAttr) (graph tf.Output) {
10044	if scope.Err() != nil {
10045		return
10046	}
10047	attrs := map[string]interface{}{}
10048	for _, a := range optional {
10049		a(attrs)
10050	}
10051	opspec := tf.OpSpec{
10052		Type: "DatasetToGraph",
10053		Input: []tf.Input{
10054			input_dataset,
10055		},
10056		Attrs: attrs,
10057	}
10058	op := scope.AddOperation(opspec)
10059	return op.Output(0)
10060}
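
// Editorial example (not machine generated): round-tripping a dataset through
// its serialized GraphDef via DatasetToGraph and DatasetFromGraph. It assumes
// the RangeDataset wrapper generated elsewhere in this file;
// exampleDatasetGraphRoundTrip is a hypothetical name.
func exampleDatasetGraphRoundTrip() (serialized, rebuilt tf.Output) {
	s := NewScope()
	ds := RangeDataset(s,
		Const(s, int64(0)), Const(s, int64(10)), Const(s, int64(1)),
		[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})
	serialized = DatasetToGraph(s, ds, DatasetToGraphStripDeviceAssignment(true))
	rebuilt = DatasetFromGraph(s, serialized)
	return serialized, rebuilt
}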
10061
10062// DatasetToGraphV2Attr is an optional argument to DatasetToGraphV2.
10063type DatasetToGraphV2Attr func(optionalAttr)
10064
10065// DatasetToGraphV2ExternalStatePolicy sets the optional external_state_policy attribute to value.
10066// If not specified, defaults to 0
10067func DatasetToGraphV2ExternalStatePolicy(value int64) DatasetToGraphV2Attr {
10068	return func(m optionalAttr) {
10069		m["external_state_policy"] = value
10070	}
10071}
10072
10073// DatasetToGraphV2StripDeviceAssignment sets the optional strip_device_assignment attribute to value.
10074// If not specified, defaults to false
10075func DatasetToGraphV2StripDeviceAssignment(value bool) DatasetToGraphV2Attr {
10076	return func(m optionalAttr) {
10077		m["strip_device_assignment"] = value
10078	}
10079}
10080
10081// Returns a serialized GraphDef representing `input_dataset`.
10082//
10083// Returns a graph representation for `input_dataset`.
10084//
10085// Arguments:
10086//
10087//	input_dataset: A variant tensor representing the dataset to return the graph representation for.
10088//
10089// Returns The graph representation of the dataset (as serialized GraphDef).
10090func DatasetToGraphV2(scope *Scope, input_dataset tf.Output, optional ...DatasetToGraphV2Attr) (graph tf.Output) {
10091	if scope.Err() != nil {
10092		return
10093	}
10094	attrs := map[string]interface{}{}
10095	for _, a := range optional {
10096		a(attrs)
10097	}
10098	opspec := tf.OpSpec{
10099		Type: "DatasetToGraphV2",
10100		Input: []tf.Input{
10101			input_dataset,
10102		},
10103		Attrs: attrs,
10104	}
10105	op := scope.AddOperation(opspec)
10106	return op.Output(0)
10107}
10108
10109// DatasetToSingleElementAttr is an optional argument to DatasetToSingleElement.
10110type DatasetToSingleElementAttr func(optionalAttr)
10111
10112// DatasetToSingleElementMetadata sets the optional metadata attribute to value.
10113// If not specified, defaults to ""
10114func DatasetToSingleElementMetadata(value string) DatasetToSingleElementAttr {
10115	return func(m optionalAttr) {
10116		m["metadata"] = value
10117	}
10118}
10119
10120// Outputs the single element from the given dataset.
10121//
10122// Arguments:
10123//
10124//	dataset: A handle to a dataset that contains a single element.
10125//
10126// Returns The components of the single element of `dataset`.
10127func DatasetToSingleElement(scope *Scope, dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DatasetToSingleElementAttr) (components []tf.Output) {
10128	if scope.Err() != nil {
10129		return
10130	}
10131	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
10132	for _, a := range optional {
10133		a(attrs)
10134	}
10135	opspec := tf.OpSpec{
10136		Type: "DatasetToSingleElement",
10137		Input: []tf.Input{
10138			dataset,
10139		},
10140		Attrs: attrs,
10141	}
10142	op := scope.AddOperation(opspec)
10143	if scope.Err() != nil {
10144		return
10145	}
10146	var idx int
10147	var err error
10148	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
10149		scope.UpdateErr("DatasetToSingleElement", err)
10150		return
10151	}
10152	return components
10153}
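
// Editorial example (not machine generated): extracting the components of a
// single-element dataset. It assumes the TensorDataset wrapper generated
// elsewhere in this file; exampleDatasetToSingleElement is a hypothetical
// name, and the declared types/shapes must match the element exactly.
func exampleDatasetToSingleElement() []tf.Output {
	s := NewScope()
	ds := TensorDataset(s,
		[]tf.Output{Const(s, []int32{1, 2, 3})},
		[]tf.Shape{tf.MakeShape(3)})
	return DatasetToSingleElement(s, ds,
		[]tf.DataType{tf.Int32}, []tf.Shape{tf.MakeShape(3)})
}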
10154
10155// Writes the given dataset to the given file using the TFRecord format.
10156//
10157// Arguments:
10158//
10159//	input_dataset: A variant tensor representing the dataset to write.
10160//	filename: A scalar string tensor representing the filename to use.
10161//	compression_type: A scalar string tensor containing either (i) the empty string (no
10162//
10163// compression), (ii) "ZLIB", or (iii) "GZIP".
10164//
10165// Returns the created operation.
10166func DatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
10167	if scope.Err() != nil {
10168		return
10169	}
10170	opspec := tf.OpSpec{
10171		Type: "DatasetToTFRecord",
10172		Input: []tf.Input{
10173			input_dataset, filename, compression_type,
10174		},
10175	}
10176	return scope.AddOperation(opspec)
10177}
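
// Editorial example (not machine generated): writing a small dataset to a
// GZIP-compressed TFRecord file. The returned *tf.Operation only performs the
// write when passed as a session-run target; the path is illustrative,
// exampleDatasetToTFRecord is a hypothetical name, and RangeDataset is the
// wrapper generated elsewhere in this file.
func exampleDatasetToTFRecord() *tf.Operation {
	s := NewScope()
	ds := RangeDataset(s,
		Const(s, int64(0)), Const(s, int64(5)), Const(s, int64(1)),
		[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})
	return DatasetToTFRecord(s, ds,
		Const(s, "/tmp/range.tfrecord"), Const(s, "GZIP"))
}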
10178
10179// Identity op for gradient debugging.
10180//
10181// This op is hidden from the public API in Python. It is used by TensorFlow Debugger to
10182// register gradient tensors for gradient debugging.
10183// This op operates on non-reference-type tensors.
10184func DebugGradientIdentity(scope *Scope, input tf.Output) (output tf.Output) {
10185	if scope.Err() != nil {
10186		return
10187	}
10188	opspec := tf.OpSpec{
10189		Type: "DebugGradientIdentity",
10190		Input: []tf.Input{
10191			input,
10192		},
10193	}
10194	op := scope.AddOperation(opspec)
10195	return op.Output(0)
10196}
10197
10198// DebugIdentityAttr is an optional argument to DebugIdentity.
10199type DebugIdentityAttr func(optionalAttr)
10200
10201// DebugIdentityDeviceName sets the optional device_name attribute to value.
10202//
10203// value: Name of the device on which the tensor resides.
10204// If not specified, defaults to ""
10205func DebugIdentityDeviceName(value string) DebugIdentityAttr {
10206	return func(m optionalAttr) {
10207		m["device_name"] = value
10208	}
10209}
10210
10211// DebugIdentityTensorName sets the optional tensor_name attribute to value.
10212//
10213// value: Name of the input tensor.
10214// If not specified, defaults to ""
10215func DebugIdentityTensorName(value string) DebugIdentityAttr {
10216	return func(m optionalAttr) {
10217		m["tensor_name"] = value
10218	}
10219}
10220
10221// DebugIdentityDebugUrls sets the optional debug_urls attribute to value.
10222//
10223// value: List of URLs to debug targets, e.g.,
10224//
10225//	file:///foo/tfdbg_dump, grpc://localhost:11011
10226//
10227// If not specified, defaults to {}
10228func DebugIdentityDebugUrls(value []string) DebugIdentityAttr {
10229	return func(m optionalAttr) {
10230		m["debug_urls"] = value
10231	}
10232}
10233
10234// DebugIdentityGatedGrpc sets the optional gated_grpc attribute to value.
10235//
10236// value: Whether this op will be gated. If any of the debug_urls of this
10237//
10238//	debug node is of the grpc:// scheme, when the value of this attribute is set
10239//	to True, the data will not actually be sent via the grpc stream unless this
10240//	debug op has been enabled at the debug_url. If all of the debug_urls of this
10241//	debug node are of the grpc:// scheme and the debug op is enabled at none of
10242//	them, the output will be an empty Tensor.
10243//
10244// If not specified, defaults to false
10245func DebugIdentityGatedGrpc(value bool) DebugIdentityAttr {
10246	return func(m optionalAttr) {
10247		m["gated_grpc"] = value
10248	}
10249}
10250
10251// Provides an identity mapping of the non-Ref type input tensor for debugging.
10254//
10255// Arguments:
10256//
10257//	input: Input tensor, non-Reference type
10258func DebugIdentity(scope *Scope, input tf.Output, optional ...DebugIdentityAttr) (output tf.Output) {
10259	if scope.Err() != nil {
10260		return
10261	}
10262	attrs := map[string]interface{}{}
10263	for _, a := range optional {
10264		a(attrs)
10265	}
10266	opspec := tf.OpSpec{
10267		Type: "DebugIdentity",
10268		Input: []tf.Input{
10269			input,
10270		},
10271		Attrs: attrs,
10272	}
10273	op := scope.AddOperation(opspec)
10274	return op.Output(0)
10275}
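
// Editorial example (not machine generated): tapping a tensor with
// DebugIdentity so its values are dumped to a tfdbg file target while the op
// passes the tensor through unchanged. The dump URL is illustrative and
// exampleDebugIdentity is a hypothetical name.
func exampleDebugIdentity(s *Scope, tensor tf.Output) tf.Output {
	return DebugIdentity(s, tensor,
		DebugIdentityTensorName("my_tensor"),
		DebugIdentityDebugUrls([]string{"file:///tmp/tfdbg_dump"}))
}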
10276
10277// DebugIdentityV2Attr is an optional argument to DebugIdentityV2.
10278type DebugIdentityV2Attr func(optionalAttr)
10279
10280// DebugIdentityV2TfdbgContextId sets the optional tfdbg_context_id attribute to value.
10281//
10282// value: A tfdbg-generated ID for the context that the op belongs to,
10283//
10284//	e.g., a concrete compiled tf.function.
10285//
10286// If not specified, defaults to ""
10287func DebugIdentityV2TfdbgContextId(value string) DebugIdentityV2Attr {
10288	return func(m optionalAttr) {
10289		m["tfdbg_context_id"] = value
10290	}
10291}
10292
10293// DebugIdentityV2OpName sets the optional op_name attribute to value.
10294//
10295// value: Optional. Name of the op that the debug op is concerned with.
10296//
10297//	Used only for single-tensor trace.
10298//
10299// If not specified, defaults to ""
10300func DebugIdentityV2OpName(value string) DebugIdentityV2Attr {
10301	return func(m optionalAttr) {
10302		m["op_name"] = value
10303	}
10304}
10305
10306// DebugIdentityV2OutputSlot sets the optional output_slot attribute to value.
10307//
10308// value: Optional. Output slot index of the tensor that the debug op
10309//
10310//	is concerned with. Used only for single-tensor trace.
10311//
10312// If not specified, defaults to -1
10313func DebugIdentityV2OutputSlot(value int64) DebugIdentityV2Attr {
10314	return func(m optionalAttr) {
10315		m["output_slot"] = value
10316	}
10317}
10318
10319// DebugIdentityV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
10320//
10321// value: TensorDebugMode enum value. See debug_event.proto for details.
10322// If not specified, defaults to -1
10323func DebugIdentityV2TensorDebugMode(value int64) DebugIdentityV2Attr {
10324	return func(m optionalAttr) {
10325		m["tensor_debug_mode"] = value
10326	}
10327}
10328
10329// DebugIdentityV2DebugUrls sets the optional debug_urls attribute to value.
10330//
10331// value: List of URLs to debug targets, e.g., file:///foo/tfdbg_dump.
10332// If not specified, defaults to {}
10333func DebugIdentityV2DebugUrls(value []string) DebugIdentityV2Attr {
10334	return func(m optionalAttr) {
10335		m["debug_urls"] = value
10336	}
10337}
10338
10339// DebugIdentityV2CircularBufferSize sets the optional circular_buffer_size attribute to value.
10340// If not specified, defaults to 1000
10341func DebugIdentityV2CircularBufferSize(value int64) DebugIdentityV2Attr {
10342	return func(m optionalAttr) {
10343		m["circular_buffer_size"] = value
10344	}
10345}
10346
10347// DebugIdentityV2TfdbgRunId sets the optional tfdbg_run_id attribute to value.
10348// If not specified, defaults to ""
10349func DebugIdentityV2TfdbgRunId(value string) DebugIdentityV2Attr {
10350	return func(m optionalAttr) {
10351		m["tfdbg_run_id"] = value
10352	}
10353}
10354
10355// Debug Identity V2 Op.
10356//
10357// Provides an identity mapping from input to output, while writing the content of
10358// the input tensor by calling DebugEventsWriter.
10359//
10360// The semantics of the input tensor depends on tensor_debug_mode. In typical
10361// usage, the input tensor comes directly from the user computation only when
10362// tensor_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a
10363// list of all the possible values of tensor_debug_mode). For the other debug modes,
10364// the input tensor should be produced by an additional op or subgraph that
10365// computes summary information about one or more tensors.
10366//
10367// Arguments:
10368//
10369//	input: Input tensor, non-Reference type
10370func DebugIdentityV2(scope *Scope, input tf.Output, optional ...DebugIdentityV2Attr) (output tf.Output) {
10371	if scope.Err() != nil {
10372		return
10373	}
10374	attrs := map[string]interface{}{}
10375	for _, a := range optional {
10376		a(attrs)
10377	}
10378	opspec := tf.OpSpec{
10379		Type: "DebugIdentityV2",
10380		Input: []tf.Input{
10381			input,
10382		},
10383		Attrs: attrs,
10384	}
10385	op := scope.AddOperation(opspec)
10386	return op.Output(0)
10387}
10388
10389// DebugNanCountAttr is an optional argument to DebugNanCount.
10390type DebugNanCountAttr func(optionalAttr)
10391
10392// DebugNanCountDeviceName sets the optional device_name attribute to value.
10393// If not specified, defaults to ""
10394func DebugNanCountDeviceName(value string) DebugNanCountAttr {
10395	return func(m optionalAttr) {
10396		m["device_name"] = value
10397	}
10398}
10399
10400// DebugNanCountTensorName sets the optional tensor_name attribute to value.
10401//
10402// value: Name of the input tensor.
10403// If not specified, defaults to ""
10404func DebugNanCountTensorName(value string) DebugNanCountAttr {
10405	return func(m optionalAttr) {
10406		m["tensor_name"] = value
10407	}
10408}
10409
10410// DebugNanCountDebugUrls sets the optional debug_urls attribute to value.
10411//
10412// value: List of URLs to debug targets, e.g.,
10413//
10414//	file:///foo/tfdbg_dump, grpc://localhost:11011.
10415//
10416// If not specified, defaults to {}
10417func DebugNanCountDebugUrls(value []string) DebugNanCountAttr {
10418	return func(m optionalAttr) {
10419		m["debug_urls"] = value
10420	}
10421}
10422
10423// DebugNanCountGatedGrpc sets the optional gated_grpc attribute to value.
10424//
10425// value:  Whether this op will be gated. If any of the debug_urls of this
10426//
10427//	debug node is of the grpc:// scheme, when the value of this attribute is set
10428//	to True, the data will not actually be sent via the grpc stream unless this
10429//	debug op has been enabled at the debug_url. If all of the debug_urls of this
10430//	debug node are of the grpc:// scheme and the debug op is enabled at none of
10431//	them, the output will be an empty Tensor.
10432//
10433// If not specified, defaults to false
10434func DebugNanCountGatedGrpc(value bool) DebugNanCountAttr {
10435	return func(m optionalAttr) {
10436		m["gated_grpc"] = value
10437	}
10438}
10439
10440// Debug NaN Value Counter Op.
10441//
10442// Counts the number of NaNs in the input tensor, for debugging.
10443//
10444// Arguments:
10445//
10446//	input: Input tensor, non-Reference type.
10447func DebugNanCount(scope *Scope, input tf.Output, optional ...DebugNanCountAttr) (output tf.Output) {
10448	if scope.Err() != nil {
10449		return
10450	}
10451	attrs := map[string]interface{}{}
10452	for _, a := range optional {
10453		a(attrs)
10454	}
10455	opspec := tf.OpSpec{
10456		Type: "DebugNanCount",
10457		Input: []tf.Input{
10458			input,
10459		},
10460		Attrs: attrs,
10461	}
10462	op := scope.AddOperation(opspec)
10463	return op.Output(0)
10464}
10465
10466// DebugNumericSummaryAttr is an optional argument to DebugNumericSummary.
10467type DebugNumericSummaryAttr func(optionalAttr)
10468
10469// DebugNumericSummaryDeviceName sets the optional device_name attribute to value.
10470// If not specified, defaults to ""
10471func DebugNumericSummaryDeviceName(value string) DebugNumericSummaryAttr {
10472	return func(m optionalAttr) {
10473		m["device_name"] = value
10474	}
10475}
10476
10477// DebugNumericSummaryTensorName sets the optional tensor_name attribute to value.
10478//
10479// value: Name of the input tensor.
10480// If not specified, defaults to ""
10481func DebugNumericSummaryTensorName(value string) DebugNumericSummaryAttr {
10482	return func(m optionalAttr) {
10483		m["tensor_name"] = value
10484	}
10485}
10486
10487// DebugNumericSummaryDebugUrls sets the optional debug_urls attribute to value.
10488//
10489// value: List of URLs to debug targets, e.g.,
10490//
10491//	file:///foo/tfdbg_dump, grpc://localhost:11011.
10492//
10493// If not specified, defaults to {}
10494func DebugNumericSummaryDebugUrls(value []string) DebugNumericSummaryAttr {
10495	return func(m optionalAttr) {
10496		m["debug_urls"] = value
10497	}
10498}
10499
10500// DebugNumericSummaryLowerBound sets the optional lower_bound attribute to value.
10501//
10502// value: (float) Values less than or equal to this lower bound are included in the
10503//
10504//	generalized -inf count. Default: -inf.
10505//
10506// If not specified, defaults to -inf
10507func DebugNumericSummaryLowerBound(value float32) DebugNumericSummaryAttr {
10508	return func(m optionalAttr) {
10509		m["lower_bound"] = value
10510	}
10511}
10512
10513// DebugNumericSummaryUpperBound sets the optional upper_bound attribute to value.
10514//
10515// value: (float) Values greater than or equal to this upper bound are included in the
10516//
10517//	generalized +inf count. Default: +inf.
10518//
10519// If not specified, defaults to inf
10520func DebugNumericSummaryUpperBound(value float32) DebugNumericSummaryAttr {
10521	return func(m optionalAttr) {
10522		m["upper_bound"] = value
10523	}
10524}
10525
10526// DebugNumericSummaryMuteIfHealthy sets the optional mute_if_healthy attribute to value.
10527//
10528// value: (bool) Do not send data to the debug URLs unless at least one
10529//
10530//	of elements [2], [3] and [7] (i.e., the nan count and the generalized -inf and
10531//	inf counts) is non-zero.
10532//
10533// If not specified, defaults to false
10534func DebugNumericSummaryMuteIfHealthy(value bool) DebugNumericSummaryAttr {
10535	return func(m optionalAttr) {
10536		m["mute_if_healthy"] = value
10537	}
10538}
10539
10540// DebugNumericSummaryGatedGrpc sets the optional gated_grpc attribute to value.
10541//
10542// value: Whether this op will be gated. If any of the debug_urls of this
10543//
10544//	debug node is of the grpc:// scheme, when the value of this attribute is set
10545//	to True, the data will not actually be sent via the grpc stream unless this
10546//	debug op has been enabled at the debug_url. If all of the debug_urls of this
10547//	debug node are of the grpc:// scheme and the debug op is enabled at none of
10548//	them, the output will be an empty Tensor.
10549//
10550// If not specified, defaults to false
10551func DebugNumericSummaryGatedGrpc(value bool) DebugNumericSummaryAttr {
10552	return func(m optionalAttr) {
10553		m["gated_grpc"] = value
10554	}
10555}
10556
10557// Debug Numeric Summary Op.
10558//
10559// Provides a basic summary of numeric value types, range, and distribution.
10560//
10561// output: A double tensor of shape [14 + nDimensions], where nDimensions is the
10562//
10563//	number of dimensions of the tensor's shape. The elements of output are:
10564//	[0]: is initialized (1.0) or not (0.0).
10565//	[1]: total number of elements
10566//	[2]: NaN element count
10567//	[3]: generalized -inf count: elements <= lower_bound. lower_bound is -inf by
10568//	  default.
10569//	[4]: negative element count (excluding -inf), if lower_bound is the default
10570//	  -inf. Otherwise, this is the count of elements > lower_bound and < 0.
10571//	[5]: zero element count
10572//	[6]: positive element count (excluding +inf), if upper_bound is the default
10573//	  +inf. Otherwise, this is the count of elements < upper_bound and > 0.
10574//	[7]: generalized +inf count, elements >= upper_bound. upper_bound is +inf by
10575//	  default.
10576//
10577// Output elements [1:8] are all zero if the tensor is uninitialized.
10578//
10579//	[8]: minimum of all non-inf and non-NaN elements.
10580//	     If uninitialized or no such element exists: +inf.
10581//	[9]: maximum of all non-inf and non-NaN elements.
10582//	     If uninitialized or no such element exists: -inf.
10583//	[10]: mean of all non-inf and non-NaN elements.
10584//	      If uninitialized or no such element exists: NaN.
10585//	[11]: variance of all non-inf and non-NaN elements.
10586//	      If uninitialized or no such element exists: NaN.
10587//	[12]: Data type of the tensor encoded as an enum integer. See the DataType
10588//	      proto for more details.
10589//	[13]: Number of dimensions of the tensor (ndims).
10590//	[14+]: Sizes of the dimensions.
10591//
10592// Arguments:
10593//
10594//	input: Input tensor, non-Reference type.
10595func DebugNumericSummary(scope *Scope, input tf.Output, optional ...DebugNumericSummaryAttr) (output tf.Output) {
10596	if scope.Err() != nil {
10597		return
10598	}
10599	attrs := map[string]interface{}{}
10600	for _, a := range optional {
10601		a(attrs)
10602	}
10603	opspec := tf.OpSpec{
10604		Type: "DebugNumericSummary",
10605		Input: []tf.Input{
10606			input,
10607		},
10608		Attrs: attrs,
10609	}
10610	op := scope.AddOperation(opspec)
10611	return op.Output(0)
10612}
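
// Editorial note: the following is a hypothetical usage sketch, not part of
// the generated API. It assumes a fresh root scope and a float placeholder;
// the function name and the bound values are illustrative only.
func exampleDebugNumericSummary() tf.Output {
	s := NewScope()
	// The tensor to inspect.
	x := Placeholder(s, tf.Float)
	// Summary vector of shape [14 + ndims]; elements [2], [3] and [7] hold
	// the NaN and generalized -inf/+inf counts described above.
	return DebugNumericSummary(s, x,
		DebugNumericSummaryLowerBound(-1e6),
		DebugNumericSummaryUpperBound(1e6),
		DebugNumericSummaryMuteIfHealthy(true),
	)
}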
10613
10614// DebugNumericSummaryV2Attr is an optional argument to DebugNumericSummaryV2.
10615type DebugNumericSummaryV2Attr func(optionalAttr)
10616
10617// DebugNumericSummaryV2OutputDtype sets the optional output_dtype attribute to value.
10618//
10619// value: Optional. The type of the output. Can be float32 or float64 (default: float32).
10620// If not specified, defaults to DT_FLOAT
10621func DebugNumericSummaryV2OutputDtype(value tf.DataType) DebugNumericSummaryV2Attr {
10622	return func(m optionalAttr) {
10623		m["output_dtype"] = value
10624	}
10625}
10626
10627// DebugNumericSummaryV2TensorDebugMode sets the optional tensor_debug_mode attribute to value.
10628//
10629// value: Tensor debug mode: the mode in which the input tensor is summarized
10630//
10631//	by the op. See the TensorDebugMode enum in
10632//	tensorflow/core/protobuf/debug_event.proto for details.
10633//
10634// Supported values:
10635//
10636//	2 (CURT_HEALTH): Output a float32/64 tensor of shape [2]. The 1st
10637//	element is the tensor_id, if provided, and -1 otherwise. The 2nd
10638//	element is a bit which is set to 1 if the input tensor has an
10639//	infinity or nan value, or zero otherwise.
10640//
10641//	3 (CONCISE_HEALTH): Output a float32/64 tensor of shape [5]. The 1st
10642//	element is the tensor_id, if provided, and -1 otherwise. The
10643//	remaining four slots are the total number of elements, -infs,
10644//	+infs, and nans in the input tensor respectively.
10645//
10646//	4 (FULL_HEALTH): Output a float32/64 tensor of shape [11]. The 1st
10647//	element is the tensor_id, if provided, and -1 otherwise. The 2nd
10648//	element is the device_id, if provided, and -1 otherwise. The 3rd
10649//	element holds the datatype value of the input tensor according
10650//	to the enumerated type in tensorflow/core/framework/types.proto.
10651//	The remaining elements hold the total number of elements, -infs,
10652//	+infs, nans, negative finite numbers, zeros, and positive finite
10653//	numbers in the input tensor respectively.
10654//
10655//	5 (SHAPE): Output a float32/64 tensor of shape [10]. The 1st
10656//	element is the tensor_id, if provided, and -1 otherwise. The 2nd
10657//	element holds the datatype value of the input tensor according
10658//	to the enumerated type in tensorflow/core/framework/types.proto.
10659//	The 3rd element holds the rank of the tensor. The 4th element holds
10660//	the number of elements within the tensor. Finally the remaining 6
10661//	elements hold the shape of the tensor. If the rank of the tensor
10662//	is lower than 6, the shape is right padded with zeros. If the rank
10663//	is greater than 6, the head of the shape is truncated.
10664//
10665//	6 (FULL_NUMERICS): Output a float32/64 tensor of shape [22]. The 1st
10666//	element is the tensor_id, if provided, and -1 otherwise. The 2nd
10667//	element is the device_id, if provided, and -1 otherwise. The 3rd
10668//	element holds the datatype value of the input tensor according
10669//	to the enumerated type in tensorflow/core/framework/types.proto.
10670//	The 4th element holds the rank of the tensor. The 5th to 11th
10671//	elements hold the shape of the tensor. If the rank of the tensor
10672//	is lower than 6, the shape is right padded with zeros. If the rank
10673//	is greater than 6, the head of the shape is truncated. The 12th to
10674//	18th elements hold the number of elements, -infs, +infs, nans,
10675//	denormal floats, negative finite numbers, zeros, and positive
10676//	finite numbers in the input tensor respectively. The final four
10677//	elements hold the min value, max value, mean, and variance of the
10678//	input tensor.
10679//
10680//	8 (REDUCE_INF_NAN_THREE_SLOTS): Output a float32/64 tensor of shape
10681//	[3]. The 1st element is -inf if any element of the input tensor
10682//	is -inf, or zero otherwise. The 2nd element is +inf if any element
10683//	of the input tensor is +inf, or zero otherwise.  The 3rd element is
10684//	nan if any element of the input tensor is nan, or zero otherwise.
10685//
10686// If not specified, defaults to -1
10687func DebugNumericSummaryV2TensorDebugMode(value int64) DebugNumericSummaryV2Attr {
10688	return func(m optionalAttr) {
10689		m["tensor_debug_mode"] = value
10690	}
10691}
10692
10693// DebugNumericSummaryV2TensorId sets the optional tensor_id attribute to value.
10694//
10695// value: Optional. An integer identifier for the tensor being summarized by this op.
10696// If not specified, defaults to -1
10697func DebugNumericSummaryV2TensorId(value int64) DebugNumericSummaryV2Attr {
10698	return func(m optionalAttr) {
10699		m["tensor_id"] = value
10700	}
10701}
10702
10703// Debug Numeric Summary V2 Op.
10704//
10705// Computes a numeric summary of the input tensor. The shape of the output
10706// depends on the tensor_debug_mode attribute.
10707// This op is used internally by TensorFlow Debugger (tfdbg) v2.
10708//
10709// Arguments:
10710//
10711//	input: Input tensor, to be summarized by the op.
10712func DebugNumericSummaryV2(scope *Scope, input tf.Output, optional ...DebugNumericSummaryV2Attr) (output tf.Output) {
10713	if scope.Err() != nil {
10714		return
10715	}
10716	attrs := map[string]interface{}{}
10717	for _, a := range optional {
10718		a(attrs)
10719	}
10720	opspec := tf.OpSpec{
10721		Type: "DebugNumericSummaryV2",
10722		Input: []tf.Input{
10723			input,
10724		},
10725		Attrs: attrs,
10726	}
10727	op := scope.AddOperation(opspec)
10728	return op.Output(0)
10729}
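
// Editorial note: a hypothetical sketch (not generated code) requesting the
// CONCISE_HEALTH summary (tensor_debug_mode 3, per the list above) as float64.
func exampleDebugNumericSummaryV2() tf.Output {
	s := NewScope()
	x := Placeholder(s, tf.Float)
	return DebugNumericSummaryV2(s, x,
		DebugNumericSummaryV2TensorDebugMode(3), // 3 = CONCISE_HEALTH
		DebugNumericSummaryV2OutputDtype(tf.Double),
		DebugNumericSummaryV2TensorId(42), // arbitrary illustrative id
	)
}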
10730
10731// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg.
10732type DecodeAndCropJpegAttr func(optionalAttr)
10733
10734// DecodeAndCropJpegChannels sets the optional channels attribute to value.
10735//
10736// value: Number of color channels for the decoded image.
10737// If not specified, defaults to 0
10738func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr {
10739	return func(m optionalAttr) {
10740		m["channels"] = value
10741	}
10742}
10743
10744// DecodeAndCropJpegRatio sets the optional ratio attribute to value.
10745//
10746// value: Downscaling ratio.
10747// If not specified, defaults to 1
10748func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr {
10749	return func(m optionalAttr) {
10750		m["ratio"] = value
10751	}
10752}
10753
10754// DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
10755//
10756// value: If true use a slower but nicer upscaling of the
10757// chroma planes (yuv420/422 only).
10758// If not specified, defaults to true
10759func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr {
10760	return func(m optionalAttr) {
10761		m["fancy_upscaling"] = value
10762	}
10763}
10764
10765// DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
10766//
10767// value: If true try to recover an image from truncated input.
10768// If not specified, defaults to false
10769func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr {
10770	return func(m optionalAttr) {
10771		m["try_recover_truncated"] = value
10772	}
10773}
10774
10775// DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
10776//
10777// value: The minimum required fraction of lines before a truncated
10778// input is accepted.
10779// If not specified, defaults to 1
10780func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr {
10781	return func(m optionalAttr) {
10782		m["acceptable_fraction"] = value
10783	}
10784}
10785
10786// DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value.
10787//
10788// value: string specifying a hint about the algorithm used for
10789// decompression.  Defaults to "" which maps to a system-specific
10790// default.  Currently valid values are ["INTEGER_FAST",
10791// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
10792// jpeg library changes to a version that does not have that specific
10793// option.)
10794// If not specified, defaults to ""
10795func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr {
10796	return func(m optionalAttr) {
10797		m["dct_method"] = value
10798	}
10799}
10800
10801// Decode and crop a JPEG-encoded image to a uint8 tensor.
10802//
10803// The attr `channels` indicates the desired number of color channels for the
10804// decoded image.
10805//
10806// Accepted values are:
10807//
10808// *   0: Use the number of channels in the JPEG-encoded image.
10809// *   1: output a grayscale image.
10810// *   3: output an RGB image.
10811//
10812// If needed, the JPEG-encoded image is transformed to match the requested number
10813// of color channels.
10814//
10815// The attr `ratio` allows downscaling the image by an integer factor during
10816// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
10817// downscaling the image later.
10818//
10819// It is equivalent to a combination of decode and crop, but much faster
10820// because only part of the JPEG image is decoded.
10821//
10822// Arguments:
10823//
10824//	contents: 0-D.  The JPEG-encoded image.
10825//	crop_window: 1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].
10826//
10827// Returns 3-D with shape `[height, width, channels]`.
10828func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) {
10829	if scope.Err() != nil {
10830		return
10831	}
10832	attrs := map[string]interface{}{}
10833	for _, a := range optional {
10834		a(attrs)
10835	}
10836	opspec := tf.OpSpec{
10837		Type: "DecodeAndCropJpeg",
10838		Input: []tf.Input{
10839			contents, crop_window,
10840		},
10841		Attrs: attrs,
10842	}
10843	op := scope.AddOperation(opspec)
10844	return op.Output(0)
10845}
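
// Editorial note: a hypothetical sketch (not generated code) decoding only a
// 100x200 crop at offset (y=10, x=20) of a JPEG string, downscaled by 2. The
// crop window follows the [crop_y, crop_x, crop_height, crop_width] layout
// documented above.
func exampleDecodeAndCropJpeg() tf.Output {
	s := NewScope()
	contents := Placeholder(s, tf.String)
	window := Const(s, []int32{10, 20, 100, 200})
	return DecodeAndCropJpeg(s, contents, window,
		DecodeAndCropJpegChannels(3),
		DecodeAndCropJpegRatio(2),
	)
}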
10846
10847// Decode web-safe base64-encoded strings.
10848//
10849// Input may or may not have padding at the end. See
10850// [EncodeBase64](https://www.tensorflow.org/api_docs/python/tf/io/encode_base64)
10851// for padding. Web-safe means that input must use - and _ instead of + and /.
10852//
10853// Arguments:
10854//
10855//	input: Base64 strings to decode.
10856//
10857// Returns Decoded strings.
10858func DecodeBase64(scope *Scope, input tf.Output) (output tf.Output) {
10859	if scope.Err() != nil {
10860		return
10861	}
10862	opspec := tf.OpSpec{
10863		Type: "DecodeBase64",
10864		Input: []tf.Input{
10865			input,
10866		},
10867	}
10868	op := scope.AddOperation(opspec)
10869	return op.Output(0)
10870}
10871
10872// DecodeBmpAttr is an optional argument to DecodeBmp.
10873type DecodeBmpAttr func(optionalAttr)
10874
10875// DecodeBmpChannels sets the optional channels attribute to value.
10876// If not specified, defaults to 0
10877func DecodeBmpChannels(value int64) DecodeBmpAttr {
10878	return func(m optionalAttr) {
10879		m["channels"] = value
10880	}
10881}
10882
10883// Decode the first frame of a BMP-encoded image to a uint8 tensor.
10884//
10885// The attr `channels` indicates the desired number of color channels for the
10886// decoded image.
10887//
10888// Accepted values are:
10889//
10890// *   0: Use the number of channels in the BMP-encoded image.
10891// *   3: output an RGB image.
10892// *   4: output an RGBA image.
10893//
10894// Arguments:
10895//
10896//	contents: 0-D.  The BMP-encoded image.
10897//
10898// Returns 3-D with shape `[height, width, channels]`. RGB order
10899func DecodeBmp(scope *Scope, contents tf.Output, optional ...DecodeBmpAttr) (image tf.Output) {
10900	if scope.Err() != nil {
10901		return
10902	}
10903	attrs := map[string]interface{}{}
10904	for _, a := range optional {
10905		a(attrs)
10906	}
10907	opspec := tf.OpSpec{
10908		Type: "DecodeBmp",
10909		Input: []tf.Input{
10910			contents,
10911		},
10912		Attrs: attrs,
10913	}
10914	op := scope.AddOperation(opspec)
10915	return op.Output(0)
10916}
10917
10918// DecodeCSVAttr is an optional argument to DecodeCSV.
10919type DecodeCSVAttr func(optionalAttr)
10920
10921// DecodeCSVFieldDelim sets the optional field_delim attribute to value.
10922//
10923// value: char delimiter to separate fields in a record.
10924// If not specified, defaults to ","
10925func DecodeCSVFieldDelim(value string) DecodeCSVAttr {
10926	return func(m optionalAttr) {
10927		m["field_delim"] = value
10928	}
10929}
10930
10931// DecodeCSVUseQuoteDelim sets the optional use_quote_delim attribute to value.
10932//
10933// value: If false, treats double quotation marks as regular
10934// characters inside of the string fields (ignoring RFC 4180, Section 2,
10935// Bullet 5).
10936// If not specified, defaults to true
10937func DecodeCSVUseQuoteDelim(value bool) DecodeCSVAttr {
10938	return func(m optionalAttr) {
10939		m["use_quote_delim"] = value
10940	}
10941}
10942
10943// DecodeCSVNaValue sets the optional na_value attribute to value.
10944//
10945// value: Additional string to recognize as NA/NaN.
10946// If not specified, defaults to ""
10947func DecodeCSVNaValue(value string) DecodeCSVAttr {
10948	return func(m optionalAttr) {
10949		m["na_value"] = value
10950	}
10951}
10952
10953// DecodeCSVSelectCols sets the optional select_cols attribute to value.
10954// If not specified, defaults to {}
10955func DecodeCSVSelectCols(value []int64) DecodeCSVAttr {
10956	return func(m optionalAttr) {
10957		m["select_cols"] = value
10958	}
10959}
10960
10961// Convert CSV records to tensors. Each column maps to one tensor.
10962//
10963// RFC 4180 format is expected for the CSV records.
10964// (https://tools.ietf.org/html/rfc4180)
10965// Note that leading and trailing spaces are allowed in int and float fields.
10966//
10967// Arguments:
10968//
10969//	records: Each string is a record/row in the csv and all records should have
10970//
10971// the same format.
10972//
10973//	record_defaults: One tensor per column of the input record, with either a
10974//
10975// scalar default value for that column or an empty vector if the column is
10976// required.
10977//
10978// Returns Each tensor will have the same shape as records.
10979func DecodeCSV(scope *Scope, records tf.Output, record_defaults []tf.Output, optional ...DecodeCSVAttr) (output []tf.Output) {
10980	if scope.Err() != nil {
10981		return
10982	}
10983	attrs := map[string]interface{}{}
10984	for _, a := range optional {
10985		a(attrs)
10986	}
10987	opspec := tf.OpSpec{
10988		Type: "DecodeCSV",
10989		Input: []tf.Input{
10990			records, tf.OutputList(record_defaults),
10991		},
10992		Attrs: attrs,
10993	}
10994	op := scope.AddOperation(opspec)
10995	if scope.Err() != nil {
10996		return
10997	}
10998	var idx int
10999	var err error
11000	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
11001		scope.UpdateErr("DecodeCSV", err)
11002		return
11003	}
11004	return output
11005}
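
// Editorial note: a hypothetical sketch (not generated code) parsing
// two-column records "int64;float32" with scalar defaults, a custom field
// delimiter, and an NA marker.
func exampleDecodeCSV() []tf.Output {
	s := NewScope()
	records := Placeholder(s, tf.String)
	defaults := []tf.Output{
		Const(s, int64(0)),   // column 0: int64, default 0
		Const(s, float32(0)), // column 1: float32, default 0.0
	}
	return DecodeCSV(s, records, defaults,
		DecodeCSVFieldDelim(";"),
		DecodeCSVNaValue("NA"),
	)
}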
11006
11007// DecodeCompressedAttr is an optional argument to DecodeCompressed.
11008type DecodeCompressedAttr func(optionalAttr)
11009
11010// DecodeCompressedCompressionType sets the optional compression_type attribute to value.
11011//
11012// value: A scalar containing either (i) the empty string (no
11013// compression), (ii) "ZLIB", or (iii) "GZIP".
11014// If not specified, defaults to ""
11015func DecodeCompressedCompressionType(value string) DecodeCompressedAttr {
11016	return func(m optionalAttr) {
11017		m["compression_type"] = value
11018	}
11019}
11020
11021// Decompress strings.
11022//
11023// This op decompresses each element of the `bytes` input `Tensor`, which
11024// is assumed to be compressed using the given `compression_type`.
11025//
11026// The `output` is a string `Tensor` of the same shape as `bytes`,
11027// each element containing the decompressed data from the corresponding
11028// element in `bytes`.
11029//
11030// Arguments:
11031//
11032//	bytes: A Tensor of string which is compressed.
11033//
11034// Returns A Tensor with the same shape as input `bytes`, uncompressed
11035// from bytes.
11036func DecodeCompressed(scope *Scope, bytes tf.Output, optional ...DecodeCompressedAttr) (output tf.Output) {
11037	if scope.Err() != nil {
11038		return
11039	}
11040	attrs := map[string]interface{}{}
11041	for _, a := range optional {
11042		a(attrs)
11043	}
11044	opspec := tf.OpSpec{
11045		Type: "DecodeCompressed",
11046		Input: []tf.Input{
11047			bytes,
11048		},
11049		Attrs: attrs,
11050	}
11051	op := scope.AddOperation(opspec)
11052	return op.Output(0)
11053}
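
// Editorial note: a hypothetical sketch (not generated code) decompressing
// GZIP-compressed string elements, one of the three documented values.
func exampleDecodeCompressed() tf.Output {
	s := NewScope()
	compressed := Placeholder(s, tf.String)
	return DecodeCompressed(s, compressed,
		DecodeCompressedCompressionType("GZIP"),
	)
}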
11054
11055// Decode the frame(s) of a GIF-encoded image to a uint8 tensor.
11056//
11057// GIF images with frame or transparency compression are not supported.
11058// On Linux and macOS systems, convert animated GIFs from compressed to
11059// uncompressed by running:
11060//
11061//	convert $src.gif -coalesce $dst.gif
11062//
11063// This op also supports decoding JPEGs and PNGs, though it is cleaner to use
11064// `tf.io.decode_image`.
11065//
11066// Arguments:
11067//
11068//	contents: 0-D.  The GIF-encoded image.
11069//
11070// Returns 4-D with shape `[num_frames, height, width, 3]`. RGB channel order.
11071func DecodeGif(scope *Scope, contents tf.Output) (image tf.Output) {
11072	if scope.Err() != nil {
11073		return
11074	}
11075	opspec := tf.OpSpec{
11076		Type: "DecodeGif",
11077		Input: []tf.Input{
11078			contents,
11079		},
11080	}
11081	op := scope.AddOperation(opspec)
11082	return op.Output(0)
11083}
11084
11085// DecodeImageAttr is an optional argument to DecodeImage.
11086type DecodeImageAttr func(optionalAttr)
11087
11088// DecodeImageChannels sets the optional channels attribute to value.
11089//
11090// value: Number of color channels for the decoded image.
11091// If not specified, defaults to 0
11092func DecodeImageChannels(value int64) DecodeImageAttr {
11093	return func(m optionalAttr) {
11094		m["channels"] = value
11095	}
11096}
11097
11098// DecodeImageDtype sets the optional dtype attribute to value.
11099//
11100// value: The desired DType of the returned Tensor.
11101// If not specified, defaults to DT_UINT8
11102func DecodeImageDtype(value tf.DataType) DecodeImageAttr {
11103	return func(m optionalAttr) {
11104		m["dtype"] = value
11105	}
11106}
11107
11108// DecodeImageExpandAnimations sets the optional expand_animations attribute to value.
11109//
11110// value: Controls the output shape of the returned op. If True, the returned op will
11111// produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all
11112// GIFs, whether animated or not. If False, the returned op will produce a 3-D
11113// tensor for all file types and will truncate animated GIFs to the first frame.
11114// If not specified, defaults to true
11115func DecodeImageExpandAnimations(value bool) DecodeImageAttr {
11116	return func(m optionalAttr) {
11117		m["expand_animations"] = value
11118	}
11119}
11120
11121// Function for decode_bmp, decode_gif, decode_jpeg, and decode_png.
11122//
11123// Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the
11124// appropriate operation to convert the input bytes string into a Tensor of type
11125// dtype.
11126//
11127// *NOTE*: decode_gif returns a 4-D array [num_frames, height, width, 3], as
11128// opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays
11129// [height, width, num_channels]. Make sure to take this into account when
11130// constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or
11131// PNG files. Alternately, set the expand_animations argument of this function to
11132// False, in which case the op will return 3-dimensional tensors and will truncate
11133// animated GIF files to the first frame.
11134//
11135// *NOTE*: If the first frame of an animated GIF does not occupy the entire
11136// canvas (maximum frame width x maximum frame height), then it fills the
11137// unoccupied areas (in the first frame) with zeros (black). For frames after the
11138// first frame that do not occupy the entire canvas, it uses the previous
11139// frame to fill the unoccupied areas.
11140//
11141// Arguments:
11142//
11143//	contents: 0-D. The encoded image bytes.
11144//
11145// Returns 3-D with shape `[height, width, channels]` or 4-D with shape
11146// `[frame, height, width, channels]`.
11147func DecodeImage(scope *Scope, contents tf.Output, optional ...DecodeImageAttr) (image tf.Output) {
11148	if scope.Err() != nil {
11149		return
11150	}
11151	attrs := map[string]interface{}{}
11152	for _, a := range optional {
11153		a(attrs)
11154	}
11155	opspec := tf.OpSpec{
11156		Type: "DecodeImage",
11157		Input: []tf.Input{
11158			contents,
11159		},
11160		Attrs: attrs,
11161	}
11162	op := scope.AddOperation(opspec)
11163	return op.Output(0)
11164}
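
// Editorial note: a hypothetical sketch (not generated code) that always
// yields a 3-D RGB tensor by disabling expand_animations, so animated GIFs
// are truncated to their first frame as described above.
func exampleDecodeImage() tf.Output {
	s := NewScope()
	contents := Placeholder(s, tf.String)
	return DecodeImage(s, contents,
		DecodeImageChannels(3),
		DecodeImageExpandAnimations(false),
	)
}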
11165
11166// Convert JSON-encoded Example records to binary protocol buffer strings.
11167//
11168// Note: This is **not** a general purpose JSON parsing op.
11169//
11170// This op converts JSON-serialized
11171// `tf.train.Example` (created with `json_format.MessageToJson`, following the
11172// [standard JSON mapping](https://developers.google.com/protocol-buffers/docs/proto3#json))
11173// to a binary-serialized `tf.train.Example` (equivalent to
11174// `Example.SerializeToString()`) suitable for conversion to tensors with
11175// `tf.io.parse_example`.
11176//
11177// Arguments:
11178//
11179//	json_examples: Each string is a JSON object serialized according to the JSON
11180//
11181// mapping of the Example proto.
11182//
11183// Returns Each string is a binary Example protocol buffer corresponding
11184// to the respective element of `json_examples`.
11185func DecodeJSONExample(scope *Scope, json_examples tf.Output) (binary_examples tf.Output) {
11186	if scope.Err() != nil {
11187		return
11188	}
11189	opspec := tf.OpSpec{
11190		Type: "DecodeJSONExample",
11191		Input: []tf.Input{
11192			json_examples,
11193		},
11194	}
11195	op := scope.AddOperation(opspec)
11196	return op.Output(0)
11197}
11198
11199// DecodeJpegAttr is an optional argument to DecodeJpeg.
11200type DecodeJpegAttr func(optionalAttr)
11201
11202// DecodeJpegChannels sets the optional channels attribute to value.
11203//
11204// value: Number of color channels for the decoded image.
11205// If not specified, defaults to 0
11206func DecodeJpegChannels(value int64) DecodeJpegAttr {
11207	return func(m optionalAttr) {
11208		m["channels"] = value
11209	}
11210}
11211
11212// DecodeJpegRatio sets the optional ratio attribute to value.
11213//
11214// value: Downscaling ratio.
11215// If not specified, defaults to 1
11216func DecodeJpegRatio(value int64) DecodeJpegAttr {
11217	return func(m optionalAttr) {
11218		m["ratio"] = value
11219	}
11220}
11221
11222// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value.
11223//
11224// value: If true use a slower but nicer upscaling of the
11225// chroma planes (yuv420/422 only).
11226// If not specified, defaults to true
11227func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr {
11228	return func(m optionalAttr) {
11229		m["fancy_upscaling"] = value
11230	}
11231}
11232
11233// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value.
11234//
11235// value: If true try to recover an image from truncated input.
11236// If not specified, defaults to false
11237func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr {
11238	return func(m optionalAttr) {
11239		m["try_recover_truncated"] = value
11240	}
11241}
11242
11243// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value.
11244//
11245// value: The minimum required fraction of lines before a truncated
11246// input is accepted.
11247// If not specified, defaults to 1
11248func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr {
11249	return func(m optionalAttr) {
11250		m["acceptable_fraction"] = value
11251	}
11252}
11253
11254// DecodeJpegDctMethod sets the optional dct_method attribute to value.
11255//
11256// value: string specifying a hint about the algorithm used for
11257// decompression.  Defaults to "" which maps to a system-specific
11258// default.  Currently valid values are ["INTEGER_FAST",
11259// "INTEGER_ACCURATE"].  The hint may be ignored (e.g., the internal
11260// jpeg library changes to a version that does not have that specific
11261// option.)
11262// If not specified, defaults to ""
11263func DecodeJpegDctMethod(value string) DecodeJpegAttr {
11264	return func(m optionalAttr) {
11265		m["dct_method"] = value
11266	}
11267}
11268
11269// Decode a JPEG-encoded image to a uint8 tensor.
11270//
11271// The attr `channels` indicates the desired number of color channels for the
11272// decoded image.
11273//
11274// Accepted values are:
11275//
11276// *   0: Use the number of channels in the JPEG-encoded image.
11277// *   1: output a grayscale image.
11278// *   3: output an RGB image.
11279//
11280// If needed, the JPEG-encoded image is transformed to match the requested number
11281// of color channels.
11282//
11283// The attr `ratio` allows downscaling the image by an integer factor during
11284// decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
11285// downscaling the image later.
11286//
11287// This op also supports decoding PNGs and non-animated GIFs since the interface is
11288// the same, though it is cleaner to use `tf.io.decode_image`.
11289//
11290// Arguments:
11291//
11292//	contents: 0-D.  The JPEG-encoded image.
11293//
11294// Returns 3-D with shape `[height, width, channels]`.
11295func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) {
11296	if scope.Err() != nil {
11297		return
11298	}
11299	attrs := map[string]interface{}{}
11300	for _, a := range optional {
11301		a(attrs)
11302	}
11303	opspec := tf.OpSpec{
11304		Type: "DecodeJpeg",
11305		Input: []tf.Input{
11306			contents,
11307		},
11308		Attrs: attrs,
11309	}
11310	op := scope.AddOperation(opspec)
11311	return op.Output(0)
11312}
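
// Editorial note: a hypothetical sketch (not generated code) decoding a JPEG
// as grayscale at 1/8 resolution with the slower, more accurate DCT method
// named in the attribute doc above.
func exampleDecodeJpeg() tf.Output {
	s := NewScope()
	contents := Placeholder(s, tf.String)
	return DecodeJpeg(s, contents,
		DecodeJpegChannels(1),
		DecodeJpegRatio(8),
		DecodeJpegDctMethod("INTEGER_ACCURATE"),
	)
}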
11313
11314// DecodePaddedRawAttr is an optional argument to DecodePaddedRaw.
11315type DecodePaddedRawAttr func(optionalAttr)
11316
11317// DecodePaddedRawLittleEndian sets the optional little_endian attribute to value.
11318//
11319// value: Whether the input `input_bytes` is in little-endian order. Ignored for
11320// `out_type` values that are stored in a single byte, like `uint8`.
11321// If not specified, defaults to true
11322func DecodePaddedRawLittleEndian(value bool) DecodePaddedRawAttr {
11323	return func(m optionalAttr) {
11324		m["little_endian"] = value
11325	}
11326}
11327
11328// Reinterpret the bytes of a string as a vector of numbers.
11329//
11330// Arguments:
11331//
11332//	input_bytes: Tensor of string to be decoded.
11333//	fixed_length: Length in bytes for each element of the decoded output. Must be a multiple
11334//
11335// of the size of the output type.
11336//
11337// Returns A Tensor with one more dimension than the input `bytes`. The added dimension
11338// will have size equal to the length of the elements of `bytes` divided by the
11339// number of bytes to represent `out_type`.
11340func DecodePaddedRaw(scope *Scope, input_bytes tf.Output, fixed_length tf.Output, out_type tf.DataType, optional ...DecodePaddedRawAttr) (output tf.Output) {
11341	if scope.Err() != nil {
11342		return
11343	}
11344	attrs := map[string]interface{}{"out_type": out_type}
11345	for _, a := range optional {
11346		a(attrs)
11347	}
11348	opspec := tf.OpSpec{
11349		Type: "DecodePaddedRaw",
11350		Input: []tf.Input{
11351			input_bytes, fixed_length,
11352		},
11353		Attrs: attrs,
11354	}
11355	op := scope.AddOperation(opspec)
11356	return op.Output(0)
11357}
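
// Editorial note: a hypothetical sketch (not generated code). Each string
// element is reinterpreted as exactly four little-endian int32 values:
// fixed_length = 16 bytes = 4 * sizeof(int32).
func exampleDecodePaddedRaw() tf.Output {
	s := NewScope()
	in := Placeholder(s, tf.String)
	fixedLength := Const(s, int32(16))
	return DecodePaddedRaw(s, in, fixedLength, tf.Int32)
}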
11358
11359// DecodePngAttr is an optional argument to DecodePng.
11360type DecodePngAttr func(optionalAttr)
11361
11362// DecodePngChannels sets the optional channels attribute to value.
11363//
11364// value: Number of color channels for the decoded image.
11365// If not specified, defaults to 0
11366func DecodePngChannels(value int64) DecodePngAttr {
11367	return func(m optionalAttr) {
11368		m["channels"] = value
11369	}
11370}
11371
11372// DecodePngDtype sets the optional dtype attribute to value.
11373// If not specified, defaults to DT_UINT8
11374func DecodePngDtype(value tf.DataType) DecodePngAttr {
11375	return func(m optionalAttr) {
11376		m["dtype"] = value
11377	}
11378}
11379
11380// Decode a PNG-encoded image to a uint8 or uint16 tensor.
11381//
11382// The attr `channels` indicates the desired number of color channels for the
11383// decoded image.
11384//
11385// Accepted values are:
11386//
11387// *   0: Use the number of channels in the PNG-encoded image.
11388// *   1: output a grayscale image.
11389// *   3: output an RGB image.
11390// *   4: output an RGBA image.
11391//
11392// If needed, the PNG-encoded image is transformed to match the requested number
11393// of color channels.
11394//
11395// This op also supports decoding JPEGs and non-animated GIFs since the interface
11396// is the same, though it is cleaner to use `tf.io.decode_image`.
11397//
11398// Arguments:
11399//
11400//	contents: 0-D.  The PNG-encoded image.
11401//
11402// Returns 3-D with shape `[height, width, channels]`.
11403func DecodePng(scope *Scope, contents tf.Output, optional ...DecodePngAttr) (image tf.Output) {
11404	if scope.Err() != nil {
11405		return
11406	}
11407	attrs := map[string]interface{}{}
11408	for _, a := range optional {
11409		a(attrs)
11410	}
11411	opspec := tf.OpSpec{
11412		Type: "DecodePng",
11413		Input: []tf.Input{
11414			contents,
11415		},
11416		Attrs: attrs,
11417	}
11418	op := scope.AddOperation(opspec)
11419	return op.Output(0)
11420}
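
// Editorial note: a hypothetical sketch (not generated code) decoding 16-bit
// PNGs by requesting a uint16 output dtype.
func exampleDecodePng() tf.Output {
	s := NewScope()
	contents := Placeholder(s, tf.String)
	return DecodePng(s, contents, DecodePngDtype(tf.Uint16))
}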
11421
11422// DecodeProtoV2Attr is an optional argument to DecodeProtoV2.
11423type DecodeProtoV2Attr func(optionalAttr)
11424
11425// DecodeProtoV2DescriptorSource sets the optional descriptor_source attribute to value.
11426//
11427// value: Either the special value `local://` or a path to a file containing
11428// a serialized `FileDescriptorSet`.
11429// If not specified, defaults to "local://"
11430func DecodeProtoV2DescriptorSource(value string) DecodeProtoV2Attr {
11431	return func(m optionalAttr) {
11432		m["descriptor_source"] = value
11433	}
11434}
11435
11436// DecodeProtoV2MessageFormat sets the optional message_format attribute to value.
11437//
11438// value: Either `binary` or `text`.
11439// If not specified, defaults to "binary"
11440func DecodeProtoV2MessageFormat(value string) DecodeProtoV2Attr {
11441	return func(m optionalAttr) {
11442		m["message_format"] = value
11443	}
11444}
11445
11446// DecodeProtoV2Sanitize sets the optional sanitize attribute to value.
11447//
11448// value: Whether to sanitize the result or not.
11449// If not specified, defaults to false
11450func DecodeProtoV2Sanitize(value bool) DecodeProtoV2Attr {
11451	return func(m optionalAttr) {
11452		m["sanitize"] = value
11453	}
11454}
11455
11456// The op extracts fields from a serialized protocol buffers message into tensors.
11457//
11458// Note: This API is designed for orthogonality rather than human-friendliness. It
11459// can be used to parse input protos by hand, but it is intended for use in
11460// generated code.
11461//
11462// The `decode_proto` op extracts fields from a serialized protocol buffers
11463// message into tensors.  The fields in `field_names` are decoded and converted
11464// to the corresponding `output_types` if possible.
11465//
11466// A `message_type` name must be provided to give context for the field names.
11467// The actual message descriptor can be looked up either in the linked-in
11468// descriptor pool or a filename provided by the caller using the
11469// `descriptor_source` attribute.
11470//
11471// Each output tensor is a dense tensor. This means that it is padded to hold
11472// the largest number of repeated elements seen in the input minibatch. (The
11473// shape is also padded by one to prevent zero-sized dimensions). The actual
11474// repeat counts for each example in the minibatch can be found in the `sizes`
11475// output. In many cases the output of `decode_proto` is fed immediately into
11476// tf.squeeze if missing values are not a concern. When using tf.squeeze, always
11477// pass the squeeze dimension explicitly to avoid surprises.
11478//
11479// For the most part, the mapping between Proto field types and TensorFlow dtypes
11480// is straightforward. However, there are a few special cases:
11481//
11482// - A proto field that contains a submessage or group can only be converted
11483// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
11484// of the API. The resulting string can be used as input to another instance of
11485// the decode_proto op.
11486//
11487// - TensorFlow lacks support for unsigned integers. The ops represent uint64
11488// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
11489// way). Unsigned int32 values can be represented exactly by specifying type
11490// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
11491// the `output_types` attribute.
11492//
11493// - `map` fields are not directly decoded. They are treated as `repeated` fields,
11494// of the appropriate entry type. The proto-compiler defines entry types for each
11495// map field. The type-name is the field name, converted to "CamelCase" with
11496// "Entry" appended. The `tf.train.Features.FeatureEntry` message is an example of
11497// one of these implicit `Entry` types.
11498//
11499// - `enum` fields should be read as int32.
11500//
11501// Both binary and text proto serializations are supported, and can be
11502// chosen using the `format` attribute.
11503//
11504// The `descriptor_source` attribute selects the source of protocol
11505// descriptors to consult when looking up `message_type`. This may be:
11506//
11507// - An empty string or "local://", in which case protocol descriptors are
11508// created for C++ (not Python) proto definitions linked to the binary.
11509//
11510// - A file, in which case protocol descriptors are created from the file,
11511// which is expected to contain a `FileDescriptorSet` serialized as a string.
11512// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
11513// and `--include_imports` options to the protocol compiler `protoc`.
11514//
11515// - A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`,
11516// which is expected to be a `FileDescriptorSet` serialized as a string.
11517//
11518// Arguments:
11519//
11520//	bytes: Tensor of serialized protos with shape `batch_shape`.
11521//	message_type: Name of the proto message type to decode.
11522//	field_names: List of strings containing proto field names. An extension field can be decoded
11523//
11524// by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME.
11525//
11526//	output_types: List of TF types to use for the respective field in field_names.
11527//
11528// Returns:
11529//
11530//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
11531//
11532// Each entry is the number of values found for the corresponding field.
11533// Optional fields may have 0 or 1 values.
11534//
11535//	values: List of tensors containing values for the corresponding field.
11536//
11537// `values[i]` has datatype `output_types[i]`
11538// and shape `[batch_shape, max(sizes[...,i])]`.
11539func DecodeProtoV2(scope *Scope, bytes tf.Output, message_type string, field_names []string, output_types []tf.DataType, optional ...DecodeProtoV2Attr) (sizes tf.Output, values []tf.Output) {
11540	if scope.Err() != nil {
11541		return
11542	}
11543	attrs := map[string]interface{}{"message_type": message_type, "field_names": field_names, "output_types": output_types}
11544	for _, a := range optional {
11545		a(attrs)
11546	}
11547	opspec := tf.OpSpec{
11548		Type: "DecodeProtoV2",
11549		Input: []tf.Input{
11550			bytes,
11551		},
11552		Attrs: attrs,
11553	}
11554	op := scope.AddOperation(opspec)
11555	if scope.Err() != nil {
11556		return
11557	}
11558	var idx int
11559	var err error
11560	sizes = op.Output(idx)
11561	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
11562		scope.UpdateErr("DecodeProtoV2", err)
11563		return
11564	}
11565	return sizes, values
11566}
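
// Editorial note: a hypothetical sketch (not generated code). It extracts the
// `features` submessage field from serialized tensorflow.Example protos; per
// the notes above, submessage fields can only be decoded as DT_STRING.
func exampleDecodeProtoV2() (tf.Output, []tf.Output) {
	s := NewScope()
	protos := Placeholder(s, tf.String)
	return DecodeProtoV2(s, protos,
		"tensorflow.Example",
		[]string{"features"},
		[]tf.DataType{tf.String},
	)
}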
11567
11568// DecodeRawAttr is an optional argument to DecodeRaw.
11569type DecodeRawAttr func(optionalAttr)
11570
11571// DecodeRawLittleEndian sets the optional little_endian attribute to value.
11572//
11573// value: Whether the input `bytes` are in little-endian order.
11574// Ignored for `out_type` values that are stored in a single byte like
11575// `uint8`.
11576// If not specified, defaults to true
11577func DecodeRawLittleEndian(value bool) DecodeRawAttr {
11578	return func(m optionalAttr) {
11579		m["little_endian"] = value
11580	}
11581}
11582
11583// Reinterpret the bytes of a string as a vector of numbers.
11584//
11585// Arguments:
11586//
11587//	bytes: All the elements must have the same length.
11588//
11589// Returns A Tensor with one more dimension than the input `bytes`.  The
11590// added dimension will have size equal to the length of the elements
11591// of `bytes` divided by the number of bytes to represent `out_type`.
11592func DecodeRaw(scope *Scope, bytes tf.Output, out_type tf.DataType, optional ...DecodeRawAttr) (output tf.Output) {
11593	if scope.Err() != nil {
11594		return
11595	}
11596	attrs := map[string]interface{}{"out_type": out_type}
11597	for _, a := range optional {
11598		a(attrs)
11599	}
11600	opspec := tf.OpSpec{
11601		Type: "DecodeRaw",
11602		Input: []tf.Input{
11603			bytes,
11604		},
11605		Attrs: attrs,
11606	}
11607	op := scope.AddOperation(opspec)
11608	return op.Output(0)
11609}
11610
11611// DecodeWavAttr is an optional argument to DecodeWav.
11612type DecodeWavAttr func(optionalAttr)
11613
11614// DecodeWavDesiredChannels sets the optional desired_channels attribute to value.
11615//
11616// value: Number of sample channels wanted.
11617// If not specified, defaults to -1
11618func DecodeWavDesiredChannels(value int64) DecodeWavAttr {
11619	return func(m optionalAttr) {
11620		m["desired_channels"] = value
11621	}
11622}
11623
11624// DecodeWavDesiredSamples sets the optional desired_samples attribute to value.
11625//
11626// value: Length of audio requested.
11627// If not specified, defaults to -1
11628func DecodeWavDesiredSamples(value int64) DecodeWavAttr {
11629	return func(m optionalAttr) {
11630		m["desired_samples"] = value
11631	}
11632}
11633
11634// Decode a 16-bit PCM WAV file to a float tensor.
11635//
11636// The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float.
11637//
11638// When desired_channels is set, if the input contains fewer channels than this
11639// then the last channel will be duplicated to give the requested number, else if
11640// the input has more channels than requested then the additional channels will be
11641// ignored.
11642//
11643// If desired_samples is set, then the audio will be cropped or padded with zeroes
11644// to the requested length.
11645//
11646// The first output contains a Tensor with the content of the audio samples. The
11647// innermost (last) dimension will be the number of channels, and the outer
11648// dimension the number of samples. For example, a ten-sample-long stereo WAV
11649// file should give an output shape of [10, 2].
11650//
11651// Arguments:
11652//
11653//	contents: The WAV-encoded audio, usually from a file.
11654//
11655// Returns:
11656//
11657//	audio: 2-D with shape `[length, channels]`.
11658//	sample_rate: Scalar holding the sample rate found in the WAV header.
11659func DecodeWav(scope *Scope, contents tf.Output, optional ...DecodeWavAttr) (audio tf.Output, sample_rate tf.Output) {
11660	if scope.Err() != nil {
11661		return
11662	}
11663	attrs := map[string]interface{}{}
11664	for _, a := range optional {
11665		a(attrs)
11666	}
11667	opspec := tf.OpSpec{
11668		Type: "DecodeWav",
11669		Input: []tf.Input{
11670			contents,
11671		},
11672		Attrs: attrs,
11673	}
11674	op := scope.AddOperation(opspec)
11675	return op.Output(0), op.Output(1)
11676}
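
// Editorial note: a hypothetical sketch (not generated code) decoding WAV
// bytes to a fixed mono, 16000-sample clip; cropping, padding, and channel
// duplication behave as described above.
func exampleDecodeWav() (audio, sampleRate tf.Output) {
	s := NewScope()
	contents := Placeholder(s, tf.String)
	return DecodeWav(s, contents,
		DecodeWavDesiredChannels(1),
		DecodeWavDesiredSamples(16000),
	)
}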
11677
11678// Makes a copy of `x`.
11679//
11680// Arguments:
11681//
11682//	x: The source tensor of type `T`.
11683//
11684// Returns y: A `Tensor` of type `T`. A copy of `x`, guaranteed that `y`
11685//
11686//	is not an alias of `x`.
11687func DeepCopy(scope *Scope, x tf.Output) (y tf.Output) {
11688	if scope.Err() != nil {
11689		return
11690	}
11691	opspec := tf.OpSpec{
11692		Type: "DeepCopy",
11693		Input: []tf.Input{
11694			x,
11695		},
11696	}
11697	op := scope.AddOperation(opspec)
11698	return op.Output(0)
11699}
11700
11701// Deletes the iterator resource specified by its handle.
11702//
11703// Arguments:
11704//
11705//	handle: A handle to the iterator to delete.
11706//	deleter: A variant deleter.
11707//
11708// Returns the created operation.
11709func DeleteIterator(scope *Scope, handle tf.Output, deleter tf.Output) (o *tf.Operation) {
11710	if scope.Err() != nil {
11711		return
11712	}
11713	opspec := tf.OpSpec{
11714		Type: "DeleteIterator",
11715		Input: []tf.Input{
11716			handle, deleter,
11717		},
11718	}
11719	return scope.AddOperation(opspec)
11720}
11721
11722// Deletes a multi-device iterator resource.
11723//
11724// Arguments:
11725//
11726//	multi_device_iterator: A handle to the multi device iterator to delete.
11727//	iterators: A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.
11728//	deleter: A variant deleter.
11729//
11730// Returns the created operation.
11731func DeleteMultiDeviceIterator(scope *Scope, multi_device_iterator tf.Output, iterators []tf.Output, deleter tf.Output) (o *tf.Operation) {
11732	if scope.Err() != nil {
11733		return
11734	}
11735	opspec := tf.OpSpec{
11736		Type: "DeleteMultiDeviceIterator",
11737		Input: []tf.Input{
11738			multi_device_iterator, tf.OutputList(iterators), deleter,
11739		},
11740	}
11741	return scope.AddOperation(opspec)
11742}
11743
11744// Delete the tensor specified by its handle in the session.
11745//
11746// Arguments:
11747//
11748//	handle: The handle for a tensor stored in the session state.
11749//
11750// Returns the created operation.
11751func DeleteSessionTensor(scope *Scope, handle tf.Output) (o *tf.Operation) {
11752	if scope.Err() != nil {
11753		return
11754	}
11755	opspec := tf.OpSpec{
11756		Type: "DeleteSessionTensor",
11757		Input: []tf.Input{
11758			handle,
11759		},
11760	}
11761	return scope.AddOperation(opspec)
11762}
11763
11764// DenseBincountAttr is an optional argument to DenseBincount.
11765type DenseBincountAttr func(optionalAttr)
11766
11767// DenseBincountBinaryOutput sets the optional binary_output attribute to value.
11768//
11769// value: bool; whether the kernel should record only the presence of each value (binary output) or count the number of occurrences.
11770// If not specified, defaults to false
11771func DenseBincountBinaryOutput(value bool) DenseBincountAttr {
11772	return func(m optionalAttr) {
11773		m["binary_output"] = value
11774	}
11775}
11776
11777// Counts the number of occurrences of each value in an integer array.
11778//
11779// Outputs a vector with length `size` and the same dtype as `weights`. If
11780// `weights` are empty, then index `i` stores the number of times the value `i` is
11781// counted in `input`. If `weights` are non-empty, then index `i` stores the sum of
11782// the value in `weights` at each index where the corresponding value in `input` is
11783// `i`.
11784//
11785// Values in `input` outside of the range [0, size) are ignored.
11786//
11787// Arguments:
11788//
11789//	input: 1D or 2D int `Tensor`.
11790//	size: non-negative int scalar `Tensor`.
11791//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
11792//	weights: an int32, int64, float32, or float64 `Tensor` with the same
11793//
11794// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights
11795//
11796// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
11797// The counts or summed weights for each value in the range [0, size).
11798func DenseBincount(scope *Scope, input tf.Output, size tf.Output, weights tf.Output, optional ...DenseBincountAttr) (output tf.Output) {
11799	if scope.Err() != nil {
11800		return
11801	}
11802	attrs := map[string]interface{}{}
11803	for _, a := range optional {
11804		a(attrs)
11805	}
11806	opspec := tf.OpSpec{
11807		Type: "DenseBincount",
11808		Input: []tf.Input{
11809			input, size, weights,
11810		},
11811		Attrs: attrs,
11812	}
11813	op := scope.AddOperation(opspec)
11814	return op.Output(0)
11815}
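
// Editorial note: a hypothetical sketch (not generated code). With a
// length-0 weights tensor every occurrence counts as 1, so the result for
// this input is [0 2 1 0 0 1 0 0].
func exampleDenseBincount() tf.Output {
	s := NewScope()
	arr := Const(s, []int32{1, 1, 2, 5})
	size := Const(s, int32(8))
	weights := Const(s, []float32{}) // empty: unit weights
	return DenseBincount(s, arr, size, weights)
}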
11816
11817// DenseCountSparseOutputAttr is an optional argument to DenseCountSparseOutput.
11818type DenseCountSparseOutputAttr func(optionalAttr)
11819
11820// DenseCountSparseOutputMinlength sets the optional minlength attribute to value.
11821//
11822// value: Minimum value to count. Can be set to -1 for no minimum.
11823// If not specified, defaults to -1
11824//
11825// REQUIRES: value >= -1
11826func DenseCountSparseOutputMinlength(value int64) DenseCountSparseOutputAttr {
11827	return func(m optionalAttr) {
11828		m["minlength"] = value
11829	}
11830}
11831
11832// DenseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
11833//
11834// value: Maximum value to count. Can be set to -1 for no maximum.
11835// If not specified, defaults to -1
11836//
11837// REQUIRES: value >= -1
11838func DenseCountSparseOutputMaxlength(value int64) DenseCountSparseOutputAttr {
11839	return func(m optionalAttr) {
11840		m["maxlength"] = value
11841	}
11842}
11843
11844// Performs sparse-output bin counting for a tf.Tensor input.
11845//
11846// Counts the number of times each value occurs in the input.
11847//
11848// Arguments:
11849//
11850//	values: Tensor containing data to count.
11851//	weights: A Tensor of the same shape as indices containing per-index weight values. May
11852//
11853// also be the empty tensor if no weights are used.
11854//
11855//	binary_output: Whether to output the number of occurrences of each value or 1.
11856//
11857// Returns:
11858//
11859//	output_indices: Indices tensor for the resulting sparse tensor object.
11860//	output_values: Values tensor for the resulting sparse tensor object.
11861//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
11862func DenseCountSparseOutput(scope *Scope, values tf.Output, weights tf.Output, binary_output bool, optional ...DenseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
11863	if scope.Err() != nil {
11864		return
11865	}
11866	attrs := map[string]interface{}{"binary_output": binary_output}
11867	for _, a := range optional {
11868		a(attrs)
11869	}
11870	opspec := tf.OpSpec{
11871		Type: "DenseCountSparseOutput",
11872		Input: []tf.Input{
11873			values, weights,
11874		},
11875		Attrs: attrs,
11876	}
11877	op := scope.AddOperation(opspec)
11878	return op.Output(0), op.Output(1), op.Output(2)
11879}
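
// Editorial note: a hypothetical sketch (not generated code) producing a
// sparse count of value occurrences; the empty weights tensor means plain
// counting, and binary_output=false keeps the per-value counts.
func exampleDenseCountSparseOutput() (indices, values, denseShape tf.Output) {
	s := NewScope()
	vals := Const(s, []int32{1, 1, 2, 5})
	weights := Const(s, []int32{}) // empty: no weighting
	return DenseCountSparseOutput(s, vals, weights, false,
		DenseCountSparseOutputMaxlength(8),
	)
}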
11880
11881// Converts a dense tensor to a (possibly batched) CSRSparseMatrix.
11882//
11883// Arguments:
11884//
11885//	dense_input: A Dense tensor.
11886//	indices: Indices of nonzero elements.
11887//
11888// Returns A (possibly batched) CSRSparseMatrix.
11889func DenseToCSRSparseMatrix(scope *Scope, dense_input tf.Output, indices tf.Output) (sparse_output tf.Output) {
11890	if scope.Err() != nil {
11891		return
11892	}
11893	opspec := tf.OpSpec{
11894		Type: "DenseToCSRSparseMatrix",
11895		Input: []tf.Input{
11896			dense_input, indices,
11897		},
11898	}
11899	op := scope.AddOperation(opspec)
11900	return op.Output(0)
11901}
11902
11903// DenseToDenseSetOperationAttr is an optional argument to DenseToDenseSetOperation.
11904type DenseToDenseSetOperationAttr func(optionalAttr)
11905
11906// DenseToDenseSetOperationValidateIndices sets the optional validate_indices attribute to value.
11907// If not specified, defaults to true
11908func DenseToDenseSetOperationValidateIndices(value bool) DenseToDenseSetOperationAttr {
11909	return func(m optionalAttr) {
11910		m["validate_indices"] = value
11911	}
11912}
11913
11914// Applies set operation along last dimension of 2 `Tensor` inputs.
11915//
11916// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
11917//
11918// Output `result` is a `SparseTensor` represented by `result_indices`,
11919// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
11920// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
11921// dimension contains the result of `set_operation` applied to the corresponding
11922// `[0...n-1]` dimension of `set`.
11923//
11924// Arguments:
11925//
11926//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
11927//
11928// Dimension `n` contains values in a set, duplicates are allowed but ignored.
11929//
11930//	set2: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
11931//
11932// Dimension `n` contains values in a set, duplicates are allowed but ignored.
11933//
11934// Returns:
11935//
11936//	result_indices: 2D indices of a `SparseTensor`.
11937//	result_values: 1D values of a `SparseTensor`.
11938//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
11939//
11940// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
11941// is the max result set size across all `0...n-1` dimensions.
11942func DenseToDenseSetOperation(scope *Scope, set1 tf.Output, set2 tf.Output, set_operation string, optional ...DenseToDenseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
11943	if scope.Err() != nil {
11944		return
11945	}
11946	attrs := map[string]interface{}{"set_operation": set_operation}
11947	for _, a := range optional {
11948		a(attrs)
11949	}
11950	opspec := tf.OpSpec{
11951		Type: "DenseToDenseSetOperation",
11952		Input: []tf.Input{
11953			set1, set2,
11954		},
11955		Attrs: attrs,
11956	}
11957	op := scope.AddOperation(opspec)
11958	return op.Output(0), op.Output(1), op.Output(2)
11959}
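
// Editorial note: a hypothetical sketch (not generated code) taking the
// row-wise intersection of two 2-D sets. The set_operation strings
// ("a-b", "b-a", "intersection", "union") are assumed from
// SetOperationOp::SetOperationFromContext referenced above.
func exampleDenseToDenseSetOperation() (indices, values, shape tf.Output) {
	s := NewScope()
	set1 := Const(s, [][]int64{{1, 2, 3}, {4, 5, 6}})
	set2 := Const(s, [][]int64{{2, 3, 9}, {5, 5, 7}})
	return DenseToDenseSetOperation(s, set1, set2, "intersection")
}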
11960
11961// Creates a dataset that batches input elements into a SparseTensor.
11962//
11963// Arguments:
11964//
11965//	input_dataset: A handle to an input dataset. Must have a single component.
11966//	batch_size: A scalar representing the number of elements to accumulate in a
11967//
11968// batch.
11969//
11970//	row_shape: A vector representing the dense shape of each row in the produced
11971//
11972// SparseTensor. The shape may be partially specified, using `-1` to indicate
11973// that a particular dimension should use the maximum size of all batch elements.
11974func DenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
11975	if scope.Err() != nil {
11976		return
11977	}
11978	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
11979	opspec := tf.OpSpec{
11980		Type: "DenseToSparseBatchDataset",
11981		Input: []tf.Input{
11982			input_dataset, batch_size, row_shape,
11983		},
11984		Attrs: attrs,
11985	}
11986	op := scope.AddOperation(opspec)
11987	return op.Output(0)
11988}
11989
11990// DenseToSparseSetOperationAttr is an optional argument to DenseToSparseSetOperation.
11991type DenseToSparseSetOperationAttr func(optionalAttr)
11992
11993// DenseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
11994// If not specified, defaults to true
11995func DenseToSparseSetOperationValidateIndices(value bool) DenseToSparseSetOperationAttr {
11996	return func(m optionalAttr) {
11997		m["validate_indices"] = value
11998	}
11999}
12000
12001// Applies set operation along last dimension of `Tensor` and `SparseTensor`.
12002//
12003// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
12004//
12005// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
12006// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
12007// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
12008// ignored.
12009//
12010// If `validate_indices` is `True`, this op validates the order and range of `set2`
12011// indices.
12012//
12013// Output `result` is a `SparseTensor` represented by `result_indices`,
12014// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
12015// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
12016// dimension contains the result of `set_operation` applied to the corresponding
12017// `[0...n-1]` dimension of `set`.
12018//
12019// Arguments:
12020//
12021//	set1: `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
12022//
12023// Dimension `n` contains values in a set, duplicates are allowed but ignored.
12024//
12025//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
12026//
12027// order.
12028//
12029//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
12030//
12031// order.
12032//
12033//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
12034//
12035// be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
12036// max set size across `n-1` dimensions.
12037//
12038// Returns:
12039//
12040//	result_indices: 2D indices of a `SparseTensor`.
12041//	result_values: 1D values of a `SparseTensor`.
12042//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
12043//
12044// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
12045// is the max result set size across all `0...n-1` dimensions.
12046func DenseToSparseSetOperation(scope *Scope, set1 tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...DenseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
12047	if scope.Err() != nil {
12048		return
12049	}
12050	attrs := map[string]interface{}{"set_operation": set_operation}
12051	for _, a := range optional {
12052		a(attrs)
12053	}
12054	opspec := tf.OpSpec{
12055		Type: "DenseToSparseSetOperation",
12056		Input: []tf.Input{
12057			set1, set2_indices, set2_values, set2_shape,
12058		},
12059		Attrs: attrs,
12060	}
12061	op := scope.AddOperation(opspec)
12062	return op.Output(0), op.Output(1), op.Output(2)
12063}
12064
12065// DepthToSpaceAttr is an optional argument to DepthToSpace.
12066type DepthToSpaceAttr func(optionalAttr)
12067
12068// DepthToSpaceDataFormat sets the optional data_format attribute to value.
12069// If not specified, defaults to "NHWC"
12070func DepthToSpaceDataFormat(value string) DepthToSpaceAttr {
12071	return func(m optionalAttr) {
12072		m["data_format"] = value
12073	}
12074}
12075
12076// DepthToSpace for tensors of type T.
12077//
12078// Rearranges data from depth into blocks of spatial data.
12079// This is the reverse transformation of SpaceToDepth. More specifically,
12080// this op outputs a copy of the input tensor where values from the `depth`
12081// dimension are moved in spatial blocks to the `height` and `width` dimensions.
12082// The attr `block_size` indicates the input block size and how the data is moved.
12083//
12084//   - Chunks of data of size `block_size * block_size` from depth are rearranged
12085//     into non-overlapping blocks of size `block_size x block_size`
12086//   - The width of the output tensor is `input_width * block_size`, whereas the
12087//     height is `input_height * block_size`.
12088//   - The Y, X coordinates within each block of the output image are determined
12089//     by the high order component of the input channel index.
12090//   - The depth of the input tensor must be divisible by
12091//     `block_size * block_size`.
12092//
12093// The `data_format` attr specifies the layout of the input and output tensors
12094// with the following options:
12095//
12096//	"NHWC": `[ batch, height, width, channels ]`
12097//	"NCHW": `[ batch, channels, height, width ]`
12098//	"NCHW_VECT_C":
12099//	    `qint8 [ batch, channels / 4, height, width, 4 ]`
12100//
12101// It is useful to consider the operation as transforming a 6-D Tensor.
12102// e.g. for data_format = NHWC,
12103//
12104//	Each element in the input tensor can be specified via 6 coordinates,
12105//	ordered by decreasing memory layout significance as:
12106//	n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
12107//	                   within the input image, bX, bY means coordinates
12108//	                   within the output block, oC means output channels).
12109//	The output would be the input transposed to the following layout:
12110//	n,iY,bY,iX,bX,oC
12111//
12112// This operation is useful for resizing the activations between convolutions
12113// (but keeping all data), e.g. instead of pooling. It is also useful for training
12114// purely convolutional models.
12115//
12116// For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
12117// block_size = 2:
12118//
12119// ```
12120// x = [[[[1, 2, 3, 4]]]]
12121//
12122// ```
12123//
12124// This operation will output a tensor of shape `[1, 2, 2, 1]`:
12125//
12126// ```
12127//
12128//	[[[[1], [2]],
12129//	  [[3], [4]]]]
12130//
12131// ```
12132//
12133// Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`;
12134// the corresponding output will have 2x2 elements and a depth of
12135// 1 channel (1 = `4 / (block_size * block_size)`).
12136// The output element shape is `[2, 2, 1]`.
12137//
12138// For an input tensor with larger depth, e.g. of shape `[1, 1, 1, 12]`:
12139//
12140// ```
12141// x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
12142// ```
12143//
12144// This operation, for a block size of 2, will return the following tensor of shape
12145// `[1, 2, 2, 3]`
12146//
12147// ```
12148//
12149//	[[[[1, 2, 3], [4, 5, 6]],
12150//	  [[7, 8, 9], [10, 11, 12]]]]
12151//
12152// ```
12153//
12154// Similarly, for the following input of shape `[1, 2, 2, 4]` and a block size of 2:
12155//
12156// ```
12157// x =  [[[[1, 2, 3, 4],
12158//
12159//	 [5, 6, 7, 8]],
12160//	[[9, 10, 11, 12],
12161//	 [13, 14, 15, 16]]]]
12162//
12163// ```
12164//
12165// the operator will return the following tensor of shape `[1, 4, 4, 1]`:
12166//
12167// ```
12168// x = [[[ [1],   [2],  [5],  [6]],
12169//
12170//	[ [3],   [4],  [7],  [8]],
12171//	[ [9],  [10], [13],  [14]],
12172//	[ [11], [12], [15],  [16]]]]
12173//
12174// ```
12175//
12176// Arguments:
12177//
12178//	block_size: The size of the spatial block, same as in SpaceToDepth.
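//
// For illustration only, a sketch of invoking this wrapper on the first
// example above, assuming the package is imported as `op`:
//
//	s := op.NewScope()
//	x := op.Const(s, [][][][]float32{{{{1, 2, 3, 4}}}}) // shape [1, 1, 1, 4]
//	y := op.DepthToSpace(s, x, 2)                       // shape [1, 2, 2, 1]
//	_ = y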
12179func DepthToSpace(scope *Scope, input tf.Output, block_size int64, optional ...DepthToSpaceAttr) (output tf.Output) {
12180	if scope.Err() != nil {
12181		return
12182	}
12183	attrs := map[string]interface{}{"block_size": block_size}
12184	for _, a := range optional {
12185		a(attrs)
12186	}
12187	opspec := tf.OpSpec{
12188		Type: "DepthToSpace",
12189		Input: []tf.Input{
12190			input,
12191		},
12192		Attrs: attrs,
12193	}
12194	op := scope.AddOperation(opspec)
12195	return op.Output(0)
12196}
12197
12198// DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative.
12199type DepthwiseConv2dNativeAttr func(optionalAttr)
12200
12201// DepthwiseConv2dNativeExplicitPaddings sets the optional explicit_paddings attribute to value.
12202// If not specified, defaults to {}
12203func DepthwiseConv2dNativeExplicitPaddings(value []int64) DepthwiseConv2dNativeAttr {
12204	return func(m optionalAttr) {
12205		m["explicit_paddings"] = value
12206	}
12207}
12208
12209// DepthwiseConv2dNativeDataFormat sets the optional data_format attribute to value.
12210//
12211// value: Specify the data format of the input and output data. With the
12212// default format "NHWC", the data is stored in the order of:
12213//
12214//	[batch, height, width, channels].
12215//
12216// Alternatively, the format could be "NCHW", the data storage order of:
12217//
12218//	[batch, channels, height, width].
12219//
12220// If not specified, defaults to "NHWC"
12221func DepthwiseConv2dNativeDataFormat(value string) DepthwiseConv2dNativeAttr {
12222	return func(m optionalAttr) {
12223		m["data_format"] = value
12224	}
12225}
12226
12227// DepthwiseConv2dNativeDilations sets the optional dilations attribute to value.
12228//
12229// value: 1-D tensor of length 4.  The dilation factor for each dimension of
12230// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
12231// element on that dimension. The dimension order is determined by the value of
12232// `data_format`, see above for details. Dilations in the batch and depth
12233// dimensions must be 1.
12234// If not specified, defaults to {i:1 i:1 i:1 i:1}
12235func DepthwiseConv2dNativeDilations(value []int64) DepthwiseConv2dNativeAttr {
12236	return func(m optionalAttr) {
12237		m["dilations"] = value
12238	}
12239}
12240
12241// Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
12242//
12243// Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
12244// and a filter / kernel tensor of shape
12245// `[filter_height, filter_width, in_channels, channel_multiplier]`, containing
12246// `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
12247// a different filter to each input channel (expanding from 1 channel to
12248// `channel_multiplier` channels for each), then concatenates the results
12249// together. Thus, the output has `in_channels * channel_multiplier` channels.
12250//
12251// ```
12252// for k in 0..in_channels-1
12253//
12254//	for q in 0..channel_multiplier-1
12255//	  output[b, i, j, k * channel_multiplier + q] =
12256//	    sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
12257//	                      filter[di, dj, k, q]
12258//
12259// ```
12260//
12261// Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
12262// horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
12263//
12264// Arguments:
12265//
12266//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
12267//
12268// of `input`.
12269//
12270//	padding: The type of padding algorithm to use.
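//
// A minimal sketch of wiring this op, assuming import names `op` and `tf`;
// the placeholder shapes noted in comments are illustrative and unchecked:
//
//	s := op.NewScope()
//	input := op.Placeholder(s, tf.Float)  // e.g. [batch, in_height, in_width, in_channels]
//	filter := op.Placeholder(s, tf.Float) // [filter_height, filter_width, in_channels, channel_multiplier]
//	out := op.DepthwiseConv2dNative(s, input, filter, []int64{1, 1, 1, 1}, "SAME")
//	_ = out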
12271func DepthwiseConv2dNative(scope *Scope, input tf.Output, filter tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeAttr) (output tf.Output) {
12272	if scope.Err() != nil {
12273		return
12274	}
12275	attrs := map[string]interface{}{"strides": strides, "padding": padding}
12276	for _, a := range optional {
12277		a(attrs)
12278	}
12279	opspec := tf.OpSpec{
12280		Type: "DepthwiseConv2dNative",
12281		Input: []tf.Input{
12282			input, filter,
12283		},
12284		Attrs: attrs,
12285	}
12286	op := scope.AddOperation(opspec)
12287	return op.Output(0)
12288}
12289
12290// DepthwiseConv2dNativeBackpropFilterAttr is an optional argument to DepthwiseConv2dNativeBackpropFilter.
12291type DepthwiseConv2dNativeBackpropFilterAttr func(optionalAttr)
12292
12293// DepthwiseConv2dNativeBackpropFilterExplicitPaddings sets the optional explicit_paddings attribute to value.
12294// If not specified, defaults to {}
12295func DepthwiseConv2dNativeBackpropFilterExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
12296	return func(m optionalAttr) {
12297		m["explicit_paddings"] = value
12298	}
12299}
12300
12301// DepthwiseConv2dNativeBackpropFilterDataFormat sets the optional data_format attribute to value.
12302//
12303// value: Specify the data format of the input and output data. With the
12304// default format "NHWC", the data is stored in the order of:
12305//
12306//	[batch, height, width, channels].
12307//
12308// Alternatively, the format could be "NCHW", the data storage order of:
12309//
12310//	[batch, channels, height, width].
12311//
12312// If not specified, defaults to "NHWC"
12313func DepthwiseConv2dNativeBackpropFilterDataFormat(value string) DepthwiseConv2dNativeBackpropFilterAttr {
12314	return func(m optionalAttr) {
12315		m["data_format"] = value
12316	}
12317}
12318
12319// DepthwiseConv2dNativeBackpropFilterDilations sets the optional dilations attribute to value.
12320//
12321// value: 1-D tensor of length 4.  The dilation factor for each dimension of
12322// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
12323// element on that dimension. The dimension order is determined by the value of
12324// `data_format`, see above for details. Dilations in the batch and depth
12325// dimensions must be 1.
12326// If not specified, defaults to {i:1 i:1 i:1 i:1}
12327func DepthwiseConv2dNativeBackpropFilterDilations(value []int64) DepthwiseConv2dNativeBackpropFilterAttr {
12328	return func(m optionalAttr) {
12329		m["dilations"] = value
12330	}
12331}
12332
12333// Computes the gradients of depthwise convolution with respect to the filter.
12334//
12335// Arguments:
12336//
12337//	input: 4-D with shape based on `data_format`.  For example, if
12338//
12339// `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
12340// in_width, in_channels]` tensor.
12341//
12342//	filter_sizes: An integer vector representing the tensor shape of `filter`,
12343//
12344// where `filter` is a 4-D
12345// `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.
12346//
12347//	out_backprop: 4-D with shape  based on `data_format`.
12348//
12349// For example, if `data_format` is 'NHWC' then
12350// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
12351// Gradients w.r.t. the output of the convolution.
12352//
12353//	strides: The stride of the sliding window for each dimension of the input
12354//
12355// of the convolution.
12356//
12357//	padding: The type of padding algorithm to use.
12358//
12359// Returns 4-D with shape
12360// `[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
12361// the `filter` input of the convolution.
12362func DepthwiseConv2dNativeBackpropFilter(scope *Scope, input tf.Output, filter_sizes tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropFilterAttr) (output tf.Output) {
12363	if scope.Err() != nil {
12364		return
12365	}
12366	attrs := map[string]interface{}{"strides": strides, "padding": padding}
12367	for _, a := range optional {
12368		a(attrs)
12369	}
12370	opspec := tf.OpSpec{
12371		Type: "DepthwiseConv2dNativeBackpropFilter",
12372		Input: []tf.Input{
12373			input, filter_sizes, out_backprop,
12374		},
12375		Attrs: attrs,
12376	}
12377	op := scope.AddOperation(opspec)
12378	return op.Output(0)
12379}
12380
12381// DepthwiseConv2dNativeBackpropInputAttr is an optional argument to DepthwiseConv2dNativeBackpropInput.
12382type DepthwiseConv2dNativeBackpropInputAttr func(optionalAttr)
12383
12384// DepthwiseConv2dNativeBackpropInputExplicitPaddings sets the optional explicit_paddings attribute to value.
12385// If not specified, defaults to {}
12386func DepthwiseConv2dNativeBackpropInputExplicitPaddings(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
12387	return func(m optionalAttr) {
12388		m["explicit_paddings"] = value
12389	}
12390}
12391
12392// DepthwiseConv2dNativeBackpropInputDataFormat sets the optional data_format attribute to value.
12393//
12394// value: Specify the data format of the input and output data. With the
12395// default format "NHWC", the data is stored in the order of:
12396//
12397//	[batch, height, width, channels].
12398//
12399// Alternatively, the format could be "NCHW", the data storage order of:
12400//
12401//	[batch, channels, height, width].
12402//
12403// If not specified, defaults to "NHWC"
12404func DepthwiseConv2dNativeBackpropInputDataFormat(value string) DepthwiseConv2dNativeBackpropInputAttr {
12405	return func(m optionalAttr) {
12406		m["data_format"] = value
12407	}
12408}
12409
12410// DepthwiseConv2dNativeBackpropInputDilations sets the optional dilations attribute to value.
12411//
12412// value: 1-D tensor of length 4.  The dilation factor for each dimension of
12413// `input`. If set to k > 1, there will be k-1 skipped cells between each filter
12414// element on that dimension. The dimension order is determined by the value of
12415// `data_format`, see above for details. Dilations in the batch and depth
12416// dimensions must be 1.
12417// If not specified, defaults to {i:1 i:1 i:1 i:1}
12418func DepthwiseConv2dNativeBackpropInputDilations(value []int64) DepthwiseConv2dNativeBackpropInputAttr {
12419	return func(m optionalAttr) {
12420		m["dilations"] = value
12421	}
12422}
12423
12424// Computes the gradients of depthwise convolution with respect to the input.
12425//
12426// Arguments:
12427//
12428//	input_sizes: An integer vector representing the shape of `input`, based
12429//
12430// on `data_format`.  For example, if `data_format` is 'NHWC' then
12431// `input` is a 4-D `[batch, height, width, channels]` tensor.
12432//
12433//	filter: 4-D with shape
12434//
12435// `[filter_height, filter_width, in_channels, depthwise_multiplier]`.
12436//
12437//	out_backprop: 4-D with shape  based on `data_format`.
12438//
12439// For example, if `data_format` is 'NHWC' then
12440// out_backprop shape is `[batch, out_height, out_width, out_channels]`.
12441// Gradients w.r.t. the output of the convolution.
12442//
12443//	strides: The stride of the sliding window for each dimension of the input
12444//
12445// of the convolution.
12446//
12447//	padding: The type of padding algorithm to use.
12448//
12449// Returns 4-D with shape according to `data_format`.  For example, if
12450// `data_format` is 'NHWC', output shape is `[batch, in_height,
12451// in_width, in_channels]`.  Gradient w.r.t. the input of the
12452// convolution.
12453func DepthwiseConv2dNativeBackpropInput(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...DepthwiseConv2dNativeBackpropInputAttr) (output tf.Output) {
12454	if scope.Err() != nil {
12455		return
12456	}
12457	attrs := map[string]interface{}{"strides": strides, "padding": padding}
12458	for _, a := range optional {
12459		a(attrs)
12460	}
12461	opspec := tf.OpSpec{
12462		Type: "DepthwiseConv2dNativeBackpropInput",
12463		Input: []tf.Input{
12464			input_sizes, filter, out_backprop,
12465		},
12466		Attrs: attrs,
12467	}
12468	op := scope.AddOperation(opspec)
12469	return op.Output(0)
12470}
12471
12472// DequantizeAttr is an optional argument to Dequantize.
12473type DequantizeAttr func(optionalAttr)
12474
12475// DequantizeMode sets the optional mode attribute to value.
12476// If not specified, defaults to "MIN_COMBINED"
12477func DequantizeMode(value string) DequantizeAttr {
12478	return func(m optionalAttr) {
12479		m["mode"] = value
12480	}
12481}
12482
12483// DequantizeNarrowRange sets the optional narrow_range attribute to value.
12484// If not specified, defaults to false
12485func DequantizeNarrowRange(value bool) DequantizeAttr {
12486	return func(m optionalAttr) {
12487		m["narrow_range"] = value
12488	}
12489}
12490
12491// DequantizeAxis sets the optional axis attribute to value.
12492// If not specified, defaults to -1
12493func DequantizeAxis(value int64) DequantizeAttr {
12494	return func(m optionalAttr) {
12495		m["axis"] = value
12496	}
12497}
12498
12499// DequantizeDtype sets the optional dtype attribute to value.
12500//
12501// value: Type of the output tensor. Currently Dequantize supports float and bfloat16.
12502// If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode.
12503// If not specified, defaults to DT_FLOAT
12504func DequantizeDtype(value tf.DataType) DequantizeAttr {
12505	return func(m optionalAttr) {
12506		m["dtype"] = value
12507	}
12508}
12509
12510// Dequantizes the 'input' tensor into a float or bfloat16 Tensor.
12511//
12512// [min_range, max_range] are scalar floats that specify the range for
12513// the output. The 'mode' attribute controls exactly which calculations are
12514// used to convert the quantized values to their float equivalents.
12515//
12516// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
12517//
12518// ```
12519// if T == qint8: in[i] += (range(T) + 1) / 2.0
12520// out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
12521// ```
12522// where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
12523//
12524// *MIN_COMBINED Mode Example*
12525//
12526// If the input comes from a QuantizedRelu6, the output type is
12527// quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
12528// 0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
12529// Dequantize on quint8 will take each value, cast to float, and multiply
12530// by 6 / 255.
12531// Note that if the quantized type is qint8, the operation will additionally add
12532// 128 to each value prior to casting.
12533//
12534// If the mode is 'MIN_FIRST', then this approach is used:
12535//
12536// ```c++
12537// num_discrete_values = 1 << (# of bits in T)
12538// range_adjust = num_discrete_values / (num_discrete_values - 1)
12539// range = (range_max - range_min) * range_adjust
12540// range_scale = range / num_discrete_values
12542// result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
12543// ```
12544//
12545// If the mode is `SCALED`, dequantization is performed by multiplying each
12546// input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).
12547//
12548// The scaling_factor is determined from `min_range`, `max_range`, and
12549// `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
12550// and `QuantizeV2`, using the following algorithm:
12551//
12552// ```c++
12553//
12554//	const int min_expected_T = std::numeric_limits<T>::min() +
12555//	  (narrow_range ? 1 : 0);
12556//	const int max_expected_T = std::numeric_limits<T>::max();
12558//
12559//	const float scale_factor =
12560//	  (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
12561//	                                       : std::max(min_range / min_expected_T,
12562//	                                                  max_range / max_expected_T);
12563//
12564// ```
12565//
12566// Arguments:
12567//
12568//	min_range: The minimum scalar value possibly produced for the input.
12569//	max_range: The maximum scalar value possibly produced for the input.
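//
// A minimal sketch of dequantizing a qint8 tensor in `SCALED` mode, assuming
// import names `op` and `tf`; the range values are illustrative:
//
//	s := op.NewScope()
//	q := op.Placeholder(s, tf.Qint8)
//	minRange := op.Const(s, float32(-1.0))
//	maxRange := op.Const(s, float32(1.0))
//	deq := op.Dequantize(s, q, minRange, maxRange, op.DequantizeMode("SCALED"))
//	_ = deq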
12570func Dequantize(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, optional ...DequantizeAttr) (output tf.Output) {
12571	if scope.Err() != nil {
12572		return
12573	}
12574	attrs := map[string]interface{}{}
12575	for _, a := range optional {
12576		a(attrs)
12577	}
12578	opspec := tf.OpSpec{
12579		Type: "Dequantize",
12580		Input: []tf.Input{
12581			input, min_range, max_range,
12582		},
12583		Attrs: attrs,
12584	}
12585	op := scope.AddOperation(opspec)
12586	return op.Output(0)
12587}
12588
12589// Converts the given variant tensor to an iterator and stores it in the given resource.
12590//
12591// Arguments:
12592//
12593//	resource_handle: A handle to an iterator resource.
12594//	serialized: A variant tensor storing the state of the iterator contained in the
12595//
12596// resource.
12597//
12598// Returns the created operation.
12599func DeserializeIterator(scope *Scope, resource_handle tf.Output, serialized tf.Output) (o *tf.Operation) {
12600	if scope.Err() != nil {
12601		return
12602	}
12603	opspec := tf.OpSpec{
12604		Type: "DeserializeIterator",
12605		Input: []tf.Input{
12606			resource_handle, serialized,
12607		},
12608	}
12609	return scope.AddOperation(opspec)
12610}
12611
12612// Deserialize and concatenate `SparseTensors` from a serialized minibatch.
12613//
12614// The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
12615// `N` is the minibatch size and the rows correspond to packed outputs of
12616// `SerializeSparse`.  The ranks of the original `SparseTensor` objects
12617// must all match.  When the final `SparseTensor` is created, it has rank one
12618// higher than the ranks of the incoming `SparseTensor` objects
12619// (they have been concatenated along a new row dimension).
12620//
12621// The output `SparseTensor` object's shape values for all dimensions but the
12622// first are the max across the input `SparseTensor` objects' shape values
12623// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
12624// size.
12625//
12626// The input `SparseTensor` objects' indices are assumed ordered in
12627// standard lexicographic order.  If this is not the case, after this
12628// step run `SparseReorder` to restore index ordering.
12629//
12630// For example, if the serialized input is a `[2 x 3]` matrix representing two
12631// original `SparseTensor` objects:
12632//
12633//	index = [ 0]
12634//	        [10]
12635//	        [20]
12636//	values = [1, 2, 3]
12637//	shape = [50]
12638//
12639// and
12640//
12641//	index = [ 2]
12642//	        [10]
12643//	values = [4, 5]
12644//	shape = [30]
12645//
12646// then the final deserialized `SparseTensor` will be:
12647//
12648//	index = [0  0]
12649//	        [0 10]
12650//	        [0 20]
12651//	        [1  2]
12652//	        [1 10]
12653//	values = [1, 2, 3, 4, 5]
12654//	shape = [2 50]
12655//
12656// Arguments:
12657//
12658//	serialized_sparse: 2-D, The `N` serialized `SparseTensor` objects.
12659//
12660// Must have 3 columns.
12661//
12662//	dtype: The `dtype` of the serialized `SparseTensor` objects.
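//
// A minimal sketch, assuming import names `op` and `tf`: feed an `[N, 3]`
// string matrix of serialized sparse tensors and recover the batched
// components.
//
//	s := op.NewScope()
//	serialized := op.Placeholder(s, tf.String) // shape [N, 3]
//	indices, values, shape := op.DeserializeManySparse(s, serialized, tf.Int64)
//	_, _, _ = indices, values, shape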
12663func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
12664	if scope.Err() != nil {
12665		return
12666	}
12667	attrs := map[string]interface{}{"dtype": dtype}
12668	opspec := tf.OpSpec{
12669		Type: "DeserializeManySparse",
12670		Input: []tf.Input{
12671			serialized_sparse,
12672		},
12673		Attrs: attrs,
12674	}
12675	op := scope.AddOperation(opspec)
12676	return op.Output(0), op.Output(1), op.Output(2)
12677}
12678
12679// Deserialize `SparseTensor` objects.
12680//
12681// The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
12682// the last dimension stores serialized `SparseTensor` objects and the other N
12683// dimensions (N >= 0) correspond to a batch. The ranks of the original
12684// `SparseTensor` objects must all match. When the final `SparseTensor` is
12685// created, its rank is the rank of the incoming `SparseTensor` objects plus N;
12686// the sparse tensors have been concatenated along new dimensions, one for each
12687// batch.
12688//
12689// The output `SparseTensor` object's shape values for the original dimensions
12690// are the max across the input `SparseTensor` objects' shape values for the
12691// corresponding dimensions. The new dimensions match the size of the batch.
12692//
12693// The input `SparseTensor` objects' indices are assumed ordered in
12694// standard lexicographic order.  If this is not the case, after this
12695// step run `SparseReorder` to restore index ordering.
12696//
12697// For example, if the serialized input is a `[2 x 3]` matrix representing two
12698// original `SparseTensor` objects:
12699//
12700//	index = [ 0]
12701//	        [10]
12702//	        [20]
12703//	values = [1, 2, 3]
12704//	shape = [50]
12705//
12706// and
12707//
12708//	index = [ 2]
12709//	        [10]
12710//	values = [4, 5]
12711//	shape = [30]
12712//
12713// then the final deserialized `SparseTensor` will be:
12714//
12715//	index = [0  0]
12716//	        [0 10]
12717//	        [0 20]
12718//	        [1  2]
12719//	        [1 10]
12720//	values = [1, 2, 3, 4, 5]
12721//	shape = [2 50]
12722//
12723// Arguments:
12724//
12725//	serialized_sparse: The serialized `SparseTensor` objects. The last dimension
12726//
12727// must have 3 columns.
12728//
12729//	dtype: The `dtype` of the serialized `SparseTensor` objects.
12730func DeserializeSparse(scope *Scope, serialized_sparse tf.Output, dtype tf.DataType) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
12731	if scope.Err() != nil {
12732		return
12733	}
12734	attrs := map[string]interface{}{"dtype": dtype}
12735	opspec := tf.OpSpec{
12736		Type: "DeserializeSparse",
12737		Input: []tf.Input{
12738			serialized_sparse,
12739		},
12740		Attrs: attrs,
12741	}
12742	op := scope.AddOperation(opspec)
12743	return op.Output(0), op.Output(1), op.Output(2)
12744}
12745
12746// DestroyResourceOpAttr is an optional argument to DestroyResourceOp.
12747type DestroyResourceOpAttr func(optionalAttr)
12748
12749// DestroyResourceOpIgnoreLookupError sets the optional ignore_lookup_error attribute to value.
12750//
12751// value: whether to ignore the error when the resource
12752// doesn't exist.
12753// If not specified, defaults to true
12754func DestroyResourceOpIgnoreLookupError(value bool) DestroyResourceOpAttr {
12755	return func(m optionalAttr) {
12756		m["ignore_lookup_error"] = value
12757	}
12758}
12759
12760// Deletes the resource specified by the handle.
12761//
12762// All subsequent operations using the resource will result in a NotFound
12763// error status.
12764//
12765// Arguments:
12766//
12767//	resource: handle to the resource to delete.
12768//
12769// Returns the created operation.
12770func DestroyResourceOp(scope *Scope, resource tf.Output, optional ...DestroyResourceOpAttr) (o *tf.Operation) {
12771	if scope.Err() != nil {
12772		return
12773	}
12774	attrs := map[string]interface{}{}
12775	for _, a := range optional {
12776		a(attrs)
12777	}
12778	opspec := tf.OpSpec{
12779		Type: "DestroyResourceOp",
12780		Input: []tf.Input{
12781			resource,
12782		},
12783		Attrs: attrs,
12784	}
12785	return scope.AddOperation(opspec)
12786}
12787
12788// Returns the index of the device on which the op runs.
12789//
12790// Given a list of device names, this operation returns the index of the device
12791// on which this op runs. The length of the list is returned instead in two cases:
12792// (1) the device does not exist in the given device list, or
12793// (2) the op is being compiled with XLA.
12794func DeviceIndex(scope *Scope, device_names []string) (index tf.Output) {
12795	if scope.Err() != nil {
12796		return
12797	}
12798	attrs := map[string]interface{}{"device_names": device_names}
12799	opspec := tf.OpSpec{
12800		Type: "DeviceIndex",
12801
12802		Attrs: attrs,
12803	}
12804	op := scope.AddOperation(opspec)
12805	return op.Output(0)
12806}
12807
12808// Returns a diagonal tensor with given diagonal values.
12809//
12810// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
12811// everything else padded with zeros. The diagonal is computed as follows:
12812//
12813// Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
12814// rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
12815//
12816// `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
12817//
12818// For example:
12819//
12820// ```
12821// # 'diagonal' is [1, 2, 3, 4]
12822// tf.diag(diagonal) ==> [[1, 0, 0, 0]
12823//
12824//	[0, 2, 0, 0]
12825//	[0, 0, 3, 0]
12826//	[0, 0, 0, 4]]
12827//
12828// ```
12829//
12830// Arguments:
12831//
12832//	diagonal: Rank k tensor where k is at most 1.
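//
// A minimal sketch mirroring the example above, assuming the package is
// imported as `op`:
//
//	s := op.NewScope()
//	d := op.Const(s, []int32{1, 2, 3, 4})
//	m := op.Diag(s, d) // 4x4 tensor with 1..4 on the diagonal
//	_ = m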
12833func Diag(scope *Scope, diagonal tf.Output) (output tf.Output) {
12834	if scope.Err() != nil {
12835		return
12836	}
12837	opspec := tf.OpSpec{
12838		Type: "Diag",
12839		Input: []tf.Input{
12840			diagonal,
12841		},
12842	}
12843	op := scope.AddOperation(opspec)
12844	return op.Output(0)
12845}
12846
12847// Returns the diagonal part of the tensor.
12848//
12849// This operation returns a tensor with the `diagonal` part
12850// of the `input`. The `diagonal` part is computed as follows:
12851//
12852// Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
12853// tensor of rank `k` with dimensions `[D1,..., Dk]` where:
12854//
12855// `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
12856//
12857// For example:
12858//
12859// ```
12860// # 'input' is [[1, 0, 0, 0]
12861//
12862//	[0, 2, 0, 0]
12863//	[0, 0, 3, 0]
12864//	[0, 0, 0, 4]]
12865//
12866// tf.diag_part(input) ==> [1, 2, 3, 4]
12867// ```
12868//
12869// Arguments:
12870//
12871//	input: Rank k tensor where k is even and not zero.
12872//
12873// Returns The extracted diagonal.
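//
// Continuing the hypothetical `Diag` sketch above (same scope `s` and
// diagonal matrix `m`), `DiagPart` recovers the original vector:
//
//	d2 := op.DiagPart(s, m) // [1, 2, 3, 4]
//	_ = d2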
12874func DiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
12875	if scope.Err() != nil {
12876		return
12877	}
12878	opspec := tf.OpSpec{
12879		Type: "DiagPart",
12880		Input: []tf.Input{
12881			input,
12882		},
12883	}
12884	op := scope.AddOperation(opspec)
12885	return op.Output(0)
12886}
12887
12888// Computes Psi, the derivative of Lgamma (the log of the absolute value of
12889//
12890// `Gamma(x)`), element-wise.
12891func Digamma(scope *Scope, x tf.Output) (y tf.Output) {
12892	if scope.Err() != nil {
12893		return
12894	}
12895	opspec := tf.OpSpec{
12896		Type: "Digamma",
12897		Input: []tf.Input{
12898			x,
12899		},
12900	}
12901	op := scope.AddOperation(opspec)
12902	return op.Output(0)
12903}
12904
12905// Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors.
12906//
12907// The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
12908// `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
12909// input channel is processed independently of the others with its own structuring
12910// function. The `output` tensor has shape
12911// `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
12912// tensor depend on the `padding` algorithm. We currently only support the default
12913// "NHWC" `data_format`.
12914//
12915// In detail, the grayscale morphological 2-D dilation is the max-sum correlation
12916// (for consistency with `conv2d`, we use unmirrored filters):
12917//
12918//	output[b, y, x, c] =
12919//	   max_{dy, dx} input[b,
12920//	                      strides[1] * y + rates[1] * dy,
12921//	                      strides[2] * x + rates[2] * dx,
12922//	                      c] +
12923//	                filter[dy, dx, c]
12924//
12925// Max-pooling is a special case when the filter has size equal to the pooling
12926// kernel size and contains all zeros.
12927//
12928// Note on duality: The dilation of `input` by the `filter` is equal to the
12929// negation of the erosion of `-input` by the reflected `filter`.
12930//
12931// Arguments:
12932//
12933//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
12934//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
12935//	strides: The stride of the sliding window for each dimension of the input
12936//
12937// tensor. Must be: `[1, stride_height, stride_width, 1]`.
12938//
12939//	rates: The input stride for atrous morphological dilation. Must be:
12940//
12941// `[1, rate_height, rate_width, 1]`.
12942//
12943//	padding: The type of padding algorithm to use.
12944//
12945// Returns 4-D with shape `[batch, out_height, out_width, depth]`.
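//
// A minimal sketch, assuming import names `op` and `tf`; the placeholder
// shapes noted in comments are illustrative:
//
//	s := op.NewScope()
//	img := op.Placeholder(s, tf.Float)    // [batch, in_height, in_width, depth]
//	kernel := op.Placeholder(s, tf.Float) // [filter_height, filter_width, depth] structuring function
//	out := op.Dilation2D(s, img, kernel, []int64{1, 1, 1, 1}, []int64{1, 1, 1, 1}, "SAME")
//	_ = out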
12946func Dilation2D(scope *Scope, input tf.Output, filter tf.Output, strides []int64, rates []int64, padding string) (output tf.Output) {
12947	if scope.Err() != nil {
12948		return
12949	}
12950	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
12951	opspec := tf.OpSpec{
12952		Type: "Dilation2D",
12953		Input: []tf.Input{
12954			input, filter,
12955		},
12956		Attrs: attrs,
12957	}
12958	op := scope.AddOperation(opspec)
12959	return op.Output(0)
12960}
12961
12962// Computes the gradient of morphological 2-D dilation with respect to the filter.
12963//
12964// Arguments:
12965//
12966//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
12967//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
12968//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
12969//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
12970//
12971// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
12972//
12973//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
12974//
12975// Must be: `[1, rate_height, rate_width, 1]`.
12976//
12977//	padding: The type of padding algorithm to use.
12978//
12979// Returns 3-D with shape `[filter_height, filter_width, depth]`.
12980func Dilation2DBackpropFilter(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (filter_backprop tf.Output) {
12981	if scope.Err() != nil {
12982		return
12983	}
12984	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
12985	opspec := tf.OpSpec{
12986		Type: "Dilation2DBackpropFilter",
12987		Input: []tf.Input{
12988			input, filter, out_backprop,
12989		},
12990		Attrs: attrs,
12991	}
12992	op := scope.AddOperation(opspec)
12993	return op.Output(0)
12994}
12995
12996// Computes the gradient of morphological 2-D dilation with respect to the input.
12997//
12998// Arguments:
12999//
13000//	input: 4-D with shape `[batch, in_height, in_width, depth]`.
13001//	filter: 3-D with shape `[filter_height, filter_width, depth]`.
13002//	out_backprop: 4-D with shape `[batch, out_height, out_width, depth]`.
13003//	strides: 1-D of length 4. The stride of the sliding window for each dimension of
13004//
13005// the input tensor. Must be: `[1, stride_height, stride_width, 1]`.
13006//
13007//	rates: 1-D of length 4. The input stride for atrous morphological dilation.
13008//
13009// Must be: `[1, rate_height, rate_width, 1]`.
13010//
13011//	padding: The type of padding algorithm to use.
13012//
13013// Returns 4-D with shape `[batch, in_height, in_width, depth]`.
13014func Dilation2DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, rates []int64, padding string) (in_backprop tf.Output) {
13015	if scope.Err() != nil {
13016		return
13017	}
13018	attrs := map[string]interface{}{"strides": strides, "rates": rates, "padding": padding}
13019	opspec := tf.OpSpec{
13020		Type: "Dilation2DBackpropInput",
13021		Input: []tf.Input{
13022			input, filter, out_backprop,
13023		},
13024		Attrs: attrs,
13025	}
13026	op := scope.AddOperation(opspec)
13027	return op.Output(0)
13028}
13029
13030// DirectedInterleaveDatasetAttr is an optional argument to DirectedInterleaveDataset.
13031type DirectedInterleaveDatasetAttr func(optionalAttr)
13032
13033// DirectedInterleaveDatasetStopOnEmptyDataset sets the optional stop_on_empty_dataset attribute to value.
13034// If not specified, defaults to false
13035func DirectedInterleaveDatasetStopOnEmptyDataset(value bool) DirectedInterleaveDatasetAttr {
13036	return func(m optionalAttr) {
13037		m["stop_on_empty_dataset"] = value
13038	}
13039}
13040
13041// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
13042//
13043// Arguments:
13044//
13045//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
13046//
13047// `N` data inputs should produce the next output element.
13048//
13049//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
13050//
13051// the values of `selector_input_dataset`.
13052func DirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...DirectedInterleaveDatasetAttr) (handle tf.Output) {
13053	if scope.Err() != nil {
13054		return
13055	}
13056	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
13057	for _, a := range optional {
13058		a(attrs)
13059	}
13060	opspec := tf.OpSpec{
13061		Type: "DirectedInterleaveDataset",
13062		Input: []tf.Input{
13063			selector_input_dataset, tf.OutputList(data_input_datasets),
13064		},
13065		Attrs: attrs,
13066	}
13067	op := scope.AddOperation(opspec)
13068	return op.Output(0)
13069}
13070
13071// Turns off the copy-on-read mode.
13072//
13073// Turns off the copy-on-read mode of a resource variable. If the variable is not in copy-on-read mode, this op has no effect.
13074//
13075// Arguments:
13076//
13077//	resource: The resource handle of the resource variable.
13078//
13079// Returns the created operation.
13080func DisableCopyOnRead(scope *Scope, resource tf.Output) (o *tf.Operation) {
13081	if scope.Err() != nil {
13082		return
13083	}
13084	opspec := tf.OpSpec{
13085		Type: "DisableCopyOnRead",
13086		Input: []tf.Input{
13087			resource,
13088		},
13089	}
13090	return scope.AddOperation(opspec)
13091}
13092
13093// Returns x / y element-wise.
13094//
13095// *NOTE*: `Div` supports broadcasting. More about broadcasting
13096// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
13097func Div(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13098	if scope.Err() != nil {
13099		return
13100	}
13101	opspec := tf.OpSpec{
13102		Type: "Div",
13103		Input: []tf.Input{
13104			x, y,
13105		},
13106	}
13107	op := scope.AddOperation(opspec)
13108	return op.Output(0)
13109}
13110
13111// Returns x / y element-wise, yielding 0 if the denominator is zero.
13112//
13113// *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
13114// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
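//
// A small sketch of the zero-denominator behavior, assuming the package is
// imported as `op`:
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{1, 2})
//	y := op.Const(s, []float32{0, 4})
//	z := op.DivNoNan(s, x, y) // [0, 0.5]: 1/0 yields 0 instead of Inf
//	_ = z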
13115func DivNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
13116	if scope.Err() != nil {
13117		return
13118	}
13119	opspec := tf.OpSpec{
13120		Type: "DivNoNan",
13121		Input: []tf.Input{
13122			x, y,
13123		},
13124	}
13125	op := scope.AddOperation(opspec)
13126	return op.Output(0)
13127}
13128
13129// Draw bounding boxes on a batch of images.
13130//
13131// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
13132// boxes specified by the locations in `boxes`. The coordinates of each
13133// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
13134// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
13135// height of the underlying image.
13136//
13137// For example, if an image is 100 x 200 pixels (height x width) and the bounding
13138// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
13139// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
13140//
13141// Parts of the bounding box may fall outside the image.
13142//
13143// Arguments:
13144//
13145//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
13146//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
13147//
13148// boxes.
13149//
13150// Returns 4-D with the same shape as `images`. The batch of input images with
13151// bounding boxes drawn on the images.
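//
// A minimal sketch drawing the example box above, assuming import names
// `op` and `tf`; the image placeholder shape is illustrative:
//
//	s := op.NewScope()
//	images := op.Placeholder(s, tf.Float) // [batch, height, width, depth]
//	boxes := op.Const(s, [][][]float32{{{0.1, 0.2, 0.5, 0.9}}})
//	out := op.DrawBoundingBoxes(s, images, boxes)
//	_ = out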
13152func DrawBoundingBoxes(scope *Scope, images tf.Output, boxes tf.Output) (output tf.Output) {
13153	if scope.Err() != nil {
13154		return
13155	}
13156	opspec := tf.OpSpec{
13157		Type: "DrawBoundingBoxes",
13158		Input: []tf.Input{
13159			images, boxes,
13160		},
13161	}
13162	op := scope.AddOperation(opspec)
13163	return op.Output(0)
13164}
13165
13166// Draw bounding boxes on a batch of images.
13167//
13168// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
13169// boxes specified by the locations in `boxes`. The coordinates of each
13170// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
13171// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
13172// height of the underlying image.
13173//
13174// For example, if an image is 100 x 200 pixels (height x width) and the bounding
13175// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
13176// the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
13177//
13178// Parts of the bounding box may fall outside the image.
13179//
13180// Arguments:
13181//
13182//	images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
13183//	boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
13184//
13185// boxes.
13186//
13187//	colors: 2-D. A list of RGBA colors to cycle through for the boxes.
13188//
13189// Returns 4-D with the same shape as `images`. The batch of input images with
13190// bounding boxes drawn on the images.
13191func DrawBoundingBoxesV2(scope *Scope, images tf.Output, boxes tf.Output, colors tf.Output) (output tf.Output) {
13192	if scope.Err() != nil {
13193		return
13194	}
13195	opspec := tf.OpSpec{
13196		Type: "DrawBoundingBoxesV2",
13197		Input: []tf.Input{
13198			images, boxes, colors,
13199		},
13200	}
13201	op := scope.AddOperation(opspec)
13202	return op.Output(0)
13203}
13204
13205// DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr is an optional argument to DynamicEnqueueTPUEmbeddingArbitraryTensorBatch.
13206type DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr func(optionalAttr)
13207
13208// DynamicEnqueueTPUEmbeddingArbitraryTensorBatchCombiners sets the optional combiners attribute to value.
13209//
13210// value: A list of string scalars, one for each embedding table that specify
13211// how to normalize the embedding activations after weighted summation.
13212// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
13213// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
13214// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
13215// all tables.
13216// If not specified, defaults to {}
13217func DynamicEnqueueTPUEmbeddingArbitraryTensorBatchCombiners(value []string) DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr {
13218	return func(m optionalAttr) {
13219		m["combiners"] = value
13220	}
13221}
13222
13223// Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
13224//
13225// embedding_indices[i] and aggregation_weights[i] correspond
13226// to the ith feature.
13227//
13228// The tensors at corresponding positions in the three input lists (sample_indices,
13229// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
13230// with dim_size() equal to the total number of lookups into the table described by
13231// the corresponding feature.
13232//
13233// Arguments:
13234//
13235//	sample_indices_or_row_splits: A list of rank 2 Tensors specifying the training example to which the
13236//
13237// corresponding embedding_indices and aggregation_weights values belong.
13238// If the size of its first dimension is 0, we assume each embedding_indices
13239// belongs to a different sample. Both int32 and int64 are allowed and will
13240// be converted to int32 internally.
13241//
13242// Or a list of rank 1 Tensors specifying the row splits for splitting
13243// embedding_indices and aggregation_weights into rows. It corresponds to
13244// ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When
13245// enqueuing an N-D ragged tensor, only the last dimension is allowed to be
13246// ragged; the row splits is a 1-D dense tensor. When empty, we assume a dense
13247// tensor is being passed to the op. Both int32 and int64 are allowed and will
13248// be converted to int32 internally.
13249//
13250//	embedding_indices: A list of rank 1 Tensors, indices into the embedding
13251//
13252// tables. Both int32 and int64 are allowed and will be converted to
13253// int32 internally.
13254//
13255//	aggregation_weights: A list of rank 1 Tensors containing per training
13256//
13257// example aggregation weights. Both float32 and float64 are allowed and will
13258// be converted to float32 internally.
13259//
13260//	mode_override: A string input that overrides the mode specified in the
13261//
13262// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
13263// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
13264// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
13265//
13266//	device_ordinal: The TPU device to use. Should be >= 0 and less than the number
13267//
13268// of TPU cores in the task on which the node is placed.
13269//
13270// Returns the created operation.
13271func DynamicEnqueueTPUEmbeddingArbitraryTensorBatch(scope *Scope, sample_indices_or_row_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, device_ordinal tf.Output, optional ...DynamicEnqueueTPUEmbeddingArbitraryTensorBatchAttr) (o *tf.Operation) {
13272	if scope.Err() != nil {
13273		return
13274	}
13275	attrs := map[string]interface{}{}
13276	for _, a := range optional {
13277		a(attrs)
13278	}
13279	opspec := tf.OpSpec{
13280		Type: "DynamicEnqueueTPUEmbeddingArbitraryTensorBatch",
13281		Input: []tf.Input{
13282			tf.OutputList(sample_indices_or_row_splits), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override, device_ordinal,
13283		},
13284		Attrs: attrs,
13285	}
13286	return scope.AddOperation(opspec)
13287}
13288
13289// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
13290//
13291// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
13292// becomes part of `outputs[partitions[js]]`.  The slices with `partitions[js] = i`
13293// are placed in `outputs[i]` in lexicographic order of `js`, and the first
13294// dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`.
13295// In detail,
13296//
13297// ```python
13298//
13299//	outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:]
13300//
13301//	outputs[i] = pack([data[js, ...] for js if partitions[js] == i])
13302//
13303// ```
13304//
13305// `data.shape` must start with `partitions.shape`.
13306//
13307// For example:
13308//
13309// ```python
13310//
13311//	# Scalar partitions.
13312//	partitions = 1
13313//	num_partitions = 2
13314//	data = [10, 20]
13315//	outputs[0] = []  # Empty with shape [0, 2]
13316//	outputs[1] = [[10, 20]]
13317//
13318//	# Vector partitions.
13319//	partitions = [0, 0, 1, 1, 0]
13320//	num_partitions = 2
13321//	data = [10, 20, 30, 40, 50]
13322//	outputs[0] = [10, 20, 50]
13323//	outputs[1] = [30, 40]
13324//
13325// ```
13326//
13327// See `dynamic_stitch` for an example on how to merge partitions back.
13328//
13329// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13330// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicPartition.png" alt>
13331// </div>
13332//
13333// Arguments:
13334//
13335//	partitions: Any shape.  Indices in the range `[0, num_partitions)`.
13336//	num_partitions: The number of partitions to output.
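//
// A minimal sketch of the vector-partitions example above, assuming the
// package is imported as `op`:
//
//	s := op.NewScope()
//	data := op.Const(s, []int32{10, 20, 30, 40, 50})
//	parts := op.Const(s, []int32{0, 0, 1, 1, 0})
//	outs := op.DynamicPartition(s, data, parts, 2)
//	// outs[0] evaluates to [10, 20, 50], outs[1] to [30, 40].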
13337func DynamicPartition(scope *Scope, data tf.Output, partitions tf.Output, num_partitions int64) (outputs []tf.Output) {
13338	if scope.Err() != nil {
13339		return
13340	}
13341	attrs := map[string]interface{}{"num_partitions": num_partitions}
13342	opspec := tf.OpSpec{
13343		Type: "DynamicPartition",
13344		Input: []tf.Input{
13345			data, partitions,
13346		},
13347		Attrs: attrs,
13348	}
13349	op := scope.AddOperation(opspec)
13350	if scope.Err() != nil {
13351		return
13352	}
13353	var idx int
13354	var err error
13355	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
13356		scope.UpdateErr("DynamicPartition", err)
13357		return
13358	}
13359	return outputs
13360}
13361
13362// Interleave the values from the `data` tensors into a single tensor.
13363//
13364// Builds a merged tensor such that
13365//
13366// ```python
13367//
13368//	merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
13369//
13370// ```
13371//
13372// For example, if each `indices[m]` is scalar or vector, we have
13373//
13374// ```python
13375//
13376//	# Scalar indices:
13377//	merged[indices[m], ...] = data[m][...]
13378//
13379//	# Vector indices:
13380//	merged[indices[m][i], ...] = data[m][i, ...]
13381//
13382// ```
13383//
13384// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
13385// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
13386// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
13387// `constant`, the output shape is
13388//
13389//	merged.shape = [max(indices) + 1] + constant
13390//
13391// Values are merged in order, so if an index appears in both `indices[m][i]` and
13392// `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
13393// merged result. If you do not need this guarantee, ParallelDynamicStitch might
13394// perform better on some devices.
13395//
13396// For example:
13397//
13398// ```python
13399//
13400//	indices[0] = 6
13401//	indices[1] = [4, 1]
13402//	indices[2] = [[5, 2], [0, 3]]
13403//	data[0] = [61, 62]
13404//	data[1] = [[41, 42], [11, 12]]
13405//	data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
13406//	merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
13407//	          [51, 52], [61, 62]]
13408//
13409// ```
13410//
13411// This method can be used to merge partitions created by `dynamic_partition`
13412// as illustrated on the following example:
13413//
13414// ```python
13415//
13416//	# Apply a function (increment x_i) to elements for which a certain
13417//	# condition applies (x_i != -1 in this example).
13418//	x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
13419//	condition_mask=tf.not_equal(x,tf.constant(-1.))
13420//	partitioned_data = tf.dynamic_partition(
13421//	    x, tf.cast(condition_mask, tf.int32) , 2)
13422//	partitioned_data[1] = partitioned_data[1] + 1.0
13423//	condition_indices = tf.dynamic_partition(
13424//	    tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
13425//	x = tf.dynamic_stitch(condition_indices, partitioned_data)
13426//	# Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
13427//	# unchanged.
13428//
13429// ```
13430//
13431// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13432// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
13433// </div>
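//
// A minimal sketch, assuming import names `op` and `tf`:
//
//	s := op.NewScope()
//	idx0 := op.Const(s, []int32{0, 2})
//	idx1 := op.Const(s, []int32{1})
//	d0 := op.Const(s, []float32{10, 30})
//	d1 := op.Const(s, []float32{20})
//	merged := op.DynamicStitch(s, []tf.Output{idx0, idx1}, []tf.Output{d0, d1})
//	// merged evaluates to [10, 20, 30].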
13434func DynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
13435	if scope.Err() != nil {
13436		return
13437	}
13438	opspec := tf.OpSpec{
13439		Type: "DynamicStitch",
13440		Input: []tf.Input{
13441			tf.OutputList(indices), tf.OutputList(data),
13442		},
13443	}
13444	op := scope.AddOperation(opspec)
13445	return op.Output(0)
13446}
13447
13448// EagerPyFuncAttr is an optional argument to EagerPyFunc.
13449type EagerPyFuncAttr func(optionalAttr)
13450
13451// EagerPyFuncIsAsync sets the optional is_async attribute to value.
13452// If not specified, defaults to false
13453func EagerPyFuncIsAsync(value bool) EagerPyFuncAttr {
13454	return func(m optionalAttr) {
13455		m["is_async"] = value
13456	}
13457}
13458
13459// Eagerly executes a python function to compute func(input)->output. The
13460//
13461// semantics of the input, output, and attributes are the same as those for
13462// PyFunc.
13463func EagerPyFunc(scope *Scope, input []tf.Output, token string, Tout []tf.DataType, optional ...EagerPyFuncAttr) (output []tf.Output) {
13464	if scope.Err() != nil {
13465		return
13466	}
13467	attrs := map[string]interface{}{"token": token, "Tout": Tout}
13468	for _, a := range optional {
13469		a(attrs)
13470	}
13471	opspec := tf.OpSpec{
13472		Type: "EagerPyFunc",
13473		Input: []tf.Input{
13474			tf.OutputList(input),
13475		},
13476		Attrs: attrs,
13477	}
13478	op := scope.AddOperation(opspec)
13479	if scope.Err() != nil {
13480		return
13481	}
13482	var idx int
13483	var err error
13484	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
13485		scope.UpdateErr("EagerPyFunc", err)
13486		return
13487	}
13488	return output
13489}
13490
13491// EditDistanceAttr is an optional argument to EditDistance.
13492type EditDistanceAttr func(optionalAttr)
13493
13494// EditDistanceNormalize sets the optional normalize attribute to value.
13495//
13496// value: boolean (if true, edit distances are normalized by length of truth).
13497//
13499// If not specified, defaults to true
13500func EditDistanceNormalize(value bool) EditDistanceAttr {
13501	return func(m optionalAttr) {
13502		m["normalize"] = value
13503	}
13504}
13505
13506// Computes the (possibly normalized) Levenshtein Edit Distance.
13507//
13508// The inputs are variable-length sequences provided by SparseTensors
13509//
13510//	(hypothesis_indices, hypothesis_values, hypothesis_shape)
13511//
13512// and
13513//
13514//	(truth_indices, truth_values, truth_shape).
13515//
13516// The inputs are:
13517//
13518// Arguments:
13519//
13520//	hypothesis_indices: The indices of the hypothesis list SparseTensor.
13521//
13522// This is an N x R int64 matrix.
13523//
13524//	hypothesis_values: The values of the hypothesis list SparseTensor.
13525//
13526// This is an N-length vector.
13527//
13528//	hypothesis_shape: The shape of the hypothesis list SparseTensor.
13529//
13530// This is an R-length vector.
13531//
13532//	truth_indices: The indices of the truth list SparseTensor.
13533//
13534// This is an M x R int64 matrix.
13535//
13536//	truth_values: The values of the truth list SparseTensor.
13537//
13538// This is an M-length vector.
13539//
13540//	truth_shape: The shape of the truth list SparseTensor; an R-length vector.
13541//
13542// Returns A dense float tensor with rank R - 1.
13543//
13544// For the example input:
13545//
13546//	// hypothesis represents a 2x1 matrix with variable-length values:
13547//	//   (0,0) = ["a"]
13548//	//   (1,0) = ["b"]
13549//	hypothesis_indices = [[0, 0, 0],
13550//	                      [1, 0, 0]]
13551//	hypothesis_values = ["a", "b"]
13552//	hypothesis_shape = [2, 1, 1]
13553//
13554//	// truth represents a 2x2 matrix with variable-length values:
13555//	//   (0,0) = []
13556//	//   (0,1) = ["a"]
13557//	//   (1,0) = ["b", "c"]
13558//	//   (1,1) = ["a"]
13559//	truth_indices = [[0, 1, 0],
13560//	                 [1, 0, 0],
13561//	                 [1, 0, 1],
13562//	                 [1, 1, 0]]
13563//	truth_values = ["a", "b", "c", "a"]
13564//	truth_shape = [2, 2, 2]
13565//	normalize = true
13566//
13567// The output will be:
13568//
13569//	// output is a 2x2 matrix with edit distances normalized by truth lengths.
13570//	output = [[inf, 1.0],  // (0,0): no truth, (0,1): no hypothesis
13571//	          [0.5, 1.0]]  // (1,0): addition, (1,1): no hypothesis
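//
// A minimal sketch of wiring the op with placeholders, assuming import names
// `op` and `tf`:
//
//	s := op.NewScope()
//	hypIdx := op.Placeholder(s, tf.Int64)  // N x R indices
//	hypVal := op.Placeholder(s, tf.String) // N values
//	hypShp := op.Placeholder(s, tf.Int64)  // R-length shape
//	trIdx := op.Placeholder(s, tf.Int64)
//	trVal := op.Placeholder(s, tf.String)
//	trShp := op.Placeholder(s, tf.Int64)
//	dist := op.EditDistance(s, hypIdx, hypVal, hypShp, trIdx, trVal, trShp, op.EditDistanceNormalize(true))
//	_ = dist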
13572func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) {
13573	if scope.Err() != nil {
13574		return
13575	}
13576	attrs := map[string]interface{}{}
13577	for _, a := range optional {
13578		a(attrs)
13579	}
13580	opspec := tf.OpSpec{
13581		Type: "EditDistance",
13582		Input: []tf.Input{
13583			hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape,
13584		},
13585		Attrs: attrs,
13586	}
13587	op := scope.AddOperation(opspec)
13588	return op.Output(0)
13589}
13590
13591// EigAttr is an optional argument to Eig.
13592type EigAttr func(optionalAttr)
13593
13594// EigComputeV sets the optional compute_v attribute to value.
13595//
13596// value: If `True` then eigenvectors will be computed and returned in `v`.
13597// Otherwise, only the eigenvalues will be computed.
13598// If not specified, defaults to true
13599func EigComputeV(value bool) EigAttr {
13600	return func(m optionalAttr) {
13601		m["compute_v"] = value
13602	}
13603}
13604
13605// Computes the eigen decomposition of one or more square matrices.
13606//
13607// Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in
13608// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
13609// are sorted in non-decreasing order.
13610//
13611// ```python
13612// # a is a tensor.
13613// # e is a tensor of eigenvalues.
13614// # v is a tensor of eigenvectors.
13615// e, v = eig(a)
13616// e = eig(a, compute_v=False)
13617// ```
13618//
13619// Arguments:
13620//
13621//	input: `Tensor` input of shape `[N, N]`.
13622//
13623// Returns:
13624//
13625//	e: Eigenvalues. Shape is `[N]`.
13626//	v: Eigenvectors. Shape is `[N, N]`.
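//
// A minimal sketch, assuming import names `op` and `tf`; note that `Tout`
// must be a complex type:
//
//	s := op.NewScope()
//	a := op.Placeholder(s, tf.Float) // square [N, N] matrix
//	e, v := op.Eig(s, a, tf.Complex64)
//	_, _ = e, v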
13627func Eig(scope *Scope, input tf.Output, Tout tf.DataType, optional ...EigAttr) (e tf.Output, v tf.Output) {
13628	if scope.Err() != nil {
13629		return
13630	}
13631	attrs := map[string]interface{}{"Tout": Tout}
13632	for _, a := range optional {
13633		a(attrs)
13634	}
13635	opspec := tf.OpSpec{
13636		Type: "Eig",
13637		Input: []tf.Input{
13638			input,
13639		},
13640		Attrs: attrs,
13641	}
13642	op := scope.AddOperation(opspec)
13643	return op.Output(0), op.Output(1)
13644}
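
// Editorial usage sketch (not machine generated): requests both eigenvalues
// and eigenvectors of a 2x2 complex matrix. NewScope and Const are assumed
// from this package; the function name is illustrative only.
func exampleEig() (e, v tf.Output) {
	s := NewScope()
	a := Const(s, [][]complex128{
		{complex(1, 0), complex(0, 0)},
		{complex(0, 0), complex(2, 0)},
	})
	// compute_v already defaults to true; it is spelled out here to show how
	// EigAttr options are passed.
	return Eig(s, a, tf.Complex128, EigComputeV(true))
}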
13645
13646// Tensor contraction according to Einstein summation convention.
13647//
13648// Implements generalized Tensor contraction and reduction. Each input Tensor must
13649// have a corresponding input subscript appearing in the comma-separated left-hand
13650// side of the equation. The right-hand side of the equation consists of the
13651// output subscript. The input subscripts and the output subscript should consist
13652// of zero or more named axis labels and at most one ellipsis (`...`).
13653//
13654// The named axis labels may be any single character other than those having
13655// special meaning, namely `,.->`. The behavior of this Op is undefined if it
13656// receives an ill-formatted equation; since the validation is done at
13657// graph-building time, we omit format validation checks at runtime.
13658//
13659// Note: This Op is *not* intended to be called by the user; instead users should
13660// call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.
13661//
13662// Operations are applied to the input(s) according to the following rules:
13663//
13664//	(a) Generalized Diagonals: For input dimensions corresponding to axis labels
13665//	    appearing more than once in the same input subscript, we take the
13666//	    generalized (`k`-dimensional) diagonal.
13667//	    For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
13668//	    generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
13669//	    `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.
13670//
13671//	(b) Reduction: Axes corresponding to labels appearing only in one input
13672//	    subscript but not in the output subscript are summed over prior to Tensor
13673//	    contraction.
13674//	    For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
13675//	    the reduction axis labels.
13676//
13677//	(c) Batch Dimensions: Axes corresponding to labels appearing in each of the
13678//	    input subscripts and also in the output subscript make up the batch
13679//	    dimensions in Tensor contraction. Unnamed axis labels corresponding to
13680//	    ellipsis (`...`) also correspond to batch dimensions.
13681//	    For example, for the equation denoting batch matrix multiplication,
13682//	    `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.
13683//
13684//	(d) Contraction: In case of binary einsum, axes corresponding to labels
13685//	    appearing in two different inputs (and not in the output) are contracted
13686//	    against each other.
13687//	    Considering the batch matrix multiplication equation again
13688//	    (`bij,bjk->bik`), the contracted axis label is `j`.
13689//
13690//	(e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
13691//	    labels, the opposite operation of (a) is applied. For example, in the
13692//	    equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`
13693//	    are all zeros, except for the (generalized) diagonal which is populated
13694//	    with values from the input.
13695//	    Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
13696//	    provided to enable computing the symbolic gradient of `tf.einsum`.
13697//
13698// The output subscripts must contain only labels appearing in at least one of the
13699// input subscripts. Furthermore, all dimensions mapping to the same axis label
13700// must be equal.
13701//
13702// Any of the input and output subscripts may contain at most a single ellipsis
13703// (`...`). These ellipses are mapped against dimensions not corresponding to any
13704// named axis label. If two inputs contain an ellipsis, then they are broadcasted
13705// according to standard NumPy broadcasting
13706// [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
13707//
13708// The broadcasted dimensions are placed in the corresponding location of the
13709// ellipsis in the output subscript. If the broadcasted dimensions are non-empty
13710// and the output subscripts do not contain ellipsis, then an InvalidArgument error
13711// is raised.
13712//
13713// @compatibility(numpy)
13714// Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).
13715//
13716// Comparison with `numpy.einsum`:
13717//
13718//   - This Op only supports unary and binary forms of `numpy.einsum`.
13719//   - This Op does not support the implicit form (i.e. equations without `->`).
13720//   - This Op also supports repeated indices in the output subscript, which is not
13721//     supported by `numpy.einsum`.
13722//
13723// @end_compatibility
13724//
13725// Arguments:
13726//
13727//	inputs: List of 1 or 2 Tensors.
13728//	equation: String describing the Einstein Summation operation; in the format of np.einsum.
13729//
13730// Returns Output Tensor with shape depending upon `equation`.
13731func Einsum(scope *Scope, inputs []tf.Output, equation string) (output tf.Output) {
13732	if scope.Err() != nil {
13733		return
13734	}
13735	attrs := map[string]interface{}{"equation": equation}
13736	opspec := tf.OpSpec{
13737		Type: "Einsum",
13738		Input: []tf.Input{
13739			tf.OutputList(inputs),
13740		},
13741		Attrs: attrs,
13742	}
13743	op := scope.AddOperation(opspec)
13744	return op.Output(0)
13745}
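
// Editorial usage sketch (not machine generated): matrix multiplication
// expressed as an einsum equation, mirroring the `bij,bjk->bik` discussion
// above in its unbatched form. NewScope and Const are assumed from this
// package.
func exampleEinsum() tf.Output {
	s := NewScope()
	x := Const(s, [][]float32{{1, 2}, {3, 4}})
	y := Const(s, [][]float32{{5, 6}, {7, 8}})
	// "ij,jk->ik" contracts the shared axis label j.
	return Einsum(s, []tf.Output{x, y}, "ij,jk->ik")
}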
13746
13747// Computes the exponential linear function.
13748//
13749// The ELU function is defined as:
13750//
13751//   - $ e ^ x - 1 $ if $ x < 0 $
13752//   - $ x $ if $ x >= 0 $
13753//
13754// Examples:
13755//
13756// >>> tf.nn.elu(1.0)
13757// <tf.Tensor: shape=(), dtype=float32, numpy=1.0>
13758// >>> tf.nn.elu(0.0)
13759// <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
13760// >>> tf.nn.elu(-1000.0)
13761// <tf.Tensor: shape=(), dtype=float32, numpy=-1.0>
13762//
13763// See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
13764// ](http://arxiv.org/abs/1511.07289)
13765func Elu(scope *Scope, features tf.Output) (activations tf.Output) {
13766	if scope.Err() != nil {
13767		return
13768	}
13769	opspec := tf.OpSpec{
13770		Type: "Elu",
13771		Input: []tf.Input{
13772			features,
13773		},
13774	}
13775	op := scope.AddOperation(opspec)
13776	return op.Output(0)
13777}
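
// Editorial usage sketch (not machine generated): applies Elu to a small
// constant vector. NewScope and Const are assumed from this package.
func exampleElu() tf.Output {
	s := NewScope()
	x := Const(s, []float32{-1.0, 0.0, 1.0})
	// Expected values: approximately [e^-1 - 1, 0.0, 1.0] = [-0.632..., 0.0, 1.0].
	return Elu(s, x)
}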
13778
13779// Computes gradients for the exponential linear (Elu) operation.
13780//
13781// Arguments:
13782//
13783//	gradients: The backpropagated gradients to the corresponding Elu operation.
13784//	outputs: The outputs of the corresponding Elu operation.
13785//
13786// Returns The gradients: `gradients * (outputs + 1)` if outputs < 0,
13787// `gradients` otherwise.
13788func EluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
13789	if scope.Err() != nil {
13790		return
13791	}
13792	opspec := tf.OpSpec{
13793		Type: "EluGrad",
13794		Input: []tf.Input{
13795			gradients, outputs,
13796		},
13797	}
13798	op := scope.AddOperation(opspec)
13799	return op.Output(0)
13800}
13801
13802// EmptyAttr is an optional argument to Empty.
13803type EmptyAttr func(optionalAttr)
13804
13805// EmptyInit sets the optional init attribute to value.
13806//
13807// value: If True, initialize the returned tensor with the default value of dtype. Otherwise, the implementation is free not to initialize the tensor's content.
13808// If not specified, defaults to false
13809func EmptyInit(value bool) EmptyAttr {
13810	return func(m optionalAttr) {
13811		m["init"] = value
13812	}
13813}
13814
13815// Creates a tensor with the given shape.
13816//
13817// This operation creates a tensor of `shape` and `dtype`.
13818//
13819// Arguments:
13820//
13821//	shape: 1-D. Represents the shape of the output tensor.
13822//
13823// Returns A `Tensor` of type `T`.
13824func Empty(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...EmptyAttr) (output tf.Output) {
13825	if scope.Err() != nil {
13826		return
13827	}
13828	attrs := map[string]interface{}{"dtype": dtype}
13829	for _, a := range optional {
13830		a(attrs)
13831	}
13832	opspec := tf.OpSpec{
13833		Type: "Empty",
13834		Input: []tf.Input{
13835			shape,
13836		},
13837		Attrs: attrs,
13838	}
13839	op := scope.AddOperation(opspec)
13840	return op.Output(0)
13841}
13842
13843// Creates and returns an empty tensor list.
13844//
13845// All list elements must be tensors of dtype element_dtype and shape compatible
13846// with element_shape.
13847//
13848// handle: an empty tensor list.
13849// element_dtype: the type of elements in the list.
13850// element_shape: a shape compatible with that of elements in the list.
13851func EmptyTensorList(scope *Scope, element_shape tf.Output, max_num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
13852	if scope.Err() != nil {
13853		return
13854	}
13855	attrs := map[string]interface{}{"element_dtype": element_dtype}
13856	opspec := tf.OpSpec{
13857		Type: "EmptyTensorList",
13858		Input: []tf.Input{
13859			element_shape, max_num_elements,
13860		},
13861		Attrs: attrs,
13862	}
13863	op := scope.AddOperation(opspec)
13864	return op.Output(0)
13865}
13866
13867// Creates and returns an empty tensor map.
13868//
13869// handle: an empty tensor map
13870func EmptyTensorMap(scope *Scope) (handle tf.Output) {
13871	if scope.Err() != nil {
13872		return
13873	}
13874	opspec := tf.OpSpec{
13875		Type: "EmptyTensorMap",
13876	}
13877	op := scope.AddOperation(opspec)
13878	return op.Output(0)
13879}
13880
13881// EncodeBase64Attr is an optional argument to EncodeBase64.
13882type EncodeBase64Attr func(optionalAttr)
13883
13884// EncodeBase64Pad sets the optional pad attribute to value.
13885//
13886// value: Bool whether padding is applied at the ends.
13887// If not specified, defaults to false
13888func EncodeBase64Pad(value bool) EncodeBase64Attr {
13889	return func(m optionalAttr) {
13890		m["pad"] = value
13891	}
13892}
13893
13894// Encode strings into web-safe base64 format.
13895//
13896// Refer to [this article](https://en.wikipedia.org/wiki/Base64) for more information on
13897// base64 format. Base64 strings may have padding with '=' at the
13898// end so that the encoded data has a length that is a multiple of 4. See the Padding section of the
13899// link above.
13900//
13901// Web-safe means that the encoder uses - and _ instead of + and /.
13902//
13903// Arguments:
13904//
13905//	input: Strings to be encoded.
13906//
13907// Returns Input strings encoded in base64.
13908func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) {
13909	if scope.Err() != nil {
13910		return
13911	}
13912	attrs := map[string]interface{}{}
13913	for _, a := range optional {
13914		a(attrs)
13915	}
13916	opspec := tf.OpSpec{
13917		Type: "EncodeBase64",
13918		Input: []tf.Input{
13919			input,
13920		},
13921		Attrs: attrs,
13922	}
13923	op := scope.AddOperation(opspec)
13924	return op.Output(0)
13925}
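
// Editorial usage sketch (not machine generated): encodes two strings and
// requests '=' padding so each encoded length is a multiple of 4. NewScope
// and Const are assumed from this package.
func exampleEncodeBase64() tf.Output {
	s := NewScope()
	in := Const(s, []string{"hello", "world"})
	return EncodeBase64(s, in, EncodeBase64Pad(true))
}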
13926
13927// EncodeJpegAttr is an optional argument to EncodeJpeg.
13928type EncodeJpegAttr func(optionalAttr)
13929
13930// EncodeJpegFormat sets the optional format attribute to value.
13931//
13932// value: Per pixel image format.
13933// If not specified, defaults to ""
13934func EncodeJpegFormat(value string) EncodeJpegAttr {
13935	return func(m optionalAttr) {
13936		m["format"] = value
13937	}
13938}
13939
13940// EncodeJpegQuality sets the optional quality attribute to value.
13941//
13942// value: Quality of the compression from 0 to 100 (higher is better and slower).
13943// If not specified, defaults to 95
13944func EncodeJpegQuality(value int64) EncodeJpegAttr {
13945	return func(m optionalAttr) {
13946		m["quality"] = value
13947	}
13948}
13949
13950// EncodeJpegProgressive sets the optional progressive attribute to value.
13951//
13952// value: If True, create a JPEG that loads progressively (coarse to fine).
13953// If not specified, defaults to false
13954func EncodeJpegProgressive(value bool) EncodeJpegAttr {
13955	return func(m optionalAttr) {
13956		m["progressive"] = value
13957	}
13958}
13959
13960// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value.
13961//
13962// value: If True, spend CPU/RAM to reduce size with no quality change.
13963// If not specified, defaults to false
13964func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr {
13965	return func(m optionalAttr) {
13966		m["optimize_size"] = value
13967	}
13968}
13969
13970// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value.
13971//
13972// value: See http://en.wikipedia.org/wiki/Chroma_subsampling.
13973// If not specified, defaults to true
13974func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr {
13975	return func(m optionalAttr) {
13976		m["chroma_downsampling"] = value
13977	}
13978}
13979
13980// EncodeJpegDensityUnit sets the optional density_unit attribute to value.
13981//
13982// value: Unit used to specify `x_density` and `y_density`:
13983// pixels per inch (`'in'`) or centimeter (`'cm'`).
13984// If not specified, defaults to "in"
13985func EncodeJpegDensityUnit(value string) EncodeJpegAttr {
13986	return func(m optionalAttr) {
13987		m["density_unit"] = value
13988	}
13989}
13990
13991// EncodeJpegXDensity sets the optional x_density attribute to value.
13992//
13993// value: Horizontal pixels per density unit.
13994// If not specified, defaults to 300
13995func EncodeJpegXDensity(value int64) EncodeJpegAttr {
13996	return func(m optionalAttr) {
13997		m["x_density"] = value
13998	}
13999}
14000
14001// EncodeJpegYDensity sets the optional y_density attribute to value.
14002//
14003// value: Vertical pixels per density unit.
14004// If not specified, defaults to 300
14005func EncodeJpegYDensity(value int64) EncodeJpegAttr {
14006	return func(m optionalAttr) {
14007		m["y_density"] = value
14008	}
14009}
14010
14011// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value.
14012//
14013// value: If not empty, embed this XMP metadata in the image header.
14014// If not specified, defaults to ""
14015func EncodeJpegXmpMetadata(value string) EncodeJpegAttr {
14016	return func(m optionalAttr) {
14017		m["xmp_metadata"] = value
14018	}
14019}
14020
14021// JPEG-encode an image.
14022//
14023// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
14024//
14025// The attr `format` can be used to override the color format of the encoded
14026// output.  Values can be:
14027//
14028//   - `''`: Use a default format based on the number of channels in the image.
14029//   - `grayscale`: Output a grayscale JPEG image.  The `channels` dimension
14030//     of `image` must be 1.
14031//   - `rgb`: Output an RGB JPEG image. The `channels` dimension
14032//     of `image` must be 3.
14033//
14034// If `format` is not specified or is the empty string, a default format is picked
14035// based on the number of channels in `image`:
14036//
14037// *   1: Output a grayscale image.
14038// *   3: Output an RGB image.
14039//
14040// Arguments:
14041//
14042//	image: 3-D with shape `[height, width, channels]`.
14043//
14044// Returns 0-D. JPEG-encoded image.
14045func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) {
14046	if scope.Err() != nil {
14047		return
14048	}
14049	attrs := map[string]interface{}{}
14050	for _, a := range optional {
14051		a(attrs)
14052	}
14053	opspec := tf.OpSpec{
14054		Type: "EncodeJpeg",
14055		Input: []tf.Input{
14056			image,
14057		},
14058		Attrs: attrs,
14059	}
14060	op := scope.AddOperation(opspec)
14061	return op.Output(0)
14062}
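
// Editorial usage sketch (not machine generated): shows how the functional
// option attrs compose; each EncodeJpegAttr writes one entry into the op's
// attribute map. The scope and image arguments are assumed to come from the
// caller's graph.
func exampleEncodeJpeg(s *Scope, image tf.Output) tf.Output {
	return EncodeJpeg(s, image,
		EncodeJpegQuality(90),       // higher is better and slower
		EncodeJpegProgressive(true), // coarse-to-fine loading
		EncodeJpegOptimizeSize(true))
}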
14063
14064// JPEG encode input image with provided compression quality.
14065//
14066// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`.
14067// `quality` is an int32 jpeg compression quality value between 0 and 100.
14068//
14069// Arguments:
14070//
14071//	images: Images to adjust.  At least 3-D.
14072//	quality: An int quality to encode to.
14073//
14074// Returns 0-D. JPEG-encoded image.
14075func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output) {
14076	if scope.Err() != nil {
14077		return
14078	}
14079	opspec := tf.OpSpec{
14080		Type: "EncodeJpegVariableQuality",
14081		Input: []tf.Input{
14082			images, quality,
14083		},
14084	}
14085	op := scope.AddOperation(opspec)
14086	return op.Output(0)
14087}
14088
14089// EncodePngAttr is an optional argument to EncodePng.
14090type EncodePngAttr func(optionalAttr)
14091
14092// EncodePngCompression sets the optional compression attribute to value.
14093//
14094// value: Compression level.
14095// If not specified, defaults to -1
14096func EncodePngCompression(value int64) EncodePngAttr {
14097	return func(m optionalAttr) {
14098		m["compression"] = value
14099	}
14100}
14101
14102// PNG-encode an image.
14103//
14104// `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]`
14105// where `channels` is:
14106//
14107// *   1: for grayscale.
14108// *   2: for grayscale + alpha.
14109// *   3: for RGB.
14110// *   4: for RGBA.
14111//
14112// The ZLIB compression level, `compression`, can be -1 for the PNG-encoder
14113// default or a value from 0 to 9.  9 is the highest compression level, generating
14114// the smallest output, but is slower.
14115//
14116// Arguments:
14117//
14118//	image: 3-D with shape `[height, width, channels]`.
14119//
14120// Returns 0-D. PNG-encoded image.
14121func EncodePng(scope *Scope, image tf.Output, optional ...EncodePngAttr) (contents tf.Output) {
14122	if scope.Err() != nil {
14123		return
14124	}
14125	attrs := map[string]interface{}{}
14126	for _, a := range optional {
14127		a(attrs)
14128	}
14129	opspec := tf.OpSpec{
14130		Type: "EncodePng",
14131		Input: []tf.Input{
14132			image,
14133		},
14134		Attrs: attrs,
14135	}
14136	op := scope.AddOperation(opspec)
14137	return op.Output(0)
14138}
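
// Editorial usage sketch (not machine generated): PNG-encodes an image at the
// highest ZLIB compression level. The scope and image arguments are assumed
// to come from the caller's graph.
func exampleEncodePng(s *Scope, image tf.Output) tf.Output {
	// 9 gives the smallest output but is the slowest; -1 (the default)
	// keeps the PNG encoder's own default level.
	return EncodePng(s, image, EncodePngCompression(9))
}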
14139
14140// EncodeProtoAttr is an optional argument to EncodeProto.
14141type EncodeProtoAttr func(optionalAttr)
14142
14143// EncodeProtoDescriptorSource sets the optional descriptor_source attribute to value.
14144// If not specified, defaults to "local://"
14145func EncodeProtoDescriptorSource(value string) EncodeProtoAttr {
14146	return func(m optionalAttr) {
14147		m["descriptor_source"] = value
14148	}
14149}
14150
14151// The op serializes protobuf messages provided in the input tensors.
14152//
14153// The types of the tensors in `values` must match the schema for the fields
14154// specified in `field_names`. All the tensors in `values` must have a common
14155// shape prefix, *batch_shape*.
14156//
14157// The `sizes` tensor specifies repeat counts for each field.  The repeat count
14158// (last dimension) of each tensor in `values` must be greater than or equal
14159// to the corresponding repeat count in `sizes`.
14160//
14161// A `message_type` name must be provided to give context for the field names.
14162// The actual message descriptor can be looked up either in the linked-in
14163// descriptor pool or a filename provided by the caller using the
14164// `descriptor_source` attribute.
14165//
14166// For the most part, the mapping between Proto field types and TensorFlow dtypes
14167// is straightforward. However, there are a few special cases:
14168//
14169// - A proto field that contains a submessage or group can only be converted
14170// to `DT_STRING` (the serialized submessage). This is to reduce the complexity
14171// of the API. The resulting string can be used as input to another instance of
14172// the decode_proto op.
14173//
14174// - TensorFlow lacks support for unsigned integers. The ops represent uint64
14175// types as a `DT_INT64` with the same twos-complement bit pattern (the obvious
14176// way). Unsigned int32 values can be represented exactly by specifying type
14177// `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in
14178// the `output_types` attribute.
14179//
14180// The `descriptor_source` attribute selects the source of protocol
14181// descriptors to consult when looking up `message_type`. This may be:
14182//
14183// - An empty string or "local://", in which case protocol descriptors are
14184// created for C++ (not Python) proto definitions linked to the binary.
14185//
14186// - A file, in which case protocol descriptors are created from the file,
14187// which is expected to contain a `FileDescriptorSet` serialized as a string.
14188// NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out`
14189// and `--include_imports` options to the protocol compiler `protoc`.
14190//
14191// - A "bytes://<bytes>", in which protocol descriptors are created from `<bytes>`,
14192// which is expected to be a `FileDescriptorSet` serialized as a string.
14193//
14194// Arguments:
14195//
14196//	sizes: Tensor of int32 with shape `[batch_shape, len(field_names)]`.
14197//	values: List of tensors containing values for the corresponding field.
14198//	field_names: List of strings containing proto field names.
14199//	message_type: Name of the proto message type to decode.
14200//
14201// Returns Tensor of serialized protos with shape `batch_shape`.
14202func EncodeProto(scope *Scope, sizes tf.Output, values []tf.Output, field_names []string, message_type string, optional ...EncodeProtoAttr) (bytes tf.Output) {
14203	if scope.Err() != nil {
14204		return
14205	}
14206	attrs := map[string]interface{}{"field_names": field_names, "message_type": message_type}
14207	for _, a := range optional {
14208		a(attrs)
14209	}
14210	opspec := tf.OpSpec{
14211		Type: "EncodeProto",
14212		Input: []tf.Input{
14213			sizes, tf.OutputList(values),
14214		},
14215		Attrs: attrs,
14216	}
14217	op := scope.AddOperation(opspec)
14218	return op.Output(0)
14219}
14220
14221// Encode audio data using the WAV file format.
14222//
14223// This operation will generate a string suitable to be saved out to create a .wav
14224// audio file. It will be encoded in the 16-bit PCM format. It takes in float
14225// values in the range -1.0f to 1.0f, and any values outside that range will be clamped to
14226// that range.
14227//
14228// `audio` is a 2-D float Tensor of shape `[length, channels]`.
14229// `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100).
14230//
14231// Arguments:
14232//
14233//	audio: 2-D with shape `[length, channels]`.
14234//	sample_rate: Scalar containing the sample frequency.
14235//
14236// Returns 0-D. WAV-encoded file contents.
14237func EncodeWav(scope *Scope, audio tf.Output, sample_rate tf.Output) (contents tf.Output) {
14238	if scope.Err() != nil {
14239		return
14240	}
14241	opspec := tf.OpSpec{
14242		Type: "EncodeWav",
14243		Input: []tf.Input{
14244			audio, sample_rate,
14245		},
14246	}
14247	op := scope.AddOperation(opspec)
14248	return op.Output(0)
14249}
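
// Editorial usage sketch (not machine generated): encodes three mono samples
// at 44.1 kHz. NewScope and Const are assumed from this package; note the
// sample rate is an int32 scalar tensor, not an attr.
func exampleEncodeWav() tf.Output {
	s := NewScope()
	audio := Const(s, [][]float32{{0.0}, {0.5}, {-0.5}}) // [length=3, channels=1]
	rate := Const(s, int32(44100))
	return EncodeWav(s, audio, rate)
}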
14250
14251// EnqueueTPUEmbeddingArbitraryTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingArbitraryTensorBatch.
14252type EnqueueTPUEmbeddingArbitraryTensorBatchAttr func(optionalAttr)
14253
14254// EnqueueTPUEmbeddingArbitraryTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14255//
14256// value: The TPU device to use. Should be >= 0 and less than the number
14257// of TPU cores in the task on which the node is placed.
14258// If not specified, defaults to -1
14259func EnqueueTPUEmbeddingArbitraryTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingArbitraryTensorBatchAttr {
14260	return func(m optionalAttr) {
14261		m["device_ordinal"] = value
14262	}
14263}
14264
14265// EnqueueTPUEmbeddingArbitraryTensorBatchCombiners sets the optional combiners attribute to value.
14266//
14267// value: A list of string scalars, one for each embedding table, specifying
14268// how to normalize the embedding activations after weighted summation.
14269// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
14270// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
14271// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
14272// all tables.
14273// If not specified, defaults to {}
14274func EnqueueTPUEmbeddingArbitraryTensorBatchCombiners(value []string) EnqueueTPUEmbeddingArbitraryTensorBatchAttr {
14275	return func(m optionalAttr) {
14276		m["combiners"] = value
14277	}
14278}
14279
14280// Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
14281//
14282// embedding_indices[i] and aggregation_weights[i] correspond
14283// to the ith feature.
14284//
14285// The tensors at corresponding positions in the three input lists (sample_indices,
14286// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
14287// with dim_size() equal to the total number of lookups into the table described by
14288// the corresponding feature.
14289//
14290// Arguments:
14291//
14292//	sample_indices_or_row_splits: A list of rank 2 Tensors specifying the training example to which the
14293//
14294// corresponding embedding_indices and aggregation_weights values belong.
14295// If the size of its first dimension is 0, we assume each embedding_indices
14296// belongs to a different sample. Both int32 and int64 are allowed and will
14297// be converted to int32 internally.
14298//
14299// Or a list of rank 1 Tensors specifying the row splits for splitting
14300// embedding_indices and aggregation_weights into rows. It corresponds to
14301// ids.row_splits in embedding_lookup(), when ids is a RaggedTensor. When
14302// enqueuing an N-D ragged tensor, only the last dimension is allowed to be ragged;
14303// the row splits are a 1-D dense tensor. When empty, we assume a dense tensor is
14304// passed to the op. Both int32 and int64 are allowed and will be converted to
14305// int32 internally.
14306//
14307//	embedding_indices: A list of rank 1 Tensors, indices into the embedding
14308//
14309// tables. Both int32 and int64 are allowed and will be converted to
14310// int32 internally.
14311//
14312//	aggregation_weights: A list of rank 1 Tensors containing per training
14313//
14314// example aggregation weights. Both float32 and float64 are allowed and will
14315// be converted to float32 internally.
14316//
14317//	mode_override: A string input that overrides the mode specified in the
14318//
14319// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14320// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14321// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14322//
14323// Returns the created operation.
14324func EnqueueTPUEmbeddingArbitraryTensorBatch(scope *Scope, sample_indices_or_row_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingArbitraryTensorBatchAttr) (o *tf.Operation) {
14325	if scope.Err() != nil {
14326		return
14327	}
14328	attrs := map[string]interface{}{}
14329	for _, a := range optional {
14330		a(attrs)
14331	}
14332	opspec := tf.OpSpec{
14333		Type: "EnqueueTPUEmbeddingArbitraryTensorBatch",
14334		Input: []tf.Input{
14335			tf.OutputList(sample_indices_or_row_splits), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
14336		},
14337		Attrs: attrs,
14338	}
14339	return scope.AddOperation(opspec)
14340}
14341
14342// EnqueueTPUEmbeddingBatchAttr is an optional argument to EnqueueTPUEmbeddingBatch.
14343type EnqueueTPUEmbeddingBatchAttr func(optionalAttr)
14344
14345// EnqueueTPUEmbeddingBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14346//
14347// value: The TPU device to use. This should be -1 when the Op
14348// is running on a TPU device, and >= 0 when the Op is running on the CPU
14349// device.
14350// If not specified, defaults to -1
14351func EnqueueTPUEmbeddingBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingBatchAttr {
14352	return func(m optionalAttr) {
14353		m["device_ordinal"] = value
14354	}
14355}
14356
14357// EnqueueTPUEmbeddingBatchCombiners sets the optional combiners attribute to value.
14358//
14359// value: A list of string scalars, one for each embedding table, specifying
14360// how to normalize the embedding activations after weighted summation.
14361// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
14362// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
14363// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
14364// all tables.
14365// If not specified, defaults to {}
14366func EnqueueTPUEmbeddingBatchCombiners(value []string) EnqueueTPUEmbeddingBatchAttr {
14367	return func(m optionalAttr) {
14368		m["combiners"] = value
14369	}
14370}
14371
14372// An op that enqueues a list of input batch tensors to TPUEmbedding.
14375//
14376// Arguments:
14377//
14378//	batch: A list of 1D tensors, one for each embedding table, containing the
14379//
14380// batch inputs encoded as dist_belief.SparseFeatures protos. If the weight
14381// field in the SparseFeatures proto is not populated for an ID, a weight of
14382// 1.0 is assumed.
14383//
14384//	mode_override: A string input that overrides the mode specified in the
14385//
14386// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14387// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14388// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14389//
14390// Returns the created operation.
14391func EnqueueTPUEmbeddingBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingBatchAttr) (o *tf.Operation) {
14392	if scope.Err() != nil {
14393		return
14394	}
14395	attrs := map[string]interface{}{}
14396	for _, a := range optional {
14397		a(attrs)
14398	}
14399	opspec := tf.OpSpec{
14400		Type: "EnqueueTPUEmbeddingBatch",
14401		Input: []tf.Input{
14402			tf.OutputList(batch), mode_override,
14403		},
14404		Attrs: attrs,
14405	}
14406	return scope.AddOperation(opspec)
14407}
14408
14409// EnqueueTPUEmbeddingIntegerBatchAttr is an optional argument to EnqueueTPUEmbeddingIntegerBatch.
14410type EnqueueTPUEmbeddingIntegerBatchAttr func(optionalAttr)
14411
14412// EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14413//
14414// value: The TPU device to use. Should be >= 0 and less than the number
14415// of TPU cores in the task on which the node is placed.
14416// If not specified, defaults to -1
14417func EnqueueTPUEmbeddingIntegerBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingIntegerBatchAttr {
14418	return func(m optionalAttr) {
14419		m["device_ordinal"] = value
14420	}
14421}
14422
14423// An op that enqueues a list of input batch tensors to TPUEmbedding.
14424//
14425// Arguments:
14426//
14427//	batch: A list of 1D tensors, one for each embedding table, containing the
14428//
14429// indices into the tables.
14430//
14431//	mode_override: A string input that overrides the mode specified in the
14432//
14433// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14434// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14435// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14436//
14437// Returns the created operation.
14438func EnqueueTPUEmbeddingIntegerBatch(scope *Scope, batch []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingIntegerBatchAttr) (o *tf.Operation) {
14439	if scope.Err() != nil {
14440		return
14441	}
14442	attrs := map[string]interface{}{}
14443	for _, a := range optional {
14444		a(attrs)
14445	}
14446	opspec := tf.OpSpec{
14447		Type: "EnqueueTPUEmbeddingIntegerBatch",
14448		Input: []tf.Input{
14449			tf.OutputList(batch), mode_override,
14450		},
14451		Attrs: attrs,
14452	}
14453	return scope.AddOperation(opspec)
14454}
14455
14456// EnqueueTPUEmbeddingRaggedTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingRaggedTensorBatch.
14457type EnqueueTPUEmbeddingRaggedTensorBatchAttr func(optionalAttr)
14458
14459// EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14460//
14461// value: The TPU device to use. Should be >= 0 and less than the number
14462// of TPU cores in the task on which the node is placed.
14463// If not specified, defaults to -1
14464func EnqueueTPUEmbeddingRaggedTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
14465	return func(m optionalAttr) {
14466		m["device_ordinal"] = value
14467	}
14468}
14469
14470// EnqueueTPUEmbeddingRaggedTensorBatchCombiners sets the optional combiners attribute to value.
14471//
14472// value: A list of string scalars, one for each embedding table, specifying
14473// how to normalize the embedding activations after weighted summation.
14474// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
14475// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
14476// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
14477// all tables.
14478// If not specified, defaults to {}
14479func EnqueueTPUEmbeddingRaggedTensorBatchCombiners(value []string) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
14480	return func(m optionalAttr) {
14481		m["combiners"] = value
14482	}
14483}
14484
14485// EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value.
14486// If not specified, defaults to {}
14487func EnqueueTPUEmbeddingRaggedTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
14488	return func(m optionalAttr) {
14489		m["max_sequence_lengths"] = value
14490	}
14491}
14492
14493// EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures sets the optional num_features attribute to value.
14494// If not specified, defaults to {}
14495func EnqueueTPUEmbeddingRaggedTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingRaggedTensorBatchAttr {
14496	return func(m optionalAttr) {
14497		m["num_features"] = value
14498	}
14499}
14500
14501// Eases the porting of code that uses tf.nn.embedding_lookup().
14502//
14503// sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond
14504// to the ith feature. table_ids[i] indicates which embedding table to use for the
14505// ith feature.
14506//
14507// The tensors at corresponding positions in two of the input lists,
14508// embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1
14509// with dim_size() equal to the total number of lookups into the table described by
14510// the corresponding feature.
14511//
14512// Arguments:
14513//
14514//	sample_splits: A list of rank 1 Tensors specifying the break points for splitting
14515//
14516// embedding_indices and aggregation_weights into rows.
14517// It corresponds to ids.row_splits in embedding_lookup(), when ids is a
14518// RaggedTensor.
14519//
14520//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
14521//
14522// It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.
14523//
14524//	aggregation_weights: A list of rank 1 Tensors containing per training example
14525//
14526// aggregation weights. It corresponds to the values field of a RaggedTensor
14527// with the same row_splits as ids in embedding_lookup(), when ids is a
14528// RaggedTensor.
14529//
14530//	mode_override: A string input that overrides the mode specified in the
14531//
14532// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14533// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14534// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14535//
14536//	table_ids: A list of integers specifying the identifier of the embedding table
14537//
14538// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
14539// corresponding input. The ith input is looked up using table_ids[i]. The size
14540// of the table_ids list must be equal to that of sample_indices,
14541// embedding_indices and aggregation_weights.
14542//
14543// Returns the created operation.
14544func EnqueueTPUEmbeddingRaggedTensorBatch(scope *Scope, sample_splits []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingRaggedTensorBatchAttr) (o *tf.Operation) {
14545	if scope.Err() != nil {
14546		return
14547	}
14548	attrs := map[string]interface{}{"table_ids": table_ids}
14549	for _, a := range optional {
14550		a(attrs)
14551	}
14552	opspec := tf.OpSpec{
14553		Type: "EnqueueTPUEmbeddingRaggedTensorBatch",
14554		Input: []tf.Input{
14555			tf.OutputList(sample_splits), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
14556		},
14557		Attrs: attrs,
14558	}
14559	return scope.AddOperation(opspec)
14560}
14561
14562// EnqueueTPUEmbeddingSparseBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseBatch.
14563type EnqueueTPUEmbeddingSparseBatchAttr func(optionalAttr)
14564
14565// EnqueueTPUEmbeddingSparseBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14566//
14567// value: The TPU device to use. Should be >= 0 and less than the number
14568// of TPU cores in the task on which the node is placed.
14569// If not specified, defaults to -1
14570func EnqueueTPUEmbeddingSparseBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseBatchAttr {
14571	return func(m optionalAttr) {
14572		m["device_ordinal"] = value
14573	}
14574}
14575
14576// EnqueueTPUEmbeddingSparseBatchCombiners sets the optional combiners attribute to value.
14577//
14578// value: A list of string scalars, one for each embedding table, specifying
14579// how to normalize the embedding activations after weighted summation.
14580// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
14581// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
14582// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
14583// all tables.
14584// If not specified, defaults to {}
14585func EnqueueTPUEmbeddingSparseBatchCombiners(value []string) EnqueueTPUEmbeddingSparseBatchAttr {
14586	return func(m optionalAttr) {
14587		m["combiners"] = value
14588	}
14589}
14590
14591// An op that enqueues TPUEmbedding input indices from a SparseTensor.
14592//
14593// This Op eases the porting of code that uses embedding_lookup_sparse(),
14594// although some Python preprocessing of the SparseTensor arguments to
14595// embedding_lookup_sparse() is required to produce the arguments to this Op,
14596// since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
14597// step.
14598//
14599// The tensors at corresponding positions in the three input lists
14600// must have the same shape, i.e. rank 1 with dim_size() equal to the total
14601// number of lookups into the table described by the corresponding table_id.
14602//
14603// Arguments:
14604//
14605//	sample_indices: A list of rank 1 Tensors specifying the training example and
14606//
14607// feature to which the corresponding embedding_indices and aggregation_weights
14608// values belong. sample_indices[i] must equal b * nf + f, where nf is the
14609// number of features from the corresponding table, f is in [0, nf), and
14610// b is in [0, batch size).
14611//
14612//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
14613//	aggregation_weights: A list of rank 1 Tensors containing per sample -- i.e. per
14614//
14615// (training example, feature) -- aggregation weights.
14616//
14617//	mode_override: A string input that overrides the mode specified in the
14618//
14619// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14620// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14621// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14622//
14623// Returns the created operation.
14624func EnqueueTPUEmbeddingSparseBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, optional ...EnqueueTPUEmbeddingSparseBatchAttr) (o *tf.Operation) {
14625	if scope.Err() != nil {
14626		return
14627	}
14628	attrs := map[string]interface{}{}
14629	for _, a := range optional {
14630		a(attrs)
14631	}
14632	opspec := tf.OpSpec{
14633		Type: "EnqueueTPUEmbeddingSparseBatch",
14634		Input: []tf.Input{
14635			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
14636		},
14637		Attrs: attrs,
14638	}
14639	return scope.AddOperation(opspec)
14640}
14641
14642// EnqueueTPUEmbeddingSparseTensorBatchAttr is an optional argument to EnqueueTPUEmbeddingSparseTensorBatch.
14643type EnqueueTPUEmbeddingSparseTensorBatchAttr func(optionalAttr)
14644
14645// EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal sets the optional device_ordinal attribute to value.
14646//
14647// value: The TPU device to use. Should be >= 0 and less than the number
14648// of TPU cores in the task on which the node is placed.
14649// If not specified, defaults to -1
14650func EnqueueTPUEmbeddingSparseTensorBatchDeviceOrdinal(value int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
14651	return func(m optionalAttr) {
14652		m["device_ordinal"] = value
14653	}
14654}
14655
14656// EnqueueTPUEmbeddingSparseTensorBatchCombiners sets the optional combiners attribute to value.
14657//
14658// value: A list of string scalars, one for each embedding table, specifying
14659// how to normalize the embedding activations after weighted summation.
14660// Supported combiners are 'mean', 'sum', or 'sqrtn'. It is invalid to have
14661// the sum of the weights be 0 for 'mean' or the sum of the squared weights be
14662// 0 for 'sqrtn'. If combiners isn't passed, the default is to use 'sum' for
14663// all tables.
14664// If not specified, defaults to {}
14665func EnqueueTPUEmbeddingSparseTensorBatchCombiners(value []string) EnqueueTPUEmbeddingSparseTensorBatchAttr {
14666	return func(m optionalAttr) {
14667		m["combiners"] = value
14668	}
14669}
14670
14671// EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths sets the optional max_sequence_lengths attribute to value.
14672// If not specified, defaults to {}
14673func EnqueueTPUEmbeddingSparseTensorBatchMaxSequenceLengths(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
14674	return func(m optionalAttr) {
14675		m["max_sequence_lengths"] = value
14676	}
14677}
14678
14679// EnqueueTPUEmbeddingSparseTensorBatchNumFeatures sets the optional num_features attribute to value.
14680// If not specified, defaults to {}
14681func EnqueueTPUEmbeddingSparseTensorBatchNumFeatures(value []int64) EnqueueTPUEmbeddingSparseTensorBatchAttr {
14682	return func(m optionalAttr) {
14683		m["num_features"] = value
14684	}
14685}
14686
14687// Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
14688//
14689// sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
14690// to the ith feature. table_ids[i] indicates which embedding table to use for the
14691// ith feature.
14692//
14693// The tensors at corresponding positions in the three input lists (sample_indices,
14694// embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
14695// with dim_size() equal to the total number of lookups into the table described by
14696// the corresponding feature.
14697//
14698// Arguments:
14699//
14700//	sample_indices: A list of rank 1 Tensors specifying the training example to
14701//
14702// which the corresponding embedding_indices and aggregation_weights values
14703// belong. It corresponds to sp_ids.indices[:,0] in  embedding_lookup_sparse().
14704//
14705//	embedding_indices: A list of rank 1 Tensors, indices into the embedding tables.
14706//
14707// It corresponds to sp_ids.values in embedding_lookup_sparse().
14708//
14709//	aggregation_weights: A list of rank 1 Tensors containing per training example
14710//
14711// aggregation weights. It corresponds to sp_weights.values in
14712// embedding_lookup_sparse().
14713//
14714//	mode_override: A string input that overrides the mode specified in the
14715//
14716// TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
14717// 'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
14718// in TPUEmbeddingConfiguration is used, otherwise mode_override is used.
14719//
14720//	table_ids: A list of integers specifying the identifier of the embedding table
14721//
14722// (offset of TableDescriptor in the TPUEmbeddingConfiguration) to lookup the
14723// corresponding input. The ith input is looked up using table_ids[i]. The size
14724// of the table_ids list must be equal to that of sample_indices,
14725// embedding_indices and aggregation_weights.
14726//
14727// Returns the created operation.
14728func EnqueueTPUEmbeddingSparseTensorBatch(scope *Scope, sample_indices []tf.Output, embedding_indices []tf.Output, aggregation_weights []tf.Output, mode_override tf.Output, table_ids []int64, optional ...EnqueueTPUEmbeddingSparseTensorBatchAttr) (o *tf.Operation) {
14729	if scope.Err() != nil {
14730		return
14731	}
14732	attrs := map[string]interface{}{"table_ids": table_ids}
14733	for _, a := range optional {
14734		a(attrs)
14735	}
14736	opspec := tf.OpSpec{
14737		Type: "EnqueueTPUEmbeddingSparseTensorBatch",
14738		Input: []tf.Input{
14739			tf.OutputList(sample_indices), tf.OutputList(embedding_indices), tf.OutputList(aggregation_weights), mode_override,
14740		},
14741		Attrs: attrs,
14742	}
14743	return scope.AddOperation(opspec)
14744}
14745
14746// Ensures that the tensor's shape matches the expected shape.
14747//
14748// Raises an error if the input tensor's shape does not match the specified shape.
14749// Returns the input tensor otherwise.
14750//
14751// Arguments:
14752//
14753//	input: A tensor, whose shape is to be validated.
14754//	shape: The expected (possibly partially specified) shape of the input tensor.
14755//
14756// Returns A tensor with the same shape and contents as the input tensor or value.
14757func EnsureShape(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
14758	if scope.Err() != nil {
14759		return
14760	}
14761	attrs := map[string]interface{}{"shape": shape}
14762	opspec := tf.OpSpec{
14763		Type: "EnsureShape",
14764		Input: []tf.Input{
14765			input,
14766		},
14767		Attrs: attrs,
14768	}
14769	op := scope.AddOperation(opspec)
14770	return op.Output(0)
14771}
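
// Editorial usage sketch (not machine generated): validates that a tensor is
// rank-2 with a trailing dimension of 10, leaving the batch dimension
// unconstrained. tf.MakeShape is assumed from the tensorflow/go package,
// with -1 marking an unknown dimension.
func exampleEnsureShape(s *Scope, logits tf.Output) tf.Output {
	return EnsureShape(s, logits, tf.MakeShape(-1, 10))
}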
14772
14773// EnterAttr is an optional argument to Enter.
14774type EnterAttr func(optionalAttr)
14775
14776// EnterIsConstant sets the optional is_constant attribute to value.
14777//
14778// value: If true, the output is constant within the child frame.
14779// If not specified, defaults to false
14780func EnterIsConstant(value bool) EnterAttr {
14781	return func(m optionalAttr) {
14782		m["is_constant"] = value
14783	}
14784}
14785
14786// EnterParallelIterations sets the optional parallel_iterations attribute to value.
14787//
14788// value: The number of iterations allowed to run in parallel.
14789// If not specified, defaults to 10
14790func EnterParallelIterations(value int64) EnterAttr {
14791	return func(m optionalAttr) {
14792		m["parallel_iterations"] = value
14793	}
14794}
14795
14796// Creates or finds a child frame, and makes `data` available to the child frame.
14797//
14798// This op is used together with `Exit` to create loops in the graph.
14799// The unique `frame_name` is used by the `Executor` to identify frames. If
14800// `is_constant` is true, `output` is a constant in the child frame; otherwise
14801// it may be changed in the child frame. At most `parallel_iterations` iterations
14802// are run in parallel in the child frame.
14803//
14804// Arguments:
14805//
14806//	data: The tensor to be made available to the child frame.
14807//	frame_name: The name of the child frame.
14808//
14809// Returns The same tensor as `data`.
14810func Enter(scope *Scope, data tf.Output, frame_name string, optional ...EnterAttr) (output tf.Output) {
14811	if scope.Err() != nil {
14812		return
14813	}
14814	attrs := map[string]interface{}{"frame_name": frame_name}
14815	for _, a := range optional {
14816		a(attrs)
14817	}
14818	opspec := tf.OpSpec{
14819		Type: "Enter",
14820		Input: []tf.Input{
14821			data,
14822		},
14823		Attrs: attrs,
14824	}
14825	op := scope.AddOperation(opspec)
14826	return op.Output(0)
14827}
14828
14829// EqualAttr is an optional argument to Equal.
14830type EqualAttr func(optionalAttr)
14831
14832// EqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value.
14833// If not specified, defaults to true
14834func EqualIncompatibleShapeError(value bool) EqualAttr {
14835	return func(m optionalAttr) {
14836		m["incompatible_shape_error"] = value
14837	}
14838}
14839
14840// Returns the truth value of (x == y) element-wise.
14841//
14842// *NOTE*: `Equal` supports broadcasting. More about broadcasting
14843// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
14844//
14845// ```python
14846// x = tf.constant([2, 4])
14847// y = tf.constant(2)
14848// tf.math.equal(x, y) ==> array([True, False])
14849//
14850// x = tf.constant([2, 4])
14851// y = tf.constant([2, 4])
14852// tf.math.equal(x, y) ==> array([True,  True])
14853// ```
14854func Equal(scope *Scope, x tf.Output, y tf.Output, optional ...EqualAttr) (z tf.Output) {
14855	if scope.Err() != nil {
14856		return
14857	}
14858	attrs := map[string]interface{}{}
14859	for _, a := range optional {
14860		a(attrs)
14861	}
14862	opspec := tf.OpSpec{
14863		Type: "Equal",
14864		Input: []tf.Input{
14865			x, y,
14866		},
14867		Attrs: attrs,
14868	}
14869	op := scope.AddOperation(opspec)
14870	return op.Output(0)
14871}
14872
14873// Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise. In statistics, for non-negative values of $x$, the error function has the following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and variance $1/2$, $erf(x)$ is the probability that $Y$ falls in the range $[-x, x]$.
14874func Erf(scope *Scope, x tf.Output) (y tf.Output) {
14875	if scope.Err() != nil {
14876		return
14877	}
14878	opspec := tf.OpSpec{
14879		Type: "Erf",
14880		Input: []tf.Input{
14881			x,
14882		},
14883	}
14884	op := scope.AddOperation(opspec)
14885	return op.Output(0)
14886}
14887
14888// Computes the complementary error function of `x` element-wise.
14889func Erfc(scope *Scope, x tf.Output) (y tf.Output) {
14890	if scope.Err() != nil {
14891		return
14892	}
14893	opspec := tf.OpSpec{
14894		Type: "Erfc",
14895		Input: []tf.Input{
14896			x,
14897		},
14898	}
14899	op := scope.AddOperation(opspec)
14900	return op.Output(0)
14901}
14902
14903// EuclideanNormAttr is an optional argument to EuclideanNorm.
14904type EuclideanNormAttr func(optionalAttr)
14905
14906// EuclideanNormKeepDims sets the optional keep_dims attribute to value.
14907//
14908// value: If true, retain reduced dimensions with length 1.
14909// If not specified, defaults to false
14910func EuclideanNormKeepDims(value bool) EuclideanNormAttr {
14911	return func(m optionalAttr) {
14912		m["keep_dims"] = value
14913	}
14914}
14915
14916// Computes the Euclidean norm of elements across dimensions of a tensor.
14917//
14918// Reduces `input` along the dimensions given in `axis`. Unless
14919// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
14920// `axis`. If `keep_dims` is true, the reduced dimensions are
14921// retained with length 1.
14922//
14923// Arguments:
14924//
14925//	input: The tensor to reduce.
14926//	axis: The dimensions to reduce. Must be in the range
14927//
14928// `[-rank(input), rank(input))`.
14929//
14930// Returns The reduced tensor.
14931func EuclideanNorm(scope *Scope, input tf.Output, axis tf.Output, optional ...EuclideanNormAttr) (output tf.Output) {
14932	if scope.Err() != nil {
14933		return
14934	}
14935	attrs := map[string]interface{}{}
14936	for _, a := range optional {
14937		a(attrs)
14938	}
14939	opspec := tf.OpSpec{
14940		Type: "EuclideanNorm",
14941		Input: []tf.Input{
14942			input, axis,
14943		},
14944		Attrs: attrs,
14945	}
14946	op := scope.AddOperation(opspec)
14947	return op.Output(0)
14948}
14949
14950// An op that executes the TPUEmbedding partitioner on the central configuration
14951//
14952// device and computes the HBM size (in bytes) required for TPUEmbedding operation.
14953//
14954// Arguments:
14955//
14956//	config: A TPUEmbeddingConfiguration proto serialized to a string,
14957//
14958// describing the desired TPUEmbedding configuration.
14959//
14960// Returns A string-encoded common configuration proto
14961// containing metadata about the TPUEmbedding partitioner output and
14962// the HBM size (in bytes) required for operation.
14963func ExecuteTPUEmbeddingPartitioner(scope *Scope, config string) (common_config tf.Output) {
14964	if scope.Err() != nil {
14965		return
14966	}
14967	attrs := map[string]interface{}{"config": config}
14968	opspec := tf.OpSpec{
14969		Type: "ExecuteTPUEmbeddingPartitioner",
14970
14971		Attrs: attrs,
14972	}
14973	op := scope.AddOperation(opspec)
14974	return op.Output(0)
14975}
14976
14977// Exits the current frame to its parent frame.
14978//
14979// Exit makes its input `data` available to the parent frame.
14980//
14981// Arguments:
14982//
14983//	data: The tensor to be made available to the parent frame.
14984//
14985// Returns The same tensor as `data`.
14986func Exit(scope *Scope, data tf.Output) (output tf.Output) {
14987	if scope.Err() != nil {
14988		return
14989	}
14990	opspec := tf.OpSpec{
14991		Type: "Exit",
14992		Input: []tf.Input{
14993			data,
14994		},
14995	}
14996	op := scope.AddOperation(opspec)
14997	return op.Output(0)
14998}
14999
15000// Computes exponential of x element-wise.  \\(y = e^x\\).
15001//
15002//	This function computes the exponential of every element in the input tensor.
15003//	i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.
15004//	`e` denotes Euler's number and is approximately equal to 2.718281.
15005//	Output is positive for any real input.
15006//
15007//	```python
15008//	x = tf.constant(2.0)
15009//	tf.math.exp(x) ==> 7.389056
15010//
15011//	x = tf.constant([2.0, 8.0])
15012//	tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
15013//	```
15014//
15015//	For complex numbers, the exponential value is calculated as follows:
15016//
15017//	```
15018//	e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
15019//	```
15020//
15021//	Let's consider complex number 1+1j as an example.
15022//	e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)
15023//
15024//	```python
15025//	x = tf.constant(1 + 1j)
15026//	tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
15027//	```
15028func Exp(scope *Scope, x tf.Output) (y tf.Output) {
15029	if scope.Err() != nil {
15030		return
15031	}
15032	opspec := tf.OpSpec{
15033		Type: "Exp",
15034		Input: []tf.Input{
15035			x,
15036		},
15037	}
15038	op := scope.AddOperation(opspec)
15039	return op.Output(0)
15040}
15041
15042// Inserts a dimension of 1 into a tensor's shape.
15043//
15044// Given a tensor `input`, this operation inserts a dimension of 1 at the
15045// dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
15046// zero; if you specify a negative number for `axis` it is counted backward from
15047// the end.
15048//
15049// This operation is useful if you want to add a batch dimension to a single
15050// element. For example, if you have a single image of shape `[height, width,
15051// channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
15052// which will make the shape `[1, height, width, channels]`.
15053//
15054// Other examples:
15055//
15056// ```
15057// # 't' is a tensor of shape [2]
15058// shape(expand_dims(t, 0)) ==> [1, 2]
15059// shape(expand_dims(t, 1)) ==> [2, 1]
15060// shape(expand_dims(t, -1)) ==> [2, 1]
15061//
15062// # 't2' is a tensor of shape [2, 3, 5]
15063// shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
15064// shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
15065// shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
15066// ```
15067//
15068// This operation requires that:
15069//
15070// `-1 - input.dims() <= axis <= input.dims()`
15071//
15072// This operation is related to `squeeze()`, which removes dimensions of
15073// size 1.
15074//
15075// Arguments:
15076//
15077//	axis: 0-D (scalar). Specifies the dimension index at which to
15078//
15079// expand the shape of `input`. Must be in the range
15080// `[-rank(input) - 1, rank(input)]`.
15081//
15082// Returns Contains the same data as `input`, but its shape has an additional
15083// dimension of size 1 added.
15084func ExpandDims(scope *Scope, input tf.Output, axis tf.Output) (output tf.Output) {
15085	if scope.Err() != nil {
15086		return
15087	}
15088	opspec := tf.OpSpec{
15089		Type: "ExpandDims",
15090		Input: []tf.Input{
15091			input, axis,
15092		},
15093	}
15094	op := scope.AddOperation(opspec)
15095	return op.Output(0)
15096}
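
// A minimal Go usage sketch for ExpandDims (hand-written, not generated),
// assuming the standard tensorflow/go client API; `axis` is passed as a
// scalar constant.
//
//	s := op.NewScope()
//	t := op.Const(s, []int32{1, 2})                   // shape [2]
//	e0 := op.ExpandDims(s, t, op.Const(s, int32(0)))  // shape [1, 2]
//	e1 := op.ExpandDims(s, t, op.Const(s, int32(-1))) // shape [2, 1]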
15097
15098// ExperimentalAutoShardDatasetAttr is an optional argument to ExperimentalAutoShardDataset.
15099type ExperimentalAutoShardDatasetAttr func(optionalAttr)
15100
15101// ExperimentalAutoShardDatasetAutoShardPolicy sets the optional auto_shard_policy attribute to value.
15102// If not specified, defaults to 0
15103func ExperimentalAutoShardDatasetAutoShardPolicy(value int64) ExperimentalAutoShardDatasetAttr {
15104	return func(m optionalAttr) {
15105		m["auto_shard_policy"] = value
15106	}
15107}
15108
15109// Creates a dataset that shards the input dataset.
15110//
15111// Creates a dataset that shards the input dataset by num_workers, returning a
15112// sharded dataset for the index-th worker. This attempts to automatically shard
15113// a dataset by examining the Dataset graph and inserting a shard op before the
15114// inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset).
15115//
15116// This dataset will return a NotFound error if the dataset cannot be sharded
15117// automatically.
15118//
15119// Arguments:
15120//
15121//	input_dataset: A variant tensor representing the input dataset.
15122//	num_workers: A scalar representing the number of workers to distribute this dataset across.
15123//	index: A scalar representing the index of the current worker out of num_workers.
15124func ExperimentalAutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalAutoShardDatasetAttr) (handle tf.Output) {
15125	if scope.Err() != nil {
15126		return
15127	}
15128	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15129	for _, a := range optional {
15130		a(attrs)
15131	}
15132	opspec := tf.OpSpec{
15133		Type: "ExperimentalAutoShardDataset",
15134		Input: []tf.Input{
15135			input_dataset, num_workers, index,
15136		},
15137		Attrs: attrs,
15138	}
15139	op := scope.AddOperation(opspec)
15140	return op.Output(0)
15141}
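
// A hand-written sketch (not generated) of sharding a dataset across four
// workers. It assumes `ds` is a variant tensor produced elsewhere by a reader
// dataset op with scalar string elements; both are illustrative assumptions.
//
//	s := op.NewScope()
//	sharded := op.ExperimentalAutoShardDataset(s, ds,
//		op.Const(s, int64(4)), // num_workers
//		op.Const(s, int64(0)), // index of this worker
//		[]tf.DataType{tf.String}, []tf.Shape{tf.ScalarShape()})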
15142
15143// Records the bytes size of each element of `input_dataset` in a StatsAggregator.
15144func ExperimentalBytesProducedStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15145	if scope.Err() != nil {
15146		return
15147	}
15148	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15149	opspec := tf.OpSpec{
15150		Type: "ExperimentalBytesProducedStatsDataset",
15151		Input: []tf.Input{
15152			input_dataset, tag,
15153		},
15154		Attrs: attrs,
15155	}
15156	op := scope.AddOperation(opspec)
15157	return op.Output(0)
15158}
15159
15160// Returns the cardinality of `input_dataset`.
15161//
15164// Arguments:
15165//
15166//	input_dataset: A variant tensor representing the dataset to return cardinality for.
15167//
15168// Returns The cardinality of `input_dataset`. Named constants are used to represent
15169// infinite and unknown cardinality.
15170func ExperimentalDatasetCardinality(scope *Scope, input_dataset tf.Output) (cardinality tf.Output) {
15171	if scope.Err() != nil {
15172		return
15173	}
15174	opspec := tf.OpSpec{
15175		Type: "ExperimentalDatasetCardinality",
15176		Input: []tf.Input{
15177			input_dataset,
15178		},
15179	}
15180	op := scope.AddOperation(opspec)
15181	return op.Output(0)
15182}
15183
15184// Writes the given dataset to the given file using the TFRecord format.
15185//
15186// Arguments:
15187//
15188//	input_dataset: A variant tensor representing the dataset to write.
15189//	filename: A scalar string tensor representing the filename to use.
15190//	compression_type: A scalar string tensor containing either (i) the empty string (no
15191//
15192// compression), (ii) "ZLIB", or (iii) "GZIP".
15193//
15194// Returns the created operation.
15195func ExperimentalDatasetToTFRecord(scope *Scope, input_dataset tf.Output, filename tf.Output, compression_type tf.Output) (o *tf.Operation) {
15196	if scope.Err() != nil {
15197		return
15198	}
15199	opspec := tf.OpSpec{
15200		Type: "ExperimentalDatasetToTFRecord",
15201		Input: []tf.Input{
15202			input_dataset, filename, compression_type,
15203		},
15204	}
15205	return scope.AddOperation(opspec)
15206}
15207
15208// Creates a dataset that batches input elements into a SparseTensor.
15209//
15210// Arguments:
15211//
15212//	input_dataset: A handle to an input dataset. Must have a single component.
15213//	batch_size: A scalar representing the number of elements to accumulate in a
15214//
15215// batch.
15216//
15217//	row_shape: A vector representing the dense shape of each row in the produced
15218//
15219// SparseTensor. The shape may be partially specified, using `-1` to indicate
15220// that a particular dimension should use the maximum size of all batch elements.
15221func ExperimentalDenseToSparseBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, row_shape tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15222	if scope.Err() != nil {
15223		return
15224	}
15225	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15226	opspec := tf.OpSpec{
15227		Type: "ExperimentalDenseToSparseBatchDataset",
15228		Input: []tf.Input{
15229			input_dataset, batch_size, row_shape,
15230		},
15231		Attrs: attrs,
15232	}
15233	op := scope.AddOperation(opspec)
15234	return op.Output(0)
15235}
15236
15237// A substitute for `InterleaveDataset` on a fixed list of `N` datasets.
15238//
15239// Arguments:
15240//
15241//	selector_input_dataset: A dataset of scalar `DT_INT64` elements that determines which of the
15242//
15243// `N` data inputs should produce the next output element.
15244//
15245//	data_input_datasets: `N` datasets with the same type that will be interleaved according to
15246//
15247// the values of `selector_input_dataset`.
15248func ExperimentalDirectedInterleaveDataset(scope *Scope, selector_input_dataset tf.Output, data_input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15249	if scope.Err() != nil {
15250		return
15251	}
15252	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15253	opspec := tf.OpSpec{
15254		Type: "ExperimentalDirectedInterleaveDataset",
15255		Input: []tf.Input{
15256			selector_input_dataset, tf.OutputList(data_input_datasets),
15257		},
15258		Attrs: attrs,
15259	}
15260	op := scope.AddOperation(opspec)
15261	return op.Output(0)
15262}
15263
15264// ExperimentalIgnoreErrorsDatasetAttr is an optional argument to ExperimentalIgnoreErrorsDataset.
15265type ExperimentalIgnoreErrorsDatasetAttr func(optionalAttr)
15266
15267// ExperimentalIgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value.
15268// If not specified, defaults to false
15269func ExperimentalIgnoreErrorsDatasetLogWarning(value bool) ExperimentalIgnoreErrorsDatasetAttr {
15270	return func(m optionalAttr) {
15271		m["log_warning"] = value
15272	}
15273}
15274
15275// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
15276func ExperimentalIgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalIgnoreErrorsDatasetAttr) (handle tf.Output) {
15277	if scope.Err() != nil {
15278		return
15279	}
15280	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15281	for _, a := range optional {
15282		a(attrs)
15283	}
15284	opspec := tf.OpSpec{
15285		Type: "ExperimentalIgnoreErrorsDataset",
15286		Input: []tf.Input{
15287			input_dataset,
15288		},
15289		Attrs: attrs,
15290	}
15291	op := scope.AddOperation(opspec)
15292	return op.Output(0)
15293}
15294
15295// Returns the name of the device on which `resource` has been placed.
15296func ExperimentalIteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
15297	if scope.Err() != nil {
15298		return
15299	}
15300	opspec := tf.OpSpec{
15301		Type: "ExperimentalIteratorGetDevice",
15302		Input: []tf.Input{
15303			resource,
15304		},
15305	}
15306	op := scope.AddOperation(opspec)
15307	return op.Output(0)
15308}
15309
15310// Records the latency of producing `input_dataset` elements in a StatsAggregator.
15311func ExperimentalLatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15312	if scope.Err() != nil {
15313		return
15314	}
15315	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15316	opspec := tf.OpSpec{
15317		Type: "ExperimentalLatencyStatsDataset",
15318		Input: []tf.Input{
15319			input_dataset, tag,
15320		},
15321		Attrs: attrs,
15322	}
15323	op := scope.AddOperation(opspec)
15324	return op.Output(0)
15325}
15326
15327// Creates a dataset that overrides the maximum intra-op parallelism.
15328//
15329// Arguments:
15330//
15331//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
15332func ExperimentalMaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15333	if scope.Err() != nil {
15334		return
15335	}
15336	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15337	opspec := tf.OpSpec{
15338		Type: "ExperimentalMaxIntraOpParallelismDataset",
15339		Input: []tf.Input{
15340			input_dataset, max_intra_op_parallelism,
15341		},
15342		Attrs: attrs,
15343	}
15344	op := scope.AddOperation(opspec)
15345	return op.Output(0)
15346}
15347
15348// ExperimentalParseExampleDatasetAttr is an optional argument to ExperimentalParseExampleDataset.
15349type ExperimentalParseExampleDatasetAttr func(optionalAttr)
15350
15351// ExperimentalParseExampleDatasetSloppy sets the optional sloppy attribute to value.
15352// If not specified, defaults to false
15353func ExperimentalParseExampleDatasetSloppy(value bool) ExperimentalParseExampleDatasetAttr {
15354	return func(m optionalAttr) {
15355		m["sloppy"] = value
15356	}
15357}
15358
15359// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
15360//
15361// Arguments:
15362//
15363//	dense_defaults: A dict mapping string keys to `Tensor`s.
15364//
15365// The keys of the dict must match the dense_keys of the feature.
15366//
15367//	sparse_keys: A list of string keys in the examples features.
15368//
15369// The results for these keys will be returned as `SparseTensor` objects.
15370//
15371//	dense_keys: A list of Ndense string Tensors (scalars).
15372//
15373// The keys expected in the Examples features associated with dense values.
15374//
15375//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
15376//
15377// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
15378// and `tf.string` (`BytesList`) are supported.
15379//
15380//	dense_shapes: List of tuples with the same length as `dense_keys`.
15381//
15382// The shape of the data for each dense feature referenced by `dense_keys`.
15383// Required for any input tensors identified by `dense_keys`.  Must be
15384// either fully defined, or may contain an unknown first dimension.
15385// An unknown first dimension means the feature is treated as having
15386// a variable number of blocks, and the output shape along this dimension
15387// is considered unknown at graph build time.  Padding is applied for
15388// minibatch elements smaller than the maximum number of blocks for the
15389// given feature along this dimension.
15390//
15391//	output_types: The type list for the return values.
15392//	output_shapes: The list of shapes being produced.
15393func ExperimentalParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalParseExampleDatasetAttr) (handle tf.Output) {
15394	if scope.Err() != nil {
15395		return
15396	}
15397	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
15398	for _, a := range optional {
15399		a(attrs)
15400	}
15401	opspec := tf.OpSpec{
15402		Type: "ExperimentalParseExampleDataset",
15403		Input: []tf.Input{
15404			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
15405		},
15406		Attrs: attrs,
15407	}
15408	op := scope.AddOperation(opspec)
15409	return op.Output(0)
15410}
15411
15412// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
15413//
15414// Arguments:
15415//
15416//	num_threads: Identifies the number of threads to use for the private threadpool.
15417func ExperimentalPrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15418	if scope.Err() != nil {
15419		return
15420	}
15421	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15422	opspec := tf.OpSpec{
15423		Type: "ExperimentalPrivateThreadPoolDataset",
15424		Input: []tf.Input{
15425			input_dataset, num_threads,
15426		},
15427		Attrs: attrs,
15428	}
15429	op := scope.AddOperation(opspec)
15430	return op.Output(0)
15431}
15432
15433// Creates a Dataset that returns pseudorandom numbers.
15434//
15435// Arguments:
15436//
15437//	seed: A scalar seed for the random number generator. If either seed or
15438//
15439// seed2 is set to be non-zero, the random number generator is seeded
15440// by the given seed.  Otherwise, a random seed is used.
15441//
15442//	seed2: A second scalar seed to avoid seed collision.
15443func ExperimentalRandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15444	if scope.Err() != nil {
15445		return
15446	}
15447	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15448	opspec := tf.OpSpec{
15449		Type: "ExperimentalRandomDataset",
15450		Input: []tf.Input{
15451			seed, seed2,
15452		},
15453		Attrs: attrs,
15454	}
15455	op := scope.AddOperation(opspec)
15456	return op.Output(0)
15457}
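
// A hand-written sketch (not generated) of a random-number dataset; the
// output_types and output_shapes attrs describe the scalar int64 elements it
// emits.
//
//	s := op.NewScope()
//	rnd := op.ExperimentalRandomDataset(s,
//		op.Const(s, int64(42)), // seed
//		op.Const(s, int64(7)),  // seed2
//		[]tf.DataType{tf.Int64}, []tf.Shape{tf.ScalarShape()})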
15458
15459// ExperimentalRebatchDatasetAttr is an optional argument to ExperimentalRebatchDataset.
15460type ExperimentalRebatchDatasetAttr func(optionalAttr)
15461
15462// ExperimentalRebatchDatasetUseFallback sets the optional use_fallback attribute to value.
15463// If not specified, defaults to true
15464func ExperimentalRebatchDatasetUseFallback(value bool) ExperimentalRebatchDatasetAttr {
15465	return func(m optionalAttr) {
15466		m["use_fallback"] = value
15467	}
15468}
15469
15470// Creates a dataset that changes the batch size.
15471//
15472// Creates a dataset that changes the batch size of the dataset to
15473// `current_batch_size / num_replicas` (integer division).
15474//
15475// Arguments:
15476//
15477//	input_dataset: A variant tensor representing the input dataset.
15478//	num_replicas: A scalar representing the number of replicas to distribute this batch across. As
15479//
15480// a result of this transformation, the current batch size is divided by this
15481// parameter.
15482func ExperimentalRebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ExperimentalRebatchDatasetAttr) (handle tf.Output) {
15483	if scope.Err() != nil {
15484		return
15485	}
15486	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15487	for _, a := range optional {
15488		a(attrs)
15489	}
15490	opspec := tf.OpSpec{
15491		Type: "ExperimentalRebatchDataset",
15492		Input: []tf.Input{
15493			input_dataset, num_replicas,
15494		},
15495		Attrs: attrs,
15496	}
15497	op := scope.AddOperation(opspec)
15498	return op.Output(0)
15499}
15500
15501// Creates a dataset that passes a sliding window over `input_dataset`.
15502//
15503// Arguments:
15504//
15505//	window_size: A scalar representing the number of elements in the
15506//
15507// sliding window.
15508//
15509//	window_shift: A scalar representing the number of elements by which the
15510//
15511// sliding window moves forward in each iteration. It must be positive.
15512//
15513//	window_stride: A scalar representing the stride of the input elements of the sliding window.
15514//
15515// It must be positive.
15516func ExperimentalSlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15517	if scope.Err() != nil {
15518		return
15519	}
15520	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15521	opspec := tf.OpSpec{
15522		Type: "ExperimentalSlidingWindowDataset",
15523		Input: []tf.Input{
15524			input_dataset, window_size, window_shift, window_stride,
15525		},
15526		Attrs: attrs,
15527	}
15528	op := scope.AddOperation(opspec)
15529	return op.Output(0)
15530}
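
// For intuition (hand-written note, not generated): with window_size=3,
// window_shift=1 and window_stride=1, the input sequence 0,1,2,3,4 yields the
// windows {0,1,2}, {1,2,3}, {2,3,4}; with window_stride=2 the first window
// would instead contain the elements {0, 2, 4}.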
15531
15532// Creates a dataset that executes a SQL query and emits rows of the result set.
15533//
15534// Arguments:
15535//
15536//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
15537//	data_source_name: A connection string to connect to the database.
15538//	query: A SQL query to execute.
15539func ExperimentalSqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15540	if scope.Err() != nil {
15541		return
15542	}
15543	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15544	opspec := tf.OpSpec{
15545		Type: "ExperimentalSqlDataset",
15546		Input: []tf.Input{
15547			driver_name, data_source_name, query,
15548		},
15549		Attrs: attrs,
15550	}
15551	op := scope.AddOperation(opspec)
15552	return op.Output(0)
15553}
15554
15555// ExperimentalStatsAggregatorHandleAttr is an optional argument to ExperimentalStatsAggregatorHandle.
15556type ExperimentalStatsAggregatorHandleAttr func(optionalAttr)
15557
15558// ExperimentalStatsAggregatorHandleContainer sets the optional container attribute to value.
15559// If not specified, defaults to ""
15560func ExperimentalStatsAggregatorHandleContainer(value string) ExperimentalStatsAggregatorHandleAttr {
15561	return func(m optionalAttr) {
15562		m["container"] = value
15563	}
15564}
15565
15566// ExperimentalStatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
15567// If not specified, defaults to ""
15568func ExperimentalStatsAggregatorHandleSharedName(value string) ExperimentalStatsAggregatorHandleAttr {
15569	return func(m optionalAttr) {
15570		m["shared_name"] = value
15571	}
15572}
15573
15574// Creates a statistics manager resource.
15575func ExperimentalStatsAggregatorHandle(scope *Scope, optional ...ExperimentalStatsAggregatorHandleAttr) (handle tf.Output) {
15576	if scope.Err() != nil {
15577		return
15578	}
15579	attrs := map[string]interface{}{}
15580	for _, a := range optional {
15581		a(attrs)
15582	}
15583	opspec := tf.OpSpec{
15584		Type: "ExperimentalStatsAggregatorHandle",
15585
15586		Attrs: attrs,
15587	}
15588	op := scope.AddOperation(opspec)
15589	return op.Output(0)
15590}
15591
15592// Produces a summary of any statistics recorded by the given statistics manager.
15593func ExperimentalStatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
15594	if scope.Err() != nil {
15595		return
15596	}
15597	opspec := tf.OpSpec{
15598		Type: "ExperimentalStatsAggregatorSummary",
15599		Input: []tf.Input{
15600			iterator,
15601		},
15602	}
15603	op := scope.AddOperation(opspec)
15604	return op.Output(0)
15605}
15606
15607// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
15608//
15609// Arguments:
15610//
15611//	thread_pool: A resource produced by the ThreadPoolHandle op.
15612func ExperimentalThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15613	if scope.Err() != nil {
15614		return
15615	}
15616	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15617	opspec := tf.OpSpec{
15618		Type: "ExperimentalThreadPoolDataset",
15619		Input: []tf.Input{
15620			input_dataset, thread_pool,
15621		},
15622		Attrs: attrs,
15623	}
15624	op := scope.AddOperation(opspec)
15625	return op.Output(0)
15626}
15627
15628// ExperimentalThreadPoolHandleAttr is an optional argument to ExperimentalThreadPoolHandle.
15629type ExperimentalThreadPoolHandleAttr func(optionalAttr)
15630
15631// ExperimentalThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
15632//
15633// value: The maximum degree of parallelism to use within operations that execute on this
15634// threadpool.
15635// If not specified, defaults to 1
15636func ExperimentalThreadPoolHandleMaxIntraOpParallelism(value int64) ExperimentalThreadPoolHandleAttr {
15637	return func(m optionalAttr) {
15638		m["max_intra_op_parallelism"] = value
15639	}
15640}
15641
15642// ExperimentalThreadPoolHandleContainer sets the optional container attribute to value.
15643// If not specified, defaults to ""
15644func ExperimentalThreadPoolHandleContainer(value string) ExperimentalThreadPoolHandleAttr {
15645	return func(m optionalAttr) {
15646		m["container"] = value
15647	}
15648}
15649
15650// ExperimentalThreadPoolHandleSharedName sets the optional shared_name attribute to value.
15651// If not specified, defaults to ""
15652func ExperimentalThreadPoolHandleSharedName(value string) ExperimentalThreadPoolHandleAttr {
15653	return func(m optionalAttr) {
15654		m["shared_name"] = value
15655	}
15656}
15657
15658// Creates a custom thread pool for computing datasets, consumed by ExperimentalThreadPoolDataset ops.
15659//
15660// Arguments:
15661//
15662//	num_threads: The number of threads in the thread pool.
15663//	display_name: A human-readable name for the threads that may be visible in some
15664//
15665// visualizations.
15667//
15668// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
15669// ops.
15670func ExperimentalThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ExperimentalThreadPoolHandleAttr) (handle tf.Output) {
15671	if scope.Err() != nil {
15672		return
15673	}
15674	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
15675	for _, a := range optional {
15676		a(attrs)
15677	}
15678	opspec := tf.OpSpec{
15679		Type: "ExperimentalThreadPoolHandle",
15680
15681		Attrs: attrs,
15682	}
15683	op := scope.AddOperation(opspec)
15684	return op.Output(0)
15685}
15686
15687// A dataset that splits the elements of its input into multiple elements.
15688func ExperimentalUnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15689	if scope.Err() != nil {
15690		return
15691	}
15692	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15693	opspec := tf.OpSpec{
15694		Type: "ExperimentalUnbatchDataset",
15695		Input: []tf.Input{
15696			input_dataset,
15697		},
15698		Attrs: attrs,
15699	}
15700	op := scope.AddOperation(opspec)
15701	return op.Output(0)
15702}
15703
15704// Creates a dataset that contains the unique elements of `input_dataset`.
15705func ExperimentalUniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
15706	if scope.Err() != nil {
15707		return
15708	}
15709	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
15710	opspec := tf.OpSpec{
15711		Type: "ExperimentalUniqueDataset",
15712		Input: []tf.Input{
15713			input_dataset,
15714		},
15715		Attrs: attrs,
15716	}
15717	op := scope.AddOperation(opspec)
15718	return op.Output(0)
15719}
15720
15721// Computes `exp(x) - 1` element-wise.
15722//
15723//	That is, it computes `exp(x) - 1` or `e^(x) - 1`, where `x` is the input
15724//	tensor and `e` denotes Euler's number, approximately equal to 2.718281.
15725//
15726//	```python
15727//	x = tf.constant(2.0)
15728//	tf.math.expm1(x) ==> 6.389056
15729//
15730//	x = tf.constant([2.0, 8.0])
15731//	tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
15732//
15733//	x = tf.constant(1 + 1j)
15734//	tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
15735//	```
15736func Expm1(scope *Scope, x tf.Output) (y tf.Output) {
15737	if scope.Err() != nil {
15738		return
15739	}
15740	opspec := tf.OpSpec{
15741		Type: "Expm1",
15742		Input: []tf.Input{
15743			x,
15744		},
15745	}
15746	op := scope.AddOperation(opspec)
15747	return op.Output(0)
15748}
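
// A minimal Go usage sketch for Expm1 (hand-written, not generated). For
// small `x`, Expm1(x) is more accurate than computing Exp(x) - 1 directly,
// where the subtraction loses precision to cancellation.
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{1e-7})
//	y := op.Expm1(s, x) // approximately 1e-7; Exp(x) - 1 would round poorly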
15749
15750// ExtractGlimpseAttr is an optional argument to ExtractGlimpse.
15751type ExtractGlimpseAttr func(optionalAttr)
15752
15753// ExtractGlimpseCentered sets the optional centered attribute to value.
15754//
15755// value: indicates if the offset coordinates are centered relative to
15756// the image, in which case the (0, 0) offset is relative to the center
15757// of the input images. If false, the (0,0) offset corresponds to the
15758// upper left corner of the input images.
15759// If not specified, defaults to true
15760func ExtractGlimpseCentered(value bool) ExtractGlimpseAttr {
15761	return func(m optionalAttr) {
15762		m["centered"] = value
15763	}
15764}
15765
15766// ExtractGlimpseNormalized sets the optional normalized attribute to value.
15767//
15768// value: indicates if the offset coordinates are normalized.
15769// If not specified, defaults to true
15770func ExtractGlimpseNormalized(value bool) ExtractGlimpseAttr {
15771	return func(m optionalAttr) {
15772		m["normalized"] = value
15773	}
15774}
15775
15776// ExtractGlimpseUniformNoise sets the optional uniform_noise attribute to value.
15777//
15778// value: indicates if the noise should be generated using a
15779// uniform distribution or a Gaussian distribution.
15780// If not specified, defaults to true
15781func ExtractGlimpseUniformNoise(value bool) ExtractGlimpseAttr {
15782	return func(m optionalAttr) {
15783		m["uniform_noise"] = value
15784	}
15785}
15786
15787// ExtractGlimpseNoise sets the optional noise attribute to value.
15788//
15789// value: indicates if the noise should be `uniform`, `gaussian`, or
15790// `zero`. The default is `uniform`, which means the noise type
15791// will be decided by `uniform_noise`.
15792// If not specified, defaults to "uniform"
15793func ExtractGlimpseNoise(value string) ExtractGlimpseAttr {
15794	return func(m optionalAttr) {
15795		m["noise"] = value
15796	}
15797}
15798
15799// Extracts a glimpse from the input tensor.
15800//
15801// Returns a set of windows called glimpses extracted at locations
15802// `offsets` from the input tensor. If a window only partially
15803// overlaps the input, the non-overlapping areas will be filled with
15804// random noise.
15805//
15806// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
15807// glimpse_width, channels]`. The channels and batch dimensions are the
15808// same as those of the input tensor. The height and width of the output
15809// windows are specified in the `size` parameter.
15810//
15811// The arguments `normalized` and `centered` control how the windows are built:
15812//
15813//   - If the coordinates are normalized but not centered, 0.0 and 1.0
15814//     correspond to the minimum and maximum of each height and width
15815//     dimension.
15816//   - If the coordinates are both normalized and centered, they range from
15817//     -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
15818//     left corner, the lower right corner is located at (1.0, 1.0) and the
15819//     center is at (0, 0).
15820//   - If the coordinates are not normalized they are interpreted as
15821//     numbers of pixels.
15822//
15823// Arguments:
15824//
15825//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
15826//	size: A 1-D tensor of 2 elements containing the size of the glimpses
15827//
15828// to extract.  The glimpse height must be specified first, followed
15829// by the glimpse width.
15830//
15831//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
15832//
15833// the y, x locations of the center of each window.
15834//
15835// Returns A tensor representing the glimpses `[batch_size,
15836// glimpse_height, glimpse_width, channels]`.
15837func ExtractGlimpse(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseAttr) (glimpse tf.Output) {
15838	if scope.Err() != nil {
15839		return
15840	}
15841	attrs := map[string]interface{}{}
15842	for _, a := range optional {
15843		a(attrs)
15844	}
15845	opspec := tf.OpSpec{
15846		Type: "ExtractGlimpse",
15847		Input: []tf.Input{
15848			input, size, offsets,
15849		},
15850		Attrs: attrs,
15851	}
15852	op := scope.AddOperation(opspec)
15853	return op.Output(0)
15854}
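
// A hand-written sketch (not generated) of extracting one centered, normalized
// glimpse per batch element. It assumes `img` is a float tensor of shape
// `[1, height, width, 1]` built elsewhere; that name is illustrative only.
//
//	s := op.NewScope()
//	glimpse := op.ExtractGlimpse(s, img,
//		op.Const(s, []int32{2, 2}),           // glimpse size: height, width
//		op.Const(s, [][]float32{{0.0, 0.0}}), // offset at the image center
//		op.ExtractGlimpseCentered(true),
//		op.ExtractGlimpseNormalized(true))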
15855
15856// ExtractGlimpseV2Attr is an optional argument to ExtractGlimpseV2.
15857type ExtractGlimpseV2Attr func(optionalAttr)
15858
15859// ExtractGlimpseV2Centered sets the optional centered attribute to value.
15860//
15861// value: indicates if the offset coordinates are centered relative to
15862// the image, in which case the (0, 0) offset is relative to the center
15863// of the input images. If false, the (0,0) offset corresponds to the
15864// upper left corner of the input images.
15865// If not specified, defaults to true
15866func ExtractGlimpseV2Centered(value bool) ExtractGlimpseV2Attr {
15867	return func(m optionalAttr) {
15868		m["centered"] = value
15869	}
15870}
15871
15872// ExtractGlimpseV2Normalized sets the optional normalized attribute to value.
15873//
15874// value: indicates if the offset coordinates are normalized.
15875// If not specified, defaults to true
15876func ExtractGlimpseV2Normalized(value bool) ExtractGlimpseV2Attr {
15877	return func(m optionalAttr) {
15878		m["normalized"] = value
15879	}
15880}
15881
15882// ExtractGlimpseV2UniformNoise sets the optional uniform_noise attribute to value.
15883//
15884// value: indicates if the noise should be generated using a
15885// uniform distribution or a Gaussian distribution.
15886// If not specified, defaults to true
15887func ExtractGlimpseV2UniformNoise(value bool) ExtractGlimpseV2Attr {
15888	return func(m optionalAttr) {
15889		m["uniform_noise"] = value
15890	}
15891}
15892
15893// ExtractGlimpseV2Noise sets the optional noise attribute to value.
15894//
15895// value: indicates if the noise should be `uniform`, `gaussian`, or
15896// `zero`. The default is `uniform`, which means the noise type
15897// will be decided by `uniform_noise`.
15898// If not specified, defaults to "uniform"
15899func ExtractGlimpseV2Noise(value string) ExtractGlimpseV2Attr {
15900	return func(m optionalAttr) {
15901		m["noise"] = value
15902	}
15903}
15904
15905// Extracts a glimpse from the input tensor.
15906//
15907// Returns a set of windows called glimpses extracted at locations
15908// `offsets` from the input tensor. If a window only partially
15909// overlaps the input, the non-overlapping areas will be filled with
15910// random noise.
15911//
15912// The result is a 4-D tensor of shape `[batch_size, glimpse_height,
15913// glimpse_width, channels]`. The channels and batch dimensions are the
15914// same as those of the input tensor. The height and width of the output
15915// windows are specified in the `size` parameter.
15916//
15917// The arguments `normalized` and `centered` control how the windows are built:
15918//
15919//   - If the coordinates are normalized but not centered, 0.0 and 1.0
15920//     correspond to the minimum and maximum of each height and width
15921//     dimension.
15922//   - If the coordinates are both normalized and centered, they range from
15923//     -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper
15924//     left corner, the lower right corner is located at (1.0, 1.0) and the
15925//     center is at (0, 0).
15926//   - If the coordinates are not normalized they are interpreted as
15927//     numbers of pixels.
15928//
15929// Arguments:
15930//
15931//	input: A 4-D float tensor of shape `[batch_size, height, width, channels]`.
15932//	size: A 1-D tensor of 2 elements containing the size of the glimpses
15933//
15934// to extract.  The glimpse height must be specified first, followed
15935// by the glimpse width.
15936//
15937//	offsets: A 2-D integer tensor of shape `[batch_size, 2]` containing
15938//
15939// the y, x locations of the center of each window.
15940//
15941// Returns A tensor representing the glimpses `[batch_size,
15942// glimpse_height, glimpse_width, channels]`.
15943func ExtractGlimpseV2(scope *Scope, input tf.Output, size tf.Output, offsets tf.Output, optional ...ExtractGlimpseV2Attr) (glimpse tf.Output) {
15944	if scope.Err() != nil {
15945		return
15946	}
15947	attrs := map[string]interface{}{}
15948	for _, a := range optional {
15949		a(attrs)
15950	}
15951	opspec := tf.OpSpec{
15952		Type: "ExtractGlimpseV2",
15953		Input: []tf.Input{
15954			input, size, offsets,
15955		},
15956		Attrs: attrs,
15957	}
15958	op := scope.AddOperation(opspec)
15959	return op.Output(0)
15960}
15961
15962// Extract `patches` from `images` and put them in the "depth" output dimension.
15963//
15964// Arguments:
15965//
15966//	images: 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
15967//	ksizes: The size of the sliding window for each dimension of `images`.
15968//	strides: How far the centers of two consecutive patches are in
15969//
15970// the images. Must be: `[1, stride_rows, stride_cols, 1]`.
15971//
15972//	rates: Must be: `[1, rate_rows, rate_cols, 1]`. This is the
15973//
15974// input stride, specifying how far two consecutive patch samples are in the
15975// input. Equivalent to extracting patches with
15976// `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
15977// subsampling them spatially by a factor of `rates`. This is equivalent to
15978// `rate` in dilated (a.k.a. Atrous) convolutions.
15979//
15980//	padding: The type of padding algorithm to use.
15981//
15982// Returns 4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
15983// ksize_cols * depth]` containing image patches with size
15984// `ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
15985// `out_rows` and `out_cols` are the dimensions of the output patches.
15986func ExtractImagePatches(scope *Scope, images tf.Output, ksizes []int64, strides []int64, rates []int64, padding string) (patches tf.Output) {
15987	if scope.Err() != nil {
15988		return
15989	}
15990	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "rates": rates, "padding": padding}
15991	opspec := tf.OpSpec{
15992		Type: "ExtractImagePatches",
15993		Input: []tf.Input{
15994			images,
15995		},
15996		Attrs: attrs,
15997	}
15998	op := scope.AddOperation(opspec)
15999	return op.Output(0)
16000}
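
// A hand-written sketch (not generated): extracting all 2x2 patches from a
// 3x3 single-channel image. It assumes `images` is a float tensor of shape
// `[1, 3, 3, 1]` built elsewhere; that name is illustrative only.
//
//	s := op.NewScope()
//	patches := op.ExtractImagePatches(s, images,
//		[]int64{1, 2, 2, 1}, // ksizes
//		[]int64{1, 1, 1, 1}, // strides
//		[]int64{1, 1, 1, 1}, // rates
//		"VALID")
//	// With VALID padding the output has shape [1, 2, 2, 4]; each 2x2 patch is
//	// flattened into the last ("depth") dimension.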
16001
16002// ExtractJpegShapeAttr is an optional argument to ExtractJpegShape.
16003type ExtractJpegShapeAttr func(optionalAttr)
16004
16005// ExtractJpegShapeOutputType sets the optional output_type attribute to value.
16006//
16007// value: (Optional) The output type of the operation (int32 or int64).
16008// Defaults to int32.
16009// If not specified, defaults to DT_INT32
16010func ExtractJpegShapeOutputType(value tf.DataType) ExtractJpegShapeAttr {
16011	return func(m optionalAttr) {
16012		m["output_type"] = value
16013	}
16014}
16015
16016// Extract the shape information of a JPEG-encoded image.
16017//
16018// This op only parses the image header, so it is much faster than DecodeJpeg.
16019//
16020// Arguments:
16021//
16022//	contents: 0-D. The JPEG-encoded image.
16023//
16024// Returns 1-D. The image shape with format [height, width, channels].
16025func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegShapeAttr) (image_shape tf.Output) {
16026	if scope.Err() != nil {
16027		return
16028	}
16029	attrs := map[string]interface{}{}
16030	for _, a := range optional {
16031		a(attrs)
16032	}
16033	opspec := tf.OpSpec{
16034		Type: "ExtractJpegShape",
16035		Input: []tf.Input{
16036			contents,
16037		},
16038		Attrs: attrs,
16039	}
16040	op := scope.AddOperation(opspec)
16041	return op.Output(0)
16042}
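
// A hand-written sketch (not generated) of reading JPEG dimensions without a
// full decode. It assumes the generated ReadFile wrapper elsewhere in this
// package; the filename is illustrative only.
//
//	s := op.NewScope()
//	contents := op.ReadFile(s, op.Const(s, "image.jpg"))
//	shape := op.ExtractJpegShape(s, contents) // [height, width, channels]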
16043
16044// Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension of `extract_image_patches`.
16045//
16046// Arguments:
16047//
16048//	input: 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
16049//	ksizes: The size of the sliding window for each dimension of `input`.
16050//	strides: 1-D of length 5. How far the centers of two consecutive patches are in
16051//
16052// `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
16053//
16054//	padding: The type of padding algorithm to use.
16055//
16056// The size-related attributes are specified as follows:
16057//
16058// ```python
16059// ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
16060// strides = [1, stride_planes, strides_rows, strides_cols, 1]
16061// ```
16062//
16063// Returns 5-D Tensor with shape `[batch, out_planes, out_rows, out_cols,
16064// ksize_planes * ksize_rows * ksize_cols * depth]` containing patches
16065// with size `ksize_planes x ksize_rows x ksize_cols x depth` vectorized
16066// in the "depth" dimension. Note `out_planes`, `out_rows` and `out_cols`
16067// are the dimensions of the output patches.
16068func ExtractVolumePatches(scope *Scope, input tf.Output, ksizes []int64, strides []int64, padding string) (patches tf.Output) {
16069	if scope.Err() != nil {
16070		return
16071	}
16072	attrs := map[string]interface{}{"ksizes": ksizes, "strides": strides, "padding": padding}
16073	opspec := tf.OpSpec{
16074		Type: "ExtractVolumePatches",
16075		Input: []tf.Input{
16076			input,
16077		},
16078		Attrs: attrs,
16079	}
16080	op := scope.AddOperation(opspec)
16081	return op.Output(0)
16082}
16083
16084// Fast Fourier transform.
16085//
16086// Computes the 1-dimensional discrete Fourier transform over the inner-most
16087// dimension of `input`.
16088//
16089// Arguments:
16090//
16091//	input: A complex tensor.
16092//
16093// Returns A complex tensor of the same shape as `input`. The inner-most
16094//
16095//	dimension of `input` is replaced with its 1D Fourier transform.
16096//
16097// @compatibility(numpy)
16098// Equivalent to np.fft.fft
16099// @end_compatibility
16100func FFT(scope *Scope, input tf.Output) (output tf.Output) {
16101	if scope.Err() != nil {
16102		return
16103	}
16104	opspec := tf.OpSpec{
16105		Type: "FFT",
16106		Input: []tf.Input{
16107			input,
16108		},
16109	}
16110	op := scope.AddOperation(opspec)
16111	return op.Output(0)
16112}
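
// A hand-written sketch (not generated): the FFT of a unit impulse is a
// constant spectrum of ones. It assumes the generated Complex wrapper
// elsewhere in this package to build the complex input.
//
//	s := op.NewScope()
//	re := op.Const(s, []float32{1, 0, 0, 0})
//	im := op.Const(s, []float32{0, 0, 0, 0})
//	x := op.Complex(s, re, im)
//	y := op.FFT(s, x) // [1+0i, 1+0i, 1+0i, 1+0i]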
16113
16114// 2D fast Fourier transform.
16115//
16116// Computes the 2-dimensional discrete Fourier transform over the inner-most
16117// 2 dimensions of `input`.
16118//
16119// Arguments:
16120//
16121//	input: A complex tensor.
16122//
16123// Returns A complex tensor of the same shape as `input`. The inner-most 2
16124//
16125//	dimensions of `input` are replaced with their 2D Fourier transform.
16126//
16127// @compatibility(numpy)
16128// Equivalent to np.fft.fft2
16129// @end_compatibility
16130func FFT2D(scope *Scope, input tf.Output) (output tf.Output) {
16131	if scope.Err() != nil {
16132		return
16133	}
16134	opspec := tf.OpSpec{
16135		Type: "FFT2D",
16136		Input: []tf.Input{
16137			input,
16138		},
16139	}
16140	op := scope.AddOperation(opspec)
16141	return op.Output(0)
16142}
16143
16144// 3D fast Fourier transform.
16145//
16146// Computes the 3-dimensional discrete Fourier transform over the inner-most 3
16147// dimensions of `input`.
16148//
16149// Arguments:
16150//
16151//	input: A complex tensor.
16152//
16153// Returns A complex tensor of the same shape as `input`. The inner-most 3
16154//
16155//	dimensions of `input` are replaced with their 3D Fourier transform.
16156//
16157// @compatibility(numpy)
16158// Equivalent to np.fft.fftn with 3 dimensions.
16159// @end_compatibility
16160func FFT3D(scope *Scope, input tf.Output) (output tf.Output) {
16161	if scope.Err() != nil {
16162		return
16163	}
16164	opspec := tf.OpSpec{
16165		Type: "FFT3D",
16166		Input: []tf.Input{
16167			input,
16168		},
16169	}
16170	op := scope.AddOperation(opspec)
16171	return op.Output(0)
16172}
16173
16174// FIFOQueueV2Attr is an optional argument to FIFOQueueV2.
16175type FIFOQueueV2Attr func(optionalAttr)
16176
16177// FIFOQueueV2Shapes sets the optional shapes attribute to value.
16178//
16179// value: The shape of each component in a value. The length of this attr must
16180// be either 0 or the same as the length of component_types. If the length of
16181// this attr is 0, the shapes of queue elements are not constrained, and
16182// only one element may be dequeued at a time.
16183// If not specified, defaults to {}
16184//
16185// REQUIRES: len(value) >= 0
16186func FIFOQueueV2Shapes(value []tf.Shape) FIFOQueueV2Attr {
16187	return func(m optionalAttr) {
16188		m["shapes"] = value
16189	}
16190}
16191
16192// FIFOQueueV2Capacity sets the optional capacity attribute to value.
16193//
16194// value: The upper bound on the number of elements in this queue.
16195// Negative numbers mean no limit.
16196// If not specified, defaults to -1
16197func FIFOQueueV2Capacity(value int64) FIFOQueueV2Attr {
16198	return func(m optionalAttr) {
16199		m["capacity"] = value
16200	}
16201}
16202
16203// FIFOQueueV2Container sets the optional container attribute to value.
16204//
16205// value: If non-empty, this queue is placed in the given container.
16206// Otherwise, a default container is used.
16207// If not specified, defaults to ""
16208func FIFOQueueV2Container(value string) FIFOQueueV2Attr {
16209	return func(m optionalAttr) {
16210		m["container"] = value
16211	}
16212}
16213
16214// FIFOQueueV2SharedName sets the optional shared_name attribute to value.
16215//
16216// value: If non-empty, this queue will be shared under the given name
16217// across multiple sessions.
16218// If not specified, defaults to ""
16219func FIFOQueueV2SharedName(value string) FIFOQueueV2Attr {
16220	return func(m optionalAttr) {
16221		m["shared_name"] = value
16222	}
16223}
16224
16225// A queue that produces elements in first-in first-out order.
16226//
16227// Arguments:
16228//
16229//	component_types: The type of each component in a value.
16230//
16231// Returns The handle to the queue.
16232func FIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...FIFOQueueV2Attr) (handle tf.Output) {
16233	if scope.Err() != nil {
16234		return
16235	}
16236	attrs := map[string]interface{}{"component_types": component_types}
16237	for _, a := range optional {
16238		a(attrs)
16239	}
16240	opspec := tf.OpSpec{
16241		Type: "FIFOQueueV2",
16242
16243		Attrs: attrs,
16244	}
16245	op := scope.AddOperation(opspec)
16246	return op.Output(0)
16247}
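
// A hand-written sketch (not generated) of a bounded FIFO queue. It assumes
// the generated QueueEnqueueV2 and QueueDequeueV2 wrappers elsewhere in this
// package; the enqueue runs as a target and the dequeue as a fetch.
//
//	s := op.NewScope()
//	q := op.FIFOQueueV2(s, []tf.DataType{tf.Float},
//		op.FIFOQueueV2Capacity(10))
//	enq := op.QueueEnqueueV2(s, q, []tf.Output{op.Const(s, float32(1.5))})
//	deq := op.QueueDequeueV2(s, q, []tf.DataType{tf.Float})
//	// After s.Finalize(): sess.Run(nil, nil, []*tf.Operation{enq}) then
//	// sess.Run(nil, []tf.Output{deq[0]}, nil) yields 1.5.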
16248
16249// Output a fact about factorials.
16250func Fact(scope *Scope) (fact tf.Output) {
16251	if scope.Err() != nil {
16252		return
16253	}
16254	opspec := tf.OpSpec{
16255		Type: "Fact",
16256	}
16257	op := scope.AddOperation(opspec)
16258	return op.Output(0)
16259}
16260
16261// This op is used as a placeholder in If branch functions. It doesn't provide a
16262// valid output when run, so it must either be removed (e.g. replaced with a
16263// function input) or guaranteed not to be used (e.g. if mirroring an
16264// intermediate output needed for the gradient computation of the other branch).
16265//
16266// Arguments:
16267//
16268//	dtype: The type of the output.
16269//	shape: The purported shape of the output. This is only used for shape inference;
16270//	the output will not necessarily have this shape. Can be a partial shape.
16271//
16272// Returns "Fake" output value. This should not be consumed by another op.
16273func FakeParam(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
16274	if scope.Err() != nil {
16275		return
16276	}
16277	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
16278	opspec := tf.OpSpec{
16279		Type: "FakeParam",
16280
16281		Attrs: attrs,
16282	}
16283	op := scope.AddOperation(opspec)
16284	return op.Output(0)
16285}
16286
16287// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs.
16288type FakeQuantWithMinMaxArgsAttr func(optionalAttr)
16289
16290// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value.
16291// If not specified, defaults to -6
16292func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr {
16293	return func(m optionalAttr) {
16294		m["min"] = value
16295	}
16296}
16297
16298// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value.
16299// If not specified, defaults to 6
16300func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr {
16301	return func(m optionalAttr) {
16302		m["max"] = value
16303	}
16304}
16305
16306// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value.
16307// If not specified, defaults to 8
16308func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
16309	return func(m optionalAttr) {
16310		m["num_bits"] = value
16311	}
16312}
16313
16314// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
16315// If not specified, defaults to false
16316func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
16317	return func(m optionalAttr) {
16318		m["narrow_range"] = value
16319	}
16320}
16321
16322// Fake-quantize the 'inputs' tensor of type float to an 'outputs' tensor of the same type.
16323//
16324// # Attributes
16325//
16326// *   `[min; max]` define the clamping range for the `inputs` data.
16327// *   `inputs` values are quantized into the quantization range (
16328// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
16329// when it is true) and then de-quantized and output as floats in `[min; max]`
16330// interval.
16331// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
16332//
16333// Before quantization, `min` and `max` values are adjusted with the following
16334// logic.
16335// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
16336// the behavior can be unexpected:
16337//
16338// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
16339// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
16340// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
16341// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
16342//
16343// Quantization is called fake since the output is still in floating point.
16344func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
16345	if scope.Err() != nil {
16346		return
16347	}
16348	attrs := map[string]interface{}{}
16349	for _, a := range optional {
16350		a(attrs)
16351	}
16352	opspec := tf.OpSpec{
16353		Type: "FakeQuantWithMinMaxArgs",
16354		Input: []tf.Input{
16355			inputs,
16356		},
16357		Attrs: attrs,
16358	}
16359	op := scope.AddOperation(opspec)
16360	return op.Output(0)
16361}
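
// A hand-written sketch (not generated). With the defaults min=-6, max=6,
// num_bits=8 and narrow_range=false, min <= 0 <= max already holds, so
// scale = (6 - (-6)) / (2^8 - 1) = 12/255 ≈ 0.047; inputs are clamped to
// roughly [-6, 6] and rounded to the nearest multiple of the scale.
//
//	s := op.NewScope()
//	x := op.Const(s, []float32{-10, 0.03, 10})
//	q := op.FakeQuantWithMinMaxArgs(s, x) // approximately [-6, 0.047, 6]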
16362
16363// FakeQuantWithMinMaxArgsGradientAttr is an optional argument to FakeQuantWithMinMaxArgsGradient.
16364type FakeQuantWithMinMaxArgsGradientAttr func(optionalAttr)
16365
16366// FakeQuantWithMinMaxArgsGradientMin sets the optional min attribute to value.
16367// If not specified, defaults to -6
16368func FakeQuantWithMinMaxArgsGradientMin(value float32) FakeQuantWithMinMaxArgsGradientAttr {
16369	return func(m optionalAttr) {
16370		m["min"] = value
16371	}
16372}
16373
16374// FakeQuantWithMinMaxArgsGradientMax sets the optional max attribute to value.
16375// If not specified, defaults to 6
16376func FakeQuantWithMinMaxArgsGradientMax(value float32) FakeQuantWithMinMaxArgsGradientAttr {
16377	return func(m optionalAttr) {
16378		m["max"] = value
16379	}
16380}
16381
16382// FakeQuantWithMinMaxArgsGradientNumBits sets the optional num_bits attribute to value.
16383// If not specified, defaults to 8
16384func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgsGradientAttr {
16385	return func(m optionalAttr) {
16386		m["num_bits"] = value
16387	}
16388}
16389
16390// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
16391// If not specified, defaults to false
16392func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
16393	return func(m optionalAttr) {
16394		m["narrow_range"] = value
16395	}
16396}
16397
16398// Compute gradients for a FakeQuantWithMinMaxArgs operation.
16399//
16400// Arguments:
16401//
16402//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
16403//	inputs: Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
16404//
16405// Returns Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
16406// `gradients * (inputs >= min && inputs <= max)`.
16407func FakeQuantWithMinMaxArgsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsGradientAttr) (backprops tf.Output) {
16408	if scope.Err() != nil {
16409		return
16410	}
16411	attrs := map[string]interface{}{}
16412	for _, a := range optional {
16413		a(attrs)
16414	}
16415	opspec := tf.OpSpec{
16416		Type: "FakeQuantWithMinMaxArgsGradient",
16417		Input: []tf.Input{
16418			gradients, inputs,
16419		},
16420		Attrs: attrs,
16421	}
16422	op := scope.AddOperation(opspec)
16423	return op.Output(0)
16424}
16425
16426// FakeQuantWithMinMaxVarsAttr is an optional argument to FakeQuantWithMinMaxVars.
16427type FakeQuantWithMinMaxVarsAttr func(optionalAttr)
16428
16429// FakeQuantWithMinMaxVarsNumBits sets the optional num_bits attribute to value.
16430// If not specified, defaults to 8
16431func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
16432	return func(m optionalAttr) {
16433		m["num_bits"] = value
16434	}
16435}
16436
16437// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
16438// If not specified, defaults to false
16439func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
16440	return func(m optionalAttr) {
16441		m["narrow_range"] = value
16442	}
16443}
16444
16445// Fake-quantize the 'inputs' tensor of type float via global float scalars `min` and `max`.
16446//
16447// Fake-quantize the `inputs` tensor of type float via global float scalars
16448// `min` and `max` to `outputs` tensor of same shape as `inputs`.
16449//
16450// # Attributes
16451//
16452// *   `[min; max]` define the clamping range for the `inputs` data.
16453// *   `inputs` values are quantized into the quantization range (
16454// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
16455// when it is true) and then de-quantized and output as floats in `[min; max]`
16456// interval.
16457// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
16458//
16459// Before quantization, `min` and `max` values are adjusted with the following
16460// logic.
16461// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
16462// the behavior can be unexpected:
16463//
16464// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
16465// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
16466// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
16467// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
16468//
16469// This operation has a gradient and thus allows for training `min` and `max`
16470// values.
16471func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
16472	if scope.Err() != nil {
16473		return
16474	}
16475	attrs := map[string]interface{}{}
16476	for _, a := range optional {
16477		a(attrs)
16478	}
16479	opspec := tf.OpSpec{
16480		Type: "FakeQuantWithMinMaxVars",
16481		Input: []tf.Input{
16482			inputs, min, max,
16483		},
16484		Attrs: attrs,
16485	}
16486	op := scope.AddOperation(opspec)
16487	return op.Output(0)
16488}
16489
16490// FakeQuantWithMinMaxVarsGradientAttr is an optional argument to FakeQuantWithMinMaxVarsGradient.
16491type FakeQuantWithMinMaxVarsGradientAttr func(optionalAttr)
16492
16493// FakeQuantWithMinMaxVarsGradientNumBits sets the optional num_bits attribute to value.
16494//
16495// value: The bitwidth of the quantization; between 2 and 8, inclusive.
16496// If not specified, defaults to 8
16497func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVarsGradientAttr {
16498	return func(m optionalAttr) {
16499		m["num_bits"] = value
16500	}
16501}
16502
16503// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
16504//
16505// value: Whether to quantize into 2^num_bits - 1 distinct values.
16506// If not specified, defaults to false
16507func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
16508	return func(m optionalAttr) {
16509		m["narrow_range"] = value
16510	}
16511}
16512
16513// Compute gradients for a FakeQuantWithMinMaxVars operation.
16514//
16515// Arguments:
16516//
16517//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
16518//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation.
16519//
16520// min, max: Quantization interval, scalar floats.
16521//
16522// Returns:
16523//
16524//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs:
16525//
16526// `gradients * (inputs >= min && inputs <= max)`.
16527//
16528//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter:
16529//
16530// `sum(gradients * (inputs < min))`.
16531//
16532//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter:
16533//
16534// `sum(gradients * (inputs > max))`.
16535func FakeQuantWithMinMaxVarsGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
16536	if scope.Err() != nil {
16537		return
16538	}
16539	attrs := map[string]interface{}{}
16540	for _, a := range optional {
16541		a(attrs)
16542	}
16543	opspec := tf.OpSpec{
16544		Type: "FakeQuantWithMinMaxVarsGradient",
16545		Input: []tf.Input{
16546			gradients, inputs, min, max,
16547		},
16548		Attrs: attrs,
16549	}
16550	op := scope.AddOperation(opspec)
16551	return op.Output(0), op.Output(1), op.Output(2)
16552}
16553
16554// FakeQuantWithMinMaxVarsPerChannelAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannel.
16555type FakeQuantWithMinMaxVarsPerChannelAttr func(optionalAttr)
16556
16557// FakeQuantWithMinMaxVarsPerChannelNumBits sets the optional num_bits attribute to value.
16558// If not specified, defaults to 8
16559func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelAttr {
16560	return func(m optionalAttr) {
16561		m["num_bits"] = value
16562	}
16563}
16564
16565// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
16566// If not specified, defaults to false
16567func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
16568	return func(m optionalAttr) {
16569		m["narrow_range"] = value
16570	}
16571}
16572
16573// Fake-quantize the 'inputs' tensor of type float via per-channel floats.
16574//
16575// Fake-quantize the `inputs` tensor of type float, which has one of the shapes
16576// `[d]`, `[b, d]`, or `[b, h, w, d]`, via per-channel floats `min` and `max`
16577// of shape `[d]`, producing an `outputs` tensor of the same shape as `inputs`.
16578//
16579// # Attributes
16580//
16581// *   `[min; max]` define the clamping range for the `inputs` data.
16582// *   `inputs` values are quantized into the quantization range (
16583// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
16584// when it is true) and then de-quantized and output as floats in `[min; max]`
16585// interval.
16586// *   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
16587//
16588// Before quantization, `min` and `max` values are adjusted with the following
16589// logic.
16590// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
16591// the behavior can be unexpected:
16592//
16593// *   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
16594// *   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
16595// *   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
16596// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
16597//
16598// This operation has a gradient and thus allows for training `min` and `max`
16599// values.
16600func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
16601	if scope.Err() != nil {
16602		return
16603	}
16604	attrs := map[string]interface{}{}
16605	for _, a := range optional {
16606		a(attrs)
16607	}
16608	opspec := tf.OpSpec{
16609		Type: "FakeQuantWithMinMaxVarsPerChannel",
16610		Input: []tf.Input{
16611			inputs, min, max,
16612		},
16613		Attrs: attrs,
16614	}
16615	op := scope.AddOperation(opspec)
16616	return op.Output(0)
16617}
16618
16619// FakeQuantWithMinMaxVarsPerChannelGradientAttr is an optional argument to FakeQuantWithMinMaxVarsPerChannelGradient.
16620type FakeQuantWithMinMaxVarsPerChannelGradientAttr func(optionalAttr)
16621
16622// FakeQuantWithMinMaxVarsPerChannelGradientNumBits sets the optional num_bits attribute to value.
16623//
16624// value: The bitwidth of the quantization; between 2 and 16, inclusive.
16625// If not specified, defaults to 8
16626func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
16627	return func(m optionalAttr) {
16628		m["num_bits"] = value
16629	}
16630}
16631
16632// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
16633//
16634// value: Whether to quantize into 2^num_bits - 1 distinct values.
16635// If not specified, defaults to false
16636func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
16637	return func(m optionalAttr) {
16638		m["narrow_range"] = value
16639	}
16640}
16641
16642// Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
16643//
16644// Arguments:
16645//
16646//	gradients: Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
16647//	shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
16648//	inputs: Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
16649//	same as `gradients`.
16650//	min, max: Quantization interval, floats of shape `[d]`.
16651//
16652// Returns:
16653//
16654//	backprops_wrt_input: Backpropagated gradients w.r.t. inputs, shape same as
16655//	`inputs`: `gradients * (inputs >= min && inputs <= max)`.
16656//	backprop_wrt_min: Backpropagated gradients w.r.t. min parameter, shape `[d]`:
16657//	`sum_per_d(gradients * (inputs < min))`.
16658//	backprop_wrt_max: Backpropagated gradients w.r.t. max parameter, shape `[d]`:
16659//	`sum_per_d(gradients * (inputs > max))`.
16669func FakeQuantWithMinMaxVarsPerChannelGradient(scope *Scope, gradients tf.Output, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelGradientAttr) (backprops_wrt_input tf.Output, backprop_wrt_min tf.Output, backprop_wrt_max tf.Output) {
16670	if scope.Err() != nil {
16671		return
16672	}
16673	attrs := map[string]interface{}{}
16674	for _, a := range optional {
16675		a(attrs)
16676	}
16677	opspec := tf.OpSpec{
16678		Type: "FakeQuantWithMinMaxVarsPerChannelGradient",
16679		Input: []tf.Input{
16680			gradients, inputs, min, max,
16681		},
16682		Attrs: attrs,
16683	}
16684	op := scope.AddOperation(opspec)
16685	return op.Output(0), op.Output(1), op.Output(2)
16686}
16687
16688// Sets a configuration option of the file system.
16689//
16690// Arguments:
16691//
16692//	scheme: File system scheme.
16693//	key: The name of the configuration option.
16694//	value: The value of the configuration option.
16695//
16696// Returns the created operation.
16697func FileSystemSetConfiguration(scope *Scope, scheme tf.Output, key tf.Output, value tf.Output) (o *tf.Operation) {
16698	if scope.Err() != nil {
16699		return
16700	}
16701	opspec := tf.OpSpec{
16702		Type: "FileSystemSetConfiguration",
16703		Input: []tf.Input{
16704			scheme, key, value,
16705		},
16706	}
16707	return scope.AddOperation(opspec)
16708}
16709
16710// Creates a tensor filled with a scalar value.
16711//
16712// This operation creates a tensor of shape `dims` and fills it with `value`.
16713//
16714// For example:
16715//
16716// ```
16717// # Output tensor has shape [2, 3].
16718// fill([2, 3], 9) ==> [[9, 9, 9]
16719//                      [9, 9, 9]]
16720// ```
16723//
16724// `tf.fill` differs from `tf.constant` in a few ways:
16725//
16726//   - `tf.fill` only supports scalar contents, whereas `tf.constant` supports
16727//     Tensor values.
16728//   - `tf.fill` creates an Op in the computation graph that constructs the actual
16729//     Tensor value at runtime. This is in contrast to `tf.constant` which embeds
16730//     the entire Tensor into the graph with a `Const` node.
16731//   - Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
16732//     based on other runtime Tensors, unlike `tf.constant`.
16733//
16734// Arguments:
16735//
16736//	dims: 1-D. Represents the shape of the output tensor.
16737//	value: 0-D (scalar). Value to fill the returned tensor.
16738//
16739// @compatibility(numpy)
16740// Equivalent to np.full
16741// @end_compatibility
16742func Fill(scope *Scope, dims tf.Output, value tf.Output) (output tf.Output) {
16743	if scope.Err() != nil {
16744		return
16745	}
16746	opspec := tf.OpSpec{
16747		Type: "Fill",
16748		Input: []tf.Input{
16749			dims, value,
16750		},
16751	}
16752	op := scope.AddOperation(opspec)
16753	return op.Output(0)
16754}
16755
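// A minimal usage sketch of Fill from client code (the scope construction and
// Const values are illustrative assumptions):
//
// ```
// s := op.NewScope()
// dims := op.Const(s, []int32{2, 3})
// nine := op.Const(s, int32(9))
// filled := op.Fill(s, dims, nine) // a 2x3 tensor of 9s, built at graph run time
// _ = filled
// ```
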
16756// Creates a dataset containing the elements of the first component of `input_dataset` whose last component is true.
16757func FilterByLastComponentDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (output tf.Output) {
16758	if scope.Err() != nil {
16759		return
16760	}
16761	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
16762	opspec := tf.OpSpec{
16763		Type: "FilterByLastComponentDataset",
16764		Input: []tf.Input{
16765			input_dataset,
16766		},
16767		Attrs: attrs,
16768	}
16769	op := scope.AddOperation(opspec)
16770	return op.Output(0)
16771}
16772
16773// FinalizeDatasetAttr is an optional argument to FinalizeDataset.
16774type FinalizeDatasetAttr func(optionalAttr)
16775
16776// FinalizeDatasetHasCapturedRef sets the optional has_captured_ref attribute to value.
16777// If not specified, defaults to false
16778func FinalizeDatasetHasCapturedRef(value bool) FinalizeDatasetAttr {
16779	return func(m optionalAttr) {
16780		m["has_captured_ref"] = value
16781	}
16782}
16783
16784// Creates a dataset by applying `tf.data.Options` to `input_dataset`.
16785//
16786// Arguments:
16787//
16788//	input_dataset: A variant tensor representing the input dataset.
16789func FinalizeDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...FinalizeDatasetAttr) (handle tf.Output) {
16790	if scope.Err() != nil {
16791		return
16792	}
16793	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
16794	for _, a := range optional {
16795		a(attrs)
16796	}
16797	opspec := tf.OpSpec{
16798		Type: "FinalizeDataset",
16799		Input: []tf.Input{
16800			input_dataset,
16801		},
16802		Attrs: attrs,
16803	}
16804	op := scope.AddOperation(opspec)
16805	return op.Output(0)
16806}
16807
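// A usage sketch showing how the required `output_types`/`output_shapes`
// attributes and an optional attribute are passed from client code. The scope
// `s`, the `inputDataset` output, and the scalar-int64 element spec are
// illustrative assumptions:
//
// ```
// handle := op.FinalizeDataset(s, inputDataset,
// 	[]tf.DataType{tf.Int64},
// 	[]tf.Shape{tf.ScalarShape()},
// 	op.FinalizeDatasetHasCapturedRef(false))
// ```
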
16808// An op that finalizes the TPUEmbedding configuration.
16809//
16810// Arguments:
16811//
16812//	common_config: A string-encoded common configuration proto containing metadata
16813//
16814// about the TPUEmbedding partitioner output and the HBM size (in bytes) required
16815// for operation.
16816//
16817//	memory_config: A string-encoded memory config proto containing metadata about
16818//
16819// the memory allocations reserved for TPUEmbedding.
16820//
16821// Returns the created operation.
16822func FinalizeTPUEmbedding(scope *Scope, common_config tf.Output, memory_config tf.Output) (o *tf.Operation) {
16823	if scope.Err() != nil {
16824		return
16825	}
16826	opspec := tf.OpSpec{
16827		Type: "FinalizeTPUEmbedding",
16828		Input: []tf.Input{
16829			common_config, memory_config,
16830		},
16831	}
16832	return scope.AddOperation(opspec)
16833}
16834
16835// Generates fingerprint values.
16836//
16837// Generates fingerprint values of `data`.
16838//
16839// Fingerprint op considers the first dimension of `data` as the batch dimension,
16840// and `output[i]` contains the fingerprint value generated from contents in
16841// `data[i, ...]` for all `i`.
16842//
16843// Fingerprint op writes fingerprint values as byte arrays. For example, the
16844// default method `farmhash64` generates a 64-bit fingerprint value at a time.
16845// This 8-byte value is written out as a `uint8` array of size 8, in little-endian
16846// order.
16847//
16848// For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4),
16849// and that the fingerprint method is `farmhash64`. In this case, the output shape
16850// is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of
16851// each fingerprint value in bytes. `output[0, :]` is generated from the 12 integers
16852// in `data[0, :, :]`, and similarly `output[1, :]` is generated from the other 12
16853// integers in `data[1, :, :]`.
16854//
16855// Note that this op fingerprints the raw underlying buffer; it does not
16856// fingerprint the Tensor's metadata, such as data type and/or shape. For example,
16857// the fingerprint values are invariant under reshapes and bitcasts as long as the
16858// batch dimension remains the same:
16859//
16860// ```
16861// Fingerprint(data) == Fingerprint(Reshape(data, ...))
16862// Fingerprint(data) == Fingerprint(Bitcast(data, ...))
16863// ```
16864//
16865// For string data, one should expect `Fingerprint(data) !=
16866// Fingerprint(ReduceJoin(data))` in general.
16867//
16868// Arguments:
16869//
16870//	data: Must have rank 1 or higher.
16871//	method: Fingerprint method used by this op. The only currently available
16872//	method is `farmhash::fingerprint64`.
16874//
16875// Returns A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
16876// `data`'s first dimension, and the second dimension size depends on the
16877// fingerprint algorithm.
16878func Fingerprint(scope *Scope, data tf.Output, method tf.Output) (fingerprint tf.Output) {
16879	if scope.Err() != nil {
16880		return
16881	}
16882	opspec := tf.OpSpec{
16883		Type: "Fingerprint",
16884		Input: []tf.Input{
16885			data, method,
16886		},
16887	}
16888	op := scope.AddOperation(opspec)
16889	return op.Output(0)
16890}
16891
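// A sketch of the shape arithmetic from the example above (the scope and
// placeholder are illustrative assumptions; per the docs, `farmhash64` is the
// default method):
//
// ```
// s := op.NewScope()
// data := op.Placeholder(s, tf.Int32)   // fed with shape (2, 3, 4)
// method := op.Const(s, "farmhash64")
// fp := op.Fingerprint(s, data, method) // uint8 output of shape (2, 8)
// _ = fp
// ```
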
16892// FixedLengthRecordDatasetAttr is an optional argument to FixedLengthRecordDataset.
16893type FixedLengthRecordDatasetAttr func(optionalAttr)
16894
16895// FixedLengthRecordDatasetMetadata sets the optional metadata attribute to value.
16896// If not specified, defaults to ""
16897func FixedLengthRecordDatasetMetadata(value string) FixedLengthRecordDatasetAttr {
16898	return func(m optionalAttr) {
16899		m["metadata"] = value
16900	}
16901}
16902
16903// Creates a dataset that emits the records from one or more binary files.
16904//
16905// Arguments:
16906//
16907//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
16908//
16909// read.
16910//
16911//	header_bytes: A scalar representing the number of bytes to skip at the
16912//
16913// beginning of a file.
16914//
16915//	record_bytes: A scalar representing the number of bytes in each record.
16916//	footer_bytes: A scalar representing the number of bytes to skip at the end
16917//
16918// of a file.
16919//
16920//	buffer_size: A scalar representing the number of bytes to buffer. Must be > 0.
16921func FixedLengthRecordDataset(scope *Scope, filenames tf.Output, header_bytes tf.Output, record_bytes tf.Output, footer_bytes tf.Output, buffer_size tf.Output, optional ...FixedLengthRecordDatasetAttr) (handle tf.Output) {
16922	if scope.Err() != nil {
16923		return
16924	}
16925	attrs := map[string]interface{}{}
16926	for _, a := range optional {
16927		a(attrs)
16928	}
16929	opspec := tf.OpSpec{
16930		Type: "FixedLengthRecordDataset",
16931		Input: []tf.Input{
16932			filenames, header_bytes, record_bytes, footer_bytes, buffer_size,
16933		},
16934		Attrs: attrs,
16935	}
16936	op := scope.AddOperation(opspec)
16937	return op.Output(0)
16938}
16939
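// The framing implied by the arguments above: after skipping `header_bytes` at
// the start of a file and `footer_bytes` at its end, the remaining bytes are
// consumed in fixed `record_bytes` chunks. An editorial sketch of the resulting
// record count (variable names are hypothetical):
//
// ```
// numRecords := (fileSize - headerBytes - footerBytes) / recordBytes
// ```
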
16940// FixedLengthRecordReaderV2Attr is an optional argument to FixedLengthRecordReaderV2.
16941type FixedLengthRecordReaderV2Attr func(optionalAttr)
16942
16943// FixedLengthRecordReaderV2HeaderBytes sets the optional header_bytes attribute to value.
16944//
16945// value: Number of bytes in the header, defaults to 0.
16946// If not specified, defaults to 0
16947func FixedLengthRecordReaderV2HeaderBytes(value int64) FixedLengthRecordReaderV2Attr {
16948	return func(m optionalAttr) {
16949		m["header_bytes"] = value
16950	}
16951}
16952
16953// FixedLengthRecordReaderV2FooterBytes sets the optional footer_bytes attribute to value.
16954//
16955// value: Number of bytes in the footer, defaults to 0.
16956// If not specified, defaults to 0
16957func FixedLengthRecordReaderV2FooterBytes(value int64) FixedLengthRecordReaderV2Attr {
16958	return func(m optionalAttr) {
16959		m["footer_bytes"] = value
16960	}
16961}
16962
16963// FixedLengthRecordReaderV2HopBytes sets the optional hop_bytes attribute to value.
16964//
16965// value: Number of bytes to hop before each read. Default of 0 means using
16966// record_bytes.
16967// If not specified, defaults to 0
16968func FixedLengthRecordReaderV2HopBytes(value int64) FixedLengthRecordReaderV2Attr {
16969	return func(m optionalAttr) {
16970		m["hop_bytes"] = value
16971	}
16972}
16973
16974// FixedLengthRecordReaderV2Container sets the optional container attribute to value.
16975//
16976// value: If non-empty, this reader is placed in the given container.
16977// Otherwise, a default container is used.
16978// If not specified, defaults to ""
16979func FixedLengthRecordReaderV2Container(value string) FixedLengthRecordReaderV2Attr {
16980	return func(m optionalAttr) {
16981		m["container"] = value
16982	}
16983}
16984
16985// FixedLengthRecordReaderV2SharedName sets the optional shared_name attribute to value.
16986//
16987// value: If non-empty, this reader is named in the given bucket
16988// with this shared_name. Otherwise, the node name is used instead.
16989// If not specified, defaults to ""
16990func FixedLengthRecordReaderV2SharedName(value string) FixedLengthRecordReaderV2Attr {
16991	return func(m optionalAttr) {
16992		m["shared_name"] = value
16993	}
16994}
16995
16996// FixedLengthRecordReaderV2Encoding sets the optional encoding attribute to value.
16997//
16998// value: The type of encoding for the file. Currently ZLIB and GZIP
16999// are supported. Defaults to none.
17000// If not specified, defaults to ""
17001func FixedLengthRecordReaderV2Encoding(value string) FixedLengthRecordReaderV2Attr {
17002	return func(m optionalAttr) {
17003		m["encoding"] = value
17004	}
17005}
17006
17007// A Reader that outputs fixed-length records from a file.
17008//
17009// Arguments:
17010//
17011//	record_bytes: Number of bytes in the record.
17012//
17013// Returns The handle to reference the Reader.
17014func FixedLengthRecordReaderV2(scope *Scope, record_bytes int64, optional ...FixedLengthRecordReaderV2Attr) (reader_handle tf.Output) {
17015	if scope.Err() != nil {
17016		return
17017	}
17018	attrs := map[string]interface{}{"record_bytes": record_bytes}
17019	for _, a := range optional {
17020		a(attrs)
17021	}
17022	opspec := tf.OpSpec{
17023		Type: "FixedLengthRecordReaderV2",
17024
17025		Attrs: attrs,
17026	}
17027	op := scope.AddOperation(opspec)
17028	return op.Output(0)
17029}
17030
17031// FixedUnigramCandidateSamplerAttr is an optional argument to FixedUnigramCandidateSampler.
17032type FixedUnigramCandidateSamplerAttr func(optionalAttr)
17033
17034// FixedUnigramCandidateSamplerVocabFile sets the optional vocab_file attribute to value.
17035//
17036// value: Each valid line in this file (which should have a CSV-like format)
17037// corresponds to a valid word ID. IDs are in sequential order, starting from
17038// num_reserved_ids. The last entry in each line is expected to be a value
17039// corresponding to the count or relative probability. Exactly one of vocab_file
17040// and unigrams needs to be passed to this op.
17041// If not specified, defaults to ""
17042func FixedUnigramCandidateSamplerVocabFile(value string) FixedUnigramCandidateSamplerAttr {
17043	return func(m optionalAttr) {
17044		m["vocab_file"] = value
17045	}
17046}
17047
17048// FixedUnigramCandidateSamplerDistortion sets the optional distortion attribute to value.
17049//
17050// value: The distortion is used to skew the unigram probability distribution.
17051// Each weight is first raised to the distortion's power before adding to the
17052// internal unigram distribution. As a result, distortion = 1.0 gives regular
17053// unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
17054// a uniform distribution.
17055// If not specified, defaults to 1
17056func FixedUnigramCandidateSamplerDistortion(value float32) FixedUnigramCandidateSamplerAttr {
17057	return func(m optionalAttr) {
17058		m["distortion"] = value
17059	}
17060}
17061
17062// FixedUnigramCandidateSamplerNumReservedIds sets the optional num_reserved_ids attribute to value.
17063//
17064// value: Optionally, some reserved IDs can be added in the range
17065// [0, num_reserved_ids) by users. One use case is that a special unknown
17066// word token is used as ID 0. These IDs will have a sampling probability of 0.
17067// If not specified, defaults to 0
17068func FixedUnigramCandidateSamplerNumReservedIds(value int64) FixedUnigramCandidateSamplerAttr {
17069	return func(m optionalAttr) {
17070		m["num_reserved_ids"] = value
17071	}
17072}
17073
17074// FixedUnigramCandidateSamplerNumShards sets the optional num_shards attribute to value.
17075//
17076// value: A sampler can be used to sample from a subset of the original range
17077// in order to speed up the whole computation through parallelism. This parameter
17078// (together with 'shard') indicates the number of partitions that are being
17079// used in the overall computation.
17080// If not specified, defaults to 1
17081//
17082// REQUIRES: value >= 1
17083func FixedUnigramCandidateSamplerNumShards(value int64) FixedUnigramCandidateSamplerAttr {
17084	return func(m optionalAttr) {
17085		m["num_shards"] = value
17086	}
17087}
17088
17089// FixedUnigramCandidateSamplerShard sets the optional shard attribute to value.
17090//
17091// value: A sampler can be used to sample from a subset of the original range
17092// in order to speed up the whole computation through parallelism. This parameter
17093// (together with 'num_shards') indicates the particular partition number of a
17094// sampler op, when partitioning is being used.
17095// If not specified, defaults to 0
17096//
17097// REQUIRES: value >= 0
17098func FixedUnigramCandidateSamplerShard(value int64) FixedUnigramCandidateSamplerAttr {
17099	return func(m optionalAttr) {
17100		m["shard"] = value
17101	}
17102}
17103
17104// FixedUnigramCandidateSamplerUnigrams sets the optional unigrams attribute to value.
17105//
17106// value: A list of unigram counts or probabilities, one per ID in sequential
17107// order. Exactly one of vocab_file and unigrams should be passed to this op.
17108// If not specified, defaults to {}
17109func FixedUnigramCandidateSamplerUnigrams(value []float32) FixedUnigramCandidateSamplerAttr {
17110	return func(m optionalAttr) {
17111		m["unigrams"] = value
17112	}
17113}
17114
17115// FixedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
17116//
17117// value: If either seed or seed2 is set to be non-zero, the random number
17118// generator is seeded by the given seed.  Otherwise, it is seeded by a
17119// random seed.
17120// If not specified, defaults to 0
17121func FixedUnigramCandidateSamplerSeed(value int64) FixedUnigramCandidateSamplerAttr {
17122	return func(m optionalAttr) {
17123		m["seed"] = value
17124	}
17125}
17126
17127// FixedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
17128//
17129// value: A second seed to avoid seed collision.
17130// If not specified, defaults to 0
17131func FixedUnigramCandidateSamplerSeed2(value int64) FixedUnigramCandidateSamplerAttr {
17132	return func(m optionalAttr) {
17133		m["seed2"] = value
17134	}
17135}
17136
17137// Generates labels for candidate sampling with a learned unigram distribution.
17138//
17139// A unigram sampler could use a fixed unigram distribution read from a
17140// file or passed in as an in-memory array instead of building up the distribution
17141// from data on the fly. There is also an option to skew the distribution by
17142// applying a distortion power to the weights.
17143//
17144// The vocabulary file should be in CSV-like format, with the last field
17145// being the weight associated with the word.
17146//
17147// For each batch, this op picks a single set of sampled candidate labels.
17148//
17149// The advantages of sampling candidates per-batch are simplicity and the
17150// possibility of efficient dense matrix multiplication. The disadvantage is that
17151// the sampled candidates must be chosen independently of the context and of the
17152// true labels.
17153//
17154// Arguments:
17155//
17156//	true_classes: A batch_size * num_true matrix, in which each row contains the
17157//
17158// IDs of the num_true target_classes in the corresponding original label.
17159//
17160//	num_true: Number of true labels per context.
17161//	num_sampled: Number of candidates to randomly sample.
17162//	unique: If unique is true, we sample with rejection, so that all sampled
17163//
17164// candidates in a batch are unique. This requires some approximation to
17165// estimate the post-rejection sampling probabilities.
17166//
17167//	range_max: The sampler will sample integers from the interval [0, range_max).
17168//
17169// Returns:
17170//
17171//	sampled_candidates: A vector of length num_sampled, in which each element is
17172//
17173// the ID of a sampled candidate.
17174//
17175//	true_expected_count: A batch_size * num_true matrix, representing
17176//
17177// the number of times each candidate is expected to occur in a batch
17178// of sampled candidates. If unique=true, then this is a probability.
17179//
17180//	sampled_expected_count: A vector of length num_sampled, for each sampled
17181//
17182// candidate representing the number of times the candidate is expected
17183// to occur in a batch of sampled candidates.  If unique=true, then this is a
17184// probability.
17185func FixedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...FixedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
17186	if scope.Err() != nil {
17187		return
17188	}
17189	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
17190	for _, a := range optional {
17191		a(attrs)
17192	}
17193	opspec := tf.OpSpec{
17194		Type: "FixedUnigramCandidateSampler",
17195		Input: []tf.Input{
17196			true_classes,
17197		},
17198		Attrs: attrs,
17199	}
17200	op := scope.AddOperation(opspec)
17201	return op.Output(0), op.Output(1), op.Output(2)
17202}
17203
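// An editorial sketch of the documented `distortion` semantics in standalone Go
// (`math` import assumed): each weight is raised to the distortion power before
// normalization, so 1.0 keeps the unigram distribution and 0.0 flattens it to
// uniform.
//
// ```
// func distort(weights []float64, distortion float64) []float64 {
// 	p := make([]float64, len(weights))
// 	sum := 0.0
// 	for i, w := range weights {
// 		p[i] = math.Pow(w, distortion)
// 		sum += p[i]
// 	}
// 	for i := range p {
// 		p[i] /= sum // renormalize into a probability distribution
// 	}
// 	return p
// }
// ```
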
17204// Returns the element-wise largest integer not greater than x.
17205func Floor(scope *Scope, x tf.Output) (y tf.Output) {
17206	if scope.Err() != nil {
17207		return
17208	}
17209	opspec := tf.OpSpec{
17210		Type: "Floor",
17211		Input: []tf.Input{
17212			x,
17213		},
17214	}
17215	op := scope.AddOperation(opspec)
17216	return op.Output(0)
17217}
17218
17219// Returns x // y element-wise.
17220//
17221// *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
17222// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
17223func FloorDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
17224	if scope.Err() != nil {
17225		return
17226	}
17227	opspec := tf.OpSpec{
17228		Type: "FloorDiv",
17229		Input: []tf.Input{
17230			x, y,
17231		},
17232	}
17233	op := scope.AddOperation(opspec)
17234	return op.Output(0)
17235}
17236
17237// Returns element-wise remainder of division.
17238//
17239// This follows Python semantics in that the
17240// result here is consistent with a flooring divide. E.g.
17241// `floor(x / y) * y + floormod(x, y) = x`, regardless of the signs of x and y.
17242//
17243// *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
17244// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
17245func FloorMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
17246	if scope.Err() != nil {
17247		return
17248	}
17249	opspec := tf.OpSpec{
17250		Type: "FloorMod",
17251		Input: []tf.Input{
17252			x, y,
17253		},
17254	}
17255	op := scope.AddOperation(opspec)
17256	return op.Output(0)
17257}
17258
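// A scalar sketch of the flooring identity shared by FloorDiv and FloorMod
// (standalone Go with `math` imported; the values are chosen for illustration):
//
// ```
// x, y := -7.0, 3.0
// q := math.Floor(x / y) // -3 (what FloorDiv computes)
// r := x - q*y           //  2 (what FloorMod computes; takes the sign of y)
// // invariant: q*y + r == x
// ```
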
17259// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool.
17260type FractionalAvgPoolAttr func(optionalAttr)
17261
17262// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value.
17263//
17264// value: When set to True, generates the pooling sequence in a
17265// pseudorandom fashion; otherwise, in a random fashion. See the paper [Benjamin
17266// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
17267// difference between pseudorandom and random.
17268// If not specified, defaults to false
17269func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr {
17270	return func(m optionalAttr) {
17271		m["pseudo_random"] = value
17272	}
17273}
17274
17275// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value.
17276//
17277// value: When set to True, the values at the boundary of adjacent pooling
17278// cells are used by both cells when pooling. For example:
17279//
17280// `index  0  1  2  3  4`
17281//
17282// `value  20 5  16 3  7`
17283//
17284// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
17285// The result would be [41/3, 26/3] for fractional avg pooling.
17286// If not specified, defaults to false
17287func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr {
17288	return func(m optionalAttr) {
17289		m["overlapping"] = value
17290	}
17291}
17292
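// A worked version of the example above (editorial illustration):
//
// ```
// value := []float64{20, 5, 16, 3, 7}
// // pooling sequence [0, 2, 4] with overlapping=true; cell i covers
// // indices seq[i]..seq[i+1] inclusive, so index 2 is shared:
// cell0 := (value[0] + value[1] + value[2]) / 3 // 41/3
// cell1 := (value[2] + value[3] + value[4]) / 3 // 26/3
// _, _ = cell0, cell1
// ```
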
17293// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value.
17294//
17295// value: When set to True, a fixed pooling region will be used when
17296// iterating over a FractionalAvgPool node in the computation graph. Mainly used
17297// in unit tests to make FractionalAvgPool deterministic.
17298// If not specified, defaults to false
17299func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr {
17300	return func(m optionalAttr) {
17301		m["deterministic"] = value
17302	}
17303}
17304
17305// FractionalAvgPoolSeed sets the optional seed attribute to value.
17306//
17307// value: If either seed or seed2 is set to be non-zero, the random number
17308// generator is seeded by the given seed.  Otherwise, it is seeded by a
17309// random seed.
17310// If not specified, defaults to 0
17311func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr {
17312	return func(m optionalAttr) {
17313		m["seed"] = value
17314	}
17315}
17316
17317// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value.
17318//
17319// value: A second seed to avoid seed collision.
17320// If not specified, defaults to 0
17321func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr {
17322	return func(m optionalAttr) {
17323		m["seed2"] = value
17324	}
17325}
17326
17327// Performs fractional average pooling on the input.
17328//
17329// Fractional average pooling is similar to fractional max pooling in the pooling
17330// region generation step. The only difference is that after pooling regions are
17331// generated, a mean operation is performed instead of a max operation in each
17332// pooling region.
17333//
17334// Arguments:
17335//
17336//	value: 4-D with shape `[batch, height, width, channels]`.
17337//	pooling_ratio: Pooling ratio for each dimension of `value`; currently only
17338//	the row and col dimensions are supported, and each ratio should be >= 1.0.
17339//	For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The
17340//	first and last elements must be 1.0 because pooling on the batch and
17341//	channels dimensions is not allowed. 1.44 and 1.73 are the pooling ratios
17342//	on the height and width dimensions, respectively.
17344//
17345// Returns:
17346//
17347//	output: output tensor after fractional avg pooling.
17348//	row_pooling_sequence: row pooling sequence, needed to calculate gradient.
17349//	col_pooling_sequence: column pooling sequence, needed to calculate gradient.
17350func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
17351	if scope.Err() != nil {
17352		return
17353	}
17354	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
17355	for _, a := range optional {
17356		a(attrs)
17357	}
17358	opspec := tf.OpSpec{
17359		Type: "FractionalAvgPool",
17360		Input: []tf.Input{
17361			value,
17362		},
17363		Attrs: attrs,
17364	}
17365	op := scope.AddOperation(opspec)
17366	return op.Output(0), op.Output(1), op.Output(2)
17367}
17368
17369// FractionalAvgPoolGradAttr is an optional argument to FractionalAvgPoolGrad.
17370type FractionalAvgPoolGradAttr func(optionalAttr)
17371
17372// FractionalAvgPoolGradOverlapping sets the optional overlapping attribute to value.
17373//
17374// value: When set to True, the values at the boundary of adjacent pooling
17375// cells are used by both cells when pooling. For example:
17376//
17377// `index  0  1  2  3  4`
17378//
17379// `value  20 5  16 3  7`
17380//
17381// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
17382// The result would be [41/3, 26/3] for fractional avg pooling.
17383// If not specified, defaults to false
17384func FractionalAvgPoolGradOverlapping(value bool) FractionalAvgPoolGradAttr {
17385	return func(m optionalAttr) {
17386		m["overlapping"] = value
17387	}
17388}
17389
17390// Computes gradient of the FractionalAvgPool function.
17391//
17392// Unlike FractionalMaxPoolGrad, FractionalAvgPoolGrad does not need to find the
17393// arg_max; it just evenly back-propagates each element of out_backprop to the
17394// indices that form the same pooling cell. Therefore, it only needs the shape of
17395// the original input tensor, instead of the whole tensor.
17397//
17398// Arguments:
17399//
17400//	orig_input_tensor_shape: Original input tensor shape for `fractional_avg_pool`
17401//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
17402//
17403// w.r.t. the output of `fractional_avg_pool`.
17404//
17405//	row_pooling_sequence: row pooling sequence, form pooling region with
17406//
17407// col_pooling_sequence.
17408//
17409//	col_pooling_sequence: column pooling sequence, form pooling region with
17410//
17411// row_pooling sequence.
17412//
17413// Returns 4-D.  Gradients w.r.t. the input of `fractional_avg_pool`.
17414func FractionalAvgPoolGrad(scope *Scope, orig_input_tensor_shape tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalAvgPoolGradAttr) (output tf.Output) {
17415	if scope.Err() != nil {
17416		return
17417	}
17418	attrs := map[string]interface{}{}
17419	for _, a := range optional {
17420		a(attrs)
17421	}
17422	opspec := tf.OpSpec{
17423		Type: "FractionalAvgPoolGrad",
17424		Input: []tf.Input{
17425			orig_input_tensor_shape, out_backprop, row_pooling_sequence, col_pooling_sequence,
17426		},
17427		Attrs: attrs,
17428	}
17429	op := scope.AddOperation(opspec)
17430	return op.Output(0)
17431}
17432
17433// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
17434type FractionalMaxPoolAttr func(optionalAttr)
17435
17436// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
17437//
17438// value: When set to True, generates the pooling sequence in a
17439// pseudorandom fashion; otherwise, in a random fashion. See the paper [Benjamin
17440// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
17441// difference between pseudorandom and random.
17442// If not specified, defaults to false
17443func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
17444	return func(m optionalAttr) {
17445		m["pseudo_random"] = value
17446	}
17447}
17448
17449// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
17450//
17451// value: When set to True, the values at the boundary of adjacent pooling
17452// cells are used by both cells when pooling. For example:
17453//
17454// `index  0  1  2  3  4`
17455//
17456// `value  20 5  16 3  7`
17457//
17458// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
17459// The result would be [20, 16] for fractional max pooling.
17460// If not specified, defaults to false
17461func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
17462	return func(m optionalAttr) {
17463		m["overlapping"] = value
17464	}
17465}
17466
17467// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
17468//
17469// value: When set to True, a fixed pooling region will be used when
17470// iterating over a FractionalMaxPool node in the computation graph. Mainly used
17471// in unit tests to make FractionalMaxPool deterministic.
17472// If not specified, defaults to false
17473func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
17474	return func(m optionalAttr) {
17475		m["deterministic"] = value
17476	}
17477}
17478
17479// FractionalMaxPoolSeed sets the optional seed attribute to value.
17480//
17481// value: If either seed or seed2 is set to be non-zero, the random number
17482// generator is seeded by the given seed.  Otherwise, it is seeded by a
17483// random seed.
17484// If not specified, defaults to 0
17485func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
17486	return func(m optionalAttr) {
17487		m["seed"] = value
17488	}
17489}
17490
17491// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
17492//
17493// value: A second seed to avoid seed collision.
17494// If not specified, defaults to 0
17495func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
17496	return func(m optionalAttr) {
17497		m["seed2"] = value
17498	}
17499}
17500
17501// Performs fractional max pooling on the input.
17502//
17503// Fractional max pooling is slightly different from regular max pooling.  In
17504// regular max pooling, you downsize an input set by taking the maximum value of
17505// smaller N x N subsections of the set (often 2x2), and try to reduce the set by
17506// a factor of N, where N is an integer.  Fractional max pooling, as you might
17507// expect from the word "fractional", means that the overall reduction ratio N
17508// does not have to be an integer.
17509//
17510// The sizes of the pooling regions are generated randomly but are fairly uniform.
17511// For example, let's look at the height dimension, and the constraints on the
17512// list of rows that will be pool boundaries.
17513//
17514// First we define the following:
17515//
17516// 1.  input_row_length : the number of rows in the input set
17517// 2.  output_row_length : the number of rows in the output, smaller than the input
17518// 3.  alpha = input_row_length / output_row_length : our reduction ratio
17519// 4.  K = floor(alpha)
17520// 5.  row_pooling_sequence : this is the result list of pool boundary rows
17521//
17522// Then, row_pooling_sequence should satisfy:
17523//
17524// 1.  a[0] = 0 : the first value of the sequence is 0
17525// 2.  a[end] = input_row_length : the last value of the sequence is the size
17526// 3.  K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
17527// 4.  length(row_pooling_sequence) = output_row_length+1
17528//
17529// For more details on fractional max pooling, see this paper:
17530// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
17531//
17532// Arguments:
17533//
17534//	value: 4-D with shape `[batch, height, width, channels]`.
17535//	pooling_ratio: Pooling ratio for each dimension of `value`; currently only
17536//	the row and col dimensions are supported, and each ratio should be >= 1.0.
17537//	For example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The
17538//	first and last elements must be 1.0 because pooling on the batch and
17539//	channels dimensions is not allowed. 1.44 and 1.73 are the pooling ratios
17540//	on the height and width dimensions, respectively.
17542//
17543// Returns:
17544//
17545//	output: output tensor after fractional max pooling.
17546//	row_pooling_sequence: row pooling sequence, needed to calculate gradient.
17547//	col_pooling_sequence: column pooling sequence, needed to calculate gradient.
17548func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
17549	if scope.Err() != nil {
17550		return
17551	}
17552	attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
17553	for _, a := range optional {
17554		a(attrs)
17555	}
17556	opspec := tf.OpSpec{
17557		Type: "FractionalMaxPool",
17558		Input: []tf.Input{
17559			value,
17560		},
17561		Attrs: attrs,
17562	}
17563	op := scope.AddOperation(opspec)
17564	return op.Output(0), op.Output(1), op.Output(2)
17565}
17566
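// An editorial sketch that checks the four `row_pooling_sequence` constraints
// listed above (standalone Go; illustration only):
//
// ```
// func validPoolingSequence(seq []int, inputRows, outputRows int) bool {
// 	if len(seq) != outputRows+1 || seq[0] != 0 || seq[len(seq)-1] != inputRows {
// 		return false // constraints 1, 2 and 4
// 	}
// 	k := inputRows / outputRows // K = floor(alpha)
// 	for i := 0; i+1 < len(seq); i++ {
// 		if d := seq[i+1] - seq[i]; d < k || d > k+1 {
// 			return false // constraint 3: every interval is K or K+1
// 		}
// 	}
// 	return true
// }
// ```
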
17567// FractionalMaxPoolGradAttr is an optional argument to FractionalMaxPoolGrad.
17568type FractionalMaxPoolGradAttr func(optionalAttr)
17569
17570// FractionalMaxPoolGradOverlapping sets the optional overlapping attribute to value.
17571//
17572// value: When set to True, the values at the boundary of adjacent pooling
17573// cells are used by both cells when pooling. For example:
17574//
17575// `index  0  1  2  3  4`
17576//
17577// `value  20 5  16 3  7`
17578//
17579// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
17580// The result would be [20, 16] for fractional max pooling.
17581// If not specified, defaults to false
17582func FractionalMaxPoolGradOverlapping(value bool) FractionalMaxPoolGradAttr {
17583	return func(m optionalAttr) {
17584		m["overlapping"] = value
17585	}
17586}
17587
17588// Computes gradient of the FractionalMaxPool function.
17589//
17590// Arguments:
17591//
17592//	orig_input: Original input for `fractional_max_pool`
17593//	orig_output: Original output for `fractional_max_pool`
17594//	out_backprop: 4-D with shape `[batch, height, width, channels]`.  Gradients
17595//
17596// w.r.t. the output of `fractional_max_pool`.
17597//
17598//	row_pooling_sequence: row pooling sequence, form pooling region with
17599//
17600// col_pooling_sequence.
17601//
17602//	col_pooling_sequence: column pooling sequence, form pooling region with
17603//
17604// row_pooling sequence.
17605//
17606// Returns 4-D.  Gradients w.r.t. the input of `fractional_max_pool`.
17607func FractionalMaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, out_backprop tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output, optional ...FractionalMaxPoolGradAttr) (output tf.Output) {
17608	if scope.Err() != nil {
17609		return
17610	}
17611	attrs := map[string]interface{}{}
17612	for _, a := range optional {
17613		a(attrs)
17614	}
17615	opspec := tf.OpSpec{
17616		Type: "FractionalMaxPoolGrad",
17617		Input: []tf.Input{
17618			orig_input, orig_output, out_backprop, row_pooling_sequence, col_pooling_sequence,
17619		},
17620		Attrs: attrs,
17621	}
17622	op := scope.AddOperation(opspec)
17623	return op.Output(0)
17624}
17625
17626// FusedBatchNormAttr is an optional argument to FusedBatchNorm.
17627type FusedBatchNormAttr func(optionalAttr)
17628
17629// FusedBatchNormEpsilon sets the optional epsilon attribute to value.
17630//
17631// value: A small float number added to the variance of x.
17632// If not specified, defaults to 0.0001
17633func FusedBatchNormEpsilon(value float32) FusedBatchNormAttr {
17634	return func(m optionalAttr) {
17635		m["epsilon"] = value
17636	}
17637}
17638
17639// FusedBatchNormExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
17640// If not specified, defaults to 1
17641func FusedBatchNormExponentialAvgFactor(value float32) FusedBatchNormAttr {
17642	return func(m optionalAttr) {
17643		m["exponential_avg_factor"] = value
17644	}
17645}
17646
17647// FusedBatchNormDataFormat sets the optional data_format attribute to value.
17648//
17649// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
17650// If not specified, defaults to "NHWC"
17651func FusedBatchNormDataFormat(value string) FusedBatchNormAttr {
17652	return func(m optionalAttr) {
17653		m["data_format"] = value
17654	}
17655}
17656
17657// FusedBatchNormIsTraining sets the optional is_training attribute to value.
17658//
17659// value: A bool value to indicate the operation is for training (default)
17660// or inference.
17661// If not specified, defaults to true
17662func FusedBatchNormIsTraining(value bool) FusedBatchNormAttr {
17663	return func(m optionalAttr) {
17664		m["is_training"] = value
17665	}
17666}
17667
17668// Batch normalization.
17669//
17670// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
17671// The size of 1D Tensors matches the dimension C of the 4D Tensors.
17672//
17673// Arguments:
17674//
17675//	x: A 4D Tensor for input data.
17676//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
17677//	offset: A 1D Tensor for offset, to shift the normalized x.
17678//	mean: A 1D Tensor for population mean. Used for inference only;
17679//
17680// must be empty for training.
17681//
17682//	variance: A 1D Tensor for population variance. Used for inference only;
17683//
17684// must be empty for training.
17685//
17686// Returns:
17687//
17688//	y: A 4D Tensor for output data.
17689//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
17690//
17691// to compute the running mean.
17692//
17693//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
17694//
17695// TensorFlow to compute the running variance.
17696//
17697//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
17698//
17699// in the gradient computation.
17700//
17701//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
17702//
17703// in the cuDNN case), to be reused in the gradient computation.
17704func FusedBatchNorm(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormAttr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
17705	if scope.Err() != nil {
17706		return
17707	}
17708	attrs := map[string]interface{}{}
17709	for _, a := range optional {
17710		a(attrs)
17711	}
17712	opspec := tf.OpSpec{
17713		Type: "FusedBatchNorm",
17714		Input: []tf.Input{
17715			x, scale, offset, mean, variance,
17716		},
17717		Attrs: attrs,
17718	}
17719	op := scope.AddOperation(opspec)
17720	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
17721}
17722
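// The per-channel normalization these fused ops implement is the standard
// batch-norm formula, y = scale * (x - mean) / sqrt(variance + epsilon) + offset;
// a scalar editorial sketch (`math` import assumed):
//
// ```
// func batchNorm(x, mean, variance, scale, offset, epsilon float32) float32 {
// 	inv := 1 / float32(math.Sqrt(float64(variance+epsilon)))
// 	return scale*(x-mean)*inv + offset
// }
// ```
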
17723// FusedBatchNormGradAttr is an optional argument to FusedBatchNormGrad.
17724type FusedBatchNormGradAttr func(optionalAttr)
17725
17726// FusedBatchNormGradEpsilon sets the optional epsilon attribute to value.
17727//
17728// value: A small float number added to the variance of x.
17729// If not specified, defaults to 0.0001
17730func FusedBatchNormGradEpsilon(value float32) FusedBatchNormGradAttr {
17731	return func(m optionalAttr) {
17732		m["epsilon"] = value
17733	}
17734}
17735
17736// FusedBatchNormGradDataFormat sets the optional data_format attribute to value.
17737//
17738// value: The data format for y_backprop, x, x_backprop.
17739// Either "NHWC" (default) or "NCHW".
17740// If not specified, defaults to "NHWC"
17741func FusedBatchNormGradDataFormat(value string) FusedBatchNormGradAttr {
17742	return func(m optionalAttr) {
17743		m["data_format"] = value
17744	}
17745}
17746
17747// FusedBatchNormGradIsTraining sets the optional is_training attribute to value.
17748//
17749// value: A bool value to indicate the operation is for training (default)
17750// or inference.
17751// If not specified, defaults to true
17752func FusedBatchNormGradIsTraining(value bool) FusedBatchNormGradAttr {
17753	return func(m optionalAttr) {
17754		m["is_training"] = value
17755	}
17756}
17757
17758// Gradient for batch normalization.
17759//
17760// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
17761// The size of 1D Tensors matches the dimension C of the 4D Tensors.
17762//
17763// Arguments:
17764//
17765//	y_backprop: A 4D Tensor for the gradient with respect to y.
17766//	x: A 4D Tensor for input data.
17767//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
17768//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
17769//
17770// mean to be reused in gradient computation. When is_training is
17771// False, a 1D Tensor for the population mean to be reused in both
17772// 1st and 2nd order gradient computation.
17773//
17774//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
17775//
17776// variance (inverted variance in the cuDNN case) to be reused in
17777// gradient computation. When is_training is False, a 1D Tensor
17778// for the population variance to be reused in both 1st and 2nd
17779// order gradient computation.
17780//
17781// Returns:
17782//
17783//	x_backprop: A 4D Tensor for the gradient with respect to x.
17784//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
17785//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
17786//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
17787//	reserve_space_4: Unused placeholder to match the variance input
17788//
17789// in FusedBatchNorm.
17790func FusedBatchNormGrad(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradAttr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
17791	if scope.Err() != nil {
17792		return
17793	}
17794	attrs := map[string]interface{}{}
17795	for _, a := range optional {
17796		a(attrs)
17797	}
17798	opspec := tf.OpSpec{
17799		Type: "FusedBatchNormGrad",
17800		Input: []tf.Input{
17801			y_backprop, x, scale, reserve_space_1, reserve_space_2,
17802		},
17803		Attrs: attrs,
17804	}
17805	op := scope.AddOperation(opspec)
17806	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
17807}
17808
17809// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2.
17810type FusedBatchNormGradV2Attr func(optionalAttr)
17811
17812// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value.
17813//
17814// value: A small float number added to the variance of x.
17815// If not specified, defaults to 0.0001
17816func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr {
17817	return func(m optionalAttr) {
17818		m["epsilon"] = value
17819	}
17820}
17821
17822// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value.
17823//
17824// value: The data format for y_backprop, x, x_backprop.
17825// Either "NHWC" (default) or "NCHW".
17826// If not specified, defaults to "NHWC"
17827func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr {
17828	return func(m optionalAttr) {
17829		m["data_format"] = value
17830	}
17831}
17832
17833// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value.
17834//
17835// value: A bool value to indicate the operation is for training (default)
17836// or inference.
17837// If not specified, defaults to true
17838func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr {
17839	return func(m optionalAttr) {
17840		m["is_training"] = value
17841	}
17842}
17843
17844// Gradient for batch normalization.
17845//
17846// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
17847// The size of 1D Tensors matches the dimension C of the 4D Tensors.
17848//
17849// Arguments:
17850//
17851//	y_backprop: A 4D Tensor for the gradient with respect to y.
17852//	x: A 4D Tensor for input data.
17853//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
17854//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
17855//
17856// mean to be reused in gradient computation. When is_training is
17857// False, a 1D Tensor for the population mean to be reused in both
17858// 1st and 2nd order gradient computation.
17859//
17860//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
17861//
17862// variance (inverted variance in the cuDNN case) to be reused in
17863// gradient computation. When is_training is False, a 1D Tensor
17864// for the population variance to be reused in both 1st and 2nd
17865// order gradient computation.
17866//
17867// Returns:
17868//
17869//	x_backprop: A 4D Tensor for the gradient with respect to x.
17870//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
17871//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
17872//	reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm.
17873//	reserve_space_4: Unused placeholder to match the variance input
17874//
17875// in FusedBatchNorm.
17876func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) {
17877	if scope.Err() != nil {
17878		return
17879	}
17880	attrs := map[string]interface{}{}
17881	for _, a := range optional {
17882		a(attrs)
17883	}
17884	opspec := tf.OpSpec{
17885		Type: "FusedBatchNormGradV2",
17886		Input: []tf.Input{
17887			y_backprop, x, scale, reserve_space_1, reserve_space_2,
17888		},
17889		Attrs: attrs,
17890	}
17891	op := scope.AddOperation(opspec)
17892	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
17893}
17894
17895// FusedBatchNormGradV3Attr is an optional argument to FusedBatchNormGradV3.
17896type FusedBatchNormGradV3Attr func(optionalAttr)
17897
17898// FusedBatchNormGradV3Epsilon sets the optional epsilon attribute to value.
17899//
17900// value: A small float number added to the variance of x.
17901// If not specified, defaults to 0.0001
17902func FusedBatchNormGradV3Epsilon(value float32) FusedBatchNormGradV3Attr {
17903	return func(m optionalAttr) {
17904		m["epsilon"] = value
17905	}
17906}
17907
17908// FusedBatchNormGradV3DataFormat sets the optional data_format attribute to value.
17909//
17910// value: The data format for y_backprop, x, x_backprop.
17911// Either "NHWC" (default) or "NCHW".
17912// If not specified, defaults to "NHWC"
17913func FusedBatchNormGradV3DataFormat(value string) FusedBatchNormGradV3Attr {
17914	return func(m optionalAttr) {
17915		m["data_format"] = value
17916	}
17917}
17918
17919// FusedBatchNormGradV3IsTraining sets the optional is_training attribute to value.
17920//
17921// value: A bool value to indicate the operation is for training (default)
17922// or inference.
17923// If not specified, defaults to true
17924func FusedBatchNormGradV3IsTraining(value bool) FusedBatchNormGradV3Attr {
17925	return func(m optionalAttr) {
17926		m["is_training"] = value
17927	}
17928}
17929
17930// Gradient for batch normalization.
17931//
17932// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
17933// The size of 1D Tensors matches the dimension C of the 4D Tensors.
17934//
17935// Arguments:
17936//
17937//	y_backprop: A 4D Tensor for the gradient with respect to y.
17938//	x: A 4D Tensor for input data.
17939//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
17940//	reserve_space_1: When is_training is True, a 1D Tensor for the computed batch
17941//
17942// mean to be reused in gradient computation. When is_training is
17943// False, a 1D Tensor for the population mean to be reused in both
17944// 1st and 2nd order gradient computation.
17945//
17946//	reserve_space_2: When is_training is True, a 1D Tensor for the computed batch
17947//
17948// variance (inverted variance in the cuDNN case) to be reused in
17949// gradient computation. When is_training is False, a 1D Tensor
17950// for the population variance to be reused in both 1st and 2nd
17951// order gradient computation.
17952//
17953//	reserve_space_3: When is_training is True, a 1D Tensor for some intermediate results to be reused
17954//
17955// in gradient computation. When is_training is False, a dummy empty Tensor will be
17956// created.
17957//
17958// Returns:
17959//
17960//	x_backprop: A 4D Tensor for the gradient with respect to x.
17961//	scale_backprop: A 1D Tensor for the gradient with respect to scale.
17962//	offset_backprop: A 1D Tensor for the gradient with respect to offset.
17963//	reserve_space_4: Unused placeholder to match the mean input in FusedBatchNorm.
17964//	reserve_space_5: Unused placeholder to match the variance input
17965//
17966// in FusedBatchNorm.
17967func FusedBatchNormGradV3(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output, optional ...FusedBatchNormGradV3Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_4 tf.Output, reserve_space_5 tf.Output) {
17968	if scope.Err() != nil {
17969		return
17970	}
17971	attrs := map[string]interface{}{}
17972	for _, a := range optional {
17973		a(attrs)
17974	}
17975	opspec := tf.OpSpec{
17976		Type: "FusedBatchNormGradV3",
17977		Input: []tf.Input{
17978			y_backprop, x, scale, reserve_space_1, reserve_space_2, reserve_space_3,
17979		},
17980		Attrs: attrs,
17981	}
17982	op := scope.AddOperation(opspec)
17983	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
17984}
17985
17986// FusedBatchNormV2Attr is an optional argument to FusedBatchNormV2.
17987type FusedBatchNormV2Attr func(optionalAttr)
17988
17989// FusedBatchNormV2Epsilon sets the optional epsilon attribute to value.
17990//
17991// value: A small float number added to the variance of x.
17992// If not specified, defaults to 0.0001
17993func FusedBatchNormV2Epsilon(value float32) FusedBatchNormV2Attr {
17994	return func(m optionalAttr) {
17995		m["epsilon"] = value
17996	}
17997}
17998
17999// FusedBatchNormV2ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
18000// If not specified, defaults to 1
18001func FusedBatchNormV2ExponentialAvgFactor(value float32) FusedBatchNormV2Attr {
18002	return func(m optionalAttr) {
18003		m["exponential_avg_factor"] = value
18004	}
18005}
18006
18007// FusedBatchNormV2DataFormat sets the optional data_format attribute to value.
18008//
18009// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
18010// If not specified, defaults to "NHWC"
18011func FusedBatchNormV2DataFormat(value string) FusedBatchNormV2Attr {
18012	return func(m optionalAttr) {
18013		m["data_format"] = value
18014	}
18015}
18016
18017// FusedBatchNormV2IsTraining sets the optional is_training attribute to value.
18018//
// value: A bool value to indicate whether the operation is for training
// (default) or inference.
18021// If not specified, defaults to true
18022func FusedBatchNormV2IsTraining(value bool) FusedBatchNormV2Attr {
18023	return func(m optionalAttr) {
18024		m["is_training"] = value
18025	}
18026}
18027
18028// Batch normalization.
18029//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
18031// The size of 1D Tensors matches the dimension C of the 4D Tensors.
18032//
18033// Arguments:
18034//
18035//	x: A 4D Tensor for input data.
18036//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
18037//	offset: A 1D Tensor for offset, to shift to the normalized x.
18038//	mean: A 1D Tensor for population mean. Used for inference only;
18039//
18040// must be empty for training.
18041//
18042//	variance: A 1D Tensor for population variance. Used for inference only;
18043//
18044// must be empty for training.
18045//
18046// Returns:
18047//
18048//	y: A 4D Tensor for output data.
18049//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
18050//
18051// to compute the running mean.
18052//
18053//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
18054//
18055// TensorFlow to compute the running variance.
18056//
18057//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
18058//
18059// in the gradient computation.
18060//
18061//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
18062//
18063// in the cuDNN case), to be reused in the gradient computation.
18064func FusedBatchNormV2(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV2Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output) {
18065	if scope.Err() != nil {
18066		return
18067	}
18068	attrs := map[string]interface{}{}
18069	for _, a := range optional {
18070		a(attrs)
18071	}
18072	opspec := tf.OpSpec{
18073		Type: "FusedBatchNormV2",
18074		Input: []tf.Input{
18075			x, scale, offset, mean, variance,
18076		},
18077		Attrs: attrs,
18078	}
18079	op := scope.AddOperation(opspec)
18080	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
18081}
18082
18083// FusedBatchNormV3Attr is an optional argument to FusedBatchNormV3.
18084type FusedBatchNormV3Attr func(optionalAttr)
18085
18086// FusedBatchNormV3Epsilon sets the optional epsilon attribute to value.
18087//
18088// value: A small float number added to the variance of x.
18089// If not specified, defaults to 0.0001
18090func FusedBatchNormV3Epsilon(value float32) FusedBatchNormV3Attr {
18091	return func(m optionalAttr) {
18092		m["epsilon"] = value
18093	}
18094}
18095
18096// FusedBatchNormV3ExponentialAvgFactor sets the optional exponential_avg_factor attribute to value.
18097// If not specified, defaults to 1
18098func FusedBatchNormV3ExponentialAvgFactor(value float32) FusedBatchNormV3Attr {
18099	return func(m optionalAttr) {
18100		m["exponential_avg_factor"] = value
18101	}
18102}
18103
18104// FusedBatchNormV3DataFormat sets the optional data_format attribute to value.
18105//
18106// value: The data format for x and y. Either "NHWC" (default) or "NCHW".
18107// If not specified, defaults to "NHWC"
18108func FusedBatchNormV3DataFormat(value string) FusedBatchNormV3Attr {
18109	return func(m optionalAttr) {
18110		m["data_format"] = value
18111	}
18112}
18113
18114// FusedBatchNormV3IsTraining sets the optional is_training attribute to value.
18115//
18116// value: A bool value to indicate the operation is for training (default)
18117// or inference.
18118// If not specified, defaults to true
18119func FusedBatchNormV3IsTraining(value bool) FusedBatchNormV3Attr {
18120	return func(m optionalAttr) {
18121		m["is_training"] = value
18122	}
18123}
18124
18125// Batch normalization.
18126//
// Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
18128// The size of 1D Tensors matches the dimension C of the 4D Tensors.
18129//
18130// Arguments:
18131//
18132//	x: A 4D Tensor for input data.
18133//	scale: A 1D Tensor for scaling factor, to scale the normalized x.
18134//	offset: A 1D Tensor for offset, to shift to the normalized x.
18135//	mean: A 1D Tensor for population mean. Used for inference only;
18136//
18137// must be empty for training.
18138//
18139//	variance: A 1D Tensor for population variance. Used for inference only;
18140//
18141// must be empty for training.
18142//
18143// Returns:
18144//
18145//	y: A 4D Tensor for output data.
18146//	batch_mean: A 1D Tensor for the computed batch mean, to be used by TensorFlow
18147//
18148// to compute the running mean.
18149//
18150//	batch_variance: A 1D Tensor for the computed batch variance, to be used by
18151//
18152// TensorFlow to compute the running variance.
18153//
18154//	reserve_space_1: A 1D Tensor for the computed batch mean, to be reused
18155//
18156// in the gradient computation.
18157//
18158//	reserve_space_2: A 1D Tensor for the computed batch variance (inverted variance
18159//
18160// in the cuDNN case), to be reused in the gradient computation.
18161//
18162//	reserve_space_3: A 1D Tensor for some intermediate results, to be reused in the gradient
18163//
18164// computation for better efficiency.
18165func FusedBatchNormV3(scope *Scope, x tf.Output, scale tf.Output, offset tf.Output, mean tf.Output, variance tf.Output, optional ...FusedBatchNormV3Attr) (y tf.Output, batch_mean tf.Output, batch_variance tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, reserve_space_3 tf.Output) {
18166	if scope.Err() != nil {
18167		return
18168	}
18169	attrs := map[string]interface{}{}
18170	for _, a := range optional {
18171		a(attrs)
18172	}
18173	opspec := tf.OpSpec{
18174		Type: "FusedBatchNormV3",
18175		Input: []tf.Input{
18176			x, scale, offset, mean, variance,
18177		},
18178		Attrs: attrs,
18179	}
18180	op := scope.AddOperation(opspec)
18181	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5)
18182}
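
// A minimal usage sketch for FusedBatchNormV3 in inference mode (illustrative
// values only, not generated code): `tf` and `op` are the usual client import
// aliases for the core Go binding and this package, and error handling is
// elided. With is_training=false, `mean` and `variance` must hold the
// population statistics rather than be empty.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][][][]float32{{{{1, 2, 3}, {4, 5, 6}}}}) // NHWC, C=3
// scale := op.Const(s, []float32{1, 1, 1})
// offset := op.Const(s, []float32{0, 0, 0})
// mean := op.Const(s, []float32{2.5, 3.5, 4.5})
// variance := op.Const(s, []float32{2.25, 2.25, 2.25})
// y, _, _, _, _, _ := op.FusedBatchNormV3(s, x, scale, offset, mean, variance,
// 	op.FusedBatchNormV3IsTraining(false))
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// res, _ := sess.Run(nil, []tf.Output{y}, nil)
// _ = res // res[0] holds the normalized NHWC tensor
// ```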
18183
18184// Performs a padding as a preprocess during a convolution.
18185//
// Similar to FusedResizeAndPadConv2D, this op allows for an optimized
18187// implementation where the spatial padding transformation stage is fused with the
18188// im2col lookup, but in this case without the bilinear filtering required for
18189// resizing. Fusing the padding prevents the need to write out the intermediate
18190// results as whole tensors, reducing memory pressure, and we can get some latency
18191// gains by merging the transformation calculations.
18192// The data_format attribute for Conv2D isn't supported by this op, and 'NHWC'
18193// order is used instead.
18194// Internally this op uses a single per-graph scratch buffer, which means that it
18195// will block if multiple versions are being run in parallel. This is because this
18196// operator is primarily an optimization to minimize memory usage.
18197//
18198// Arguments:
18199//
18200//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
18201//	paddings: A two-column matrix specifying the padding sizes. The number of
18202//
18203// rows must be the same as the rank of `input`.
18204//
18205//	filter: 4-D with shape
18206//
18207// `[filter_height, filter_width, in_channels, out_channels]`.
18208//
18209//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
18210//
18211// of `input`. Must be in the same order as the dimension specified with format.
18212//
18213//	padding: The type of padding algorithm to use.
18214func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string) (output tf.Output) {
18215	if scope.Err() != nil {
18216		return
18217	}
18218	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
18219	opspec := tf.OpSpec{
18220		Type: "FusedPadConv2D",
18221		Input: []tf.Input{
18222			input, paddings, filter,
18223		},
18224		Attrs: attrs,
18225	}
18226	op := scope.AddOperation(opspec)
18227	return op.Output(0)
18228}
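
// A hedged usage sketch (not generated code; client imports `tf`/`op`
// assumed, errors elided). The `mode` string is assumed here to take the
// MirrorPad modes "REFLECT" or "SYMMETRIC"; all tensor values are
// illustrative only.
//
// ```go
// s := op.NewScope()
// input := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})    // [1, 2, 2, 1]
// paddings := op.Const(s, [][]int32{{0, 0}, {1, 1}, {1, 1}, {0, 0}}) // rank(input) rows
// filter := op.Const(s, [][][][]float32{{{{1}}}})                    // 1x1 identity filter
// out := op.FusedPadConv2D(s, input, paddings, filter,
// 	"REFLECT", []int64{1, 1, 1, 1}, "VALID")
// _ = out // 4x4 spatial output: pad, then convolve, in one fused op
// ```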
18229
18230// FusedResizeAndPadConv2DAttr is an optional argument to FusedResizeAndPadConv2D.
18231type FusedResizeAndPadConv2DAttr func(optionalAttr)
18232
18233// FusedResizeAndPadConv2DResizeAlignCorners sets the optional resize_align_corners attribute to value.
18234//
18235// value: If true, the centers of the 4 corner pixels of the input and output tensors are
18236// aligned, preserving the values at the corner pixels. Defaults to false.
18237// If not specified, defaults to false
18238func FusedResizeAndPadConv2DResizeAlignCorners(value bool) FusedResizeAndPadConv2DAttr {
18239	return func(m optionalAttr) {
18240		m["resize_align_corners"] = value
18241	}
18242}
18243
18244// Performs a resize and padding as a preprocess during a convolution.
18245//
18246// It's often possible to do spatial transformations more efficiently as part of
18247// the packing stage of a convolution, so this op allows for an optimized
18248// implementation where these stages are fused together. This prevents the need to
18249// write out the intermediate results as whole tensors, reducing memory pressure,
18250// and we can get some latency gains by merging the transformation calculations.
18251// The data_format attribute for Conv2D isn't supported by this op, and defaults to
18252// 'NHWC' order.
18253// Internally this op uses a single per-graph scratch buffer, which means that it
18254// will block if multiple versions are being run in parallel. This is because this
18255// operator is primarily an optimization to minimize memory usage.
18256//
18257// Arguments:
18258//
18259//	input: 4-D with shape `[batch, in_height, in_width, in_channels]`.
18260//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
18261//
18262// new size for the images.
18263//
18264//	paddings: A two-column matrix specifying the padding sizes. The number of
18265//
18266// rows must be the same as the rank of `input`.
18267//
18268//	filter: 4-D with shape
18269//
18270// `[filter_height, filter_width, in_channels, out_channels]`.
18271//
18272//	strides: 1-D of length 4.  The stride of the sliding window for each dimension
18273//
18274// of `input`. Must be in the same order as the dimension specified with format.
18275//
18276//	padding: The type of padding algorithm to use.
18277func FusedResizeAndPadConv2D(scope *Scope, input tf.Output, size tf.Output, paddings tf.Output, filter tf.Output, mode string, strides []int64, padding string, optional ...FusedResizeAndPadConv2DAttr) (output tf.Output) {
18278	if scope.Err() != nil {
18279		return
18280	}
18281	attrs := map[string]interface{}{"mode": mode, "strides": strides, "padding": padding}
18282	for _, a := range optional {
18283		a(attrs)
18284	}
18285	opspec := tf.OpSpec{
18286		Type: "FusedResizeAndPadConv2D",
18287		Input: []tf.Input{
18288			input, size, paddings, filter,
18289		},
18290		Attrs: attrs,
18291	}
18292	op := scope.AddOperation(opspec)
18293	return op.Output(0)
18294}
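
// A sketch of the resize-then-pad variant, under the same assumptions as the
// FusedPadConv2D example above (client imports, illustrative values, errors
// elided). `size` supplies the bilinear-resize target applied before padding.
//
// ```go
// s := op.NewScope()
// input := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [1, 2, 2, 1]
// size := op.Const(s, []int32{4, 4})                              // resize to 4x4
// paddings := op.Const(s, [][]int32{{0, 0}, {1, 1}, {1, 1}, {0, 0}})
// filter := op.Const(s, [][][][]float32{{{{1}}}})
// out := op.FusedResizeAndPadConv2D(s, input, size, paddings, filter,
// 	"REFLECT", []int64{1, 1, 1, 1}, "VALID",
// 	op.FusedResizeAndPadConv2DResizeAlignCorners(true))
// _ = out
// ```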
18295
18296// Computes the GRU cell forward propagation for 1 time step.
18297//
// Arguments:
18299//
18300//	x: Input to the GRU cell.
18301//	h_prev: State input from the previous GRU cell.
18302//	w_ru: Weight matrix for the reset and update gate.
18303//	w_c: Weight matrix for the cell connection gate.
18304//	b_ru: Bias vector for the reset and update gate.
18305//	b_c: Bias vector for the cell connection gate.
18306//
// Returns:
18308//
18309//	r: Output of the reset gate.
18310//	u: Output of the update gate.
18311//	c: Output of the cell connection gate.
18312//	h: Current state of the GRU cell.
18313//
18314// Note on notation of the variables:
18315//
// Concatenation of a and b is represented by a_b
// Element-wise (Hadamard) product of a and b is represented by ab,
// or equivalently by a \circ b
18319// Matrix multiplication is represented by *
18320//
// Biases are initialized with:
18322// `b_ru` - constant_initializer(1.0)
18323// `b_c` - constant_initializer(0.0)
18324//
18325// This kernel op implements the following mathematical equations:
18326//
18327// ```
18328// x_h_prev = [x, h_prev]
18329//
18330// [r_bar u_bar] = x_h_prev * w_ru + b_ru
18331//
18332// r = sigmoid(r_bar)
18333// u = sigmoid(u_bar)
18334//
18335// h_prevr = h_prev \circ r
18336//
18337// x_h_prevr = [x h_prevr]
18338//
18339// c_bar = x_h_prevr * w_c + b_c
18340// c = tanh(c_bar)
18341//
18342// h = (1-u) \circ c + u \circ h_prev
18343// ```
18344func GRUBlockCell(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output) (r tf.Output, u tf.Output, c tf.Output, h tf.Output) {
18345	if scope.Err() != nil {
18346		return
18347	}
18348	opspec := tf.OpSpec{
18349		Type: "GRUBlockCell",
18350		Input: []tf.Input{
18351			x, h_prev, w_ru, w_c, b_ru, b_c,
18352		},
18353	}
18354	op := scope.AddOperation(opspec)
18355	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
18356}
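
// A minimal sketch wiring a single GRU step (illustrative shapes with
// input_size = 1 and cell_size = 1; client imports assumed, errors elided).
// Per the equations above, w_ru is [input_size+cell_size, 2*cell_size] and
// w_c is [input_size+cell_size, cell_size].
//
// ```go
// s := op.NewScope()
// x := op.Const(s, [][]float32{{0.5}})     // [batch=1, input_size=1]
// hPrev := op.Const(s, [][]float32{{0.0}}) // [batch=1, cell_size=1]
// wRu := op.Const(s, [][]float32{{0.1, 0.2}, {0.3, 0.4}}) // [2, 2]
// wC := op.Const(s, [][]float32{{0.5}, {0.6}})            // [2, 1]
// bRu := op.Const(s, []float32{1.0, 1.0})                 // constant_initializer(1.0)
// bC := op.Const(s, []float32{0.0})                       // constant_initializer(0.0)
// _, _, _, h := op.GRUBlockCell(s, x, hPrev, wRu, wC, bRu, bC)
// _ = h // next state; feed it back as h_prev for the following time step
// ```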
18357
18358// Computes the GRU cell back-propagation for 1 time step.
18359//
// Arguments:
18361//
18362//	x: Input to the GRU cell.
18363//	h_prev: State input from the previous GRU cell.
18364//	w_ru: Weight matrix for the reset and update gate.
18365//	w_c: Weight matrix for the cell connection gate.
18366//	b_ru: Bias vector for the reset and update gate.
18367//	b_c: Bias vector for the cell connection gate.
18368//	r: Output of the reset gate.
18369//	u: Output of the update gate.
18370//	c: Output of the cell connection gate.
//	d_h: Gradients of h_new with respect to the objective function.
18372//
// Returns:
18374//
//	d_x: Gradients of x with respect to the objective function.
//	d_h_prev: Gradients of h with respect to the objective function.
//	d_c_bar: Gradients of c_bar with respect to the objective function.
//	d_r_bar_u_bar: Gradients of r_bar and u_bar with respect to the objective function.
18379//
18380// This kernel op implements the following mathematical equations:
18381//
18382// Note on notation of the variables:
18383//
// Concatenation of a and b is represented by a_b
// Element-wise (Hadamard) product of a and b is represented by ab,
// or equivalently by a \circ b
18387// Matrix multiplication is represented by *
18388//
18389// Additional notes for clarity:
18390//
18391// `w_ru` can be segmented into 4 different matrices.
18392// ```
// w_ru = [w_r_x      w_u_x
//	w_r_h_prev w_u_h_prev]
18396//
18397// ```
18398// Similarly, `w_c` can be segmented into 2 different matrices.
18399// ```
18400// w_c = [w_c_x w_c_h_prevr]
18401// ```
18402// Same goes for biases.
18403// ```
18404// b_ru = [b_ru_x b_ru_h]
18405// b_c = [b_c_x b_c_h]
18406// ```
18407// Another note on notation:
18408// ```
18409// d_x = d_x_component_1 + d_x_component_2
18410//
// where d_x_component_1 = d_r_bar * w_r_x^T + d_u_bar * w_u_x^T
18412// and d_x_component_2 = d_c_bar * w_c_x^T
18413//
18414// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
// where d_h_prev_component_1 = d_r_bar * w_r_h_prev^T + d_u_bar * w_u_h_prev^T
18416// ```
18417//
18418// Mathematics behind the Gradients below:
18419// ```
18420// d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
18421// d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)
18422//
18423// d_r_bar_u_bar = [d_r_bar d_u_bar]
18424//
18425// [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T
18426//
18427// [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T
18428//
18429// d_x = d_x_component_1 + d_x_component_2
18430//
// d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
18432// ```
// The calculation below is performed in the Python wrapper for the gradients
// (not in the gradient kernel).
18435// ```
// d_w_ru = x_h_prev^T * d_r_bar_u_bar
//
// d_w_c = x_h_prevr^T * d_c_bar
18439//
18440// d_b_ru = sum of d_r_bar_u_bar along axis = 0
18441//
18442// d_b_c = sum of d_c_bar along axis = 0
18443// ```
18444func GRUBlockCellGrad(scope *Scope, x tf.Output, h_prev tf.Output, w_ru tf.Output, w_c tf.Output, b_ru tf.Output, b_c tf.Output, r tf.Output, u tf.Output, c tf.Output, d_h tf.Output) (d_x tf.Output, d_h_prev tf.Output, d_c_bar tf.Output, d_r_bar_u_bar tf.Output) {
18445	if scope.Err() != nil {
18446		return
18447	}
18448	opspec := tf.OpSpec{
18449		Type: "GRUBlockCellGrad",
18450		Input: []tf.Input{
18451			x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h,
18452		},
18453	}
18454	op := scope.AddOperation(opspec)
18455	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
18456}
18457
18458// GatherAttr is an optional argument to Gather.
18459type GatherAttr func(optionalAttr)
18460
18461// GatherValidateIndices sets the optional validate_indices attribute to value.
18462// If not specified, defaults to true
18463func GatherValidateIndices(value bool) GatherAttr {
18464	return func(m optionalAttr) {
18465		m["validate_indices"] = value
18466	}
18467}
18468
18469// Gather slices from `params` according to `indices`.
18470//
18471// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
18472// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
18473//
18474// ```python
18475//
18476//	# Scalar indices
//	output[:, ..., :] = params[indices, :, ..., :]
18478//
18479//	# Vector indices
//	output[i, :, ..., :] = params[indices[i], :, ..., :]
18481//
18482//	# Higher rank indices
//	output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
18484//
18485// ```
18486//
18487// If `indices` is a permutation and `len(indices) == params.shape[0]` then
18488// this operation will permute `params` accordingly.
18489//
18490// `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
18491// `indices` are always validated to be within range. If assigned to GPU,
18492// out-of-bound indices result in safe but unspecified behavior, which may include
18493// raising an error.
18494//
18495// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
18496// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
18497// </div>
18498func Gather(scope *Scope, params tf.Output, indices tf.Output, optional ...GatherAttr) (output tf.Output) {
18499	if scope.Err() != nil {
18500		return
18501	}
18502	attrs := map[string]interface{}{}
18503	for _, a := range optional {
18504		a(attrs)
18505	}
18506	opspec := tf.OpSpec{
18507		Type: "Gather",
18508		Input: []tf.Input{
18509			params, indices,
18510		},
18511		Attrs: attrs,
18512	}
18513	op := scope.AddOperation(opspec)
18514	return op.Output(0)
18515}
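
// A minimal usage sketch (illustrative values; client imports `tf` and `op`
// assumed, error handling elided) mirroring the vector-indices case above:
//
// ```go
// s := op.NewScope()
// params := op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
// indices := op.Const(s, []int32{2, 0})
// out := op.Gather(s, params, indices)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// res, _ := sess.Run(nil, []tf.Output{out}, nil)
// _ = res // res[0].Value() => [[5 6] [1 2]]
// ```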
18516
18517// Gather slices from `params` into a Tensor with shape specified by `indices`.
18518//
18519// `indices` is a K-dimensional integer tensor, best thought of as a
18520// (K-1)-dimensional tensor of indices into `params`, where each element defines a
18521// slice of `params`:
18522//
18523//	output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
18524//
18525// Whereas in `tf.gather` `indices` defines slices into the `axis`
18526// dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
18527// first `N` dimensions of `params`, where `N = indices.shape[-1]`.
18528//
18529// The last dimension of `indices` can be at most the rank of
18530// `params`:
18531//
18532//	indices.shape[-1] <= params.rank
18533//
18534// The last dimension of `indices` corresponds to elements
18535// (if `indices.shape[-1] == params.rank`) or slices
18536// (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
18537// of `params`.  The output tensor has shape
18538//
18539//	indices.shape[:-1] + params.shape[indices.shape[-1]:]
18540//
// Note that on CPU, if an out-of-bounds index is found, an error is returned.
// On GPU, if an out-of-bounds index is found, a 0 is stored in the
// corresponding output value.
18544//
18545// Some examples below.
18546//
18547// Simple indexing into a matrix:
18548//
18549// ```python
18550//
18551//	indices = [[0, 0], [1, 1]]
18552//	params = [['a', 'b'], ['c', 'd']]
18553//	output = ['a', 'd']
18554//
18555// ```
18556//
18557// Slice indexing into a matrix:
18558//
18559// ```python
18560//
18561//	indices = [[1], [0]]
18562//	params = [['a', 'b'], ['c', 'd']]
18563//	output = [['c', 'd'], ['a', 'b']]
18564//
18565// ```
18566//
18567// Indexing into a 3-tensor:
18568//
18569// ```python
18570//
18571//	indices = [[1]]
18572//	params = [[['a0', 'b0'], ['c0', 'd0']],
18573//	          [['a1', 'b1'], ['c1', 'd1']]]
18574//	output = [[['a1', 'b1'], ['c1', 'd1']]]
18575//
18576//
18577//	indices = [[0, 1], [1, 0]]
18578//	params = [[['a0', 'b0'], ['c0', 'd0']],
18579//	          [['a1', 'b1'], ['c1', 'd1']]]
18580//	output = [['c0', 'd0'], ['a1', 'b1']]
18581//
18582//
18583//	indices = [[0, 0, 1], [1, 0, 1]]
18584//	params = [[['a0', 'b0'], ['c0', 'd0']],
18585//	          [['a1', 'b1'], ['c1', 'd1']]]
18586//	output = ['b0', 'b1']
18587//
18588// ```
18589//
18590// Batched indexing into a matrix:
18591//
18592// ```python
18593//
18594//	indices = [[[0, 0]], [[0, 1]]]
18595//	params = [['a', 'b'], ['c', 'd']]
18596//	output = [['a'], ['b']]
18597//
18598// ```
18599//
18600// Batched slice indexing into a matrix:
18601//
18602// ```python
18603//
18604//	indices = [[[1]], [[0]]]
18605//	params = [['a', 'b'], ['c', 'd']]
18606//	output = [[['c', 'd']], [['a', 'b']]]
18607//
18608// ```
18609//
18610// Batched indexing into a 3-tensor:
18611//
18612// ```python
18613//
18614//	indices = [[[1]], [[0]]]
18615//	params = [[['a0', 'b0'], ['c0', 'd0']],
18616//	          [['a1', 'b1'], ['c1', 'd1']]]
18617//	output = [[[['a1', 'b1'], ['c1', 'd1']]],
18618//	          [[['a0', 'b0'], ['c0', 'd0']]]]
18619//
18620//	indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
18621//	params = [[['a0', 'b0'], ['c0', 'd0']],
18622//	          [['a1', 'b1'], ['c1', 'd1']]]
18623//	output = [[['c0', 'd0'], ['a1', 'b1']],
18624//	          [['a0', 'b0'], ['c1', 'd1']]]
18625//
18626//
18627//	indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
18628//	params = [[['a0', 'b0'], ['c0', 'd0']],
18629//	          [['a1', 'b1'], ['c1', 'd1']]]
18630//	output = [['b0', 'b1'], ['d0', 'c1']]
18631//
18632// ```
18633//
18634// See also `tf.gather` and `tf.batch_gather`.
18635//
18636// Arguments:
18637//
18638//	params: The tensor from which to gather values.
18639//	indices: Index tensor.
18640//
18641// Returns Values from `params` gathered from indices given by `indices`, with
18642// shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.
18643func GatherNd(scope *Scope, params tf.Output, indices tf.Output) (output tf.Output) {
18644	if scope.Err() != nil {
18645		return
18646	}
18647	opspec := tf.OpSpec{
18648		Type: "GatherNd",
18649		Input: []tf.Input{
18650			params, indices,
18651		},
18652	}
18653	op := scope.AddOperation(opspec)
18654	return op.Output(0)
18655}
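
// A Go version of the "simple indexing into a matrix" example above (a sketch
// only: client imports assumed, error handling elided):
//
// ```go
// s := op.NewScope()
// params := op.Const(s, [][]string{{"a", "b"}, {"c", "d"}})
// indices := op.Const(s, [][]int32{{0, 0}, {1, 1}})
// out := op.GatherNd(s, params, indices)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// res, _ := sess.Run(nil, []tf.Output{out}, nil)
// _ = res // res[0].Value() => ["a" "d"]
// ```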
18656
18657// GatherV2Attr is an optional argument to GatherV2.
18658type GatherV2Attr func(optionalAttr)
18659
18660// GatherV2BatchDims sets the optional batch_dims attribute to value.
18661// If not specified, defaults to 0
18662func GatherV2BatchDims(value int64) GatherV2Attr {
18663	return func(m optionalAttr) {
18664		m["batch_dims"] = value
18665	}
18666}
18667
18668// Gather slices from `params` axis `axis` according to `indices`.
18669//
18670// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
18671// Produces an output tensor with shape `params.shape[:axis] +
18672// indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
18673//
18674// ```python
18675//
18676//	# Scalar indices (output is rank(params) - 1).
18677//	output[a_0, ..., a_n, b_0, ..., b_n] =
18678//	  params[a_0, ..., a_n, indices, b_0, ..., b_n]
18679//
18680//	# Vector indices (output is rank(params)).
18681//	output[a_0, ..., a_n, i, b_0, ..., b_n] =
18682//	  params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
18683//
18684//	# Higher rank indices (output is rank(params) + rank(indices) - 1).
18685//	output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
18686//	  params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
18687//
18688// ```
18689//
18690// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
18691// <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
18692// </div>
18693//
// Note that on CPU, if an out-of-bounds index is found, an error is returned.
// On GPU, if an out-of-bounds index is found, a 0 is stored in the
// corresponding output value.
18697//
18698// See also `tf.batch_gather` and `tf.gather_nd`.
18699//
18700// Arguments:
18701//
18702//	params: The tensor from which to gather values. Must be at least rank
18703//
18704// `axis + 1`.
18705//
18706//	indices: Index tensor. Must be in range `[0, params.shape[axis])`.
18707//	axis: The axis in `params` to gather `indices` from. Defaults to the first
18708//
18709// dimension. Supports negative indexes.
18710//
18711// Returns Values from `params` gathered from indices given by `indices`, with
18712// shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
18713func GatherV2(scope *Scope, params tf.Output, indices tf.Output, axis tf.Output, optional ...GatherV2Attr) (output tf.Output) {
18714	if scope.Err() != nil {
18715		return
18716	}
18717	attrs := map[string]interface{}{}
18718	for _, a := range optional {
18719		a(attrs)
18720	}
18721	opspec := tf.OpSpec{
18722		Type: "GatherV2",
18723		Input: []tf.Input{
18724			params, indices, axis,
18725		},
18726		Attrs: attrs,
18727	}
18728	op := scope.AddOperation(opspec)
18729	return op.Output(0)
18730}
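
// A sketch gathering columns (axis = 1) rather than rows (illustrative
// values; client imports assumed, errors elided). Passing `axis` as a graph
// tensor is what distinguishes GatherV2 from the fixed-axis Gather above.
//
// ```go
// s := op.NewScope()
// params := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
// indices := op.Const(s, []int32{2, 0})
// axis := op.Const(s, int64(1))
// out := op.GatherV2(s, params, indices, axis)
// _ = out // evaluates to [[3 1] [6 4]] when run in a session
// ```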
18731
18732// GenerateBoundingBoxProposalsAttr is an optional argument to GenerateBoundingBoxProposals.
18733type GenerateBoundingBoxProposalsAttr func(optionalAttr)
18734
18735// GenerateBoundingBoxProposalsPostNmsTopn sets the optional post_nms_topn attribute to value.
18736//
18737// value: An integer. Maximum number of rois in the output.
18738// If not specified, defaults to 300
18739func GenerateBoundingBoxProposalsPostNmsTopn(value int64) GenerateBoundingBoxProposalsAttr {
18740	return func(m optionalAttr) {
18741		m["post_nms_topn"] = value
18742	}
18743}
18744
// This op produces Regions of Interest from given bounding boxes (bbox_deltas) encoded w.r.t. anchors according to eq. 2 in arXiv:1506.01497
18746//
//	The op selects top `pre_nms_topn` scoring boxes, decodes them with respect to anchors,
//	applies non-maximal suppression to overlapping boxes whose intersection-over-union
//	(IoU) exceeds `nms_threshold`, and discards boxes whose shorter side is less than
//	`min_size`.
//	Inputs:
//	`scores`: A 4D tensor of shape [Batch, Height, Width, Num Anchors] containing the scores per anchor at the given position
//	`bbox_deltas`: A tensor of shape [Batch, Height, Width, 4 x Num Anchors] encoding boxes relative to each anchor
//	`anchors`: A 1D tensor of shape [4 x Num Anchors], representing the anchors.
//	Outputs:
//	`rois`: Output RoIs, a 3D tensor of shape [Batch, post_nms_topn, 4], padded with 0 if fewer than post_nms_topn candidates are found.
//	`roi_probabilities`: Probability scores of each RoI in `rois`, a 2D tensor of shape [Batch, post_nms_topn], padded with 0 if needed, sorted by score.
18758//
18759// Arguments:
18760//
//	scores: A 4-D float tensor of shape `[num_images, height, width, num_anchors]` containing scores of the boxes for the given anchors; may be unsorted.
//	bbox_deltas: A 4-D float tensor of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor.
18763//
18764// Coordinates are given in the form [dy, dx, dh, dw].
18765//
//	image_info: A 2-D float tensor of shape `[num_images, 5]` containing image information (height, width, scale).
18767//	anchors: A 2-D float tensor of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form [y1, x1, y2, x2].
18768//	nms_threshold: A scalar float tensor for non-maximal-suppression threshold.
18769//	pre_nms_topn: A scalar int tensor for the number of top scoring boxes to be used as input.
18770//	min_size: A scalar float tensor. Any box that has a smaller size than min_size will be discarded.
18771//
18772// Returns:
18773//
18774//	rois: A 3-D float tensor of shape `[num_images,post_nms_topn,4]` representing the selected
18775//
// region of interest boxes, sorted in descending order by score.
18777//
18778//	roi_probabilities: A 2-D float tensor of shape `[num_images, post_nms_topn]` representing the score of the
18779//
18780// region of interest box in `rois` tensor at the same index.
18781func GenerateBoundingBoxProposals(scope *Scope, scores tf.Output, bbox_deltas tf.Output, image_info tf.Output, anchors tf.Output, nms_threshold tf.Output, pre_nms_topn tf.Output, min_size tf.Output, optional ...GenerateBoundingBoxProposalsAttr) (rois tf.Output, roi_probabilities tf.Output) {
18782	if scope.Err() != nil {
18783		return
18784	}
18785	attrs := map[string]interface{}{}
18786	for _, a := range optional {
18787		a(attrs)
18788	}
18789	opspec := tf.OpSpec{
18790		Type: "GenerateBoundingBoxProposals",
18791		Input: []tf.Input{
18792			scores, bbox_deltas, image_info, anchors, nms_threshold, pre_nms_topn, min_size,
18793		},
18794		Attrs: attrs,
18795	}
18796	op := scope.AddOperation(opspec)
18797	return op.Output(0), op.Output(1)
18798}
18799
18800// GenerateVocabRemappingAttr is an optional argument to GenerateVocabRemapping.
18801type GenerateVocabRemappingAttr func(optionalAttr)
18802
18803// GenerateVocabRemappingOldVocabSize sets the optional old_vocab_size attribute to value.
18804//
18805// value: Number of entries in the old vocab file to consider.  If -1,
18806// use the entire old vocabulary.
18807// If not specified, defaults to -1
18808//
18809// REQUIRES: value >= -1
18810func GenerateVocabRemappingOldVocabSize(value int64) GenerateVocabRemappingAttr {
18811	return func(m optionalAttr) {
18812		m["old_vocab_size"] = value
18813	}
18814}
18815
// Given a path to new and old vocabulary files, returns a remapping Tensor of
// length `num_new_vocab`, where `remapping[i]` contains the row number in the old
18819// vocabulary that corresponds to row `i` in the new vocabulary (starting at line
18820// `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i`
18821// in the new vocabulary is not in the old vocabulary.  The old vocabulary is
18822// constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the
18823// default value of -1.
18824//
// `new_vocab_offset` enables
18826// use in the partitioned variable case, and should generally be set through
// examining partitioning info.  Each file should be a text file, with each
// line containing a single entity within the vocabulary.
18829//
18830// For example, with `new_vocab_file` a text file containing each of the following
// elements on a single line: `[f0, f1, f2, f3]`, `old_vocab_file` containing `[f1, f0, f3]`,
18832// `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be
18833// `[0, -1, 2]`.
18834//
18835// The op also returns a count of how many entries in the new vocabulary
18836// were present in the old vocabulary, which is used to calculate the number of
// values to initialize in a weight matrix remapping.
18838//
18839// This functionality can be used to remap both row vocabularies (typically,
18840// features) and column vocabularies (typically, classes) from TensorFlow
18841// checkpoints.  Note that the partitioning logic relies on contiguous vocabularies
18842// corresponding to div-partitioned variables.  Moreover, the underlying remapping
18843// uses an IndexTable (as opposed to an inexact CuckooTable), so client code should
18844// use the corresponding index_table_from_file() as the FeatureColumn framework
18845// does (as opposed to tf.feature_to_id(), which uses a CuckooTable).
18846//
18847// Arguments:
18848//
18849//	new_vocab_file: Path to the new vocab file.
18850//	old_vocab_file: Path to the old vocab file.
18851//	new_vocab_offset: How many entries into the new vocab file to start reading.
18852//	num_new_vocab: Number of entries in the new vocab file to remap.
18853//
18854// Returns:
18855//
18856//	remapping: A Tensor of length num_new_vocab where the element at index i
18857//
18858// is equal to the old ID that maps to the new ID i.  This element is -1 for any
18859// new ID that is not found in the old vocabulary.
18860//
18861//	num_present: Number of new vocab entries found in old vocab.
18862func GenerateVocabRemapping(scope *Scope, new_vocab_file tf.Output, old_vocab_file tf.Output, new_vocab_offset int64, num_new_vocab int64, optional ...GenerateVocabRemappingAttr) (remapping tf.Output, num_present tf.Output) {
18863	if scope.Err() != nil {
18864		return
18865	}
18866	attrs := map[string]interface{}{"new_vocab_offset": new_vocab_offset, "num_new_vocab": num_new_vocab}
18867	for _, a := range optional {
18868		a(attrs)
18869	}
18870	opspec := tf.OpSpec{
18871		Type: "GenerateVocabRemapping",
18872		Input: []tf.Input{
18873			new_vocab_file, old_vocab_file,
18874		},
18875		Attrs: attrs,
18876	}
18877	op := scope.AddOperation(opspec)
18878	return op.Output(0), op.Output(1)
18879}
18880
18881// Gets the element at the specified index in a dataset.
18882func GetElementAtIndex(scope *Scope, dataset tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
18883	if scope.Err() != nil {
18884		return
18885	}
18886	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
18887	opspec := tf.OpSpec{
18888		Type: "GetElementAtIndex",
18889		Input: []tf.Input{
18890			dataset, index,
18891		},
18892		Attrs: attrs,
18893	}
18894	op := scope.AddOperation(opspec)
18895	if scope.Err() != nil {
18896		return
18897	}
18898	var idx int
18899	var err error
18900	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
18901		scope.UpdateErr("GetElementAtIndex", err)
18902		return
18903	}
18904	return components
18905}
18906
18907// Returns the `tf.data.Options` attached to `input_dataset`.
18908//
18909// Arguments:
18910//
18911//	input_dataset: A variant tensor representing the input dataset.
18912func GetOptions(scope *Scope, input_dataset tf.Output) (serialized_options tf.Output) {
18913	if scope.Err() != nil {
18914		return
18915	}
18916	opspec := tf.OpSpec{
18917		Type: "GetOptions",
18918		Input: []tf.Input{
18919			input_dataset,
18920		},
18921	}
18922	op := scope.AddOperation(opspec)
18923	return op.Output(0)
18924}
18925
18926// Store the input tensor in the state of the current session.
18927//
18928// Arguments:
18929//
18930//	value: The tensor to be stored.
18931//
18932// Returns The handle for the tensor stored in the session state, represented
18933// as a string.
18934func GetSessionHandle(scope *Scope, value tf.Output) (handle tf.Output) {
18935	if scope.Err() != nil {
18936		return
18937	}
18938	opspec := tf.OpSpec{
18939		Type: "GetSessionHandle",
18940		Input: []tf.Input{
18941			value,
18942		},
18943	}
18944	op := scope.AddOperation(opspec)
18945	return op.Output(0)
18946}
18947
18948// Store the input tensor in the state of the current session.
18949//
18950// Arguments:
18951//
18952//	value: The tensor to be stored.
18953//
18954// Returns The handle for the tensor stored in the session state, represented
18955// as a ResourceHandle object.
18956func GetSessionHandleV2(scope *Scope, value tf.Output) (handle tf.Output) {
18957	if scope.Err() != nil {
18958		return
18959	}
18960	opspec := tf.OpSpec{
18961		Type: "GetSessionHandleV2",
18962		Input: []tf.Input{
18963			value,
18964		},
18965	}
18966	op := scope.AddOperation(opspec)
18967	return op.Output(0)
18968}
18969
18970// Get the value of the tensor specified by its handle.
18971//
18972// Arguments:
18973//
18974//	handle: The handle for a tensor stored in the session state.
18975//	dtype: The type of the output value.
18976//
18977// Returns The tensor for the given handle.
18978func GetSessionTensor(scope *Scope, handle tf.Output, dtype tf.DataType) (value tf.Output) {
18979	if scope.Err() != nil {
18980		return
18981	}
18982	attrs := map[string]interface{}{"dtype": dtype}
18983	opspec := tf.OpSpec{
18984		Type: "GetSessionTensor",
18985		Input: []tf.Input{
18986			handle,
18987		},
18988		Attrs: attrs,
18989	}
18990	op := scope.AddOperation(opspec)
18991	return op.Output(0)
18992}
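
// A round-trip sketch: store a tensor with GetSessionHandle, then read it
// back with GetSessionTensor by feeding the handle through a string
// placeholder. This is a hypothetical wiring (client imports assumed, errors
// elided); both Run calls must use the same session, since the stored state
// is per-session.
//
// ```go
// s := op.NewScope()
// value := op.Const(s, []float32{1, 2, 3})
// handle := op.GetSessionHandle(s, value)
// ph := op.Placeholder(s, tf.String)
// back := op.GetSessionTensor(s, ph, tf.Float)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// res, _ := sess.Run(nil, []tf.Output{handle}, nil)
// res2, _ := sess.Run(map[tf.Output]*tf.Tensor{ph: res[0]}, []tf.Output{back}, nil)
// _ = res2 // res2[0].Value() => [1 2 3]
// ```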
18993
18994// Returns the truth value of (x > y) element-wise.
18995//
18996// *NOTE*: `Greater` supports broadcasting. More about broadcasting
18997// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
18998//
18999// Example:
19000//
19001// ```python
19002// x = tf.constant([5, 4, 6])
19003// y = tf.constant([5, 2, 5])
19004// tf.math.greater(x, y) ==> [False, True, True]
19005//
19006// x = tf.constant([5, 4, 6])
19007// y = tf.constant([5])
19008// tf.math.greater(x, y) ==> [False, False, True]
19009// ```
19010func Greater(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
19011	if scope.Err() != nil {
19012		return
19013	}
19014	opspec := tf.OpSpec{
19015		Type: "Greater",
19016		Input: []tf.Input{
19017			x, y,
19018		},
19019	}
19020	op := scope.AddOperation(opspec)
19021	return op.Output(0)
19022}
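
// The same comparison expressed through this wrapper (a sketch; client
// imports assumed, errors elided):
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []int32{5, 4, 6})
// y := op.Const(s, []int32{5, 2, 5})
// z := op.Greater(s, x, y)
// _ = z // evaluates to [false true true] when run, matching the Python example
// ```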
19023
19024// Returns the truth value of (x >= y) element-wise.
19025//
19026// *NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
19027// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
19028//
19029// Example:
19030//
19031// ```python
19032// x = tf.constant([5, 4, 6, 7])
19033// y = tf.constant([5, 2, 5, 10])
19034// tf.math.greater_equal(x, y) ==> [True, True, True, False]
19035//
19036// x = tf.constant([5, 4, 6, 7])
19037// y = tf.constant([5])
19038// tf.math.greater_equal(x, y) ==> [True, False, True, True]
19039// ```
19040func GreaterEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
19041	if scope.Err() != nil {
19042		return
19043	}
19044	opspec := tf.OpSpec{
19045		Type: "GreaterEqual",
19046		Input: []tf.Input{
19047			x, y,
19048		},
19049	}
19050	op := scope.AddOperation(opspec)
19051	return op.Output(0)
19052}
19053
19054// Gives a guarantee to the TF runtime that the input tensor is a constant.
19055//
19056// The runtime is then free to make optimizations based on this.
19057//
// Only accepts value-typed tensors as inputs and rejects resource variable
// handles.
19060//
19061// Returns the input tensor without modification.
19062func GuaranteeConst(scope *Scope, input tf.Output) (output tf.Output) {
19063	if scope.Err() != nil {
19064		return
19065	}
19066	opspec := tf.OpSpec{
19067		Type: "GuaranteeConst",
19068		Input: []tf.Input{
19069			input,
19070		},
19071	}
19072	op := scope.AddOperation(opspec)
19073	return op.Output(0)
19074}
19075
19076// Convert one or more images from HSV to RGB.
19077//
19078// Outputs a tensor of the same shape as the `images` tensor, containing the RGB
19079// value of the pixels. The output is only well defined if the value in `images`
19080// are in `[0,1]`.
19081//
19082// See `rgb_to_hsv` for a description of the HSV encoding.
19083//
19084// Arguments:
19085//
19086//	images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3.
19087//
19088// Returns `images` converted to RGB.
19089func HSVToRGB(scope *Scope, images tf.Output) (output tf.Output) {
19090	if scope.Err() != nil {
19091		return
19092	}
19093	opspec := tf.OpSpec{
19094		Type: "HSVToRGB",
19095		Input: []tf.Input{
19096			images,
19097		},
19098	}
19099	op := scope.AddOperation(opspec)
19100	return op.Output(0)
19101}
19102
19103// HashTableV2Attr is an optional argument to HashTableV2.
19104type HashTableV2Attr func(optionalAttr)
19105
19106// HashTableV2Container sets the optional container attribute to value.
19107//
19108// value: If non-empty, this table is placed in the given container.
19109// Otherwise, a default container is used.
19110// If not specified, defaults to ""
19111func HashTableV2Container(value string) HashTableV2Attr {
19112	return func(m optionalAttr) {
19113		m["container"] = value
19114	}
19115}
19116
19117// HashTableV2SharedName sets the optional shared_name attribute to value.
19118//
19119// value: If non-empty, this table is shared under the given name across
19120// multiple sessions.
19121// If not specified, defaults to ""
19122func HashTableV2SharedName(value string) HashTableV2Attr {
19123	return func(m optionalAttr) {
19124		m["shared_name"] = value
19125	}
19126}
19127
19128// HashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
19129//
19130// value: If true and shared_name is empty, the table is shared
19131// using the node name.
19132// If not specified, defaults to false
19133func HashTableV2UseNodeNameSharing(value bool) HashTableV2Attr {
19134	return func(m optionalAttr) {
19135		m["use_node_name_sharing"] = value
19136	}
19137}
19138
19139// Creates a non-initialized hash table.
19140//
19141// This op creates a hash table, specifying the type of its keys and values.
19142// Before using the table you will have to initialize it.  After initialization the
19143// table will be immutable.
19144//
19145// Arguments:
19146//
19147//	key_dtype: Type of the table keys.
19148//	value_dtype: Type of the table values.
19149//
19150// Returns Handle to a table.
19151func HashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...HashTableV2Attr) (table_handle tf.Output) {
19152	if scope.Err() != nil {
19153		return
19154	}
19155	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
19156	for _, a := range optional {
19157		a(attrs)
19158	}
19159	opspec := tf.OpSpec{
19160		Type: "HashTableV2",
19161
19162		Attrs: attrs,
19163	}
19164	op := scope.AddOperation(opspec)
19165	return op.Output(0)
19166}
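
// A sketch creating and initializing a table (client imports assumed, errors
// elided). It relies on the InitializeTableV2 and LookupTableFindV2 wrappers
// elsewhere in this package; the keys/values below are illustrative only.
//
// ```go
// s := op.NewScope()
// table := op.HashTableV2(s, tf.String, tf.Int64,
// 	op.HashTableV2SharedName("word_ids"))
// keys := op.Const(s, []string{"hello", "world"})
// vals := op.Const(s, []int64{0, 1})
// init := op.InitializeTableV2(s, table, keys, vals)
// query := op.Const(s, []string{"world", "unknown"})
// deflt := op.Const(s, int64(-1))
// found := op.LookupTableFindV2(s, table, query, deflt)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// sess.Run(nil, nil, []*tf.Operation{init}) // initialize before lookups
// res, _ := sess.Run(nil, []tf.Output{found}, nil)
// _ = res // res[0].Value() => [1 -1]
// ```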
19167
19168// HistogramFixedWidthAttr is an optional argument to HistogramFixedWidth.
19169type HistogramFixedWidthAttr func(optionalAttr)
19170
19171// HistogramFixedWidthDtype sets the optional dtype attribute to value.
19172// If not specified, defaults to DT_INT32
19173func HistogramFixedWidthDtype(value tf.DataType) HistogramFixedWidthAttr {
19174	return func(m optionalAttr) {
19175		m["dtype"] = value
19176	}
19177}
19178
19179// Return histogram of values.
19180//
19181// Given the tensor `values`, this operation returns a rank 1 histogram counting
19182// the number of entries in `values` that fall into every bin.  The bins are
19183// equal width and determined by the arguments `value_range` and `nbins`.
19184//
19185// ```python
19186// # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
19187// nbins = 5
19188// value_range = [0.0, 5.0]
19189// new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
19190//
19191// with tf.get_default_session() as sess:
19192//
19193//	hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
19194//	variables.global_variables_initializer().run()
19195//	sess.run(hist) => [2, 1, 1, 0, 2]
19196//
19197// ```
19198//
19199// Arguments:
19200//
19201//	values: Numeric `Tensor`.
19202//	value_range: Shape [2] `Tensor` of same `dtype` as `values`.
19203//
19204// values <= value_range[0] will be mapped to hist[0],
19205// values >= value_range[1] will be mapped to hist[-1].
19206//
19207//	nbins: Scalar `int32 Tensor`.  Number of histogram bins.
19208//
19209// Returns A 1-D `Tensor` holding histogram of values.
19210func HistogramFixedWidth(scope *Scope, values tf.Output, value_range tf.Output, nbins tf.Output, optional ...HistogramFixedWidthAttr) (out tf.Output) {
19211	if scope.Err() != nil {
19212		return
19213	}
19214	attrs := map[string]interface{}{}
19215	for _, a := range optional {
19216		a(attrs)
19217	}
19218	opspec := tf.OpSpec{
19219		Type: "HistogramFixedWidth",
19220		Input: []tf.Input{
19221			values, value_range, nbins,
19222		},
19223		Attrs: attrs,
19224	}
19225	op := scope.AddOperation(opspec)
19226	return op.Output(0)
19227}
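
// The Python example above, re-expressed through this wrapper (a sketch;
// client imports assumed, errors elided):
//
// ```go
// s := op.NewScope()
// values := op.Const(s, []float32{-1.0, 0.0, 1.5, 2.0, 5.0, 15})
// valueRange := op.Const(s, []float32{0.0, 5.0})
// nbins := op.Const(s, int32(5))
// hist := op.HistogramFixedWidth(s, values, valueRange, nbins)
// _ = hist // evaluates to [2 1 1 0 2] when run, as in the Python example
// ```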
19228
19229// Outputs a `Summary` protocol buffer with a histogram.
19230//
19231// The generated
19232// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
19233// has one summary value containing a histogram for `values`.
19234//
19235// This op reports an `InvalidArgument` error if any value is not finite.
19236//
19237// Arguments:
19238//
19239//	tag: Scalar.  Tag to use for the `Summary.Value`.
19240//	values: Any shape. Values to use to build the histogram.
19241//
19242// Returns Scalar. Serialized `Summary` protocol buffer.
19243func HistogramSummary(scope *Scope, tag tf.Output, values tf.Output) (summary tf.Output) {
19244	if scope.Err() != nil {
19245		return
19246	}
19247	opspec := tf.OpSpec{
19248		Type: "HistogramSummary",
19249		Input: []tf.Input{
19250			tag, values,
19251		},
19252	}
19253	op := scope.AddOperation(opspec)
19254	return op.Output(0)
19255}
19256
19257// Returns a constant tensor on the host. Only for writing C++ tests.
19258//
19259// Arguments:
19260//
19261//	value: Attr `value` is the tensor to return.
19262func HostConst(scope *Scope, value tf.Tensor, dtype tf.DataType) (output tf.Output) {
19263	if scope.Err() != nil {
19264		return
19265	}
19266	attrs := map[string]interface{}{"value": value, "dtype": dtype}
19267	opspec := tf.OpSpec{
19268		Type: "HostConst",
19269
19270		Attrs: attrs,
19271	}
19272	op := scope.AddOperation(opspec)
19273	return op.Output(0)
19274}
19275
19276// Inverse fast Fourier transform.
19277//
19278// Computes the inverse 1-dimensional discrete Fourier transform over the
19279// inner-most dimension of `input`.
19280//
19281// Arguments:
19282//
19283//	input: A complex tensor.
19284//
19285// Returns A complex tensor of the same shape as `input`. The inner-most
19286//
19287//	dimension of `input` is replaced with its inverse 1D Fourier transform.
19288//
19289// @compatibility(numpy)
19290// Equivalent to np.fft.ifft
19291// @end_compatibility
19292func IFFT(scope *Scope, input tf.Output) (output tf.Output) {
19293	if scope.Err() != nil {
19294		return
19295	}
19296	opspec := tf.OpSpec{
19297		Type: "IFFT",
19298		Input: []tf.Input{
19299			input,
19300		},
19301	}
19302	op := scope.AddOperation(opspec)
19303	return op.Output(0)
19304}
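
// A small FFT/IFFT round trip (a sketch; client imports assumed, errors
// elided). Up to floating-point error, IFFT(FFT(x)) returns x.
//
// ```go
// s := op.NewScope()
// x := op.Const(s, []complex64{1, 2, 3, 4})
// freq := op.FFT(s, x)
// back := op.IFFT(s, freq)
// _ = back // ~ [1 2 3 4], recovered from the frequency domain
// ```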
19305
19306// Inverse 2D fast Fourier transform.
19307//
19308// Computes the inverse 2-dimensional discrete Fourier transform over the
19309// inner-most 2 dimensions of `input`.
19310//
19311// Arguments:
19312//
19313//	input: A complex tensor.
19314//
19315// Returns A complex tensor of the same shape as `input`. The inner-most 2
19316//
19317//	dimensions of `input` are replaced with their inverse 2D Fourier transform.
19318//
19319// @compatibility(numpy)
19320// Equivalent to np.fft.ifft2
19321// @end_compatibility
19322func IFFT2D(scope *Scope, input tf.Output) (output tf.Output) {
19323	if scope.Err() != nil {
19324		return
19325	}
19326	opspec := tf.OpSpec{
19327		Type: "IFFT2D",
19328		Input: []tf.Input{
19329			input,
19330		},
19331	}
19332	op := scope.AddOperation(opspec)
19333	return op.Output(0)
19334}
19335
19336// Inverse 3D fast Fourier transform.
19337//
19338// Computes the inverse 3-dimensional discrete Fourier transform over the
19339// inner-most 3 dimensions of `input`.
19340//
19341// Arguments:
19342//
19343//	input: A complex tensor.
19344//
19345// Returns A complex tensor of the same shape as `input`. The inner-most 3
19346//
19347//	dimensions of `input` are replaced with their inverse 3D Fourier transform.
19348//
19349// @compatibility(numpy)
19350// Equivalent to np.fft.ifftn with 3 dimensions.
19351// @end_compatibility
19352func IFFT3D(scope *Scope, input tf.Output) (output tf.Output) {
19353	if scope.Err() != nil {
19354		return
19355	}
19356	opspec := tf.OpSpec{
19357		Type: "IFFT3D",
19358		Input: []tf.Input{
19359			input,
19360		},
19361	}
19362	op := scope.AddOperation(opspec)
19363	return op.Output(0)
19364}
19365
19366// IRFFTAttr is an optional argument to IRFFT.
19367type IRFFTAttr func(optionalAttr)
19368
19369// IRFFTTreal sets the optional Treal attribute to value.
19370// If not specified, defaults to DT_FLOAT
19371func IRFFTTreal(value tf.DataType) IRFFTAttr {
19372	return func(m optionalAttr) {
19373		m["Treal"] = value
19374	}
19375}
19376
19377// Inverse real-valued fast Fourier transform.
19378//
19379// Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
19380// signal over the inner-most dimension of `input`.
19381//
19382// The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
19383// `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
19384// `fft_length` is not provided, it is computed from the size of the inner-most
19385// dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
19386// compute `input` is odd, it should be provided since it cannot be inferred
19387// properly.
19388//
19389// Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
19390// than the corresponding dimension of `input`, the dimension is cropped. If it is
19391// larger, the dimension is padded with zeros.
19392//
19393// Arguments:
19394//
19395//	input: A complex tensor.
19396//	fft_length: An int32 tensor of shape [1]. The FFT length.
19397//
19398// Returns A float32 tensor of the same rank as `input`. The inner-most
19399//
19400//	dimension of `input` is replaced with the `fft_length` samples of its inverse
19401//	1D Fourier transform.
19402//
19403// @compatibility(numpy)
19404// Equivalent to np.fft.irfft
19405// @end_compatibility
19406func IRFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFTAttr) (output tf.Output) {
19407	if scope.Err() != nil {
19408		return
19409	}
19410	attrs := map[string]interface{}{}
19411	for _, a := range optional {
19412		a(attrs)
19413	}
19414	opspec := tf.OpSpec{
19415		Type: "IRFFT",
19416		Input: []tf.Input{
19417			input, fft_length,
19418		},
19419		Attrs: attrs,
19420	}
19421	op := scope.AddOperation(opspec)
19422	return op.Output(0)
19423}
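
// A sketch illustrating the odd-length caveat above (client imports assumed,
// errors elided): a length-5 signal yields 5/2 + 1 = 3 RFFT bins, and
// 2 * (3 - 1) = 4 != 5, so `fft_length` must be passed explicitly to recover
// the original length.
//
// ```go
// s := op.NewScope()
// fftLen := op.Const(s, []int32{5})
// signal := op.Const(s, []float32{0, 1, 2, 3, 4})
// spectrum := op.RFFT(s, signal, fftLen) // 3 complex bins
// back := op.IRFFT(s, spectrum, fftLen)  // ~ [0 1 2 3 4]
// _ = back
// ```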
19424
19425// IRFFT2DAttr is an optional argument to IRFFT2D.
19426type IRFFT2DAttr func(optionalAttr)
19427
19428// IRFFT2DTreal sets the optional Treal attribute to value.
19429// If not specified, defaults to DT_FLOAT
19430func IRFFT2DTreal(value tf.DataType) IRFFT2DAttr {
19431	return func(m optionalAttr) {
19432		m["Treal"] = value
19433	}
19434}
19435
19436// Inverse 2D real-valued fast Fourier transform.
19437//
19438// Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
19439// signal over the inner-most 2 dimensions of `input`.
19440//
19441// The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
19442// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
19443// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
19444// from the size of the inner-most 2 dimensions of `input`. If the FFT length used
19445// to compute `input` is odd, it should be provided since it cannot be inferred
19446// properly.
19447//
19448// Along each axis `IRFFT2D` is computed on, if `fft_length` (or
19449// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
19450// corresponding dimension of `input`, the dimension is cropped. If it is larger,
19451// the dimension is padded with zeros.
19452//
19453// Arguments:
19454//
19455//	input: A complex tensor.
19456//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
19457//
19458// Returns A float32 tensor of the same rank as `input`. The inner-most 2
19459//
19460//	dimensions of `input` are replaced with the `fft_length` samples of their
19461//	inverse 2D Fourier transform.
19462//
19463// @compatibility(numpy)
19464// Equivalent to np.fft.irfft2
19465// @end_compatibility
19466func IRFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT2DAttr) (output tf.Output) {
19467	if scope.Err() != nil {
19468		return
19469	}
19470	attrs := map[string]interface{}{}
19471	for _, a := range optional {
19472		a(attrs)
19473	}
19474	opspec := tf.OpSpec{
19475		Type: "IRFFT2D",
19476		Input: []tf.Input{
19477			input, fft_length,
19478		},
19479		Attrs: attrs,
19480	}
19481	op := scope.AddOperation(opspec)
19482	return op.Output(0)
19483}
19484
19485// IRFFT3DAttr is an optional argument to IRFFT3D.
19486type IRFFT3DAttr func(optionalAttr)
19487
19488// IRFFT3DTreal sets the optional Treal attribute to value.
19489// If not specified, defaults to DT_FLOAT
19490func IRFFT3DTreal(value tf.DataType) IRFFT3DAttr {
19491	return func(m optionalAttr) {
19492		m["Treal"] = value
19493	}
19494}
19495
19496// Inverse 3D real-valued fast Fourier transform.
19497//
19498// Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
19499// signal over the inner-most 3 dimensions of `input`.
19500//
19501// The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
19502// The inner-most dimension contains the `fft_length / 2 + 1` unique components of
19503// the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
19504// from the size of the inner-most 3 dimensions of `input`. If the FFT length used
19505// to compute `input` is odd, it should be provided since it cannot be inferred
19506// properly.
19507//
19508// Along each axis `IRFFT3D` is computed on, if `fft_length` (or
19509// `fft_length / 2 + 1` for the inner-most dimension) is smaller than the
19510// corresponding dimension of `input`, the dimension is cropped. If it is larger,
19511// the dimension is padded with zeros.
19512//
19513// Arguments:
19514//
19515//	input: A complex tensor.
19516//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
19517//
19518// Returns A float32 tensor of the same rank as `input`. The inner-most 3
19519//
19520//	dimensions of `input` are replaced with the `fft_length` samples of their
19521//	inverse 3D real Fourier transform.
19522//
19523// @compatibility(numpy)
// Equivalent to np.fft.irfftn with 3 dimensions.
19525// @end_compatibility
19526func IRFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...IRFFT3DAttr) (output tf.Output) {
19527	if scope.Err() != nil {
19528		return
19529	}
19530	attrs := map[string]interface{}{}
19531	for _, a := range optional {
19532		a(attrs)
19533	}
19534	opspec := tf.OpSpec{
19535		Type: "IRFFT3D",
19536		Input: []tf.Input{
19537			input, fft_length,
19538		},
19539		Attrs: attrs,
19540	}
19541	op := scope.AddOperation(opspec)
19542	return op.Output(0)
19543}
19544
19545// Return a tensor with the same shape and contents as the input tensor or value.
19546func Identity(scope *Scope, input tf.Output) (output tf.Output) {
19547	if scope.Err() != nil {
19548		return
19549	}
19550	opspec := tf.OpSpec{
19551		Type: "Identity",
19552		Input: []tf.Input{
19553			input,
19554		},
19555	}
19556	op := scope.AddOperation(opspec)
19557	return op.Output(0)
19558}
19559
19560// Returns a list of tensors with the same shapes and contents as the input
19561//
19562// tensors.
19563//
19564// This op can be used to override the gradient for complicated functions. For
19565// example, suppose y = f(x) and we wish to apply a custom function g for backprop
19566// such that dx = g(dy). In Python,
19567//
19568// ```python
19569// with tf.get_default_graph().gradient_override_map(
19570//     {'IdentityN': 'OverrideGradientWithG'}):
19571//   y, _ = identity_n([f(x), x])
19572//
19573// @tf.RegisterGradient('OverrideGradientWithG')
19574// def ApplyG(op, dy, _):
19575//   return [None, g(dy)]  # Do not backprop to f(x).
19576// ```
19580func IdentityN(scope *Scope, input []tf.Output) (output []tf.Output) {
19581	if scope.Err() != nil {
19582		return
19583	}
19584	opspec := tf.OpSpec{
19585		Type: "IdentityN",
19586		Input: []tf.Input{
19587			tf.OutputList(input),
19588		},
19589	}
19590	op := scope.AddOperation(opspec)
19591	if scope.Err() != nil {
19592		return
19593	}
19594	var idx int
19595	var err error
19596	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
19597		scope.UpdateErr("IdentityN", err)
19598		return
19599	}
19600	return output
19601}
19602
19603// IdentityReaderV2Attr is an optional argument to IdentityReaderV2.
19604type IdentityReaderV2Attr func(optionalAttr)
19605
19606// IdentityReaderV2Container sets the optional container attribute to value.
19607//
19608// value: If non-empty, this reader is placed in the given container.
19609// Otherwise, a default container is used.
19610// If not specified, defaults to ""
19611func IdentityReaderV2Container(value string) IdentityReaderV2Attr {
19612	return func(m optionalAttr) {
19613		m["container"] = value
19614	}
19615}
19616
19617// IdentityReaderV2SharedName sets the optional shared_name attribute to value.
19618//
19619// value: If non-empty, this reader is named in the given bucket
19620// with this shared_name. Otherwise, the node name is used instead.
19621// If not specified, defaults to ""
19622func IdentityReaderV2SharedName(value string) IdentityReaderV2Attr {
19623	return func(m optionalAttr) {
19624		m["shared_name"] = value
19625	}
19626}
19627
19628// A Reader that outputs the queued work as both the key and value.
19629//
19630// To use, enqueue strings in a Queue.  ReaderRead will take the front
19631// work string and output (work, work).
19632//
19633// Returns The handle to reference the Reader.
19634func IdentityReaderV2(scope *Scope, optional ...IdentityReaderV2Attr) (reader_handle tf.Output) {
19635	if scope.Err() != nil {
19636		return
19637	}
19638	attrs := map[string]interface{}{}
19639	for _, a := range optional {
19640		a(attrs)
19641	}
19642	opspec := tf.OpSpec{
19643		Type: "IdentityReaderV2",
19644
19645		Attrs: attrs,
19646	}
19647	op := scope.AddOperation(opspec)
19648	return op.Output(0)
19649}
19650
19651// Compute the lower regularized incomplete Gamma function `P(a, x)`.
19652//
19653// The lower regularized incomplete Gamma function is defined as:
19654//
19655// \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
19656//
19657// where
19658//
19659// \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
19660//
19661// is the lower incomplete Gamma function.
19662//
19663// Note that `Q(a, x)` (`Igammac`) above is the upper regularized incomplete
19664// Gamma function.
19665func Igamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
19666	if scope.Err() != nil {
19667		return
19668	}
19669	opspec := tf.OpSpec{
19670		Type: "Igamma",
19671		Input: []tf.Input{
19672			a, x,
19673		},
19674	}
19675	op := scope.AddOperation(opspec)
19676	return op.Output(0)
19677}
19678
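// As a quick numerical check (an editorial sketch, not part of the generated
// API): for a = 1 the lower incomplete Gamma function reduces to
// gamma(1, x) = 1 - exp(-x), so P(1, 1) = 1 - 1/e ≈ 0.632, and the
// complementary Q(1, 1) = 1/e ≈ 0.368 via the `Igammac` wrapper further below.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	a := op.Const(s, []float32{1})
// 	x := op.Const(s, []float32{1})
// 	p := op.Igamma(s, a, x)  // lower regularized
// 	q := op.Igammac(s, a, x) // upper regularized
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{p, q}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value(), out[1].Value()) // ≈ [0.632] [0.368]
// }
// ```
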
19679// Computes the gradient of `igamma(a, x)` wrt `a`.
19680func IgammaGradA(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
19681	if scope.Err() != nil {
19682		return
19683	}
19684	opspec := tf.OpSpec{
19685		Type: "IgammaGradA",
19686		Input: []tf.Input{
19687			a, x,
19688		},
19689	}
19690	op := scope.AddOperation(opspec)
19691	return op.Output(0)
19692}
19693
19694// Compute the upper regularized incomplete Gamma function `Q(a, x)`.
19695//
19696// The upper regularized incomplete Gamma function is defined as:
19697//
19698// \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
19699//
19700// where
19701//
19702// \\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\\)
19703//
19704// is the upper incomplete Gamma function.
19705//
19706// Note that `P(a, x)` (`Igamma`) above is the lower regularized incomplete
19707// Gamma function.
19708func Igammac(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
19709	if scope.Err() != nil {
19710		return
19711	}
19712	opspec := tf.OpSpec{
19713		Type: "Igammac",
19714		Input: []tf.Input{
19715			a, x,
19716		},
19717	}
19718	op := scope.AddOperation(opspec)
19719	return op.Output(0)
19720}
19721
19722// IgnoreErrorsDatasetAttr is an optional argument to IgnoreErrorsDataset.
19723type IgnoreErrorsDatasetAttr func(optionalAttr)
19724
19725// IgnoreErrorsDatasetLogWarning sets the optional log_warning attribute to value.
19726// If not specified, defaults to false
19727func IgnoreErrorsDatasetLogWarning(value bool) IgnoreErrorsDatasetAttr {
19728	return func(m optionalAttr) {
19729		m["log_warning"] = value
19730	}
19731}
19732
19733// Creates a dataset that contains the elements of `input_dataset` ignoring errors.
19734func IgnoreErrorsDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...IgnoreErrorsDatasetAttr) (handle tf.Output) {
19735	if scope.Err() != nil {
19736		return
19737	}
19738	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
19739	for _, a := range optional {
19740		a(attrs)
19741	}
19742	opspec := tf.OpSpec{
19743		Type: "IgnoreErrorsDataset",
19744		Input: []tf.Input{
19745			input_dataset,
19746		},
19747		Attrs: attrs,
19748	}
19749	op := scope.AddOperation(opspec)
19750	return op.Output(0)
19751}
19752
19753// ImagAttr is an optional argument to Imag.
19754type ImagAttr func(optionalAttr)
19755
19756// ImagTout sets the optional Tout attribute to value.
19757// If not specified, defaults to DT_FLOAT
19758func ImagTout(value tf.DataType) ImagAttr {
19759	return func(m optionalAttr) {
19760		m["Tout"] = value
19761	}
19762}
19763
19764// Returns the imaginary part of a complex number.
19765//
19766// Given a tensor `input` of complex numbers, this operation returns a tensor of
19767// type `float` that is the imaginary part of each element in `input`. All
19768// elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
19769// is the real part and *b* is the imaginary part returned by this operation.
19770//
19771// For example:
19772//
19773// ```
19774// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
19775// tf.imag(input) ==> [4.75, 5.75]
19776// ```
19777func Imag(scope *Scope, input tf.Output, optional ...ImagAttr) (output tf.Output) {
19778	if scope.Err() != nil {
19779		return
19780	}
19781	attrs := map[string]interface{}{}
19782	for _, a := range optional {
19783		a(attrs)
19784	}
19785	opspec := tf.OpSpec{
19786		Type: "Imag",
19787		Input: []tf.Input{
19788			input,
19789		},
19790		Attrs: attrs,
19791	}
19792	op := scope.AddOperation(opspec)
19793	return op.Output(0)
19794}
19795
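// A minimal end-to-end sketch (editorial, not part of the generated API),
// mirroring the example above: with a complex64 input the default Tout of
// DT_FLOAT applies. It assumes the Go bindings accept complex64 tensors.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	in := op.Const(s, []complex64{-2.25 + 4.75i, 3.25 + 5.75i})
// 	im := op.Imag(s, in)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{im}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [4.75 5.75]
// }
// ```
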
19796// ImageProjectiveTransformV2Attr is an optional argument to ImageProjectiveTransformV2.
19797type ImageProjectiveTransformV2Attr func(optionalAttr)
19798
19799// ImageProjectiveTransformV2FillMode sets the optional fill_mode attribute to value.
19800//
19801// value: Fill mode, "REFLECT", "WRAP", or "CONSTANT".
19802// If not specified, defaults to "CONSTANT"
19803func ImageProjectiveTransformV2FillMode(value string) ImageProjectiveTransformV2Attr {
19804	return func(m optionalAttr) {
19805		m["fill_mode"] = value
19806	}
19807}
19808
19809// Applies the given transform to each of the images.
19810//
19811// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
19812// the *output* point `(x, y)` to a transformed *input* point
19813// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
19814// `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
19815// image, the output pixel is set to 0.
19816//
19817// Arguments:
19818//
19819//	images: 4-D with shape `[batch, height, width, channels]`.
19820//	transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
19821//
19822// projective transformation matrix, with the last entry assumed to be 1. If there
19823// is one row, the same transformation will be applied to all images.
19824//
19825//	output_shape: 1-D Tensor [new_height, new_width].
19826//	interpolation: Interpolation method, "NEAREST" or "BILINEAR".
19827//
19828// Returns 4-D with shape
19829// `[batch, new_height, new_width, channels]`.
19830func ImageProjectiveTransformV2(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, interpolation string, optional ...ImageProjectiveTransformV2Attr) (transformed_images tf.Output) {
19831	if scope.Err() != nil {
19832		return
19833	}
19834	attrs := map[string]interface{}{"interpolation": interpolation}
19835	for _, a := range optional {
19836		a(attrs)
19837	}
19838	opspec := tf.OpSpec{
19839		Type: "ImageProjectiveTransformV2",
19840		Input: []tf.Input{
19841			images, transforms, output_shape,
19842		},
19843		Attrs: attrs,
19844	}
19845	op := scope.AddOperation(opspec)
19846	return op.Output(0)
19847}
19848
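// To make the transform row concrete, here is an editorial sketch (not part
// of the generated API): the row [1, 0, 0, 0, 1, 0, 0, 0] encodes the 3 x 3
// identity matrix, so the output image equals the input. Shapes and values
// are illustrative.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	images := op.Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [1, 2, 2, 1]
// 	transforms := op.Const(s, [][]float32{{1, 0, 0, 0, 1, 0, 0, 0}}) // identity
// 	outputShape := op.Const(s, []int32{2, 2})
// 	transformed := op.ImageProjectiveTransformV2(s, images, transforms, outputShape, "NEAREST")
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{transformed}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // the same 2x2 image back
// }
// ```
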
19849// ImageProjectiveTransformV3Attr is an optional argument to ImageProjectiveTransformV3.
19850type ImageProjectiveTransformV3Attr func(optionalAttr)
19851
19852// ImageProjectiveTransformV3FillMode sets the optional fill_mode attribute to value.
19853//
19854// value: Fill mode, "REFLECT", "WRAP", "CONSTANT", or "NEAREST".
19855// If not specified, defaults to "CONSTANT"
19856func ImageProjectiveTransformV3FillMode(value string) ImageProjectiveTransformV3Attr {
19857	return func(m optionalAttr) {
19858		m["fill_mode"] = value
19859	}
19860}
19861
19862// Applies the given transform to each of the images.
19863//
19864// If one row of `transforms` is `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps
19865// the *output* point `(x, y)` to a transformed *input* point
19866// `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`, where
19867// `k = c0 x + c1 y + 1`. If the transformed point lies outside of the input
19868// image, the output pixel is set to fill_value.
19869//
19870// Arguments:
19871//
19872//	images: 4-D with shape `[batch, height, width, channels]`.
19873//	transforms: 2-D Tensor, `[batch, 8]` or `[1, 8]` matrix, where each row corresponds to a 3 x 3
19874//
19875// projective transformation matrix, with the last entry assumed to be 1. If there
19876// is one row, the same transformation will be applied to all images.
19877//
19878//	output_shape: 1-D Tensor [new_height, new_width].
19879//	fill_value: float, the value to be filled when fill_mode is "CONSTANT".
19880//	interpolation: Interpolation method, "NEAREST" or "BILINEAR".
19881//
19882// Returns 4-D with shape
19883// `[batch, new_height, new_width, channels]`.
19884func ImageProjectiveTransformV3(scope *Scope, images tf.Output, transforms tf.Output, output_shape tf.Output, fill_value tf.Output, interpolation string, optional ...ImageProjectiveTransformV3Attr) (transformed_images tf.Output) {
19885	if scope.Err() != nil {
19886		return
19887	}
19888	attrs := map[string]interface{}{"interpolation": interpolation}
19889	for _, a := range optional {
19890		a(attrs)
19891	}
19892	opspec := tf.OpSpec{
19893		Type: "ImageProjectiveTransformV3",
19894		Input: []tf.Input{
19895			images, transforms, output_shape, fill_value,
19896		},
19897		Attrs: attrs,
19898	}
19899	op := scope.AddOperation(opspec)
19900	return op.Output(0)
19901}
19902
19903// ImageSummaryAttr is an optional argument to ImageSummary.
19904type ImageSummaryAttr func(optionalAttr)
19905
19906// ImageSummaryMaxImages sets the optional max_images attribute to value.
19907//
19908// value: Max number of batch elements to generate images for.
19909// If not specified, defaults to 3
19910//
19911// REQUIRES: value >= 1
19912func ImageSummaryMaxImages(value int64) ImageSummaryAttr {
19913	return func(m optionalAttr) {
19914		m["max_images"] = value
19915	}
19916}
19917
19918// ImageSummaryBadColor sets the optional bad_color attribute to value.
19919//
19920// value: Color to use for pixels with non-finite values.
19921// If not specified, defaults to {dtype:DT_UINT8 tensor_shape:{dim:{size:4}} int_val:255 int_val:0 int_val:0 int_val:255}
19922func ImageSummaryBadColor(value tf.Tensor) ImageSummaryAttr {
19923	return func(m optionalAttr) {
19924		m["bad_color"] = value
19925	}
19926}
19927
19928// Outputs a `Summary` protocol buffer with images.
19929//
19930// The summary has up to `max_images` summary values containing images. The
19931// images are built from `tensor` which must be 4-D with shape `[batch_size,
19932// height, width, channels]` and where `channels` can be:
19933//
19934// *  1: `tensor` is interpreted as Grayscale.
19935// *  3: `tensor` is interpreted as RGB.
19936// *  4: `tensor` is interpreted as RGBA.
19937//
19938// The images have the same number of channels as the input tensor. For float
19939// input, the values are normalized one image at a time to fit in the range
19940// `[0, 255]`.  `uint8` values are unchanged.  The op uses two different
19941// normalization algorithms:
19942//
19943//   - If the input values are all positive, they are rescaled so the largest one
19944//     is 255.
19945//
19946//   - If any input value is negative, the values are shifted so input value 0.0
19947//     is at 127.  They are then rescaled so that either the smallest value is 0,
19948//     or the largest one is 255.
19949//
19950// The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
19951// build the `tag` of the summary values:
19952//
19953//   - If `max_images` is 1, the summary value tag is '*tag*/image'.
19954//   - If `max_images` is greater than 1, the summary value tags are
19955//     generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
19956//
19957// The `bad_color` argument is the color to use in the generated images for
19958// non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
19959// Each element must be in the range `[0, 255]` (it represents the value of a
19960// pixel in the output image).  Non-finite values in the input tensor are
19961// replaced by this tensor in the output image.  The default value is the color
19962// red.
19963//
19964// Arguments:
19965//
19966//	tag: Scalar. Used to build the `tag` attribute of the summary values.
19967//	tensor: 4-D of shape `[batch_size, height, width, channels]` where
19968//
19969// `channels` is 1, 3, or 4.
19970//
19971// Returns Scalar. Serialized `Summary` protocol buffer.
19972func ImageSummary(scope *Scope, tag tf.Output, tensor tf.Output, optional ...ImageSummaryAttr) (summary tf.Output) {
19973	if scope.Err() != nil {
19974		return
19975	}
19976	attrs := map[string]interface{}{}
19977	for _, a := range optional {
19978		a(attrs)
19979	}
19980	opspec := tf.OpSpec{
19981		Type: "ImageSummary",
19982		Input: []tf.Input{
19983			tag, tensor,
19984		},
19985		Attrs: attrs,
19986	}
19987	op := scope.AddOperation(opspec)
19988	return op.Output(0)
19989}
19990
19991// Returns immutable tensor from memory region.
19992//
19993// The current implementation memmaps the tensor from a file.
19994//
19995// Arguments:
19996//
19997//	dtype: Type of the returned tensor.
19998//	shape: Shape of the returned tensor.
19999//	memory_region_name: Name of readonly memory region used by the tensor, see
20000//
20001// NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
20002func ImmutableConst(scope *Scope, dtype tf.DataType, shape tf.Shape, memory_region_name string) (tensor tf.Output) {
20003	if scope.Err() != nil {
20004		return
20005	}
20006	attrs := map[string]interface{}{"dtype": dtype, "shape": shape, "memory_region_name": memory_region_name}
20007	opspec := tf.OpSpec{
20008		Type: "ImmutableConst",
20009
20010		Attrs: attrs,
20011	}
20012	op := scope.AddOperation(opspec)
20013	return op.Output(0)
20014}
20015
20016// Says whether the targets are in the top `K` predictions.
20017//
20018// This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
20019// prediction for the target class is among the top `k` predictions among
20020// all predictions for example `i`. Note that the behavior of `InTopK` differs
20021// from the `TopK` op in its handling of ties; if multiple classes have the
20022// same prediction value and straddle the top-`k` boundary, all of those
20023// classes are considered to be in the top `k`.
20024//
20025// More formally, let
20026//
20027//	\\(predictions_i\\) be the predictions for all classes for example `i`,
20028//	\\(targets_i\\) be the target class for example `i`,
20029//	\\(out_i\\) be the output for example `i`,
20030//
20031// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
20032//
20033// Arguments:
20034//
20035//	predictions: A `batch_size` x `classes` tensor.
20036//	targets: A `batch_size` vector of class ids.
20037//	k: Number of top elements to look at for computing precision.
20038//
20039// Returns Computed Precision at `k` as a `bool Tensor`.
20040func InTopK(scope *Scope, predictions tf.Output, targets tf.Output, k int64) (precision tf.Output) {
20041	if scope.Err() != nil {
20042		return
20043	}
20044	attrs := map[string]interface{}{"k": k}
20045	opspec := tf.OpSpec{
20046		Type: "InTopK",
20047		Input: []tf.Input{
20048			predictions, targets,
20049		},
20050		Attrs: attrs,
20051	}
20052	op := scope.AddOperation(opspec)
20053	return op.Output(0)
20054}
20055
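// A small worked example (editorial, not part of the generated API): with
// k = 1 only the argmax class counts, so for the predictions below targets
// [1, 2] yield [true, false].
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	predictions := op.Const(s, [][]float32{{0.1, 0.8, 0.1}, {0.6, 0.3, 0.1}})
// 	targets := op.Const(s, []int32{1, 2})
// 	precision := op.InTopK(s, predictions, targets, 1)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{precision}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [true false]
// }
// ```
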
20056// Says whether the targets are in the top `K` predictions.
20057//
20058// This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
20059// prediction for the target class is among the top `k` predictions among
20060// all predictions for example `i`. Note that the behavior of `InTopK` differs
20061// from the `TopK` op in its handling of ties; if multiple classes have the
20062// same prediction value and straddle the top-`k` boundary, all of those
20063// classes are considered to be in the top `k`.
20064//
20065// More formally, let
20066//
20067//	\\(predictions_i\\) be the predictions for all classes for example `i`,
20068//	\\(targets_i\\) be the target class for example `i`,
20069//	\\(out_i\\) be the output for example `i`,
20070//
20071// $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
20072//
20073// Arguments:
20074//
20075//	predictions: A `batch_size` x `classes` tensor.
20076//	targets: A `batch_size` vector of class ids.
20077//	k: Number of top elements to look at for computing precision.
20078//
20079// Returns Computed precision at `k` as a `bool Tensor`.
20080func InTopKV2(scope *Scope, predictions tf.Output, targets tf.Output, k tf.Output) (precision tf.Output) {
20081	if scope.Err() != nil {
20082		return
20083	}
20084	opspec := tf.OpSpec{
20085		Type: "InTopKV2",
20086		Input: []tf.Input{
20087			predictions, targets, k,
20088		},
20089	}
20090	op := scope.AddOperation(opspec)
20091	return op.Output(0)
20092}
20093
20094// A placeholder op for a value that will be fed into the computation.
20095//
20096// Arguments:
20097//
20098//	dtype: The type of elements in the tensor.
20099//	shape: The shape of the tensor.
20100//
20101// Returns A tensor that will be provided using the infeed mechanism.
20102func InfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
20103	if scope.Err() != nil {
20104		return
20105	}
20106	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
20107	opspec := tf.OpSpec{
20108		Type: "InfeedDequeue",
20109
20110		Attrs: attrs,
20111	}
20112	op := scope.AddOperation(opspec)
20113	return op.Output(0)
20114}
20115
20116// Fetches multiple values from infeed as an XLA tuple.
20117//
20118// Arguments:
20119//
20120//	dtypes: The element types of each element in `outputs`.
20121//	shapes: The shapes of each tensor in `outputs`.
20122//
20123// Returns A list of tensors that will be provided using the infeed mechanism.
20124func InfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
20125	if scope.Err() != nil {
20126		return
20127	}
20128	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
20129	opspec := tf.OpSpec{
20130		Type: "InfeedDequeueTuple",
20131
20132		Attrs: attrs,
20133	}
20134	op := scope.AddOperation(opspec)
20135	if scope.Err() != nil {
20136		return
20137	}
20138	var idx int
20139	var err error
20140	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
20141		scope.UpdateErr("InfeedDequeueTuple", err)
20142		return
20143	}
20144	return outputs
20145}
20146
20147// InfeedEnqueueAttr is an optional argument to InfeedEnqueue.
20148type InfeedEnqueueAttr func(optionalAttr)
20149
20150// InfeedEnqueueShape sets the optional shape attribute to value.
20151//
20152// value: The shape of the tensor.
20153// If not specified, defaults to {}
20154func InfeedEnqueueShape(value tf.Shape) InfeedEnqueueAttr {
20155	return func(m optionalAttr) {
20156		m["shape"] = value
20157	}
20158}
20159
20160// InfeedEnqueueLayout sets the optional layout attribute to value.
20161//
20162// value: A vector holding the requested layout in minor-to-major sequence.
20163// If a layout attribute is passed, but its values are all -1, the layout will
20164// be computed by the infeed operation.
20165// If not specified, defaults to {}
20166func InfeedEnqueueLayout(value []int64) InfeedEnqueueAttr {
20167	return func(m optionalAttr) {
20168		m["layout"] = value
20169	}
20170}
20171
20172// InfeedEnqueueDeviceOrdinal sets the optional device_ordinal attribute to value.
20173//
20174// value: The TPU device to use. This should be -1 when the Op
20175// is running on a TPU device, and >= 0 when the Op is running on the CPU
20176// device.
20177// If not specified, defaults to -1
20178func InfeedEnqueueDeviceOrdinal(value int64) InfeedEnqueueAttr {
20179	return func(m optionalAttr) {
20180		m["device_ordinal"] = value
20181	}
20182}
20183
20184// An op which feeds a single Tensor value into the computation.
20185//
20186// Arguments:
20187//
20188//	input: A tensor that will be provided using the infeed mechanism.
20189//
20190// Returns the created operation.
20191func InfeedEnqueue(scope *Scope, input tf.Output, optional ...InfeedEnqueueAttr) (o *tf.Operation) {
20192	if scope.Err() != nil {
20193		return
20194	}
20195	attrs := map[string]interface{}{}
20196	for _, a := range optional {
20197		a(attrs)
20198	}
20199	opspec := tf.OpSpec{
20200		Type: "InfeedEnqueue",
20201		Input: []tf.Input{
20202			input,
20203		},
20204		Attrs: attrs,
20205	}
20206	return scope.AddOperation(opspec)
20207}
20208
20209// InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer.
20210type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr)
20211
20212// InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value.
20213//
20214// value: The TPU device to use. This should be -1 when the Op is running on a TPU device
20215// and >= 0 when the Op is running on the CPU device.
20216// If not specified, defaults to -1
20217func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr {
20218	return func(m optionalAttr) {
20219		m["device_ordinal"] = value
20220	}
20221}
20222
20223// An op which enqueues a prelinearized buffer into TPU infeed.
20224//
20225// Arguments:
20226//
20227//	input: A variant tensor representing linearized output.
20228//
20229// Returns the created operation.
20230func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation) {
20231	if scope.Err() != nil {
20232		return
20233	}
20234	attrs := map[string]interface{}{}
20235	for _, a := range optional {
20236		a(attrs)
20237	}
20238	opspec := tf.OpSpec{
20239		Type: "InfeedEnqueuePrelinearizedBuffer",
20240		Input: []tf.Input{
20241			input,
20242		},
20243		Attrs: attrs,
20244	}
20245	return scope.AddOperation(opspec)
20246}
20247
20248// InfeedEnqueueTupleAttr is an optional argument to InfeedEnqueueTuple.
20249type InfeedEnqueueTupleAttr func(optionalAttr)
20250
20251// InfeedEnqueueTupleLayouts sets the optional layouts attribute to value.
20252//
20253// value: A vector holding the requested layout in minor-to-major sequence for
20254// all the tuple shapes, in the order the shapes appear in the "shapes" input.
20255// The layout elements for a sub-shape can be set to -1, in which case the
20256// corresponding layout will be computed by the infeed operation.
20257// If not specified, defaults to {}
20258func InfeedEnqueueTupleLayouts(value []int64) InfeedEnqueueTupleAttr {
20259	return func(m optionalAttr) {
20260		m["layouts"] = value
20261	}
20262}
20263
20264// InfeedEnqueueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
20265//
20266// value: The TPU device to use. This should be -1 when the Op
20267// is running on a TPU device, and >= 0 when the Op is running on the CPU
20268// device.
20269// If not specified, defaults to -1
20270func InfeedEnqueueTupleDeviceOrdinal(value int64) InfeedEnqueueTupleAttr {
20271	return func(m optionalAttr) {
20272		m["device_ordinal"] = value
20273	}
20274}
20275
20276// Feeds multiple Tensor values into the computation as an XLA tuple.
20277//
20278// Arguments:
20279//
20280//	inputs: A list of tensors that will be provided using the infeed mechanism.
20281//	shapes: The shapes of each tensor in `inputs`.
20282//
20283// Returns the created operation.
20284func InfeedEnqueueTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...InfeedEnqueueTupleAttr) (o *tf.Operation) {
20285	if scope.Err() != nil {
20286		return
20287	}
20288	attrs := map[string]interface{}{"shapes": shapes}
20289	for _, a := range optional {
20290		a(attrs)
20291	}
20292	opspec := tf.OpSpec{
20293		Type: "InfeedEnqueueTuple",
20294		Input: []tf.Input{
20295			tf.OutputList(inputs),
20296		},
20297		Attrs: attrs,
20298	}
20299	return scope.AddOperation(opspec)
20300}
20301
20302// InitializeTableFromTextFileV2Attr is an optional argument to InitializeTableFromTextFileV2.
20303type InitializeTableFromTextFileV2Attr func(optionalAttr)
20304
20305// InitializeTableFromTextFileV2VocabSize sets the optional vocab_size attribute to value.
20306//
20307// value: Number of elements of the file, use -1 if unknown.
20308// If not specified, defaults to -1
20309//
20310// REQUIRES: value >= -1
20311func InitializeTableFromTextFileV2VocabSize(value int64) InitializeTableFromTextFileV2Attr {
20312	return func(m optionalAttr) {
20313		m["vocab_size"] = value
20314	}
20315}
20316
20317// InitializeTableFromTextFileV2Delimiter sets the optional delimiter attribute to value.
20318//
20319// value: Delimiter to separate fields in a line.
20320// If not specified, defaults to "\t"
20321func InitializeTableFromTextFileV2Delimiter(value string) InitializeTableFromTextFileV2Attr {
20322	return func(m optionalAttr) {
20323		m["delimiter"] = value
20324	}
20325}
20326
20327// InitializeTableFromTextFileV2Offset sets the optional offset attribute to value.
20328// If not specified, defaults to 0
20329func InitializeTableFromTextFileV2Offset(value int64) InitializeTableFromTextFileV2Attr {
20330	return func(m optionalAttr) {
20331		m["offset"] = value
20332	}
20333}
20334
20335// Initializes a table from a text file.
20336//
20337// It inserts one key-value pair into the table for each line of the file.
20338// The key and value are extracted from the whole line content, from elements of
20339// the line split on `delimiter`, or from the line number (starting from zero).
20340// Where to extract the key and value from a line is specified by `key_index` and
20341// `value_index`.
20342//
20343//   - A value of -1 means use the line number (starting from zero); expects `int64`.
20344//   - A value of -2 means use the whole line content, expects `string`.
20345//   - A value >= 0 means use the index (starting at zero) of the split line based
20346//     on `delimiter`.
20347//
20348// Arguments:
20349//
20350//	table_handle: Handle to a table which will be initialized.
20351//	filename: Filename of a vocabulary text file.
20352//	key_index: Column index in a line to get the table `key` values from.
20353//	value_index: Column index that represents information of a line to get the table
20354//
20355// `value` values from.
20356//
20357// Returns the created operation.
20358func InitializeTableFromTextFileV2(scope *Scope, table_handle tf.Output, filename tf.Output, key_index int64, value_index int64, optional ...InitializeTableFromTextFileV2Attr) (o *tf.Operation) {
20359	if scope.Err() != nil {
20360		return
20361	}
20362	attrs := map[string]interface{}{"key_index": key_index, "value_index": value_index}
20363	for _, a := range optional {
20364		a(attrs)
20365	}
20366	opspec := tf.OpSpec{
20367		Type: "InitializeTableFromTextFileV2",
20368		Input: []tf.Input{
20369			table_handle, filename,
20370		},
20371		Attrs: attrs,
20372	}
20373	return scope.AddOperation(opspec)
20374}
20375
20376// Table initializer that takes two tensors for keys and values respectively.
20377//
20378// Arguments:
20379//
20380//	table_handle: Handle to a table which will be initialized.
20381//	keys: Keys of type Tkey.
20382//	values: Values of type Tval.
20383//
20384// Returns the created operation.
20385func InitializeTableV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
20386	if scope.Err() != nil {
20387		return
20388	}
20389	opspec := tf.OpSpec{
20390		Type: "InitializeTableV2",
20391		Input: []tf.Input{
20392			table_handle, keys, values,
20393		},
20394	}
20395	return scope.AddOperation(opspec)
20396}
20397
20398// Adds v into specified rows of x.
20399//
20400//	Computes y = x; y[i, :] += v; return y.
20401//
20402// Arguments:
20403//
20404//	x: A `Tensor` of type T.
20405//	i: A vector. Indices into the left-most dimension of `x`.
20406//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
20407//
20408// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
20409func InplaceAdd(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
20410	if scope.Err() != nil {
20411		return
20412	}
20413	opspec := tf.OpSpec{
20414		Type: "InplaceAdd",
20415		Input: []tf.Input{
20416			x, i, v,
20417		},
20418	}
20419	op := scope.AddOperation(opspec)
20420	return op.Output(0)
20421}
20422
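// A concrete sketch of the row semantics (editorial, not part of the
// generated API): the indices in `i` select which rows of `x` receive the
// matching rows of `v`.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	x := op.Const(s, [][]int32{{1, 1, 1}, {2, 2, 2}, {3, 3, 3}})
// 	i := op.Const(s, []int32{0, 2})
// 	v := op.Const(s, [][]int32{{10, 10, 10}, {20, 20, 20}})
// 	y := op.InplaceAdd(s, x, i, v)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{y}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [[11 11 11] [2 2 2] [23 23 23]]
// }
// ```
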
20423// Subtracts `v` from specified rows of `x`.
20424//
20425//	Computes y = x; y[i, :] -= v; return y.
20426//
20427// Arguments:
20428//
20429//	x: A `Tensor` of type T.
20430//	i: A vector. Indices into the left-most dimension of `x`.
20431//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
20432//
20433// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
20434func InplaceSub(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
20435	if scope.Err() != nil {
20436		return
20437	}
20438	opspec := tf.OpSpec{
20439		Type: "InplaceSub",
20440		Input: []tf.Input{
20441			x, i, v,
20442		},
20443	}
20444	op := scope.AddOperation(opspec)
20445	return op.Output(0)
20446}
20447
20448// Updates specified rows 'i' with values 'v'.
20449//
20450// Computes `x[i, :] = v; return x`.
20451//
20452// Originally this function was mutative; however, for compilation we make this
20453// operation create and operate on a copy of `x`.
20454//
20455// Arguments:
20456//
20457//	x: A tensor of type `T`.
20458//	i: A vector. Indices into the left-most dimension of `x`.
20459//	v: A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.
20460//
20461// Returns A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.
20462func InplaceUpdate(scope *Scope, x tf.Output, i tf.Output, v tf.Output) (y tf.Output) {
20463	if scope.Err() != nil {
20464		return
20465	}
20466	opspec := tf.OpSpec{
20467		Type: "InplaceUpdate",
20468		Input: []tf.Input{
20469			x, i, v,
20470		},
20471	}
20472	op := scope.AddOperation(opspec)
20473	return op.Output(0)
20474}
20475
20476// Computes the reciprocal of x element-wise.
20477//
20478// I.e., \\(y = 1 / x\\).
20479func Inv(scope *Scope, x tf.Output) (y tf.Output) {
20480	if scope.Err() != nil {
20481		return
20482	}
20483	opspec := tf.OpSpec{
20484		Type: "Inv",
20485		Input: []tf.Input{
20486			x,
20487		},
20488	}
20489	op := scope.AddOperation(opspec)
20490	return op.Output(0)
20491}
20492
20493// Computes the gradient for the inverse of `x` wrt its input.
20494//
20495// Specifically, `grad = -dy * y * y`, where `y = 1/x`, and `dy`
20496// is the corresponding input gradient.
20497func InvGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
20498	if scope.Err() != nil {
20499		return
20500	}
20501	opspec := tf.OpSpec{
20502		Type: "InvGrad",
20503		Input: []tf.Input{
20504			y, dy,
20505		},
20506	}
20507	op := scope.AddOperation(opspec)
20508	return op.Output(0)
20509}
20510
20511// Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
20512//
20513// Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
20514// This operation is performed on each element of the tensor argument `x`.
20515//
20516// Example:
20517// ```python
20518// import tensorflow as tf
20519// from tensorflow.python.framework import dtypes
20520// from tensorflow.python.ops import bitwise_ops
20521//
20522// # flip 2 (00000010) to -3 (11111101)
20523// tf.assert_equal(-3, bitwise_ops.invert(2))
20524//
20525// dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
20526//               dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
20527// inputs = [0, 5, 3, 14]
20528// for dtype in dtype_list:
20529//   # Because of issues with negative numbers, let's test this indirectly.
20530//   # 1. invert(a) and a = 0
20531//   # 2. invert(a) or a = invert(0)
20532//   input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
20533//   not_a_and_a = bitwise_ops.bitwise_and(input_tensor,
20534//                                         bitwise_ops.invert(input_tensor))
20535//   not_a_or_a = bitwise_ops.bitwise_or(input_tensor,
20536//                                       bitwise_ops.invert(input_tensor))
20537//   not_0 = bitwise_ops.invert(tf.constant(0, dtype=dtype))
20538//
20539//   expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
20540//   tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
20541//
20542//   expected = tf.cast([not_0] * 4, tf.float32)
20543//   tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
20544//
20545//   # For unsigned dtypes let's also check the result directly.
20546//   if dtype.is_unsigned:
20547//     inverted = bitwise_ops.invert(input_tensor)
20548//     expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
20549//     tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
20550// ```
20555func Invert(scope *Scope, x tf.Output) (y tf.Output) {
20556	if scope.Err() != nil {
20557		return
20558	}
20559	opspec := tf.OpSpec{
20560		Type: "Invert",
20561		Input: []tf.Input{
20562			x,
20563		},
20564	}
20565	op := scope.AddOperation(opspec)
20566	return op.Output(0)
20567}
20568
20569// Computes the inverse permutation of a tensor.
20570//
20571// This operation computes the inverse of an index permutation. It takes a 1-D
20572// integer tensor `x`, which represents the indices of a zero-based array, and
20573// swaps each value with its index position. In other words, for an output tensor
20574// `y` and an input tensor `x`, this operation computes the following:
20575//
20576// `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
20577//
20578// The values must include 0. There can be no duplicate values or negative values.
20579//
20580// For example:
20581//
20582// ```
20583// # tensor `x` is [3, 4, 0, 2, 1]
20584// invert_permutation(x) ==> [2, 4, 3, 0, 1]
20585// ```
20586//
20587// Arguments:
20588//
20589//	x: 1-D.
20590//
20591// Returns 1-D.
20592func InvertPermutation(scope *Scope, x tf.Output) (y tf.Output) {
20593	if scope.Err() != nil {
20594		return
20595	}
20596	opspec := tf.OpSpec{
20597		Type: "InvertPermutation",
20598		Input: []tf.Input{
20599			x,
20600		},
20601	}
20602	op := scope.AddOperation(opspec)
20603	return op.Output(0)
20604}
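// The example above, end to end in Go (an editorial sketch, not part of the
// generated API):
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	x := op.Const(s, []int32{3, 4, 0, 2, 1})
// 	y := op.InvertPermutation(s, x)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{y}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [2 4 3 0 1]
// }
// ```
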
20605
20606// Checks whether a tree ensemble has been initialized.
20607//
20608// Arguments:
20609//
20610//	tree_ensemble_handle: Handle to the tree ensemble resource.
20611//
20612// Returns output boolean on whether it is initialized or not.
20613func IsBoostedTreesEnsembleInitialized(scope *Scope, tree_ensemble_handle tf.Output) (is_initialized tf.Output) {
20614	if scope.Err() != nil {
20615		return
20616	}
20617	opspec := tf.OpSpec{
20618		Type: "IsBoostedTreesEnsembleInitialized",
20619		Input: []tf.Input{
20620			tree_ensemble_handle,
20621		},
20622	}
20623	op := scope.AddOperation(opspec)
20624	return op.Output(0)
20625}
20626
20627// Checks whether a quantile stream has been initialized.
20628//
20629// An Op that checks if the quantile stream resource is initialized.
20630//
20631// Arguments:
20632//
20633//	quantile_stream_resource_handle: resource; The reference to quantile stream resource handle.
20634//
20635// Returns bool; True if the resource is initialized, False otherwise.
20636func IsBoostedTreesQuantileStreamResourceInitialized(scope *Scope, quantile_stream_resource_handle tf.Output) (is_initialized tf.Output) {
20637	if scope.Err() != nil {
20638		return
20639	}
20640	opspec := tf.OpSpec{
20641		Type: "IsBoostedTreesQuantileStreamResourceInitialized",
20642		Input: []tf.Input{
20643			quantile_stream_resource_handle,
20644		},
20645	}
20646	op := scope.AddOperation(opspec)
20647	return op.Output(0)
20648}
20649
20650// Returns which elements of x are finite.
20651//
20652// @compatibility(numpy)
20653// Equivalent to np.isfinite
20654// @end_compatibility
20655//
20656// Example:
20657//
20658// ```python
20659// x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
20660// tf.math.is_finite(x) ==> [True, True, True, False, False]
20661// ```
20662func IsFinite(scope *Scope, x tf.Output) (y tf.Output) {
20663	if scope.Err() != nil {
20664		return
20665	}
20666	opspec := tf.OpSpec{
20667		Type: "IsFinite",
20668		Input: []tf.Input{
20669			x,
20670		},
20671	}
20672	op := scope.AddOperation(opspec)
20673	return op.Output(0)
20674}
20675
20676// Returns which elements of x are Inf.
20677//
20678// @compatibility(numpy)
20679// Equivalent to np.isinf
20680// @end_compatibility
20681//
20682// Example:
20683//
20684// ```python
20685// x = tf.constant([5.0, np.inf, 6.8, np.inf])
20686// tf.math.is_inf(x) ==> [False, True, False, True]
20687// ```
20688func IsInf(scope *Scope, x tf.Output) (y tf.Output) {
20689	if scope.Err() != nil {
20690		return
20691	}
20692	opspec := tf.OpSpec{
20693		Type: "IsInf",
20694		Input: []tf.Input{
20695			x,
20696		},
20697	}
20698	op := scope.AddOperation(opspec)
20699	return op.Output(0)
20700}
20701
20702// Returns which elements of x are NaN.
20703//
20704// @compatibility(numpy)
20705// Equivalent to np.isnan
20706// @end_compatibility
20707//
20708// Example:
20709//
20710// ```python
20711// x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
20712// tf.math.is_nan(x) ==> [False, True, False, True, False]
20713// ```
20714func IsNan(scope *Scope, x tf.Output) (y tf.Output) {
20715	if scope.Err() != nil {
20716		return
20717	}
20718	opspec := tf.OpSpec{
20719		Type: "IsNan",
20720		Input: []tf.Input{
20721			x,
20722		},
20723	}
20724	op := scope.AddOperation(opspec)
20725	return op.Output(0)
20726}
20727
20728// IsTPUEmbeddingInitializedAttr is an optional argument to IsTPUEmbeddingInitialized.
20729type IsTPUEmbeddingInitializedAttr func(optionalAttr)
20730
20731// IsTPUEmbeddingInitializedConfig sets the optional config attribute to value.
20732// If not specified, defaults to ""
20733func IsTPUEmbeddingInitializedConfig(value string) IsTPUEmbeddingInitializedAttr {
20734	return func(m optionalAttr) {
20735		m["config"] = value
20736	}
20737}
20738
20739// Whether TPU Embedding is initialized in a distributed TPU system.
20740func IsTPUEmbeddingInitialized(scope *Scope, optional ...IsTPUEmbeddingInitializedAttr) (is_tpu_embedding_initialized tf.Output) {
20741	if scope.Err() != nil {
20742		return
20743	}
20744	attrs := map[string]interface{}{}
20745	for _, a := range optional {
20746		a(attrs)
20747	}
20748	opspec := tf.OpSpec{
20749		Type: "IsTPUEmbeddingInitialized",
20750
20751		Attrs: attrs,
20752	}
20753	op := scope.AddOperation(opspec)
20754	return op.Output(0)
20755}
20756
20757// IsotonicRegressionAttr is an optional argument to IsotonicRegression.
20758type IsotonicRegressionAttr func(optionalAttr)
20759
20760// IsotonicRegressionOutputDtype sets the optional output_dtype attribute to value.
20761//
20762// value: Dtype of output.
20763// If not specified, defaults to DT_FLOAT
20764func IsotonicRegressionOutputDtype(value tf.DataType) IsotonicRegressionAttr {
20765	return func(m optionalAttr) {
20766		m["output_dtype"] = value
20767	}
20768}
20769
20770// Solves a batch of isotonic regression problems.
20771//
20772// Arguments:
20773//
20774//	input: A (batch_size, dim)-tensor holding a batch of inputs.
20775//
20776// Returns:
20777//
20778//	output: A (batch_size, dim)-tensor holding the per-batch element solutions.
20779//	segments: An int32 (batch_size, dim)-tensor with the segments.
20780func IsotonicRegression(scope *Scope, input tf.Output, optional ...IsotonicRegressionAttr) (output tf.Output, segments tf.Output) {
20781	if scope.Err() != nil {
20782		return
20783	}
20784	attrs := map[string]interface{}{}
20785	for _, a := range optional {
20786		a(attrs)
20787	}
20788	opspec := tf.OpSpec{
20789		Type: "IsotonicRegression",
20790		Input: []tf.Input{
20791			input,
20792		},
20793		Attrs: attrs,
20794	}
20795	op := scope.AddOperation(opspec)
20796	return op.Output(0), op.Output(1)
20797}
20798
20799// A container for an iterator resource.
20800//
20801// Returns A handle to the iterator that can be passed to a "MakeIterator"
20802// or "IteratorGetNext" op.
20803func Iterator(scope *Scope, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
20804	if scope.Err() != nil {
20805		return
20806	}
20807	attrs := map[string]interface{}{"shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
20808	opspec := tf.OpSpec{
20809		Type: "Iterator",
20810
20811		Attrs: attrs,
20812	}
20813	op := scope.AddOperation(opspec)
20814	return op.Output(0)
20815}
20816
20817// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle.
20818type IteratorFromStringHandleAttr func(optionalAttr)
20819
20820// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
20821//
20822// value: If specified, defines the type of each tuple component in an
20823// element produced by the resulting iterator.
20824// If not specified, defaults to {}
20825//
20826// REQUIRES: len(value) >= 0
20827func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr {
20828	return func(m optionalAttr) {
20829		m["output_types"] = value
20830	}
20831}
20832
20833// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
20834//
20835// value: If specified, defines the shape of each tuple component in an
20836// element produced by the resulting iterator.
20837// If not specified, defaults to {}
20838//
20839// REQUIRES: len(value) >= 0
20840func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr {
20841	return func(m optionalAttr) {
20842		m["output_shapes"] = value
20843	}
20844}
20845
20846// Converts the given string representing a handle to an iterator to a resource.
20847//
20848// Arguments:
20849//
20850//	string_handle: A string representation of the given handle.
20851//
20852// Returns A handle to an iterator resource.
20853func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) {
20854	if scope.Err() != nil {
20855		return
20856	}
20857	attrs := map[string]interface{}{}
20858	for _, a := range optional {
20859		a(attrs)
20860	}
20861	opspec := tf.OpSpec{
20862		Type: "IteratorFromStringHandle",
20863		Input: []tf.Input{
20864			string_handle,
20865		},
20866		Attrs: attrs,
20867	}
20868	op := scope.AddOperation(opspec)
20869	return op.Output(0)
20870}
20871
20872// Returns the name of the device on which `resource` has been placed.
20873func IteratorGetDevice(scope *Scope, resource tf.Output) (device tf.Output) {
20874	if scope.Err() != nil {
20875		return
20876	}
20877	opspec := tf.OpSpec{
20878		Type: "IteratorGetDevice",
20879		Input: []tf.Input{
20880			resource,
20881		},
20882	}
20883	op := scope.AddOperation(opspec)
20884	return op.Output(0)
20885}
20886
20887// Gets the next output from the given iterator.
20888func IteratorGetNext(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
20889	if scope.Err() != nil {
20890		return
20891	}
20892	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
20893	opspec := tf.OpSpec{
20894		Type: "IteratorGetNext",
20895		Input: []tf.Input{
20896			iterator,
20897		},
20898		Attrs: attrs,
20899	}
20900	op := scope.AddOperation(opspec)
20901	if scope.Err() != nil {
20902		return
20903	}
20904	var idx int
20905	var err error
20906	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
20907		scope.UpdateErr("IteratorGetNext", err)
20908		return
20909	}
20910	return components
20911}
20912
20913// Gets the next output from the given iterator as an Optional variant.
20914func IteratorGetNextAsOptional(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (optional tf.Output) {
20915	if scope.Err() != nil {
20916		return
20917	}
20918	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
20919	opspec := tf.OpSpec{
20920		Type: "IteratorGetNextAsOptional",
20921		Input: []tf.Input{
20922			iterator,
20923		},
20924		Attrs: attrs,
20925	}
20926	op := scope.AddOperation(opspec)
20927	return op.Output(0)
20928}
20929
20930// Gets the next output from the given iterator.
20931//
20932// This operation is a synchronous version of IteratorGetNext. It should only be used
20933// in situations where the iterator does not block the calling thread, or where
20934// the calling thread is not a member of the thread pool used to execute parallel
20935// operations (e.g. in eager mode).
20936func IteratorGetNextSync(scope *Scope, iterator tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
20937	if scope.Err() != nil {
20938		return
20939	}
20940	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
20941	opspec := tf.OpSpec{
20942		Type: "IteratorGetNextSync",
20943		Input: []tf.Input{
20944			iterator,
20945		},
20946		Attrs: attrs,
20947	}
20948	op := scope.AddOperation(opspec)
20949	if scope.Err() != nil {
20950		return
20951	}
20952	var idx int
20953	var err error
20954	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
20955		scope.UpdateErr("IteratorGetNextSync", err)
20956		return
20957	}
20958	return components
20959}
20960
20961// Converts the given `resource_handle` representing an iterator to a string.
20962//
20963// Arguments:
20964//
20965//	resource_handle: A handle to an iterator resource.
20966//
20967// Returns A string representation of the given handle.
20968func IteratorToStringHandle(scope *Scope, resource_handle tf.Output) (string_handle tf.Output) {
20969	if scope.Err() != nil {
20970		return
20971	}
20972	opspec := tf.OpSpec{
20973		Type: "IteratorToStringHandle",
20974		Input: []tf.Input{
20975			resource_handle,
20976		},
20977	}
20978	op := scope.AddOperation(opspec)
20979	return op.Output(0)
20980}
20981
20982// Returns the index of a data point that should be added to the seed set.
20983//
20984// Entries in distances are assumed to be squared distances of candidate points to
20985// the already sampled centers in the seed set. The op constructs one Markov chain
20986// of the k-MC^2 algorithm and returns the index of one candidate point to be added
20987// as an additional cluster center.
20988//
20989// Arguments:
20990//
20991//	distances: Vector with squared distances to the closest previously sampled cluster center
20992//
20993// for each candidate point.
20994//
20995//	seed: Scalar. Seed for initializing the random number generator.
20996//
20997// Returns Scalar with the index of the sampled point.
20998func KMC2ChainInitialization(scope *Scope, distances tf.Output, seed tf.Output) (index tf.Output) {
20999	if scope.Err() != nil {
21000		return
21001	}
21002	opspec := tf.OpSpec{
21003		Type: "KMC2ChainInitialization",
21004		Input: []tf.Input{
21005			distances, seed,
21006		},
21007	}
21008	op := scope.AddOperation(opspec)
21009	return op.Output(0)
21010}
21011
21012// Selects num_to_sample rows of input using the KMeans++ criterion.
21013//
21014// Rows of points are assumed to be input points. One row is selected at random.
21015// Subsequent rows are sampled with probability proportional to the squared L2
21016// distance from the nearest row selected thus far, until num_to_sample rows have
21017// been sampled.
21018//
21019// Arguments:
21020//
21021//	points: Matrix of shape (n, d). Rows are assumed to be input points.
21022//	num_to_sample: Scalar. The number of rows to sample. This value must not be larger than n.
21023//	seed: Scalar. Seed for initializing the random number generator.
21024//	num_retries_per_sample: Scalar. For each row that is sampled, this parameter
21025//
21026// specifies the number of additional points to draw from the current
21027// distribution before selecting the best. If a negative value is specified, a
21028// heuristic is used to sample O(log(num_to_sample)) additional points.
21029//
21030// Returns Matrix of shape (num_to_sample, d). The sampled rows.
21031func KmeansPlusPlusInitialization(scope *Scope, points tf.Output, num_to_sample tf.Output, seed tf.Output, num_retries_per_sample tf.Output) (samples tf.Output) {
21032	if scope.Err() != nil {
21033		return
21034	}
21035	opspec := tf.OpSpec{
21036		Type: "KmeansPlusPlusInitialization",
21037		Input: []tf.Input{
21038			points, num_to_sample, seed, num_retries_per_sample,
21039		},
21040	}
21041	op := scope.AddOperation(opspec)
21042	return op.Output(0)
21043}
21044
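// A usage sketch (editorial, not part of the generated API). The points and
// seed are illustrative; with two well-separated pairs of points, sampling
// two rows tends to pick one point from each pair.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	points := op.Const(s, [][]float32{{0, 0}, {0, 1}, {10, 0}, {10, 1}})
// 	samples := op.KmeansPlusPlusInitialization(s,
// 		points,
// 		op.Const(s, int64(2)),  // num_to_sample
// 		op.Const(s, int64(42)), // seed
// 		op.Const(s, int64(-1))) // negative: use the O(log) retry heuristic
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{samples}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // a (2, 2) matrix of sampled rows
// }
// ```
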
21045// Computes the Kth order statistic of a data set.
21046//
21047// The current implementation uses a binary search requiring exactly 32 passes
21048// over the input data. The running time is linear with respect to input
21049// size. The median-of-medians algorithm is probably faster, but is
21050// difficult to implement efficiently in XLA. The implementation imposes
21051// a total ordering on floats. The ordering is consistent with the usual
21052// partial order. Positive NaNs are greater than positive
21053// infinity. Negative NaNs are less than negative infinity. NaNs with
21054// distinct payloads are treated as distinct. Subnormal numbers are
21055// preserved (not flushed to zero). Positive infinity is greater than all
21056// numbers. Negative infinity is less than all numbers. Positive zero is
21057// greater than negative zero. There are fewer than k values greater than
21058// the Kth order statistic. There are at least k values greater than or
21059// equal to the Kth order statistic. The semantics are not the same as
21060// top_k_unique.
21061func KthOrderStatistic(scope *Scope, input tf.Output, k int64) (output tf.Output) {
21062	if scope.Err() != nil {
21063		return
21064	}
21065	attrs := map[string]interface{}{"k": k}
21066	opspec := tf.OpSpec{
21067		Type: "KthOrderStatistic",
21068		Input: []tf.Input{
21069			input,
21070		},
21071		Attrs: attrs,
21072	}
21073	op := scope.AddOperation(opspec)
21074	return op.Output(0)
21075}
21076
21077// L2 Loss.
21078//
21079// Computes half the L2 norm of a tensor without the `sqrt`:
21080//
21081//	output = sum(t ** 2) / 2
21082//
21083// Arguments:
21084//
21085//	t: Typically 2-D, but may have any dimensions.
21086//
21087// Returns 0-D.
21088func L2Loss(scope *Scope, t tf.Output) (output tf.Output) {
21089	if scope.Err() != nil {
21090		return
21091	}
21092	opspec := tf.OpSpec{
21093		Type: "L2Loss",
21094		Input: []tf.Input{
21095			t,
21096		},
21097	}
21098	op := scope.AddOperation(opspec)
21099	return op.Output(0)
21100}
21101
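// A one-line arithmetic check (editorial sketch, not part of the generated
// API): for t = [1, 2, 3], sum(t ** 2) / 2 = (1 + 4 + 9) / 2 = 7.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	loss := op.L2Loss(s, op.Const(s, []float32{1, 2, 3}))
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{loss}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // 7
// }
// ```
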
21102// Creates a dataset that emits the key-value pairs in one or more LMDB files.
21103//
21104// The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary
21105// key-value database. This dataset can read the contents of LMDB database files,
21106// the names of which generally have the `.mdb` suffix.
21107//
21108// Each output element consists of a key-value pair represented as a pair of
21109// scalar string `Tensor`s, where the first `Tensor` contains the key and the
21110// second `Tensor` contains the value.
21111//
21112// LMDB uses different file formats on big- and little-endian machines.
21113// `LMDBDataset` can only read files in the format of the host machine.
21114//
21115// Arguments:
21116//
21117//	filenames: A scalar or a vector containing the name(s) of the binary file(s) to be
21118//
21119// read.
21120func LMDBDataset(scope *Scope, filenames tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
21121	if scope.Err() != nil {
21122		return
21123	}
21124	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
21125	opspec := tf.OpSpec{
21126		Type: "LMDBDataset",
21127		Input: []tf.Input{
21128			filenames,
21129		},
21130		Attrs: attrs,
21131	}
21132	op := scope.AddOperation(opspec)
21133	return op.Output(0)
21134}
21135
21136// LRNAttr is an optional argument to LRN.
21137type LRNAttr func(optionalAttr)
21138
21139// LRNDepthRadius sets the optional depth_radius attribute to value.
21140//
21141// value: 0-D.  Half-width of the 1-D normalization window.
21142// If not specified, defaults to 5
21143func LRNDepthRadius(value int64) LRNAttr {
21144	return func(m optionalAttr) {
21145		m["depth_radius"] = value
21146	}
21147}
21148
21149// LRNBias sets the optional bias attribute to value.
21150//
21151// value: An offset (usually positive to avoid dividing by 0).
21152// If not specified, defaults to 1
21153func LRNBias(value float32) LRNAttr {
21154	return func(m optionalAttr) {
21155		m["bias"] = value
21156	}
21157}
21158
21159// LRNAlpha sets the optional alpha attribute to value.
21160//
21161// value: A scale factor, usually positive.
21162// If not specified, defaults to 1
21163func LRNAlpha(value float32) LRNAttr {
21164	return func(m optionalAttr) {
21165		m["alpha"] = value
21166	}
21167}
21168
21169// LRNBeta sets the optional beta attribute to value.
21170//
21171// value: An exponent.
21172// If not specified, defaults to 0.5
21173func LRNBeta(value float32) LRNAttr {
21174	return func(m optionalAttr) {
21175		m["beta"] = value
21176	}
21177}
21178
21179// Local Response Normalization.
21180//
21181// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
21182// dimension), and each vector is normalized independently.  Within a given vector,
21183// each component is divided by the weighted, squared sum of inputs within
21184// `depth_radius`.  In detail,
21185//
21186//	sqr_sum[a, b, c, d] =
21187//	    sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
21188//	output = input / (bias + alpha * sqr_sum) ** beta
21189//
21190// For details, see [Krizhevsky et al., ImageNet classification with deep
21191// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
21192//
21193// Arguments:
21194//
21195//	input: 4-D.
21196func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) {
21197	if scope.Err() != nil {
21198		return
21199	}
21200	attrs := map[string]interface{}{}
21201	for _, a := range optional {
21202		a(attrs)
21203	}
21204	opspec := tf.OpSpec{
21205		Type: "LRN",
21206		Input: []tf.Input{
21207			input,
21208		},
21209		Attrs: attrs,
21210	}
21211	op := scope.AddOperation(opspec)
21212	return op.Output(0)
21213}
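
// The optional-attribute helpers above compose as trailing arguments, e.g.
// (a sketch; `images` is assumed to be a previously built 4-D float
// tf.Output):
//
//	out := LRN(root, images,
//		LRNDepthRadius(2), LRNBias(1.0), LRNAlpha(1e-4), LRNBeta(0.75))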
21214
21215// LRNGradAttr is an optional argument to LRNGrad.
21216type LRNGradAttr func(optionalAttr)
21217
21218// LRNGradDepthRadius sets the optional depth_radius attribute to value.
21219//
21220// value: A depth radius.
21221// If not specified, defaults to 5
21222func LRNGradDepthRadius(value int64) LRNGradAttr {
21223	return func(m optionalAttr) {
21224		m["depth_radius"] = value
21225	}
21226}
21227
21228// LRNGradBias sets the optional bias attribute to value.
21229//
21230// value: An offset (usually > 0 to avoid dividing by 0).
21231// If not specified, defaults to 1
21232func LRNGradBias(value float32) LRNGradAttr {
21233	return func(m optionalAttr) {
21234		m["bias"] = value
21235	}
21236}
21237
21238// LRNGradAlpha sets the optional alpha attribute to value.
21239//
21240// value: A scale factor, usually positive.
21241// If not specified, defaults to 1
21242func LRNGradAlpha(value float32) LRNGradAttr {
21243	return func(m optionalAttr) {
21244		m["alpha"] = value
21245	}
21246}
21247
21248// LRNGradBeta sets the optional beta attribute to value.
21249//
21250// value: An exponent.
21251// If not specified, defaults to 0.5
21252func LRNGradBeta(value float32) LRNGradAttr {
21253	return func(m optionalAttr) {
21254		m["beta"] = value
21255	}
21256}
21257
21258// Gradients for Local Response Normalization.
21259//
21260// Arguments:
21261//
21262//	input_grads: 4-D with shape `[batch, height, width, channels]`.
21263//	input_image: 4-D with shape `[batch, height, width, channels]`.
21264//	output_image: 4-D with shape `[batch, height, width, channels]`.
21265//
21266// Returns The gradients for LRN.
21267func LRNGrad(scope *Scope, input_grads tf.Output, input_image tf.Output, output_image tf.Output, optional ...LRNGradAttr) (output tf.Output) {
21268	if scope.Err() != nil {
21269		return
21270	}
21271	attrs := map[string]interface{}{}
21272	for _, a := range optional {
21273		a(attrs)
21274	}
21275	opspec := tf.OpSpec{
21276		Type: "LRNGrad",
21277		Input: []tf.Input{
21278			input_grads, input_image, output_image,
21279		},
21280		Attrs: attrs,
21281	}
21282	op := scope.AddOperation(opspec)
21283	return op.Output(0)
21284}
21285
21286// LSTMBlockCellAttr is an optional argument to LSTMBlockCell.
21287type LSTMBlockCellAttr func(optionalAttr)
21288
21289// LSTMBlockCellForgetBias sets the optional forget_bias attribute to value.
21290//
21291// value: The forget gate bias.
21292// If not specified, defaults to 1
21293func LSTMBlockCellForgetBias(value float32) LSTMBlockCellAttr {
21294	return func(m optionalAttr) {
21295		m["forget_bias"] = value
21296	}
21297}
21298
21299// LSTMBlockCellCellClip sets the optional cell_clip attribute to value.
21300//
21301// value: Value to clip the 'cs' value to.
21302// If not specified, defaults to 3
21303func LSTMBlockCellCellClip(value float32) LSTMBlockCellAttr {
21304	return func(m optionalAttr) {
21305		m["cell_clip"] = value
21306	}
21307}
21308
21309// LSTMBlockCellUsePeephole sets the optional use_peephole attribute to value.
21310//
21311// value: Whether to use peephole weights.
21312// If not specified, defaults to false
21313func LSTMBlockCellUsePeephole(value bool) LSTMBlockCellAttr {
21314	return func(m optionalAttr) {
21315		m["use_peephole"] = value
21316	}
21317}
21318
21319// Computes the LSTM cell forward propagation for 1 time step.
21320//
21321// This implementation uses a single weight matrix and a single bias vector,
21322// with an optional peephole connection.
21323//
21324// This kernel op implements the following mathematical equations:
21325//
21326// ```python
21327// xh = [x, h_prev]
21328// [i, f, ci, o] = xh * w + b
21329// f = f + forget_bias
21330//
21331// if not use_peephole:
21332//
21333//	wci = wcf = wco = 0
21334//
21335// i = sigmoid(cs_prev * wci + i)
21336// f = sigmoid(cs_prev * wcf + f)
21337// ci = tanh(ci)
21338//
21339// cs = ci .* i + cs_prev .* f
21340// cs = clip(cs, cell_clip)
21341//
21342// o = sigmoid(cs * wco + o)
21343// co = tanh(cs)
21344// h = co .* o
21345// ```
21346//
21347// Arguments:
21348//
21349//	x: The input to the LSTM cell, shape (batch_size, num_inputs).
21350//	cs_prev: Value of the cell state at previous time step.
21351//	h_prev: Output of the previous cell at previous time step.
21352//	w: The weight matrix.
21353//	wci: The weight matrix for input gate peephole connection.
21354//	wcf: The weight matrix for forget gate peephole connection.
21355//	wco: The weight matrix for output gate peephole connection.
21356//	b: The bias vector.
21357//
21358// Returns:
21359//
21360//	i: The input gate.
21361//	cs: The cell state before the tanh.
21362//	f: The forget gate.
21363//	o: The output gate.
21364//	ci: The cell input.
21365//	co: The cell after the tanh.
21366//	h: The output h vector.
21367func LSTMBlockCell(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, optional ...LSTMBlockCellAttr) (i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, h tf.Output) {
21368	if scope.Err() != nil {
21369		return
21370	}
21371	attrs := map[string]interface{}{}
21372	for _, a := range optional {
21373		a(attrs)
21374	}
21375	opspec := tf.OpSpec{
21376		Type: "LSTMBlockCell",
21377		Input: []tf.Input{
21378			x, cs_prev, h_prev, w, wci, wcf, wco, b,
21379		},
21380		Attrs: attrs,
21381	}
21382	op := scope.AddOperation(opspec)
21383	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
21384}
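
// A call sketch for the single-step cell above. All eight inputs are assumed
// to be previously constructed tf.Output values of compatible shapes
// (x: [batch, inputs], w: [inputs+cells, 4*cells], b: [4*cells], and so on):
//
//	i, cs, f, o, ci, co, h := LSTMBlockCell(root,
//		x, csPrev, hPrev, w, wci, wcf, wco, b,
//		LSTMBlockCellForgetBias(1.0), LSTMBlockCellUsePeephole(false))
//
// In a real graph, h is the per-step output and cs feeds the next step's
// cs_prev.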
21385
21386// Computes the LSTM cell backward propagation for 1 timestep.
21387//
21388// This implementation is to be used in conjunction with LSTMBlockCell.
21389//
21390// Arguments:
21391//
21392//	x: The input to the LSTM cell, shape (batch_size, num_inputs).
21393//	cs_prev: The previous cell state.
21394//	h_prev: The previous h state.
21395//	w: The weight matrix.
21396//	wci: The weight matrix for input gate peephole connection.
21397//	wcf: The weight matrix for forget gate peephole connection.
21398//	wco: The weight matrix for output gate peephole connection.
21399//	b: The bias vector.
21400//	i: The input gate.
21401//	cs: The cell state before the tanh.
21402//	f: The forget gate.
21403//	o: The output gate.
21404//	ci: The cell input.
21405//	co: The cell after the tanh.
21406//	cs_grad: The current gradient of cs.
21407//	h_grad: The gradient of h vector.
21408//	use_peephole: Whether the cell uses peephole connections.
21409//
21410// Returns:
21411//
21412//	cs_prev_grad: The gradient of cs to be back-propped.
21413//	dicfo: The derivative with respect to [i, cs, f, o].
21414//	wci_grad: The gradient for wci to be back-propped.
21415//	wcf_grad: The gradient for wcf to be back-propped.
21416//	wco_grad: The gradient for wco to be back-propped.
21417func LSTMBlockCellGrad(scope *Scope, x tf.Output, cs_prev tf.Output, h_prev tf.Output, w tf.Output, wci tf.Output, wcf tf.Output, wco tf.Output, b tf.Output, i tf.Output, cs tf.Output, f tf.Output, o tf.Output, ci tf.Output, co tf.Output, cs_grad tf.Output, h_grad tf.Output, use_peephole bool) (cs_prev_grad tf.Output, dicfo tf.Output, wci_grad tf.Output, wcf_grad tf.Output, wco_grad tf.Output) {
21418	if scope.Err() != nil {
21419		return
21420	}
21421	attrs := map[string]interface{}{"use_peephole": use_peephole}
21422	opspec := tf.OpSpec{
21423		Type: "LSTMBlockCellGrad",
21424		Input: []tf.Input{
21425			x, cs_prev, h_prev, w, wci, wcf, wco, b, i, cs, f, o, ci, co, cs_grad, h_grad,
21426		},
21427		Attrs: attrs,
21428	}
21429	op := scope.AddOperation(opspec)
21430	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4)
21431}
21432
21433// Records the latency of producing `input_dataset` elements in a StatsAggregator.
21434func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
21435	if scope.Err() != nil {
21436		return
21437	}
21438	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
21439	opspec := tf.OpSpec{
21440		Type: "LatencyStatsDataset",
21441		Input: []tf.Input{
21442			input_dataset, tag,
21443		},
21444		Attrs: attrs,
21445	}
21446	op := scope.AddOperation(opspec)
21447	return op.Output(0)
21448}
21449
21450// LeakyReluAttr is an optional argument to LeakyRelu.
21451type LeakyReluAttr func(optionalAttr)
21452
21453// LeakyReluAlpha sets the optional alpha attribute to value.
21454// If not specified, defaults to 0.2
21455func LeakyReluAlpha(value float32) LeakyReluAttr {
21456	return func(m optionalAttr) {
21457		m["alpha"] = value
21458	}
21459}
21460
21461// Computes rectified linear: `max(features, features * alpha)`.
21462func LeakyRelu(scope *Scope, features tf.Output, optional ...LeakyReluAttr) (activations tf.Output) {
21463	if scope.Err() != nil {
21464		return
21465	}
21466	attrs := map[string]interface{}{}
21467	for _, a := range optional {
21468		a(attrs)
21469	}
21470	opspec := tf.OpSpec{
21471		Type: "LeakyRelu",
21472		Input: []tf.Input{
21473			features,
21474		},
21475		Attrs: attrs,
21476	}
21477	op := scope.AddOperation(opspec)
21478	return op.Output(0)
21479}
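
// For example (a sketch; the values are chosen to make the alpha scaling
// visible):
//
//	feats := Const(root, []float32{-10, 0, 2})
//	act := LeakyRelu(root, feats, LeakyReluAlpha(0.1)) // -> [-1, 0, 2]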
21480
21481// LeakyReluGradAttr is an optional argument to LeakyReluGrad.
21482type LeakyReluGradAttr func(optionalAttr)
21483
21484// LeakyReluGradAlpha sets the optional alpha attribute to value.
21485// If not specified, defaults to 0.2
21486func LeakyReluGradAlpha(value float32) LeakyReluGradAttr {
21487	return func(m optionalAttr) {
21488		m["alpha"] = value
21489	}
21490}
21491
21492// Computes rectified linear gradients for a LeakyRelu operation.
21493//
21494// Arguments:
21495//
21496//	gradients: The backpropagated gradients to the corresponding LeakyRelu operation.
21497//	features: The features passed as input to the corresponding LeakyRelu operation,
21498//
21499// OR the outputs of that operation (both work equivalently).
21500//
21501// Returns `gradients * (features > 0) + alpha * gradients * (features <= 0)`.
21502func LeakyReluGrad(scope *Scope, gradients tf.Output, features tf.Output, optional ...LeakyReluGradAttr) (backprops tf.Output) {
21503	if scope.Err() != nil {
21504		return
21505	}
21506	attrs := map[string]interface{}{}
21507	for _, a := range optional {
21508		a(attrs)
21509	}
21510	opspec := tf.OpSpec{
21511		Type: "LeakyReluGrad",
21512		Input: []tf.Input{
21513			gradients, features,
21514		},
21515		Attrs: attrs,
21516	}
21517	op := scope.AddOperation(opspec)
21518	return op.Output(0)
21519}
21520
21521// LearnedUnigramCandidateSamplerAttr is an optional argument to LearnedUnigramCandidateSampler.
21522type LearnedUnigramCandidateSamplerAttr func(optionalAttr)
21523
21524// LearnedUnigramCandidateSamplerSeed sets the optional seed attribute to value.
21525//
21526// value: If either seed or seed2 is set to be non-zero, the random number
21527// generator is seeded by the given seed.  Otherwise, it is seeded by a
21528// random seed.
21529// If not specified, defaults to 0
21530func LearnedUnigramCandidateSamplerSeed(value int64) LearnedUnigramCandidateSamplerAttr {
21531	return func(m optionalAttr) {
21532		m["seed"] = value
21533	}
21534}
21535
21536// LearnedUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
21537//
21538// value: A second seed to avoid seed collision.
21539// If not specified, defaults to 0
21540func LearnedUnigramCandidateSamplerSeed2(value int64) LearnedUnigramCandidateSamplerAttr {
21541	return func(m optionalAttr) {
21542		m["seed2"] = value
21543	}
21544}
21545
21546// Generates labels for candidate sampling with a learned unigram distribution.
21547//
21548// See explanations of candidate sampling and the data formats at
21549// go/candidate-sampling.
21550//
21551// For each batch, this op picks a single set of sampled candidate labels.
21552//
21553// The advantages of sampling candidates per-batch are simplicity and the
21554// possibility of efficient dense matrix multiplication. The disadvantage is that
21555// the sampled candidates must be chosen independently of the context and of the
21556// true labels.
21557//
21558// Arguments:
21559//
21560//	true_classes: A batch_size * num_true matrix, in which each row contains the
21561//
21562// IDs of the num_true target_classes in the corresponding original label.
21563//
21564//	num_true: Number of true labels per context.
21565//	num_sampled: Number of candidates to randomly sample.
21566//	unique: If unique is true, we sample with rejection, so that all sampled
21567//
21568// candidates in a batch are unique. This requires some approximation to
21569// estimate the post-rejection sampling probabilities.
21570//
21571//	range_max: The sampler will sample integers from the interval [0, range_max).
21572//
21573// Returns:
21574//
21575//	sampled_candidates: A vector of length num_sampled, in which each element is
21576//
21577// the ID of a sampled candidate.
21578//
21579//	true_expected_count: A batch_size * num_true matrix, representing
21580//
21581// the number of times each candidate is expected to occur in a batch
21582// of sampled candidates. If unique=true, then this is a probability.
21583//
21584//	sampled_expected_count: A vector of length num_sampled, for each sampled
21585//
21586// candidate representing the number of times the candidate is expected
21587// to occur in a batch of sampled candidates.  If unique=true, then this is a
21588// probability.
21589func LearnedUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LearnedUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
21590	if scope.Err() != nil {
21591		return
21592	}
21593	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
21594	for _, a := range optional {
21595		a(attrs)
21596	}
21597	opspec := tf.OpSpec{
21598		Type: "LearnedUnigramCandidateSampler",
21599		Input: []tf.Input{
21600			true_classes,
21601		},
21602		Attrs: attrs,
21603	}
21604	op := scope.AddOperation(opspec)
21605	return op.Output(0), op.Output(1), op.Output(2)
21606}
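
// A sketch of a deterministically seeded sampling setup (trueClasses is
// assumed to be a previously built int64 `Tensor` of shape
// [batch_size, num_true]; the sizes are illustrative):
//
//	sampled, trueExp, sampledExp := LearnedUnigramCandidateSampler(root,
//		trueClasses, 1, 16, true, 50000,
//		LearnedUnigramCandidateSamplerSeed(7),
//		LearnedUnigramCandidateSamplerSeed2(13))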
21607
21608// Elementwise computes the bitwise left-shift of `x` and `y`.
21609//
21610// If `y` is negative, or greater than or equal to the width of `x` in bits, the
21611// result is implementation defined.
21612//
21613// Example:
21614//
21615// ```python
21616// import tensorflow as tf
21617// from tensorflow.python.ops import bitwise_ops
21618// import numpy as np
21619// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
21620//
21621// for dtype in dtype_list:
21622//
21623//	lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
21624//	rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
21625//
21626//	left_shift_result = bitwise_ops.left_shift(lhs, rhs)
21627//
21628//	print(left_shift_result)
21629//
21630// # This will print:
21631// # tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
21632// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
21633// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
21634// # tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)
21635//
21636// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
21637// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
21638// bitwise_ops.left_shift(lhs, rhs)
21639// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
21640// ```
21641func LeftShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21642	if scope.Err() != nil {
21643		return
21644	}
21645	opspec := tf.OpSpec{
21646		Type: "LeftShift",
21647		Input: []tf.Input{
21648			x, y,
21649		},
21650	}
21651	op := scope.AddOperation(opspec)
21652	return op.Output(0)
21653}
21654
21655// Returns the truth value of (x < y) element-wise.
21656//
21657// *NOTE*: `Less` supports broadcasting. More about broadcasting
21658// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
21659//
21660// Example:
21661//
21662// ```python
21663// x = tf.constant([5, 4, 6])
21664// y = tf.constant([5])
21665// tf.math.less(x, y) ==> [False, True, False]
21666//
21667// x = tf.constant([5, 4, 6])
21668// y = tf.constant([5, 6, 7])
21669// tf.math.less(x, y) ==> [False, True, True]
21670// ```
21671func Less(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21672	if scope.Err() != nil {
21673		return
21674	}
21675	opspec := tf.OpSpec{
21676		Type: "Less",
21677		Input: []tf.Input{
21678			x, y,
21679		},
21680	}
21681	op := scope.AddOperation(opspec)
21682	return op.Output(0)
21683}
21684
21685// Returns the truth value of (x <= y) element-wise.
21686//
21687// *NOTE*: `LessEqual` supports broadcasting. More about broadcasting
21688// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
21689//
21690// Example:
21691//
21692// ```python
21693// x = tf.constant([5, 4, 6])
21694// y = tf.constant([5])
21695// tf.math.less_equal(x, y) ==> [True, True, False]
21696//
21697// x = tf.constant([5, 4, 6])
21698// y = tf.constant([5, 6, 6])
21699// tf.math.less_equal(x, y) ==> [True, True, True]
21700// ```
21701func LessEqual(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
21702	if scope.Err() != nil {
21703		return
21704	}
21705	opspec := tf.OpSpec{
21706		Type: "LessEqual",
21707		Input: []tf.Input{
21708			x, y,
21709		},
21710	}
21711	op := scope.AddOperation(opspec)
21712	return op.Output(0)
21713}
21714
21715// Computes the log of the absolute value of `Gamma(x)` element-wise.
21716//
21717//	For positive integer inputs, this function computes log((input - 1)!) for every element in the tensor.
21718//	`lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
21719//
21720// Example:
21721//
21722// ```python
21723// x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
21724// tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
21725// ```
21726func Lgamma(scope *Scope, x tf.Output) (y tf.Output) {
21727	if scope.Err() != nil {
21728		return
21729	}
21730	opspec := tf.OpSpec{
21731		Type: "Lgamma",
21732		Input: []tf.Input{
21733			x,
21734		},
21735	}
21736	op := scope.AddOperation(opspec)
21737	return op.Output(0)
21738}
21739
21740// Generates values in an interval.
21741//
21742// A sequence of `num` evenly spaced values is generated beginning at `start`.
21743// If `num > 1`, consecutive values in the sequence differ by `(stop - start) / (num - 1)`,
21744// so that the last one is exactly `stop`.
21745//
21746// For example:
21747//
21748// ```
21749// tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
21750// ```
21751//
21752// Arguments:
21753//
21754//	start: 0-D tensor. First entry in the range.
21755//	stop: 0-D tensor. Last entry in the range.
21756//	num: 0-D tensor. Number of values to generate.
21757//
21758// Returns 1-D. The generated values.
21759func LinSpace(scope *Scope, start tf.Output, stop tf.Output, num tf.Output) (output tf.Output) {
21760	if scope.Err() != nil {
21761		return
21762	}
21763	opspec := tf.OpSpec{
21764		Type: "LinSpace",
21765		Input: []tf.Input{
21766			start, stop, num,
21767		},
21768	}
21769	op := scope.AddOperation(opspec)
21770	return op.Output(0)
21771}
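
// The documented example, expressed with this package's wrappers (a sketch;
// note that `num` must be an integer tensor):
//
//	start := Const(root, float32(10))
//	stop := Const(root, float32(12))
//	num := Const(root, int32(3))
//	vals := LinSpace(root, start, stop, num) // -> [10, 11, 12]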
21772
21773// ListDatasetAttr is an optional argument to ListDataset.
21774type ListDatasetAttr func(optionalAttr)
21775
21776// ListDatasetMetadata sets the optional metadata attribute to value.
21777// If not specified, defaults to ""
21778func ListDatasetMetadata(value string) ListDatasetAttr {
21779	return func(m optionalAttr) {
21780		m["metadata"] = value
21781	}
21782}
21783
21784// Creates a dataset that emits each of `tensors` once.
21785func ListDataset(scope *Scope, tensors []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ListDatasetAttr) (handle tf.Output) {
21786	if scope.Err() != nil {
21787		return
21788	}
21789	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
21790	for _, a := range optional {
21791		a(attrs)
21792	}
21793	opspec := tf.OpSpec{
21794		Type: "ListDataset",
21795		Input: []tf.Input{
21796			tf.OutputList(tensors),
21797		},
21798		Attrs: attrs,
21799	}
21800	op := scope.AddOperation(opspec)
21801	return op.Output(0)
21802}
21803
21804// ListDiffAttr is an optional argument to ListDiff.
21805type ListDiffAttr func(optionalAttr)
21806
21807// ListDiffOutIdx sets the optional out_idx attribute to value.
21808// If not specified, defaults to DT_INT32
21809func ListDiffOutIdx(value tf.DataType) ListDiffAttr {
21810	return func(m optionalAttr) {
21811		m["out_idx"] = value
21812	}
21813}
21814
21815// Computes the difference between two lists of numbers or strings.
21816//
21817// Given a list `x` and a list `y`, this operation returns a list `out` that
21818// represents all values that are in `x` but not in `y`. The returned list `out`
21819// is sorted in the same order that the numbers appear in `x` (duplicates are
21820// preserved). This operation also returns a list `idx` that represents the
21821// position of each `out` element in `x`. In other words:
21822//
21823// `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
21824//
21825// For example, given this input:
21826//
21827// ```
21828// x = [1, 2, 3, 4, 5, 6]
21829// y = [1, 3, 5]
21830// ```
21831//
21832// This operation would return:
21833//
21834// ```
21835// out ==> [2, 4, 6]
21836// idx ==> [1, 3, 5]
21837// ```
21838//
21839// Arguments:
21840//
21841//	x: 1-D. Values to keep.
21842//	y: 1-D. Values to remove.
21843//
21844// Returns:
21845//
21846//	out: 1-D. Values present in `x` but not in `y`.
21847//	idx: 1-D. Positions of `x` values preserved in `out`.
21848func ListDiff(scope *Scope, x tf.Output, y tf.Output, optional ...ListDiffAttr) (out tf.Output, idx tf.Output) {
21849	if scope.Err() != nil {
21850		return
21851	}
21852	attrs := map[string]interface{}{}
21853	for _, a := range optional {
21854		a(attrs)
21855	}
21856	opspec := tf.OpSpec{
21857		Type: "ListDiff",
21858		Input: []tf.Input{
21859			x, y,
21860		},
21861		Attrs: attrs,
21862	}
21863	op := scope.AddOperation(opspec)
21864	return op.Output(0), op.Output(1)
21865}
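
// The documented example, as a construction sketch:
//
//	x := Const(root, []int32{1, 2, 3, 4, 5, 6})
//	y := Const(root, []int32{1, 3, 5})
//	out, idx := ListDiff(root, x, y) // out -> [2, 4, 6], idx -> [1, 3, 5]
//
// Passing ListDiffOutIdx(tf.Int64) would make `idx` an int64 tensor instead.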
21866
21867// An op that loads optimization parameters into embedding memory.
21868//
21869// An op that loads optimization parameters into embedding memory. Must be
21870// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct embedding
21871// table configuration. For example, this op is used to install parameters that are
21872// loaded from a checkpoint before a training loop is executed.  For Adagrad,
21873// auxiliary1 should be the accumulators. For SGD, all of the auxiliary* values
21874// should be empty. For FTRL, auxiliary1 should be the accumulators and auxiliary2
21875// should be the linear terms. For ADAM, auxiliary1 should be the momenta and
21876// auxiliary2 should be the velocities.
21877//
21878// Arguments:
21879//
21880//	parameters: A list of tensors, one for each embedding table,
21881//
21882// containing the initial embedding table parameters to use in embedding
21883// lookups.
21884//
21885//	auxiliary1: A list of tensors, one for each embedding table, containing the
21886//
21887// initial values of the first auxiliary optimization parameter to use in embedding
21888// training loop updates. The shape of each entry is ignored (and thus can be
21889// empty) for those tables whose optimization algorithms do not have at least one
21890// auxiliary parameter.
21891//
21892//	auxiliary2: A list of tensors, one for each embedding table, containing the
21893//
21894// initial values of the second auxiliary optimization parameter to use in
21895// embedding training loop updates. The shape of each entry is ignored (and thus
21896// can be empty) for those tables whose optimization algorithms do not have at
21897// least two auxiliary parameters.
21898//
21899//	auxiliary3: A list of tensors, one for each embedding table, containing the
21900//
21901// initial values of the third auxiliary optimization parameter to use in embedding
21902// training loop updates. The shape of each entry is ignored (and thus can be
21903// empty) for those tables whose optimization algorithms do not have at least
21904// three auxiliary parameters.
21905//
21906//	auxiliary4: A list of tensors, one for each embedding table, containing the
21907//
21908// initial values of the fourth auxiliary optimization parameter to use in
21909// embedding training loop updates. The shape of each entry is ignored (and thus
21910// can be empty) for those tables whose optimization algorithms do not have at
21911// least four auxiliary parameters.
21912//
21913//	auxiliary5: A list of tensors, one for each embedding table, containing the
21914//
21915// initial values of the fifth auxiliary optimization parameter to use in embedding
21916// training loop updates. The shape of each entry is ignored (and thus can be
21917// empty) for those tables whose optimization algorithms do not have at least
21918// five auxiliary parameters.
21919//
21920//	auxiliary6: A list of tensors, one for each embedding table, containing the
21921//
21922// initial values of the sixth auxiliary optimization parameter to use in
21923// embedding training loop updates. The shape of each entry is ignored (and thus
21924// can be empty) for those tables whose optimization algorithms do not have at
21925// least six auxiliary parameters.
21926//
21927//	auxiliary7: A list of tensors, one for each embedding table, containing the
21928//
21929// initial values of the seventh auxiliary optimization parameter to use in embedding
21930// training loop updates. The shape of each entry is ignored (and thus can be
21931// empty) for those tables whose optimization algorithms do not have at least
21932// seven auxiliary parameters.
21933//
21934//	config: A TPUEmbeddingConfiguration proto describing the
21935//
21936// table parameters being loaded, serialized to a string.
21937//
21938//	num_shards: Number of shards into which the embedding tables are divided.
21939//	shard_id: Identifier of shard for this operation.
21940//
21941// Returns the created operation.
21942func LoadAllTPUEmbeddingParameters(scope *Scope, parameters []tf.Output, auxiliary1 []tf.Output, auxiliary2 []tf.Output, auxiliary3 []tf.Output, auxiliary4 []tf.Output, auxiliary5 []tf.Output, auxiliary6 []tf.Output, auxiliary7 []tf.Output, config string, num_shards int64, shard_id int64) (o *tf.Operation) {
21943	if scope.Err() != nil {
21944		return
21945	}
21946	attrs := map[string]interface{}{"config": config, "num_shards": num_shards, "shard_id": shard_id}
21947	opspec := tf.OpSpec{
21948		Type: "LoadAllTPUEmbeddingParameters",
21949		Input: []tf.Input{
21950			tf.OutputList(parameters), tf.OutputList(auxiliary1), tf.OutputList(auxiliary2), tf.OutputList(auxiliary3), tf.OutputList(auxiliary4), tf.OutputList(auxiliary5), tf.OutputList(auxiliary6), tf.OutputList(auxiliary7),
21951		},
21952		Attrs: attrs,
21953	}
21954	return scope.AddOperation(opspec)
21955}
21956
21957// LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix.
21958type LoadAndRemapMatrixAttr func(optionalAttr)
21959
21960// LoadAndRemapMatrixMaxRowsInMemory sets the optional max_rows_in_memory attribute to value.
21961//
21962// value: The maximum number of rows to load from the checkpoint at
21963// once. If less than or equal to 0, the entire matrix will be loaded into
21964// memory. Setting this arg trades increased disk reads for lower memory usage.
21965// If not specified, defaults to -1
21966func LoadAndRemapMatrixMaxRowsInMemory(value int64) LoadAndRemapMatrixAttr {
21967	return func(m optionalAttr) {
21968		m["max_rows_in_memory"] = value
21969	}
21970}
21971
21972// Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint
21973//
21974// at `ckpt_path` and potentially reorders its rows and columns using the
21975// specified remappings.
21976//
21977// Most users should use one of the wrapper initializers (such as
21978// `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this
21979// function directly.
21980//
21981// The remappings are 1-D tensors with the following properties:
21982//
21983//   - `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
21984//     matrix will be initialized from the row corresponding to index
21985//     `row_remapping[i]` in the old `Tensor` from the checkpoint.
21986//   - `col_remapping` must have either 0 entries (indicating that no column
21987//     reordering is needed) or `num_cols` entries. If specified, column `j` of the
21988//     output matrix will be initialized from the column corresponding to index
21989//     `col_remapping[j]` in the old `Tensor` from the checkpoint.
21990//   - A value of -1 in either of the remappings signifies a "missing" entry. In that
21991//     case, values from the `initializing_values` tensor will be used to fill that
21992//     missing row or column. If `row_remapping` has `r` missing entries and
21993//     `col_remapping` has `c` missing entries, then the following condition must be
21994//     true:
21995//
21996// `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)`
21997//
21998// The remapping tensors can be generated using the GenerateVocabRemapping op.
21999//
22000// As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1],
22001// initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing
22002// the value from row i, column j of the old tensor in the checkpoint, the output
22003// matrix will look like the following:
22004//
22005// [[w(1, 0),  w(1, 2),  0.5],
22006//
22007//	[w(0, 0),  w(0, 2), -0.5],
22008//	[0.25,    -0.25,      42]]
22009//
22010// Arguments:
22011//
22012//	ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from
22013//
22014// which the old matrix `Tensor` will be loaded.
22015//
22016//	old_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
22017//	row_remapping: An int `Tensor` of row remappings (generally created by
22018//
22019// `generate_vocab_remapping`).  Even if no row remapping is needed, this must
22020// still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted
22021// index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`).
22022//
22023//	col_remapping: An int `Tensor` of column remappings (generally created by
22024//
22025// `generate_vocab_remapping`).  May be a size-0 `Tensor` if only row remapping
22026// is to be done (e.g. column ordering is the same).
22027//
22028//	initializing_values: A float `Tensor` containing values to fill in for cells
22029//
22030// in the output matrix that are not loaded from the checkpoint. Length must be
22031// exactly the same as the number of missing / new cells.
22032//
22033//	num_rows: Number of rows (length of the 1st dimension) in the output matrix.
22034//	num_cols: Number of columns (length of the 2nd dimension) in the output matrix.
22035//
22036// Returns Output matrix containing existing values loaded from the
22037// checkpoint, and with any missing values filled in from initializing_values.
22038func LoadAndRemapMatrix(scope *Scope, ckpt_path tf.Output, old_tensor_name tf.Output, row_remapping tf.Output, col_remapping tf.Output, initializing_values tf.Output, num_rows int64, num_cols int64, optional ...LoadAndRemapMatrixAttr) (output_matrix tf.Output) {
22039	if scope.Err() != nil {
22040		return
22041	}
22042	attrs := map[string]interface{}{"num_rows": num_rows, "num_cols": num_cols}
22043	for _, a := range optional {
22044		a(attrs)
22045	}
22046	opspec := tf.OpSpec{
22047		Type: "LoadAndRemapMatrix",
22048		Input: []tf.Input{
22049			ckpt_path, old_tensor_name, row_remapping, col_remapping, initializing_values,
22050		},
22051		Attrs: attrs,
22052	}
22053	op := scope.AddOperation(opspec)
22054	return op.Output(0)
22055}
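
// Tying the example above to the missing-entry invariant: with
// row_remapping = [1, 0, -1] (r = 1 missing row), col_remapping = [0, 2, -1]
// (c = 1 missing column), and num_rows = num_cols = 3, the condition gives
// (1*3) + (1*3) - (1*1) = 5, matching the five initializing values shown. A
// call sketch (hedged: the checkpoint inputs are assumed to be string/int
// tensors built elsewhere):
//
//	out := LoadAndRemapMatrix(root,
//		ckptPath, oldName, rowRemap, colRemap, initVals,
//		3, 3, LoadAndRemapMatrixMaxRowsInMemory(1<<20))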
22056
22057// LoadTPUEmbeddingADAMParametersAttr is an optional argument to LoadTPUEmbeddingADAMParameters.
22058type LoadTPUEmbeddingADAMParametersAttr func(optionalAttr)
22059
22060// LoadTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
22061// If not specified, defaults to -1
22062func LoadTPUEmbeddingADAMParametersTableId(value int64) LoadTPUEmbeddingADAMParametersAttr {
22063	return func(m optionalAttr) {
22064		m["table_id"] = value
22065	}
22066}
22067
22068// LoadTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
22069// If not specified, defaults to ""
22070func LoadTPUEmbeddingADAMParametersTableName(value string) LoadTPUEmbeddingADAMParametersAttr {
22071	return func(m optionalAttr) {
22072		m["table_name"] = value
22073	}
22074}
22075
22076// LoadTPUEmbeddingADAMParametersConfig sets the optional config attribute to value.
22077// If not specified, defaults to ""
22078func LoadTPUEmbeddingADAMParametersConfig(value string) LoadTPUEmbeddingADAMParametersAttr {
22079	return func(m optionalAttr) {
22080		m["config"] = value
22081	}
22082}
22083
22084// Load ADAM embedding parameters.
22085//
22086// An op that loads optimization parameters into HBM for embedding. Must be
22087// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22088// embedding table configuration. For example, this op is used to install
22089// parameters that are loaded from a checkpoint before a training loop is
22090// executed.
22091//
22092// Arguments:
22093//
22094//	parameters: Value of parameters used in the ADAM optimization algorithm.
22095//	momenta: Value of momenta used in the ADAM optimization algorithm.
22096//	velocities: Value of velocities used in the ADAM optimization algorithm.
22097//
22098// Returns the created operation.
22099func LoadTPUEmbeddingADAMParameters(scope *Scope, parameters tf.Output, momenta tf.Output, velocities tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingADAMParametersAttr) (o *tf.Operation) {
22100	if scope.Err() != nil {
22101		return
22102	}
22103	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22104	for _, a := range optional {
22105		a(attrs)
22106	}
22107	opspec := tf.OpSpec{
22108		Type: "LoadTPUEmbeddingADAMParameters",
22109		Input: []tf.Input{
22110			parameters, momenta, velocities,
22111		},
22112		Attrs: attrs,
22113	}
22114	return scope.AddOperation(opspec)
22115}
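
// Each LoadTPUEmbedding*Parameters wrapper in this file follows the same
// pattern, so one hedged sketch stands in for the family (the table name and
// shard layout are placeholders, and the three state tensors are assumed to
// be built elsewhere):
//
//	LoadTPUEmbeddingADAMParameters(root,
//		params, momenta, velocities,
//		4, 0, // num_shards, shard_id
//		LoadTPUEmbeddingADAMParametersTableName("table_0"))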
22116
22117// LoadTPUEmbeddingAdadeltaParametersAttr is an optional argument to LoadTPUEmbeddingAdadeltaParameters.
22118type LoadTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)
22119
22120// LoadTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
22121// If not specified, defaults to -1
22122func LoadTPUEmbeddingAdadeltaParametersTableId(value int64) LoadTPUEmbeddingAdadeltaParametersAttr {
22123	return func(m optionalAttr) {
22124		m["table_id"] = value
22125	}
22126}
22127
22128// LoadTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
22129// If not specified, defaults to ""
22130func LoadTPUEmbeddingAdadeltaParametersTableName(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
22131	return func(m optionalAttr) {
22132		m["table_name"] = value
22133	}
22134}
22135
22136// LoadTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value.
22137// If not specified, defaults to ""
22138func LoadTPUEmbeddingAdadeltaParametersConfig(value string) LoadTPUEmbeddingAdadeltaParametersAttr {
22139	return func(m optionalAttr) {
22140		m["config"] = value
22141	}
22142}
22143
22144// Load Adadelta embedding parameters.
22145//
22146// An op that loads optimization parameters into HBM for embedding. Must be
22147// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22148// embedding table configuration. For example, this op is used to install
22149// parameters that are loaded from a checkpoint before a training loop is
22150// executed.
22151//
22152// Arguments:
22153//
22154//	parameters: Value of parameters used in the Adadelta optimization algorithm.
22155//	accumulators: Value of accumulators used in the Adadelta optimization algorithm.
22156//	updates: Value of updates used in the Adadelta optimization algorithm.
22157//
22158// Returns the created operation.
22159func LoadTPUEmbeddingAdadeltaParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, updates tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdadeltaParametersAttr) (o *tf.Operation) {
22160	if scope.Err() != nil {
22161		return
22162	}
22163	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22164	for _, a := range optional {
22165		a(attrs)
22166	}
22167	opspec := tf.OpSpec{
22168		Type: "LoadTPUEmbeddingAdadeltaParameters",
22169		Input: []tf.Input{
22170			parameters, accumulators, updates,
22171		},
22172		Attrs: attrs,
22173	}
22174	return scope.AddOperation(opspec)
22175}
22176
22177// LoadTPUEmbeddingAdagradMomentumParametersAttr is an optional argument to LoadTPUEmbeddingAdagradMomentumParameters.
22178type LoadTPUEmbeddingAdagradMomentumParametersAttr func(optionalAttr)
22179
22180// LoadTPUEmbeddingAdagradMomentumParametersTableId sets the optional table_id attribute to value.
22181// If not specified, defaults to -1
22182func LoadTPUEmbeddingAdagradMomentumParametersTableId(value int64) LoadTPUEmbeddingAdagradMomentumParametersAttr {
22183	return func(m optionalAttr) {
22184		m["table_id"] = value
22185	}
22186}
22187
22188// LoadTPUEmbeddingAdagradMomentumParametersTableName sets the optional table_name attribute to value.
22189// If not specified, defaults to ""
22190func LoadTPUEmbeddingAdagradMomentumParametersTableName(value string) LoadTPUEmbeddingAdagradMomentumParametersAttr {
22191	return func(m optionalAttr) {
22192		m["table_name"] = value
22193	}
22194}
22195
22196// LoadTPUEmbeddingAdagradMomentumParametersConfig sets the optional config attribute to value.
22197// If not specified, defaults to ""
22198func LoadTPUEmbeddingAdagradMomentumParametersConfig(value string) LoadTPUEmbeddingAdagradMomentumParametersAttr {
22199	return func(m optionalAttr) {
22200		m["config"] = value
22201	}
22202}
22203
22204// Load Adagrad Momentum embedding parameters.
22205//
22206// An op that loads optimization parameters into HBM for embedding. Must be
22207// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22208// embedding table configuration. For example, this op is used to install
22209// parameters that are loaded from a checkpoint before a training loop is
22210// executed.
22211//
22212// Arguments:
22213//
22214//	parameters: Value of parameters used in the Adagrad Momentum optimization algorithm.
22215//	accumulators: Value of accumulators used in the Adagrad Momentum optimization algorithm.
22216//	momenta: Value of momenta used in the Adagrad Momentum optimization algorithm.
22217//
22218// Returns the created operation.
22219func LoadTPUEmbeddingAdagradMomentumParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradMomentumParametersAttr) (o *tf.Operation) {
22220	if scope.Err() != nil {
22221		return
22222	}
22223	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22224	for _, a := range optional {
22225		a(attrs)
22226	}
22227	opspec := tf.OpSpec{
22228		Type: "LoadTPUEmbeddingAdagradMomentumParameters",
22229		Input: []tf.Input{
22230			parameters, accumulators, momenta,
22231		},
22232		Attrs: attrs,
22233	}
22234	return scope.AddOperation(opspec)
22235}
22236
22237// LoadTPUEmbeddingAdagradParametersAttr is an optional argument to LoadTPUEmbeddingAdagradParameters.
22238type LoadTPUEmbeddingAdagradParametersAttr func(optionalAttr)
22239
22240// LoadTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
22241// If not specified, defaults to -1
22242func LoadTPUEmbeddingAdagradParametersTableId(value int64) LoadTPUEmbeddingAdagradParametersAttr {
22243	return func(m optionalAttr) {
22244		m["table_id"] = value
22245	}
22246}
22247
22248// LoadTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
22249// If not specified, defaults to ""
22250func LoadTPUEmbeddingAdagradParametersTableName(value string) LoadTPUEmbeddingAdagradParametersAttr {
22251	return func(m optionalAttr) {
22252		m["table_name"] = value
22253	}
22254}
22255
22256// LoadTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value.
22257// If not specified, defaults to ""
22258func LoadTPUEmbeddingAdagradParametersConfig(value string) LoadTPUEmbeddingAdagradParametersAttr {
22259	return func(m optionalAttr) {
22260		m["config"] = value
22261	}
22262}
22263
22264// Load Adagrad embedding parameters.
22265//
22266// An op that loads optimization parameters into HBM for embedding. Must be
22267// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22268// embedding table configuration. For example, this op is used to install
22269// parameters that are loaded from a checkpoint before a training loop is
22270// executed.
22271//
22272// Arguments:
22273//
22274//	parameters: Value of parameters used in the Adagrad optimization algorithm.
22275//	accumulators: Value of accumulators used in the Adagrad optimization algorithm.
22276//
22277// Returns the created operation.
22278func LoadTPUEmbeddingAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersAttr) (o *tf.Operation) {
22279	if scope.Err() != nil {
22280		return
22281	}
22282	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22283	for _, a := range optional {
22284		a(attrs)
22285	}
22286	opspec := tf.OpSpec{
22287		Type: "LoadTPUEmbeddingAdagradParameters",
22288		Input: []tf.Input{
22289			parameters, accumulators,
22290		},
22291		Attrs: attrs,
22292	}
22293	return scope.AddOperation(opspec)
22294}
22295
22296// LoadTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingCenteredRMSPropParameters.
22297type LoadTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
22298
22299// LoadTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
22300// If not specified, defaults to -1
22301func LoadTPUEmbeddingCenteredRMSPropParametersTableId(value int64) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
22302	return func(m optionalAttr) {
22303		m["table_id"] = value
22304	}
22305}
22306
22307// LoadTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
22308// If not specified, defaults to ""
22309func LoadTPUEmbeddingCenteredRMSPropParametersTableName(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
22310	return func(m optionalAttr) {
22311		m["table_name"] = value
22312	}
22313}
22314
22315// LoadTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
22316// If not specified, defaults to ""
22317func LoadTPUEmbeddingCenteredRMSPropParametersConfig(value string) LoadTPUEmbeddingCenteredRMSPropParametersAttr {
22318	return func(m optionalAttr) {
22319		m["config"] = value
22320	}
22321}
22322
22323// Load centered RMSProp embedding parameters.
22324//
22325// An op that loads optimization parameters into HBM for embedding. Must be
22326// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22327// embedding table configuration. For example, this op is used to install
22328// parameters that are loaded from a checkpoint before a training loop is
22329// executed.
22330//
22331// Arguments:
22332//
22333//	parameters: Value of parameters used in the centered RMSProp optimization algorithm.
22334//	ms: Value of ms used in the centered RMSProp optimization algorithm.
22335//	mom: Value of mom used in the centered RMSProp optimization algorithm.
22336//	mg: Value of mg used in the centered RMSProp optimization algorithm.
22337//
22338// Returns the created operation.
22339func LoadTPUEmbeddingCenteredRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingCenteredRMSPropParametersAttr) (o *tf.Operation) {
22340	if scope.Err() != nil {
22341		return
22342	}
22343	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22344	for _, a := range optional {
22345		a(attrs)
22346	}
22347	opspec := tf.OpSpec{
22348		Type: "LoadTPUEmbeddingCenteredRMSPropParameters",
22349		Input: []tf.Input{
22350			parameters, ms, mom, mg,
22351		},
22352		Attrs: attrs,
22353	}
22354	return scope.AddOperation(opspec)
22355}
22356
22357// LoadTPUEmbeddingFTRLParametersAttr is an optional argument to LoadTPUEmbeddingFTRLParameters.
22358type LoadTPUEmbeddingFTRLParametersAttr func(optionalAttr)
22359
22360// LoadTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
22361// If not specified, defaults to -1
22362func LoadTPUEmbeddingFTRLParametersTableId(value int64) LoadTPUEmbeddingFTRLParametersAttr {
22363	return func(m optionalAttr) {
22364		m["table_id"] = value
22365	}
22366}
22367
22368// LoadTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
22369// If not specified, defaults to ""
22370func LoadTPUEmbeddingFTRLParametersTableName(value string) LoadTPUEmbeddingFTRLParametersAttr {
22371	return func(m optionalAttr) {
22372		m["table_name"] = value
22373	}
22374}
22375
22376// LoadTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value.
22377// If not specified, defaults to ""
22378func LoadTPUEmbeddingFTRLParametersConfig(value string) LoadTPUEmbeddingFTRLParametersAttr {
22379	return func(m optionalAttr) {
22380		m["config"] = value
22381	}
22382}
22383
22384// Load FTRL embedding parameters.
22385//
22386// An op that loads optimization parameters into HBM for embedding. Must be
22387// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22388// embedding table configuration. For example, this op is used to install
22389// parameters that are loaded from a checkpoint before a training loop is
22390// executed.
22391//
22392// Arguments:
22393//
22394//	parameters: Value of parameters used in the FTRL optimization algorithm.
22395//	accumulators: Value of accumulators used in the FTRL optimization algorithm.
22396//	linears: Value of linears used in the FTRL optimization algorithm.
22397//
22398// Returns the created operation.
22399func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, linears tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFTRLParametersAttr) (o *tf.Operation) {
22400	if scope.Err() != nil {
22401		return
22402	}
22403	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22404	for _, a := range optional {
22405		a(attrs)
22406	}
22407	opspec := tf.OpSpec{
22408		Type: "LoadTPUEmbeddingFTRLParameters",
22409		Input: []tf.Input{
22410			parameters, accumulators, linears,
22411		},
22412		Attrs: attrs,
22413	}
22414	return scope.AddOperation(opspec)
22415}
22416
22417// LoadTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to LoadTPUEmbeddingFrequencyEstimatorParameters.
22418type LoadTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)
22419
22420// LoadTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value.
22421// If not specified, defaults to -1
22422func LoadTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
22423	return func(m optionalAttr) {
22424		m["table_id"] = value
22425	}
22426}
22427
22428// LoadTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value.
22429// If not specified, defaults to ""
22430func LoadTPUEmbeddingFrequencyEstimatorParametersTableName(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
22431	return func(m optionalAttr) {
22432		m["table_name"] = value
22433	}
22434}
22435
22436// LoadTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value.
22437// If not specified, defaults to ""
22438func LoadTPUEmbeddingFrequencyEstimatorParametersConfig(value string) LoadTPUEmbeddingFrequencyEstimatorParametersAttr {
22439	return func(m optionalAttr) {
22440		m["config"] = value
22441	}
22442}
22443
22444// Load frequency estimator embedding parameters.
22445//
22446// An op that loads optimization parameters into HBM for embedding. Must be
22447// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22448// embedding table configuration. For example, this op is used to install
22449// parameters that are loaded from a checkpoint before a training loop is
22450// executed.
22451//
22452// Arguments:
22453//
22454//	parameters: Value of parameters used in the frequency estimator optimization algorithm.
22455//	last_hit_step: Value of last_hit_step used in the frequency estimator optimization algorithm.
22456//
22457// Returns the created operation.
22458func LoadTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, parameters tf.Output, last_hit_step tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingFrequencyEstimatorParametersAttr) (o *tf.Operation) {
22459	if scope.Err() != nil {
22460		return
22461	}
22462	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22463	for _, a := range optional {
22464		a(attrs)
22465	}
22466	opspec := tf.OpSpec{
22467		Type: "LoadTPUEmbeddingFrequencyEstimatorParameters",
22468		Input: []tf.Input{
22469			parameters, last_hit_step,
22470		},
22471		Attrs: attrs,
22472	}
22473	return scope.AddOperation(opspec)
22474}
22475
22476// LoadTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to LoadTPUEmbeddingMDLAdagradLightParameters.
22477type LoadTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)
22478
22479// LoadTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
22480// If not specified, defaults to -1
22481func LoadTPUEmbeddingMDLAdagradLightParametersTableId(value int64) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
22482	return func(m optionalAttr) {
22483		m["table_id"] = value
22484	}
22485}
22486
22487// LoadTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
22488// If not specified, defaults to ""
22489func LoadTPUEmbeddingMDLAdagradLightParametersTableName(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
22490	return func(m optionalAttr) {
22491		m["table_name"] = value
22492	}
22493}
22494
22495// LoadTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value.
22496// If not specified, defaults to ""
22497func LoadTPUEmbeddingMDLAdagradLightParametersConfig(value string) LoadTPUEmbeddingMDLAdagradLightParametersAttr {
22498	return func(m optionalAttr) {
22499		m["config"] = value
22500	}
22501}
22502
22503// Load MDL Adagrad Light embedding parameters.
22504//
22505// An op that loads optimization parameters into HBM for embedding. Must be
22506// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22507// embedding table configuration. For example, this op is used to install
22508// parameters that are loaded from a checkpoint before a training loop is
22509// executed.
22510//
22511// Arguments:
22512//
22513//	parameters: Value of parameters used in the MDL Adagrad Light optimization algorithm.
22514//	accumulators: Value of accumulators used in the MDL Adagrad Light optimization algorithm.
22515//	weights: Value of weights used in the MDL Adagrad Light optimization algorithm.
22516//	benefits: Value of benefits used in the MDL Adagrad Light optimization algorithm.
22517//
22518// Returns the created operation.
22519func LoadTPUEmbeddingMDLAdagradLightParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMDLAdagradLightParametersAttr) (o *tf.Operation) {
22520	if scope.Err() != nil {
22521		return
22522	}
22523	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22524	for _, a := range optional {
22525		a(attrs)
22526	}
22527	opspec := tf.OpSpec{
22528		Type: "LoadTPUEmbeddingMDLAdagradLightParameters",
22529		Input: []tf.Input{
22530			parameters, accumulators, weights, benefits,
22531		},
22532		Attrs: attrs,
22533	}
22534	return scope.AddOperation(opspec)
22535}
22536
22537// LoadTPUEmbeddingMomentumParametersAttr is an optional argument to LoadTPUEmbeddingMomentumParameters.
22538type LoadTPUEmbeddingMomentumParametersAttr func(optionalAttr)
22539
22540// LoadTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
22541// If not specified, defaults to -1
22542func LoadTPUEmbeddingMomentumParametersTableId(value int64) LoadTPUEmbeddingMomentumParametersAttr {
22543	return func(m optionalAttr) {
22544		m["table_id"] = value
22545	}
22546}
22547
22548// LoadTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
22549// If not specified, defaults to ""
22550func LoadTPUEmbeddingMomentumParametersTableName(value string) LoadTPUEmbeddingMomentumParametersAttr {
22551	return func(m optionalAttr) {
22552		m["table_name"] = value
22553	}
22554}
22555
22556// LoadTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value.
22557// If not specified, defaults to ""
22558func LoadTPUEmbeddingMomentumParametersConfig(value string) LoadTPUEmbeddingMomentumParametersAttr {
22559	return func(m optionalAttr) {
22560		m["config"] = value
22561	}
22562}
22563
22564// Load Momentum embedding parameters.
22565//
22566// An op that loads optimization parameters into HBM for embedding. Must be
22567// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22568// embedding table configuration. For example, this op is used to install
22569// parameters that are loaded from a checkpoint before a training loop is
22570// executed.
22571//
22572// Arguments:
22573//
22574//	parameters: Value of parameters used in the Momentum optimization algorithm.
22575//	momenta: Value of momenta used in the Momentum optimization algorithm.
22576//
22577// Returns the created operation.
22578func LoadTPUEmbeddingMomentumParameters(scope *Scope, parameters tf.Output, momenta tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingMomentumParametersAttr) (o *tf.Operation) {
22579	if scope.Err() != nil {
22580		return
22581	}
22582	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22583	for _, a := range optional {
22584		a(attrs)
22585	}
22586	opspec := tf.OpSpec{
22587		Type: "LoadTPUEmbeddingMomentumParameters",
22588		Input: []tf.Input{
22589			parameters, momenta,
22590		},
22591		Attrs: attrs,
22592	}
22593	return scope.AddOperation(opspec)
22594}
22595
22596// LoadTPUEmbeddingProximalAdagradParametersAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParameters.
22597type LoadTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)
22598
22599// LoadTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
22600// If not specified, defaults to -1
22601func LoadTPUEmbeddingProximalAdagradParametersTableId(value int64) LoadTPUEmbeddingProximalAdagradParametersAttr {
22602	return func(m optionalAttr) {
22603		m["table_id"] = value
22604	}
22605}
22606
22607// LoadTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
22608// If not specified, defaults to ""
22609func LoadTPUEmbeddingProximalAdagradParametersTableName(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
22610	return func(m optionalAttr) {
22611		m["table_name"] = value
22612	}
22613}
22614
22615// LoadTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value.
22616// If not specified, defaults to ""
22617func LoadTPUEmbeddingProximalAdagradParametersConfig(value string) LoadTPUEmbeddingProximalAdagradParametersAttr {
22618	return func(m optionalAttr) {
22619		m["config"] = value
22620	}
22621}
22622
22623// Load proximal Adagrad embedding parameters.
22624//
22625// An op that loads optimization parameters into HBM for embedding. Must be
22626// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22627// embedding table configuration. For example, this op is used to install
22628// parameters that are loaded from a checkpoint before a training loop is
22629// executed.
22630//
22631// Arguments:
22632//
22633//	parameters: Value of parameters used in the proximal Adagrad optimization algorithm.
22634//	accumulators: Value of accumulators used in the proximal Adagrad optimization algorithm.
22635//
22636// Returns the created operation.
22637func LoadTPUEmbeddingProximalAdagradParameters(scope *Scope, parameters tf.Output, accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingProximalAdagradParametersAttr) (o *tf.Operation) {
22638	if scope.Err() != nil {
22639		return
22640	}
22641	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22642	for _, a := range optional {
22643		a(attrs)
22644	}
22645	opspec := tf.OpSpec{
22646		Type: "LoadTPUEmbeddingProximalAdagradParameters",
22647		Input: []tf.Input{
22648			parameters, accumulators,
22649		},
22650		Attrs: attrs,
22651	}
22652	return scope.AddOperation(opspec)
22653}
22654
22655// LoadTPUEmbeddingRMSPropParametersAttr is an optional argument to LoadTPUEmbeddingRMSPropParameters.
22656type LoadTPUEmbeddingRMSPropParametersAttr func(optionalAttr)
22657
22658// LoadTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
22659// If not specified, defaults to -1
22660func LoadTPUEmbeddingRMSPropParametersTableId(value int64) LoadTPUEmbeddingRMSPropParametersAttr {
22661	return func(m optionalAttr) {
22662		m["table_id"] = value
22663	}
22664}
22665
22666// LoadTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
22667// If not specified, defaults to ""
22668func LoadTPUEmbeddingRMSPropParametersTableName(value string) LoadTPUEmbeddingRMSPropParametersAttr {
22669	return func(m optionalAttr) {
22670		m["table_name"] = value
22671	}
22672}
22673
22674// LoadTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value.
22675// If not specified, defaults to ""
22676func LoadTPUEmbeddingRMSPropParametersConfig(value string) LoadTPUEmbeddingRMSPropParametersAttr {
22677	return func(m optionalAttr) {
22678		m["config"] = value
22679	}
22680}
22681
22682// Load RMSProp embedding parameters.
22683//
22684// An op that loads optimization parameters into HBM for embedding. Must be
22685// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22686// embedding table configuration. For example, this op is used to install
22687// parameters that are loaded from a checkpoint before a training loop is
22688// executed.
22689//
22690// Arguments:
22691//
22692//	parameters: Value of parameters used in the RMSProp optimization algorithm.
22693//	ms: Value of ms used in the RMSProp optimization algorithm.
22694//	mom: Value of mom used in the RMSProp optimization algorithm.
22695//
22696// Returns the created operation.
22697func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf.Output, mom tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingRMSPropParametersAttr) (o *tf.Operation) {
22698	if scope.Err() != nil {
22699		return
22700	}
22701	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22702	for _, a := range optional {
22703		a(attrs)
22704	}
22705	opspec := tf.OpSpec{
22706		Type: "LoadTPUEmbeddingRMSPropParameters",
22707		Input: []tf.Input{
22708			parameters, ms, mom,
22709		},
22710		Attrs: attrs,
22711	}
22712	return scope.AddOperation(opspec)
22713}
22714
22715// LoadTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to LoadTPUEmbeddingStochasticGradientDescentParameters.
22716type LoadTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
22717
22718// LoadTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
22719// If not specified, defaults to -1
22720func LoadTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
22721	return func(m optionalAttr) {
22722		m["table_id"] = value
22723	}
22724}
22725
22726// LoadTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
22727// If not specified, defaults to ""
22728func LoadTPUEmbeddingStochasticGradientDescentParametersTableName(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
22729	return func(m optionalAttr) {
22730		m["table_name"] = value
22731	}
22732}
22733
22734// LoadTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value.
22735// If not specified, defaults to ""
22736func LoadTPUEmbeddingStochasticGradientDescentParametersConfig(value string) LoadTPUEmbeddingStochasticGradientDescentParametersAttr {
22737	return func(m optionalAttr) {
22738		m["config"] = value
22739	}
22740}
22741
22742// Load SGD embedding parameters.
22743//
22744// An op that loads optimization parameters into HBM for embedding. Must be
22745// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
22746// embedding table configuration. For example, this op is used to install
22747// parameters that are loaded from a checkpoint before a training loop is
22748// executed.
22749//
22750// Arguments:
22751//
22752//	parameters: Value of parameters used in the stochastic gradient descent optimization algorithm.
22753//
22754// Returns the created operation.
22755func LoadTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, parameters tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingStochasticGradientDescentParametersAttr) (o *tf.Operation) {
22756	if scope.Err() != nil {
22757		return
22758	}
22759	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
22760	for _, a := range optional {
22761		a(attrs)
22762	}
22763	opspec := tf.OpSpec{
22764		Type: "LoadTPUEmbeddingStochasticGradientDescentParameters",
22765		Input: []tf.Input{
22766			parameters,
22767		},
22768		Attrs: attrs,
22769	}
22770	return scope.AddOperation(opspec)
22771}
22772
22773// Computes natural logarithm of x element-wise.
22774//
22775// I.e., \\(y = \log_e x\\).
22776//
22777// Example:
22778//
22779// ```python
22780// x = tf.constant([0, 0.5, 1, 5])
22781// tf.math.log(x) ==> [-inf, -0.6931472,  0. ,  1.609438]
22782// ```
22783func Log(scope *Scope, x tf.Output) (y tf.Output) {
22784	if scope.Err() != nil {
22785		return
22786	}
22787	opspec := tf.OpSpec{
22788		Type: "Log",
22789		Input: []tf.Input{
22790			x,
22791		},
22792	}
22793	op := scope.AddOperation(opspec)
22794	return op.Output(0)
22795}
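
// A minimal usage sketch for these element-wise math wrappers, assuming the
// standard Scope/Session workflow of this package (the constant values are
// the ones from the example above):
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	x := op.Const(s, []float32{0, 0.5, 1, 5})
// 	y := op.Log(s, x)
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	out, err := sess.Run(nil, []tf.Output{y}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // approximately [-Inf -0.6931472 0 1.609438]
// }
// ```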
22796
22797// Computes natural logarithm of (1 + x) element-wise.
22798//
22799// I.e., \\(y = \log_e (1 + x)\\).
22800//
22801// Example:
22802//
22803// ```python
22804// x = tf.constant([0, 0.5, 1, 5])
22805// tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
22806// ```
22807func Log1p(scope *Scope, x tf.Output) (y tf.Output) {
22808	if scope.Err() != nil {
22809		return
22810	}
22811	opspec := tf.OpSpec{
22812		Type: "Log1p",
22813		Input: []tf.Input{
22814			x,
22815		},
22816	}
22817	op := scope.AddOperation(opspec)
22818	return op.Output(0)
22819}
22820
22821// Computes the sign and the log of the absolute value of the determinant of
22822//
22823// one or more square matrices.
22824//
22825// The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
22826// form square matrices. The outputs are two tensors containing the signs and
22827// absolute values of the log determinants for all N input submatrices
22828// `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`.
22829// The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU`
22830// is the `LU` decomposition of the input and `P` is the corresponding
22831// permutation matrix.
22832//
22833// Arguments:
22834//
22835//	input: Shape is `[N, M, M]`.
22836//
22837// Returns:
22838//
22839//	sign: The signs of the log determinants of the inputs. Shape is `[N]`.
22840//	log_abs_determinant: The logs of the absolute values of the determinants
22841//
22842// of the N input matrices.  Shape is `[N]`.
22843func LogMatrixDeterminant(scope *Scope, input tf.Output) (sign tf.Output, log_abs_determinant tf.Output) {
22844	if scope.Err() != nil {
22845		return
22846	}
22847	opspec := tf.OpSpec{
22848		Type: "LogMatrixDeterminant",
22849		Input: []tf.Input{
22850			input,
22851		},
22852	}
22853	op := scope.AddOperation(opspec)
22854	return op.Output(0), op.Output(1)
22855}
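
// A sketch of reconstructing the plain determinant from the two outputs via
// `determinant = sign*exp(log_abs_determinant)`, assuming the `Mul` and `Exp`
// wrappers from this package and the Session boilerplate from the Log sketch
// above:
//
// ```go
// // batchDet recovers the determinants of a batch of square matrices.
// func batchDet(s *op.Scope, input tf.Output) tf.Output {
// 	sign, logAbs := op.LogMatrixDeterminant(s, input)
// 	return op.Mul(s, sign, op.Exp(s, logAbs))
// }
// ```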
22856
22857// Computes log softmax activations.
22858//
22859// For each batch `i` and class `j` we have
22860//
22861//	logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
22862//
22863// Arguments:
22864//
22865//	logits: 2-D with shape `[batch_size, num_classes]`.
22866//
22867// Returns Same shape as `logits`.
22868func LogSoftmax(scope *Scope, logits tf.Output) (logsoftmax tf.Output) {
22869	if scope.Err() != nil {
22870		return
22871	}
22872	opspec := tf.OpSpec{
22873		Type: "LogSoftmax",
22874		Input: []tf.Input{
22875			logits,
22876		},
22877	}
22878	op := scope.AddOperation(opspec)
22879	return op.Output(0)
22880}
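
// The formula above can be spelled out with the element-wise wrappers in this
// package; a hedged sketch (the fused LogSoftmax kernel is the numerically
// stable choice, this is only to illustrate the identity):
//
// ```go
// // logSoftmaxByHand computes logits[i, j] - log(sum_j(exp(logits[i, j]))).
// func logSoftmaxByHand(s *op.Scope, logits tf.Output) tf.Output {
// 	axis := op.Const(s, int32(-1))
// 	lse := op.Log(s, op.Sum(s, op.Exp(s, logits), axis, op.SumKeepDims(true)))
// 	return op.Sub(s, logits, lse)
// }
// ```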
22881
22882// LogUniformCandidateSamplerAttr is an optional argument to LogUniformCandidateSampler.
22883type LogUniformCandidateSamplerAttr func(optionalAttr)
22884
22885// LogUniformCandidateSamplerSeed sets the optional seed attribute to value.
22886//
22887// value: If either seed or seed2 is set to be non-zero, the random number
22888// generator is seeded by the given seed.  Otherwise, it is seeded by a
22889// random seed.
22890// If not specified, defaults to 0
22891func LogUniformCandidateSamplerSeed(value int64) LogUniformCandidateSamplerAttr {
22892	return func(m optionalAttr) {
22893		m["seed"] = value
22894	}
22895}
22896
22897// LogUniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
22898//
22899// value: A second seed to avoid seed collision.
22900// If not specified, defaults to 0
22901func LogUniformCandidateSamplerSeed2(value int64) LogUniformCandidateSamplerAttr {
22902	return func(m optionalAttr) {
22903		m["seed2"] = value
22904	}
22905}
22906
22907// Generates labels for candidate sampling with a log-uniform distribution.
22908//
22909// See explanations of candidate sampling and the data formats at
22910// go/candidate-sampling.
22911//
22912// For each batch, this op picks a single set of sampled candidate labels.
22913//
22914// The advantages of sampling candidates per-batch are simplicity and the
22915// possibility of efficient dense matrix multiplication. The disadvantage is that
22916// the sampled candidates must be chosen independently of the context and of the
22917// true labels.
22918//
22919// Arguments:
22920//
22921//	true_classes: A batch_size * num_true matrix, in which each row contains the
22922//
22923// IDs of the num_true target_classes in the corresponding original label.
22924//
22925//	num_true: Number of true labels per context.
22926//	num_sampled: Number of candidates to randomly sample.
22927//	unique: If unique is true, we sample with rejection, so that all sampled
22928//
22929// candidates in a batch are unique. This requires some approximation to
22930// estimate the post-rejection sampling probabilities.
22931//
22932//	range_max: The sampler will sample integers from the interval [0, range_max).
22933//
22934// Returns:
22935//
22936//	sampled_candidates: A vector of length num_sampled, in which each element is
22937//
22938// the ID of a sampled candidate.
22939//
22940//	true_expected_count: A batch_size * num_true matrix, representing
22941//
22942// the number of times each candidate is expected to occur in a batch
22943// of sampled candidates. If unique=true, then this is a probability.
22944//
22945//	sampled_expected_count: A vector of length num_sampled, for each sampled
22946//
22947// candidate representing the number of times the candidate is expected
22948// to occur in a batch of sampled candidates.  If unique=true, then this is a
22949// probability.
22950func LogUniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...LogUniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
22951	if scope.Err() != nil {
22952		return
22953	}
22954	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
22955	for _, a := range optional {
22956		a(attrs)
22957	}
22958	opspec := tf.OpSpec{
22959		Type: "LogUniformCandidateSampler",
22960		Input: []tf.Input{
22961			true_classes,
22962		},
22963		Attrs: attrs,
22964	}
22965	op := scope.AddOperation(opspec)
22966	return op.Output(0), op.Output(1), op.Output(2)
22967}
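
// A sketch of the optional-attribute pattern shared by these samplers: the
// `...Attr` closures are passed variadically after the required arguments.
// The numeric values here are illustrative only:
//
// ```go
// func sampleCandidates(s *op.Scope, trueClasses tf.Output) (tf.Output, tf.Output, tf.Output) {
// 	// Fixed seeds make the sampler reproducible across runs.
// 	return op.LogUniformCandidateSampler(s, trueClasses,
// 		1,      // num_true
// 		64,     // num_sampled
// 		true,   // unique
// 		100000, // range_max
// 		op.LogUniformCandidateSamplerSeed(42),
// 		op.LogUniformCandidateSamplerSeed2(7))
// }
// ```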
22968
22969// Returns the truth value of x AND y element-wise.
22970//
22971// *NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
22972// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
22973func LogicalAnd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
22974	if scope.Err() != nil {
22975		return
22976	}
22977	opspec := tf.OpSpec{
22978		Type: "LogicalAnd",
22979		Input: []tf.Input{
22980			x, y,
22981		},
22982	}
22983	op := scope.AddOperation(opspec)
22984	return op.Output(0)
22985}
22986
22987// Returns the truth value of `NOT x` element-wise.
22988//
22989// Arguments:
22990//
22991//	x: A `Tensor` of type `bool`.
22992//
22993// Returns A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.
22994func LogicalNot(scope *Scope, x tf.Output) (y tf.Output) {
22995	if scope.Err() != nil {
22996		return
22997	}
22998	opspec := tf.OpSpec{
22999		Type: "LogicalNot",
23000		Input: []tf.Input{
23001			x,
23002		},
23003	}
23004	op := scope.AddOperation(opspec)
23005	return op.Output(0)
23006}
23007
23008// Returns the truth value of x OR y element-wise.
23009//
23010// *NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
23011// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
23012func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
23013	if scope.Err() != nil {
23014		return
23015	}
23016	opspec := tf.OpSpec{
23017		Type: "LogicalOr",
23018		Input: []tf.Input{
23019			x, y,
23020		},
23021	}
23022	op := scope.AddOperation(opspec)
23023	return op.Output(0)
23024}
23025
23026// Outputs all keys and values in the table.
23027//
23028// Arguments:
23029//
23030//	table_handle: Handle to the table.
23031//
23032// Returns:
23033//
23034//	keys: Vector of all keys present in the table.
23035//	values: Tensor of all values in the table. Indexed in parallel with `keys`.
23036func LookupTableExportV2(scope *Scope, table_handle tf.Output, Tkeys tf.DataType, Tvalues tf.DataType) (keys tf.Output, values tf.Output) {
23037	if scope.Err() != nil {
23038		return
23039	}
23040	attrs := map[string]interface{}{"Tkeys": Tkeys, "Tvalues": Tvalues}
23041	opspec := tf.OpSpec{
23042		Type: "LookupTableExportV2",
23043		Input: []tf.Input{
23044			table_handle,
23045		},
23046		Attrs: attrs,
23047	}
23048	op := scope.AddOperation(opspec)
23049	return op.Output(0), op.Output(1)
23050}
23051
23052// Looks up keys in a table, outputs the corresponding values.
23053//
23054// The tensor `keys` must be of the same type as the keys of the table.
23055// The output `values` is of the type of the table values.
23056//
23057// The scalar `default_value` is the value output for keys not present in the
23058// table. It must also be of the same type as the table values.
23059//
23060// Arguments:
23061//
23062//	table_handle: Handle to the table.
23063//	keys: Any shape.  Keys to look up.
23064//
23065// Returns Same shape as `keys`.  Values found in the table, or `default_value`
23066// for missing keys.
23067func LookupTableFindV2(scope *Scope, table_handle tf.Output, keys tf.Output, default_value tf.Output) (values tf.Output) {
23068	if scope.Err() != nil {
23069		return
23070	}
23071	opspec := tf.OpSpec{
23072		Type: "LookupTableFindV2",
23073		Input: []tf.Input{
23074			table_handle, keys, default_value,
23075		},
23076	}
23077	op := scope.AddOperation(opspec)
23078	return op.Output(0)
23079}
23080
23081// Replaces the contents of the table with the specified keys and values.
23082//
23083// The tensor `keys` must be of the same type as the keys of the table.
23084// The tensor `values` must be of the type of the table values.
23085//
23086// Arguments:
23087//
23088//	table_handle: Handle to the table.
23089//	keys: Any shape.  Keys to look up.
23090//	values: Values to associate with keys.
23091//
23092// Returns the created operation.
23093func LookupTableImportV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
23094	if scope.Err() != nil {
23095		return
23096	}
23097	opspec := tf.OpSpec{
23098		Type: "LookupTableImportV2",
23099		Input: []tf.Input{
23100			table_handle, keys, values,
23101		},
23102	}
23103	return scope.AddOperation(opspec)
23104}
23105
23106// Updates the table to associate keys with values.
23107//
23108// The tensor `keys` must be of the same type as the keys of the table.
23109// The tensor `values` must be of the type of the table values.
23110//
23111// Arguments:
23112//
23113//	table_handle: Handle to the table.
23114//	keys: Any shape.  Keys to look up.
23115//	values: Values to associate with keys.
23116//
23117// Returns the created operation.
23118func LookupTableInsertV2(scope *Scope, table_handle tf.Output, keys tf.Output, values tf.Output) (o *tf.Operation) {
23119	if scope.Err() != nil {
23120		return
23121	}
23122	opspec := tf.OpSpec{
23123		Type: "LookupTableInsertV2",
23124		Input: []tf.Input{
23125			table_handle, keys, values,
23126		},
23127	}
23128	return scope.AddOperation(opspec)
23129}
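
// A sketch tying the lookup-table ops together, assuming the
// `MutableHashTableV2` wrapper elsewhere in this package for table creation;
// the insert operation must be run (as a Session target) before the find is
// fetched:
//
// ```go
// func tableDemo(s *op.Scope) (*tf.Operation, tf.Output) {
// 	table := op.MutableHashTableV2(s, tf.String, tf.Int64)
// 	insert := op.LookupTableInsertV2(s, table,
// 		op.Const(s, []string{"a", "b"}),
// 		op.Const(s, []int64{1, 2}))
// 	// Missing keys ("c") come back as the scalar default_value (-1).
// 	found := op.LookupTableFindV2(s, table,
// 		op.Const(s, []string{"a", "c"}),
// 		op.Const(s, int64(-1)))
// 	return insert, found
// }
// ```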
23130
23131// Removes keys and their associated values from a table.
23132//
23133// The tensor `keys` must be of the same type as the keys of the table. Keys not
23134// already in the table are silently ignored.
23135//
23136// Arguments:
23137//
23138//	table_handle: Handle to the table.
23139//	keys: Any shape.  Keys of the elements to remove.
23140//
23141// Returns the created operation.
23142func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation) {
23143	if scope.Err() != nil {
23144		return
23145	}
23146	opspec := tf.OpSpec{
23147		Type: "LookupTableRemoveV2",
23148		Input: []tf.Input{
23149			table_handle, keys,
23150		},
23151	}
23152	return scope.AddOperation(opspec)
23153}
23154
23155// Computes the number of elements in the given table.
23156//
23157// Arguments:
23158//
23159//	table_handle: Handle to the table.
23160//
23161// Returns Scalar that contains number of elements in the table.
23162func LookupTableSizeV2(scope *Scope, table_handle tf.Output) (size tf.Output) {
23163	if scope.Err() != nil {
23164		return
23165	}
23166	opspec := tf.OpSpec{
23167		Type: "LookupTableSizeV2",
23168		Input: []tf.Input{
23169			table_handle,
23170		},
23171	}
23172	op := scope.AddOperation(opspec)
23173	return op.Output(0)
23174}
23175
23176// Forwards the input to the output.
23177//
23178// This operator represents the loop termination condition used by the
23179// "pivot" switches of a loop.
23180//
23181// Arguments:
23182//
23183//	input: A boolean scalar, representing the branch predicate of the Switch op.
23184//
23185// Returns The same tensor as `input`.
23186func LoopCond(scope *Scope, input tf.Output) (output tf.Output) {
23187	if scope.Err() != nil {
23188		return
23189	}
23190	opspec := tf.OpSpec{
23191		Type: "LoopCond",
23192		Input: []tf.Input{
23193			input,
23194		},
23195	}
23196	op := scope.AddOperation(opspec)
23197	return op.Output(0)
23198}
23199
23200// LowerBoundAttr is an optional argument to LowerBound.
23201type LowerBoundAttr func(optionalAttr)
23202
23203// LowerBoundOutType sets the optional out_type attribute to value.
23204// If not specified, defaults to DT_INT32
23205func LowerBoundOutType(value tf.DataType) LowerBoundAttr {
23206	return func(m optionalAttr) {
23207		m["out_type"] = value
23208	}
23209}
23210
23211// Applies lower_bound(sorted_search_values, values) along each row.
23212//
23213// Each set of rows with the same index in (sorted_inputs, values) is treated
23214// independently.  The resulting row is the equivalent of calling
23215// `np.searchsorted(sorted_inputs, values, side='left')`.
23216//
23217// The result is not a global index to the entire
23218// `Tensor`, but rather just the index in the last dimension.
23219//
23220// A 2-D example:
23221//
23222//	sorted_sequence = [[0, 3, 9, 9, 10],
23223//	                   [1, 2, 3, 4, 5]]
23224//	values = [[2, 4, 9],
23225//	          [0, 2, 6]]
23226//
23227//	result = LowerBound(sorted_sequence, values)
23228//
23229//	result == [[1, 2, 2],
23230//	           [0, 1, 5]]
23231//
23232// Arguments:
23233//
23234//	sorted_inputs: 2-D Tensor where each row is ordered.
23235//	values: 2-D Tensor with the same number of rows as `sorted_search_values`. Contains
23236//
23237// the values that will be searched for in `sorted_search_values`.
23238//
23239// Returns A `Tensor` with the same shape as `values`.  It contains the first scalar index
23240// into the last dimension where values can be inserted without changing the
23241// ordered property.
23242func LowerBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...LowerBoundAttr) (output tf.Output) {
23243	if scope.Err() != nil {
23244		return
23245	}
23246	attrs := map[string]interface{}{}
23247	for _, a := range optional {
23248		a(attrs)
23249	}
23250	opspec := tf.OpSpec{
23251		Type: "LowerBound",
23252		Input: []tf.Input{
23253			sorted_inputs, values,
23254		},
23255		Attrs: attrs,
23256	}
23257	op := scope.AddOperation(opspec)
23258	return op.Output(0)
23259}
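
// A sketch reproducing the 2-D example above, assuming the Session
// boilerplate from the Log sketch earlier:
//
// ```go
// func lowerBoundDemo(s *op.Scope) tf.Output {
// 	sortedInputs := op.Const(s, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
// 	values := op.Const(s, [][]int32{{2, 4, 9}, {0, 2, 6}})
// 	// Fetching this output yields [[1, 2, 2], [0, 1, 5]].
// 	return op.LowerBound(s, sortedInputs, values)
// }
// ```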
23260
23261// LuAttr is an optional argument to Lu.
23262type LuAttr func(optionalAttr)
23263
23264// LuOutputIdxType sets the optional output_idx_type attribute to value.
23265// If not specified, defaults to DT_INT32
23266func LuOutputIdxType(value tf.DataType) LuAttr {
23267	return func(m optionalAttr) {
23268		m["output_idx_type"] = value
23269	}
23270}
23271
23272// Computes the LU decomposition of one or more square matrices.
23273//
23274// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
23275// form square matrices.
23276//
23277// The input has to be invertible.
23278//
23279// The output consists of two tensors LU and P containing the LU decomposition
23280// of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
23281// upper triangular factors.
23282//
23283// For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
23284// shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
23285// triangular part of LU. U is an upper triangular matrix of shape `[M, M]` whose
23286// entries correspond to the upper triangular part, including the diagonal, of LU.
23287//
23288// P represents a permutation matrix encoded as a list of indices each between `0`
23289// and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
23290// P, then L, U and P satisfy P_mat * input = L * U.
23291//
23292// Arguments:
23293//
23294//	input: A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
23295//
23296// size `[M, M]`.
23297//
23298// Returns:
23299//
23300//	lu: A tensor of shape `[..., M, M]` whose strictly lower triangular part denotes the
23301//
23302// lower triangular factor `L` with unit diagonal, and whose upper triangular part
23303// denotes the upper triangular factor `U`.
23304//
23305//	p: Permutation of the rows encoded as a list of indices in `0..M-1`. Shape is
23306//
23307// `[..., M]`.
23308// @compatibility(scipy)
23309// Similar to `scipy.linalg.lu`, except the triangular factors `L` and `U` are
23310// packed into a single tensor, the permutation is applied to `input` instead of
23311// the right hand side and the permutation `P` is returned as a list of indices
23312// instead of a permutation matrix.
23313// @end_compatibility
23314func Lu(scope *Scope, input tf.Output, optional ...LuAttr) (lu tf.Output, p tf.Output) {
23315	if scope.Err() != nil {
23316		return
23317	}
23318	attrs := map[string]interface{}{}
23319	for _, a := range optional {
23320		a(attrs)
23321	}
23322	opspec := tf.OpSpec{
23323		Type: "Lu",
23324		Input: []tf.Input{
23325			input,
23326		},
23327		Attrs: attrs,
23328	}
23329	op := scope.AddOperation(opspec)
23330	return op.Output(0), op.Output(1)
23331}
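
// A sketch of requesting 64-bit permutation indices via the optional
// attribute; `lu` packs both triangular factors as described above:
//
// ```go
// func luDemo(s *op.Scope) (lu, p tf.Output) {
// 	m := op.Const(s, [][]float32{{4, 3}, {6, 3}})
// 	return op.Lu(s, m, op.LuOutputIdxType(tf.Int64))
// }
// ```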
23332
23333// Makes a new iterator from the given `dataset` and stores it in `iterator`.
23334//
23335// This operation may be executed multiple times. Each execution will reset the
23336// iterator in `iterator` to the first element of `dataset`.
23337//
23338// Returns the created operation.
23339func MakeIterator(scope *Scope, dataset tf.Output, iterator tf.Output) (o *tf.Operation) {
23340	if scope.Err() != nil {
23341		return
23342	}
23343	opspec := tf.OpSpec{
23344		Type: "MakeIterator",
23345		Input: []tf.Input{
23346			dataset, iterator,
23347		},
23348	}
23349	return scope.AddOperation(opspec)
23350}
23351
23352// Make all elements in the non-batch dimension unique, but "close" to
23353//
23354// their initial value. Never returns a sub-normal number. Never returns
23355// zero. The sign of each input element is always identical to the sign
23356// of the corresponding output element. Behavior for infinite elements is
23357// undefined. Behavior for subnormal elements is undefined.
23358func MakeUnique(scope *Scope, input tf.Output) (output tf.Output) {
23359	if scope.Err() != nil {
23360		return
23361	}
23362	opspec := tf.OpSpec{
23363		Type: "MakeUnique",
23364		Input: []tf.Input{
23365			input,
23366		},
23367	}
23368	op := scope.AddOperation(opspec)
23369	return op.Output(0)
23370}
23371
23372// MapClearAttr is an optional argument to MapClear.
23373type MapClearAttr func(optionalAttr)
23374
23375// MapClearCapacity sets the optional capacity attribute to value.
23376// If not specified, defaults to 0
23377//
23378// REQUIRES: value >= 0
23379func MapClearCapacity(value int64) MapClearAttr {
23380	return func(m optionalAttr) {
23381		m["capacity"] = value
23382	}
23383}
23384
23385// MapClearMemoryLimit sets the optional memory_limit attribute to value.
23386// If not specified, defaults to 0
23387//
23388// REQUIRES: value >= 0
23389func MapClearMemoryLimit(value int64) MapClearAttr {
23390	return func(m optionalAttr) {
23391		m["memory_limit"] = value
23392	}
23393}
23394
23395// MapClearContainer sets the optional container attribute to value.
23396// If not specified, defaults to ""
23397func MapClearContainer(value string) MapClearAttr {
23398	return func(m optionalAttr) {
23399		m["container"] = value
23400	}
23401}
23402
23403// MapClearSharedName sets the optional shared_name attribute to value.
23404// If not specified, defaults to ""
23405func MapClearSharedName(value string) MapClearAttr {
23406	return func(m optionalAttr) {
23407		m["shared_name"] = value
23408	}
23409}
23410
23411// Op removes all elements in the underlying container.
23412//
23413// Returns the created operation.
23414func MapClear(scope *Scope, dtypes []tf.DataType, optional ...MapClearAttr) (o *tf.Operation) {
23415	if scope.Err() != nil {
23416		return
23417	}
23418	attrs := map[string]interface{}{"dtypes": dtypes}
23419	for _, a := range optional {
23420		a(attrs)
23421	}
23422	opspec := tf.OpSpec{
23423		Type: "MapClear",
23424
23425		Attrs: attrs,
23426	}
23427	return scope.AddOperation(opspec)
23428}
23429
23430// MapIncompleteSizeAttr is an optional argument to MapIncompleteSize.
23431type MapIncompleteSizeAttr func(optionalAttr)
23432
23433// MapIncompleteSizeCapacity sets the optional capacity attribute to value.
23434// If not specified, defaults to 0
23435//
23436// REQUIRES: value >= 0
23437func MapIncompleteSizeCapacity(value int64) MapIncompleteSizeAttr {
23438	return func(m optionalAttr) {
23439		m["capacity"] = value
23440	}
23441}
23442
23443// MapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
23444// If not specified, defaults to 0
23445//
23446// REQUIRES: value >= 0
23447func MapIncompleteSizeMemoryLimit(value int64) MapIncompleteSizeAttr {
23448	return func(m optionalAttr) {
23449		m["memory_limit"] = value
23450	}
23451}
23452
23453// MapIncompleteSizeContainer sets the optional container attribute to value.
23454// If not specified, defaults to ""
23455func MapIncompleteSizeContainer(value string) MapIncompleteSizeAttr {
23456	return func(m optionalAttr) {
23457		m["container"] = value
23458	}
23459}
23460
23461// MapIncompleteSizeSharedName sets the optional shared_name attribute to value.
23462// If not specified, defaults to ""
23463func MapIncompleteSizeSharedName(value string) MapIncompleteSizeAttr {
23464	return func(m optionalAttr) {
23465		m["shared_name"] = value
23466	}
23467}
23468
23469// Op returns the number of incomplete elements in the underlying container.
23470func MapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...MapIncompleteSizeAttr) (size tf.Output) {
23471	if scope.Err() != nil {
23472		return
23473	}
23474	attrs := map[string]interface{}{"dtypes": dtypes}
23475	for _, a := range optional {
23476		a(attrs)
23477	}
23478	opspec := tf.OpSpec{
23479		Type: "MapIncompleteSize",
23480
23481		Attrs: attrs,
23482	}
23483	op := scope.AddOperation(opspec)
23484	return op.Output(0)
23485}
23486
23487// MapPeekAttr is an optional argument to MapPeek.
23488type MapPeekAttr func(optionalAttr)
23489
23490// MapPeekCapacity sets the optional capacity attribute to value.
23491// If not specified, defaults to 0
23492//
23493// REQUIRES: value >= 0
23494func MapPeekCapacity(value int64) MapPeekAttr {
23495	return func(m optionalAttr) {
23496		m["capacity"] = value
23497	}
23498}
23499
23500// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
23501// If not specified, defaults to 0
23502//
23503// REQUIRES: value >= 0
23504func MapPeekMemoryLimit(value int64) MapPeekAttr {
23505	return func(m optionalAttr) {
23506		m["memory_limit"] = value
23507	}
23508}
23509
23510// MapPeekContainer sets the optional container attribute to value.
23511// If not specified, defaults to ""
23512func MapPeekContainer(value string) MapPeekAttr {
23513	return func(m optionalAttr) {
23514		m["container"] = value
23515	}
23516}
23517
23518// MapPeekSharedName sets the optional shared_name attribute to value.
23519// If not specified, defaults to ""
23520func MapPeekSharedName(value string) MapPeekAttr {
23521	return func(m optionalAttr) {
23522		m["shared_name"] = value
23523	}
23524}
23525
23526// Op peeks at the values at the specified key.  If the
23527//
23528// underlying container does not contain this key,
23529// this op will block until it does.
23530func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
23531	if scope.Err() != nil {
23532		return
23533	}
23534	attrs := map[string]interface{}{"dtypes": dtypes}
23535	for _, a := range optional {
23536		a(attrs)
23537	}
23538	opspec := tf.OpSpec{
23539		Type: "MapPeek",
23540		Input: []tf.Input{
23541			key, indices,
23542		},
23543		Attrs: attrs,
23544	}
23545	op := scope.AddOperation(opspec)
23546	if scope.Err() != nil {
23547		return
23548	}
23549	var idx int
23550	var err error
23551	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
23552		scope.UpdateErr("MapPeek", err)
23553		return
23554	}
23555	return values
23556}
23557
23558// MapSizeAttr is an optional argument to MapSize.
23559type MapSizeAttr func(optionalAttr)
23560
23561// MapSizeCapacity sets the optional capacity attribute to value.
23562// If not specified, defaults to 0
23563//
23564// REQUIRES: value >= 0
23565func MapSizeCapacity(value int64) MapSizeAttr {
23566	return func(m optionalAttr) {
23567		m["capacity"] = value
23568	}
23569}
23570
23571// MapSizeMemoryLimit sets the optional memory_limit attribute to value.
23572// If not specified, defaults to 0
23573//
23574// REQUIRES: value >= 0
23575func MapSizeMemoryLimit(value int64) MapSizeAttr {
23576	return func(m optionalAttr) {
23577		m["memory_limit"] = value
23578	}
23579}
23580
23581// MapSizeContainer sets the optional container attribute to value.
23582// If not specified, defaults to ""
23583func MapSizeContainer(value string) MapSizeAttr {
23584	return func(m optionalAttr) {
23585		m["container"] = value
23586	}
23587}
23588
23589// MapSizeSharedName sets the optional shared_name attribute to value.
23590// If not specified, defaults to ""
23591func MapSizeSharedName(value string) MapSizeAttr {
23592	return func(m optionalAttr) {
23593		m["shared_name"] = value
23594	}
23595}
23596
23597// Op returns the number of elements in the underlying container.
23598func MapSize(scope *Scope, dtypes []tf.DataType, optional ...MapSizeAttr) (size tf.Output) {
23599	if scope.Err() != nil {
23600		return
23601	}
23602	attrs := map[string]interface{}{"dtypes": dtypes}
23603	for _, a := range optional {
23604		a(attrs)
23605	}
23606	opspec := tf.OpSpec{
23607		Type: "MapSize",
23608
23609		Attrs: attrs,
23610	}
23611	op := scope.AddOperation(opspec)
23612	return op.Output(0)
23613}
23614
23615// MapStageAttr is an optional argument to MapStage.
23616type MapStageAttr func(optionalAttr)
23617
23618// MapStageCapacity sets the optional capacity attribute to value.
23619//
23620// value: Maximum number of elements in the Staging Area. If > 0, inserts
23621// on the container will block when the capacity is reached.
23622// If not specified, defaults to 0
23623//
23624// REQUIRES: value >= 0
23625func MapStageCapacity(value int64) MapStageAttr {
23626	return func(m optionalAttr) {
23627		m["capacity"] = value
23628	}
23629}
23630
23631// MapStageMemoryLimit sets the optional memory_limit attribute to value.
23632// If not specified, defaults to 0
23633//
23634// REQUIRES: value >= 0
23635func MapStageMemoryLimit(value int64) MapStageAttr {
23636	return func(m optionalAttr) {
23637		m["memory_limit"] = value
23638	}
23639}
23640
23641// MapStageContainer sets the optional container attribute to value.
23642//
23643// value: If non-empty, this queue is placed in the given container. Otherwise,
23644// a default container is used.
23645// If not specified, defaults to ""
23646func MapStageContainer(value string) MapStageAttr {
23647	return func(m optionalAttr) {
23648		m["container"] = value
23649	}
23650}
23651
23652// MapStageSharedName sets the optional shared_name attribute to value.
23653//
23654// value: It is necessary to match this name to the matching Unstage Op.
23655// If not specified, defaults to ""
23656func MapStageSharedName(value string) MapStageAttr {
23657	return func(m optionalAttr) {
23658		m["shared_name"] = value
23659	}
23660}
23661
23662// Stage (key, values) in the underlying container which behaves like a hashtable.
23663//
23664// Arguments:
23665//
23666//	key: int64
23667//
23668//	values: a list of tensors
23669//
23670//	dtypes: A list of data types that inserted values should adhere to.
23671//
23672// Returns the created operation.
23673func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) {
23674	if scope.Err() != nil {
23675		return
23676	}
23677	attrs := map[string]interface{}{"dtypes": dtypes}
23678	for _, a := range optional {
23679		a(attrs)
23680	}
23681	opspec := tf.OpSpec{
23682		Type: "MapStage",
23683		Input: []tf.Input{
23684			key, indices, tf.OutputList(values),
23685		},
23686		Attrs: attrs,
23687	}
23688	return scope.AddOperation(opspec)
23689}
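
// A sketch pairing MapStage with MapUnstage (defined below); a shared_name is
// set explicitly so both ops resolve to the same underlying container. The
// key and payload values are illustrative:
//
// ```go
// func stagingDemo(s *op.Scope) (*tf.Operation, []tf.Output) {
// 	key := op.Const(s, int64(0))
// 	indices := op.Const(s, []int32{0})
// 	dtypes := []tf.DataType{tf.Float}
// 	stage := op.MapStage(s, key, indices,
// 		[]tf.Output{op.Const(s, []float32{1, 2, 3})}, dtypes,
// 		op.MapStageSharedName("demo_map"))
// 	// Running `stage` first, then fetching `vals`, removes the entry again.
// 	vals := op.MapUnstage(s, key, indices, dtypes,
// 		op.MapUnstageSharedName("demo_map"))
// 	return stage, vals
// }
// ```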
23690
23691// MapUnstageAttr is an optional argument to MapUnstage.
23692type MapUnstageAttr func(optionalAttr)
23693
23694// MapUnstageCapacity sets the optional capacity attribute to value.
23695// If not specified, defaults to 0
23696//
23697// REQUIRES: value >= 0
23698func MapUnstageCapacity(value int64) MapUnstageAttr {
23699	return func(m optionalAttr) {
23700		m["capacity"] = value
23701	}
23702}
23703
23704// MapUnstageMemoryLimit sets the optional memory_limit attribute to value.
23705// If not specified, defaults to 0
23706//
23707// REQUIRES: value >= 0
23708func MapUnstageMemoryLimit(value int64) MapUnstageAttr {
23709	return func(m optionalAttr) {
23710		m["memory_limit"] = value
23711	}
23712}
23713
23714// MapUnstageContainer sets the optional container attribute to value.
23715// If not specified, defaults to ""
23716func MapUnstageContainer(value string) MapUnstageAttr {
23717	return func(m optionalAttr) {
23718		m["container"] = value
23719	}
23720}
23721
23722// MapUnstageSharedName sets the optional shared_name attribute to value.
23723// If not specified, defaults to ""
23724func MapUnstageSharedName(value string) MapUnstageAttr {
23725	return func(m optionalAttr) {
23726		m["shared_name"] = value
23727	}
23728}
23729
23730// Op removes and returns the values associated with the key
23731//
23732// from the underlying container.   If the underlying container
23733// does not contain this key, the op will block until it does.
23734func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {
23735	if scope.Err() != nil {
23736		return
23737	}
23738	attrs := map[string]interface{}{"dtypes": dtypes}
23739	for _, a := range optional {
23740		a(attrs)
23741	}
23742	opspec := tf.OpSpec{
23743		Type: "MapUnstage",
23744		Input: []tf.Input{
23745			key, indices,
23746		},
23747		Attrs: attrs,
23748	}
23749	op := scope.AddOperation(opspec)
23750	if scope.Err() != nil {
23751		return
23752	}
23753	var idx int
23754	var err error
23755	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
23756		scope.UpdateErr("MapUnstage", err)
23757		return
23758	}
23759	return values
23760}
23761
23762// MapUnstageNoKeyAttr is an optional argument to MapUnstageNoKey.
23763type MapUnstageNoKeyAttr func(optionalAttr)
23764
23765// MapUnstageNoKeyCapacity sets the optional capacity attribute to value.
23766// If not specified, defaults to 0
23767//
23768// REQUIRES: value >= 0
23769func MapUnstageNoKeyCapacity(value int64) MapUnstageNoKeyAttr {
23770	return func(m optionalAttr) {
23771		m["capacity"] = value
23772	}
23773}
23774
23775// MapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
23776// If not specified, defaults to 0
23777//
23778// REQUIRES: value >= 0
23779func MapUnstageNoKeyMemoryLimit(value int64) MapUnstageNoKeyAttr {
23780	return func(m optionalAttr) {
23781		m["memory_limit"] = value
23782	}
23783}
23784
23785// MapUnstageNoKeyContainer sets the optional container attribute to value.
23786// If not specified, defaults to ""
23787func MapUnstageNoKeyContainer(value string) MapUnstageNoKeyAttr {
23788	return func(m optionalAttr) {
23789		m["container"] = value
23790	}
23791}
23792
23793// MapUnstageNoKeySharedName sets the optional shared_name attribute to value.
23794// If not specified, defaults to ""
23795func MapUnstageNoKeySharedName(value string) MapUnstageNoKeyAttr {
23796	return func(m optionalAttr) {
23797		m["shared_name"] = value
23798	}
23799}
23800
23801// Op removes and returns a random (key, value)
23802//
23803// from the underlying container.   If the underlying container
23804// does not contain elements, the op will block until it does.
23805func MapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
23806	if scope.Err() != nil {
23807		return
23808	}
23809	attrs := map[string]interface{}{"dtypes": dtypes}
23810	for _, a := range optional {
23811		a(attrs)
23812	}
23813	opspec := tf.OpSpec{
23814		Type: "MapUnstageNoKey",
23815		Input: []tf.Input{
23816			indices,
23817		},
23818		Attrs: attrs,
23819	}
23820	op := scope.AddOperation(opspec)
23821	if scope.Err() != nil {
23822		return
23823	}
23824	var idx int
23825	var err error
23826	key = op.Output(idx)
23827	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
23828		scope.UpdateErr("MapUnstageNoKey", err)
23829		return
23830	}
23831	return key, values
23832}
23833
23834// MatMulAttr is an optional argument to MatMul.
23835type MatMulAttr func(optionalAttr)
23836
23837// MatMulTransposeA sets the optional transpose_a attribute to value.
23838//
23839// value: If true, "a" is transposed before multiplication.
23840// If not specified, defaults to false
23841func MatMulTransposeA(value bool) MatMulAttr {
23842	return func(m optionalAttr) {
23843		m["transpose_a"] = value
23844	}
23845}
23846
23847// MatMulTransposeB sets the optional transpose_b attribute to value.
23848//
23849// value: If true, "b" is transposed before multiplication.
23850// If not specified, defaults to false
23851func MatMulTransposeB(value bool) MatMulAttr {
23852	return func(m optionalAttr) {
23853		m["transpose_b"] = value
23854	}
23855}
23856
23857// Multiply the matrix "a" by the matrix "b".
23858//
23859// The inputs must be two-dimensional matrices and the inner dimension of
23860// "a" (after being transposed if transpose_a is true) must match the
23861// outer dimension of "b" (after being transposed if transposed_b is
23862// true).
23863//
23864// *Note*: The default kernel implementation for MatMul on GPUs uses
23865// cublas.
23866func MatMul(scope *Scope, a tf.Output, b tf.Output, optional ...MatMulAttr) (product tf.Output) {
23867	if scope.Err() != nil {
23868		return
23869	}
23870	attrs := map[string]interface{}{}
23871	for _, a := range optional {
23872		a(attrs)
23873	}
23874	opspec := tf.OpSpec{
23875		Type: "MatMul",
23876		Input: []tf.Input{
23877			a, b,
23878		},
23879		Attrs: attrs,
23880	}
23881	op := scope.AddOperation(opspec)
23882	return op.Output(0)
23883}
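
// A sketch of the transpose options; with MatMulTransposeA the product is
// transpose(a) x b, so the inner dimensions line up as described above:
//
// ```go
// func matmulDemo(s *op.Scope) tf.Output {
// 	a := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// 	b := op.Const(s, [][]float32{{5, 6}, {7, 8}})
// 	return op.MatMul(s, a, b, op.MatMulTransposeA(true))
// }
// ```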
23884
23885// Returns the set of files matching one or more glob patterns.
23886//
23887// Note that this routine only supports wildcard characters in the
23888// basename portion of the pattern, not in the directory portion.
23889// Note also that the order of filenames returned is deterministic.
23890//
23891// Arguments:
23892//
23893//	pattern: Shell wildcard pattern(s). Scalar or vector of type string.
23894//
23895// Returns A vector of matching filenames.
23896func MatchingFiles(scope *Scope, pattern tf.Output) (filenames tf.Output) {
23897	if scope.Err() != nil {
23898		return
23899	}
23900	opspec := tf.OpSpec{
23901		Type: "MatchingFiles",
23902		Input: []tf.Input{
23903			pattern,
23904		},
23905	}
23906	op := scope.AddOperation(opspec)
23907	return op.Output(0)
23908}
23909
23910// Copy a tensor setting everything outside a central band in each innermost matrix to zero.
23911//
23912// The `band` part is computed as follows:
23913// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
23914// tensor with the same shape where
23915//
23916// `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
23917//
23918// # The indicator function
23919//
23920// `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
23921//
23922//	(num_upper < 0 || (n-m) <= num_upper)`.
23923//
23924// For example:
23925//
23926// ```
23927// # if 'input' is [[ 0,  1,  2, 3]
23928// #                [-1,  0,  1, 2]
23929// #                [-2, -1,  0, 1]
23930// #                [-3, -2, -1, 0]],
23931//
23932// tf.linalg.band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
23933//
23934//	[-1,  0,  1, 2]
23935//	[ 0, -1,  0, 1]
23936//	[ 0,  0, -1, 0]],
23937//
23938// tf.linalg.band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
23939//
23940//	[-1,  0,  1, 0]
23941//	[-2, -1,  0, 1]
23942//	[ 0, -2, -1, 0]]
23943//
23944// ```
23945//
23946// Useful special cases:
23947//
23948// ```
23949//
23950//	tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
23951//	tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
23952//	tf.linalg.band_part(input, 0, 0) ==> Diagonal.
23953//
23954// ```
23955//
23956// Arguments:
23957//
23958//	input: Rank `k` tensor.
23959//	num_lower: 0-D tensor. Number of subdiagonals to keep. If negative, keep entire
23960//
23961// lower triangle.
23962//
23963//	num_upper: 0-D tensor. Number of superdiagonals to keep. If negative, keep
23964//
23965// entire upper triangle.
23966//
23967// Returns Rank `k` tensor of the same shape as input. The extracted banded tensor.
23968func MatrixBandPart(scope *Scope, input tf.Output, num_lower tf.Output, num_upper tf.Output) (band tf.Output) {
23969	if scope.Err() != nil {
23970		return
23971	}
23972	opspec := tf.OpSpec{
23973		Type: "MatrixBandPart",
23974		Input: []tf.Input{
23975			input, num_lower, num_upper,
23976		},
23977	}
23978	op := scope.AddOperation(opspec)
23979	return op.Output(0)
23980}
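
// A sketch of the special cases listed above, with the band bounds supplied
// as scalar constants:
//
// ```go
// func upperTriangular(s *op.Scope, m tf.Output) tf.Output {
// 	// num_lower = 0 drops everything below the diagonal; num_upper = -1
// 	// keeps the entire upper triangle.
// 	return op.MatrixBandPart(s, m, op.Const(s, int64(0)), op.Const(s, int64(-1)))
// }
// ```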
23981
23982// Computes the determinant of one or more square matrices.
23983//
23984// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
23985// form square matrices. The output is a tensor containing the determinants
23986// for all input submatrices `[..., :, :]`.
23987//
23988// Arguments:
23989//
23990//	input: Shape is `[..., M, M]`.
23991//
23992// Returns Shape is `[...]`.
23993func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {
23994	if scope.Err() != nil {
23995		return
23996	}
23997	opspec := tf.OpSpec{
23998		Type: "MatrixDeterminant",
23999		Input: []tf.Input{
24000			input,
24001		},
24002	}
24003	op := scope.AddOperation(opspec)
24004	return op.Output(0)
24005}
24006
24007// Returns a batched diagonal tensor with given batched diagonal values.
24008//
24009// Given a `diagonal`, this operation returns a tensor with the `diagonal` and
24010// everything else padded with zeros. The diagonal is computed as follows:
24011//
24012// Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
24013// tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
24014//
24015// `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
24016//
24017// For example:
24018//
24019// ```
24020// # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
24021//
24022// and diagonal.shape = (2, 4)
24023//
24024// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
24025//
24026//	 [0, 2, 0, 0]
24027//	 [0, 0, 3, 0]
24028//	 [0, 0, 0, 4]],
24029//	[[5, 0, 0, 0]
24030//	 [0, 6, 0, 0]
24031//	 [0, 0, 7, 0]
24032//	 [0, 0, 0, 8]]]
24033//
24034// which has shape (2, 4, 4)
24035// ```
24036//
24037// Arguments:
24038//
24039//	diagonal: Rank `k`, where `k >= 1`.
24040//
24041// Returns Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
24042func MatrixDiag(scope *Scope, diagonal tf.Output) (output tf.Output) {
24043	if scope.Err() != nil {
24044		return
24045	}
24046	opspec := tf.OpSpec{
24047		Type: "MatrixDiag",
24048		Input: []tf.Input{
24049			diagonal,
24050		},
24051	}
24052	op := scope.AddOperation(opspec)
24053	return op.Output(0)
24054}
24055
24056// Returns the batched diagonal part of a batched tensor.
24057//
24058// This operation returns a tensor with the `diagonal` part
24059// of the batched `input`. The `diagonal` part is computed as follows:
24060//
24061// Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
24062// tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
24063//
24064// `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
24065//
24066// The input must be at least a matrix.
24067//
24068// For example:
24069//
24070// ```
24071// # 'input' is [[[1, 0, 0, 0]
24072//
24073//	 [0, 2, 0, 0]
24074//	 [0, 0, 3, 0]
24075//	 [0, 0, 0, 4]],
24076//	[[5, 0, 0, 0]
24077//	 [0, 6, 0, 0]
24078//	 [0, 0, 7, 0]
24079//	 [0, 0, 0, 8]]]
24080//
24081// and input.shape = (2, 4, 4)
24082//
24083// tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
24084//
24085// which has shape (2, 4)
24086// ```
24087//
24088// Arguments:
24089//
24090//	input: Rank `k` tensor where `k >= 2`.
24091//
24092// Returns The extracted diagonal(s) having shape
24093// `diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
24094func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {
24095	if scope.Err() != nil {
24096		return
24097	}
24098	opspec := tf.OpSpec{
24099		Type: "MatrixDiagPart",
24100		Input: []tf.Input{
24101			input,
24102		},
24103	}
24104	op := scope.AddOperation(opspec)
24105	return op.Output(0)
24106}
24107
24108// Returns the batched diagonal part of a batched tensor.
24109//
24110// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
24111// `input`.
24112//
24113// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
24114// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
24115// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
24116// Let `num_diags` be the number of diagonals to extract,
24117// `num_diags = k[1] - k[0] + 1`.
24118//
24119// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
24120// `[I, J, ..., L, max_diag_len]` and values:
24121//
24122// ```
24123// diagonal[i, j, ..., l, n]
24124//
24125//	= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
24126//	  padding_value                 ; otherwise.
24127//
24128// ```
24129// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
24130//
24131// Otherwise, the output tensor has rank `r` with dimensions
24132// `[I, J, ..., L, num_diags, max_diag_len]` with values:
24133//
24134// ```
24135// diagonal[i, j, ..., l, m, n]
24136//
24137//	= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
24138//	  padding_value                 ; otherwise.
24139//
24140// ```
24141// where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`.
24142//
24143// The input must be at least a matrix.
24144//
24145// For example:
24146//
24147// ```
24148// input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
24149//
24150//	 [5, 6, 7, 8],
24151//	 [9, 8, 7, 6]],
24152//	[[5, 4, 3, 2],
24153//	 [1, 2, 3, 4],
24154//	 [5, 6, 7, 8]]])
24155//
24156// # A main diagonal from each batch.
24157// tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
24158//
24159//	[5, 2, 7]]
24160//
24161// # A superdiagonal from each batch.
24162// tf.matrix_diag_part(input, k = 1)
24163//
24164//	==> [[2, 7, 6],  # Output shape: (2, 3)
24165//	     [4, 3, 8]]
24166//
24167// # A tridiagonal band from each batch.
24168// tf.matrix_diag_part(input, k = (-1, 1))
24169//
24170//	==> [[[2, 7, 6],  # Output shape: (2, 3, 3)
24171//	      [1, 6, 7],
24172//	      [5, 8, 0]],
24173//	     [[4, 3, 8],
24174//	      [5, 2, 7],
24175//	      [1, 6, 0]]]
24176//
24177// # Padding value = 9
24178// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
24179//
24180//	==> [[[4, 9, 9],  # Output shape: (2, 3, 3)
24181//	      [3, 8, 9],
24182//	      [2, 7, 6]],
24183//	     [[2, 9, 9],
24184//	      [3, 4, 9],
24185//	      [4, 3, 8]]]
24186//
24187// ```
24188//
24189// Arguments:
24190//
24191//	input: Rank `r` tensor where `r >= 2`.
24192//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
24193//
24194// diagonal, and negative value means subdiagonals. `k` can be a single integer
24195// (for a single diagonal) or a pair of integers specifying the low and high ends
24196// of a matrix band. `k[0]` must not be larger than `k[1]`.
24197//
24198//	padding_value: The value to fill the area outside the specified diagonal band with.
24199//
24200// Default is 0.
24201//
24202// Returns The extracted diagonal(s).
24203func MatrixDiagPartV2(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output) (diagonal tf.Output) {
24204	if scope.Err() != nil {
24205		return
24206	}
24207	opspec := tf.OpSpec{
24208		Type: "MatrixDiagPartV2",
24209		Input: []tf.Input{
24210			input, k, padding_value,
24211		},
24212	}
24213	op := scope.AddOperation(opspec)
24214	return op.Output(0)
24215}
24216
24217// MatrixDiagPartV3Attr is an optional argument to MatrixDiagPartV3.
24218type MatrixDiagPartV3Attr func(optionalAttr)
24219
24220// MatrixDiagPartV3Align sets the optional align attribute to value.
24221//
24222// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
24223// a string specifying how superdiagonals and subdiagonals should be aligned,
24224// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
24225// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
24226// to the right (left-pads the row) and subdiagonals to the left (right-pads the
24227// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
24228// the opposite alignment.
24229// If not specified, defaults to "RIGHT_LEFT"
24230func MatrixDiagPartV3Align(value string) MatrixDiagPartV3Attr {
24231	return func(m optionalAttr) {
24232		m["align"] = value
24233	}
24234}
24235
24236// Returns the batched diagonal part of a batched tensor.
24237//
24238// Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
24239// `input`.
24240//
24241// Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
24242// Let `max_diag_len` be the maximum length among all diagonals to be extracted,
24243// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
24244// Let `num_diags` be the number of diagonals to extract,
24245// `num_diags = k[1] - k[0] + 1`.
24246//
24247// If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
24248// `[I, J, ..., L, max_diag_len]` and values:
24249//
24250// ```
24251// diagonal[i, j, ..., l, n]
24252//
24253//	= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
24254//	  padding_value                 ; otherwise.
24255//
24256// ```
24257// where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
24258//
24259// Otherwise, the output tensor has rank `r` with dimensions
24260// `[I, J, ..., L, num_diags, max_diag_len]` with values:
24261//
24262// ```
24263// diagonal[i, j, ..., l, m, n]
24264//
24265//	= input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
24266//	  padding_value                 ; otherwise.
24267//
24268// ```
24269// where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
24270//
24271// `offset` is zero except when the alignment of the diagonal is to the right.
24272// ```
24273// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
24274//
24275//	                                  and `d >= 0`) or
24276//	                                (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
24277//	                                  and `d <= 0`)
24278//	0                          ; otherwise
24279//
24280// ```
24281// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
24282//
24283// The input must be at least a matrix.
24284//
24285// For example:
24286//
24287// ```
24288// input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
24289//
24290//	 [5, 6, 7, 8],
24291//	 [9, 8, 7, 6]],
24292//	[[5, 4, 3, 2],
24293//	 [1, 2, 3, 4],
24294//	 [5, 6, 7, 8]]])
24295//
24296// # A main diagonal from each batch.
24297// tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
24298//
24299//	[5, 2, 7]]
24300//
24301// # A superdiagonal from each batch.
24302// tf.matrix_diag_part(input, k = 1)
24303//
24304//	==> [[2, 7, 6],  # Output shape: (2, 3)
24305//	     [4, 3, 8]]
24306//
24307// # A band from each batch.
24308// tf.matrix_diag_part(input, k = (-1, 2))
24309//
24310//	==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
24311//	      [2, 7, 6],
24312//	      [1, 6, 7],
24313//	      [5, 8, 0]],
24314//	     [[0, 3, 4],
24315//	      [4, 3, 8],
24316//	      [5, 2, 7],
24317//	      [1, 6, 0]]]
24318//
24319// # LEFT_RIGHT alignment.
24320// tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
24321//
24322//	==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
24323//	      [2, 7, 6],
24324//	      [1, 6, 7],
24325//	      [0, 5, 8]],
24326//	     [[3, 4, 0],
24327//	      [4, 3, 8],
24328//	      [5, 2, 7],
24329//	      [0, 1, 6]]]
24330//
24331// # max_diag_len can be shorter than the main diagonal.
24332// tf.matrix_diag_part(input, k = (-2, -1))
24333//
24334//	==> [[[5, 8],
24335//	      [9, 0]],
24336//	     [[1, 6],
24337//	      [5, 0]]]
24338//
24339// # padding_value = 9
24340// tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
24341//
24342//	==> [[[9, 9, 4],  # Output shape: (2, 3, 3)
24343//	      [9, 3, 8],
24344//	      [2, 7, 6]],
24345//	     [[9, 9, 2],
24346//	      [9, 3, 4],
24347//	      [4, 3, 8]]]
24348//
24349// ```
24350//
24351// Arguments:
24352//
24353//	input: Rank `r` tensor where `r >= 2`.
24354//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
24355//
24356// diagonal, and negative value means subdiagonals. `k` can be a single integer
24357// (for a single diagonal) or a pair of integers specifying the low and high ends
24358// of a matrix band. `k[0]` must not be larger than `k[1]`.
24359//
24360//	padding_value: The value to fill the area outside the specified diagonal band with.
24361//
24362// Default is 0.
24363//
24364// Returns The extracted diagonal(s).
24365func MatrixDiagPartV3(scope *Scope, input tf.Output, k tf.Output, padding_value tf.Output, optional ...MatrixDiagPartV3Attr) (diagonal tf.Output) {
24366	if scope.Err() != nil {
24367		return
24368	}
24369	attrs := map[string]interface{}{}
24370	for _, a := range optional {
24371		a(attrs)
24372	}
24373	opspec := tf.OpSpec{
24374		Type: "MatrixDiagPartV3",
24375		Input: []tf.Input{
24376			input, k, padding_value,
24377		},
24378		Attrs: attrs,
24379	}
24380	op := scope.AddOperation(opspec)
24381	return op.Output(0)
24382}
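
// Illustrative usage sketch (not part of the generated API): building a small
// graph that extracts a band of diagonals, as in the examples above. It
// assumes the `NewScope` and `Const` helpers from this package.
//
// ```
// s := NewScope()
// input := Const(s, [][]float32{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 8, 7, 6}})
// k := Const(s, []int32{-1, 2})  // band from subdiagonal -1 to superdiagonal 2
// pad := Const(s, float32(0))    // value used outside the band
// diag := MatrixDiagPartV3(s, input, k, pad, MatrixDiagPartV3Align("LEFT_RIGHT"))
// _ = diag
// ```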
24383
24384// Returns a batched diagonal tensor with given batched diagonal values.
24385//
24386// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
24387// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
24388// and `num_cols` specify the dimension of the innermost matrix of the output. If
24389// neither is specified, the op assumes the innermost matrix is square and infers
24390// its size from `k` and the innermost dimension of `diagonal`. If only one of them
24391// is specified, the op assumes the unspecified value is the smallest possible
24392// based on other criteria.
24393//
24394// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
24395// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
24396// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
24397// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
24398//
24399// The second innermost dimension of `diagonal` has double meaning.
24400// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
24401// [I, J, ..., M], and the output tensor is:
24402//
24403// ```
24404// output[i, j, ..., l, m, n]
24405//
24406//	= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
24407//	  padding_value                             ; otherwise
24408//
24409// ```
24410//
24411// Otherwise, `M` is treated as the number of diagonals for the matrix in the
24412// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
24413//
24414// ```
24415// output[i, j, ..., l, m, n]
24416//
24417//	= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
24418//	  padding_value                                     ; otherwise
24419//
24420// ```
24421// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
24422//
24423// For example:
24424//
24425// ```
24426// # The main diagonal.
24427// diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
24428//
24429//	[5, 6, 7, 8]])
24430//
24431// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
24432//
24433//	 [0, 2, 0, 0],
24434//	 [0, 0, 3, 0],
24435//	 [0, 0, 0, 4]],
24436//	[[5, 0, 0, 0],
24437//	 [0, 6, 0, 0],
24438//	 [0, 0, 7, 0],
24439//	 [0, 0, 0, 8]]]
24440//
24441// # A superdiagonal (per batch).
24442// diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
24443//
24444//	[4, 5, 6]])
24445//
24446// tf.matrix_diag(diagonal, k = 1)
24447//
24448//	==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
24449//	      [0, 0, 2, 0],
24450//	      [0, 0, 0, 3],
24451//	      [0, 0, 0, 0]],
24452//	     [[0, 4, 0, 0],
24453//	      [0, 0, 5, 0],
24454//	      [0, 0, 0, 6],
24455//	      [0, 0, 0, 0]]]
24456//
24457// # A band of diagonals.
24458// diagonals = np.array([[[1, 2, 3],  # Input shape: (2, 2, 3)
24459//
24460//	 [4, 5, 0]],
24461//	[[6, 7, 9],
24462//	 [9, 1, 0]]])
24463//
24464// tf.matrix_diag(diagonals, k = (-1, 0))
24465//
24466//	==> [[[1, 0, 0],  # Output shape: (2, 3, 3)
24467//	      [4, 2, 0],
24468//	      [0, 5, 3]],
24469//	     [[6, 0, 0],
24470//	      [9, 7, 0],
24471//	      [0, 1, 9]]]
24472//
24473// # Rectangular matrix.
24474// diagonal = np.array([1, 2])  # Input shape: (2)
24475// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
24476//
24477//	==> [[0, 0, 0, 0],  # Output shape: (3, 4)
24478//	     [1, 0, 0, 0],
24479//	     [0, 2, 0, 0]]
24480//
24481// # Rectangular matrix with inferred num_cols and padding_value = 9.
24482// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
24483//
24484//	==> [[9, 9],  # Output shape: (3, 2)
24485//	     [1, 9],
24486//	     [9, 2]]
24487//
24488// ```
24489//
24490// Arguments:
24491//
24492//	diagonal: Rank `r`, where `r >= 1`
24493//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
24494//
24495// diagonal, and negative value means subdiagonals. `k` can be a single integer
24496// (for a single diagonal) or a pair of integers specifying the low and high ends
24497// of a matrix band. `k[0]` must not be larger than `k[1]`.
24498//
24499//	num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
24500//
24501// the output matrix is a square matrix and infers the matrix size from k and the
24502// innermost dimension of `diagonal`.
24503//
24504//	num_cols: The number of columns of the output matrix. If it is not provided, the op
24505//
24506// assumes the output matrix is a square matrix and infers the matrix size from
24507// k and the innermost dimension of `diagonal`.
24508//
24509//	padding_value: The number to fill the area outside the specified diagonal band with.
24510//
24511// Default is 0.
24512//
24513// Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
24514func MatrixDiagV2(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output) (output tf.Output) {
24515	if scope.Err() != nil {
24516		return
24517	}
24518	opspec := tf.OpSpec{
24519		Type: "MatrixDiagV2",
24520		Input: []tf.Input{
24521			diagonal, k, num_rows, num_cols, padding_value,
24522		},
24523	}
24524	op := scope.AddOperation(opspec)
24525	return op.Output(0)
24526}
24527
24528// MatrixDiagV3Attr is an optional argument to MatrixDiagV3.
24529type MatrixDiagV3Attr func(optionalAttr)
24530
24531// MatrixDiagV3Align sets the optional align attribute to value.
24532//
24533// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
24534// a string specifying how superdiagonals and subdiagonals should be aligned,
24535// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
24536// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
24537// to the right (left-pads the row) and subdiagonals to the left (right-pads the
24538// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
24539// the opposite alignment.
24540// If not specified, defaults to "RIGHT_LEFT"
24541func MatrixDiagV3Align(value string) MatrixDiagV3Attr {
24542	return func(m optionalAttr) {
24543		m["align"] = value
24544	}
24545}
24546
24547// Returns a batched diagonal tensor with given batched diagonal values.
24548//
24549// Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
24550// diagonals of a matrix, with everything else padded with `padding`. `num_rows`
24551// and `num_cols` specify the dimension of the innermost matrix of the output. If
24552// neither is specified, the op assumes the innermost matrix is square and infers
24553// its size from `k` and the innermost dimension of `diagonal`. If only one of them
24554// is specified, the op assumes the unspecified value is the smallest possible
24555// based on other criteria.
24556//
24557// Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
24558// rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
24559// diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
24560// `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
24561//
24562// The second innermost dimension of `diagonal` has double meaning.
24563// When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
24564// [I, J, ..., M], and the output tensor is:
24565//
24566// ```
24567// output[i, j, ..., l, m, n]
24568//
24569//	= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
24570//	  padding_value                             ; otherwise
24571//
24572// ```
24573//
24574// Otherwise, `M` is treated as the number of diagonals for the matrix in the
24575// same batch (`M = k[1]-k[0]+1`), and the output tensor is:
24576//
24577// ```
24578// output[i, j, ..., l, m, n]
24579//
24580//	= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
24581//	  padding_value                                     ; otherwise
24582//
24583// ```
24584// where `d = n - m`, `diag_index = k[1] - d`, and
24585// `index_in_diag = n - max(d, 0) + offset`.
24586//
24587// `offset` is zero except when the alignment of the diagonal is to the right.
24588// ```
24589// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
24590//                                            and `d >= 0`) or
24591//                                           (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
24592//                                            and `d <= 0`)
24593//          0                          ; otherwise
24595//
24596// ```
24597// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
24598//
24599// For example:
24600//
24601// ```
24602// # The main diagonal.
24603// diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
24604//
24605//	[5, 6, 7, 8]])
24606//
24607// tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
24608//
24609//	 [0, 2, 0, 0],
24610//	 [0, 0, 3, 0],
24611//	 [0, 0, 0, 4]],
24612//	[[5, 0, 0, 0],
24613//	 [0, 6, 0, 0],
24614//	 [0, 0, 7, 0],
24615//	 [0, 0, 0, 8]]]
24616//
24617// # A superdiagonal (per batch).
24618// diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
24619//
24620//	[4, 5, 6]])
24621//
24622// tf.matrix_diag(diagonal, k = 1)
24623//
24624//	==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
24625//	      [0, 0, 2, 0],
24626//	      [0, 0, 0, 3],
24627//	      [0, 0, 0, 0]],
24628//	     [[0, 4, 0, 0],
24629//	      [0, 0, 5, 0],
24630//	      [0, 0, 0, 6],
24631//	      [0, 0, 0, 0]]]
24632//
24633// # A tridiagonal band (per batch).
24634// diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
24635//
24636//	 [1, 2, 3],
24637//	 [4, 5, 0]],
24638//	[[0, 2, 3],
24639//	 [6, 7, 9],
24640//	 [9, 1, 0]]])
24641//
24642// tf.matrix_diag(diagonals, k = (-1, 1))
24643//
24644//	==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
24645//	      [4, 2, 9],
24646//	      [0, 5, 3]],
24647//	     [[6, 2, 0],
24648//	      [9, 7, 3],
24649//	      [0, 1, 9]]]
24650//
24651// # LEFT_RIGHT alignment.
24652// diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
24653//
24654//	 [1, 2, 3],
24655//	 [0, 4, 5]],
24656//	[[2, 3, 0],
24657//	 [6, 7, 9],
24658//	 [0, 9, 1]]])
24659//
24660// tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
24661//
24662//	==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
24663//	      [4, 2, 9],
24664//	      [0, 5, 3]],
24665//	     [[6, 2, 0],
24666//	      [9, 7, 3],
24667//	      [0, 1, 9]]]
24668//
24669// # Rectangular matrix.
24670// diagonal = np.array([1, 2])  # Input shape: (2)
24671// tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
24672//
24673//	==> [[0, 0, 0, 0],  # Output shape: (3, 4)
24674//	     [1, 0, 0, 0],
24675//	     [0, 2, 0, 0]]
24676//
24677// # Rectangular matrix with inferred num_cols and padding_value = 9.
24678// tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
24679//
24680//	==> [[9, 9],  # Output shape: (3, 2)
24681//	     [1, 9],
24682//	     [9, 2]]
24683//
24684// ```
24685//
24686// Arguments:
24687//
24688//	diagonal: Rank `r`, where `r >= 1`
24689//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
24690//
24691// diagonal, and negative value means subdiagonals. `k` can be a single integer
24692// (for a single diagonal) or a pair of integers specifying the low and high ends
24693// of a matrix band. `k[0]` must not be larger than `k[1]`.
24694//
24695//	num_rows: The number of rows of the output matrix. If it is not provided, the op assumes
24696//
24697// the output matrix is a square matrix and infers the matrix size from k and the
24698// innermost dimension of `diagonal`.
24699//
24700//	num_cols: The number of columns of the output matrix. If it is not provided, the op
24701//
24702// assumes the output matrix is a square matrix and infers the matrix size from
24703// k and the innermost dimension of `diagonal`.
24704//
24705//	padding_value: The number to fill the area outside the specified diagonal band with.
24706//
24707// Default is 0.
24708//
24709// Returns Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.
24710func MatrixDiagV3(scope *Scope, diagonal tf.Output, k tf.Output, num_rows tf.Output, num_cols tf.Output, padding_value tf.Output, optional ...MatrixDiagV3Attr) (output tf.Output) {
24711	if scope.Err() != nil {
24712		return
24713	}
24714	attrs := map[string]interface{}{}
24715	for _, a := range optional {
24716		a(attrs)
24717	}
24718	opspec := tf.OpSpec{
24719		Type: "MatrixDiagV3",
24720		Input: []tf.Input{
24721			diagonal, k, num_rows, num_cols, padding_value,
24722		},
24723		Attrs: attrs,
24724	}
24725	op := scope.AddOperation(opspec)
24726	return op.Output(0)
24727}
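
// Illustrative usage sketch (not part of the generated API): assembling the
// tensor inputs this wrapper expects, assuming the `NewScope` and `Const`
// helpers from this package. Passing -1 for `num_rows`/`num_cols` is assumed
// here to mean "infer the size", matching the defaults described above.
//
// ```
// s := NewScope()
// diag := Const(s, [][]float32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// k := Const(s, int32(0))        // main diagonal only
// numRows := Const(s, int32(-1)) // infer a square matrix
// numCols := Const(s, int32(-1))
// pad := Const(s, float32(0))
// out := MatrixDiagV3(s, diag, k, numRows, numCols, pad, MatrixDiagV3Align("RIGHT_LEFT"))
// _ = out
// ```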
24728
24729// Deprecated, use python implementation tf.linalg.matrix_exponential.
24730//
24731// DEPRECATED at GraphDef version 27: Use Python implementation tf.linalg.matrix_exponential instead.
24732func MatrixExponential(scope *Scope, input tf.Output) (output tf.Output) {
24733	if scope.Err() != nil {
24734		return
24735	}
24736	opspec := tf.OpSpec{
24737		Type: "MatrixExponential",
24738		Input: []tf.Input{
24739			input,
24740		},
24741	}
24742	op := scope.AddOperation(opspec)
24743	return op.Output(0)
24744}
24745
24746// MatrixInverseAttr is an optional argument to MatrixInverse.
24747type MatrixInverseAttr func(optionalAttr)
24748
24749// MatrixInverseAdjoint sets the optional adjoint attribute to value.
24750// If not specified, defaults to false
24751func MatrixInverseAdjoint(value bool) MatrixInverseAttr {
24752	return func(m optionalAttr) {
24753		m["adjoint"] = value
24754	}
24755}
24756
24757// Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
24758//
24759// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
24760// form square matrices. The output is a tensor of the same shape as the input
24761// containing the inverse for all input submatrices `[..., :, :]`.
24762//
24763// The op uses LU decomposition with partial pivoting to compute the inverses.
24764//
24765// If a matrix is not invertible there is no guarantee what the op does. It
24766// may detect the condition and raise an exception or it may simply return a
24767// garbage result.
24768//
24769// Arguments:
24770//
24771//	input: Shape is `[..., M, M]`.
24772//
24773// Returns Shape is `[..., M, M]`.
24774//
24775// @compatibility(numpy)
24776// Equivalent to np.linalg.inv
24777// @end_compatibility
24778func MatrixInverse(scope *Scope, input tf.Output, optional ...MatrixInverseAttr) (output tf.Output) {
24779	if scope.Err() != nil {
24780		return
24781	}
24782	attrs := map[string]interface{}{}
24783	for _, a := range optional {
24784		a(attrs)
24785	}
24786	opspec := tf.OpSpec{
24787		Type: "MatrixInverse",
24788		Input: []tf.Input{
24789			input,
24790		},
24791		Attrs: attrs,
24792	}
24793	op := scope.AddOperation(opspec)
24794	return op.Output(0)
24795}
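
// Illustrative usage sketch (not part of the generated API), assuming the
// `NewScope` and `Const` helpers from this package:
//
// ```
// s := NewScope()
// a := Const(s, [][]float32{{4, 7}, {2, 6}})                 // invertible 2x2
// inv := MatrixInverse(s, a)                                 // a^-1
// invAdj := MatrixInverse(s, a, MatrixInverseAdjoint(true))  // (a^H)^-1
// _, _ = inv, invAdj
// ```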
24796
24797// Computes the matrix logarithm of one or more square matrices:
24798//
24799// \\(log(exp(A)) = A\\)
24800//
24801// This op is only defined for complex matrices. If A is positive-definite and
24802// real, then casting to a complex matrix, taking the logarithm and casting back
24803// to a real matrix will give the correct result.
24804//
24805// This function computes the matrix logarithm using the Schur-Parlett algorithm.
24806// Details of the algorithm can be found in Section 11.6.2 of:
24807// Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.
24808// ISBN 978-0-898716-46-7.
24809//
24810// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
24811// form square matrices. The output is a tensor of the same shape as the input
24812// containing the logarithm for all input submatrices `[..., :, :]`.
24813//
24814// Arguments:
24815//
24816//	input: Shape is `[..., M, M]`.
24817//
24818// Returns Shape is `[..., M, M]`.
24819//
24820// @compatibility(scipy)
24821// Equivalent to scipy.linalg.logm
24822// @end_compatibility
24823func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output) {
24824	if scope.Err() != nil {
24825		return
24826	}
24827	opspec := tf.OpSpec{
24828		Type: "MatrixLogarithm",
24829		Input: []tf.Input{
24830			input,
24831		},
24832	}
24833	op := scope.AddOperation(opspec)
24834	return op.Output(0)
24835}
24836
24837// Returns a batched matrix tensor with new batched diagonal values.
24838//
24839// Given `input` and `diagonal`, this operation returns a tensor with the
24840// same shape and values as `input`, except for the main diagonal of the
24841// innermost matrices.  These will be overwritten by the values in `diagonal`.
24842//
24843// The output is computed as follows:
24844//
24845// Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
24846// `k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
24847// tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
24848//
24849//   - `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
24850//   - `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
24851//
24852// Arguments:
24853//
24854//	input: Rank `k+1`, where `k >= 1`.
24855//	diagonal: Rank `k`, where `k >= 1`.
24856//
24857// Returns Rank `k+1`, with `output.shape = input.shape`.
24858func MatrixSetDiag(scope *Scope, input tf.Output, diagonal tf.Output) (output tf.Output) {
24859	if scope.Err() != nil {
24860		return
24861	}
24862	opspec := tf.OpSpec{
24863		Type: "MatrixSetDiag",
24864		Input: []tf.Input{
24865			input, diagonal,
24866		},
24867	}
24868	op := scope.AddOperation(opspec)
24869	return op.Output(0)
24870}
24871
24872// Returns a batched matrix tensor with new batched diagonal values.
24873//
24874// Given `input` and `diagonal`, this operation returns a tensor with the
24875// same shape and values as `input`, except for the specified diagonals of the
24876// innermost matrices. These will be overwritten by the values in `diagonal`.
24877//
24878// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
24879// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
24880// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
24881// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
24882// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
24883// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
24884//
24885// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
24886// If `k` is scalar or `k[0] == k[1]`:
24887//
24888// ```
24889// output[i, j, ..., l, m, n]
24890//
24891//	= diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
24892//	  input[i, j, ..., l, m, n]              ; otherwise
24893//
24894// ```
24895//
24896// Otherwise,
24897//
24898// ```
24899// output[i, j, ..., l, m, n]
24900//
24901//	= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
24902//	  input[i, j, ..., l, m, n]                         ; otherwise
24903//
24904// ```
24905// where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
24906//
24907// For example:
24908//
24909// ```
24910// # The main diagonal.
24911// input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
24912//
24913//	 [7, 7, 7, 7],
24914//	 [7, 7, 7, 7]],
24915//	[[7, 7, 7, 7],
24916//	 [7, 7, 7, 7],
24917//	 [7, 7, 7, 7]]])
24918//
24919// diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
24920//
24921//	[4, 5, 6]])
24922//
24923// tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
24924//
24925//	 [7, 2, 7, 7],
24926//	 [7, 7, 3, 7]],
24927//	[[4, 7, 7, 7],
24928//	 [7, 5, 7, 7],
24929//	 [7, 7, 6, 7]]]
24930//
24931// # A superdiagonal (per batch).
24932// tf.matrix_set_diag(input, diagonal, k = 1)
24933//
24934//	==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
24935//	      [7, 7, 2, 7],
24936//	      [7, 7, 7, 3]],
24937//	     [[7, 4, 7, 7],
24938//	      [7, 7, 5, 7],
24939//	      [7, 7, 7, 6]]]
24940//
24941// # A band of diagonals.
24942// diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
24943//
24944//	 [4, 5, 0]],
24945//	[[6, 1, 2],
24946//	 [3, 4, 0]]])
24947//
24948// tf.matrix_set_diag(input, diagonals, k = (-1, 0))
24949//
24950//	==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
24951//	      [4, 2, 7, 7],
24952//	      [0, 5, 3, 7]],
24953//	     [[6, 7, 7, 7],
24954//	      [3, 1, 7, 7],
24955//	      [7, 4, 2, 7]]]
24956//
24957// ```
24958//
24959// Arguments:
24960//
24961//	input: Rank `r+1`, where `r >= 1`.
24962//	diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
24963//
24964// `r >= 1`.
24965//
24966//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
24967//
24968// diagonal, and negative value means subdiagonals. `k` can be a single integer
24969// (for a single diagonal) or a pair of integers specifying the low and high ends
24970// of a matrix band. `k[0]` must not be larger than `k[1]`.
24971//
24972// Returns Rank `r+1`, with `output.shape = input.shape`.
24973func MatrixSetDiagV2(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output) (output tf.Output) {
24974	if scope.Err() != nil {
24975		return
24976	}
24977	opspec := tf.OpSpec{
24978		Type: "MatrixSetDiagV2",
24979		Input: []tf.Input{
24980			input, diagonal, k,
24981		},
24982	}
24983	op := scope.AddOperation(opspec)
24984	return op.Output(0)
24985}
24986
24987// MatrixSetDiagV3Attr is an optional argument to MatrixSetDiagV3.
24988type MatrixSetDiagV3Attr func(optionalAttr)
24989
24990// MatrixSetDiagV3Align sets the optional align attribute to value.
24991//
24992// value: Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is
24993// a string specifying how superdiagonals and subdiagonals should be aligned,
24994// respectively. There are four possible alignments: "RIGHT_LEFT" (default),
24995// "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals
24996// to the right (left-pads the row) and subdiagonals to the left (right-pads the
24997// row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is
24998// the opposite alignment.
24999// If not specified, defaults to "RIGHT_LEFT"
25000func MatrixSetDiagV3Align(value string) MatrixSetDiagV3Attr {
25001	return func(m optionalAttr) {
25002		m["align"] = value
25003	}
25004}
25005
25006// Returns a batched matrix tensor with new batched diagonal values.
25007//
25008// Given `input` and `diagonal`, this operation returns a tensor with the
25009// same shape and values as `input`, except for the specified diagonals of the
25010// innermost matrices. These will be overwritten by the values in `diagonal`.
25011//
25012// `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
25013// `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
25014// Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
25015// `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
25016// `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
25017// `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
25018//
25019// The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
25020// If `k` is scalar or `k[0] == k[1]`:
25021//
25022// ```
25023// output[i, j, ..., l, m, n]
25024//
25025//	= diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
25026//	  input[i, j, ..., l, m, n]              ; otherwise
25027//
25028// ```
25029//
25030// Otherwise,
25031//
25032// ```
25033// output[i, j, ..., l, m, n]
25034//
25035//	= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
25036//	  input[i, j, ..., l, m, n]                         ; otherwise
25037//
25038// ```
25039// where `d = n - m`, `diag_index = k[1] - d`, and
25040// `index_in_diag = n - max(d, 0) + offset`.
25041//
25042// `offset` is zero except when the alignment of the diagonal is to the right.
25043// ```
25044// offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
25045//                                            and `d >= 0`) or
25046//                                           (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
25047//                                            and `d <= 0`)
25048//          0                          ; otherwise
25050//
25051// ```
25052// where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
25053//
25054// For example:
25055//
25056// ```
25057// # The main diagonal.
25058// input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
25059//
25060//	 [7, 7, 7, 7],
25061//	 [7, 7, 7, 7]],
25062//	[[7, 7, 7, 7],
25063//	 [7, 7, 7, 7],
25064//	 [7, 7, 7, 7]]])
25065//
25066// diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
25067//
25068//	[4, 5, 6]])
25069//
25070// tf.matrix_set_diag(input, diagonal)
25071//
25072//	==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
25073//	      [7, 2, 7, 7],
25074//	      [7, 7, 3, 7]],
25075//	     [[4, 7, 7, 7],
25076//	      [7, 5, 7, 7],
25077//	      [7, 7, 6, 7]]]
25078//
25079// # A superdiagonal (per batch).
25080// tf.matrix_set_diag(input, diagonal, k = 1)
25081//
25082//	==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
25083//	      [7, 7, 2, 7],
25084//	      [7, 7, 7, 3]],
25085//	     [[7, 4, 7, 7],
25086//	      [7, 7, 5, 7],
25087//	      [7, 7, 7, 6]]]
25088//
25089// # A band of diagonals.
25090// diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
25091//
25092//	 [6, 5, 8],
25093//	 [1, 2, 3],
25094//	 [4, 5, 0]],
25095//	[[0, 1, 2],
25096//	 [5, 6, 4],
25097//	 [6, 1, 2],
25098//	 [3, 4, 0]]])
25099//
25100// tf.matrix_set_diag(input, diagonals, k = (-1, 2))
25101//
25102//	==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
25103//	      [4, 2, 5, 1],
25104//	      [7, 5, 3, 8]],
25105//	     [[6, 5, 1, 7],
25106//	      [3, 1, 6, 2],
25107//	      [7, 4, 2, 4]]]
25108//
25109// # LEFT_RIGHT alignment.
25110// diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
25111//
25112//	 [6, 5, 8],
25113//	 [1, 2, 3],
25114//	 [0, 4, 5]],
25115//	[[1, 2, 0],
25116//	 [5, 6, 4],
25117//	 [6, 1, 2],
25118//	 [0, 3, 4]]])
25119//
25120// tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
25121//
25122//	==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
25123//	      [4, 2, 5, 1],
25124//	      [7, 5, 3, 8]],
25125//	     [[6, 5, 1, 7],
25126//	      [3, 1, 6, 2],
25127//	      [7, 4, 2, 4]]]
25128//
25129// ```
25130//
25131// Arguments:
25132//
25133//	input: Rank `r+1`, where `r >= 1`.
25134//	diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
25135//
25136// `r >= 1`.
25137//
25138//	k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
25139//
25140// diagonal, and negative value means subdiagonals. `k` can be a single integer
25141// (for a single diagonal) or a pair of integers specifying the low and high ends
25142// of a matrix band. `k[0]` must not be larger than `k[1]`.
25143//
25144// Returns Rank `r+1`, with `output.shape = input.shape`.
25145func MatrixSetDiagV3(scope *Scope, input tf.Output, diagonal tf.Output, k tf.Output, optional ...MatrixSetDiagV3Attr) (output tf.Output) {
25146	if scope.Err() != nil {
25147		return
25148	}
25149	attrs := map[string]interface{}{}
25150	for _, a := range optional {
25151		a(attrs)
25152	}
25153	opspec := tf.OpSpec{
25154		Type: "MatrixSetDiagV3",
25155		Input: []tf.Input{
25156			input, diagonal, k,
25157		},
25158		Attrs: attrs,
25159	}
25160	op := scope.AddOperation(opspec)
25161	return op.Output(0)
25162}
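
// Illustrative usage sketch (not part of the generated API): overwriting the
// main diagonal, as in the first example above, assuming the `NewScope` and
// `Const` helpers from this package.
//
// ```
// s := NewScope()
// input := Const(s, [][]float32{{7, 7, 7, 7}, {7, 7, 7, 7}, {7, 7, 7, 7}})
// diag := Const(s, []float32{1, 2, 3})
// k := Const(s, int32(0)) // k = 0: replace the main diagonal
// out := MatrixSetDiagV3(s, input, diag, k)
// _ = out
// ```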
25163
25164// MatrixSolveAttr is an optional argument to MatrixSolve.
25165type MatrixSolveAttr func(optionalAttr)
25166
25167// MatrixSolveAdjoint sets the optional adjoint attribute to value.
25168//
25169// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
25170// adjoint.
25171// If not specified, defaults to false
25172func MatrixSolveAdjoint(value bool) MatrixSolveAttr {
25173	return func(m optionalAttr) {
25174		m["adjoint"] = value
25175	}
25176}
25177
25178// Solves systems of linear equations.
25179//
25180// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
25181// form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
25182// a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
25183// satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
25184// If `adjoint` is `True` then each output matrix satisfies
25185// `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
25186//
25187// Arguments:
25188//
25189//	matrix: Shape is `[..., M, M]`.
25190//	rhs: Shape is `[..., M, K]`.
25191//
25192// Returns Shape is `[..., M, K]`.
25193func MatrixSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixSolveAttr) (output tf.Output) {
25194	if scope.Err() != nil {
25195		return
25196	}
25197	attrs := map[string]interface{}{}
25198	for _, a := range optional {
25199		a(attrs)
25200	}
25201	opspec := tf.OpSpec{
25202		Type: "MatrixSolve",
25203		Input: []tf.Input{
25204			matrix, rhs,
25205		},
25206		Attrs: attrs,
25207	}
25208	op := scope.AddOperation(opspec)
25209	return op.Output(0)
25210}
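
// Illustrative usage sketch (not part of the generated API), assuming the
// `NewScope` and `Const` helpers from this package. It solves the single
// 2x2 system `a * x = b`:
//
// ```
// s := NewScope()
// a := Const(s, [][]float32{{3, 1}, {1, 2}})
// b := Const(s, [][]float32{{9}, {8}})
// x := MatrixSolve(s, a, b) // x == [[2], [3]]
// _ = x
// ```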
25211
25212// MatrixSolveLsAttr is an optional argument to MatrixSolveLs.
25213type MatrixSolveLsAttr func(optionalAttr)
25214
25215// MatrixSolveLsFast sets the optional fast attribute to value.
25216// If not specified, defaults to true
25217func MatrixSolveLsFast(value bool) MatrixSolveLsAttr {
25218	return func(m optionalAttr) {
25219		m["fast"] = value
25220	}
25221}
25222
25223// Solves one or more linear least-squares problems.
25224//
25225// `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
25226// form real or complex matrices of size `[M, N]`. `rhs` is a tensor of the same
25227// type as `matrix` and shape `[..., M, K]`.
25228// The output is a tensor of shape `[..., N, K]` where each output matrix solves
25229// each of the equations
25230// `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
25231// in the least squares sense.
25232//
25233// We use the following notation for (complex) matrix and right-hand sides
25234// in the batch:
25235//
25236// `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
25237// `rhs`=\\(B  \in \mathbb{C}^{m \times k}\\),
25238// `output`=\\(X  \in \mathbb{C}^{n \times k}\\),
25239// `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).
25240//
25241// If `fast` is `True`, then the solution is computed by solving the normal
25242// equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
25243// \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
25244// problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
25245// If \\(m \lt n\\) then `output` is computed as
25246// \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
25247// minimum-norm solution to the under-determined linear system, i.e.
25248// \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
25249// subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
25250// when \\(A\\) is numerically full rank and has a condition number
25251// \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
25252// sufficiently large.
25253//
25254// If `fast` is `False` an algorithm based on the numerically robust complete
25255// orthogonal decomposition is used. This computes the minimum-norm
25256// least-squares solution, even when \\(A\\) is rank deficient. This path is
25257// typically 6-7 times slower than the fast path. If `fast` is `False` then
25258// `l2_regularizer` is ignored.
25259//
25260// Arguments:
25261//
25262//	matrix: Shape is `[..., M, N]`.
25263//	rhs: Shape is `[..., M, K]`.
25264//	l2_regularizer: Scalar tensor.
25265//
25266// @compatibility(numpy)
25267// Equivalent to np.linalg.lstsq
25268// @end_compatibility
25269//
25270// Returns Shape is `[..., N, K]`.
25271func MatrixSolveLs(scope *Scope, matrix tf.Output, rhs tf.Output, l2_regularizer tf.Output, optional ...MatrixSolveLsAttr) (output tf.Output) {
25272	if scope.Err() != nil {
25273		return
25274	}
25275	attrs := map[string]interface{}{}
25276	for _, a := range optional {
25277		a(attrs)
25278	}
25279	opspec := tf.OpSpec{
25280		Type: "MatrixSolveLs",
25281		Input: []tf.Input{
25282			matrix, rhs, l2_regularizer,
25283		},
25284		Attrs: attrs,
25285	}
25286	op := scope.AddOperation(opspec)
25287	return op.Output(0)
25288}
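
// Illustrative usage sketch (not part of the generated API): an overdetermined
// least-squares fit, assuming the `NewScope` and `Const` helpers from this
// package; `l2_regularizer` is passed here as a scalar float64 tensor.
//
// ```
// s := NewScope()
// a := Const(s, [][]float32{{1, 1}, {1, 2}, {1, 3}}) // 3x2 design matrix
// b := Const(s, [][]float32{{1}, {2}, {2}})          // 3x1 observations
// lambda := Const(s, float64(0))                     // no regularization
// x := MatrixSolveLs(s, a, b, lambda, MatrixSolveLsFast(true))
// _ = x
// ```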
25289
25290// Computes the matrix square root of one or more square matrices:
25291//
25292// matmul(sqrtm(A), sqrtm(A)) = A
25293//
25294// The input matrix should be invertible. If the input matrix is real, it should
25295// have no eigenvalues which are real and negative (pairs of complex conjugate
25296// eigenvalues are allowed).
25297//
25298// The matrix square root is computed by first reducing the matrix to
25299// quasi-triangular form with the real Schur decomposition. The square root
25300// of the quasi-triangular matrix is then computed directly. Details of
25301// the algorithm can be found in: Nicholas J. Higham, "Computing real
25302// square roots of a real matrix", Linear Algebra Appl., 1987.
25303//
25304// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
25305// form square matrices. The output is a tensor of the same shape as the input
25306// containing the matrix square root for all input submatrices `[..., :, :]`.
25307//
25308// Arguments:
25309//
25310//	input: Shape is `[..., M, M]`.
25311//
25312// Returns Shape is `[..., M, M]`.
25313//
25314// @compatibility(scipy)
25315// Equivalent to scipy.linalg.sqrtm
25316// @end_compatibility
25317func MatrixSquareRoot(scope *Scope, input tf.Output) (output tf.Output) {
25318	if scope.Err() != nil {
25319		return
25320	}
25321	opspec := tf.OpSpec{
25322		Type: "MatrixSquareRoot",
25323		Input: []tf.Input{
25324			input,
25325		},
25326	}
25327	op := scope.AddOperation(opspec)
25328	return op.Output(0)
25329}
25330
25331// MatrixTriangularSolveAttr is an optional argument to MatrixTriangularSolve.
25332type MatrixTriangularSolveAttr func(optionalAttr)
25333
25334// MatrixTriangularSolveLower sets the optional lower attribute to value.
25335//
25336// value: Boolean indicating whether the innermost matrices in `matrix` are
25337// lower or upper triangular.
25338// If not specified, defaults to true
25339func MatrixTriangularSolveLower(value bool) MatrixTriangularSolveAttr {
25340	return func(m optionalAttr) {
25341		m["lower"] = value
25342	}
25343}
25344
25345// MatrixTriangularSolveAdjoint sets the optional adjoint attribute to value.
25346//
25347// value: Boolean indicating whether to solve with `matrix` or its (block-wise)
25348//
25349//	adjoint.
25350//
25351// @compatibility(scipy)
25352// Equivalent to scipy.linalg.solve_triangular
25353// @end_compatibility
25354// If not specified, defaults to false
25355func MatrixTriangularSolveAdjoint(value bool) MatrixTriangularSolveAttr {
25356	return func(m optionalAttr) {
25357		m["adjoint"] = value
25358	}
25359}
25360
25361// Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
25362//
25363// `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
25364// square matrices. If `lower` is `True` then the strictly upper triangular part
25365// of each inner-most matrix is assumed to be zero and not accessed.
25366// If `lower` is False then the strictly lower triangular part of each inner-most
25367// matrix is assumed to be zero and not accessed.
25368// `rhs` is a tensor of shape `[..., M, N]`.
25369//
25370// The output is a tensor of shape `[..., M, N]`. If `adjoint` is
25371// `False` then the innermost matrices in `output` satisfy matrix equations
25372// `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
25373// If `adjoint` is `True` then the innermost matrices in
25374// `output` satisfy matrix equations
25375// `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
25376//
25377// Note: the batch shapes of the inputs need only broadcast with each other.
25378//
25379// Example:
25380// ```python
25381//
25382// a = tf.constant([[3,  0,  0,  0],
25383//
25384//	[2,  1,  0,  0],
25385//	[1,  0,  1,  0],
25386//	[1,  1,  1,  1]], dtype=tf.float32)
25387//
25388// b = tf.constant([[4],
25389//
25390//	[2],
25391//	[4],
25392//	[2]], dtype=tf.float32)
25393//
25394// x = tf.linalg.triangular_solve(a, b, lower=True)
25395// x
25396// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
25397// # array([[ 1.3333334 ],
25398// #        [-0.66666675],
25399// #        [ 2.6666665 ],
25400// #        [-1.3333331 ]], dtype=float32)>
25401//
25402// # In Python 3 one can use `a @ x`
25403// tf.matmul(a, x)
25404// # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
25405// # array([[4.       ],
25406// #        [2.       ],
25407// #        [4.       ],
25408// #        [1.9999999]], dtype=float32)>
25409// ```
25410//
25411// Arguments:
25412//
25413//	matrix: Shape is `[..., M, M]`.
25414//	rhs: Shape is `[..., M, K]`.
25415//
25416// Returns Shape is `[..., M, K]`.
25417func MatrixTriangularSolve(scope *Scope, matrix tf.Output, rhs tf.Output, optional ...MatrixTriangularSolveAttr) (output tf.Output) {
25418	if scope.Err() != nil {
25419		return
25420	}
25421	attrs := map[string]interface{}{}
25422	for _, a := range optional {
25423		a(attrs)
25424	}
25425	opspec := tf.OpSpec{
25426		Type: "MatrixTriangularSolve",
25427		Input: []tf.Input{
25428			matrix, rhs,
25429		},
25430		Attrs: attrs,
25431	}
25432	op := scope.AddOperation(opspec)
25433	return op.Output(0)
25434}
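
// The same example as above, sketched in Go (illustrative only; it assumes
// the `NewScope` and `Const` helpers from this package):
//
// ```
// s := NewScope()
// a := Const(s, [][]float32{{3, 0, 0, 0}, {2, 1, 0, 0}, {1, 0, 1, 0}, {1, 1, 1, 1}})
// b := Const(s, [][]float32{{4}, {2}, {4}, {2}})
// x := MatrixTriangularSolve(s, a, b, MatrixTriangularSolveLower(true))
// _ = x
// ```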
25435
25436// MaxAttr is an optional argument to Max.
25437type MaxAttr func(optionalAttr)
25438
25439// MaxKeepDims sets the optional keep_dims attribute to value.
25440//
25441// value: If true, retain reduced dimensions with length 1.
25442// If not specified, defaults to false
25443func MaxKeepDims(value bool) MaxAttr {
25444	return func(m optionalAttr) {
25445		m["keep_dims"] = value
25446	}
25447}
25448
25449// Computes the maximum of elements across dimensions of a tensor.
25450//
25451// Reduces `input` along the dimensions given in `axis`. Unless
25452// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
25453// `axis`. If `keep_dims` is true, the reduced dimensions are
25454// retained with length 1.
25455//
25456// Arguments:
25457//
25458//	input: The tensor to reduce.
25459//	axis: The dimensions to reduce. Must be in the range
25460//
25461// `[-rank(input), rank(input))`.
25462//
25463// Returns The reduced tensor.
25464func Max(scope *Scope, input tf.Output, axis tf.Output, optional ...MaxAttr) (output tf.Output) {
25465	if scope.Err() != nil {
25466		return
25467	}
25468	attrs := map[string]interface{}{}
25469	for _, a := range optional {
25470		a(attrs)
25471	}
25472	opspec := tf.OpSpec{
25473		Type: "Max",
25474		Input: []tf.Input{
25475			input, axis,
25476		},
25477		Attrs: attrs,
25478	}
25479	op := scope.AddOperation(opspec)
25480	return op.Output(0)
25481}
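
// Illustrative usage sketch (not part of the generated API), assuming the
// `NewScope` and `Const` helpers from this package. Reducing over axis 1
// with `keep_dims` retains a length-1 column dimension:
//
// ```
// s := NewScope()
// x := Const(s, [][]float32{{1, 9}, {4, 3}})
// m := Max(s, x, Const(s, int32(1)), MaxKeepDims(true)) // [[9], [4]], shape (2, 1)
// _ = m
// ```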
25482
25483// Creates a dataset that overrides the maximum intra-op parallelism.
25484//
25485// Arguments:
25486//
25487//	max_intra_op_parallelism: Identifies the maximum intra-op parallelism to use.
25488func MaxIntraOpParallelismDataset(scope *Scope, input_dataset tf.Output, max_intra_op_parallelism tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
25489	if scope.Err() != nil {
25490		return
25491	}
25492	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
25493	opspec := tf.OpSpec{
25494		Type: "MaxIntraOpParallelismDataset",
25495		Input: []tf.Input{
25496			input_dataset, max_intra_op_parallelism,
25497		},
25498		Attrs: attrs,
25499	}
25500	op := scope.AddOperation(opspec)
25501	return op.Output(0)
25502}
25503
25504// MaxPoolAttr is an optional argument to MaxPool.
25505type MaxPoolAttr func(optionalAttr)
25506
25507// MaxPoolExplicitPaddings sets the optional explicit_paddings attribute to value.
25508// If not specified, defaults to {}
25509func MaxPoolExplicitPaddings(value []int64) MaxPoolAttr {
25510	return func(m optionalAttr) {
25511		m["explicit_paddings"] = value
25512	}
25513}
25514
25515// MaxPoolDataFormat sets the optional data_format attribute to value.
25516//
25517// value: Specify the data format of the input and output data. With the
25518// default format "NHWC", the data is stored in the order of:
25519//
25520//	[batch, in_height, in_width, in_channels].
25521//
25522// Alternatively, the format could be "NCHW", the data storage order of:
25523//
25524//	[batch, in_channels, in_height, in_width].
25525//
25526// If not specified, defaults to "NHWC"
25527func MaxPoolDataFormat(value string) MaxPoolAttr {
25528	return func(m optionalAttr) {
25529		m["data_format"] = value
25530	}
25531}
25532
25533// Performs max pooling on the input.
25534//
25535// Arguments:
25536//
25537//	input: 4-D input to pool over.
25538//	ksize: The size of the window for each dimension of the input tensor.
25539//	strides: The stride of the sliding window for each dimension of the
25540//
25541// input tensor.
25542//
25543//	padding: The type of padding algorithm to use.
25544//
25545// Returns The max pooled output tensor.
25546func MaxPool(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolAttr) (output tf.Output) {
25547	if scope.Err() != nil {
25548		return
25549	}
25550	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25551	for _, a := range optional {
25552		a(attrs)
25553	}
25554	opspec := tf.OpSpec{
25555		Type: "MaxPool",
25556		Input: []tf.Input{
25557			input,
25558		},
25559		Attrs: attrs,
25560	}
25561	op := scope.AddOperation(opspec)
25562	return op.Output(0)
25563}
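
// Illustrative usage sketch (not part of the generated API): 2x2, stride-2 max
// pooling over an NHWC placeholder, assuming the `NewScope` and `Placeholder`
// helpers from this package.
//
// ```
// s := NewScope()
// img := Placeholder(s, tf.Float) // fed with a [batch, height, width, channels] tensor
// pooled := MaxPool(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
// _ = pooled
// ```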
25564
25565// MaxPool3DAttr is an optional argument to MaxPool3D.
25566type MaxPool3DAttr func(optionalAttr)
25567
25568// MaxPool3DDataFormat sets the optional data_format attribute to value.
25569//
25570// value: The data format of the input and output data. With the
25571// default format "NDHWC", the data is stored in the order of:
25572//
25573//	[batch, in_depth, in_height, in_width, in_channels].
25574//
25575// Alternatively, the format could be "NCDHW", the data storage order is:
25576//
25577//	[batch, in_channels, in_depth, in_height, in_width].
25578//
25579// If not specified, defaults to "NDHWC"
25580func MaxPool3DDataFormat(value string) MaxPool3DAttr {
25581	return func(m optionalAttr) {
25582		m["data_format"] = value
25583	}
25584}
25585
25586// Performs 3D max pooling on the input.
25587//
25588// Arguments:
25589//
25590//	input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over.
25591//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
25592//
25593// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
25594//
25595//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25596//
25597// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25598//
25599//	padding: The type of padding algorithm to use.
25600//
25601// Returns The max pooled output tensor.
25602func MaxPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DAttr) (output tf.Output) {
25603	if scope.Err() != nil {
25604		return
25605	}
25606	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25607	for _, a := range optional {
25608		a(attrs)
25609	}
25610	opspec := tf.OpSpec{
25611		Type: "MaxPool3D",
25612		Input: []tf.Input{
25613			input,
25614		},
25615		Attrs: attrs,
25616	}
25617	op := scope.AddOperation(opspec)
25618	return op.Output(0)
25619}
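
// Illustrative usage sketch (not part of the generated API), assuming the
// `NewScope` and `Placeholder` helpers from this package. `ksize` and
// `strides` keep the batch and channel dimensions at 1, as required above.
//
// ```
// s := NewScope()
// vol := Placeholder(s, tf.Float) // fed with a [batch, depth, rows, cols, channels] tensor
// pooled := MaxPool3D(s, vol, []int64{1, 1, 2, 2, 1}, []int64{1, 1, 2, 2, 1}, "SAME")
// _ = pooled
// ```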
25620
25621// MaxPool3DGradAttr is an optional argument to MaxPool3DGrad.
25622type MaxPool3DGradAttr func(optionalAttr)
25623
25624// MaxPool3DGradDataFormat sets the optional data_format attribute to value.
25625//
25626// value: The data format of the input and output data. With the
25627// default format "NDHWC", the data is stored in the order of:
25628//
25629//	[batch, in_depth, in_height, in_width, in_channels].
25630//
25631// Alternatively, the format could be "NCDHW", the data storage order is:
25632//
25633//	[batch, in_channels, in_depth, in_height, in_width].
25634//
25635// If not specified, defaults to "NDHWC"
25636func MaxPool3DGradDataFormat(value string) MaxPool3DGradAttr {
25637	return func(m optionalAttr) {
25638		m["data_format"] = value
25639	}
25640}
25641
25642// Computes gradients of 3D max pooling function.
25643//
25644// Arguments:
25645//
25646//	orig_input: The original input tensor.
25647//	orig_output: The original output tensor.
25648//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
25649//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
25650//
25651// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
25652//
25653//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25654//
25655// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25656//
25657//	padding: The type of padding algorithm to use.
25658func MaxPool3DGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradAttr) (output tf.Output) {
25659	if scope.Err() != nil {
25660		return
25661	}
25662	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25663	for _, a := range optional {
25664		a(attrs)
25665	}
25666	opspec := tf.OpSpec{
25667		Type: "MaxPool3DGrad",
25668		Input: []tf.Input{
25669			orig_input, orig_output, grad,
25670		},
25671		Attrs: attrs,
25672	}
25673	op := scope.AddOperation(opspec)
25674	return op.Output(0)
25675}
25676
25677// MaxPool3DGradGradAttr is an optional argument to MaxPool3DGradGrad.
25678type MaxPool3DGradGradAttr func(optionalAttr)
25679
25680// MaxPool3DGradGradDataFormat sets the optional data_format attribute to value.
25681//
25682// value: The data format of the input and output data. With the
25683// default format "NDHWC", the data is stored in the order of:
25684//
25685//	[batch, in_depth, in_height, in_width, in_channels].
25686//
25687// Alternatively, the format could be "NCDHW", the data storage order is:
25688//
25689//	[batch, in_channels, in_depth, in_height, in_width].
25690//
25691// If not specified, defaults to "NDHWC"
25692func MaxPool3DGradGradDataFormat(value string) MaxPool3DGradGradAttr {
25693	return func(m optionalAttr) {
25694		m["data_format"] = value
25695	}
25696}
25697
25698// Computes second-order gradients of the maxpooling function.
25699//
25700// Arguments:
25701//
25702//	orig_input: The original input tensor.
25703//	orig_output: The original output tensor.
25704//	grad: Output backprop of shape `[batch, depth, rows, cols, channels]`.
25705//	ksize: 1-D tensor of length 5. The size of the window for each dimension of
25706//
25707// the input tensor. Must have `ksize[0] = ksize[4] = 1`.
25708//
25709//	strides: 1-D tensor of length 5. The stride of the sliding window for each
25710//
25711// dimension of `input`. Must have `strides[0] = strides[4] = 1`.
25712//
25713//	padding: The type of padding algorithm to use.
25714//
25715// Returns Gradients of gradients w.r.t. the input to `max_pool`.
25716func MaxPool3DGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPool3DGradGradAttr) (output tf.Output) {
25717	if scope.Err() != nil {
25718		return
25719	}
25720	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25721	for _, a := range optional {
25722		a(attrs)
25723	}
25724	opspec := tf.OpSpec{
25725		Type: "MaxPool3DGradGrad",
25726		Input: []tf.Input{
25727			orig_input, orig_output, grad,
25728		},
25729		Attrs: attrs,
25730	}
25731	op := scope.AddOperation(opspec)
25732	return op.Output(0)
25733}
25734
25735// MaxPoolGradAttr is an optional argument to MaxPoolGrad.
25736type MaxPoolGradAttr func(optionalAttr)
25737
25738// MaxPoolGradExplicitPaddings sets the optional explicit_paddings attribute to value.
25739// If not specified, defaults to {}
25740func MaxPoolGradExplicitPaddings(value []int64) MaxPoolGradAttr {
25741	return func(m optionalAttr) {
25742		m["explicit_paddings"] = value
25743	}
25744}
25745
25746// MaxPoolGradDataFormat sets the optional data_format attribute to value.
25747//
25748// value: Specify the data format of the input and output data. With the
25749// default format "NHWC", the data is stored in the order of:
25750//
25751//	[batch, in_height, in_width, in_channels].
25752//
25753// Alternatively, the format could be "NCHW", the data storage order of:
25754//
25755//	[batch, in_channels, in_height, in_width].
25756//
25757// If not specified, defaults to "NHWC"
25758func MaxPoolGradDataFormat(value string) MaxPoolGradAttr {
25759	return func(m optionalAttr) {
25760		m["data_format"] = value
25761	}
25762}
25763
25764// Computes gradients of the maxpooling function.
25765//
25766// Arguments:
25767//
25768//	orig_input: The original input tensor.
25769//	orig_output: The original output tensor.
25770//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
25771//	ksize: The size of the window for each dimension of the input tensor.
25772//	strides: The stride of the sliding window for each dimension of the
25773//
25774// input tensor.
25775//
25776//	padding: The type of padding algorithm to use.
25777//
25778// Returns Gradients w.r.t. the input to `max_pool`.
25779func MaxPoolGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradAttr) (output tf.Output) {
25780	if scope.Err() != nil {
25781		return
25782	}
25783	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25784	for _, a := range optional {
25785		a(attrs)
25786	}
25787	opspec := tf.OpSpec{
25788		Type: "MaxPoolGrad",
25789		Input: []tf.Input{
25790			orig_input, orig_output, grad,
25791		},
25792		Attrs: attrs,
25793	}
25794	op := scope.AddOperation(opspec)
25795	return op.Output(0)
25796}
25797
25798// MaxPoolGradGradAttr is an optional argument to MaxPoolGradGrad.
25799type MaxPoolGradGradAttr func(optionalAttr)
25800
25801// MaxPoolGradGradDataFormat sets the optional data_format attribute to value.
25802//
25803// value: Specify the data format of the input and output data. With the
25804// default format "NHWC", the data is stored in the order of:
25805//
25806//	[batch, in_height, in_width, in_channels].
25807//
25808// Alternatively, the format could be "NCHW", the data storage order of:
25809//
25810//	[batch, in_channels, in_height, in_width].
25811//
25812// If not specified, defaults to "NHWC"
25813func MaxPoolGradGradDataFormat(value string) MaxPoolGradGradAttr {
25814	return func(m optionalAttr) {
25815		m["data_format"] = value
25816	}
25817}
25818
25819// Computes second-order gradients of the maxpooling function.
25820//
25821// Arguments:
25822//
25823//	orig_input: The original input tensor.
25824//	orig_output: The original output tensor.
25825//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
25826//	ksize: The size of the window for each dimension of the input tensor.
25827//	strides: The stride of the sliding window for each dimension of the
25828//
25829// input tensor.
25830//
25831//	padding: The type of padding algorithm to use.
25832//
25833// Returns Gradients of gradients w.r.t. the input to `max_pool`.
25834func MaxPoolGradGrad(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradAttr) (output tf.Output) {
25835	if scope.Err() != nil {
25836		return
25837	}
25838	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25839	for _, a := range optional {
25840		a(attrs)
25841	}
25842	opspec := tf.OpSpec{
25843		Type: "MaxPoolGradGrad",
25844		Input: []tf.Input{
25845			orig_input, orig_output, grad,
25846		},
25847		Attrs: attrs,
25848	}
25849	op := scope.AddOperation(opspec)
25850	return op.Output(0)
25851}
25852
25853// MaxPoolGradGradV2Attr is an optional argument to MaxPoolGradGradV2.
25854type MaxPoolGradGradV2Attr func(optionalAttr)
25855
25856// MaxPoolGradGradV2DataFormat sets the optional data_format attribute to value.
25857//
25858// value: Specify the data format of the input and output data. With the
25859// default format "NHWC", the data is stored in the order of:
25860//
25861//	[batch, in_height, in_width, in_channels].
25862//
25863// Alternatively, the format could be "NCHW", the data storage order of:
25864//
25865//	[batch, in_channels, in_height, in_width].
25866//
25867// If not specified, defaults to "NHWC"
25868func MaxPoolGradGradV2DataFormat(value string) MaxPoolGradGradV2Attr {
25869	return func(m optionalAttr) {
25870		m["data_format"] = value
25871	}
25872}
25873
25874// Computes second-order gradients of the maxpooling function.
25875//
25876// Arguments:
25877//
25878//	orig_input: The original input tensor.
25879//	orig_output: The original output tensor.
25880//	grad: 4-D.  Gradients of gradients w.r.t. the input of `max_pool`.
25881//	ksize: The size of the window for each dimension of the input tensor.
25882//	strides: The stride of the sliding window for each dimension of the
25883//
25884// input tensor.
25885//
25886//	padding: The type of padding algorithm to use.
25887//
25888// Returns Gradients of gradients w.r.t. the input to `max_pool`.
25889func MaxPoolGradGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradGradV2Attr) (output tf.Output) {
25890	if scope.Err() != nil {
25891		return
25892	}
25893	attrs := map[string]interface{}{"padding": padding}
25894	for _, a := range optional {
25895		a(attrs)
25896	}
25897	opspec := tf.OpSpec{
25898		Type: "MaxPoolGradGradV2",
25899		Input: []tf.Input{
25900			orig_input, orig_output, grad, ksize, strides,
25901		},
25902		Attrs: attrs,
25903	}
25904	op := scope.AddOperation(opspec)
25905	return op.Output(0)
25906}
25907
25908// MaxPoolGradGradWithArgmaxAttr is an optional argument to MaxPoolGradGradWithArgmax.
25909type MaxPoolGradGradWithArgmaxAttr func(optionalAttr)
25910
25911// MaxPoolGradGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
25912//
25913// value: Whether to include batch dimension in flattened index of `argmax`.
25914// If not specified, defaults to false
25915func MaxPoolGradGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradGradWithArgmaxAttr {
25916	return func(m optionalAttr) {
25917		m["include_batch_in_index"] = value
25918	}
25919}
25920
25921// Computes second-order gradients of the maxpooling function.
25922//
25923// Arguments:
25924//
25925//	input: The original input.
25926//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
25927//
25928// input of `max_pool`.
25929//
25930//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
25931//	ksize: The size of the window for each dimension of the input tensor.
25932//	strides: The stride of the sliding window for each dimension of the
25933//
25934// input tensor.
25935//
25936//	padding: The type of padding algorithm to use.
25937//
25938// Returns Gradients of gradients w.r.t. the input of `max_pool`.
25939func MaxPoolGradGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradGradWithArgmaxAttr) (output tf.Output) {
25940	if scope.Err() != nil {
25941		return
25942	}
25943	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
25944	for _, a := range optional {
25945		a(attrs)
25946	}
25947	opspec := tf.OpSpec{
25948		Type: "MaxPoolGradGradWithArgmax",
25949		Input: []tf.Input{
25950			input, grad, argmax,
25951		},
25952		Attrs: attrs,
25953	}
25954	op := scope.AddOperation(opspec)
25955	return op.Output(0)
25956}
25957
25958// MaxPoolGradV2Attr is an optional argument to MaxPoolGradV2.
25959type MaxPoolGradV2Attr func(optionalAttr)
25960
25961// MaxPoolGradV2DataFormat sets the optional data_format attribute to value.
25962//
25963// value: Specify the data format of the input and output data. With the
25964// default format "NHWC", the data is stored in the order of:
25965//
25966//	[batch, in_height, in_width, in_channels].
25967//
25968// Alternatively, the format could be "NCHW", the data storage order of:
25969//
25970//	[batch, in_channels, in_height, in_width].
25971//
25972// If not specified, defaults to "NHWC"
25973func MaxPoolGradV2DataFormat(value string) MaxPoolGradV2Attr {
25974	return func(m optionalAttr) {
25975		m["data_format"] = value
25976	}
25977}
25978
25979// Computes gradients of the maxpooling function.
25980//
25981// Arguments:
25982//
25983//	orig_input: The original input tensor.
25984//	orig_output: The original output tensor.
25985//	grad: 4-D.  Gradients w.r.t. the output of `max_pool`.
25986//	ksize: The size of the window for each dimension of the input tensor.
25987//	strides: The stride of the sliding window for each dimension of the
25988//
25989// input tensor.
25990//
25991//	padding: The type of padding algorithm to use.
25992//
25993// Returns Gradients w.r.t. the input to `max_pool`.
25994func MaxPoolGradV2(scope *Scope, orig_input tf.Output, orig_output tf.Output, grad tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolGradV2Attr) (output tf.Output) {
25995	if scope.Err() != nil {
25996		return
25997	}
25998	attrs := map[string]interface{}{"padding": padding}
25999	for _, a := range optional {
26000		a(attrs)
26001	}
26002	opspec := tf.OpSpec{
26003		Type: "MaxPoolGradV2",
26004		Input: []tf.Input{
26005			orig_input, orig_output, grad, ksize, strides,
26006		},
26007		Attrs: attrs,
26008	}
26009	op := scope.AddOperation(opspec)
26010	return op.Output(0)
26011}
26012
26013// MaxPoolGradWithArgmaxAttr is an optional argument to MaxPoolGradWithArgmax.
26014type MaxPoolGradWithArgmaxAttr func(optionalAttr)
26015
26016// MaxPoolGradWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
26017//
26018// value: Whether to include batch dimension in flattened index of `argmax`.
26019// If not specified, defaults to false
26020func MaxPoolGradWithArgmaxIncludeBatchInIndex(value bool) MaxPoolGradWithArgmaxAttr {
26021	return func(m optionalAttr) {
26022		m["include_batch_in_index"] = value
26023	}
26024}
26025
26026// Computes gradients of the maxpooling function.
26027//
26028// Arguments:
26029//
26030//	input: The original input.
26031//	grad: 4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t. the
26032//
26033// output of `max_pool`.
26034//
26035//	argmax: The indices of the maximum values chosen for each output of `max_pool`.
26036//	ksize: The size of the window for each dimension of the input tensor.
26037//	strides: The stride of the sliding window for each dimension of the
26038//
26039// input tensor.
26040//
26041//	padding: The type of padding algorithm to use.
26042//
26043// Returns Gradients w.r.t. the input of `max_pool`.
26044func MaxPoolGradWithArgmax(scope *Scope, input tf.Output, grad tf.Output, argmax tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolGradWithArgmaxAttr) (output tf.Output) {
26045	if scope.Err() != nil {
26046		return
26047	}
26048	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
26049	for _, a := range optional {
26050		a(attrs)
26051	}
26052	opspec := tf.OpSpec{
26053		Type: "MaxPoolGradWithArgmax",
26054		Input: []tf.Input{
26055			input, grad, argmax,
26056		},
26057		Attrs: attrs,
26058	}
26059	op := scope.AddOperation(opspec)
26060	return op.Output(0)
26061}
26062
26063// MaxPoolV2Attr is an optional argument to MaxPoolV2.
26064type MaxPoolV2Attr func(optionalAttr)
26065
26066// MaxPoolV2DataFormat sets the optional data_format attribute to value.
26067//
26068// value: Specify the data format of the input and output data. With the
26069// default format "NHWC", the data is stored in the order of:
26070//
26071//	[batch, in_height, in_width, in_channels].
26072//
26073// Alternatively, the format could be "NCHW", the data storage order of:
26074//
26075//	[batch, in_channels, in_height, in_width].
26076//
26077// If not specified, defaults to "NHWC"
26078func MaxPoolV2DataFormat(value string) MaxPoolV2Attr {
26079	return func(m optionalAttr) {
26080		m["data_format"] = value
26081	}
26082}
26083
26084// Performs max pooling on the input.
26085//
26086// Arguments:
26087//
26088//	input: 4-D input to pool over.
26089//	ksize: The size of the window for each dimension of the input tensor.
26090//	strides: The stride of the sliding window for each dimension of the
26091//
26092// input tensor.
26093//
26094//	padding: The type of padding algorithm to use.
26095//
26096// Returns The max pooled output tensor.
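//
// A minimal usage sketch from a client of this package (editorial addition; the
// zero-valued 4-D NHWC input and the window/stride values are illustrative):
//
//	s := op.NewScope()
//	var in [1][4][4][1]float32 // zero-valued 4-D NHWC input, illustrative only
//	img := op.Const(s, in)
//	ksize := op.Const(s, []int32{1, 2, 2, 1})   // 2x2 window over height/width
//	strides := op.Const(s, []int32{1, 2, 2, 1}) // stride 2 over height/width
//	pooled := op.MaxPoolV2(s, img, ksize, strides, "VALID")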
26097func MaxPoolV2(scope *Scope, input tf.Output, ksize tf.Output, strides tf.Output, padding string, optional ...MaxPoolV2Attr) (output tf.Output) {
26098	if scope.Err() != nil {
26099		return
26100	}
26101	attrs := map[string]interface{}{"padding": padding}
26102	for _, a := range optional {
26103		a(attrs)
26104	}
26105	opspec := tf.OpSpec{
26106		Type: "MaxPoolV2",
26107		Input: []tf.Input{
26108			input, ksize, strides,
26109		},
26110		Attrs: attrs,
26111	}
26112	op := scope.AddOperation(opspec)
26113	return op.Output(0)
26114}
26115
26116// MaxPoolWithArgmaxAttr is an optional argument to MaxPoolWithArgmax.
26117type MaxPoolWithArgmaxAttr func(optionalAttr)
26118
26119// MaxPoolWithArgmaxTargmax sets the optional Targmax attribute to value.
26120// If not specified, defaults to DT_INT64
26121func MaxPoolWithArgmaxTargmax(value tf.DataType) MaxPoolWithArgmaxAttr {
26122	return func(m optionalAttr) {
26123		m["Targmax"] = value
26124	}
26125}
26126
26127// MaxPoolWithArgmaxIncludeBatchInIndex sets the optional include_batch_in_index attribute to value.
26128//
26129// value: Whether to include batch dimension in flattened index of `argmax`.
26130// If not specified, defaults to false
26131func MaxPoolWithArgmaxIncludeBatchInIndex(value bool) MaxPoolWithArgmaxAttr {
26132	return func(m optionalAttr) {
26133		m["include_batch_in_index"] = value
26134	}
26135}
26136
26137// Performs max pooling on the input and outputs both max values and indices.
26138//
26139// The indices in `argmax` are flattened, so that a maximum value at position
26140// `[b, y, x, c]` becomes flattened index:
26141// `(y * width + x) * channels + c` if `include_batch_in_index` is False;
26142// `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True.
26143//
26144// The indices returned are always in `[0, height) x [0, width)` before flattening,
26145// even if padding is involved and the mathematically correct answer is outside
26146// (either negative or too large).  This is a bug, but fixing it is difficult to do
26147// in a safe backwards compatible way, especially due to flattening.
26148//
26149// Arguments:
26150//
26151//	input: 4-D with shape `[batch, height, width, channels]`.  Input to pool over.
26152//	ksize: The size of the window for each dimension of the input tensor.
26153//	strides: The stride of the sliding window for each dimension of the
26154//
26155// input tensor.
26156//
26157//	padding: The type of padding algorithm to use.
26158//
26159// Returns:
26160//
26161//	output: The max pooled output tensor.
26162//	argmax: 4-D.  The flattened indices of the max values chosen for each output.
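//
// A hedged call sketch (editorial; the input and window values are
// illustrative). With `include_batch_in_index` false, a max at position
// `[b, y, x, c]` of a width-4, 1-channel input flattens to `(y*4 + x)*1 + c`,
// e.g. `[0, 1, 2, 0]` becomes `(1*4 + 2)*1 + 0 = 6`:
//
//	s := op.NewScope()
//	var in [1][4][4][1]float32 // illustrative input
//	img := op.Const(s, in)
//	out, argmax := op.MaxPoolWithArgmax(s, img, []int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")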
26163func MaxPoolWithArgmax(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...MaxPoolWithArgmaxAttr) (output tf.Output, argmax tf.Output) {
26164	if scope.Err() != nil {
26165		return
26166	}
26167	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
26168	for _, a := range optional {
26169		a(attrs)
26170	}
26171	opspec := tf.OpSpec{
26172		Type: "MaxPoolWithArgmax",
26173		Input: []tf.Input{
26174			input,
26175		},
26176		Attrs: attrs,
26177	}
26178	op := scope.AddOperation(opspec)
26179	return op.Output(0), op.Output(1)
26180}
26181
26182// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
26183//
26184// *NOTE*: `Maximum` supports broadcasting. More about broadcasting
26185// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
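//
// For example (editorial sketch), broadcasting a scalar against a matrix:
//
//	s := op.NewScope()
//	x := op.Const(s, [][]float32{{1, 4}, {3, 2}})
//	y := op.Const(s, float32(2.5))
//	z := op.Maximum(s, x, y) // [[2.5, 4], [3, 2.5]]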
26186func Maximum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
26187	if scope.Err() != nil {
26188		return
26189	}
26190	opspec := tf.OpSpec{
26191		Type: "Maximum",
26192		Input: []tf.Input{
26193			x, y,
26194		},
26195	}
26196	op := scope.AddOperation(opspec)
26197	return op.Output(0)
26198}
26199
26200// MeanAttr is an optional argument to Mean.
26201type MeanAttr func(optionalAttr)
26202
26203// MeanKeepDims sets the optional keep_dims attribute to value.
26204//
26205// value: If true, retain reduced dimensions with length 1.
26206// If not specified, defaults to false
26207func MeanKeepDims(value bool) MeanAttr {
26208	return func(m optionalAttr) {
26209		m["keep_dims"] = value
26210	}
26211}
26212
26213// Computes the mean of elements across dimensions of a tensor.
26214//
26215// Reduces `input` along the dimensions given in `axis`. Unless
26216// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
26217// `axis`. If `keep_dims` is true, the reduced dimensions are
26218// retained with length 1.
26219//
26220// Arguments:
26221//
26222//	input: The tensor to reduce.
26223//	axis: The dimensions to reduce. Must be in the range
26224//
26225// `[-rank(input), rank(input))`.
26226//
26227// Returns The reduced tensor.
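//
// A minimal sketch (editorial; the values are illustrative):
//
//	s := op.NewScope()
//	x := op.Const(s, [][]float32{{1, 2, 3}, {4, 5, 6}})
//	axis := op.Const(s, []int32{1})
//	m := op.Mean(s, x, axis, op.MeanKeepDims(true)) // shape [2, 1]: [[2], [5]]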
26228func Mean(scope *Scope, input tf.Output, axis tf.Output, optional ...MeanAttr) (output tf.Output) {
26229	if scope.Err() != nil {
26230		return
26231	}
26232	attrs := map[string]interface{}{}
26233	for _, a := range optional {
26234		a(attrs)
26235	}
26236	opspec := tf.OpSpec{
26237		Type: "Mean",
26238		Input: []tf.Input{
26239			input, axis,
26240		},
26241		Attrs: attrs,
26242	}
26243	op := scope.AddOperation(opspec)
26244	return op.Output(0)
26245}
26246
26247// Forwards the value of an available tensor from `inputs` to `output`.
26248//
26249// `Merge` waits for at least one of the tensors in `inputs` to become available.
26250// It is usually combined with `Switch` to implement branching.
26251//
26252// `Merge` forwards the first tensor to become available to `output`, and sets
26253// `value_index` to its index in `inputs`.
26254//
26255// Arguments:
26256//
26257//	inputs: The input tensors, exactly one of which will become available.
26258//
26259// Returns:
26260//
26261//	output: Will be set to the available input tensor.
26262//	value_index: The index of the chosen input tensor in `inputs`.
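//
// A hedged sketch (editorial): in practice the inputs usually come from the
// branches of a `Switch`; plain constants keep this self-contained:
//
//	s := op.NewScope()
//	a := op.Const(s, int32(1))
//	b := op.Const(s, int32(2))
//	out, idx := op.Merge(s, []tf.Output{a, b})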
26263func Merge(scope *Scope, inputs []tf.Output) (output tf.Output, value_index tf.Output) {
26264	if scope.Err() != nil {
26265		return
26266	}
26267	opspec := tf.OpSpec{
26268		Type: "Merge",
26269		Input: []tf.Input{
26270			tf.OutputList(inputs),
26271		},
26272	}
26273	op := scope.AddOperation(opspec)
26274	return op.Output(0), op.Output(1)
26275}
26276
26277// Merges summaries.
26278//
26279// This op creates a
26280// [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
26281// protocol buffer that contains the union of all the values in the input
26282// summaries.
26283//
26284// When the Op is run, it reports an `InvalidArgument` error if multiple values
26285// in the summaries to merge use the same tag.
26286//
26287// Arguments:
26288//
26289//	inputs: Can be of any shape.  Each must contain serialized `Summary` protocol
26290//
26291// buffers.
26292//
26293// Returns Scalar. Serialized `Summary` protocol buffer.
26294func MergeSummary(scope *Scope, inputs []tf.Output) (summary tf.Output) {
26295	if scope.Err() != nil {
26296		return
26297	}
26298	opspec := tf.OpSpec{
26299		Type: "MergeSummary",
26300		Input: []tf.Input{
26301			tf.OutputList(inputs),
26302		},
26303	}
26304	op := scope.AddOperation(opspec)
26305	return op.Output(0)
26306}
26307
26308// MergeV2CheckpointsAttr is an optional argument to MergeV2Checkpoints.
26309type MergeV2CheckpointsAttr func(optionalAttr)
26310
26311// MergeV2CheckpointsDeleteOldDirs sets the optional delete_old_dirs attribute to value.
26312//
26313// value: If true, recursively delete the dirname of each input checkpoint prefix after merging.
26314// If not specified, defaults to true
26315func MergeV2CheckpointsDeleteOldDirs(value bool) MergeV2CheckpointsAttr {
26316	return func(m optionalAttr) {
26317		m["delete_old_dirs"] = value
26318	}
26319}
26320
26321// MergeV2CheckpointsAllowMissingFiles sets the optional allow_missing_files attribute to value.
26322//
26323// value: If true, merge even if some checkpoint files are missing, as long as at least one exists.
26324// If not specified, defaults to false
26325func MergeV2CheckpointsAllowMissingFiles(value bool) MergeV2CheckpointsAttr {
26326	return func(m optionalAttr) {
26327		m["allow_missing_files"] = value
26328	}
26329}
26330
26331// V2 format specific: merges the metadata files of sharded checkpoints.  The
26332//
26333// result is one logical checkpoint, with one physical metadata file and renamed
26334// data files.
26335//
26336// Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
26337//
26338// If delete_old_dirs is true, attempts to recursively delete the dirname of each
26339// path in the input checkpoint_prefixes.  This is useful when those paths are
26340// non-user-facing temporary locations.
26341//
26342// If allow_missing_files is true, merges the checkpoint prefixes as long as
26343// at least one file exists. Otherwise, if no files exist, an error is raised.
26344// The default value for allow_missing_files is false.
26345//
26346// Arguments:
26347//
26348//	checkpoint_prefixes: prefixes of V2 checkpoints to merge.
26349//	destination_prefix: scalar.  The desired final prefix.  Allowed to be the same
26350//
26351// as one of the checkpoint_prefixes.
26352//
26353// Returns the created operation.
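//
// A minimal sketch (editorial; the checkpoint paths are hypothetical):
//
//	s := op.NewScope()
//	prefixes := op.Const(s, []string{"/tmp/ckpt-0", "/tmp/ckpt-1"})
//	dest := op.Const(s, "/tmp/ckpt-merged")
//	merge := op.MergeV2Checkpoints(s, prefixes, dest,
//		op.MergeV2CheckpointsDeleteOldDirs(true))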
26354func MergeV2Checkpoints(scope *Scope, checkpoint_prefixes tf.Output, destination_prefix tf.Output, optional ...MergeV2CheckpointsAttr) (o *tf.Operation) {
26355	if scope.Err() != nil {
26356		return
26357	}
26358	attrs := map[string]interface{}{}
26359	for _, a := range optional {
26360		a(attrs)
26361	}
26362	opspec := tf.OpSpec{
26363		Type: "MergeV2Checkpoints",
26364		Input: []tf.Input{
26365			checkpoint_prefixes, destination_prefix,
26366		},
26367		Attrs: attrs,
26368	}
26369	return scope.AddOperation(opspec)
26370}
26371
26372// MfccAttr is an optional argument to Mfcc.
26373type MfccAttr func(optionalAttr)
26374
26375// MfccUpperFrequencyLimit sets the optional upper_frequency_limit attribute to value.
26376//
26377// value: The highest frequency to use when calculating the
26378// cepstrum.
26379// If not specified, defaults to 4000
26380func MfccUpperFrequencyLimit(value float32) MfccAttr {
26381	return func(m optionalAttr) {
26382		m["upper_frequency_limit"] = value
26383	}
26384}
26385
26386// MfccLowerFrequencyLimit sets the optional lower_frequency_limit attribute to value.
26387//
26388// value: The lowest frequency to use when calculating the
26389// cepstrum.
26390// If not specified, defaults to 20
26391func MfccLowerFrequencyLimit(value float32) MfccAttr {
26392	return func(m optionalAttr) {
26393		m["lower_frequency_limit"] = value
26394	}
26395}
26396
26397// MfccFilterbankChannelCount sets the optional filterbank_channel_count attribute to value.
26398//
26399// value: Resolution of the Mel bank used internally.
26400// If not specified, defaults to 40
26401func MfccFilterbankChannelCount(value int64) MfccAttr {
26402	return func(m optionalAttr) {
26403		m["filterbank_channel_count"] = value
26404	}
26405}
26406
26407// MfccDctCoefficientCount sets the optional dct_coefficient_count attribute to value.
26408//
26409// value: How many output channels to produce per time slice.
26410// If not specified, defaults to 13
26411func MfccDctCoefficientCount(value int64) MfccAttr {
26412	return func(m optionalAttr) {
26413		m["dct_coefficient_count"] = value
26414	}
26415}
26416
26417// Transforms a spectrogram into a form that's useful for speech recognition.
26418//
26419// Mel Frequency Cepstral Coefficients are a way of representing audio data that
26420// has proven effective as an input feature for machine learning. They are created by
26421// taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the
26422// higher frequencies that are less significant to the human ear. They have a long
26423// history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
26424// is a good resource to learn more.
26425//
26426// Arguments:
26427//
26428//	spectrogram: Typically produced by the Spectrogram op, with magnitude_squared
26429//
26430// set to true.
26431//
26432//	sample_rate: The number of samples per second in the source audio.
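//
// A hedged sketch (editorial; a Scope `s` is assumed, and `spectrogram` is
// assumed to have been produced elsewhere, e.g. by the AudioSpectrogram op
// with magnitude_squared set to true):
//
//	sampleRate := op.Const(s, float32(16000))
//	coeffs := op.Mfcc(s, spectrogram, sampleRate, op.MfccDctCoefficientCount(13))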
26433func Mfcc(scope *Scope, spectrogram tf.Output, sample_rate tf.Output, optional ...MfccAttr) (output tf.Output) {
26434	if scope.Err() != nil {
26435		return
26436	}
26437	attrs := map[string]interface{}{}
26438	for _, a := range optional {
26439		a(attrs)
26440	}
26441	opspec := tf.OpSpec{
26442		Type: "Mfcc",
26443		Input: []tf.Input{
26444			spectrogram, sample_rate,
26445		},
26446		Attrs: attrs,
26447	}
26448	op := scope.AddOperation(opspec)
26449	return op.Output(0)
26450}
26451
26452// MinAttr is an optional argument to Min.
26453type MinAttr func(optionalAttr)
26454
26455// MinKeepDims sets the optional keep_dims attribute to value.
26456//
26457// value: If true, retain reduced dimensions with length 1.
26458// If not specified, defaults to false
26459func MinKeepDims(value bool) MinAttr {
26460	return func(m optionalAttr) {
26461		m["keep_dims"] = value
26462	}
26463}
26464
26465// Computes the minimum of elements across dimensions of a tensor.
26466//
26467// Reduces `input` along the dimensions given in `axis`. Unless
26468// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
26469// `axis`. If `keep_dims` is true, the reduced dimensions are
26470// retained with length 1.
26471//
26472// Arguments:
26473//
26474//	input: The tensor to reduce.
26475//	axis: The dimensions to reduce. Must be in the range
26476//
26477// `[-rank(input), rank(input))`.
26478//
26479// Returns The reduced tensor.
26480func Min(scope *Scope, input tf.Output, axis tf.Output, optional ...MinAttr) (output tf.Output) {
26481	if scope.Err() != nil {
26482		return
26483	}
26484	attrs := map[string]interface{}{}
26485	for _, a := range optional {
26486		a(attrs)
26487	}
26488	opspec := tf.OpSpec{
26489		Type: "Min",
26490		Input: []tf.Input{
26491			input, axis,
26492		},
26493		Attrs: attrs,
26494	}
26495	op := scope.AddOperation(opspec)
26496	return op.Output(0)
26497}
26498
26499// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
26500//
26501// *NOTE*: `Minimum` supports broadcasting. More about broadcasting
26502// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
26503func Minimum(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
26504	if scope.Err() != nil {
26505		return
26506	}
26507	opspec := tf.OpSpec{
26508		Type: "Minimum",
26509		Input: []tf.Input{
26510			x, y,
26511		},
26512	}
26513	op := scope.AddOperation(opspec)
26514	return op.Output(0)
26515}
26516
26517// Pads a tensor with mirrored values.
26518//
26519// This operation pads `input` with mirrored values according to the `paddings`
26520// you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
26521// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
26522// how many values to add before the contents of `input` in that dimension, and
26523// `paddings[D, 1]` indicates how many values to add after the contents of `input`
26524// in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
26525// than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true
26526// (if false, respectively).
26527//
26528// The padded size of each dimension D of the output is:
26529//
26530// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
26531//
26532// For example:
26533//
26534// ```
26535// # 't' is [[1, 2, 3], [4, 5, 6]].
26536// # 'paddings' is [[1, 1], [2, 2]].
26537// # 'mode' is SYMMETRIC.
26538// # rank of 't' is 2.
26539// pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
26540//
26541//	[2, 1, 1, 2, 3, 3, 2]
26542//	[5, 4, 4, 5, 6, 6, 5]
26543//	[5, 4, 4, 5, 6, 6, 5]]
26544//
26545// ```
26546//
26547// Arguments:
26548//
26549//	input: The input tensor to be padded.
26550//	paddings: A two-column matrix specifying the padding sizes. The number of
26551//
26552// rows must be the same as the rank of `input`.
26553//
26554//	mode: Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
26555//
26556// do not include the borders, while in symmetric mode the padded regions
26557// do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
26558// is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
26559// it is `[1, 2, 3, 3, 2]` in symmetric mode.
26560//
26561// Returns The padded tensor.
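//
// The example above, written as a Go sketch (editorial addition):
//
//	s := op.NewScope()
//	t := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
//	paddings := op.Const(s, [][]int32{{1, 1}, {2, 2}})
//	padded := op.MirrorPad(s, t, paddings, "SYMMETRIC") // 4x7 result shown above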
26562func MirrorPad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
26563	if scope.Err() != nil {
26564		return
26565	}
26566	attrs := map[string]interface{}{"mode": mode}
26567	opspec := tf.OpSpec{
26568		Type: "MirrorPad",
26569		Input: []tf.Input{
26570			input, paddings,
26571		},
26572		Attrs: attrs,
26573	}
26574	op := scope.AddOperation(opspec)
26575	return op.Output(0)
26576}
26577
26578// Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
26579//
26580// This operation folds the padded areas of `input` by `MirrorPad` according to the
26581// `paddings` you specify. `paddings` must be the same as `paddings` argument
26582// given to the corresponding `MirrorPad` op.
26583//
26584// The folded size of each dimension D of the output is:
26585//
26586// `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
26587//
26588// For example:
26589//
26590// ```
26591// # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
26592// # 'paddings' is [[0, 1], [0, 1]].
26593// # 'mode' is SYMMETRIC.
26594// # rank of 't' is 2.
26595// pad(t, paddings) ==> [[ 1,  5]
26596//
26597//	[11, 28]]
26598//
26599// ```
26600//
26601// Arguments:
26602//
26603//	input: The input tensor to be folded.
26604//	paddings: A two-column matrix specifying the padding sizes. The number of
26605//
26606// rows must be the same as the rank of `input`.
26607//
26608//	mode: The mode used in the `MirrorPad` op.
26609//
26610// Returns The folded tensor.
26611func MirrorPadGrad(scope *Scope, input tf.Output, paddings tf.Output, mode string) (output tf.Output) {
26612	if scope.Err() != nil {
26613		return
26614	}
26615	attrs := map[string]interface{}{"mode": mode}
26616	opspec := tf.OpSpec{
26617		Type: "MirrorPadGrad",
26618		Input: []tf.Input{
26619			input, paddings,
26620		},
26621		Attrs: attrs,
26622	}
26623	op := scope.AddOperation(opspec)
26624	return op.Output(0)
26625}
26626
26627// Wraps an arbitrary MLIR computation expressed as a module with a main() function.
26628//
26629// This operation does not have an associated kernel and is not intended to be
26630// executed in a regular TensorFlow session. Instead it is intended for testing,
26631// or for special cases where a user wants to pass a custom MLIR computation
26632// through a TensorFlow graph so that custom tooling can process it downstream
26633// (for example, when targeting a different environment such as TensorFlow Lite).
26635// The MLIR module is expected to have a main() function that will be used as an
26636// entry point. The inputs to the operation are passed as arguments to the
26637// main() function, and the return values of the main function are mapped to the
26638// outputs.
26639// Example usage:
26640//
26641// ```
26642// import tensorflow as tf
26643// from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
26644//
26645// mlir_module = '''python
26646//
26647//	func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
26648//	   %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
26649//	   return %add : tensor<10x10xf32>
26650//	}
26651//
26652// '''
26653//
26654// @tf.function
26655// def foo(x, y):
26656//
26657//	return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
26658//
26659// graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
26660// ```
26661func MlirPassthroughOp(scope *Scope, inputs []tf.Output, mlir_module string, Toutputs []tf.DataType) (outputs []tf.Output) {
26662	if scope.Err() != nil {
26663		return
26664	}
26665	attrs := map[string]interface{}{"mlir_module": mlir_module, "Toutputs": Toutputs}
26666	opspec := tf.OpSpec{
26667		Type: "MlirPassthroughOp",
26668		Input: []tf.Input{
26669			tf.OutputList(inputs),
26670		},
26671		Attrs: attrs,
26672	}
26673	op := scope.AddOperation(opspec)
26674	if scope.Err() != nil {
26675		return
26676	}
26677	var idx int
26678	var err error
26679	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
26680		scope.UpdateErr("MlirPassthroughOp", err)
26681		return
26682	}
26683	return outputs
26684}
26685
26686// Returns element-wise remainder of division. This emulates C semantics in that
26687//
26688// the result here is consistent with a truncating divide. E.g.
26689// `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
26690//
26691// *NOTE*: `Mod` supports broadcasting. More about broadcasting
26692// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
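//
// A worked instance of the identity (editorial note): with x = 7 and y = -3,
// truncatediv(7, -3) = -2, so Mod(7, -3) = 7 - (-2)*(-3) = 1; the result takes
// the sign of x, matching C's `%` operator.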
26693func Mod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
26694	if scope.Err() != nil {
26695		return
26696	}
26697	opspec := tf.OpSpec{
26698		Type: "Mod",
26699		Input: []tf.Input{
26700			x, y,
26701		},
26702	}
26703	op := scope.AddOperation(opspec)
26704	return op.Output(0)
26705}
26706
26707// ModelDatasetAttr is an optional argument to ModelDataset.
26708type ModelDatasetAttr func(optionalAttr)
26709
26710// ModelDatasetAlgorithm sets the optional algorithm attribute to value.
26711// If not specified, defaults to 0
26712func ModelDatasetAlgorithm(value int64) ModelDatasetAttr {
26713	return func(m optionalAttr) {
26714		m["algorithm"] = value
26715	}
26716}
26717
26718// ModelDatasetCpuBudget sets the optional cpu_budget attribute to value.
26719// If not specified, defaults to 0
26720func ModelDatasetCpuBudget(value int64) ModelDatasetAttr {
26721	return func(m optionalAttr) {
26722		m["cpu_budget"] = value
26723	}
26724}
26725
26726// ModelDatasetRamBudget sets the optional ram_budget attribute to value.
26727// If not specified, defaults to 0
26728func ModelDatasetRamBudget(value int64) ModelDatasetAttr {
26729	return func(m optionalAttr) {
26730		m["ram_budget"] = value
26731	}
26732}
26733
26734// Identity transformation that models performance.
26737//
26738// Arguments:
26739//
26740//	input_dataset: A variant tensor representing the input dataset.
26741func ModelDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ModelDatasetAttr) (handle tf.Output) {
26742	if scope.Err() != nil {
26743		return
26744	}
26745	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
26746	for _, a := range optional {
26747		a(attrs)
26748	}
26749	opspec := tf.OpSpec{
26750		Type: "ModelDataset",
26751		Input: []tf.Input{
26752			input_dataset,
26753		},
26754		Attrs: attrs,
26755	}
26756	op := scope.AddOperation(opspec)
26757	return op.Output(0)
26758}
26759
26760// Returns x * y element-wise.
26761//
26762// *NOTE*: `Multiply` supports broadcasting. More about broadcasting
26763// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
26764func Mul(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
26765	if scope.Err() != nil {
26766		return
26767	}
26768	opspec := tf.OpSpec{
26769		Type: "Mul",
26770		Input: []tf.Input{
26771			x, y,
26772		},
26773	}
26774	op := scope.AddOperation(opspec)
26775	return op.Output(0)
26776}
26777
26778// Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
26779//
26780// *NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
26781// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
26782func MulNoNan(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
26783	if scope.Err() != nil {
26784		return
26785	}
26786	opspec := tf.OpSpec{
26787		Type: "MulNoNan",
26788		Input: []tf.Input{
26789			x, y,
26790		},
26791	}
26792	op := scope.AddOperation(opspec)
26793	return op.Output(0)
26794}
26795
26796// Creates a MultiDeviceIterator resource.
26797//
26798// Arguments:
26799//
26800//	devices: A list of devices the iterator works across.
26801//	shared_name: If non-empty, this resource will be shared under the given name
26802//
26803// across multiple sessions.
26804//
26805//	container: If non-empty, this resource is placed in the given container.
26806//
26807// Otherwise, a default container is used.
26808//
26809//	output_types: The type list for the return values.
26810//	output_shapes: The list of shapes being produced.
26811//
26812// Returns Handle to the resource created.
26813func MultiDeviceIterator(scope *Scope, devices []string, shared_name string, container string, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
26814	if scope.Err() != nil {
26815		return
26816	}
26817	attrs := map[string]interface{}{"devices": devices, "shared_name": shared_name, "container": container, "output_types": output_types, "output_shapes": output_shapes}
26818	opspec := tf.OpSpec{
26819		Type: "MultiDeviceIterator",
26820
26821		Attrs: attrs,
26822	}
26823	op := scope.AddOperation(opspec)
26824	return op.Output(0)
26825}
26826
26827// MultiDeviceIteratorFromStringHandleAttr is an optional argument to MultiDeviceIteratorFromStringHandle.
26828type MultiDeviceIteratorFromStringHandleAttr func(optionalAttr)
26829
26830// MultiDeviceIteratorFromStringHandleOutputTypes sets the optional output_types attribute to value.
26831//
26832// value: The type list for the return values.
26833// If not specified, defaults to {}
26834//
26835// REQUIRES: len(value) >= 0
26836func MultiDeviceIteratorFromStringHandleOutputTypes(value []tf.DataType) MultiDeviceIteratorFromStringHandleAttr {
26837	return func(m optionalAttr) {
26838		m["output_types"] = value
26839	}
26840}
26841
26842// MultiDeviceIteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value.
26843//
26844// value: The list of shapes being produced.
26845// If not specified, defaults to {}
26846//
26847// REQUIRES: len(value) >= 0
26848func MultiDeviceIteratorFromStringHandleOutputShapes(value []tf.Shape) MultiDeviceIteratorFromStringHandleAttr {
26849	return func(m optionalAttr) {
26850		m["output_shapes"] = value
26851	}
26852}
26853
26854// Generates a MultiDeviceIterator resource from its provided string handle.
26855//
26856// Arguments:
26857//
26858//	string_handle: String representing the resource.
26859//
26860// Returns A MultiDeviceIterator resource.
26861func MultiDeviceIteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...MultiDeviceIteratorFromStringHandleAttr) (multi_device_iterator tf.Output) {
26862	if scope.Err() != nil {
26863		return
26864	}
26865	attrs := map[string]interface{}{}
26866	for _, a := range optional {
26867		a(attrs)
26868	}
26869	opspec := tf.OpSpec{
26870		Type: "MultiDeviceIteratorFromStringHandle",
26871		Input: []tf.Input{
26872			string_handle,
26873		},
26874		Attrs: attrs,
26875	}
26876	op := scope.AddOperation(opspec)
26877	return op.Output(0)
26878}
26879
26880// Gets next element for the provided shard number.
26881//
26882// Arguments:
26883//
26884//	multi_device_iterator: A MultiDeviceIterator resource.
26885//	shard_num: Integer representing which shard to fetch data for.
26886//	incarnation_id: Which incarnation of the MultiDeviceIterator is running.
26887//	output_types: The type list for the return values.
26888//	output_shapes: The list of shapes being produced.
26889//
26890// Returns Result of the get_next on the dataset.
26891func MultiDeviceIteratorGetNextFromShard(scope *Scope, multi_device_iterator tf.Output, shard_num tf.Output, incarnation_id tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
26892	if scope.Err() != nil {
26893		return
26894	}
26895	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
26896	opspec := tf.OpSpec{
26897		Type: "MultiDeviceIteratorGetNextFromShard",
26898		Input: []tf.Input{
26899			multi_device_iterator, shard_num, incarnation_id,
26900		},
26901		Attrs: attrs,
26902	}
26903	op := scope.AddOperation(opspec)
26904	if scope.Err() != nil {
26905		return
26906	}
26907	var idx int
26908	var err error
26909	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
26910		scope.UpdateErr("MultiDeviceIteratorGetNextFromShard", err)
26911		return
26912	}
26913	return components
26914}
26915
26916// Initializes the multi device iterator with the given dataset.
26917//
26918// Arguments:
26919//
26920//	dataset: Dataset to be iterated upon.
26921//	multi_device_iterator: A MultiDeviceIteratorResource.
26922//	max_buffer_size: The maximum size of the host side per device buffer to keep.
26923//
26924// Returns An int64 indicating which incarnation of the MultiDeviceIterator
26925// is running.
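//
// A hedged end-to-end sketch (editorial; a Scope `s` is assumed, `dataset` is
// assumed to be a variant tensor built elsewhere, and the element types,
// shapes, device list and buffer size are illustrative):
//
//	types := []tf.DataType{tf.Float}
//	shapes := []tf.Shape{tf.ScalarShape()}
//	iter := op.MultiDeviceIterator(s, []string{"/device:CPU:0"}, "name", "", types, shapes)
//	incarnation := op.MultiDeviceIteratorInit(s, dataset, iter, op.Const(s, int64(8)))
//	parts := op.MultiDeviceIteratorGetNextFromShard(s, iter, op.Const(s, int32(0)), incarnation, types, shapes)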
26926func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output) {
26927	if scope.Err() != nil {
26928		return
26929	}
26930	opspec := tf.OpSpec{
26931		Type: "MultiDeviceIteratorInit",
26932		Input: []tf.Input{
26933			dataset, multi_device_iterator, max_buffer_size,
26934		},
26935	}
26936	op := scope.AddOperation(opspec)
26937	return op.Output(0)
26938}
26939
26940// Produces a string handle for the given MultiDeviceIterator.
26941//
26942// Arguments:
26943//
26944//	multi_device_iterator: A MultiDeviceIterator resource.
26945//
26946// Returns A string representing the resource.
26947func MultiDeviceIteratorToStringHandle(scope *Scope, multi_device_iterator tf.Output) (string_handle tf.Output) {
26948	if scope.Err() != nil {
26949		return
26950	}
26951	opspec := tf.OpSpec{
26952		Type: "MultiDeviceIteratorToStringHandle",
26953		Input: []tf.Input{
26954			multi_device_iterator,
26955		},
26956	}
26957	op := scope.AddOperation(opspec)
26958	return op.Output(0)
26959}
26960
26961// MultinomialAttr is an optional argument to Multinomial.
26962type MultinomialAttr func(optionalAttr)
26963
26964// MultinomialSeed sets the optional seed attribute to value.
26965//
26966// value: If either seed or seed2 is set to be non-zero, the internal random number
26967// generator is seeded by the given seed.  Otherwise, a random seed is used.
26968// If not specified, defaults to 0
26969func MultinomialSeed(value int64) MultinomialAttr {
26970	return func(m optionalAttr) {
26971		m["seed"] = value
26972	}
26973}
26974
26975// MultinomialSeed2 sets the optional seed2 attribute to value.
26976//
26977// value: A second seed to avoid seed collision.
26978// If not specified, defaults to 0
26979func MultinomialSeed2(value int64) MultinomialAttr {
26980	return func(m optionalAttr) {
26981		m["seed2"] = value
26982	}
26983}
26984
26985// MultinomialOutputDtype sets the optional output_dtype attribute to value.
26986// If not specified, defaults to DT_INT64
26987func MultinomialOutputDtype(value tf.DataType) MultinomialAttr {
26988	return func(m optionalAttr) {
26989		m["output_dtype"] = value
26990	}
26991}
26992
26993// Draws samples from a multinomial distribution.
26994//
26995// Arguments:
26996//
26997//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
26998//
26999// represents the unnormalized log probabilities for all classes.
27000//
27001//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
27002//
27003// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
27004// contains the drawn class labels with range `[0, num_classes)`.
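//
// A minimal sketch (editorial; the logits, sample count, and seed are
// illustrative):
//
//	s := op.NewScope()
//	logits := op.Const(s, [][]float32{{0, 1, 2}})
//	n := op.Const(s, int32(5))
//	samples := op.Multinomial(s, logits, n, op.MultinomialSeed(42))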
27005func Multinomial(scope *Scope, logits tf.Output, num_samples tf.Output, optional ...MultinomialAttr) (output tf.Output) {
27006	if scope.Err() != nil {
27007		return
27008	}
27009	attrs := map[string]interface{}{}
27010	for _, a := range optional {
27011		a(attrs)
27012	}
27013	opspec := tf.OpSpec{
27014		Type: "Multinomial",
27015		Input: []tf.Input{
27016			logits, num_samples,
27017		},
27018		Attrs: attrs,
27019	}
27020	op := scope.AddOperation(opspec)
27021	return op.Output(0)
27022}
27023
27024// MutableDenseHashTableV2Attr is an optional argument to MutableDenseHashTableV2.
27025type MutableDenseHashTableV2Attr func(optionalAttr)
27026
27027// MutableDenseHashTableV2Container sets the optional container attribute to value.
27028//
27029// value: If non-empty, this table is placed in the given container.
27030// Otherwise, a default container is used.
27031// If not specified, defaults to ""
27032func MutableDenseHashTableV2Container(value string) MutableDenseHashTableV2Attr {
27033	return func(m optionalAttr) {
27034		m["container"] = value
27035	}
27036}
27037
27038// MutableDenseHashTableV2SharedName sets the optional shared_name attribute to value.
27039//
27040// value: If non-empty, this table is shared under the given name across
27041// multiple sessions.
27042// If not specified, defaults to ""
27043func MutableDenseHashTableV2SharedName(value string) MutableDenseHashTableV2Attr {
27044	return func(m optionalAttr) {
27045		m["shared_name"] = value
27046	}
27047}
27048
27049// MutableDenseHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
27050// If not specified, defaults to false
27051func MutableDenseHashTableV2UseNodeNameSharing(value bool) MutableDenseHashTableV2Attr {
27052	return func(m optionalAttr) {
27053		m["use_node_name_sharing"] = value
27054	}
27055}
27056
27057// MutableDenseHashTableV2ValueShape sets the optional value_shape attribute to value.
27058//
27059// value: The shape of each value.
27060// If not specified, defaults to {}
27061func MutableDenseHashTableV2ValueShape(value tf.Shape) MutableDenseHashTableV2Attr {
27062	return func(m optionalAttr) {
27063		m["value_shape"] = value
27064	}
27065}
27066
27067// MutableDenseHashTableV2InitialNumBuckets sets the optional initial_num_buckets attribute to value.
27068//
27069// value: The initial number of hash table buckets. Must be a power
27070// of 2.
27071// If not specified, defaults to 131072
27072func MutableDenseHashTableV2InitialNumBuckets(value int64) MutableDenseHashTableV2Attr {
27073	return func(m optionalAttr) {
27074		m["initial_num_buckets"] = value
27075	}
27076}
27077
27078// MutableDenseHashTableV2MaxLoadFactor sets the optional max_load_factor attribute to value.
27079//
27080// value: The maximum ratio between number of entries and number of
27081// buckets before growing the table. Must be between 0 and 1.
27082// If not specified, defaults to 0.8
27083func MutableDenseHashTableV2MaxLoadFactor(value float32) MutableDenseHashTableV2Attr {
27084	return func(m optionalAttr) {
27085		m["max_load_factor"] = value
27086	}
27087}
27088
27089// Creates an empty hash table that uses tensors as the backing store.
27090//
27091// It uses "open addressing" with quadratic reprobing to resolve
27092// collisions.
27093//
27094// This op creates a mutable hash table, specifying the type of its keys and
27095// values. Each value must be a scalar. Data can be inserted into the table using
27096// the insert operations. It does not support the initialization operation.
27097//
27098// Arguments:
27099//
27100//	empty_key: The key used to represent empty key buckets internally. Must not
27101//
27102// be used in insert or lookup operations.
27103//
27104//	value_dtype: Type of the table values.
27105//
27106// Returns Handle to a table.
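//
// A hedged sketch (editorial; the sentinel keys and bucket count are
// illustrative, and initial_num_buckets must be a power of 2):
//
//	s := op.NewScope()
//	emptyKey := op.Const(s, int64(-1))
//	deletedKey := op.Const(s, int64(-2))
//	table := op.MutableDenseHashTableV2(s, emptyKey, deletedKey, tf.Int64,
//		op.MutableDenseHashTableV2InitialNumBuckets(1024))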
27107func MutableDenseHashTableV2(scope *Scope, empty_key tf.Output, deleted_key tf.Output, value_dtype tf.DataType, optional ...MutableDenseHashTableV2Attr) (table_handle tf.Output) {
27108	if scope.Err() != nil {
27109		return
27110	}
27111	attrs := map[string]interface{}{"value_dtype": value_dtype}
27112	for _, a := range optional {
27113		a(attrs)
27114	}
27115	opspec := tf.OpSpec{
27116		Type: "MutableDenseHashTableV2",
27117		Input: []tf.Input{
27118			empty_key, deleted_key,
27119		},
27120		Attrs: attrs,
27121	}
27122	op := scope.AddOperation(opspec)
27123	return op.Output(0)
27124}
27125
27126// MutableHashTableOfTensorsV2Attr is an optional argument to MutableHashTableOfTensorsV2.
27127type MutableHashTableOfTensorsV2Attr func(optionalAttr)
27128
27129// MutableHashTableOfTensorsV2Container sets the optional container attribute to value.
27130//
27131// value: If non-empty, this table is placed in the given container.
27132// Otherwise, a default container is used.
27133// If not specified, defaults to ""
27134func MutableHashTableOfTensorsV2Container(value string) MutableHashTableOfTensorsV2Attr {
27135	return func(m optionalAttr) {
27136		m["container"] = value
27137	}
27138}
27139
27140// MutableHashTableOfTensorsV2SharedName sets the optional shared_name attribute to value.
27141//
27142// value: If non-empty, this table is shared under the given name across
27143// multiple sessions.
27144// If not specified, defaults to ""
27145func MutableHashTableOfTensorsV2SharedName(value string) MutableHashTableOfTensorsV2Attr {
27146	return func(m optionalAttr) {
27147		m["shared_name"] = value
27148	}
27149}
27150
27151// MutableHashTableOfTensorsV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
27152// If not specified, defaults to false
27153func MutableHashTableOfTensorsV2UseNodeNameSharing(value bool) MutableHashTableOfTensorsV2Attr {
27154	return func(m optionalAttr) {
27155		m["use_node_name_sharing"] = value
27156	}
27157}
27158
27159// MutableHashTableOfTensorsV2ValueShape sets the optional value_shape attribute to value.
27160// If not specified, defaults to {}
27161func MutableHashTableOfTensorsV2ValueShape(value tf.Shape) MutableHashTableOfTensorsV2Attr {
27162	return func(m optionalAttr) {
27163		m["value_shape"] = value
27164	}
27165}
27166
27167// Creates an empty hash table.
27168//
27169// This op creates a mutable hash table, specifying the type of its keys and
27170// values. Each value must be a vector. Data can be inserted into the table using
27171// the insert operations. It does not support the initialization operation.
27172//
27173// Arguments:
27174//
27175//	key_dtype: Type of the table keys.
27176//	value_dtype: Type of the table values.
27177//
27178// Returns Handle to a table.
27179func MutableHashTableOfTensorsV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableOfTensorsV2Attr) (table_handle tf.Output) {
27180	if scope.Err() != nil {
27181		return
27182	}
27183	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
27184	for _, a := range optional {
27185		a(attrs)
27186	}
27187	opspec := tf.OpSpec{
27188		Type: "MutableHashTableOfTensorsV2",
27189
27190		Attrs: attrs,
27191	}
27192	op := scope.AddOperation(opspec)
27193	return op.Output(0)
27194}
27195
27196// MutableHashTableV2Attr is an optional argument to MutableHashTableV2.
27197type MutableHashTableV2Attr func(optionalAttr)
27198
27199// MutableHashTableV2Container sets the optional container attribute to value.
27200//
27201// value: If non-empty, this table is placed in the given container.
27202// Otherwise, a default container is used.
27203// If not specified, defaults to ""
27204func MutableHashTableV2Container(value string) MutableHashTableV2Attr {
27205	return func(m optionalAttr) {
27206		m["container"] = value
27207	}
27208}
27209
27210// MutableHashTableV2SharedName sets the optional shared_name attribute to value.
27211//
27212// value: If non-empty, this table is shared under the given name across
27213// multiple sessions.
27214// If not specified, defaults to ""
27215func MutableHashTableV2SharedName(value string) MutableHashTableV2Attr {
27216	return func(m optionalAttr) {
27217		m["shared_name"] = value
27218	}
27219}
27220
27221// MutableHashTableV2UseNodeNameSharing sets the optional use_node_name_sharing attribute to value.
27222//
27223// value: If true and shared_name is empty, the table is shared
27224// using the node name.
27225// If not specified, defaults to false
27226func MutableHashTableV2UseNodeNameSharing(value bool) MutableHashTableV2Attr {
27227	return func(m optionalAttr) {
27228		m["use_node_name_sharing"] = value
27229	}
27230}
27231
27232// Creates an empty hash table.
27233//
27234// This op creates a mutable hash table, specifying the type of its keys and
27235// values. Each value must be a scalar. Data can be inserted into the table using
27236// the insert operations. It does not support the initialization operation.
27237//
27238// Arguments:
27239//
27240//	key_dtype: Type of the table keys.
27241//	value_dtype: Type of the table values.
27242//
27243// Returns Handle to a table.
27244func MutableHashTableV2(scope *Scope, key_dtype tf.DataType, value_dtype tf.DataType, optional ...MutableHashTableV2Attr) (table_handle tf.Output) {
27245	if scope.Err() != nil {
27246		return
27247	}
27248	attrs := map[string]interface{}{"key_dtype": key_dtype, "value_dtype": value_dtype}
27249	for _, a := range optional {
27250		a(attrs)
27251	}
27252	opspec := tf.OpSpec{
27253		Type: "MutableHashTableV2",
27254
27255		Attrs: attrs,
27256	}
27257	op := scope.AddOperation(opspec)
27258	return op.Output(0)
27259}
27260
27261// Locks a mutex resource.  The output is the lock.  So long as the lock tensor
27262//
27263// is alive, any other request to use `MutexLock` with this mutex will wait.
27264//
27265// This is particularly useful for creating a critical section when used in
27266// conjunction with `MutexLockIdentity`:
27267//
27268// ```python
27269//
27270// mutex = mutex_v2(
27271//
27272//	shared_name=handle_name, container=container, name=name)
27273//
27274// def execute_in_critical_section(fn, *args, **kwargs):
27275//
27276//	lock = gen_resource_variable_ops.mutex_lock(mutex)
27277//
27278//	with ops.control_dependencies([lock]):
27279//	  r = fn(*args, **kwargs)
27280//
27281//	with ops.control_dependencies(nest.flatten(r)):
27282//	  with ops.colocate_with(mutex):
27283//	    ensure_lock_exists = mutex_lock_identity(lock)
27284//
27285//	  # Make sure that if any element of r is accessed, all of
27286//	  # them are executed together.
27287//	  r = nest.map_structure(tf.identity, r)
27288//
27289//	with ops.control_dependencies([ensure_lock_exists]):
27290//	  return nest.map_structure(tf.identity, r)
27291//
27292// ```
27293//
27294// While `fn` is running in the critical section, no other functions which wish to
27295// use this critical section may run.
27296//
27297// Often the use case is that two executions of the same graph, in parallel,
27298// wish to run `fn`; and we wish to ensure that only one of them executes
27299// at a time.  This is especially important if `fn` modifies one or more
27300// variables at a time.
27301//
27302// It is also useful if two separate functions must share a resource, but we
27303// wish to ensure the usage is exclusive.
27304//
27305// Arguments:
27306//
27307//	mutex: The mutex resource to lock.
27308//
27309// Returns A tensor that keeps a shared pointer to a lock on the mutex;
27310// when the Tensor is destroyed, the use count on the shared pointer is decreased
27311// by 1.  When it reaches 0, the lock is released.
27312func MutexLock(scope *Scope, mutex tf.Output) (mutex_lock tf.Output) {
27313	if scope.Err() != nil {
27314		return
27315	}
27316	opspec := tf.OpSpec{
27317		Type: "MutexLock",
27318		Input: []tf.Input{
27319			mutex,
27320		},
27321	}
27322	op := scope.AddOperation(opspec)
27323	return op.Output(0)
27324}
27325
27326// MutexV2Attr is an optional argument to MutexV2.
27327type MutexV2Attr func(optionalAttr)
27328
27329// MutexV2Container sets the optional container attribute to value.
27330//
27331// value: If non-empty, this variable is placed in the given container.
27332// Otherwise, a default container is used.
27333// If not specified, defaults to ""
27334func MutexV2Container(value string) MutexV2Attr {
27335	return func(m optionalAttr) {
27336		m["container"] = value
27337	}
27338}
27339
27340// MutexV2SharedName sets the optional shared_name attribute to value.
27341//
27342// value: If non-empty, this variable is named in the given bucket
27343// with this shared_name. Otherwise, the node name is used instead.
27344// If not specified, defaults to ""
27345func MutexV2SharedName(value string) MutexV2Attr {
27346	return func(m optionalAttr) {
27347		m["shared_name"] = value
27348	}
27349}
27350
27351// Creates a Mutex resource that can be locked by `MutexLock`.
27352//
27353// Returns The mutex resource.
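//
// A minimal sketch (editorial; the shared name is illustrative):
//
//	s := op.NewScope()
//	mutex := op.MutexV2(s, op.MutexV2SharedName("my_mutex"))
//	lock := op.MutexLock(s, mutex)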
27354func MutexV2(scope *Scope, optional ...MutexV2Attr) (resource tf.Output) {
27355	if scope.Err() != nil {
27356		return
27357	}
27358	attrs := map[string]interface{}{}
27359	for _, a := range optional {
27360		a(attrs)
27361	}
27362	opspec := tf.OpSpec{
27363		Type: "MutexV2",
27364
27365		Attrs: attrs,
27366	}
27367	op := scope.AddOperation(opspec)
27368	return op.Output(0)
27369}
27370
27371// Outputs a tensor containing the reduction across all input tensors.
27372//
27373// Outputs a tensor containing the reduction across all input tensors passed to ops
27374// within the same `shared_name`.
27375//
27376// The graph should be constructed so if one op runs with shared_name value `c`,
27377// then `num_devices` ops will run with shared_name value `c`.  Failure to do so
27378// will cause the graph execution to fail to complete.
27379//
27380// input: the input to the reduction
27381// data: the value of the reduction across all `num_devices` devices.
27382// reduction: the reduction operation to perform.
27383// num_devices: The number of devices participating in this reduction.
27384// shared_name: Identifier that is shared between ops of the same reduction.
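//
// A hedged sketch (editorial; `input` is assumed to be a tensor already placed
// on one of the participating devices, and in a real graph each of the
// `num_devices` devices runs a matching op with the same shared_name):
//
//	sum := op.NcclAllReduce(s, input, "sum", 2, "c")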
27385func NcclAllReduce(scope *Scope, input tf.Output, reduction string, num_devices int64, shared_name string) (data tf.Output) {
27386	if scope.Err() != nil {
27387		return
27388	}
27389	attrs := map[string]interface{}{"reduction": reduction, "num_devices": num_devices, "shared_name": shared_name}
27390	opspec := tf.OpSpec{
27391		Type: "NcclAllReduce",
27392		Input: []tf.Input{
27393			input,
27394		},
27395		Attrs: attrs,
27396	}
27397	op := scope.AddOperation(opspec)
27398	return op.Output(0)
27399}
27400
27401// Sends `input` to all devices that are connected to the output.
27404//
27405// The graph should be constructed so that all ops connected to the output have a
27406// valid device assignment, and the op itself is assigned one of these devices.
27407//
27408// input: The input to the broadcast.
27409// output: The same as input.
27410// shape: The shape of the input tensor.
27411func NcclBroadcast(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
27412	if scope.Err() != nil {
27413		return
27414	}
27415	attrs := map[string]interface{}{"shape": shape}
27416	opspec := tf.OpSpec{
27417		Type: "NcclBroadcast",
27418		Input: []tf.Input{
27419			input,
27420		},
27421		Attrs: attrs,
27422	}
27423	op := scope.AddOperation(opspec)
27424	return op.Output(0)
27425}
27426
27427// Reduces `input` from `num_devices` using `reduction` to a single device.
27430//
27431// The graph should be constructed so that all inputs have a valid device
27432// assignment, and the op itself is assigned one of these devices.
27433//
27434// input: The input to the reduction.
27435// data: the value of the reduction across all `num_devices` devices.
27436// reduction: the reduction operation to perform.
27437func NcclReduce(scope *Scope, input []tf.Output, reduction string) (data tf.Output) {
27438	if scope.Err() != nil {
27439		return
27440	}
27441	attrs := map[string]interface{}{"reduction": reduction}
27442	opspec := tf.OpSpec{
27443		Type: "NcclReduce",
27444		Input: []tf.Input{
27445			tf.OutputList(input),
27446		},
27447		Attrs: attrs,
27448	}
27449	op := scope.AddOperation(opspec)
27450	return op.Output(0)
27451}
27452
27453// Selects the k nearest centers for each point.
27454//
27455// Rows of points are assumed to be input points. Rows of centers are assumed to be
27456// the list of candidate centers. For each point, the k centers that have least L2
27457// distance to it are computed.
27458//
27459// Arguments:
27460//
27461//	points: Matrix of shape (n, d). Rows are assumed to be input points.
27462//	centers: Matrix of shape (m, d). Rows are assumed to be centers.
27463//	k: Number of nearest centers to return for each point. If k is larger than m, then
27464//
27465// only m centers are returned.
27466//
27467// Returns:
27468//
27469//	nearest_center_indices: Matrix of shape (n, min(m, k)). Each row contains the indices of the centers
27470//
27471// closest to the corresponding point, ordered by increasing distance.
27472//
27473//	nearest_center_distances: Matrix of shape (n, min(m, k)). Each row contains the squared L2 distance to the
27474//
27475// corresponding center in nearest_center_indices.
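//
// A minimal sketch (editorial; the points, centers, and k are illustrative):
//
//	s := op.NewScope()
//	points := op.Const(s, [][]float32{{0, 0}, {5, 5}})
//	centers := op.Const(s, [][]float32{{0, 1}, {4, 4}, {10, 10}})
//	k := op.Const(s, int64(2))
//	idx, dist := op.NearestNeighbors(s, points, centers, k)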
27476func NearestNeighbors(scope *Scope, points tf.Output, centers tf.Output, k tf.Output) (nearest_center_indices tf.Output, nearest_center_distances tf.Output) {
27477	if scope.Err() != nil {
27478		return
27479	}
27480	opspec := tf.OpSpec{
27481		Type: "NearestNeighbors",
27482		Input: []tf.Input{
27483			points, centers, k,
27484		},
27485	}
27486	op := scope.AddOperation(opspec)
27487	return op.Output(0), op.Output(1)
27488}
27489
27490// Computes numerical negative value element-wise.
27491//
27492// I.e., \\(y = -x\\).
27493func Neg(scope *Scope, x tf.Output) (y tf.Output) {
27494	if scope.Err() != nil {
27495		return
27496	}
27497	opspec := tf.OpSpec{
27498		Type: "Neg",
27499		Input: []tf.Input{
27500			x,
27501		},
27502	}
27503	op := scope.AddOperation(opspec)
27504	return op.Output(0)
27505}
27506
27507// Returns the next representable value of `x1` in the direction of `x2`, element-wise.
27508//
27509// This operation returns the same result as the C++ std::nextafter function.
27510//
27511// It can also return a subnormal number.
27512//
27513// @compatibility(cpp)
27514// Equivalent to C++ std::nextafter function.
27515// @end_compatibility
27516func NextAfter(scope *Scope, x1 tf.Output, x2 tf.Output) (output tf.Output) {
27517	if scope.Err() != nil {
27518		return
27519	}
27520	opspec := tf.OpSpec{
27521		Type: "NextAfter",
27522		Input: []tf.Input{
27523			x1, x2,
27524		},
27525	}
27526	op := scope.AddOperation(opspec)
27527	return op.Output(0)
27528}
27529
27530// Makes its input available to the next iteration.
27531//
27532// Arguments:
27533//
27534//	data: The tensor to be made available to the next iteration.
27535//
27536// Returns The same tensor as `data`.
27537func NextIteration(scope *Scope, data tf.Output) (output tf.Output) {
27538	if scope.Err() != nil {
27539		return
27540	}
27541	opspec := tf.OpSpec{
27542		Type: "NextIteration",
27543		Input: []tf.Input{
27544			data,
27545		},
27546	}
27547	op := scope.AddOperation(opspec)
27548	return op.Output(0)
27549}
27550
27551// Does nothing. Only useful as a placeholder for control edges.
27552//
27553// Returns the created operation.
27554func NoOp(scope *Scope) (o *tf.Operation) {
27555	if scope.Err() != nil {
27556		return
27557	}
27558	opspec := tf.OpSpec{
27559		Type: "NoOp",
27560	}
27561	return scope.AddOperation(opspec)
27562}
27563
27564// NonDeterministicIntsAttr is an optional argument to NonDeterministicInts.
27565type NonDeterministicIntsAttr func(optionalAttr)
27566
27567// NonDeterministicIntsDtype sets the optional dtype attribute to value.
27568//
27569// value: The type of the output.
27570// If not specified, defaults to DT_INT64
27571func NonDeterministicIntsDtype(value tf.DataType) NonDeterministicIntsAttr {
27572	return func(m optionalAttr) {
27573		m["dtype"] = value
27574	}
27575}
27576
27577// Non-deterministically generates some integers.
27578//
27579// This op may use some OS-provided source of non-determinism (e.g. an RNG), so each execution will give different results.
27580//
27581// Arguments:
27582//
27583//	shape: The shape of the output tensor.
27584//
27585// Returns Non-deterministic integer values with specified shape.
27586func NonDeterministicInts(scope *Scope, shape tf.Output, optional ...NonDeterministicIntsAttr) (output tf.Output) {
27587	if scope.Err() != nil {
27588		return
27589	}
27590	attrs := map[string]interface{}{}
27591	for _, a := range optional {
27592		a(attrs)
27593	}
27594	opspec := tf.OpSpec{
27595		Type: "NonDeterministicInts",
27596		Input: []tf.Input{
27597			shape,
27598		},
27599		Attrs: attrs,
27600	}
27601	op := scope.AddOperation(opspec)
27602	return op.Output(0)
27603}
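
// Example (editorial sketch, not machine generated): the functional options
// above are passed as trailing arguments, e.g. to request int32 output
// instead of the DT_INT64 default.
//
//	s := op.NewScope()
//	shape := op.Const(s, []int64{2, 3})
//	ints := op.NonDeterministicInts(s, shape, op.NonDeterministicIntsDtype(tf.Int32))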
27604
27605// NonMaxSuppressionAttr is an optional argument to NonMaxSuppression.
27606type NonMaxSuppressionAttr func(optionalAttr)
27607
27608// NonMaxSuppressionIouThreshold sets the optional iou_threshold attribute to value.
27609//
27610// value: A float representing the threshold for deciding whether boxes
27611// overlap too much with respect to IOU.
27612// If not specified, defaults to 0.5
27613func NonMaxSuppressionIouThreshold(value float32) NonMaxSuppressionAttr {
27614	return func(m optionalAttr) {
27615		m["iou_threshold"] = value
27616	}
27617}
27618
27619// Greedily selects a subset of bounding boxes in descending order of score,
27620//
27621// pruning away boxes that have high intersection-over-union (IOU) overlap
27622// with previously selected boxes.  Bounding boxes are supplied as
27623// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
27624// diagonal pair of box corners and the coordinates can be provided as normalized
27625// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
27626// is agnostic to where the origin is in the coordinate system.  Note that this
27627// algorithm is invariant to orthogonal transformations and translations
27628// of the coordinate system; thus translation or reflection of the coordinate
27629// system results in the same boxes being selected by the algorithm.
27630// The output of this operation is a set of integers indexing into the input
27631// collection of bounding boxes representing the selected boxes.  The bounding
27632// box coordinates corresponding to the selected indices can then be obtained
27633// using the `tf.gather` operation.  For example:
27634//
27635//	selected_indices = tf.image.non_max_suppression(
27636//	    boxes, scores, max_output_size, iou_threshold)
27637//	selected_boxes = tf.gather(boxes, selected_indices)
27638//
27639// Arguments:
27640//
27641//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
27642//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
27643//
27644// score corresponding to each box (each row of boxes).
27645//
27646//	max_output_size: A scalar integer tensor representing the maximum number of
27647//
27648// boxes to be selected by non max suppression.
27649//
27650// Returns A 1-D integer tensor of shape `[M]` representing the selected
27651// indices from the boxes tensor, where `M <= max_output_size`.
27652func NonMaxSuppression(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, optional ...NonMaxSuppressionAttr) (selected_indices tf.Output) {
27653	if scope.Err() != nil {
27654		return
27655	}
27656	attrs := map[string]interface{}{}
27657	for _, a := range optional {
27658		a(attrs)
27659	}
27660	opspec := tf.OpSpec{
27661		Type: "NonMaxSuppression",
27662		Input: []tf.Input{
27663			boxes, scores, max_output_size,
27664		},
27665		Attrs: attrs,
27666	}
27667	op := scope.AddOperation(opspec)
27668	return op.Output(0)
27669}
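
// Example (editorial sketch, not machine generated): the same flow in Go,
// selecting up to 3 boxes and gathering their coordinates.
//
//	s := op.NewScope()
//	boxes := op.Const(s, [][]float32{{0, 0, 1, 1}, {0, 0.1, 1, 1.1}, {0, 10, 1, 11}})
//	scores := op.Const(s, []float32{0.9, 0.75, 0.6})
//	maxOut := op.Const(s, int32(3))
//	selected := op.NonMaxSuppression(s, boxes, scores, maxOut,
//		op.NonMaxSuppressionIouThreshold(0.5))
//	selectedBoxes := op.Gather(s, boxes, selected)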
27670
27671// Greedily selects a subset of bounding boxes in descending order of score,
27672//
27673// pruning away boxes that have high intersection-over-union (IOU) overlap
27674// with previously selected boxes.  Bounding boxes are supplied as
27675// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
27676// diagonal pair of box corners and the coordinates can be provided as normalized
27677// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
27678// is agnostic to where the origin is in the coordinate system.  Note that this
27679// algorithm is invariant to orthogonal transformations and translations
27680// of the coordinate system; thus translation or reflection of the coordinate
27681// system results in the same boxes being selected by the algorithm.
27682//
27683// The output of this operation is a set of integers indexing into the input
27684// collection of bounding boxes representing the selected boxes.  The bounding
27685// box coordinates corresponding to the selected indices can then be obtained
27686// using the `tf.gather` operation.  For example:
27687//
27688//	selected_indices = tf.image.non_max_suppression(
27689//	    boxes, scores, max_output_size, iou_threshold)
27690//	selected_boxes = tf.gather(boxes, selected_indices)
27691//
27692// Arguments:
27693//
27694//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
27695//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
27696//
27697// score corresponding to each box (each row of boxes).
27698//
27699//	max_output_size: A scalar integer tensor representing the maximum number of
27700//
27701// boxes to be selected by non max suppression.
27702//
27703//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
27704//
27705// boxes overlap too much with respect to IOU.
27706//
27707// Returns A 1-D integer tensor of shape `[M]` representing the selected
27708// indices from the boxes tensor, where `M <= max_output_size`.
27709func NonMaxSuppressionV2(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output) (selected_indices tf.Output) {
27710	if scope.Err() != nil {
27711		return
27712	}
27713	opspec := tf.OpSpec{
27714		Type: "NonMaxSuppressionV2",
27715		Input: []tf.Input{
27716			boxes, scores, max_output_size, iou_threshold,
27717		},
27718	}
27719	op := scope.AddOperation(opspec)
27720	return op.Output(0)
27721}
27722
27723// Greedily selects a subset of bounding boxes in descending order of score,
27724//
27725// pruning away boxes that have high intersection-over-union (IOU) overlap
27726// with previously selected boxes.  Bounding boxes with score less than
27727// `score_threshold` are removed.  Bounding boxes are supplied as
27728// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
27729// diagonal pair of box corners and the coordinates can be provided as normalized
27730// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
27731// is agnostic to where the origin is in the coordinate system and more
27732// generally is invariant to orthogonal transformations and translations
27733// of the coordinate system; thus translation or reflection of the coordinate
27734// system results in the same boxes being selected by the algorithm.
27735// The output of this operation is a set of integers indexing into the input
27736// collection of bounding boxes representing the selected boxes.  The bounding
27737// box coordinates corresponding to the selected indices can then be obtained
27738// using the `tf.gather` operation.  For example:
27739//
27740//	selected_indices = tf.image.non_max_suppression(
27741//	    boxes, scores, max_output_size, iou_threshold, score_threshold)
27742//	selected_boxes = tf.gather(boxes, selected_indices)
27743//
27744// Arguments:
27745//
27746//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
27747//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
27748//
27749// score corresponding to each box (each row of boxes).
27750//
27751//	max_output_size: A scalar integer tensor representing the maximum number of
27752//
27753// boxes to be selected by non max suppression.
27754//
27755//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
27756//
27757// boxes overlap too much with respect to IOU.
27758//
27759//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
27760//
27761// boxes based on score.
27762//
27763// Returns A 1-D integer tensor of shape `[M]` representing the selected
27764// indices from the boxes tensor, where `M <= max_output_size`.
27765func NonMaxSuppressionV3(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
27766	if scope.Err() != nil {
27767		return
27768	}
27769	opspec := tf.OpSpec{
27770		Type: "NonMaxSuppressionV3",
27771		Input: []tf.Input{
27772			boxes, scores, max_output_size, iou_threshold, score_threshold,
27773		},
27774	}
27775	op := scope.AddOperation(opspec)
27776	return op.Output(0)
27777}
27778
27779// NonMaxSuppressionV4Attr is an optional argument to NonMaxSuppressionV4.
27780type NonMaxSuppressionV4Attr func(optionalAttr)
27781
27782// NonMaxSuppressionV4PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
27783//
27784// value: If true, the output `selected_indices` is padded to be of length
27785// `max_output_size`. Defaults to false.
27786// If not specified, defaults to false
27787func NonMaxSuppressionV4PadToMaxOutputSize(value bool) NonMaxSuppressionV4Attr {
27788	return func(m optionalAttr) {
27789		m["pad_to_max_output_size"] = value
27790	}
27791}
27792
27793// Greedily selects a subset of bounding boxes in descending order of score,
27794//
27795// pruning away boxes that have high intersection-over-union (IOU) overlap
27796// with previously selected boxes.  Bounding boxes with score less than
27797// `score_threshold` are removed.  Bounding boxes are supplied as
27798// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
27799// diagonal pair of box corners and the coordinates can be provided as normalized
27800// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
27801// is agnostic to where the origin is in the coordinate system and more
27802// generally is invariant to orthogonal transformations and translations
27803// of the coordinate system; thus translation or reflection of the coordinate
27804// system results in the same boxes being selected by the algorithm.
27805// The output of this operation is a set of integers indexing into the input
27806// collection of bounding boxes representing the selected boxes.  The bounding
27807// box coordinates corresponding to the selected indices can then be obtained
27808// using the `tf.gather` operation.  For example:
27809//
27810//	selected_indices = tf.image.non_max_suppression(
27811//	    boxes, scores, max_output_size, iou_threshold, score_threshold)
27812//	selected_boxes = tf.gather(boxes, selected_indices)
27813//
27814// Arguments:
27815//
27816//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
27817//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
27818//
27819// score corresponding to each box (each row of boxes).
27820//
27821//	max_output_size: A scalar integer tensor representing the maximum number of
27822//
27823// boxes to be selected by non max suppression.
27824//
27825//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
27826//
27827// boxes overlap too much with respect to IOU.
27828//
27829//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
27830//
27831// boxes based on score.
27832//
27833// Returns:
27834//
27835//	selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
27836//
27837// indices from the boxes tensor, where `M <= max_output_size`.
27838//
27839//	valid_outputs: A 0-D integer tensor representing the number of valid elements in
27840//
27841// `selected_indices`, with the valid elements appearing first.
27842func NonMaxSuppressionV4(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, optional ...NonMaxSuppressionV4Attr) (selected_indices tf.Output, valid_outputs tf.Output) {
27843	if scope.Err() != nil {
27844		return
27845	}
27846	attrs := map[string]interface{}{}
27847	for _, a := range optional {
27848		a(attrs)
27849	}
27850	opspec := tf.OpSpec{
27851		Type: "NonMaxSuppressionV4",
27852		Input: []tf.Input{
27853			boxes, scores, max_output_size, iou_threshold, score_threshold,
27854		},
27855		Attrs: attrs,
27856	}
27857	op := scope.AddOperation(opspec)
27858	return op.Output(0), op.Output(1)
27859}
27860
27861// NonMaxSuppressionV5Attr is an optional argument to NonMaxSuppressionV5.
27862type NonMaxSuppressionV5Attr func(optionalAttr)
27863
27864// NonMaxSuppressionV5PadToMaxOutputSize sets the optional pad_to_max_output_size attribute to value.
27865//
27866// value: If true, the output `selected_indices` is padded to be of length
27867// `max_output_size`. Defaults to false.
27868// If not specified, defaults to false
27869func NonMaxSuppressionV5PadToMaxOutputSize(value bool) NonMaxSuppressionV5Attr {
27870	return func(m optionalAttr) {
27871		m["pad_to_max_output_size"] = value
27872	}
27873}
27874
27875// Greedily selects a subset of bounding boxes in descending order of score,
27876//
27877// pruning away boxes that have high intersection-over-union (IOU) overlap
27878// with previously selected boxes.  Bounding boxes with score less than
27879// `score_threshold` are removed.  Bounding boxes are supplied as
27880// [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
27881// diagonal pair of box corners and the coordinates can be provided as normalized
27882// (i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
27883// is agnostic to where the origin is in the coordinate system and more
27884// generally is invariant to orthogonal transformations and translations
27885// of the coordinate system; thus translation or reflection of the coordinate
27886// system results in the same boxes being selected by the algorithm.
27887// The output of this operation is a set of integers indexing into the input
27888// collection of bounding boxes representing the selected boxes.  The bounding
27889// box coordinates corresponding to the selected indices can then be obtained
27890// using the `tf.gather` operation.  For example:
27891//
27892//	selected_indices = tf.image.non_max_suppression(
27893//	    boxes, scores, max_output_size, iou_threshold, score_threshold)
27894//	selected_boxes = tf.gather(boxes, selected_indices)
27895//
27896// This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
27897// Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
27898// of other overlapping boxes instead of directly causing them to be pruned.
27899// To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
27900// larger than 0.
27901//
27902// Arguments:
27903//
27904//	boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
27905//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
27906//
27907// score corresponding to each box (each row of boxes).
27908//
27909//	max_output_size: A scalar integer tensor representing the maximum number of
27910//
27911// boxes to be selected by non max suppression.
27912//
27913//	iou_threshold: A 0-D float tensor representing the threshold for deciding whether
27914//
27915// boxes overlap too much with respect to IOU.
27916//
27917//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
27918//
27919// boxes based on score.
27920//
27921//	soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
27922//
27923// al (c.f. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (which
27924// is the default), we fall back to standard (hard) NMS.
27925//
27926// Returns:
27927//
27928//	selected_indices: A 1-D integer tensor of shape `[M]` representing the selected
27929//
27930// indices from the boxes tensor, where `M <= max_output_size`.
27931//
27932//	selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding
27933//
27934// scores for each selected box, where `M <= max_output_size`.  Scores only differ
27935// from corresponding input scores when using Soft NMS (i.e. when
27936// `soft_nms_sigma>0`).
27937//
27938//	valid_outputs: A 0-D integer tensor representing the number of valid elements in
27939//
27940// `selected_indices`, with the valid elements appearing first.
27941func NonMaxSuppressionV5(scope *Scope, boxes tf.Output, scores tf.Output, max_output_size tf.Output, iou_threshold tf.Output, score_threshold tf.Output, soft_nms_sigma tf.Output, optional ...NonMaxSuppressionV5Attr) (selected_indices tf.Output, selected_scores tf.Output, valid_outputs tf.Output) {
27942	if scope.Err() != nil {
27943		return
27944	}
27945	attrs := map[string]interface{}{}
27946	for _, a := range optional {
27947		a(attrs)
27948	}
27949	opspec := tf.OpSpec{
27950		Type: "NonMaxSuppressionV5",
27951		Input: []tf.Input{
27952			boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma,
27953		},
27954		Attrs: attrs,
27955	}
27956	op := scope.AddOperation(opspec)
27957	return op.Output(0), op.Output(1), op.Output(2)
27958}
27959
27960// Greedily selects a subset of bounding boxes in descending order of score,
27961//
27962// pruning away boxes that have high overlaps
27963// with previously selected boxes.  Bounding boxes with score less than
27964// `score_threshold` are removed. N-by-n overlap values are supplied as a square matrix,
27965// which allows defining a custom overlap criterion (e.g. intersection over union,
27966// intersection over area, etc.).
27967//
27968// The output of this operation is a set of integers indexing into the input
27969// collection of bounding boxes representing the selected boxes.  The bounding
27970// box coordinates corresponding to the selected indices can then be obtained
27971// using the `tf.gather` operation.  For example:
27972//
27973//	selected_indices = tf.image.non_max_suppression_with_overlaps(
27974//	    overlaps, scores, max_output_size, overlap_threshold, score_threshold)
27975//	selected_boxes = tf.gather(boxes, selected_indices)
27976//
27977// Arguments:
27978//
27979//	overlaps: A 2-D float tensor of shape `[num_boxes, num_boxes]` representing
27980//
27981// the n-by-n box overlap values.
27982//
27983//	scores: A 1-D float tensor of shape `[num_boxes]` representing a single
27984//
27985// score corresponding to each box (each row of boxes).
27986//
27987//	max_output_size: A scalar integer tensor representing the maximum number of
27988//
27989// boxes to be selected by non max suppression.
27990//
27991//	overlap_threshold: A 0-D float tensor representing the threshold for deciding whether
27992//
27993// boxes overlap too much.
27994//
27995//	score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
27996//
27997// boxes based on score.
27998//
27999// Returns A 1-D integer tensor of shape `[M]` representing the selected
28000// indices from the boxes tensor, where `M <= max_output_size`.
28001func NonMaxSuppressionWithOverlaps(scope *Scope, overlaps tf.Output, scores tf.Output, max_output_size tf.Output, overlap_threshold tf.Output, score_threshold tf.Output) (selected_indices tf.Output) {
28002	if scope.Err() != nil {
28003		return
28004	}
28005	opspec := tf.OpSpec{
28006		Type: "NonMaxSuppressionWithOverlaps",
28007		Input: []tf.Input{
28008			overlaps, scores, max_output_size, overlap_threshold, score_threshold,
28009		},
28010	}
28011	op := scope.AddOperation(opspec)
28012	return op.Output(0)
28013}
28014
28015// NotEqualAttr is an optional argument to NotEqual.
28016type NotEqualAttr func(optionalAttr)
28017
28018// NotEqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value.
28019// If not specified, defaults to true
28020func NotEqualIncompatibleShapeError(value bool) NotEqualAttr {
28021	return func(m optionalAttr) {
28022		m["incompatible_shape_error"] = value
28023	}
28024}
28025
28026// Returns the truth value of (x != y) element-wise.
28027//
28028// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting
28029// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
28030func NotEqual(scope *Scope, x tf.Output, y tf.Output, optional ...NotEqualAttr) (z tf.Output) {
28031	if scope.Err() != nil {
28032		return
28033	}
28034	attrs := map[string]interface{}{}
28035	for _, a := range optional {
28036		a(attrs)
28037	}
28038	opspec := tf.OpSpec{
28039		Type: "NotEqual",
28040		Input: []tf.Input{
28041			x, y,
28042		},
28043		Attrs: attrs,
28044	}
28045	op := scope.AddOperation(opspec)
28046	return op.Output(0)
28047}
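
// Example (editorial sketch, not machine generated): `NotEqual` broadcasts
// a vector against each row of a matrix.
//
//	s := op.NewScope()
//	x := op.Const(s, [][]int32{{1, 2}, {3, 4}})
//	y := op.Const(s, []int32{1, 4})
//	z := op.NotEqual(s, x, y) // [[false, true], [true, false]]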
28048
28049// NthElementAttr is an optional argument to NthElement.
28050type NthElementAttr func(optionalAttr)
28051
28052// NthElementReverse sets the optional reverse attribute to value.
28053//
28054// value: When set to True, find the nth-largest value in the vector instead
28055// of the nth-smallest.
28056// If not specified, defaults to false
28057func NthElementReverse(value bool) NthElementAttr {
28058	return func(m optionalAttr) {
28059		m["reverse"] = value
28060	}
28061}
28062
28063// Finds values of the `n`-th order statistic for the last dimension.
28064//
28065// If the input is a vector (rank-1), finds the entry which is the nth-smallest
28066// value in the vector and outputs its value as a scalar tensor.
28067//
28068// For matrices (resp. higher rank input), computes the entry which is the
28069// nth-smallest value in each row (resp. vector along the last dimension). Thus,
28070//
28071//	values.shape = input.shape[:-1]
28072//
28073// Arguments:
28074//
28075//	input: 1-D or higher with last dimension at least `n+1`.
28076//	n: 0-D. Position of sorted vector to select along the last dimension (along
28077//
28078// each row for matrices). Valid range of n is `[0, input.shape[-1])`
28079//
28080// Returns The `n`-th order statistic along each last dimensional slice.
28081func NthElement(scope *Scope, input tf.Output, n tf.Output, optional ...NthElementAttr) (values tf.Output) {
28082	if scope.Err() != nil {
28083		return
28084	}
28085	attrs := map[string]interface{}{}
28086	for _, a := range optional {
28087		a(attrs)
28088	}
28089	opspec := tf.OpSpec{
28090		Type: "NthElement",
28091		Input: []tf.Input{
28092			input, n,
28093		},
28094		Attrs: attrs,
28095	}
28096	op := scope.AddOperation(opspec)
28097	return op.Output(0)
28098}
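
// Example (editorial sketch, not machine generated): n = 1 selects the
// second-smallest entry of each row, so values.shape == input.shape[:-1].
//
//	s := op.NewScope()
//	input := op.Const(s, [][]float32{{5, 1, 3}, {2, 8, 4}})
//	n := op.Const(s, int32(1))
//	vals := op.NthElement(s, input, n) // [3, 4]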
28099
28100// OneHotAttr is an optional argument to OneHot.
28101type OneHotAttr func(optionalAttr)
28102
28103// OneHotAxis sets the optional axis attribute to value.
28104//
28105// value: The axis to fill (default: -1, a new inner-most axis).
28106// If not specified, defaults to -1
28107func OneHotAxis(value int64) OneHotAttr {
28108	return func(m optionalAttr) {
28109		m["axis"] = value
28110	}
28111}
28112
28113// Returns a one-hot tensor.
28114//
28115// The locations represented by indices in `indices` take value `on_value`,
28116// while all other locations take value `off_value`.
28117//
28118// If the input `indices` is rank `N`, the output will have rank `N+1`.
28119// The new axis is created at dimension `axis` (default: the new axis is
28120// appended at the end).
28121//
28122// If `indices` is a scalar the output shape will be a vector of length `depth`.
28123//
28124// If `indices` is a vector of length `features`, the output shape will be:
28125// ```
28126//
28127//	features x depth if axis == -1
28128//	depth x features if axis == 0
28129//
28130// ```
28131//
28132// If `indices` is a matrix (batch) with shape `[batch, features]`,
28133// the output shape will be:
28134// ```
28135//
28136//	batch x features x depth if axis == -1
28137//	batch x depth x features if axis == 1
28138//	depth x batch x features if axis == 0
28139//
28140// ```
28141//
28142// Examples
28143// =========
28144//
28145// Suppose that
28146// ```
28147//
28148//	indices = [0, 2, -1, 1]
28149//	depth = 3
28150//	on_value = 5.0
28151//	off_value = 0.0
28152//	axis = -1
28153//
28154// ```
28155//
28156// Then output is `[4 x 3]`:
28157// ```
28158// output =
28159//
28160//	[5.0 0.0 0.0]  // one_hot(0)
28161//	[0.0 0.0 5.0]  // one_hot(2)
28162//	[0.0 0.0 0.0]  // one_hot(-1)
28163//	[0.0 5.0 0.0]  // one_hot(1)
28164//
28165// ```
28166//
28167// Suppose that
28168// ```
28169//
28170//	indices = [0, 2, -1, 1]
28171//	depth = 3
28172//	on_value = 0.0
28173//	off_value = 3.0
28174//	axis = 0
28175//
28176// ```
28177//
28178// Then output is `[3 x 4]`:
28179// ```
28180// output =
28181//
28182//	[0.0 3.0 3.0 3.0]
28183//	[3.0 3.0 3.0 0.0]
28184//	[3.0 3.0 3.0 3.0]
28185//	[3.0 0.0 3.0 3.0]
28186//
28187// //  ^                one_hot(0)
28188// //      ^            one_hot(2)
28189// //          ^        one_hot(-1)
28190// //              ^    one_hot(1)
28191// ```
28192//
28193// Suppose that
28194// ```
28195//
28196//	indices = [[0, 2], [1, -1]]
28197//	depth = 3
28198//	on_value = 1.0
28199//	off_value = 0.0
28200//	axis = -1
28201//
28202// ```
28203//
28204// Then output is `[2 x 2 x 3]`:
28205// ```
28206// output =
28207//
28208//	[
28209//	  [1.0, 0.0, 0.0]  // one_hot(0)
28210//	  [0.0, 0.0, 1.0]  // one_hot(2)
28211//	][
28212//	  [0.0, 1.0, 0.0]  // one_hot(1)
28213//	  [0.0, 0.0, 0.0]  // one_hot(-1)
28214//	]
28215//
28216// ```
28217//
28218// Arguments:
28219//
28220//	indices: A tensor of indices.
28221//	depth: A scalar defining the depth of the one hot dimension.
28222//	on_value: A scalar defining the value to fill in output when `indices[j] = i`.
28223//	off_value: A scalar defining the value to fill in output when `indices[j] != i`.
28224//
28225// Returns The one-hot tensor.
28226func OneHot(scope *Scope, indices tf.Output, depth tf.Output, on_value tf.Output, off_value tf.Output, optional ...OneHotAttr) (output tf.Output) {
28227	if scope.Err() != nil {
28228		return
28229	}
28230	attrs := map[string]interface{}{}
28231	for _, a := range optional {
28232		a(attrs)
28233	}
28234	opspec := tf.OpSpec{
28235		Type: "OneHot",
28236		Input: []tf.Input{
28237			indices, depth, on_value, off_value,
28238		},
28239		Attrs: attrs,
28240	}
28241	op := scope.AddOperation(opspec)
28242	return op.Output(0)
28243}
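
// Example (editorial sketch, not machine generated): the first example above,
// expressed with this wrapper.
//
//	s := op.NewScope()
//	indices := op.Const(s, []int32{0, 2, -1, 1})
//	depth := op.Const(s, int32(3))
//	on := op.Const(s, float32(5.0))
//	off := op.Const(s, float32(0.0))
//	out := op.OneHot(s, indices, depth, on, off) // shape [4, 3]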
28244
28245// Returns a tensor of ones with the same shape and type as x.
28246//
28247// Arguments:
28248//
28249//	x: a tensor of type T.
28250//
28251// Returns a tensor of the same shape and type as x but filled with ones.
28252func OnesLike(scope *Scope, x tf.Output) (y tf.Output) {
28253	if scope.Err() != nil {
28254		return
28255	}
28256	opspec := tf.OpSpec{
28257		Type: "OnesLike",
28258		Input: []tf.Input{
28259			x,
28260		},
28261	}
28262	op := scope.AddOperation(opspec)
28263	return op.Output(0)
28264}
28265
28266// OptimizeDatasetAttr is an optional argument to OptimizeDataset.
28267type OptimizeDatasetAttr func(optionalAttr)
28268
28269// OptimizeDatasetOptimizationConfigs sets the optional optimization_configs attribute to value.
28270// If not specified, defaults to {}
28271func OptimizeDatasetOptimizationConfigs(value []string) OptimizeDatasetAttr {
28272	return func(m optionalAttr) {
28273		m["optimization_configs"] = value
28274	}
28275}
28276
28277// Creates a dataset by applying optimizations to `input_dataset`.
28278//
28281// Arguments:
28282//
28283//	input_dataset: A variant tensor representing the input dataset.
28284//	optimizations: A `tf.string` vector `tf.Tensor` identifying optimizations to use.
28285func OptimizeDataset(scope *Scope, input_dataset tf.Output, optimizations tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetAttr) (handle tf.Output) {
28286	if scope.Err() != nil {
28287		return
28288	}
28289	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28290	for _, a := range optional {
28291		a(attrs)
28292	}
28293	opspec := tf.OpSpec{
28294		Type: "OptimizeDataset",
28295		Input: []tf.Input{
28296			input_dataset, optimizations,
28297		},
28298		Attrs: attrs,
28299	}
28300	op := scope.AddOperation(opspec)
28301	return op.Output(0)
28302}
28303
28304// OptimizeDatasetV2Attr is an optional argument to OptimizeDatasetV2.
28305type OptimizeDatasetV2Attr func(optionalAttr)
28306
28307// OptimizeDatasetV2OptimizationConfigs sets the optional optimization_configs attribute to value.
28308// If not specified, defaults to {}
28309func OptimizeDatasetV2OptimizationConfigs(value []string) OptimizeDatasetV2Attr {
28310	return func(m optionalAttr) {
28311		m["optimization_configs"] = value
28312	}
28313}
28314
28315// Creates a dataset by applying related optimizations to `input_dataset`.
28316//
28319// Arguments:
28320//
28321//	input_dataset: A variant tensor representing the input dataset.
28322//	optimizations_enabled: A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.
28323//	optimizations_disabled: A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.
28324//	optimizations_default: A `tf.string` vector `tf.Tensor` identifying optimizations enabled by default.
28325func OptimizeDatasetV2(scope *Scope, input_dataset tf.Output, optimizations_enabled tf.Output, optimizations_disabled tf.Output, optimizations_default tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptimizeDatasetV2Attr) (handle tf.Output) {
28326	if scope.Err() != nil {
28327		return
28328	}
28329	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28330	for _, a := range optional {
28331		a(attrs)
28332	}
28333	opspec := tf.OpSpec{
28334		Type: "OptimizeDatasetV2",
28335		Input: []tf.Input{
28336			input_dataset, optimizations_enabled, optimizations_disabled, optimizations_default,
28337		},
28338		Attrs: attrs,
28339	}
28340	op := scope.AddOperation(opspec)
28341	return op.Output(0)
28342}
28343
28344// Constructs an Optional variant from a tuple of tensors.
28345func OptionalFromValue(scope *Scope, components []tf.Output) (optional tf.Output) {
28346	if scope.Err() != nil {
28347		return
28348	}
28349	opspec := tf.OpSpec{
28350		Type: "OptionalFromValue",
28351		Input: []tf.Input{
28352			tf.OutputList(components),
28353		},
28354	}
28355	op := scope.AddOperation(opspec)
28356	return op.Output(0)
28357}
28358
28359// Returns the value stored in an Optional variant or raises an error if none exists.
28360func OptionalGetValue(scope *Scope, optional tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
28361	if scope.Err() != nil {
28362		return
28363	}
28364	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
28365	opspec := tf.OpSpec{
28366		Type: "OptionalGetValue",
28367		Input: []tf.Input{
28368			optional,
28369		},
28370		Attrs: attrs,
28371	}
28372	op := scope.AddOperation(opspec)
28373	if scope.Err() != nil {
28374		return
28375	}
28376	var idx int
28377	var err error
28378	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
28379		scope.UpdateErr("OptionalGetValue", err)
28380		return
28381	}
28382	return components
28383}
28384
28385// Returns true if and only if the given Optional variant has a value.
28386func OptionalHasValue(scope *Scope, optional tf.Output) (has_value tf.Output) {
28387	if scope.Err() != nil {
28388		return
28389	}
28390	opspec := tf.OpSpec{
28391		Type: "OptionalHasValue",
28392		Input: []tf.Input{
28393			optional,
28394		},
28395	}
28396	op := scope.AddOperation(opspec)
28397	return op.Output(0)
28398}
28399
28400// Creates an Optional variant with no value.
28401func OptionalNone(scope *Scope) (optional tf.Output) {
28402	if scope.Err() != nil {
28403		return
28404	}
28405	opspec := tf.OpSpec{
28406		Type: "OptionalNone",
28407	}
28408	op := scope.AddOperation(opspec)
28409	return op.Output(0)
28410}
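
// Example (editorial sketch, not machine generated): wrap a tensor in an
// Optional, test for a value, and unwrap it again. The output_shapes entry
// uses tf.MakeShape from the tf package.
//
//	s := op.NewScope()
//	v := op.Const(s, []float32{1, 2, 3})
//	opt := op.OptionalFromValue(s, []tf.Output{v})
//	has := op.OptionalHasValue(s, opt) // true
//	comps := op.OptionalGetValue(s, opt,
//		[]tf.DataType{tf.Float}, []tf.Shape{tf.MakeShape(3)})
//	_ = comps[0] // the original vector
//	_ = has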
28411
28412// OptionsDatasetAttr is an optional argument to OptionsDataset.
28413type OptionsDatasetAttr func(optionalAttr)
28414
28415// OptionsDatasetMetadata sets the optional metadata attribute to value.
28416// If not specified, defaults to ""
28417func OptionsDatasetMetadata(value string) OptionsDatasetAttr {
28418	return func(m optionalAttr) {
28419		m["metadata"] = value
28420	}
28421}
28422
28423// Creates a dataset by attaching tf.data.Options to `input_dataset`.
28424//
28425// Arguments:
28426//
28427//	input_dataset: A variant tensor representing the input dataset.
28428//	serialized_options: A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` protocol buffer.
28429func OptionsDataset(scope *Scope, input_dataset tf.Output, serialized_options string, output_types []tf.DataType, output_shapes []tf.Shape, optional ...OptionsDatasetAttr) (handle tf.Output) {
28430	if scope.Err() != nil {
28431		return
28432	}
28433	attrs := map[string]interface{}{"serialized_options": serialized_options, "output_types": output_types, "output_shapes": output_shapes}
28434	for _, a := range optional {
28435		a(attrs)
28436	}
28437	opspec := tf.OpSpec{
28438		Type: "OptionsDataset",
28439		Input: []tf.Input{
28440			input_dataset,
28441		},
28442		Attrs: attrs,
28443	}
28444	op := scope.AddOperation(opspec)
28445	return op.Output(0)
28446}
28447
28448// OrderedMapClearAttr is an optional argument to OrderedMapClear.
28449type OrderedMapClearAttr func(optionalAttr)
28450
28451// OrderedMapClearCapacity sets the optional capacity attribute to value.
28452// If not specified, defaults to 0
28453//
28454// REQUIRES: value >= 0
28455func OrderedMapClearCapacity(value int64) OrderedMapClearAttr {
28456	return func(m optionalAttr) {
28457		m["capacity"] = value
28458	}
28459}
28460
28461// OrderedMapClearMemoryLimit sets the optional memory_limit attribute to value.
28462// If not specified, defaults to 0
28463//
28464// REQUIRES: value >= 0
28465func OrderedMapClearMemoryLimit(value int64) OrderedMapClearAttr {
28466	return func(m optionalAttr) {
28467		m["memory_limit"] = value
28468	}
28469}
28470
28471// OrderedMapClearContainer sets the optional container attribute to value.
28472// If not specified, defaults to ""
28473func OrderedMapClearContainer(value string) OrderedMapClearAttr {
28474	return func(m optionalAttr) {
28475		m["container"] = value
28476	}
28477}
28478
28479// OrderedMapClearSharedName sets the optional shared_name attribute to value.
28480// If not specified, defaults to ""
28481func OrderedMapClearSharedName(value string) OrderedMapClearAttr {
28482	return func(m optionalAttr) {
28483		m["shared_name"] = value
28484	}
28485}
28486
28487// Op removes all elements in the underlying container.
28488//
28489// Returns the created operation.
28490func OrderedMapClear(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapClearAttr) (o *tf.Operation) {
28491	if scope.Err() != nil {
28492		return
28493	}
28494	attrs := map[string]interface{}{"dtypes": dtypes}
28495	for _, a := range optional {
28496		a(attrs)
28497	}
28498	opspec := tf.OpSpec{
28499		Type: "OrderedMapClear",
28500
28501		Attrs: attrs,
28502	}
28503	return scope.AddOperation(opspec)
28504}
28505
28506// OrderedMapIncompleteSizeAttr is an optional argument to OrderedMapIncompleteSize.
28507type OrderedMapIncompleteSizeAttr func(optionalAttr)
28508
28509// OrderedMapIncompleteSizeCapacity sets the optional capacity attribute to value.
28510// If not specified, defaults to 0
28511//
28512// REQUIRES: value >= 0
28513func OrderedMapIncompleteSizeCapacity(value int64) OrderedMapIncompleteSizeAttr {
28514	return func(m optionalAttr) {
28515		m["capacity"] = value
28516	}
28517}
28518
28519// OrderedMapIncompleteSizeMemoryLimit sets the optional memory_limit attribute to value.
28520// If not specified, defaults to 0
28521//
28522// REQUIRES: value >= 0
28523func OrderedMapIncompleteSizeMemoryLimit(value int64) OrderedMapIncompleteSizeAttr {
28524	return func(m optionalAttr) {
28525		m["memory_limit"] = value
28526	}
28527}
28528
28529// OrderedMapIncompleteSizeContainer sets the optional container attribute to value.
28530// If not specified, defaults to ""
28531func OrderedMapIncompleteSizeContainer(value string) OrderedMapIncompleteSizeAttr {
28532	return func(m optionalAttr) {
28533		m["container"] = value
28534	}
28535}
28536
28537// OrderedMapIncompleteSizeSharedName sets the optional shared_name attribute to value.
28538// If not specified, defaults to ""
28539func OrderedMapIncompleteSizeSharedName(value string) OrderedMapIncompleteSizeAttr {
28540	return func(m optionalAttr) {
28541		m["shared_name"] = value
28542	}
28543}
28544
28545// Op returns the number of incomplete elements in the underlying container.
28546func OrderedMapIncompleteSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapIncompleteSizeAttr) (size tf.Output) {
28547	if scope.Err() != nil {
28548		return
28549	}
28550	attrs := map[string]interface{}{"dtypes": dtypes}
28551	for _, a := range optional {
28552		a(attrs)
28553	}
28554	opspec := tf.OpSpec{
28555		Type: "OrderedMapIncompleteSize",
28556
28557		Attrs: attrs,
28558	}
28559	op := scope.AddOperation(opspec)
28560	return op.Output(0)
28561}
28562
28563// OrderedMapPeekAttr is an optional argument to OrderedMapPeek.
28564type OrderedMapPeekAttr func(optionalAttr)
28565
28566// OrderedMapPeekCapacity sets the optional capacity attribute to value.
28567// If not specified, defaults to 0
28568//
28569// REQUIRES: value >= 0
28570func OrderedMapPeekCapacity(value int64) OrderedMapPeekAttr {
28571	return func(m optionalAttr) {
28572		m["capacity"] = value
28573	}
28574}
28575
28576// OrderedMapPeekMemoryLimit sets the optional memory_limit attribute to value.
28577// If not specified, defaults to 0
28578//
28579// REQUIRES: value >= 0
28580func OrderedMapPeekMemoryLimit(value int64) OrderedMapPeekAttr {
28581	return func(m optionalAttr) {
28582		m["memory_limit"] = value
28583	}
28584}
28585
28586// OrderedMapPeekContainer sets the optional container attribute to value.
28587// If not specified, defaults to ""
28588func OrderedMapPeekContainer(value string) OrderedMapPeekAttr {
28589	return func(m optionalAttr) {
28590		m["container"] = value
28591	}
28592}
28593
28594// OrderedMapPeekSharedName sets the optional shared_name attribute to value.
28595// If not specified, defaults to ""
28596func OrderedMapPeekSharedName(value string) OrderedMapPeekAttr {
28597	return func(m optionalAttr) {
28598		m["shared_name"] = value
28599	}
28600}
28601
28602// Op peeks at the values at the specified key.  If the
28603//
28604// underlying container does not contain this key,
28605// this op will block until it does.  This Op is optimized for
28606// performance.
28607func OrderedMapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapPeekAttr) (values []tf.Output) {
28608	if scope.Err() != nil {
28609		return
28610	}
28611	attrs := map[string]interface{}{"dtypes": dtypes}
28612	for _, a := range optional {
28613		a(attrs)
28614	}
28615	opspec := tf.OpSpec{
28616		Type: "OrderedMapPeek",
28617		Input: []tf.Input{
28618			key, indices,
28619		},
28620		Attrs: attrs,
28621	}
28622	op := scope.AddOperation(opspec)
28623	if scope.Err() != nil {
28624		return
28625	}
28626	var idx int
28627	var err error
28628	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
28629		scope.UpdateErr("OrderedMapPeek", err)
28630		return
28631	}
28632	return values
28633}
28634
28635// OrderedMapSizeAttr is an optional argument to OrderedMapSize.
28636type OrderedMapSizeAttr func(optionalAttr)
28637
28638// OrderedMapSizeCapacity sets the optional capacity attribute to value.
28639// If not specified, defaults to 0
28640//
28641// REQUIRES: value >= 0
28642func OrderedMapSizeCapacity(value int64) OrderedMapSizeAttr {
28643	return func(m optionalAttr) {
28644		m["capacity"] = value
28645	}
28646}
28647
28648// OrderedMapSizeMemoryLimit sets the optional memory_limit attribute to value.
28649// If not specified, defaults to 0
28650//
28651// REQUIRES: value >= 0
28652func OrderedMapSizeMemoryLimit(value int64) OrderedMapSizeAttr {
28653	return func(m optionalAttr) {
28654		m["memory_limit"] = value
28655	}
28656}
28657
28658// OrderedMapSizeContainer sets the optional container attribute to value.
28659// If not specified, defaults to ""
28660func OrderedMapSizeContainer(value string) OrderedMapSizeAttr {
28661	return func(m optionalAttr) {
28662		m["container"] = value
28663	}
28664}
28665
28666// OrderedMapSizeSharedName sets the optional shared_name attribute to value.
28667// If not specified, defaults to ""
28668func OrderedMapSizeSharedName(value string) OrderedMapSizeAttr {
28669	return func(m optionalAttr) {
28670		m["shared_name"] = value
28671	}
28672}
28673
28674// Op returns the number of elements in the underlying container.
28675func OrderedMapSize(scope *Scope, dtypes []tf.DataType, optional ...OrderedMapSizeAttr) (size tf.Output) {
28676	if scope.Err() != nil {
28677		return
28678	}
28679	attrs := map[string]interface{}{"dtypes": dtypes}
28680	for _, a := range optional {
28681		a(attrs)
28682	}
28683	opspec := tf.OpSpec{
28684		Type: "OrderedMapSize",
28685
28686		Attrs: attrs,
28687	}
28688	op := scope.AddOperation(opspec)
28689	return op.Output(0)
28690}
28691
28692// OrderedMapStageAttr is an optional argument to OrderedMapStage.
28693type OrderedMapStageAttr func(optionalAttr)
28694
28695// OrderedMapStageCapacity sets the optional capacity attribute to value.
28696//
28697// value: Maximum number of elements in the Staging Area. If > 0, inserts
28698// on the container will block when the capacity is reached.
28699// If not specified, defaults to 0
28700//
28701// REQUIRES: value >= 0
28702func OrderedMapStageCapacity(value int64) OrderedMapStageAttr {
28703	return func(m optionalAttr) {
28704		m["capacity"] = value
28705	}
28706}
28707
28708// OrderedMapStageMemoryLimit sets the optional memory_limit attribute to value.
28709// If not specified, defaults to 0
28710//
28711// REQUIRES: value >= 0
28712func OrderedMapStageMemoryLimit(value int64) OrderedMapStageAttr {
28713	return func(m optionalAttr) {
28714		m["memory_limit"] = value
28715	}
28716}
28717
28718// OrderedMapStageContainer sets the optional container attribute to value.
28719//
28720// value: If non-empty, this queue is placed in the given container. Otherwise,
28721// a default container is used.
28722// If not specified, defaults to ""
28723func OrderedMapStageContainer(value string) OrderedMapStageAttr {
28724	return func(m optionalAttr) {
28725		m["container"] = value
28726	}
28727}
28728
28729// OrderedMapStageSharedName sets the optional shared_name attribute to value.
28730//
28731// value: This name must match the name of the corresponding Unstage Op.
28732// If not specified, defaults to ""
28733func OrderedMapStageSharedName(value string) OrderedMapStageAttr {
28734	return func(m optionalAttr) {
28735		m["shared_name"] = value
28736	}
28737}
28738
28739// Stage (key, values) in the underlying container which behaves like an ordered
28740//
28741// associative container.  Elements are ordered by key.
28742//
28743// Arguments:
28744//
28745//	key: int64
28746//
28747//	values: a list of tensors
28748//
28749// dtypes: A list of data types that the inserted values should adhere to.
28750//
28751// Returns the created operation.
28752func OrderedMapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...OrderedMapStageAttr) (o *tf.Operation) {
28753	if scope.Err() != nil {
28754		return
28755	}
28756	attrs := map[string]interface{}{"dtypes": dtypes}
28757	for _, a := range optional {
28758		a(attrs)
28759	}
28760	opspec := tf.OpSpec{
28761		Type: "OrderedMapStage",
28762		Input: []tf.Input{
28763			key, indices, tf.OutputList(values),
28764		},
28765		Attrs: attrs,
28766	}
28767	return scope.AddOperation(opspec)
28768}
28769
28770// OrderedMapUnstageAttr is an optional argument to OrderedMapUnstage.
28771type OrderedMapUnstageAttr func(optionalAttr)
28772
28773// OrderedMapUnstageCapacity sets the optional capacity attribute to value.
28774// If not specified, defaults to 0
28775//
28776// REQUIRES: value >= 0
28777func OrderedMapUnstageCapacity(value int64) OrderedMapUnstageAttr {
28778	return func(m optionalAttr) {
28779		m["capacity"] = value
28780	}
28781}
28782
28783// OrderedMapUnstageMemoryLimit sets the optional memory_limit attribute to value.
28784// If not specified, defaults to 0
28785//
28786// REQUIRES: value >= 0
28787func OrderedMapUnstageMemoryLimit(value int64) OrderedMapUnstageAttr {
28788	return func(m optionalAttr) {
28789		m["memory_limit"] = value
28790	}
28791}
28792
28793// OrderedMapUnstageContainer sets the optional container attribute to value.
28794// If not specified, defaults to ""
28795func OrderedMapUnstageContainer(value string) OrderedMapUnstageAttr {
28796	return func(m optionalAttr) {
28797		m["container"] = value
28798	}
28799}
28800
28801// OrderedMapUnstageSharedName sets the optional shared_name attribute to value.
28802// If not specified, defaults to ""
28803func OrderedMapUnstageSharedName(value string) OrderedMapUnstageAttr {
28804	return func(m optionalAttr) {
28805		m["shared_name"] = value
28806	}
28807}
28808
28809// Op removes and returns the values associated with the key
28810//
28811// from the underlying container.   If the underlying container
28812// does not contain this key, the op will block until it does.
28813func OrderedMapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageAttr) (values []tf.Output) {
28814	if scope.Err() != nil {
28815		return
28816	}
28817	attrs := map[string]interface{}{"dtypes": dtypes}
28818	for _, a := range optional {
28819		a(attrs)
28820	}
28821	opspec := tf.OpSpec{
28822		Type: "OrderedMapUnstage",
28823		Input: []tf.Input{
28824			key, indices,
28825		},
28826		Attrs: attrs,
28827	}
28828	op := scope.AddOperation(opspec)
28829	if scope.Err() != nil {
28830		return
28831	}
28832	var idx int
28833	var err error
28834	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
28835		scope.UpdateErr("OrderedMapUnstage", err)
28836		return
28837	}
28838	return values
28839}
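
// Example (editorial sketch, not machine generated): stage a value under a
// key and remove it again. Both ops must name the same staging area via
// shared_name; run the stage operation as a target before fetching the
// unstaged values.
//
//	s := op.NewScope()
//	key := op.Const(s, int64(7))
//	indices := op.Const(s, []int32{0})
//	val := op.Const(s, []float32{1, 2, 3})
//	dtypes := []tf.DataType{tf.Float}
//	stage := op.OrderedMapStage(s.SubScope("stage"), key, indices,
//		[]tf.Output{val}, dtypes, op.OrderedMapStageSharedName("m"))
//	popped := op.OrderedMapUnstage(s.SubScope("unstage"), key, indices, dtypes,
//		op.OrderedMapUnstageSharedName("m"))
//	_, _ = stage, popped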
28840
28841// OrderedMapUnstageNoKeyAttr is an optional argument to OrderedMapUnstageNoKey.
28842type OrderedMapUnstageNoKeyAttr func(optionalAttr)
28843
28844// OrderedMapUnstageNoKeyCapacity sets the optional capacity attribute to value.
28845// If not specified, defaults to 0
28846//
28847// REQUIRES: value >= 0
28848func OrderedMapUnstageNoKeyCapacity(value int64) OrderedMapUnstageNoKeyAttr {
28849	return func(m optionalAttr) {
28850		m["capacity"] = value
28851	}
28852}
28853
28854// OrderedMapUnstageNoKeyMemoryLimit sets the optional memory_limit attribute to value.
28855// If not specified, defaults to 0
28856//
28857// REQUIRES: value >= 0
28858func OrderedMapUnstageNoKeyMemoryLimit(value int64) OrderedMapUnstageNoKeyAttr {
28859	return func(m optionalAttr) {
28860		m["memory_limit"] = value
28861	}
28862}
28863
28864// OrderedMapUnstageNoKeyContainer sets the optional container attribute to value.
28865// If not specified, defaults to ""
28866func OrderedMapUnstageNoKeyContainer(value string) OrderedMapUnstageNoKeyAttr {
28867	return func(m optionalAttr) {
28868		m["container"] = value
28869	}
28870}
28871
28872// OrderedMapUnstageNoKeySharedName sets the optional shared_name attribute to value.
28873// If not specified, defaults to ""
28874func OrderedMapUnstageNoKeySharedName(value string) OrderedMapUnstageNoKeyAttr {
28875	return func(m optionalAttr) {
28876		m["shared_name"] = value
28877	}
28878}
28879
28880// Op removes and returns the (key, value) element with the smallest
28881//
28882// key from the underlying container.   If the underlying container
28883// does not contain elements, the op will block until it does.
28884func OrderedMapUnstageNoKey(scope *Scope, indices tf.Output, dtypes []tf.DataType, optional ...OrderedMapUnstageNoKeyAttr) (key tf.Output, values []tf.Output) {
28885	if scope.Err() != nil {
28886		return
28887	}
28888	attrs := map[string]interface{}{"dtypes": dtypes}
28889	for _, a := range optional {
28890		a(attrs)
28891	}
28892	opspec := tf.OpSpec{
28893		Type: "OrderedMapUnstageNoKey",
28894		Input: []tf.Input{
28895			indices,
28896		},
28897		Attrs: attrs,
28898	}
28899	op := scope.AddOperation(opspec)
28900	if scope.Err() != nil {
28901		return
28902	}
28903	var idx int
28904	var err error
28905	key = op.Output(idx)
28906	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
28907		scope.UpdateErr("OrderedMapUnstageNoKey", err)
28908		return
28909	}
28910	return key, values
28911}
28912
28913// OutfeedDequeueAttr is an optional argument to OutfeedDequeue.
28914type OutfeedDequeueAttr func(optionalAttr)
28915
28916// OutfeedDequeueDeviceOrdinal sets the optional device_ordinal attribute to value.
28917//
28918// value: The TPU device to use. This should be -1 when the Op
28919// is running on a TPU device, and >= 0 when the Op is running on the CPU
28920// device.
28921// If not specified, defaults to -1
28922func OutfeedDequeueDeviceOrdinal(value int64) OutfeedDequeueAttr {
28923	return func(m optionalAttr) {
28924		m["device_ordinal"] = value
28925	}
28926}
28927
28928// Retrieves a single tensor from the computation outfeed.
28929//
28930// This operation will block indefinitely until data is available.
28931//
28932// Arguments:
28933//
28934//	dtype: The type of elements in the tensor.
28935//	shape: The shape of the tensor.
28936//
28937// Returns A tensor that will be read from the device outfeed.
28938func OutfeedDequeue(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...OutfeedDequeueAttr) (output tf.Output) {
28939	if scope.Err() != nil {
28940		return
28941	}
28942	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
28943	for _, a := range optional {
28944		a(attrs)
28945	}
28946	opspec := tf.OpSpec{
28947		Type: "OutfeedDequeue",
28948
28949		Attrs: attrs,
28950	}
28951	op := scope.AddOperation(opspec)
28952	return op.Output(0)
28953}
28954
28955// OutfeedDequeueTupleAttr is an optional argument to OutfeedDequeueTuple.
28956type OutfeedDequeueTupleAttr func(optionalAttr)
28957
28958// OutfeedDequeueTupleDeviceOrdinal sets the optional device_ordinal attribute to value.
28959//
28960// value: The TPU device to use. This should be -1 when the Op
28961// is running on a TPU device, and >= 0 when the Op is running on the CPU
28962// device.
28963// If not specified, defaults to -1
28964func OutfeedDequeueTupleDeviceOrdinal(value int64) OutfeedDequeueTupleAttr {
28965	return func(m optionalAttr) {
28966		m["device_ordinal"] = value
28967	}
28968}
28969
28970// Retrieve multiple values from the computation outfeed.
28971//
28972// This operation will block indefinitely until data is available. Output `i`
28973// corresponds to XLA tuple element `i`.
28974//
28975// Arguments:
28976//
28977//	dtypes: The element types of each element in `outputs`.
28978//	shapes: The shapes of each tensor in `outputs`.
28979//
28980// Returns A list of tensors that will be read from the outfeed.
28981func OutfeedDequeueTuple(scope *Scope, dtypes []tf.DataType, shapes []tf.Shape, optional ...OutfeedDequeueTupleAttr) (outputs []tf.Output) {
28982	if scope.Err() != nil {
28983		return
28984	}
28985	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
28986	for _, a := range optional {
28987		a(attrs)
28988	}
28989	opspec := tf.OpSpec{
28990		Type: "OutfeedDequeueTuple",
28991
28992		Attrs: attrs,
28993	}
28994	op := scope.AddOperation(opspec)
28995	if scope.Err() != nil {
28996		return
28997	}
28998	var idx int
28999	var err error
29000	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
29001		scope.UpdateErr("OutfeedDequeueTuple", err)
29002		return
29003	}
29004	return outputs
29005}
29006
29007// Retrieve multiple values from the computation outfeed. Device ordinal is a
29008// tensor allowing dynamic outfeed.
29009//
29010// This operation will block indefinitely until data is available. Output `i`
29011// corresponds to XLA tuple element `i`.
29012//
29013// Arguments:
29014//
29015//	device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
29016//
29017// the Op is running on a TPU device, and >= 0 when the Op is running on the CPU
29018// device.
29019//
29020//	dtypes: The element types of each element in `outputs`.
29021//	shapes: The shapes of each tensor in `outputs`.
29022//
29023// Returns A list of tensors that will be read from the outfeed.
29024func OutfeedDequeueTupleV2(scope *Scope, device_ordinal tf.Output, dtypes []tf.DataType, shapes []tf.Shape) (outputs []tf.Output) {
29025	if scope.Err() != nil {
29026		return
29027	}
29028	attrs := map[string]interface{}{"dtypes": dtypes, "shapes": shapes}
29029	opspec := tf.OpSpec{
29030		Type: "OutfeedDequeueTupleV2",
29031		Input: []tf.Input{
29032			device_ordinal,
29033		},
29034		Attrs: attrs,
29035	}
29036	op := scope.AddOperation(opspec)
29037	if scope.Err() != nil {
29038		return
29039	}
29040	var idx int
29041	var err error
29042	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
29043		scope.UpdateErr("OutfeedDequeueTupleV2", err)
29044		return
29045	}
29046	return outputs
29047}
29048
29049// Retrieves a single tensor from the computation outfeed. Device ordinal is a
29050// tensor allowing dynamic outfeed.
29051//
29052// This operation will block indefinitely until data is available.
29053//
29054// Arguments:
29055//
29056//	device_ordinal: An int scalar tensor, representing the TPU device to use. This should be -1 when
29057//
29058// the Op is running on a TPU device, and >= 0 when the Op is running on the CPU
29059// device.
29060//
29061//	dtype: The type of elements in the tensor.
29062//	shape: The shape of the tensor.
29063//
29064// Returns A tensor that will be read from the device outfeed.
29065func OutfeedDequeueV2(scope *Scope, device_ordinal tf.Output, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
29066	if scope.Err() != nil {
29067		return
29068	}
29069	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
29070	opspec := tf.OpSpec{
29071		Type: "OutfeedDequeueV2",
29072		Input: []tf.Input{
29073			device_ordinal,
29074		},
29075		Attrs: attrs,
29076	}
29077	op := scope.AddOperation(opspec)
29078	return op.Output(0)
29079}
29080
29081// Enqueue a Tensor on the computation outfeed.
29082//
29083// Arguments:
29084//
29085//	input: A tensor that will be inserted into the outfeed queue.
29086//
29087// Returns the created operation.
29088func OutfeedEnqueue(scope *Scope, input tf.Output) (o *tf.Operation) {
29089	if scope.Err() != nil {
29090		return
29091	}
29092	opspec := tf.OpSpec{
29093		Type: "OutfeedEnqueue",
29094		Input: []tf.Input{
29095			input,
29096		},
29097	}
29098	return scope.AddOperation(opspec)
29099}
29100
29101// Enqueue multiple Tensor values on the computation outfeed.
29102//
29103// Arguments:
29104//
29105//	inputs: A list of tensors that will be inserted into the outfeed queue as an
29106//
29107// XLA tuple.
29108//
29109// Returns the created operation.
29110func OutfeedEnqueueTuple(scope *Scope, inputs []tf.Output) (o *tf.Operation) {
29111	if scope.Err() != nil {
29112		return
29113	}
29114	opspec := tf.OpSpec{
29115		Type: "OutfeedEnqueueTuple",
29116		Input: []tf.Input{
29117			tf.OutputList(inputs),
29118		},
29119	}
29120	return scope.AddOperation(opspec)
29121}
29122
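// exampleOutfeedRoundTrip is an editorial sketch, not a generated wrapper: it
// shows how OutfeedEnqueueTuple and OutfeedDequeueTuple pair up, with the
// dequeue side declaring dtypes and shapes that match the enqueued tuple. The
// function name and constants are made up, and actually running the graph
// requires a TPU system; only graph construction is shown here.
func exampleOutfeedRoundTrip() []tf.Output {
	s := NewScope()
	a := Const(s, int32(1))        // scalar int32
	b := Const(s, []float32{2, 3}) // float32 vector of length 2
	// TPU side: enqueue both tensors as a single XLA tuple.
	OutfeedEnqueueTuple(s, []tf.Output{a, b})
	// Host side: dequeue with matching element types and shapes.
	return OutfeedDequeueTuple(s,
		[]tf.DataType{tf.Int32, tf.Float},
		[]tf.Shape{tf.ScalarShape(), tf.MakeShape(2)})
}
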
29123// PackAttr is an optional argument to Pack.
29124type PackAttr func(optionalAttr)
29125
29126// PackAxis sets the optional axis attribute to value.
29127//
29128// value: Dimension along which to pack.  Negative values wrap around, so the
29129// valid range is `[-(R+1), R+1)`.
29130// If not specified, defaults to 0
29131func PackAxis(value int64) PackAttr {
29132	return func(m optionalAttr) {
29133		m["axis"] = value
29134	}
29135}
29136
29137// Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
29138//
29139// Packs the `N` tensors in `values` into a tensor with rank one higher than each
29140// tensor in `values`, by packing them along the `axis` dimension.
29141// Given a list of tensors of shape `(A, B, C)`;
29142//
29143// if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
29144// if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
29145// Etc.
29146//
29147// For example:
29148//
29149// ```
29150// # 'x' is [1, 4]
29151// # 'y' is [2, 5]
29152// # 'z' is [3, 6]
29153// pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
29154// pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
29155// ```
29156//
29157// This is the opposite of `unpack`.
29158//
29159// Arguments:
29160//
29161//	values: Must be of the same shape and type.
29162//
29163// Returns The packed tensor.
29164func Pack(scope *Scope, values []tf.Output, optional ...PackAttr) (output tf.Output) {
29165	if scope.Err() != nil {
29166		return
29167	}
29168	attrs := map[string]interface{}{}
29169	for _, a := range optional {
29170		a(attrs)
29171	}
29172	opspec := tf.OpSpec{
29173		Type: "Pack",
29174		Input: []tf.Input{
29175			tf.OutputList(values),
29176		},
29177		Attrs: attrs,
29178	}
29179	op := scope.AddOperation(opspec)
29180	return op.Output(0)
29181}
29182
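// examplePack is an editorial sketch, not a generated wrapper: it reproduces
// the doc example above, packing three rank-1 constants into one rank-2
// tensor. With the default axis of 0 the result is [[1, 4], [2, 5], [3, 6]];
// passing PackAxis(1) instead would yield [[1, 2, 3], [4, 5, 6]].
func examplePack() tf.Output {
	s := NewScope()
	x := Const(s, []int32{1, 4})
	y := Const(s, []int32{2, 5})
	z := Const(s, []int32{3, 6})
	return Pack(s, []tf.Output{x, y, z})
}
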
29183// Pads a tensor with zeros.
29184//
29185// This operation pads `input` with zeros according to the `paddings` you
29186// specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
29187// rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
29188// how many zeros to add before the contents of `input` in that dimension, and
29189// `paddings[D, 1]` indicates how many zeros to add after the contents of `input`
29190// in that dimension.
29191//
29192// The padded size of each dimension D of the output is:
29193//
29194// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
29195//
29196// For example:
29197//
29198// ```
29199// # 't' is [[1, 1], [2, 2]]
29200// # 'paddings' is [[1, 1], [2, 2]]
29201// # rank of 't' is 2
29202// pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
29203//
29204//	[0, 0, 1, 1, 0, 0]
29205//	[0, 0, 2, 2, 0, 0]
29206//	[0, 0, 0, 0, 0, 0]]
29207//
29208// ```
29209func Pad(scope *Scope, input tf.Output, paddings tf.Output) (output tf.Output) {
29210	if scope.Err() != nil {
29211		return
29212	}
29213	opspec := tf.OpSpec{
29214		Type: "Pad",
29215		Input: []tf.Input{
29216			input, paddings,
29217		},
29218	}
29219	op := scope.AddOperation(opspec)
29220	return op.Output(0)
29221}
29222
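// examplePad is an editorial sketch, not a generated wrapper: it builds the
// doc example above, padding a 2x2 matrix with one row of zeros above and
// below and two columns of zeros on each side, giving a 4x6 result.
func examplePad() tf.Output {
	s := NewScope()
	t := Const(s, [][]int32{{1, 1}, {2, 2}})
	paddings := Const(s, [][]int32{{1, 1}, {2, 2}}) // [before, after] per dimension
	return Pad(s, t, paddings)
}
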
29223// Pads a tensor.
29224//
29225// This operation pads `input` according to the `paddings` and `constant_values`
29226// you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
29227// the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
29228// how many padding values to add before the contents of `input` in that dimension,
29229// and `paddings[D, 1]` indicates how many padding values to add after the contents
29230// of `input` in that dimension. `constant_values` is a scalar tensor of the same
29231// type as `input` that indicates the value to use for padding `input`.
29232//
29233// The padded size of each dimension D of the output is:
29234//
29235// `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
29236//
29237// For example:
29238//
29239// ```
29240// # 't' is [[1, 1], [2, 2]]
29241// # 'paddings' is [[1, 1], [2, 2]]
29242// # 'constant_values' is 0
29243// # rank of 't' is 2
29244// pad(t, paddings, constant_values) ==> [[0, 0, 0, 0, 0, 0]
29245//
29246//	[0, 0, 1, 1, 0, 0]
29247//	[0, 0, 2, 2, 0, 0]
29248//	[0, 0, 0, 0, 0, 0]]
29249//
29250// ```
29251func PadV2(scope *Scope, input tf.Output, paddings tf.Output, constant_values tf.Output) (output tf.Output) {
29252	if scope.Err() != nil {
29253		return
29254	}
29255	opspec := tf.OpSpec{
29256		Type: "PadV2",
29257		Input: []tf.Input{
29258			input, paddings, constant_values,
29259		},
29260	}
29261	op := scope.AddOperation(opspec)
29262	return op.Output(0)
29263}
29264
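// examplePadV2 is an editorial sketch, not a generated wrapper: the same
// padding as the Pad example, but filled with a caller-chosen constant (9
// here) instead of zeros. constant_values must be a scalar of input's type.
func examplePadV2() tf.Output {
	s := NewScope()
	t := Const(s, [][]int32{{1, 1}, {2, 2}})
	paddings := Const(s, [][]int32{{1, 1}, {2, 2}})
	nine := Const(s, int32(9))
	return PadV2(s, t, paddings, nine)
}
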
29265// PaddedBatchDatasetAttr is an optional argument to PaddedBatchDataset.
29266type PaddedBatchDatasetAttr func(optionalAttr)
29267
29268// PaddedBatchDatasetMetadata sets the optional metadata attribute to value.
29269// If not specified, defaults to ""
29270func PaddedBatchDatasetMetadata(value string) PaddedBatchDatasetAttr {
29271	return func(m optionalAttr) {
29272		m["metadata"] = value
29273	}
29274}
29275
29276// Creates a dataset that batches and pads `batch_size` elements from the input.
29277//
29278// Arguments:
29279//
29280//	batch_size: A scalar representing the number of elements to accumulate in a
29281//
29282// batch.
29283//
29284//	padded_shapes: A list of int64 tensors representing the desired padded shapes
29285//
29286// of the corresponding output components. These shapes may be partially
29287// specified, using `-1` to indicate that a particular dimension should be
29288// padded to the maximum size of all batch elements.
29289//
29290//	padding_values: A list of scalars containing the padding value to use for
29291//
29292// each of the outputs.
29293func PaddedBatchDataset(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetAttr) (handle tf.Output) {
29294	if scope.Err() != nil {
29295		return
29296	}
29297	attrs := map[string]interface{}{"output_shapes": output_shapes}
29298	for _, a := range optional {
29299		a(attrs)
29300	}
29301	opspec := tf.OpSpec{
29302		Type: "PaddedBatchDataset",
29303		Input: []tf.Input{
29304			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values),
29305		},
29306		Attrs: attrs,
29307	}
29308	op := scope.AddOperation(opspec)
29309	return op.Output(0)
29310}
29311
29312// PaddedBatchDatasetV2Attr is an optional argument to PaddedBatchDatasetV2.
29313type PaddedBatchDatasetV2Attr func(optionalAttr)
29314
29315// PaddedBatchDatasetV2ParallelCopy sets the optional parallel_copy attribute to value.
29316// If not specified, defaults to false
29317func PaddedBatchDatasetV2ParallelCopy(value bool) PaddedBatchDatasetV2Attr {
29318	return func(m optionalAttr) {
29319		m["parallel_copy"] = value
29320	}
29321}
29322
29323// PaddedBatchDatasetV2Metadata sets the optional metadata attribute to value.
29324// If not specified, defaults to ""
29325func PaddedBatchDatasetV2Metadata(value string) PaddedBatchDatasetV2Attr {
29326	return func(m optionalAttr) {
29327		m["metadata"] = value
29328	}
29329}
29330
29331// Creates a dataset that batches and pads `batch_size` elements from the input.
29332//
29333// Arguments:
29334//
29335//	batch_size: A scalar representing the number of elements to accumulate in a
29336//
29337// batch.
29338//
29339//	padded_shapes: A list of int64 tensors representing the desired padded shapes
29340//
29341// of the corresponding output components. These shapes may be partially
29342// specified, using `-1` to indicate that a particular dimension should be
29343// padded to the maximum size of all batch elements.
29344//
29345//	padding_values: A list of scalars containing the padding value to use for
29346//
29347// each of the outputs.
29348//
29349//	drop_remainder: A scalar representing whether the last batch should be dropped in case its size
29350//
29351// is smaller than desired.
29352func PaddedBatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_size tf.Output, padded_shapes []tf.Output, padding_values []tf.Output, drop_remainder tf.Output, output_shapes []tf.Shape, optional ...PaddedBatchDatasetV2Attr) (handle tf.Output) {
29353	if scope.Err() != nil {
29354		return
29355	}
29356	attrs := map[string]interface{}{"output_shapes": output_shapes}
29357	for _, a := range optional {
29358		a(attrs)
29359	}
29360	opspec := tf.OpSpec{
29361		Type: "PaddedBatchDatasetV2",
29362		Input: []tf.Input{
29363			input_dataset, batch_size, tf.OutputList(padded_shapes), tf.OutputList(padding_values), drop_remainder,
29364		},
29365		Attrs: attrs,
29366	}
29367	op := scope.AddOperation(opspec)
29368	return op.Output(0)
29369}
29370
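// examplePaddedBatch is an editorial sketch, not a generated wrapper: given
// an existing dataset handle whose single component is an int64 vector of
// varying length, it batches 4 elements at a time, padding each vector with
// zeros to the longest vector in the batch (a padded shape of -1 means "pad
// to the batch maximum"). The parameter names are made up for illustration.
func examplePaddedBatch(s *Scope, inputDataset tf.Output) tf.Output {
	batchSize := Const(s, int64(4))
	paddedShapes := []tf.Output{Const(s, []int64{-1})}
	paddingValues := []tf.Output{Const(s, int64(0))}
	dropRemainder := Const(s, false)
	return PaddedBatchDatasetV2(s, inputDataset, batchSize, paddedShapes,
		paddingValues, dropRemainder, []tf.Shape{tf.MakeShape(-1, -1)})
}
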
29371// PaddingFIFOQueueV2Attr is an optional argument to PaddingFIFOQueueV2.
29372type PaddingFIFOQueueV2Attr func(optionalAttr)
29373
29374// PaddingFIFOQueueV2Shapes sets the optional shapes attribute to value.
29375//
29376// value: The shape of each component in a value. The length of this attr must
29377// be either 0 or the same as the length of component_types.
29378// Shapes of fixed rank but variable size are allowed by setting
29379// any shape dimension to -1.  In this case, the inputs' shape may vary along
29380// the given dimension, and DequeueMany will pad the given dimension with
29381// zeros up to the maximum shape of all elements in the given batch.
29382// If the length of this attr is 0, different queue elements may have
29383// different ranks and shapes, but only one element may be dequeued at a time.
29384// If not specified, defaults to {}
29385//
29386// REQUIRES: len(value) >= 0
29387func PaddingFIFOQueueV2Shapes(value []tf.Shape) PaddingFIFOQueueV2Attr {
29388	return func(m optionalAttr) {
29389		m["shapes"] = value
29390	}
29391}
29392
29393// PaddingFIFOQueueV2Capacity sets the optional capacity attribute to value.
29394//
29395// value: The upper bound on the number of elements in this queue.
29396// Negative numbers mean no limit.
29397// If not specified, defaults to -1
29398func PaddingFIFOQueueV2Capacity(value int64) PaddingFIFOQueueV2Attr {
29399	return func(m optionalAttr) {
29400		m["capacity"] = value
29401	}
29402}
29403
29404// PaddingFIFOQueueV2Container sets the optional container attribute to value.
29405//
29406// value: If non-empty, this queue is placed in the given container.
29407// Otherwise, a default container is used.
29408// If not specified, defaults to ""
29409func PaddingFIFOQueueV2Container(value string) PaddingFIFOQueueV2Attr {
29410	return func(m optionalAttr) {
29411		m["container"] = value
29412	}
29413}
29414
29415// PaddingFIFOQueueV2SharedName sets the optional shared_name attribute to value.
29416//
29417// value: If non-empty, this queue will be shared under the given name
29418// across multiple sessions.
29419// If not specified, defaults to ""
29420func PaddingFIFOQueueV2SharedName(value string) PaddingFIFOQueueV2Attr {
29421	return func(m optionalAttr) {
29422		m["shared_name"] = value
29423	}
29424}
29425
29426// A queue that produces elements in first-in first-out order.
29427//
29428// Variable-size shapes are allowed by setting the corresponding shape dimensions
29429// to -1 in the shape attr.  In this case DequeueMany will pad up to the maximum
29430// size of any given element in the minibatch.  See the `shapes` attr above for details.
29431//
29432// Arguments:
29433//
29434//	component_types: The type of each component in a value.
29435//
29436// Returns The handle to the queue.
29437func PaddingFIFOQueueV2(scope *Scope, component_types []tf.DataType, optional ...PaddingFIFOQueueV2Attr) (handle tf.Output) {
29438	if scope.Err() != nil {
29439		return
29440	}
29441	attrs := map[string]interface{}{"component_types": component_types}
29442	for _, a := range optional {
29443		a(attrs)
29444	}
29445	opspec := tf.OpSpec{
29446		Type: "PaddingFIFOQueueV2",
29447
29448		Attrs: attrs,
29449	}
29450	op := scope.AddOperation(opspec)
29451	return op.Output(0)
29452}
29453
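// examplePaddingQueue is an editorial sketch, not a generated wrapper: it
// creates a queue holding one float32 component of fixed rank 2 whose second
// dimension may vary (-1); DequeueMany would pad that dimension with zeros up
// to the largest element in the dequeued batch.
func examplePaddingQueue() tf.Output {
	s := NewScope()
	return PaddingFIFOQueueV2(s,
		[]tf.DataType{tf.Float},
		PaddingFIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(3, -1)}),
		PaddingFIFOQueueV2Capacity(32))
}
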
29454// Concatenates a list of `N` tensors along the first dimension.
29455//
29456// The input tensors are all required to have size 1 in the first dimension.
29457//
29458// For example:
29459//
29460// ```
29461// # 'x' is [[1, 4]]
29462// # 'y' is [[2, 5]]
29463// # 'z' is [[3, 6]]
29464// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
29465// ```
29466//
29467// The difference between concat and parallel_concat is that concat requires all
29468// of the inputs to be computed before the operation begins, but doesn't require
29469// that the input shapes be known during graph construction.  Parallel concat
29470// will copy pieces of the input into the output as they become available; in
29471// some situations this can provide a performance benefit.
29472//
29473// Arguments:
29474//
29475//	values: Tensors to be concatenated. All must have size 1 in the first dimension
29476//
29477// and the same shape.
29478//
29479//	shape: the final shape of the result; should be equal to the shapes of any input
29480//
29481// but with the number of input values in the first dimension.
29482//
29483// Returns The concatenated tensor.
29484func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) {
29485	if scope.Err() != nil {
29486		return
29487	}
29488	attrs := map[string]interface{}{"shape": shape}
29489	opspec := tf.OpSpec{
29490		Type: "ParallelConcat",
29491		Input: []tf.Input{
29492			tf.OutputList(values),
29493		},
29494		Attrs: attrs,
29495	}
29496	op := scope.AddOperation(opspec)
29497	return op.Output(0)
29498}
29499
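// exampleParallelConcat is an editorial sketch, not a generated wrapper: it
// mirrors the doc example above. Unlike Pack, the final shape must be stated
// up front, which is what lets pieces be copied into the output as soon as
// they are computed.
func exampleParallelConcat() tf.Output {
	s := NewScope()
	x := Const(s, [][]int32{{1, 4}})
	y := Const(s, [][]int32{{2, 5}})
	z := Const(s, [][]int32{{3, 6}})
	return ParallelConcat(s, []tf.Output{x, y, z}, tf.MakeShape(3, 2))
}
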
29500// Interleave the values from the `data` tensors into a single tensor.
29501//
29502// Builds a merged tensor such that
29503//
29504// ```python
29505//
29506//	merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
29507//
29508// ```
29509//
29510// For example, if each `indices[m]` is scalar or vector, we have
29511//
29512// ```python
29513//
29514//	# Scalar indices:
29515//	merged[indices[m], ...] = data[m][...]
29516//
29517//	# Vector indices:
29518//	merged[indices[m][i], ...] = data[m][i, ...]
29519//
29520// ```
29521//
29522// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
29523// and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
29524// must have `data[i].shape = indices[i].shape + constant`.  In terms of this
29525// `constant`, the output shape is
29526//
29527//	merged.shape = [max(indices)] + constant
29528//
29529// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
29530// and `indices[n][j]`, the result may be invalid. This differs from the normal
29531// DynamicStitch operator, which defines the behavior in that case.
29532//
29533// For example:
29534//
29535// ```python
29536//
29537//	indices[0] = 6
29538//	indices[1] = [4, 1]
29539//	indices[2] = [[5, 2], [0, 3]]
29540//	data[0] = [61, 62]
29541//	data[1] = [[41, 42], [11, 12]]
29542//	data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
29543//	merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
29544//	          [51, 52], [61, 62]]
29545//
29546// ```
29547//
29548// This method can be used to merge partitions created by `dynamic_partition`
29549// as illustrated on the following example:
29550//
29551// ```python
29552//
29553//	# Apply a function (increment x_i) to elements for which a certain condition
29554//	# applies (x_i != -1 in this example).
29555//	x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
29556//	condition_mask = tf.not_equal(x, tf.constant(-1.))
29557//	partitioned_data = tf.dynamic_partition(
29558//	    x, tf.cast(condition_mask, tf.int32), 2)
29559//	partitioned_data[1] = partitioned_data[1] + 1.0
29560//	condition_indices = tf.dynamic_partition(
29561//	    tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
29562//	x = tf.dynamic_stitch(condition_indices, partitioned_data)
29563//	# Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
29564//	# unchanged.
29565//
29566// ```
29567//
29568// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
29569// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
29570// </div>
29571func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
29572	if scope.Err() != nil {
29573		return
29574	}
29575	opspec := tf.OpSpec{
29576		Type: "ParallelDynamicStitch",
29577		Input: []tf.Input{
29578			tf.OutputList(indices), tf.OutputList(data),
29579		},
29580	}
29581	op := scope.AddOperation(opspec)
29582	return op.Output(0)
29583}
29584
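// exampleParallelDynamicStitch is an editorial sketch, not a generated
// wrapper: the two index vectors below are disjoint, so the parallel merge is
// well defined and yields merged = [10, 20, 30, 40].
func exampleParallelDynamicStitch() tf.Output {
	s := NewScope()
	indices := []tf.Output{
		Const(s, []int32{0, 2}),
		Const(s, []int32{1, 3}),
	}
	data := []tf.Output{
		Const(s, []float32{10, 30}),
		Const(s, []float32{20, 40}),
	}
	return ParallelDynamicStitch(s, indices, data)
}
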
29585// ParameterizedTruncatedNormalAttr is an optional argument to ParameterizedTruncatedNormal.
29586type ParameterizedTruncatedNormalAttr func(optionalAttr)
29587
29588// ParameterizedTruncatedNormalSeed sets the optional seed attribute to value.
29589//
29590// value: If either `seed` or `seed2` are set to be non-zero, the random number
29591// generator is seeded by the given seed.  Otherwise, it is seeded by a
29592// random seed.
29593// If not specified, defaults to 0
29594func ParameterizedTruncatedNormalSeed(value int64) ParameterizedTruncatedNormalAttr {
29595	return func(m optionalAttr) {
29596		m["seed"] = value
29597	}
29598}
29599
29600// ParameterizedTruncatedNormalSeed2 sets the optional seed2 attribute to value.
29601//
29602// value: A second seed to avoid seed collision.
29603// If not specified, defaults to 0
29604func ParameterizedTruncatedNormalSeed2(value int64) ParameterizedTruncatedNormalAttr {
29605	return func(m optionalAttr) {
29606		m["seed2"] = value
29607	}
29608}
29609
29610// Outputs random values from a truncated normal distribution. The parameters may each be a
29611//
29612// scalar which applies to the entire output, or a vector of length shape[0] which
29613// stores the parameters for each batch.
29614//
29615// Arguments:
29616//
29617//	shape: The shape of the output tensor. Batches are indexed by the 0th dimension.
29618//	means: The mean parameter of each batch.
29619//	stdevs: The standard deviation parameter of each batch. Must be greater than 0.
29620//	minvals: The minimum cutoff. May be -infinity.
29621//	maxvals: The maximum cutoff. May be +infinity, and must be more than the minval
29622//
29623// for each batch.
29624//
29625// Returns A matrix of shape num_batches x samples_per_batch, filled with random
29626// truncated normal values using the parameters for each row.
29627func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Output, stdevs tf.Output, minvals tf.Output, maxvals tf.Output, optional ...ParameterizedTruncatedNormalAttr) (output tf.Output) {
29628	if scope.Err() != nil {
29629		return
29630	}
29631	attrs := map[string]interface{}{}
29632	for _, a := range optional {
29633		a(attrs)
29634	}
29635	opspec := tf.OpSpec{
29636		Type: "ParameterizedTruncatedNormal",
29637		Input: []tf.Input{
29638			shape, means, stdevs, minvals, maxvals,
29639		},
29640		Attrs: attrs,
29641	}
29642	op := scope.AddOperation(opspec)
29643	return op.Output(0)
29644}
29645
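// exampleParameterizedTruncatedNormal is an editorial sketch, not a generated
// wrapper: it draws a 2 x 8 matrix (2 batches of 8 samples), where each row
// uses its own mean, stddev, and cutoffs, and pins the seed for
// reproducibility. All constants are made up for illustration.
func exampleParameterizedTruncatedNormal() tf.Output {
	s := NewScope()
	shape := Const(s, []int32{2, 8})
	means := Const(s, []float32{0, 5})
	stdevs := Const(s, []float32{1, 2})
	minvals := Const(s, []float32{-1, 0})
	maxvals := Const(s, []float32{1, 10})
	return ParameterizedTruncatedNormal(s, shape, means, stdevs, minvals, maxvals,
		ParameterizedTruncatedNormalSeed(42))
}
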
29646// Transforms a vector of brain.Example protos (as strings) into typed tensors.
29647//
29648// Arguments:
29649//
29650//	serialized: A vector containing a batch of binary serialized Example protos.
29651//	names: A vector containing the names of the serialized protos.
29652//
29653// May contain, for example, table key (descriptive) names for the
29654// corresponding serialized protos.  These are purely useful for debugging
29655// purposes, and the presence of values here has no effect on the output.
29656// May also be an empty vector if no names are available.
29657// If non-empty, this vector must be the same length as "serialized".
29658//
29659//	sparse_keys: A list of Nsparse string Tensors (scalars).
29660//
29661// The keys expected in the Examples' features associated with sparse values.
29662//
29663//	dense_keys: A list of Ndense string Tensors (scalars).
29664//
29665// The keys expected in the Examples' features associated with dense values.
29666//
29667//	dense_defaults: A list of Ndense Tensors (some may be empty).
29668//
29669// dense_defaults[j] provides default values
29670// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
29671// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
29672// The input type is inferred from dense_defaults[j], even when it's empty.
29673// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
29674// then the shape of dense_defaults[j] must match that of dense_shapes[j].
29675// If dense_shapes[j] has an undefined major dimension (variable strides dense
29676// feature), dense_defaults[j] must contain a single element:
29677// the padding element.
29678//
29679//	sparse_types: A list of Nsparse types; the data types of data in each Feature
29680//
29681// given in sparse_keys.
29682// Currently ParseExample supports DT_FLOAT (FloatList),
29683// DT_INT64 (Int64List), and DT_STRING (BytesList).
29684//
29685//	dense_shapes: A list of Ndense shapes; the shapes of data in each Feature
29686//
29687// given in dense_keys.
29688// The number of elements in the Feature corresponding to dense_key[j]
29689// must always equal dense_shapes[j].NumEntries().
29690// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
29691// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
29692// The dense outputs are just the inputs row-stacked by batch.
29693// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
29694// the shape of the output Tensor dense_values[j] will be
29695// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
29696// of elements of length D1 * .... * DN, across all minibatch entries
29697// in the input.  Any minibatch entry with less than M blocks of elements of
29698// length D1 * ... * DN will be padded with the corresponding default_value
29699// scalar element along the second dimension.
29700func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys []tf.Output, dense_keys []tf.Output, dense_defaults []tf.Output, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
29701	if scope.Err() != nil {
29702		return
29703	}
29704	attrs := map[string]interface{}{"sparse_types": sparse_types, "dense_shapes": dense_shapes}
29705	opspec := tf.OpSpec{
29706		Type: "ParseExample",
29707		Input: []tf.Input{
29708			serialized, names, tf.OutputList(sparse_keys), tf.OutputList(dense_keys), tf.OutputList(dense_defaults),
29709		},
29710		Attrs: attrs,
29711	}
29712	op := scope.AddOperation(opspec)
29713	if scope.Err() != nil {
29714		return
29715	}
29716	var idx int
29717	var err error
29718	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
29719		scope.UpdateErr("ParseExample", err)
29720		return
29721	}
29722	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
29723		scope.UpdateErr("ParseExample", err)
29724		return
29725	}
29726	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
29727		scope.UpdateErr("ParseExample", err)
29728		return
29729	}
29730	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
29731		scope.UpdateErr("ParseExample", err)
29732		return
29733	}
29734	return sparse_indices, sparse_values, sparse_shapes, dense_values
29735}
29736
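// exampleParseExample is an editorial sketch, not a generated wrapper: it
// parses a batch of serialized Example protos with one sparse string feature
// ("tags") and one dense int64 feature ("age", defaulting to -1 when absent).
// The serialized strings, names, and feature keys are all made up; an empty
// string parses as an empty Example, so the dense default is what comes back.
func exampleParseExample() ([]tf.Output, []tf.Output) {
	s := NewScope()
	serialized := Const(s, []string{"", ""})
	names := Const(s, []string{"rec0", "rec1"})
	sparseKeys := []tf.Output{Const(s, "tags")}
	denseKeys := []tf.Output{Const(s, "age")}
	denseDefaults := []tf.Output{Const(s, []int64{-1})}
	_, sparseValues, _, denseValues := ParseExample(s, serialized, names,
		sparseKeys, denseKeys, denseDefaults,
		[]tf.DataType{tf.String},    // sparse_types, one per sparse key
		[]tf.Shape{tf.MakeShape(1)}) // dense_shapes, one per dense key
	return sparseValues, denseValues
}
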
29737// ParseExampleDatasetAttr is an optional argument to ParseExampleDataset.
29738type ParseExampleDatasetAttr func(optionalAttr)
29739
29740// ParseExampleDatasetSloppy sets the optional sloppy attribute to value.
29741// If not specified, defaults to false
29742func ParseExampleDatasetSloppy(value bool) ParseExampleDatasetAttr {
29743	return func(m optionalAttr) {
29744		m["sloppy"] = value
29745	}
29746}
29747
29748// ParseExampleDatasetRaggedKeys sets the optional ragged_keys attribute to value.
29749// If not specified, defaults to {}
29750//
29751// REQUIRES: len(value) >= 0
29752func ParseExampleDatasetRaggedKeys(value []string) ParseExampleDatasetAttr {
29753	return func(m optionalAttr) {
29754		m["ragged_keys"] = value
29755	}
29756}
29757
29758// ParseExampleDatasetRaggedValueTypes sets the optional ragged_value_types attribute to value.
29759// If not specified, defaults to {}
29760//
29761// REQUIRES: len(value) >= 0
29762func ParseExampleDatasetRaggedValueTypes(value []tf.DataType) ParseExampleDatasetAttr {
29763	return func(m optionalAttr) {
29764		m["ragged_value_types"] = value
29765	}
29766}
29767
29768// ParseExampleDatasetRaggedSplitTypes sets the optional ragged_split_types attribute to value.
29769// If not specified, defaults to {}
29770//
29771// REQUIRES: len(value) >= 0
29772func ParseExampleDatasetRaggedSplitTypes(value []tf.DataType) ParseExampleDatasetAttr {
29773	return func(m optionalAttr) {
29774		m["ragged_split_types"] = value
29775	}
29776}
29777
29778// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
29779//
29780// Arguments:
29781//
29782//	dense_defaults: A dict mapping string keys to `Tensor`s.
29783//
29784// The keys of the dict must match the dense_keys of the feature.
29785//
29786//	sparse_keys: A list of string keys in the examples features.
29787//
29788// The results for these keys will be returned as `SparseTensor` objects.
29789//
29790//	dense_keys: A list of Ndense string Tensors (scalars).
29791//
29792// The keys expected in the Examples features associated with dense values.
29793//
29794//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
29795//
29796// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
29797// and `tf.string` (`BytesList`) are supported.
29798//
29799//	dense_shapes: List of tuples with the same length as `dense_keys`.
29800//
29801// The shape of the data for each dense feature referenced by `dense_keys`.
29802// Required for any input tensors identified by `dense_keys`.  Must be
29803// either fully defined, or may contain an unknown first dimension.
29804// An unknown first dimension means the feature is treated as having
29805// a variable number of blocks, and the output shape along this dimension
29806// is considered unknown at graph build time.  Padding is applied for
29807// minibatch elements smaller than the maximum number of blocks for the
29808// given feature along this dimension.
29809//
29810//	output_types: The type list for the return values.
29811//	output_shapes: The list of shapes being produced.
29812func ParseExampleDataset(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetAttr) (handle tf.Output) {
29813	if scope.Err() != nil {
29814		return
29815	}
29816	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
29817	for _, a := range optional {
29818		a(attrs)
29819	}
29820	opspec := tf.OpSpec{
29821		Type: "ParseExampleDataset",
29822		Input: []tf.Input{
29823			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
29824		},
29825		Attrs: attrs,
29826	}
29827	op := scope.AddOperation(opspec)
29828	return op.Output(0)
29829}
29830
29831// ParseExampleDatasetV2Attr is an optional argument to ParseExampleDatasetV2.
29832type ParseExampleDatasetV2Attr func(optionalAttr)
29833
29834// ParseExampleDatasetV2Deterministic sets the optional deterministic attribute to value.
29835//
29836// value: A string indicating the op-level determinism to use. Deterministic controls
29837// whether the dataset is allowed to return elements out of order if the next
29838// element to be returned isn't available, but a later element is. Options are
29839// "true", "false", and "default". "default" indicates that determinism should be
29840// decided by the `experimental_deterministic` parameter of `tf.data.Options`.
29841// If not specified, defaults to "default"
29842func ParseExampleDatasetV2Deterministic(value string) ParseExampleDatasetV2Attr {
29843	return func(m optionalAttr) {
29844		m["deterministic"] = value
29845	}
29846}
29847
29848// ParseExampleDatasetV2RaggedKeys sets the optional ragged_keys attribute to value.
29849// If not specified, defaults to {}
29850//
29851// REQUIRES: len(value) >= 0
29852func ParseExampleDatasetV2RaggedKeys(value []string) ParseExampleDatasetV2Attr {
29853	return func(m optionalAttr) {
29854		m["ragged_keys"] = value
29855	}
29856}
29857
29858// ParseExampleDatasetV2RaggedValueTypes sets the optional ragged_value_types attribute to value.
29859// If not specified, defaults to {}
29860//
29861// REQUIRES: len(value) >= 0
29862func ParseExampleDatasetV2RaggedValueTypes(value []tf.DataType) ParseExampleDatasetV2Attr {
29863	return func(m optionalAttr) {
29864		m["ragged_value_types"] = value
29865	}
29866}
29867
29868// ParseExampleDatasetV2RaggedSplitTypes sets the optional ragged_split_types attribute to value.
29869// If not specified, defaults to {}
29870//
29871// REQUIRES: len(value) >= 0
29872func ParseExampleDatasetV2RaggedSplitTypes(value []tf.DataType) ParseExampleDatasetV2Attr {
29873	return func(m optionalAttr) {
29874		m["ragged_split_types"] = value
29875	}
29876}
29877
29878// Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset of `Tensor` or `SparseTensor` objects representing the parsed features.
29879//
29880// Arguments:
29881//
29882//	dense_defaults: A dict mapping string keys to `Tensor`s.
29883//
29884// The keys of the dict must match the dense_keys of the feature.
29885//
29886//	sparse_keys: A list of string keys in the examples features.
29887//
29888// The results for these keys will be returned as `SparseTensor` objects.
29889//
29890//	dense_keys: A list of Ndense string Tensors (scalars).
29891//
29892// The keys expected in the Examples features associated with dense values.
29893//
29894//	sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
29895//
29896// Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
29897// and `tf.string` (`BytesList`) are supported.
29898//
29899//	dense_shapes: List of tuples with the same length as `dense_keys`.
29900//
29901// The shape of the data for each dense feature referenced by `dense_keys`.
29902// Required for any input tensors identified by `dense_keys`.  Must be
29903// either fully defined, or may contain an unknown first dimension.
29904// An unknown first dimension means the feature is treated as having
29905// a variable number of blocks, and the output shape along this dimension
29906// is considered unknown at graph build time.  Padding is applied for
29907// minibatch elements smaller than the maximum number of blocks for the
29908// given feature along this dimension.
29909//
29910//	output_types: The type list for the return values.
29911//	output_shapes: The list of shapes being produced.
29912func ParseExampleDatasetV2(scope *Scope, input_dataset tf.Output, num_parallel_calls tf.Output, dense_defaults []tf.Output, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ParseExampleDatasetV2Attr) (handle tf.Output) {
29913	if scope.Err() != nil {
29914		return
29915	}
29916	attrs := map[string]interface{}{"sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes, "output_types": output_types, "output_shapes": output_shapes}
29917	for _, a := range optional {
29918		a(attrs)
29919	}
29920	opspec := tf.OpSpec{
29921		Type: "ParseExampleDatasetV2",
29922		Input: []tf.Input{
29923			input_dataset, num_parallel_calls, tf.OutputList(dense_defaults),
29924		},
29925		Attrs: attrs,
29926	}
29927	op := scope.AddOperation(opspec)
29928	return op.Output(0)
29929}
29930
29931// Transforms a vector of tf.Example protos (as strings) into typed tensors.
29932//
29933// Arguments:
29934//
29935//	serialized: A scalar or vector containing binary serialized Example protos.
29936//	names: A tensor containing the names of the serialized protos.
29937//
29938// Corresponds 1:1 with the `serialized` tensor.
29939// May contain, for example, table key (descriptive) names for the
29940// corresponding serialized protos.  These are purely useful for debugging
29941// purposes, and the presence of values here has no effect on the output.
29942// May also be an empty vector if no names are available.
29943// If non-empty, this tensor must have the same shape as "serialized".
29944//
29945//	sparse_keys: Vector of strings.
29946//
29947// The keys expected in the Examples' features associated with sparse values.
29948//
29949//	dense_keys: Vector of strings.
29950//
29951// The keys expected in the Examples' features associated with dense values.
29952//
29953//	ragged_keys: Vector of strings.
29954//
29955// The keys expected in the Examples' features associated with ragged values.
29956//
29957//	dense_defaults: A list of Tensors (some may be empty).  Corresponds 1:1 with `dense_keys`.
29958//
29959// dense_defaults[j] provides default values
29960// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
29961// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
29962// The input type is inferred from dense_defaults[j], even when it's empty.
29963// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
29964// then the shape of dense_defaults[j] must match that of dense_shapes[j].
29965// If dense_shapes[j] has an undefined major dimension (variable strides dense
29966// feature), dense_defaults[j] must contain a single element:
29967// the padding element.
29968//
29969//	num_sparse: The number of sparse keys.
29970//	sparse_types: A list of `num_sparse` types; the data types of data in each Feature
29971//
29972// given in sparse_keys.
29973// Currently ParseExampleV2 supports DT_FLOAT (FloatList),
29974// DT_INT64 (Int64List), and DT_STRING (BytesList).
29975//
29976//	ragged_value_types: A list of `num_ragged` types; the data types of data in each Feature
29977//
29978// given in ragged_keys (where `num_ragged = ragged_keys.size()`).
29979// Currently ParseExampleV2 supports DT_FLOAT (FloatList),
29980// DT_INT64 (Int64List), and DT_STRING (BytesList).
29981//
29982//	ragged_split_types: A list of `num_ragged` types; the data types of row_splits in each Feature
29983//
29984// given in ragged_keys (where `num_ragged = ragged_keys.size()`).
29985// May be DT_INT32 or DT_INT64.
29986//
29987//	dense_shapes: A list of `num_dense` shapes; the shapes of data in each Feature
29988//
29989// given in dense_keys (where `num_dense = dense_keys.size()`).
29990// The number of elements in the Feature corresponding to dense_key[j]
29991// must always equal dense_shapes[j].NumEntries().
29992// If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
29993// Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
29994// The dense outputs are just the inputs row-stacked by batch.
29995// This works for dense_shapes[j] = (-1, D1, ..., DN).  In this case
29996// the shape of the output Tensor dense_values[j] will be
29997// (|serialized|, M, D1, .., DN), where M is the maximum number of blocks
29998// of elements of length D1 * .... * DN, across all minibatch entries
29999// in the input.  Any minibatch entry with less than M blocks of elements of
30000// length D1 * ... * DN will be padded with the corresponding default_value
30001// scalar element along the second dimension.
30002func ParseExampleV2(scope *Scope, serialized tf.Output, names tf.Output, sparse_keys tf.Output, dense_keys tf.Output, ragged_keys tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_types []tf.DataType, ragged_value_types []tf.DataType, ragged_split_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output, ragged_values []tf.Output, ragged_row_splits []tf.Output) {
30003	if scope.Err() != nil {
30004		return
30005	}
30006	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_types": sparse_types, "ragged_value_types": ragged_value_types, "ragged_split_types": ragged_split_types, "dense_shapes": dense_shapes}
30007	opspec := tf.OpSpec{
30008		Type: "ParseExampleV2",
30009		Input: []tf.Input{
30010			serialized, names, sparse_keys, dense_keys, ragged_keys, tf.OutputList(dense_defaults),
30011		},
30012		Attrs: attrs,
30013	}
30014	op := scope.AddOperation(opspec)
30015	if scope.Err() != nil {
30016		return
30017	}
30018	var idx int
30019	var err error
30020	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
30021		scope.UpdateErr("ParseExampleV2", err)
30022		return
30023	}
30024	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
30025		scope.UpdateErr("ParseExampleV2", err)
30026		return
30027	}
30028	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
30029		scope.UpdateErr("ParseExampleV2", err)
30030		return
30031	}
30032	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
30033		scope.UpdateErr("ParseExampleV2", err)
30034		return
30035	}
30036	if ragged_values, idx, err = makeOutputList(op, idx, "ragged_values"); err != nil {
30037		scope.UpdateErr("ParseExampleV2", err)
30038		return
30039	}
30040	if ragged_row_splits, idx, err = makeOutputList(op, idx, "ragged_row_splits"); err != nil {
30041		scope.UpdateErr("ParseExampleV2", err)
30042		return
30043	}
30044	return sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits
30045}
30046
30047// ParseSequenceExampleAttr is an optional argument to ParseSequenceExample.
30048type ParseSequenceExampleAttr func(optionalAttr)
30049
30050// ParseSequenceExampleNcontextSparse sets the optional Ncontext_sparse attribute to value.
30051// If not specified, defaults to 0
30052//
30053// REQUIRES: value >= 0
30054func ParseSequenceExampleNcontextSparse(value int64) ParseSequenceExampleAttr {
30055	return func(m optionalAttr) {
30056		m["Ncontext_sparse"] = value
30057	}
30058}
30059
30060// ParseSequenceExampleNcontextDense sets the optional Ncontext_dense attribute to value.
30061// If not specified, defaults to 0
30062//
30063// REQUIRES: value >= 0
30064func ParseSequenceExampleNcontextDense(value int64) ParseSequenceExampleAttr {
30065	return func(m optionalAttr) {
30066		m["Ncontext_dense"] = value
30067	}
30068}
30069
30070// ParseSequenceExampleNfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
30071// If not specified, defaults to 0
30072//
30073// REQUIRES: value >= 0
30074func ParseSequenceExampleNfeatureListSparse(value int64) ParseSequenceExampleAttr {
30075	return func(m optionalAttr) {
30076		m["Nfeature_list_sparse"] = value
30077	}
30078}
30079
30080// ParseSequenceExampleNfeatureListDense sets the optional Nfeature_list_dense attribute to value.
30081// If not specified, defaults to 0
30082//
30083// REQUIRES: value >= 0
30084func ParseSequenceExampleNfeatureListDense(value int64) ParseSequenceExampleAttr {
30085	return func(m optionalAttr) {
30086		m["Nfeature_list_dense"] = value
30087	}
30088}
30089
30090// ParseSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
30091//
30092// value: A list of Ncontext_sparse types; the data types of data in
30093// each context Feature given in context_sparse_keys.
30094// Currently ParseSequenceExample supports DT_FLOAT (FloatList),
30095// DT_INT64 (Int64List), and DT_STRING (BytesList).
30096// If not specified, defaults to {}
30097//
30098// REQUIRES: len(value) >= 0
30099func ParseSequenceExampleContextSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
30100	return func(m optionalAttr) {
30101		m["context_sparse_types"] = value
30102	}
30103}
30104
30105// ParseSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
30106// If not specified, defaults to {}
30107//
30108// REQUIRES: len(value) >= 0
30109func ParseSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleAttr {
30110	return func(m optionalAttr) {
30111		m["feature_list_dense_types"] = value
30112	}
30113}
30114
30115// ParseSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
30116//
30117// value: A list of Ncontext_dense shapes; the shapes of data in
30118// each context Feature given in context_dense_keys.
30119// The number of elements in the Feature corresponding to context_dense_key[j]
30120// must always equal context_dense_shapes[j].NumEntries().
30121// The shape of context_dense_values[j] will match context_dense_shapes[j].
30122// If not specified, defaults to {}
30123//
30124// REQUIRES: len(value) >= 0
30125func ParseSequenceExampleContextDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
30126	return func(m optionalAttr) {
30127		m["context_dense_shapes"] = value
30128	}
30129}
30130
30131// ParseSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
30132//
30133// value: A list of Nfeature_list_sparse types; the data types
30134// of data in each FeatureList given in feature_list_sparse_keys.
30135// Currently ParseSequenceExample supports DT_FLOAT (FloatList),
30136// DT_INT64 (Int64List), and DT_STRING (BytesList).
30137// If not specified, defaults to {}
30138//
30139// REQUIRES: len(value) >= 0
30140func ParseSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleAttr {
30141	return func(m optionalAttr) {
30142		m["feature_list_sparse_types"] = value
30143	}
30144}
30145
30146// ParseSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
30147//
30148// value: A list of Nfeature_list_dense shapes; the shapes of
30149// data in each FeatureList given in feature_list_dense_keys.
30150// The shape of each Feature in the FeatureList corresponding to
30151// feature_list_dense_key[j] must always equal
30152// feature_list_dense_shapes[j].NumEntries().
30153// If not specified, defaults to {}
30154//
30155// REQUIRES: len(value) >= 0
30156func ParseSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleAttr {
30157	return func(m optionalAttr) {
30158		m["feature_list_dense_shapes"] = value
30159	}
30160}
30161
30162// Transforms a vector of brain.SequenceExample protos (as strings) into typed tensors.
30163//
30164// Arguments:
30165//
30166//	serialized: A vector containing binary serialized SequenceExample protos.
30167//	debug_name: A vector containing the names of the serialized protos.
30168//
30169// May contain, for example, table key (descriptive) name for the
30170// corresponding serialized proto.  This is purely useful for debugging
30171// purposes, and the presence of values here has no effect on the output.
30172// May also be an empty vector if no name is available.
30173//
30174//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
30175//
30176// context_dense_defaults[j] provides default values
30177// when the SequenceExample's context map lacks context_dense_key[j].
30178// If an empty Tensor is provided for context_dense_defaults[j],
30179// then the Feature context_dense_keys[j] is required.
30180// The input type is inferred from context_dense_defaults[j], even when it's
30181// empty.  If context_dense_defaults[j] is not empty, its shape must match
30182// context_dense_shapes[j].
30183//
30184//	feature_list_dense_missing_assumed_empty: A vector listing the
30185//
30186// FeatureList keys which may be missing from the SequenceExamples.  If the
30187// associated FeatureList is missing, it is treated as empty.  By default,
30188// any FeatureList not listed in this vector must exist in the SequenceExamples.
30189//
30190//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
30191//
30192// The keys expected in the Examples' features associated with context_sparse
30193// values.
30194//
30195//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
30196//
30197// The keys expected in the SequenceExamples' context features associated with
30198// dense values.
30199//
30200//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
30201//
30202// (scalars).  The keys expected in the FeatureLists associated with sparse
30203// values.
30204//
30205//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
30206//
30207// The keys expected in the SequenceExamples' feature_lists associated
30208// with lists of dense values.
30209func ParseSequenceExample(scope *Scope, serialized tf.Output, debug_name tf.Output, context_dense_defaults []tf.Output, feature_list_dense_missing_assumed_empty []string, context_sparse_keys []string, context_dense_keys []string, feature_list_sparse_keys []string, feature_list_dense_keys []string, optional ...ParseSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output) {
30210	if scope.Err() != nil {
30211		return
30212	}
30213	attrs := map[string]interface{}{"feature_list_dense_missing_assumed_empty": feature_list_dense_missing_assumed_empty, "context_sparse_keys": context_sparse_keys, "context_dense_keys": context_dense_keys, "feature_list_sparse_keys": feature_list_sparse_keys, "feature_list_dense_keys": feature_list_dense_keys}
30214	for _, a := range optional {
30215		a(attrs)
30216	}
30217	opspec := tf.OpSpec{
30218		Type: "ParseSequenceExample",
30219		Input: []tf.Input{
30220			serialized, debug_name, tf.OutputList(context_dense_defaults),
30221		},
30222		Attrs: attrs,
30223	}
30224	op := scope.AddOperation(opspec)
30225	if scope.Err() != nil {
30226		return
30227	}
30228	var idx int
30229	var err error
30230	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
30231		scope.UpdateErr("ParseSequenceExample", err)
30232		return
30233	}
30234	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
30235		scope.UpdateErr("ParseSequenceExample", err)
30236		return
30237	}
30238	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
30239		scope.UpdateErr("ParseSequenceExample", err)
30240		return
30241	}
30242	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
30243		scope.UpdateErr("ParseSequenceExample", err)
30244		return
30245	}
30246	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
30247		scope.UpdateErr("ParseSequenceExample", err)
30248		return
30249	}
30250	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
30251		scope.UpdateErr("ParseSequenceExample", err)
30252		return
30253	}
30254	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
30255		scope.UpdateErr("ParseSequenceExample", err)
30256		return
30257	}
30258	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
30259		scope.UpdateErr("ParseSequenceExample", err)
30260		return
30261	}
30262	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
30263		scope.UpdateErr("ParseSequenceExample", err)
30264		return
30265	}
30266	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths
30267}
30268
30269// ParseSequenceExampleV2Attr is an optional argument to ParseSequenceExampleV2.
30270type ParseSequenceExampleV2Attr func(optionalAttr)
30271
30272// ParseSequenceExampleV2NcontextSparse sets the optional Ncontext_sparse attribute to value.
30273// If not specified, defaults to 0
30274//
30275// REQUIRES: value >= 0
30276func ParseSequenceExampleV2NcontextSparse(value int64) ParseSequenceExampleV2Attr {
30277	return func(m optionalAttr) {
30278		m["Ncontext_sparse"] = value
30279	}
30280}
30281
30282// ParseSequenceExampleV2ContextSparseTypes sets the optional context_sparse_types attribute to value.
30283//
30284// value: A list of Ncontext_sparse types; the data types of data in
30285// each context Feature given in context_sparse_keys.
30286// Currently ParseSequenceExampleV2 supports DT_FLOAT (FloatList),
30287// DT_INT64 (Int64List), and DT_STRING (BytesList).
30288// If not specified, defaults to {}
30289//
30290// REQUIRES: len(value) >= 0
30291func ParseSequenceExampleV2ContextSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
30292	return func(m optionalAttr) {
30293		m["context_sparse_types"] = value
30294	}
30295}
30296
30297// ParseSequenceExampleV2ContextRaggedValueTypes sets the optional context_ragged_value_types attribute to value.
30298//
30299// value: RaggedTensor.value dtypes for the ragged context features.
30300// If not specified, defaults to {}
30301//
30302// REQUIRES: len(value) >= 0
30303func ParseSequenceExampleV2ContextRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
30304	return func(m optionalAttr) {
30305		m["context_ragged_value_types"] = value
30306	}
30307}
30308
30309// ParseSequenceExampleV2ContextRaggedSplitTypes sets the optional context_ragged_split_types attribute to value.
30310//
30311// value: RaggedTensor.row_split dtypes for the ragged context features.
30312// If not specified, defaults to {}
30313//
30314// REQUIRES: len(value) >= 0
30315func ParseSequenceExampleV2ContextRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
30316	return func(m optionalAttr) {
30317		m["context_ragged_split_types"] = value
30318	}
30319}
30320
30321// ParseSequenceExampleV2ContextDenseShapes sets the optional context_dense_shapes attribute to value.
30322//
30323// value: A list of Ncontext_dense shapes; the shapes of data in
30324// each context Feature given in context_dense_keys.
30325// The number of elements in the Feature corresponding to context_dense_key[j]
30326// must always equal context_dense_shapes[j].NumEntries().
30327// The shape of context_dense_values[j] will match context_dense_shapes[j].
30328// If not specified, defaults to {}
30329//
30330// REQUIRES: len(value) >= 0
30331func ParseSequenceExampleV2ContextDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr {
30332	return func(m optionalAttr) {
30333		m["context_dense_shapes"] = value
30334	}
30335}
30336
30337// ParseSequenceExampleV2NfeatureListSparse sets the optional Nfeature_list_sparse attribute to value.
30338// If not specified, defaults to 0
30339//
30340// REQUIRES: value >= 0
30341func ParseSequenceExampleV2NfeatureListSparse(value int64) ParseSequenceExampleV2Attr {
30342	return func(m optionalAttr) {
30343		m["Nfeature_list_sparse"] = value
30344	}
30345}
30346
30347// ParseSequenceExampleV2NfeatureListDense sets the optional Nfeature_list_dense attribute to value.
30348// If not specified, defaults to 0
30349//
30350// REQUIRES: value >= 0
30351func ParseSequenceExampleV2NfeatureListDense(value int64) ParseSequenceExampleV2Attr {
30352	return func(m optionalAttr) {
30353		m["Nfeature_list_dense"] = value
30354	}
30355}
30356
30357// ParseSequenceExampleV2FeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
30358// If not specified, defaults to {}
30359//
30360// REQUIRES: len(value) >= 0
30361func ParseSequenceExampleV2FeatureListDenseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
30362	return func(m optionalAttr) {
30363		m["feature_list_dense_types"] = value
30364	}
30365}
30366
30367// ParseSequenceExampleV2FeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
30368//
30369// value: A list of Nfeature_list_sparse types; the data types
30370// of data in each FeatureList given in feature_list_sparse_keys.
30371// Currently ParseSequenceExampleV2 supports DT_FLOAT (FloatList),
30372// DT_INT64 (Int64List), and DT_STRING (BytesList).
30373// If not specified, defaults to {}
30374//
30375// REQUIRES: len(value) >= 0
30376func ParseSequenceExampleV2FeatureListSparseTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
30377	return func(m optionalAttr) {
30378		m["feature_list_sparse_types"] = value
30379	}
30380}

// ParseSequenceExampleV2FeatureListRaggedValueTypes sets the optional feature_list_ragged_value_types attribute to value.
//
// value: RaggedTensor.value dtypes for the ragged FeatureList features.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedValueTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_ragged_value_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListRaggedSplitTypes sets the optional feature_list_ragged_split_types attribute to value.
//
// value: RaggedTensor.row_split dtypes for the ragged FeatureList features.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListRaggedSplitTypes(value []tf.DataType) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_ragged_split_types"] = value
	}
}

// ParseSequenceExampleV2FeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSequenceExampleV2FeatureListDenseShapes(value []tf.Shape) ParseSequenceExampleV2Attr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a vector of tf.io.SequenceExample protos (as strings) into
// typed tensors.
//
// Arguments:
//
//	serialized: A scalar or vector containing binary serialized SequenceExample protos.
//	debug_name: A scalar or vector containing the names of the serialized protos.
//
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty vector if no name is available.
//
//	context_sparse_keys: The keys expected in the Examples' features associated with context_sparse
//
// values.
//
//	context_dense_keys: The keys expected in the SequenceExamples' context features associated with
//
// dense values.
//
//	context_ragged_keys: The keys expected in the Examples' features associated with context_ragged
//
// values.
//
//	feature_list_sparse_keys: The keys expected in the FeatureLists associated with sparse values.
//	feature_list_dense_keys: The keys expected in the SequenceExamples' feature_lists associated
//
// with lists of dense values.
//
//	feature_list_ragged_keys: The keys expected in the FeatureLists associated with ragged values.
//	feature_list_dense_missing_assumed_empty: A vector corresponding 1:1 with feature_list_dense_keys, indicating which
//
// features may be missing from the SequenceExamples.  If the associated
// FeatureList is missing, it is treated as empty.
//
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
//
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
func ParseSequenceExampleV2(scope *Scope, serialized tf.Output, debug_name tf.Output, context_sparse_keys tf.Output, context_dense_keys tf.Output, context_ragged_keys tf.Output, feature_list_sparse_keys tf.Output, feature_list_dense_keys tf.Output, feature_list_ragged_keys tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_dense_defaults []tf.Output, optional ...ParseSequenceExampleV2Attr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, context_ragged_values []tf.Output, context_ragged_row_splits []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output, feature_list_dense_lengths []tf.Output, feature_list_ragged_values []tf.Output, feature_list_ragged_outer_splits []tf.Output, feature_list_ragged_inner_splits []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSequenceExampleV2",
		Input: []tf.Input{
			serialized, debug_name, context_sparse_keys, context_dense_keys, context_ragged_keys, feature_list_sparse_keys, feature_list_dense_keys, feature_list_ragged_keys, feature_list_dense_missing_assumed_empty, tf.OutputList(context_dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_ragged_values, idx, err = makeOutputList(op, idx, "context_ragged_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if context_ragged_row_splits, idx, err = makeOutputList(op, idx, "context_ragged_row_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_dense_lengths, idx, err = makeOutputList(op, idx, "feature_list_dense_lengths"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_values, idx, err = makeOutputList(op, idx, "feature_list_ragged_values"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_outer_splits, idx, err = makeOutputList(op, idx, "feature_list_ragged_outer_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	if feature_list_ragged_inner_splits, idx, err = makeOutputList(op, idx, "feature_list_ragged_inner_splits"); err != nil {
		scope.UpdateErr("ParseSequenceExampleV2", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, context_ragged_values, context_ragged_row_splits, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits
}

// Transforms a tf.Example proto (as a string) into typed tensors.
//
// Arguments:
//
//	serialized: A scalar containing a binary serialized Example proto.
//	dense_defaults: A list of Tensors (some may be empty), whose length matches
//
// the length of `dense_keys`. dense_defaults[j] provides default values
// when the example's feature_map lacks dense_key[j].  If an empty Tensor is
// provided for dense_defaults[j], then the Feature dense_keys[j] is required.
// The input type is inferred from dense_defaults[j], even when it's empty.
// If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined,
// then the shape of dense_defaults[j] must match that of dense_shapes[j].
// If dense_shapes[j] has an undefined major dimension (variable strides dense
// feature), dense_defaults[j] must contain a single element:
// the padding element.
//
//	num_sparse: The number of sparse features to be parsed from the example. This
//
// must match the lengths of `sparse_keys` and `sparse_types`.
//
//	sparse_keys: A list of `num_sparse` strings.
//
// The keys expected in the Examples' features associated with sparse values.
//
//	dense_keys: The keys expected in the Examples' features associated with dense
//
// values.
//
//	sparse_types: A list of `num_sparse` types; the data types of data in each
//
// Feature given in sparse_keys.
// Currently the ParseSingleExample op supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
//
//	dense_shapes: The shapes of data in each Feature given in dense_keys.
//
// The length of this list must match the length of `dense_keys`.  The
// number of elements in the Feature corresponding to dense_key[j] must
// always equal dense_shapes[j].NumEntries().  If dense_shapes[j] ==
// (D0, D1, ..., DN) then the shape of output Tensor dense_values[j]
// will be (D0, D1, ..., DN). In the case dense_shapes[j] = (-1, D1,
// ..., DN), the shape of the output Tensor dense_values[j] will be (M,
// D1, ..., DN), where M is the number of blocks of elements of length
// D1 * ... * DN in the input.
func ParseSingleExample(scope *Scope, serialized tf.Output, dense_defaults []tf.Output, num_sparse int64, sparse_keys []string, dense_keys []string, sparse_types []tf.DataType, dense_shapes []tf.Shape) (sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shapes []tf.Output, dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_sparse": num_sparse, "sparse_keys": sparse_keys, "dense_keys": dense_keys, "sparse_types": sparse_types, "dense_shapes": dense_shapes}
	opspec := tf.OpSpec{
		Type: "ParseSingleExample",
		Input: []tf.Input{
			serialized, tf.OutputList(dense_defaults),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if sparse_indices, idx, err = makeOutputList(op, idx, "sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	if sparse_values, idx, err = makeOutputList(op, idx, "sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	if sparse_shapes, idx, err = makeOutputList(op, idx, "sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	if dense_values, idx, err = makeOutputList(op, idx, "dense_values"); err != nil {
		scope.UpdateErr("ParseSingleExample", err)
		return
	}
	return sparse_indices, sparse_values, sparse_shapes, dense_values
}
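
// A minimal usage sketch (editor's addition, not generated code): parsing one
// dense int64 feature out of a serialized Example. The feature name "age" is
// hypothetical; Const and tf.MakeShape are the existing helpers from this
// binding.
func exampleParseSingleExample(scope *Scope, serialized tf.Output) []tf.Output {
	// A non-empty default ([0]) is used when the feature is missing; its
	// dtype also fixes the parsed dtype, and its shape matches dense_shapes.
	defaults := []tf.Output{Const(scope, []int64{0})}
	_, _, _, dense := ParseSingleExample(scope, serialized, defaults,
		0,                           // num_sparse: no sparse features
		[]string{},                  // sparse_keys
		[]string{"age"},             // dense_keys (hypothetical name)
		[]tf.DataType{},             // sparse_types
		[]tf.Shape{tf.MakeShape(1)}, // dense_shapes: one value per example
	)
	return dense
}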

// ParseSingleSequenceExampleAttr is an optional argument to ParseSingleSequenceExample.
type ParseSingleSequenceExampleAttr func(optionalAttr)

// ParseSingleSequenceExampleContextSparseTypes sets the optional context_sparse_types attribute to value.
//
// value: A list of Ncontext_sparse types; the data types of data in
// each context Feature given in context_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseTypes sets the optional feature_list_dense_types attribute to value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_types"] = value
	}
}

// ParseSingleSequenceExampleContextDenseShapes sets the optional context_dense_shapes attribute to value.
//
// value: A list of Ncontext_dense shapes; the shapes of data in
// each context Feature given in context_dense_keys.
// The number of elements in the Feature corresponding to context_dense_key[j]
// must always equal context_dense_shapes[j].NumEntries().
// The shape of context_dense_values[j] will match context_dense_shapes[j].
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleContextDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["context_dense_shapes"] = value
	}
}

// ParseSingleSequenceExampleFeatureListSparseTypes sets the optional feature_list_sparse_types attribute to value.
//
// value: A list of Nfeature_list_sparse types; the data types
// of data in each FeatureList given in feature_list_sparse_keys.
// Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList),
// DT_INT64 (Int64List), and DT_STRING (BytesList).
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListSparseTypes(value []tf.DataType) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_sparse_types"] = value
	}
}

// ParseSingleSequenceExampleFeatureListDenseShapes sets the optional feature_list_dense_shapes attribute to value.
//
// value: A list of Nfeature_list_dense shapes; the shapes of
// data in each FeatureList given in feature_list_dense_keys.
// The shape of each Feature in the FeatureList corresponding to
// feature_list_dense_key[j] must always equal
// feature_list_dense_shapes[j].NumEntries().
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func ParseSingleSequenceExampleFeatureListDenseShapes(value []tf.Shape) ParseSingleSequenceExampleAttr {
	return func(m optionalAttr) {
		m["feature_list_dense_shapes"] = value
	}
}

// Transforms a scalar brain.SequenceExample proto (as a string) into typed tensors.
//
// Arguments:
//
//	serialized: A scalar containing a binary serialized SequenceExample proto.
//	feature_list_dense_missing_assumed_empty: A vector listing the
//
// FeatureList keys which may be missing from the SequenceExample.  If the
// associated FeatureList is missing, it is treated as empty.  By default,
// any FeatureList not listed in this vector must exist in the SequenceExample.
//
//	context_sparse_keys: A list of Ncontext_sparse string Tensors (scalars).
//
// The keys expected in the Examples' features associated with context_sparse
// values.
//
//	context_dense_keys: A list of Ncontext_dense string Tensors (scalars).
//
// The keys expected in the SequenceExamples' context features associated with
// dense values.
//
//	feature_list_sparse_keys: A list of Nfeature_list_sparse string Tensors
//
// (scalars).  The keys expected in the FeatureLists associated with sparse
// values.
//
//	feature_list_dense_keys: A list of Nfeature_list_dense string Tensors (scalars).
//
// The keys expected in the SequenceExamples' feature_lists associated
// with lists of dense values.
//
//	context_dense_defaults: A list of Ncontext_dense Tensors (some may be empty).
//
// context_dense_defaults[j] provides default values
// when the SequenceExample's context map lacks context_dense_key[j].
// If an empty Tensor is provided for context_dense_defaults[j],
// then the Feature context_dense_keys[j] is required.
// The input type is inferred from context_dense_defaults[j], even when it's
// empty.  If context_dense_defaults[j] is not empty, its shape must match
// context_dense_shapes[j].
//
//	debug_name: A scalar containing the name of the serialized proto.
//
// May contain, for example, table key (descriptive) name for the
// corresponding serialized proto.  This is purely useful for debugging
// purposes, and the presence of values here has no effect on the output.
// May also be an empty scalar if no name is available.
func ParseSingleSequenceExample(scope *Scope, serialized tf.Output, feature_list_dense_missing_assumed_empty tf.Output, context_sparse_keys []tf.Output, context_dense_keys []tf.Output, feature_list_sparse_keys []tf.Output, feature_list_dense_keys []tf.Output, context_dense_defaults []tf.Output, debug_name tf.Output, optional ...ParseSingleSequenceExampleAttr) (context_sparse_indices []tf.Output, context_sparse_values []tf.Output, context_sparse_shapes []tf.Output, context_dense_values []tf.Output, feature_list_sparse_indices []tf.Output, feature_list_sparse_values []tf.Output, feature_list_sparse_shapes []tf.Output, feature_list_dense_values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ParseSingleSequenceExample",
		Input: []tf.Input{
			serialized, feature_list_dense_missing_assumed_empty, tf.OutputList(context_sparse_keys), tf.OutputList(context_dense_keys), tf.OutputList(feature_list_sparse_keys), tf.OutputList(feature_list_dense_keys), tf.OutputList(context_dense_defaults), debug_name,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if context_sparse_indices, idx, err = makeOutputList(op, idx, "context_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_values, idx, err = makeOutputList(op, idx, "context_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_sparse_shapes, idx, err = makeOutputList(op, idx, "context_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if context_dense_values, idx, err = makeOutputList(op, idx, "context_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_indices, idx, err = makeOutputList(op, idx, "feature_list_sparse_indices"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_values, idx, err = makeOutputList(op, idx, "feature_list_sparse_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_sparse_shapes, idx, err = makeOutputList(op, idx, "feature_list_sparse_shapes"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	if feature_list_dense_values, idx, err = makeOutputList(op, idx, "feature_list_dense_values"); err != nil {
		scope.UpdateErr("ParseSingleSequenceExample", err)
		return
	}
	return context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values
}

// Transforms a serialized tensorflow.TensorProto proto into a Tensor.
//
// Arguments:
//
//	serialized: A scalar string containing a serialized TensorProto proto.
//	out_type: The type of the serialized tensor.  The provided type must match the
//
// type of the serialized tensor and no implicit conversion will take place.
//
// Returns A Tensor of type `out_type`.
func ParseTensor(scope *Scope, serialized tf.Output, out_type tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "ParseTensor",
		Input: []tf.Input{
			serialized,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
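
// A minimal usage sketch (editor's addition, not generated code): decoding a
// serialized TensorProto that is known to hold float32 data. out_type only
// declares the expected dtype; per the doc above, no conversion is performed.
func exampleParseTensor(scope *Scope, serialized tf.Output) tf.Output {
	return ParseTensor(scope, serialized, tf.Float)
}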

// PlaceholderAttr is an optional argument to Placeholder.
type PlaceholderAttr func(optionalAttr)

// PlaceholderShape sets the optional shape attribute to value.
//
// value: (Optional) The shape of the tensor. If the shape has 0 dimensions, the
// shape is unconstrained.
// If not specified, defaults to {unknown_rank:true}
func PlaceholderShape(value tf.Shape) PlaceholderAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

// A placeholder op for a value that will be fed into the computation.
//
// N.B. This operation will fail with an error if it is executed. It is
// intended as a way to represent a value that will always be fed, and to
// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
//
//	dtype: The type of elements in the tensor.
//
// Returns A placeholder tensor that must be replaced using the feed mechanism.
func Placeholder(scope *Scope, dtype tf.DataType, optional ...PlaceholderAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Placeholder",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
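
// A minimal usage sketch (editor's addition, not generated code): a float32
// placeholder with a partially known shape, where -1 leaves the leading batch
// dimension unconstrained. tf.MakeShape is the existing shape helper from
// this binding; the 28x28 image shape is just an illustrative assumption.
func examplePlaceholder(scope *Scope) tf.Output {
	return Placeholder(scope, tf.Float,
		PlaceholderShape(tf.MakeShape(-1, 28, 28)))
}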

// A placeholder op for a value that will be fed into the computation.
//
// DEPRECATED at GraphDef version 23: Placeholder now behaves the same as PlaceholderV2.
//
// N.B. This operation will fail with an error if it is executed. It is
// intended as a way to represent a value that will always be fed, and to
// provide attrs that enable the fed value to be checked at runtime.
//
// Arguments:
//
//	dtype: The type of elements in the tensor.
//	shape: The shape of the tensor. The shape can be any partially-specified
//
// shape.  To be unconstrained, pass in a shape with unknown rank.
//
// Returns A placeholder tensor that must be replaced using the feed mechanism.
func PlaceholderV2(scope *Scope, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// A placeholder op that passes through `input` when its output is not fed.
//
// Arguments:
//
//	input: The default value to produce when `output` is not fed.
//	shape: The (possibly partial) shape of the tensor.
//
// Returns A placeholder tensor that defaults to `input` if it is not fed.
func PlaceholderWithDefault(scope *Scope, input tf.Output, shape tf.Shape) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shape": shape}
	opspec := tf.OpSpec{
		Type: "PlaceholderWithDefault",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
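
// A minimal usage sketch (editor's addition, not generated code): an output
// that yields [1 2 3] unless a replacement value is fed at run time. Const
// and tf.MakeShape are the existing helpers from this binding.
func examplePlaceholderWithDefault(scope *Scope) tf.Output {
	def := Const(scope, []int32{1, 2, 3})
	return PlaceholderWithDefault(scope, def, tf.MakeShape(3))
}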

// Compute the polygamma function \\(\psi^{(n)}(x)\\).
//
// The polygamma function is defined as:
//
// \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
//
// where \\(\psi(x)\\) is the digamma function.
// The polygamma function is defined only for non-negative integer orders \\(a\\).
func Polygamma(scope *Scope, a tf.Output, x tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Polygamma",
		Input: []tf.Input{
			a, x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
//
// For each entry in `x`, calculates the number of `1` (on) bits in the binary
// representation of that entry.
//
// **NOTE**: It is more efficient to first `tf.bitcast` your tensors into
// `int32` or `int64` and perform the bitcount on the result, than to feed in
// 8- or 16-bit inputs and then aggregate the resulting counts.
func PopulationCount(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "PopulationCount",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the power of one value to another.
//
// Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
// corresponding elements in `x` and `y`. For example:
//
// ```
// # tensor 'x' is [[2, 2], [3, 3]]
// # tensor 'y' is [[8, 16], [2, 3]]
// tf.pow(x, y) ==> [[256, 65536], [9, 27]]
// ```
func Pow(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Pow",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
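
// A minimal usage sketch (editor's addition, not generated code) building the
// example from the doc above: 2^8=256, 2^16=65536, 3^2=9, 3^3=27. Const is
// the existing helper from this package.
func examplePow(scope *Scope) tf.Output {
	x := Const(scope, [][]int64{{2, 2}, {3, 3}})
	y := Const(scope, [][]int64{{8, 16}, {2, 3}})
	return Pow(scope, x, y)
}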

// PrefetchDatasetAttr is an optional argument to PrefetchDataset.
type PrefetchDatasetAttr func(optionalAttr)

// PrefetchDatasetSlackPeriod sets the optional slack_period attribute to value.
// If not specified, defaults to 0
func PrefetchDatasetSlackPeriod(value int64) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["slack_period"] = value
	}
}

// PrefetchDatasetLegacyAutotune sets the optional legacy_autotune attribute to value.
// If not specified, defaults to true
func PrefetchDatasetLegacyAutotune(value bool) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["legacy_autotune"] = value
	}
}

// PrefetchDatasetBufferSizeMin sets the optional buffer_size_min attribute to value.
// If not specified, defaults to 0
func PrefetchDatasetBufferSizeMin(value int64) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["buffer_size_min"] = value
	}
}

// PrefetchDatasetMetadata sets the optional metadata attribute to value.
// If not specified, defaults to ""
func PrefetchDatasetMetadata(value string) PrefetchDatasetAttr {
	return func(m optionalAttr) {
		m["metadata"] = value
	}
}

// Creates a dataset that asynchronously prefetches elements from `input_dataset`.
//
// Arguments:
//
//	buffer_size: The maximum number of elements to buffer in an iterator over
//
// this dataset.
func PrefetchDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...PrefetchDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrefetchDataset",
		Input: []tf.Input{
			input_dataset, buffer_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
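
// A minimal usage sketch (editor's addition, not generated code): prefetching
// with a lower bound on the buffer. This assumes a buffer_size of -1 requests
// autotuning, which is tf.data runtime behavior not documented above.
func examplePrefetchDataset(scope *Scope, dataset tf.Output, types []tf.DataType, shapes []tf.Shape) tf.Output {
	bufferSize := Const(scope, int64(-1)) // -1: let the runtime autotune
	return PrefetchDataset(scope, dataset, bufferSize, types, shapes,
		PrefetchDatasetBufferSizeMin(2)) // but never drop below 2 elements
}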

// PrelinearizeAttr is an optional argument to Prelinearize.
type PrelinearizeAttr func(optionalAttr)

// PrelinearizeShape sets the optional shape attribute to value.
//
// value: The shape of the tensor.
// If not specified, defaults to {}
func PrelinearizeShape(value tf.Shape) PrelinearizeAttr {
	return func(m optionalAttr) {
		m["shape"] = value
	}
}

// PrelinearizeLayout sets the optional layout attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence. If a layout
// attribute is passed but its values are all -1 the layout will be computed by
// the infeed operation.
// If not specified, defaults to {}
func PrelinearizeLayout(value []int64) PrelinearizeAttr {
	return func(m optionalAttr) {
		m["layout"] = value
	}
}

// An op which linearizes one Tensor value to an opaque variant tensor.
//
// Arguments:
//
//	input: A tensor that will be linearized.
func Prelinearize(scope *Scope, input tf.Output, optional ...PrelinearizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prelinearize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PrelinearizeTupleAttr is an optional argument to PrelinearizeTuple.
type PrelinearizeTupleAttr func(optionalAttr)

// PrelinearizeTupleLayouts sets the optional layouts attribute to value.
//
// value: A vector holding the requested layout in minor-to-major sequence for all the
// tuple shapes in the order the shapes appear in the "shapes" input. The layout
// elements for a sub-shape can be set to -1 in which case the corresponding layout
// will be computed by the infeed operation.
// If not specified, defaults to {}
func PrelinearizeTupleLayouts(value []int64) PrelinearizeTupleAttr {
	return func(m optionalAttr) {
		m["layouts"] = value
	}
}

// An op which linearizes multiple Tensor values to an opaque variant tensor.
//
// Arguments:
//
//	inputs: A list of tensors that will be provided using the infeed mechanism.
//	shapes: The shapes of each tensor in `inputs`.
func PrelinearizeTuple(scope *Scope, inputs []tf.Output, shapes []tf.Shape, optional ...PrelinearizeTupleAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrelinearizeTuple",
		Input: []tf.Input{
			tf.OutputList(inputs),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PreventGradientAttr is an optional argument to PreventGradient.
type PreventGradientAttr func(optionalAttr)

// PreventGradientMessage sets the optional message attribute to value.
//
// value: Will be printed in the error when anyone tries to differentiate
// this operation.
// If not specified, defaults to ""
func PreventGradientMessage(value string) PreventGradientAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// An identity op that triggers an error if a gradient is requested.
//
// When executed in a graph, this op outputs its input tensor as-is.
//
// When building ops to compute gradients, the TensorFlow gradient system
// will return an error when trying to look up the gradient of this op,
// because no gradient must ever be registered for this function.  This
// op exists to prevent subtle bugs from silently returning unimplemented
// gradients in some corner cases.
//
// Arguments:
//
//	input: any tensor.
//
// Returns the same input tensor.
func PreventGradient(scope *Scope, input tf.Output, optional ...PreventGradientAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PreventGradient",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
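
// A minimal usage sketch (editor's addition, not generated code): wrapping a
// value so that any attempt to differentiate through it fails loudly with a
// descriptive message instead of silently using a missing gradient.
func examplePreventGradient(scope *Scope, value tf.Output) tf.Output {
	return PreventGradient(scope, value,
		PreventGradientMessage("gradient intentionally not implemented here"))
}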

// PrintAttr is an optional argument to Print.
type PrintAttr func(optionalAttr)

// PrintMessage sets the optional message attribute to value.
//
// value: A string, prefix of the error message.
// If not specified, defaults to ""
func PrintMessage(value string) PrintAttr {
	return func(m optionalAttr) {
		m["message"] = value
	}
}

// PrintFirstN sets the optional first_n attribute to value.
//
// value: Only log `first_n` number of times. -1 disables logging.
// If not specified, defaults to -1
func PrintFirstN(value int64) PrintAttr {
	return func(m optionalAttr) {
		m["first_n"] = value
	}
}

// PrintSummarize sets the optional summarize attribute to value.
//
// value: Only print this many entries of each tensor.
// If not specified, defaults to 3
func PrintSummarize(value int64) PrintAttr {
	return func(m optionalAttr) {
		m["summarize"] = value
	}
}

// Prints a list of tensors.
//
// Passes `input` through to `output` and prints `data` when evaluating.
//
// Arguments:
//
//	input: The tensor passed to `output`
//	data: A list of tensors to print out when op is evaluated.
//
// Returns The unmodified `input` tensor
func Print(scope *Scope, input tf.Output, data []tf.Output, optional ...PrintAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Print",
		Input: []tf.Input{
			input, tf.OutputList(data),
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// PrintV2Attr is an optional argument to PrintV2.
type PrintV2Attr func(optionalAttr)

// PrintV2OutputStream sets the optional output_stream attribute to value.
//
// value: A string specifying the output stream or logging level to print to.
// If not specified, defaults to "stderr"
func PrintV2OutputStream(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["output_stream"] = value
	}
}

// PrintV2End sets the optional end attribute to value.
// If not specified, defaults to "\n"
func PrintV2End(value string) PrintV2Attr {
	return func(m optionalAttr) {
		m["end"] = value
	}
}

// Prints a string scalar.
//
// Prints a string scalar to the desired output_stream.
//
// Arguments:
//
//	input: The string scalar to print.
//
// Returns the created operation.
func PrintV2(scope *Scope, input tf.Output, optional ...PrintV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PrintV2",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
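
// A minimal usage sketch (editor's addition, not generated code): printing a
// string scalar to stdout rather than the default stderr. "stdout" as a valid
// output_stream value is an assumption about the op's accepted strings; Const
// is the existing helper from this package.
func examplePrintV2(scope *Scope) *tf.Operation {
	msg := Const(scope, "training step finished")
	return PrintV2(scope, msg, PrintV2OutputStream("stdout"))
}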

// PriorityQueueV2Attr is an optional argument to PriorityQueueV2.
type PriorityQueueV2Attr func(optionalAttr)

// PriorityQueueV2ComponentTypes sets the optional component_types attribute to value.
//
// value: The type of each component in a value.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func PriorityQueueV2ComponentTypes(value []tf.DataType) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["component_types"] = value
	}
}

// PriorityQueueV2Capacity sets the optional capacity attribute to value.
//
// value: The upper bound on the number of elements in this queue.
// Negative numbers mean no limit.
// If not specified, defaults to -1
func PriorityQueueV2Capacity(value int64) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// PriorityQueueV2Container sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container.
// Otherwise, a default container is used.
// If not specified, defaults to ""
func PriorityQueueV2Container(value string) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// PriorityQueueV2SharedName sets the optional shared_name attribute to value.
//
// value: If non-empty, this queue will be shared under the given name
// across multiple sessions.
// If not specified, defaults to ""
func PriorityQueueV2SharedName(value string) PriorityQueueV2Attr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// A queue that produces elements sorted by the first component value.
//
// Note that the PriorityQueue requires the first component of any element
// to be a scalar int64, in addition to the other elements declared by
// component_types.  Therefore calls to Enqueue and EnqueueMany (resp. Dequeue
// and DequeueMany) on a PriorityQueue will all require (resp. output) one extra
// entry in their input (resp. output) lists.
//
// Arguments:
//
//	shapes: The shape of each component in a value. The length of this attr must
//
// be either 0 or the same as the length of component_types. If the length of
// this attr is 0, the shapes of queue elements are not constrained, and
// only one element may be dequeued at a time.
//
// Returns The handle to the queue.
func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"shapes": shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "PriorityQueueV2",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
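
// A minimal usage sketch (editor's addition, not generated code): a bounded
// queue of scalar strings ordered by an int64 priority. Per the note above,
// the priority is the implicit extra first component of each element, so
// component_types lists only the declared string payload. tf.ScalarShape is
// the existing shape helper from this binding.
func examplePriorityQueue(scope *Scope) tf.Output {
	return PriorityQueueV2(scope, []tf.Shape{tf.ScalarShape()},
		PriorityQueueV2ComponentTypes([]tf.DataType{tf.String}),
		PriorityQueueV2Capacity(100))
}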

// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
//
// Arguments:
//
//	num_threads: Identifies the number of threads to use for the private threadpool.
func PrivateThreadPoolDataset(scope *Scope, input_dataset tf.Output, num_threads tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "PrivateThreadPoolDataset",
		Input: []tf.Input{
			input_dataset, num_threads,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ProdAttr is an optional argument to Prod.
type ProdAttr func(optionalAttr)

// ProdKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func ProdKeepDims(value bool) ProdAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the product of elements across dimensions of a tensor.
//
// Reduces `input` along the dimensions given in `axis`. Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `axis`. If `keep_dims` is true, the reduced dimensions are
// retained with length 1.
//
// Arguments:
//
//	input: The tensor to reduce.
//	axis: The dimensions to reduce. Must be in the range
//
// `[-rank(input), rank(input))`.
//
// Returns The reduced tensor.
func Prod(scope *Scope, input tf.Output, axis tf.Output, optional ...ProdAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Prod",
		Input: []tf.Input{
			input, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
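
// A minimal usage sketch (editor's addition, not generated code): the product
// over the last axis, keeping the reduced axis as a length-1 dimension. Const
// is the existing helper from this package.
func exampleProd(scope *Scope, input tf.Output) tf.Output {
	axis := Const(scope, int32(-1))
	return Prod(scope, input, axis, ProdKeepDims(true))
}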

// QrAttr is an optional argument to Qr.
type QrAttr func(optionalAttr)

// QrFullMatrices sets the optional full_matrices attribute to value.
//
// value: If true, compute full-sized `q` and `r`. If false
// (the default), compute only the leading `P` columns of `q`.
// If not specified, defaults to false
func QrFullMatrices(value bool) QrAttr {
	return func(m optionalAttr) {
		m["full_matrices"] = value
	}
}

// Computes the QR decompositions of one or more matrices.
//
// Computes the QR decomposition of each inner matrix in `tensor` such that
// `tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
//
// Currently, the gradient for the QR decomposition is well-defined only when
// the first `P` columns of the inner matrix are linearly independent, where
// `P` is the minimum of `M` and `N`, the 2 innermost dimensions of `tensor`.
//
// ```python
// # a is a tensor.
// # q is a tensor of orthonormal matrices.
// # r is a tensor of upper triangular matrices.
// q, r = qr(a)
// q_full, r_full = qr(a, full_matrices=True)
// ```
//
// Arguments:
//
//	input: A tensor of shape `[..., M, N]` whose innermost 2 dimensions
//
// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
//
// Returns:
//
//	q: Orthonormal basis for range of `a`. If `full_matrices` is `False` then
//
// shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
// `[..., M, M]`.
//
//	r: Triangular factor. If `full_matrices` is `False` then shape is
//
// `[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.
func Qr(scope *Scope, input tf.Output, optional ...QrAttr) (q tf.Output, r tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Qr",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
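
// A minimal usage sketch (editor's addition, not generated code): requesting
// the full-sized factors, so q has shape [..., M, M] and r has shape
// [..., M, N] as described above.
func exampleQr(scope *Scope, a tf.Output) (q, r tf.Output) {
	return Qr(scope, a, QrFullMatrices(true))
}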

// QuantizeAndDequantizeAttr is an optional argument to QuantizeAndDequantize.
type QuantizeAndDequantizeAttr func(optionalAttr)

// QuantizeAndDequantizeSignedInput sets the optional signed_input attribute to value.
// If not specified, defaults to true
func QuantizeAndDequantizeSignedInput(value bool) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeNumBits sets the optional num_bits attribute to value.
// If not specified, defaults to 8
func QuantizeAndDequantizeNumBits(value int64) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeRangeGiven sets the optional range_given attribute to value.
// If not specified, defaults to false
func QuantizeAndDequantizeRangeGiven(value bool) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeInputMin sets the optional input_min attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMin(value float32) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["input_min"] = value
	}
}

// QuantizeAndDequantizeInputMax sets the optional input_max attribute to value.
// If not specified, defaults to 0
func QuantizeAndDequantizeInputMax(value float32) QuantizeAndDequantizeAttr {
	return func(m optionalAttr) {
		m["input_max"] = value
	}
}

// Use QuantizeAndDequantizeV2 instead.
//
// DEPRECATED at GraphDef version 22: Replaced by QuantizeAndDequantizeV2
func QuantizeAndDequantize(scope *Scope, input tf.Output, optional ...QuantizeAndDequantizeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantize",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// QuantizeAndDequantizeV2Attr is an optional argument to QuantizeAndDequantizeV2.
type QuantizeAndDequantizeV2Attr func(optionalAttr)

// QuantizeAndDequantizeV2SignedInput sets the optional signed_input attribute to value.
//
// value: Whether the quantization is signed or unsigned. (actually this parameter should
// have been called <b>`signed_output`</b>)
// If not specified, defaults to true
func QuantizeAndDequantizeV2SignedInput(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["signed_input"] = value
	}
}

// QuantizeAndDequantizeV2NumBits sets the optional num_bits attribute to value.
//
// value: The bitwidth of the quantization.
// If not specified, defaults to 8
func QuantizeAndDequantizeV2NumBits(value int64) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["num_bits"] = value
	}
}

// QuantizeAndDequantizeV2RangeGiven sets the optional range_given attribute to value.
//
// value: Whether the range is given or should be determined from the `input` tensor.
// If not specified, defaults to false
func QuantizeAndDequantizeV2RangeGiven(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["range_given"] = value
	}
}

// QuantizeAndDequantizeV2RoundMode sets the optional round_mode attribute to value.
//
// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
// used when rounding float values to their quantized equivalents. The following
// rounding modes are currently supported:
//
//   - HALF_TO_EVEN: this is the default round_mode.
//   - HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
//     rounds up to -7.
//
// If not specified, defaults to "HALF_TO_EVEN"
func QuantizeAndDequantizeV2RoundMode(value string) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["round_mode"] = value
	}
}

// QuantizeAndDequantizeV2NarrowRange sets the optional narrow_range attribute to value.
//
// value: If True, then the absolute value of the quantized minimum value is the same as
// the quantized maximum value, instead of 1 greater.
// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
// If not specified, defaults to false
func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["narrow_range"] = value
	}
}

// QuantizeAndDequantizeV2Axis sets the optional axis attribute to value.
//
// value: If specified, this axis is treated as a channel or slice axis, and a separate
// quantization range is used for each channel or slice along this axis.
// If not specified, defaults to -1
func QuantizeAndDequantizeV2Axis(value int64) QuantizeAndDequantizeV2Attr {
	return func(m optionalAttr) {
		m["axis"] = value
	}
}

// Quantizes then dequantizes a tensor.
//
// This op simulates the precision loss from the quantized forward pass by:
//
//  1. Quantizing the tensor to fixed point numbers, which should match the target
//     quantization method when it is used in inference.
//  2. Dequantizing it back to floating point numbers for the following ops, most
//     likely matmul.
//
// There are different ways to quantize. This version uses only scaling, so 0.0
// maps to 0.
//
// From the specified 'num_bits' in the quantized output type, it determines
// minimum and maximum representable quantized values.
//
// e.g.
//
// *   [-128, 127] for signed, num_bits = 8, or
// *   [0, 255] for unsigned, num_bits = 8.
//
// If range_given == False, the initial input_min, input_max will be determined
// automatically as the minimum and maximum values in the input tensor, otherwise
// the specified values of input_min, input_max are used.
//
// Note: If the input_min, input_max are specified, they do not need to equal the
// actual minimum and maximum values in the tensor. e.g. in some cases it may be
// beneficial to specify these values such that the low probability extremes of the
// input distribution are clipped.
//
// This op determines the maximum scale_factor that would map the initial
// [input_min, input_max] range to a range that lies within the representable
// quantized range.
//
// It determines the scale from one of input_min and input_max, then updates the
// other one to maximize the representable range.
//
// e.g.
//
//   - if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
//     5.0]: it would use a scale_factor of -128 / -10.0 = 12.8 In this case, it
//     would update input_max to be 127 / 12.8 = 9.921875
//   - if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
//     10.0]: it would use a scale_factor of 127 / 10.0 = 12.7 In this case, it
//     would update input_min to be -128.0 / 12.7 = -10.07874
//   - if the output is unsigned, input_min is forced to be 0, and only the
//     specified input_max is used.
//
// After determining the scale_factor and updating the input range, it applies the
// following to each value in the 'input' tensor.
//
// output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
//
// The above round function rounds the value based on the given round_mode.
//
// Arguments:
//
//	input: Tensor to quantize and then dequantize.
//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
//
// be represented, otherwise it is determined from the min value of the `input`
// tensor.
//
//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
//
// be represented, otherwise it is determined from the max value of the `input`
// tensor.
func QuantizeAndDequantizeV2(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "QuantizeAndDequantizeV2",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
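
// A minimal usage sketch (editor's addition, not generated code): simulating
// signed 8-bit quantization over a caller-supplied range. With range_given
// set, values outside [input_min, input_max] are clipped before rounding, as
// described above. Const is the existing helper from this package; the
// [-10, 10] range is just an illustrative assumption.
func exampleQuantizeAndDequantizeV2(scope *Scope, input tf.Output) tf.Output {
	inputMin := Const(scope, float32(-10.0))
	inputMax := Const(scope, float32(10.0))
	return QuantizeAndDequantizeV2(scope, input, inputMin, inputMax,
		QuantizeAndDequantizeV2NumBits(8),
		QuantizeAndDequantizeV2RangeGiven(true))
}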
31745
31746// QuantizeAndDequantizeV3Attr is an optional argument to QuantizeAndDequantizeV3.
31747type QuantizeAndDequantizeV3Attr func(optionalAttr)
31748
31749// QuantizeAndDequantizeV3SignedInput sets the optional signed_input attribute to value.
31750// If not specified, defaults to true
31751func QuantizeAndDequantizeV3SignedInput(value bool) QuantizeAndDequantizeV3Attr {
31752	return func(m optionalAttr) {
31753		m["signed_input"] = value
31754	}
31755}
31756
31757// QuantizeAndDequantizeV3RangeGiven sets the optional range_given attribute to value.
31758// If not specified, defaults to true
31759func QuantizeAndDequantizeV3RangeGiven(value bool) QuantizeAndDequantizeV3Attr {
31760	return func(m optionalAttr) {
31761		m["range_given"] = value
31762	}
31763}
31764
31765// QuantizeAndDequantizeV3NarrowRange sets the optional narrow_range attribute to value.
31766// If not specified, defaults to false
31767func QuantizeAndDequantizeV3NarrowRange(value bool) QuantizeAndDequantizeV3Attr {
31768	return func(m optionalAttr) {
31769		m["narrow_range"] = value
31770	}
31771}
31772
31773// QuantizeAndDequantizeV3Axis sets the optional axis attribute to value.
31774// If not specified, defaults to -1
31775func QuantizeAndDequantizeV3Axis(value int64) QuantizeAndDequantizeV3Attr {
31776	return func(m optionalAttr) {
31777		m["axis"] = value
31778	}
31779}
31780
31781// Quantizes then dequantizes a tensor.
31782//
31783// This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
31784// tensor, so its value can change during training.
31785func QuantizeAndDequantizeV3(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, num_bits tf.Output, optional ...QuantizeAndDequantizeV3Attr) (output tf.Output) {
31786	if scope.Err() != nil {
31787		return
31788	}
31789	attrs := map[string]interface{}{}
31790	for _, a := range optional {
31791		a(attrs)
31792	}
31793	opspec := tf.OpSpec{
31794		Type: "QuantizeAndDequantizeV3",
31795		Input: []tf.Input{
31796			input, input_min, input_max, num_bits,
31797		},
31798		Attrs: attrs,
31799	}
31800	op := scope.AddOperation(opspec)
31801	return op.Output(0)
31802}
31803
31804// QuantizeAndDequantizeV4Attr is an optional argument to QuantizeAndDequantizeV4.
31805type QuantizeAndDequantizeV4Attr func(optionalAttr)
31806
31807// QuantizeAndDequantizeV4SignedInput sets the optional signed_input attribute to value.
31808//
31809// value: Whether the quantization is signed or unsigned. (actually this parameter should
31810// have been called <b>`signed_output`</b>)
31811// If not specified, defaults to true
31812func QuantizeAndDequantizeV4SignedInput(value bool) QuantizeAndDequantizeV4Attr {
31813	return func(m optionalAttr) {
31814		m["signed_input"] = value
31815	}
31816}
31817
31818// QuantizeAndDequantizeV4NumBits sets the optional num_bits attribute to value.
31819//
31820// value: The bitwidth of the quantization.
31821// If not specified, defaults to 8
31822func QuantizeAndDequantizeV4NumBits(value int64) QuantizeAndDequantizeV4Attr {
31823	return func(m optionalAttr) {
31824		m["num_bits"] = value
31825	}
31826}
31827
31828// QuantizeAndDequantizeV4RangeGiven sets the optional range_given attribute to value.
31829//
31830// value: Whether the range is given or should be determined from the `input` tensor.
31831// If not specified, defaults to false
31832func QuantizeAndDequantizeV4RangeGiven(value bool) QuantizeAndDequantizeV4Attr {
31833	return func(m optionalAttr) {
31834		m["range_given"] = value
31835	}
31836}
31837
31838// QuantizeAndDequantizeV4RoundMode sets the optional round_mode attribute to value.
31839//
31840// value: The 'round_mode' attribute controls which rounding tie-breaking algorithm is
31841// used when rounding float values to their quantized equivalents. The following
31842// rounding modes are currently supported:
31843//
31844//   - HALF_TO_EVEN: this is the default round_mode.
31845//   - HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
31846//     rounds up to -7.
31847//
31848// If not specified, defaults to "HALF_TO_EVEN"
31849func QuantizeAndDequantizeV4RoundMode(value string) QuantizeAndDequantizeV4Attr {
31850	return func(m optionalAttr) {
31851		m["round_mode"] = value
31852	}
31853}
31854
31855// QuantizeAndDequantizeV4NarrowRange sets the optional narrow_range attribute to value.
31856//
31857// value: If True, then the absolute value of the quantized minimum value is the same as
31858// the quantized maximum value, instead of 1 greater.
31859// i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
31860// If not specified, defaults to false
31861func QuantizeAndDequantizeV4NarrowRange(value bool) QuantizeAndDequantizeV4Attr {
31862	return func(m optionalAttr) {
31863		m["narrow_range"] = value
31864	}
31865}
31866
31867// QuantizeAndDequantizeV4Axis sets the optional axis attribute to value.
31868//
31869// value: If specified, this axis is treated as a channel or slice axis, and a separate
31870// quantization range is used for each channel or slice along this axis.
31871// If not specified, defaults to -1
31872func QuantizeAndDequantizeV4Axis(value int64) QuantizeAndDequantizeV4Attr {
31873	return func(m optionalAttr) {
31874		m["axis"] = value
31875	}
31876}
31877
31878// Quantizes then dequantizes a tensor.
31879//
31880// This is almost identical to QuantizeAndDequantizeV2, except that it returns a
31881// gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
31882//
31883// Arguments:
31884//
31885//	input: Tensor to quantize and then dequantize.
31886//	input_min: If `range_given == True`, this specifies the minimum input value that needs to
31887//
31888// be represented, otherwise it is determined from the min value of the `input`
31889// tensor.
31890//
31891//	input_max: If `range_given == True`, this specifies the maximum input value that needs to
31892//
31893// be represented, otherwise it is determined from the max value of the `input`
31894// tensor.
31895func QuantizeAndDequantizeV4(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4Attr) (output tf.Output) {
31896	if scope.Err() != nil {
31897		return
31898	}
31899	attrs := map[string]interface{}{}
31900	for _, a := range optional {
31901		a(attrs)
31902	}
31903	opspec := tf.OpSpec{
31904		Type: "QuantizeAndDequantizeV4",
31905		Input: []tf.Input{
31906			input, input_min, input_max,
31907		},
31908		Attrs: attrs,
31909	}
31910	op := scope.AddOperation(opspec)
31911	return op.Output(0)
31912}
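
// Example (illustrative sketch): pinning the representable range with
// RangeGiven(true). The helper names below (op.NewScope, op.Const) are the
// hand-written parts of this package; values are arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	input := op.Const(s, []float32{-1.5, 0.0, 1.5})
//	// With RangeGiven(true), input_min/input_max fix the range to represent;
//	// otherwise the op derives the range from the input itself.
//	out := op.QuantizeAndDequantizeV4(s, input,
//		op.Const(s, float32(-2)), op.Const(s, float32(2)),
//		op.QuantizeAndDequantizeV4RangeGiven(true),
//		op.QuantizeAndDequantizeV4NumBits(8))
//	_ = out
//
// ```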
31913
31914// QuantizeAndDequantizeV4GradAttr is an optional argument to QuantizeAndDequantizeV4Grad.
31915type QuantizeAndDequantizeV4GradAttr func(optionalAttr)
31916
31917// QuantizeAndDequantizeV4GradAxis sets the optional axis attribute to value.
31918// If not specified, defaults to -1
31919func QuantizeAndDequantizeV4GradAxis(value int64) QuantizeAndDequantizeV4GradAttr {
31920	return func(m optionalAttr) {
31921		m["axis"] = value
31922	}
31923}
31924
31925// Returns the gradient of `QuantizeAndDequantizeV4`.
31926//
31927// Returns a gradient of 1 for inputs that are within the quantization range,
31928// or 0 otherwise.
31929func QuantizeAndDequantizeV4Grad(scope *Scope, gradients tf.Output, input tf.Output, input_min tf.Output, input_max tf.Output, optional ...QuantizeAndDequantizeV4GradAttr) (input_backprop tf.Output, input_min_backprop tf.Output, input_max_backprop tf.Output) {
31930	if scope.Err() != nil {
31931		return
31932	}
31933	attrs := map[string]interface{}{}
31934	for _, a := range optional {
31935		a(attrs)
31936	}
31937	opspec := tf.OpSpec{
31938		Type: "QuantizeAndDequantizeV4Grad",
31939		Input: []tf.Input{
31940			gradients, input, input_min, input_max,
31941		},
31942		Attrs: attrs,
31943	}
31944	op := scope.AddOperation(opspec)
31945	return op.Output(0), op.Output(1), op.Output(2)
31946}
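
// Example (illustrative sketch): wiring up the gradient op by hand. Entries of
// `input` outside [input_min, input_max] receive a zero gradient. Values are
// arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	grads := op.Const(s, []float32{1, 1, 1})
//	input := op.Const(s, []float32{-3, 0.5, 3})
//	dx, dMin, dMax := op.QuantizeAndDequantizeV4Grad(s, grads, input,
//		op.Const(s, float32(-1)), op.Const(s, float32(1)))
//	_, _, _ = dx, dMin, dMax
//
// ```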
31947
31948// Converts the quantized 'input' tensor into a lower-precision 'output'.
31949//
31950// It uses the actual distribution of the values to maximize the usage of the
31951// lower bit depth, adjusting the output min and max ranges accordingly.
31952//
31953// [input_min, input_max] are scalar floats that specify the range for the float
31954// interpretation of the 'input' data. For example, if input_min is -1.0f and
31955// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
31956// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
31957//
31958// This operator tries to squeeze as much precision as possible into an output with
31959// a lower bit depth by calculating the actual min and max values found in the
31960// data. For example, maybe that quint16 input has no values lower than 16,384 and
31961// none higher than 49,152. That means only half the range is actually needed, all
31962// the float interpretations are between -0.5f and 0.5f, so if we want to compress
31963// the data into a quint8 output, we can use that range rather than the theoretical
31964// -1.0f to 1.0f that is suggested by the input min and max.
31965//
31966// In practice, this is most useful for taking output from operations like
31967// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
31968// may have large potential output ranges, but in practice have a distribution of
31969// input values that only uses a small fraction of the possible range. By feeding
31970// that output into this operator, we can reduce it from 32 bits down to 8 with
31971// minimal loss of accuracy.
31972//
31973// Arguments:
31974//
31975//	input_min: The float value that the minimum quantized input value represents.
31976//	input_max: The float value that the maximum quantized input value represents.
31977//	out_type: The type of the output. Should be a lower bit depth than Tinput.
31978//
31979// Returns:
31980//
31981//	output
31982//	output_min: The float value that the minimum quantized output value represents.
31983//	output_max: The float value that the maximum quantized output value represents.
31984func QuantizeDownAndShrinkRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
31985	if scope.Err() != nil {
31986		return
31987	}
31988	attrs := map[string]interface{}{"out_type": out_type}
31989	opspec := tf.OpSpec{
31990		Type: "QuantizeDownAndShrinkRange",
31991		Input: []tf.Input{
31992			input, input_min, input_max,
31993		},
31994		Attrs: attrs,
31995	}
31996	op := scope.AddOperation(opspec)
31997	return op.Output(0), op.Output(1), op.Output(2)
31998}
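
// Example (illustrative sketch): requantizing a 32-bit accumulator down to 8
// bits. The placeholders stand in for a real qint32 producer such as
// QuantizedMatMul; names and shapes are arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	q := op.Placeholder(s.SubScope("q"), tf.Qint32)
//	qMin := op.Placeholder(s.SubScope("min"), tf.Float)
//	qMax := op.Placeholder(s.SubScope("max"), tf.Float)
//	out, outMin, outMax := op.QuantizeDownAndShrinkRange(s, q, qMin, qMax, tf.Quint8)
//	_, _, _ = out, outMin, outMax
//
// ```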
31999
32000// QuantizeV2Attr is an optional argument to QuantizeV2.
32001type QuantizeV2Attr func(optionalAttr)
32002
32003// QuantizeV2Mode sets the optional mode attribute to value.
32004// If not specified, defaults to "MIN_COMBINED"
32005func QuantizeV2Mode(value string) QuantizeV2Attr {
32006	return func(m optionalAttr) {
32007		m["mode"] = value
32008	}
32009}
32010
32011// QuantizeV2RoundMode sets the optional round_mode attribute to value.
32012// If not specified, defaults to "HALF_AWAY_FROM_ZERO"
32013func QuantizeV2RoundMode(value string) QuantizeV2Attr {
32014	return func(m optionalAttr) {
32015		m["round_mode"] = value
32016	}
32017}
32018
32019// QuantizeV2NarrowRange sets the optional narrow_range attribute to value.
32020// If not specified, defaults to false
32021func QuantizeV2NarrowRange(value bool) QuantizeV2Attr {
32022	return func(m optionalAttr) {
32023		m["narrow_range"] = value
32024	}
32025}
32026
32027// QuantizeV2Axis sets the optional axis attribute to value.
32028// If not specified, defaults to -1
32029func QuantizeV2Axis(value int64) QuantizeV2Attr {
32030	return func(m optionalAttr) {
32031		m["axis"] = value
32032	}
32033}
32034
32035// QuantizeV2EnsureMinimumRange sets the optional ensure_minimum_range attribute to value.
32036// If not specified, defaults to 0.01
32037func QuantizeV2EnsureMinimumRange(value float32) QuantizeV2Attr {
32038	return func(m optionalAttr) {
32039		m["ensure_minimum_range"] = value
32040	}
32041}
32042
32043// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
32044//
32045// [min_range, max_range] are scalar floats that specify the range for
32046// the 'input' data. The 'mode' attribute controls exactly which calculations are
32047// used to convert the float values to their quantized equivalents.  The
32048// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
32049// when rounding float values to their quantized equivalents.
32050//
32051// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
32052//
32053// ```
32054// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
32055// if T == qint8: out[i] -= (range(T) + 1) / 2.0
32056// ```
32057//
32058// here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
32059//
32060// *MIN_COMBINED Mode Example*
32061//
32062// Assume the input is type float and has a possible range of [0.0, 6.0] and the
32063// output type is quint8 ([0, 255]). The min_range and max_range values should be
32064// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
32065// value of the input by 255/6 and cast to quint8.
32066//
32067// If the output type was qint8 ([-128, 127]), the operation will additionally
32068// subtract each value by 128 prior to casting, so that the range of values aligns
32069// with the range of qint8.
32070//
32071// If the mode is 'MIN_FIRST', then this approach is used:
32072//
32073// ```
32074// num_discrete_values = 1 << (# of bits in T)
32075// range_adjust = num_discrete_values / (num_discrete_values - 1)
32076// range = (range_max - range_min) * range_adjust
32077// range_scale = num_discrete_values / range
32078// quantized = round(input * range_scale) - round(range_min * range_scale) +
32079//             numeric_limits<T>::min()
32081//
32082// quantized = max(quantized, numeric_limits<T>::min())
32083// quantized = min(quantized, numeric_limits<T>::max())
32084// ```
32085//
32086// The biggest difference between this and MIN_COMBINED is that the minimum range
32087// is rounded first, before it's subtracted from the rounded value. With
32088// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
32089// and dequantizing will introduce a larger and larger error.
32090//
32091// *SCALED mode Example*
32092//
32093// `SCALED` mode matches the quantization approach used in
32094// `QuantizeAndDequantize{V2|V3}`.
32095//
32096// If the mode is `SCALED`, the quantization is performed by multiplying each
32097// input value by a scaling_factor.
32098// The scaling_factor is determined from `min_range` and `max_range` to be as large
32099// as possible such that the range from `min_range` to `max_range` is representable
32100// within values of type T.
32101//
32102// ```c++
32103//
32104//	const int min_T = std::numeric_limits<T>::min();
32105//	const int max_T = std::numeric_limits<T>::max();
32106//	const float max_float = std::numeric_limits<float>::max();
32107//
32108//	const float scale_factor_from_min_side =
32109//	    (min_T * min_range > 0) ? min_T / min_range : max_float;
32110//	const float scale_factor_from_max_side =
32111//	    (max_T * max_range > 0) ? max_T / max_range : max_float;
32112//
32113//	const float scale_factor = std::min(scale_factor_from_min_side,
32114//	                                    scale_factor_from_max_side);
32115//
32116// ```
32117//
32118// We next use the scale_factor to adjust min_range and max_range as follows:
32119//
32120// ```c++
32121//
32122//	min_range = min_T / scale_factor;
32123//	max_range = max_T / scale_factor;
32124//
32125// ```
32126//
32127// e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would
32128// compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8.
32129// In this case, min_range would remain -10, but max_range would be adjusted to
32130// 127 / 12.8 = 9.921875.
32131//
32132// So we will quantize input values in the range (-10, 9.921875) to (-128, 127).
32133//
32134// The input tensor can now be quantized by clipping values to the range
32135// `min_range` to `max_range`, then multiplying by scale_factor as follows:
32136//
32137// ```c++
32138// result = round(min(max_range, max(min_range, input)) * scale_factor)
32139// ```
32140//
32141// The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of
32142// this operation. These outputs should be used as the range for any further
32143// calculations.
32144//
32145// *narrow_range (bool) attribute*
32146//
32147// If true, we do not use the minimum quantized value.
32148// i.e. for int8, the quantized output would be restricted to the range
32149// -127..127 instead of the full -128..127 range.
32150// This is provided for compatibility with certain inference backends.
32151// (Only applies to SCALED mode)
32152//
32153// *axis (int) attribute*
32154//
32155// An optional `axis` attribute can specify a dimension index of the input tensor,
32156// such that quantization ranges will be calculated and applied separately for each
32157// slice of the tensor along that dimension. This is useful for per-channel
32158// quantization.
32159//
32160// If axis is specified, min_range and max_range must be 1-D tensors whose size
32161// matches the `axis` dimension of the input and output tensors;
32162// if `axis` is not specified (-1), per-tensor quantization is performed as normal.
32163//
32164// *ensure_minimum_range (float) attribute*
32165//
32166// Ensures the minimum quantization range is at least this value.
32167// The legacy default value for this is 0.01, but it is strongly suggested to
32168// set it to 0 for new uses.
32169//
32170// Arguments:
32171//
32172//	min_range: The minimum value of the quantization range. This value may be adjusted by the
32173//
32174// op depending on other parameters. The adjusted value is written to `output_min`.
32175// If the `axis` attribute is specified, this must be a 1-D tensor whose size
32176// matches the `axis` dimension of the input and output tensors.
32177//
32178//	max_range: The maximum value of the quantization range. This value may be adjusted by the
32179//
32180// op depending on other parameters. The adjusted value is written to `output_max`.
32181// If the `axis` attribute is specified, this must be a 1-D tensor whose size
32182// matches the `axis` dimension of the input and output tensors.
32183//
32184// Returns:
32185//
32186//	output: The quantized data produced from the float input.
32187//	output_min: The final quantization range minimum, used to clip input values before scaling
32188//
32189// and rounding them to quantized values.
32190// If the `axis` attribute is specified, this will be a 1-D tensor whose size
32191// matches the `axis` dimension of the input and output tensors.
32192//
32193//	output_max: The final quantization range maximum, used to clip input values before scaling
32194//
32195// and rounding them to quantized values.
32196// If the `axis` attribute is specified, this will be a 1-D tensor whose size
32197// matches the `axis` dimension of the input and output tensors.
32198func QuantizeV2(scope *Scope, input tf.Output, min_range tf.Output, max_range tf.Output, T tf.DataType, optional ...QuantizeV2Attr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
32199	if scope.Err() != nil {
32200		return
32201	}
32202	attrs := map[string]interface{}{"T": T}
32203	for _, a := range optional {
32204		a(attrs)
32205	}
32206	opspec := tf.OpSpec{
32207		Type: "QuantizeV2",
32208		Input: []tf.Input{
32209			input, min_range, max_range,
32210		},
32211		Attrs: attrs,
32212	}
32213	op := scope.AddOperation(opspec)
32214	return op.Output(0), op.Output(1), op.Output(2)
32215}
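
// Example (illustrative sketch): SCALED-mode quantization to qint8 with a
// narrow range. The adjusted range comes back in outMin/outMax and should be
// used for any further calculations; values are arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	input := op.Const(s, []float32{-10, -1, 0, 5, 9})
//	out, outMin, outMax := op.QuantizeV2(s, input,
//		op.Const(s, float32(-10)), op.Const(s, float32(9)), tf.Qint8,
//		op.QuantizeV2Mode("SCALED"),
//		op.QuantizeV2NarrowRange(true),
//		op.QuantizeV2EnsureMinimumRange(0))
//	_, _, _ = out, outMin, outMax
//
// ```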
32216
32217// QuantizedAddAttr is an optional argument to QuantizedAdd.
32218type QuantizedAddAttr func(optionalAttr)
32219
32220// QuantizedAddToutput sets the optional Toutput attribute to value.
32221// If not specified, defaults to DT_QINT32
32222func QuantizedAddToutput(value tf.DataType) QuantizedAddAttr {
32223	return func(m optionalAttr) {
32224		m["Toutput"] = value
32225	}
32226}
32227
32228// Returns x + y element-wise, working on quantized buffers.
32229//
32230// Arguments:
32231//
32232//	min_x: The float value that the lowest quantized `x` value represents.
32233//	max_x: The float value that the highest quantized `x` value represents.
32234//	min_y: The float value that the lowest quantized `y` value represents.
32235//	max_y: The float value that the highest quantized `y` value represents.
32236//
32237// Returns:
32238//
32239//	z
32240//	min_z: The float value that the lowest quantized output value represents.
32241//	max_z: The float value that the highest quantized output value represents.
32242//
32243// *NOTE*: `QuantizedAdd` supports limited forms of broadcasting. More about
32244// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
32245func QuantizedAdd(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedAddAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
32246	if scope.Err() != nil {
32247		return
32248	}
32249	attrs := map[string]interface{}{}
32250	for _, a := range optional {
32251		a(attrs)
32252	}
32253	opspec := tf.OpSpec{
32254		Type: "QuantizedAdd",
32255		Input: []tf.Input{
32256			x, y, min_x, max_x, min_y, max_y,
32257		},
32258		Attrs: attrs,
32259	}
32260	op := scope.AddOperation(opspec)
32261	return op.Output(0), op.Output(1), op.Output(2)
32262}
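
// Example (illustrative sketch): adding two quantized buffers produced by
// QuantizeV2. The default Toutput is qint32; min_z/max_z give its float
// interpretation. Values are arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	x, xMin, xMax := op.QuantizeV2(s.SubScope("x"), op.Const(s, []float32{0, 1, 2}),
//		op.Const(s, float32(0)), op.Const(s, float32(6)), tf.Quint8)
//	y, yMin, yMax := op.QuantizeV2(s.SubScope("y"), op.Const(s, []float32{3, 4, 5}),
//		op.Const(s, float32(0)), op.Const(s, float32(6)), tf.Quint8)
//	z, zMin, zMax := op.QuantizedAdd(s, x, y, xMin, xMax, yMin, yMax)
//	_, _, _ = z, zMin, zMax
//
// ```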
32263
32264// Produces the average pool of the input tensor for quantized types.
32265//
32266// Arguments:
32267//
32268//	input: 4-D with shape `[batch, height, width, channels]`.
32269//	min_input: The float value that the lowest quantized input value represents.
32270//	max_input: The float value that the highest quantized input value represents.
32271//	ksize: The size of the window for each dimension of the input tensor.
32272//
32273// The length must be 4 to match the number of dimensions of the input.
32274//
32275//	strides: The stride of the sliding window for each dimension of the input
32276//
32277// tensor.  The length must be 4 to match the number of dimensions of the input.
32278//
32279//	padding: The type of padding algorithm to use.
32280//
32281// Returns:
32282//
32283//	output
32284//	min_output: The float value that the lowest quantized output value represents.
32285//	max_output: The float value that the highest quantized output value represents.
32286func QuantizedAvgPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32287	if scope.Err() != nil {
32288		return
32289	}
32290	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
32291	opspec := tf.OpSpec{
32292		Type: "QuantizedAvgPool",
32293		Input: []tf.Input{
32294			input, min_input, max_input,
32295		},
32296		Attrs: attrs,
32297	}
32298	op := scope.AddOperation(opspec)
32299	return op.Output(0), op.Output(1), op.Output(2)
32300}
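
// Example (illustrative sketch): a 2x2 average pool over a quantized NHWC
// input. ksize and strides must each have length 4, one entry per input
// dimension; the placeholder stands in for a real quint8 producer.
//
// ```go
//
//	s := op.NewScope()
//	in := op.Placeholder(s, tf.Quint8) // 4-D [batch, height, width, channels]
//	out, outMin, outMax := op.QuantizedAvgPool(s, in,
//		op.Const(s, float32(0)), op.Const(s, float32(6)),
//		[]int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
//	_, _, _ = out, outMin, outMax
//
// ```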
32301
32302// Quantized Batch normalization.
32303//
32304// This op is deprecated and will be removed in the future. Prefer
32305// `tf.nn.batch_normalization`.
32306//
32307// Arguments:
32308//
32309//	t: A 4D input Tensor.
32310//	t_min: The value represented by the lowest quantized input.
32311//	t_max: The value represented by the highest quantized input.
32312//	m: A 1D mean Tensor with size matching the last dimension of t.
32313//
32314// This is the first output from tf.nn.moments,
32315// or a saved moving average thereof.
32316//
32317//	m_min: The value represented by the lowest quantized mean.
32318//	m_max: The value represented by the highest quantized mean.
32319//	v: A 1D variance Tensor with size matching the last dimension of t.
32320//
32321// This is the second output from tf.nn.moments,
32322// or a saved moving average thereof.
32323//
32324//	v_min: The value represented by the lowest quantized variance.
32325//	v_max: The value represented by the highest quantized variance.
32326//	beta: A 1D beta Tensor with size matching the last dimension of t.
32327//
32328// An offset to be added to the normalized tensor.
32329//
32330//	beta_min: The value represented by the lowest quantized offset.
32331//	beta_max: The value represented by the highest quantized offset.
32332//	gamma: A 1D gamma Tensor with size matching the last dimension of t.
32333//
32334// If "scale_after_normalization" is true, this tensor will be multiplied
32335// with the normalized tensor.
32336//
32337//	gamma_min: The value represented by the lowest quantized gamma.
32338//	gamma_max: The value represented by the highest quantized gamma.
32339//
32340//	variance_epsilon: A small float number to avoid dividing by 0.
32341//	scale_after_normalization: A bool indicating whether the resulting tensor
32342//
32343// needs to be multiplied with gamma.
32344func QuantizedBatchNormWithGlobalNormalization(scope *Scope, t tf.Output, t_min tf.Output, t_max tf.Output, m tf.Output, m_min tf.Output, m_max tf.Output, v tf.Output, v_min tf.Output, v_max tf.Output, beta tf.Output, beta_min tf.Output, beta_max tf.Output, gamma tf.Output, gamma_min tf.Output, gamma_max tf.Output, out_type tf.DataType, variance_epsilon float32, scale_after_normalization bool) (result tf.Output, result_min tf.Output, result_max tf.Output) {
32345	if scope.Err() != nil {
32346		return
32347	}
32348	attrs := map[string]interface{}{"out_type": out_type, "variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization}
32349	opspec := tf.OpSpec{
32350		Type: "QuantizedBatchNormWithGlobalNormalization",
32351		Input: []tf.Input{
32352			t, t_min, t_max, m, m_min, m_max, v, v_min, v_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max,
32353		},
32354		Attrs: attrs,
32355	}
32356	op := scope.AddOperation(opspec)
32357	return op.Output(0), op.Output(1), op.Output(2)
32358}
32359
32360// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
32361//
32362// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
32363//
32364// Arguments:
32365//
32366//	bias: A 1D bias Tensor with size matching the last dimension of 'input'.
32367//	min_input: The float value that the lowest quantized input value represents.
32368//	max_input: The float value that the highest quantized input value represents.
32369//	min_bias: The float value that the lowest quantized bias value represents.
32370//	max_bias: The float value that the highest quantized bias value represents.
32371//
32372// Returns:
32373//
32374//	output
32375//	min_out: The float value that the lowest quantized output value represents.
32376//	max_out: The float value that the highest quantized output value represents.
32377func QuantizedBiasAdd(scope *Scope, input tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_bias tf.Output, max_bias tf.Output, out_type tf.DataType) (output tf.Output, min_out tf.Output, max_out tf.Output) {
32378	if scope.Err() != nil {
32379		return
32380	}
32381	attrs := map[string]interface{}{"out_type": out_type}
32382	opspec := tf.OpSpec{
32383		Type: "QuantizedBiasAdd",
32384		Input: []tf.Input{
32385			input, bias, min_input, max_input, min_bias, max_bias,
32386		},
32387		Attrs: attrs,
32388	}
32389	op := scope.AddOperation(opspec)
32390	return op.Output(0), op.Output(1), op.Output(2)
32391}
32392
32393// Concatenates quantized tensors along one dimension.
32394//
32395// Arguments:
32396//
32397//	concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
32398//
32399// range [0, rank(values)).
32400//
32401//	values: The `N` Tensors to concatenate. Their ranks and types must match,
32402//
32403// and their sizes must match in all dimensions except `concat_dim`.
32404//
32405//	input_mins: The minimum scalar values for each of the input tensors.
32406//	input_maxes: The maximum scalar values for each of the input tensors.
32407//
32408// Returns:
32409//
32410//	output: A `Tensor` with the concatenation of values stacked along the
32411//
32412// `concat_dim` dimension.  This tensor's shape matches that of `values` except
32413// in `concat_dim` where it has the sum of the sizes.
32414//
32415//	output_min: The float value that the minimum quantized output value represents.
32416//	output_max: The float value that the maximum quantized output value represents.
32417func QuantizedConcat(scope *Scope, concat_dim tf.Output, values []tf.Output, input_mins []tf.Output, input_maxes []tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
32418	if scope.Err() != nil {
32419		return
32420	}
32421	opspec := tf.OpSpec{
32422		Type: "QuantizedConcat",
32423		Input: []tf.Input{
32424			concat_dim, tf.OutputList(values), tf.OutputList(input_mins), tf.OutputList(input_maxes),
32425		},
32426	}
32427	op := scope.AddOperation(opspec)
32428	return op.Output(0), op.Output(1), op.Output(2)
32429}
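
// Example (illustrative sketch): concatenating two quantized tensors. The
// values, input_mins and input_maxes arguments are parallel lists with one
// entry per input; the placeholders stand in for real quint8 producers.
//
// ```go
//
//	s := op.NewScope()
//	a := op.Placeholder(s.SubScope("a"), tf.Quint8)
//	b := op.Placeholder(s.SubScope("b"), tf.Quint8)
//	zero := op.Const(s, float32(0))
//	six := op.Const(s, float32(6))
//	out, outMin, outMax := op.QuantizedConcat(s, op.Const(s, int32(0)),
//		[]tf.Output{a, b}, []tf.Output{zero, zero}, []tf.Output{six, six})
//	_, _, _ = out, outMin, outMax
//
// ```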
32430
32431// QuantizedConv2DAttr is an optional argument to QuantizedConv2D.
32432type QuantizedConv2DAttr func(optionalAttr)
32433
32434// QuantizedConv2DOutType sets the optional out_type attribute to value.
32435// If not specified, defaults to DT_QINT32
32436func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr {
32437	return func(m optionalAttr) {
32438		m["out_type"] = value
32439	}
32440}
32441
32442// QuantizedConv2DDilations sets the optional dilations attribute to value.
32443//
32444// value: 1-D tensor of length 4.  The dilation factor for each dimension of
32445// `input`. If set to k > 1, there will be k-1 skipped cells between each
32446// filter element on that dimension. The dimension order is determined by the
32447// value of `data_format`, see above for details. Dilations in the batch and
32448// depth dimensions must be 1.
32449// If not specified, defaults to [1, 1, 1, 1]
32450func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr {
32451	return func(m optionalAttr) {
32452		m["dilations"] = value
32453	}
32454}
32455
32456// Computes a 2D convolution given quantized 4D input and filter tensors.
32457//
32458// The inputs are quantized tensors where the lowest value represents the real
32459// number of the associated minimum, and the highest represents the maximum.
32460// This means that you can only interpret the quantized output in the same way, by
32461// taking the returned minimum and maximum values into account.
32462//
32463// Arguments:
32464//
32465//	filter: filter's input_depth dimension must match input's depth dimension.
32466//	min_input: The float value that the lowest quantized input value represents.
32467//	max_input: The float value that the highest quantized input value represents.
32468//	min_filter: The float value that the lowest quantized filter value represents.
32469//	max_filter: The float value that the highest quantized filter value represents.
32470//	strides: The stride of the sliding window for each dimension of the input
32471//
32472// tensor.
32473//
32474//	padding: The type of padding algorithm to use.
32475//
32476// Returns:
32477//
32478//	output
32479//	min_output: The float value that the lowest quantized output value represents.
32480//	max_output: The float value that the highest quantized output value represents.
32481func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32482	if scope.Err() != nil {
32483		return
32484	}
32485	attrs := map[string]interface{}{"strides": strides, "padding": padding}
32486	for _, a := range optional {
32487		a(attrs)
32488	}
32489	opspec := tf.OpSpec{
32490		Type: "QuantizedConv2D",
32491		Input: []tf.Input{
32492			input, filter, min_input, max_input, min_filter, max_filter,
32493		},
32494		Attrs: attrs,
32495	}
32496	op := scope.AddOperation(opspec)
32497	return op.Output(0), op.Output(1), op.Output(2)
32498}
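
// Example (illustrative sketch): a quantized convolution whose 32-bit output
// must be interpreted via the returned min/max. The placeholders stand in for
// real quint8 producers; shapes and ranges are arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	in := op.Placeholder(s.SubScope("in"), tf.Quint8)     // [batch, h, w, in_depth]
//	filt := op.Placeholder(s.SubScope("filt"), tf.Quint8) // [fh, fw, in_depth, out_depth]
//	zero, six := op.Const(s, float32(0)), op.Const(s, float32(6))
//	out, outMin, outMax := op.QuantizedConv2D(s, in, filt, zero, six, zero, six,
//		[]int64{1, 1, 1, 1}, "SAME",
//		op.QuantizedConv2DOutType(tf.Qint32))
//	_, _, _ = out, outMin, outMax
//
// ```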
32499
32500// QuantizedConv2DPerChannelAttr is an optional argument to QuantizedConv2DPerChannel.
32501type QuantizedConv2DPerChannelAttr func(optionalAttr)
32502
32503// QuantizedConv2DPerChannelOutType sets the optional out_type attribute to value.
32504//
32505// value: The quantized type of output tensor that needs to be converted.
32506// If not specified, defaults to DT_QINT32
32507func QuantizedConv2DPerChannelOutType(value tf.DataType) QuantizedConv2DPerChannelAttr {
32508	return func(m optionalAttr) {
32509		m["out_type"] = value
32510	}
32511}
32512
32513// QuantizedConv2DPerChannelDilations sets the optional dilations attribute to value.
32514//
32515// value: list of dilation values.
32516// If not specified, defaults to [1, 1, 1, 1]
32517func QuantizedConv2DPerChannelDilations(value []int64) QuantizedConv2DPerChannelAttr {
32518	return func(m optionalAttr) {
32519		m["dilations"] = value
32520	}
32521}
32522
32523// Computes QuantizedConv2D per channel.
32524//
32525// Arguments:
32526//
32527//	input: The original input tensor.
32528//	filter: The original filter tensor.
32529//	min_input: The minimum value of the input tensor.
32530//	max_input: The maximum value of the input tensor.
32531//	min_filter: The minimum value of the filter tensor.
32532//	max_filter: The maximum value of the filter tensor.
32533//	strides: list of stride values.
32534//
32535// Returns:
32536//
32537//	output: The output tensor.
32538//	min_output: The minimum value of the final output tensor.
32539//	max_output: The maximum value of the final output tensor.
32540func QuantizedConv2DPerChannel(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DPerChannelAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32541	if scope.Err() != nil {
32542		return
32543	}
32544	attrs := map[string]interface{}{"strides": strides, "padding": padding}
32545	for _, a := range optional {
32546		a(attrs)
32547	}
32548	opspec := tf.OpSpec{
32549		Type: "QuantizedConv2DPerChannel",
32550		Input: []tf.Input{
32551			input, filter, min_input, max_input, min_filter, max_filter,
32552		},
32553		Attrs: attrs,
32554	}
32555	op := scope.AddOperation(opspec)
32556	return op.Output(0), op.Output(1), op.Output(2)
32557}
32558
32559// QuantizedDepthwiseConv2DAttr is an optional argument to QuantizedDepthwiseConv2D.
32560type QuantizedDepthwiseConv2DAttr func(optionalAttr)
32561
32562// QuantizedDepthwiseConv2DOutType sets the optional out_type attribute to value.
32563//
32564// value: The type of the output.
32565// If not specified, defaults to DT_QINT32
32566func QuantizedDepthwiseConv2DOutType(value tf.DataType) QuantizedDepthwiseConv2DAttr {
32567	return func(m optionalAttr) {
32568		m["out_type"] = value
32569	}
32570}
32571
32572// QuantizedDepthwiseConv2DDilations sets the optional dilations attribute to value.
32573//
32574// value: List of dilation values.
32575// If not specified, defaults to [1, 1, 1, 1]
32576func QuantizedDepthwiseConv2DDilations(value []int64) QuantizedDepthwiseConv2DAttr {
32577	return func(m optionalAttr) {
32578		m["dilations"] = value
32579	}
32580}
32581
32582// Computes quantized depthwise Conv2D.
32583//
32584// Arguments:
32585//
32586//	input: The original input tensor.
32587//	filter: The original filter tensor.
32588//	min_input: The float value that the minimum quantized input value represents.
32589//	max_input: The float value that the maximum quantized input value represents.
32590//	min_filter: The float value that the minimum quantized filter value represents.
32591//	max_filter: The float value that the maximum quantized filter value represents.
32592//	strides: List of stride values.
32593//
32594// Returns:
32595//
32596//	output: The output tensor.
32597//	min_output: The float value that the minimum quantized output value represents.
32598//	max_output: The float value that the maximum quantized output value represents.
32599func QuantizedDepthwiseConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32600	if scope.Err() != nil {
32601		return
32602	}
32603	attrs := map[string]interface{}{"strides": strides, "padding": padding}
32604	for _, a := range optional {
32605		a(attrs)
32606	}
32607	opspec := tf.OpSpec{
32608		Type: "QuantizedDepthwiseConv2D",
32609		Input: []tf.Input{
32610			input, filter, min_input, max_input, min_filter, max_filter,
32611		},
32612		Attrs: attrs,
32613	}
32614	op := scope.AddOperation(opspec)
32615	return op.Output(0), op.Output(1), op.Output(2)
32616}
32617
32618// QuantizedDepthwiseConv2DWithBiasAttr is an optional argument to QuantizedDepthwiseConv2DWithBias.
32619type QuantizedDepthwiseConv2DWithBiasAttr func(optionalAttr)
32620
32621// QuantizedDepthwiseConv2DWithBiasOutType sets the optional out_type attribute to value.
32622//
32623// value: The type of the output.
32624// If not specified, defaults to DT_QINT32
32625func QuantizedDepthwiseConv2DWithBiasOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAttr {
32626	return func(m optionalAttr) {
32627		m["out_type"] = value
32628	}
32629}
32630
32631// QuantizedDepthwiseConv2DWithBiasDilations sets the optional dilations attribute to value.
32632//
32633// value: List of dilation values.
32634// If not specified, defaults to [1, 1, 1, 1]
32635func QuantizedDepthwiseConv2DWithBiasDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAttr {
32636	return func(m optionalAttr) {
32637		m["dilations"] = value
32638	}
32639}
32640
32641// Computes quantized depthwise Conv2D with Bias.
32642//
32643// Arguments:
32644//
32645//	input: The original input tensor.
32646//	filter: The original filter tensor.
32647//	bias: The original bias tensor.
32648//	min_input: The float value that the minimum quantized input value represents.
32649//	max_input: The float value that the maximum quantized input value represents.
32650//	min_filter: The float value that the minimum quantized filter value represents.
32651//	max_filter: The float value that the maximum quantized filter value represents.
32652//	strides: List of stride values.
32653//
32654// Returns:
32655//
32656//	output: The output tensor.
32657//	min_output: The float value that the minimum quantized output value represents.
32658//	max_output: The float value that the maximum quantized output value represents.
32659func QuantizedDepthwiseConv2DWithBias(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32660	if scope.Err() != nil {
32661		return
32662	}
32663	attrs := map[string]interface{}{"strides": strides, "padding": padding}
32664	for _, a := range optional {
32665		a(attrs)
32666	}
32667	opspec := tf.OpSpec{
32668		Type: "QuantizedDepthwiseConv2DWithBias",
32669		Input: []tf.Input{
32670			input, filter, bias, min_input, max_input, min_filter, max_filter,
32671		},
32672		Attrs: attrs,
32673	}
32674	op := scope.AddOperation(opspec)
32675	return op.Output(0), op.Output(1), op.Output(2)
32676}
32677
32678// QuantizedDepthwiseConv2DWithBiasAndReluAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndRelu.
32679type QuantizedDepthwiseConv2DWithBiasAndReluAttr func(optionalAttr)
32680
32681// QuantizedDepthwiseConv2DWithBiasAndReluOutType sets the optional out_type attribute to value.
32682//
32683// value: The type of the output.
32684// If not specified, defaults to DT_QINT32
32685func QuantizedDepthwiseConv2DWithBiasAndReluOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
32686	return func(m optionalAttr) {
32687		m["out_type"] = value
32688	}
32689}
32690
32691// QuantizedDepthwiseConv2DWithBiasAndReluDilations sets the optional dilations attribute to value.
32692//
32693// value: List of dilation values.
32694// If not specified, defaults to [1, 1, 1, 1]
32695func QuantizedDepthwiseConv2DWithBiasAndReluDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
32696	return func(m optionalAttr) {
32697		m["dilations"] = value
32698	}
32699}
32700
32701// QuantizedDepthwiseConv2DWithBiasAndReluPaddingList sets the optional padding_list attribute to value.
32702// If not specified, defaults to {}
32703func QuantizedDepthwiseConv2DWithBiasAndReluPaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAttr {
32704	return func(m optionalAttr) {
32705		m["padding_list"] = value
32706	}
32707}
32708
32709// Computes quantized depthwise Conv2D with Bias and Relu.
32710//
32711// Arguments:
32712//
32713//	input: The original input tensor.
32714//	filter: The original filter tensor.
32715//	bias: The original bias tensor.
32716//	min_input: The float value that the minimum quantized input value represents.
32717//	max_input: The float value that the maximum quantized input value represents.
32718//	min_filter: The float value that the minimum quantized filter value represents.
32719//	max_filter: The float value that the maximum quantized filter value represents.
32720//	strides: List of stride values.
32721//
32722// Returns:
32723//
32724//	output: The output tensor.
32725//	min_output: The float value that the minimum quantized output value represents.
32726//	max_output: The float value that the maximum quantized output value represents.
32727func QuantizedDepthwiseConv2DWithBiasAndRelu(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32728	if scope.Err() != nil {
32729		return
32730	}
32731	attrs := map[string]interface{}{"strides": strides, "padding": padding}
32732	for _, a := range optional {
32733		a(attrs)
32734	}
32735	opspec := tf.OpSpec{
32736		Type: "QuantizedDepthwiseConv2DWithBiasAndRelu",
32737		Input: []tf.Input{
32738			input, filter, bias, min_input, max_input, min_filter, max_filter,
32739		},
32740		Attrs: attrs,
32741	}
32742	op := scope.AddOperation(opspec)
32743	return op.Output(0), op.Output(1), op.Output(2)
32744}
32745
32746// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize.
32747type QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr func(optionalAttr)
32748
32749// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType sets the optional out_type attribute to value.
32750//
32751// value: The type of the output.
32752// If not specified, defaults to DT_QUINT8
32753func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeOutType(value tf.DataType) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
32754	return func(m optionalAttr) {
32755		m["out_type"] = value
32756	}
32757}
32758
32759// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations sets the optional dilations attribute to value.
32760//
32761// value: List of dilation values.
32762// If not specified, defaults to [1, 1, 1, 1]
32763func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeDilations(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
32764	return func(m optionalAttr) {
32765		m["dilations"] = value
32766	}
32767}
32768
32769// QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList sets the optional padding_list attribute to value.
32770// If not specified, defaults to {}
32771func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizePaddingList(value []int64) QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr {
32772	return func(m optionalAttr) {
32773		m["padding_list"] = value
32774	}
32775}
32776
32777// Computes quantized depthwise Conv2D with Bias, Relu and Requantize.
32778//
32779// Arguments:
32780//
32781//	input: The original input tensor.
32782//	filter: The original filter tensor.
32783//	bias: The original bias tensor.
32784//	min_input: The float value that the minimum quantized input value represents.
32785//	max_input: The float value that the maximum quantized input value represents.
32786//	min_filter: The float value that the minimum quantized filter value represents.
32787//	max_filter: The float value that the maximum quantized filter value represents.
32788//	min_freezed_output: The minimum float value of the output tensor.
32789//	max_freezed_output: The maximum float value of the output tensor.
32790//	strides: List of stride values.
32791//
32792// Returns:
32793//
32794//	output: The output tensor.
32795//	min_output: The float value that the minimum quantized output value represents.
32796//	max_output: The float value that the maximum quantized output value represents.
32797func QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(scope *Scope, input tf.Output, filter tf.Output, bias tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, strides []int64, padding string, optional ...QuantizedDepthwiseConv2DWithBiasAndReluAndRequantizeAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) {
32798	if scope.Err() != nil {
32799		return
32800	}
32801	attrs := map[string]interface{}{"strides": strides, "padding": padding}
32802	for _, a := range optional {
32803		a(attrs)
32804	}
32805	opspec := tf.OpSpec{
32806		Type: "QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize",
32807		Input: []tf.Input{
32808			input, filter, bias, min_input, max_input, min_filter, max_filter, min_freezed_output, max_freezed_output,
32809		},
32810		Attrs: attrs,
32811	}
32812	op := scope.AddOperation(opspec)
32813	return op.Output(0), op.Output(1), op.Output(2)
32814}
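
// Example (illustrative sketch): the fully fused variant. min/max_freezed_output
// pin the requantized output range, so the result is emitted directly as quint8
// instead of qint32. The placeholders stand in for real producers; values are
// arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	in := op.Placeholder(s.SubScope("in"), tf.Quint8)
//	filt := op.Placeholder(s.SubScope("filt"), tf.Qint8)
//	bias := op.Placeholder(s.SubScope("bias"), tf.Float)
//	zero, six := op.Const(s, float32(0)), op.Const(s, float32(6))
//	out, outMin, outMax := op.QuantizedDepthwiseConv2DWithBiasAndReluAndRequantize(
//		s, in, filt, bias, zero, six, op.Const(s, float32(-1)), op.Const(s, float32(1)),
//		zero, six, []int64{1, 1, 1, 1}, "SAME")
//	_, _, _ = out, outMin, outMax
//
// ```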
32815
32816// QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm.
32817type QuantizedInstanceNormAttr func(optionalAttr)
32818
32819// QuantizedInstanceNormOutputRangeGiven sets the optional output_range_given attribute to value.
32820//
32821// value: If True, `given_y_min` and `given_y_max`
32822// are used as the output range. Otherwise,
32823// the implementation computes the output range.
32824// If not specified, defaults to false
32825func QuantizedInstanceNormOutputRangeGiven(value bool) QuantizedInstanceNormAttr {
32826	return func(m optionalAttr) {
32827		m["output_range_given"] = value
32828	}
32829}
32830
32831// QuantizedInstanceNormGivenYMin sets the optional given_y_min attribute to value.
32832//
32833// value: Output in `y_min` if `output_range_given` is True.
32834// If not specified, defaults to 0
32835func QuantizedInstanceNormGivenYMin(value float32) QuantizedInstanceNormAttr {
32836	return func(m optionalAttr) {
32837		m["given_y_min"] = value
32838	}
32839}
32840
32841// QuantizedInstanceNormGivenYMax sets the optional given_y_max attribute to value.
32842//
32843// value: Output in `y_max` if `output_range_given` is True.
32844// If not specified, defaults to 0
32845func QuantizedInstanceNormGivenYMax(value float32) QuantizedInstanceNormAttr {
32846	return func(m optionalAttr) {
32847		m["given_y_max"] = value
32848	}
32849}
32850
32851// QuantizedInstanceNormVarianceEpsilon sets the optional variance_epsilon attribute to value.
32852//
32853// value: A small float number to avoid dividing by 0.
32854// If not specified, defaults to 1e-05
32855func QuantizedInstanceNormVarianceEpsilon(value float32) QuantizedInstanceNormAttr {
32856	return func(m optionalAttr) {
32857		m["variance_epsilon"] = value
32858	}
32859}
32860
32861// QuantizedInstanceNormMinSeparation sets the optional min_separation attribute to value.
32862//
32863// value: Minimum value of `y_max - y_min`
32864// If not specified, defaults to 0.001
32865func QuantizedInstanceNormMinSeparation(value float32) QuantizedInstanceNormAttr {
32866	return func(m optionalAttr) {
32867		m["min_separation"] = value
32868	}
32869}
32870
32871// Quantized Instance normalization.
32872//
32873// Arguments:
32874//
32875//	x: A 4D input Tensor.
32876//	x_min: The value represented by the lowest quantized input.
32877//	x_max: The value represented by the highest quantized input.
32878//
32879// Returns:
32880//
32881//	y: A 4D Tensor.
32882//	y_min: The value represented by the lowest quantized output.
32883//	y_max: The value represented by the highest quantized output.
32884func QuantizedInstanceNorm(scope *Scope, x tf.Output, x_min tf.Output, x_max tf.Output, optional ...QuantizedInstanceNormAttr) (y tf.Output, y_min tf.Output, y_max tf.Output) {
32885	if scope.Err() != nil {
32886		return
32887	}
32888	attrs := map[string]interface{}{}
32889	for _, a := range optional {
32890		a(attrs)
32891	}
32892	opspec := tf.OpSpec{
32893		Type: "QuantizedInstanceNorm",
32894		Input: []tf.Input{
32895			x, x_min, x_max,
32896		},
32897		Attrs: attrs,
32898	}
32899	op := scope.AddOperation(opspec)
32900	return op.Output(0), op.Output(1), op.Output(2)
32901}
32902
32903// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
32904type QuantizedMatMulAttr func(optionalAttr)
32905
32906// QuantizedMatMulToutput sets the optional Toutput attribute to value.
32907// If not specified, defaults to DT_QINT32
32908func QuantizedMatMulToutput(value tf.DataType) QuantizedMatMulAttr {
32909	return func(m optionalAttr) {
32910		m["Toutput"] = value
32911	}
32912}
32913
32914// QuantizedMatMulTransposeA sets the optional transpose_a attribute to value.
32915//
32916// value: If true, `a` is transposed before multiplication.
32917// If not specified, defaults to false
32918func QuantizedMatMulTransposeA(value bool) QuantizedMatMulAttr {
32919	return func(m optionalAttr) {
32920		m["transpose_a"] = value
32921	}
32922}
32923
32924// QuantizedMatMulTransposeB sets the optional transpose_b attribute to value.
32925//
32926// value: If true, `b` is transposed before multiplication.
32927// If not specified, defaults to false
32928func QuantizedMatMulTransposeB(value bool) QuantizedMatMulAttr {
32929	return func(m optionalAttr) {
32930		m["transpose_b"] = value
32931	}
32932}
32933
32934// QuantizedMatMulTactivation sets the optional Tactivation attribute to value.
32935//
32936// value: The type of output produced by the activation function
32937// following this operation.
32938// If not specified, defaults to DT_QUINT8
32939func QuantizedMatMulTactivation(value tf.DataType) QuantizedMatMulAttr {
32940	return func(m optionalAttr) {
32941		m["Tactivation"] = value
32942	}
32943}
32944
32945// Perform a quantized matrix multiplication of `a` by the matrix `b`.
32946//
32947// The inputs must be two-dimensional matrices and the inner dimension of
32948// `a` (after being transposed if `transpose_a` is non-zero) must match the
32949// outer dimension of `b` (after being transposed if `transpose_b` is
32950// non-zero).
32951//
32952// Arguments:
32953//
32954//	a: Must be a two-dimensional tensor.
32955//	b: Must be a two-dimensional tensor.
32956//	min_a: The float value that the lowest quantized `a` value represents.
32957//	max_a: The float value that the highest quantized `a` value represents.
32958//	min_b: The float value that the lowest quantized `b` value represents.
32959//	max_b: The float value that the highest quantized `b` value represents.
32960//
32961// Returns:
32962//
32963//	out
32964//	min_out: The float value that the lowest quantized output value represents.
32965//	max_out: The float value that the highest quantized output value represents.
32966func QuantizedMatMul(scope *Scope, a tf.Output, b tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
32967	if scope.Err() != nil {
32968		return
32969	}
32970	attrs := map[string]interface{}{}
32971	for _, a := range optional {
32972		a(attrs)
32973	}
32974	opspec := tf.OpSpec{
32975		Type: "QuantizedMatMul",
32976		Input: []tf.Input{
32977			a, b, min_a, max_a, min_b, max_b,
32978		},
32979		Attrs: attrs,
32980	}
32981	op := scope.AddOperation(opspec)
32982	return op.Output(0), op.Output(1), op.Output(2)
32983}
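
// Example (illustrative sketch): quantizing two float matrices and multiplying
// them. The qint32 result can be fed to QuantizeDownAndShrinkRange (above) to
// obtain an 8-bit output. Values are arbitrary.
//
// ```go
//
//	s := op.NewScope()
//	a, aMin, aMax := op.QuantizeV2(s.SubScope("a"), op.Const(s, [][]float32{{1, 2}, {3, 4}}),
//		op.Const(s, float32(0)), op.Const(s, float32(4)), tf.Quint8)
//	b, bMin, bMax := op.QuantizeV2(s.SubScope("b"), op.Const(s, [][]float32{{1, 0}, {0, 1}}),
//		op.Const(s, float32(0)), op.Const(s, float32(1)), tf.Quint8)
//	out, outMin, outMax := op.QuantizedMatMul(s, a, b, aMin, aMax, bMin, bMax)
//	_, _, _ = out, outMin, outMax
//
// ```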
32984
32985// QuantizedMatMulWithBiasAttr is an optional argument to QuantizedMatMulWithBias.
32986type QuantizedMatMulWithBiasAttr func(optionalAttr)
32987
32988// QuantizedMatMulWithBiasToutput sets the optional Toutput attribute to value.
32989// If not specified, defaults to DT_QINT32
32990func QuantizedMatMulWithBiasToutput(value tf.DataType) QuantizedMatMulWithBiasAttr {
32991	return func(m optionalAttr) {
32992		m["Toutput"] = value
32993	}
32994}
32995
32996// QuantizedMatMulWithBiasTransposeA sets the optional transpose_a attribute to value.
32997//
32998// value: If true, `a` is transposed before multiplication.
32999// If not specified, defaults to false
33000func QuantizedMatMulWithBiasTransposeA(value bool) QuantizedMatMulWithBiasAttr {
33001	return func(m optionalAttr) {
33002		m["transpose_a"] = value
33003	}
33004}
33005
33006// QuantizedMatMulWithBiasTransposeB sets the optional transpose_b attribute to value.
33007//
33008// value: If true, `b` is transposed before multiplication.
33009// If not specified, defaults to false
33010func QuantizedMatMulWithBiasTransposeB(value bool) QuantizedMatMulWithBiasAttr {
33011	return func(m optionalAttr) {
33012		m["transpose_b"] = value
33013	}
33014}
33015
33016// QuantizedMatMulWithBiasInputQuantMode sets the optional input_quant_mode attribute to value.
33017//
33018// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
33019// If not specified, defaults to "MIN_FIRST"
33020func QuantizedMatMulWithBiasInputQuantMode(value string) QuantizedMatMulWithBiasAttr {
33021	return func(m optionalAttr) {
33022		m["input_quant_mode"] = value
33023	}
33024}
33025
33026// Performs a quantized matrix multiplication of `a` by the matrix `b` with bias
33027// add.
33028//
33029// The inputs must be two-dimensional matrices and a 1-D bias vector. The inner
33030// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
33031// match the outer dimension of `b` (after being transposed if `transpose_b` is
33032// non-zero). The bias values are then broadcast-added to the matrix
33033// multiplication result; the bias size must match the inner dimension of `b`.
33034//
33035// Arguments:
33036//
33037//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
33038//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
33039//	bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
33040//
33041// transposed if `transpose_b` is non-zero).
33042//
33043//	min_a: The float value that the lowest quantized `a` value represents.
33044//	max_a: The float value that the highest quantized `a` value represents.
33045//	min_b: The float value that the lowest quantized `b` value represents.
33046//	max_b: The float value that the highest quantized `b` value represents.
33047//
33048// Returns:
33049//
33050//	out
33051//	min_out: The float value that the lowest quantized output value represents.
33052//	max_out: The float value that the highest quantized output value represents.
33053func QuantizedMatMulWithBias(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
33054	if scope.Err() != nil {
33055		return
33056	}
33057	attrs := map[string]interface{}{}
33058	for _, a := range optional {
33059		a(attrs)
33060	}
33061	opspec := tf.OpSpec{
33062		Type: "QuantizedMatMulWithBias",
33063		Input: []tf.Input{
33064			a, b, bias, min_a, max_a, min_b, max_b,
33065		},
33066		Attrs: attrs,
33067	}
33068	op := scope.AddOperation(opspec)
33069	return op.Output(0), op.Output(1), op.Output(2)
33070}
33071
33072// QuantizedMatMulWithBiasAndReluAttr is an optional argument to QuantizedMatMulWithBiasAndRelu.
33073type QuantizedMatMulWithBiasAndReluAttr func(optionalAttr)
33074
33075// QuantizedMatMulWithBiasAndReluToutput sets the optional Toutput attribute to value.
33076// If not specified, defaults to DT_QINT32
33077func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr {
33078	return func(m optionalAttr) {
33079		m["Toutput"] = value
33080	}
33081}
33082
33083// QuantizedMatMulWithBiasAndReluTransposeA sets the optional transpose_a attribute to value.
33084//
33085// value: If true, `a` is transposed before multiplication.
33086// If not specified, defaults to false
33087func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr {
33088	return func(m optionalAttr) {
33089		m["transpose_a"] = value
33090	}
33091}
33092
33093// QuantizedMatMulWithBiasAndReluTransposeB sets the optional transpose_b attribute to value.
33094//
33095// value: If true, `b` is transposed before multiplication.
33096// If not specified, defaults to false
33097func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr {
33098	return func(m optionalAttr) {
33099		m["transpose_b"] = value
33100	}
33101}
33102
33103// QuantizedMatMulWithBiasAndReluInputQuantMode sets the optional input_quant_mode attribute to value.
33104//
33105// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
33106// If not specified, defaults to "MIN_FIRST"
33107func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr {
33108	return func(m optionalAttr) {
33109		m["input_quant_mode"] = value
33110	}
33111}
33112
33113// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias
33114// add and relu fusion.
33115//
33116// The inputs must be two-dimensional matrices and a 1-D bias vector. The inner
33117// dimension of `a` (after being transposed if `transpose_a` is non-zero) must
33118// match the outer dimension of `b` (after being transposed if `transpose_b` is
33119// non-zero). The bias values are then broadcast-added to the matrix
33120// multiplication result; the bias size must match the inner dimension of `b`.
33121// A relu activation is then applied to produce a non-negative result.
33122//
33123// Arguments:
33124//
33125//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
33126//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
33127//	bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
33128//
33129// transposed if `transpose_b` is non-zero).
33130//
33131//	min_a: The float value that the lowest quantized `a` value represents.
33132//	max_a: The float value that the highest quantized `a` value represents.
33133//	min_b: The float value that the lowest quantized `b` value represents.
33134//	max_b: The float value that the highest quantized `b` value represents.
33135//
33136// Returns:
33137//
33138//	out
33139//	min_out: The float value that the lowest quantized output value represents.
33140//	max_out: The float value that the highest quantized output value represents.
33141func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAndReluAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
33142	if scope.Err() != nil {
33143		return
33144	}
33145	attrs := map[string]interface{}{}
33146	for _, a := range optional {
33147		a(attrs)
33148	}
33149	opspec := tf.OpSpec{
33150		Type: "QuantizedMatMulWithBiasAndRelu",
33151		Input: []tf.Input{
33152			a, b, bias, min_a, max_a, min_b, max_b,
33153		},
33154		Attrs: attrs,
33155	}
33156	op := scope.AddOperation(opspec)
33157	return op.Output(0), op.Output(1), op.Output(2)
33158}
33159
33160// QuantizedMatMulWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedMatMulWithBiasAndReluAndRequantize.
33161type QuantizedMatMulWithBiasAndReluAndRequantizeAttr func(optionalAttr)
33162
33163// QuantizedMatMulWithBiasAndReluAndRequantizeToutput sets the optional Toutput attribute to value.
33164// If not specified, defaults to DT_QUINT8
33165func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
33166	return func(m optionalAttr) {
33167		m["Toutput"] = value
33168	}
33169}
33170
33171// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA sets the optional transpose_a attribute to value.
33172//
33173// value: If true, `a` is transposed before multiplication.
33174// If not specified, defaults to false
33175func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
33176	return func(m optionalAttr) {
33177		m["transpose_a"] = value
33178	}
33179}
33180
33181// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB sets the optional transpose_b attribute to value.
33182//
33183// value: If true, `b` is transposed before multiplication.
33184// If not specified, defaults to false
33185func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
33186	return func(m optionalAttr) {
33187		m["transpose_b"] = value
33188	}
33189}
33190
33191// QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode sets the optional input_quant_mode attribute to value.
33192//
// value: Input data quantization mode. Either MIN_FIRST (default) or SCALED.
33194// If not specified, defaults to "MIN_FIRST"
33195func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr {
33196	return func(m optionalAttr) {
33197		m["input_quant_mode"] = value
33198	}
33199}
33200
// Performs a quantized matrix multiplication of `a` by the matrix `b`, with
// bias add, relu, and requantize fusion.
//
// The inputs must be two-dimensional matrices and a 1D bias vector, and the
// inner dimension of `a` (after being transposed if `transpose_a` is non-zero)
// must match the outer dimension of `b` (after being transposed if
// `transpose_b` is non-zero). The bias values are then broadcast-added onto the
// matrix multiplication result; the bias size must match the inner dimension of
// `b`. A relu activation is applied to produce a non-negative result, which is
// then requantized to yield the final uint8 result.
33211//
33212// Arguments:
33213//
33214//	a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`.
33215//	b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`.
//	bias: A 1D bias tensor with size matching the inner dimension of `b` (after being
//
// transposed if `transpose_b` is non-zero).
33219//
33220//	min_a: The float value that the lowest quantized `a` value represents.
33221//	max_a: The float value that the highest quantized `a` value represents.
33222//	min_b: The float value that the lowest quantized `b` value represents.
33223//	max_b: The float value that the highest quantized `b` value represents.
//	min_freezed_output: The float value that the lowest quantized output value after requantize represents.
//	max_freezed_output: The float value that the highest quantized output value after requantize represents.
33225//
33226// Returns:
33227//
33228//	out
33229//	min_out: The float value that the lowest quantized output value represents.
33230//	max_out: The float value that the highest quantized output value represents.
33231func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, optional ...QuantizedMatMulWithBiasAndReluAndRequantizeAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) {
33232	if scope.Err() != nil {
33233		return
33234	}
33235	attrs := map[string]interface{}{}
33236	for _, a := range optional {
33237		a(attrs)
33238	}
33239	opspec := tf.OpSpec{
33240		Type: "QuantizedMatMulWithBiasAndReluAndRequantize",
33241		Input: []tf.Input{
33242			a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output,
33243		},
33244		Attrs: attrs,
33245	}
33246	op := scope.AddOperation(opspec)
33247	return op.Output(0), op.Output(1), op.Output(2)
33248}
33249
33250// Produces the max pool of the input tensor for quantized types.
33251//
33252// Arguments:
33253//
//	input: The 4D (batch x rows x cols x depth) Tensor to max-pool over.
33255//	min_input: The float value that the lowest quantized input value represents.
33256//	max_input: The float value that the highest quantized input value represents.
33257//	ksize: The size of the window for each dimension of the input tensor.
33258//
33259// The length must be 4 to match the number of dimensions of the input.
33260//
33261//	strides: The stride of the sliding window for each dimension of the input
33262//
33263// tensor. The length must be 4 to match the number of dimensions of the input.
33264//
33265//	padding: The type of padding algorithm to use.
33266//
33267// Returns:
33268//
33269//	output
33270//	min_output: The float value that the lowest quantized output value represents.
33271//	max_output: The float value that the highest quantized output value represents.
33272func QuantizedMaxPool(scope *Scope, input tf.Output, min_input tf.Output, max_input tf.Output, ksize []int64, strides []int64, padding string) (output tf.Output, min_output tf.Output, max_output tf.Output) {
33273	if scope.Err() != nil {
33274		return
33275	}
33276	attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding}
33277	opspec := tf.OpSpec{
33278		Type: "QuantizedMaxPool",
33279		Input: []tf.Input{
33280			input, min_input, max_input,
33281		},
33282		Attrs: attrs,
33283	}
33284	op := scope.AddOperation(opspec)
33285	return op.Output(0), op.Output(1), op.Output(2)
33286}
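
// A minimal wiring sketch for QuantizedMaxPool; the 2x2 window with VALID
// padding and the [0, 6] input range are illustrative assumptions.
//
//	s := NewScope()
//	in := Placeholder(s, tf.Quint8) // [batch, rows, cols, depth]
//	out, minOut, maxOut := QuantizedMaxPool(s, in,
//		Const(s, float32(0)), Const(s, float32(6)),
//		[]int64{1, 2, 2, 1}, []int64{1, 2, 2, 1}, "VALID")
//	_, _, _ = out, minOut, maxOut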
33287
33288// QuantizedMulAttr is an optional argument to QuantizedMul.
33289type QuantizedMulAttr func(optionalAttr)
33290
33291// QuantizedMulToutput sets the optional Toutput attribute to value.
33292// If not specified, defaults to DT_QINT32
33293func QuantizedMulToutput(value tf.DataType) QuantizedMulAttr {
33294	return func(m optionalAttr) {
33295		m["Toutput"] = value
33296	}
33297}
33298
33299// Returns x * y element-wise, working on quantized buffers.
33300//
33301// Arguments:
33302//
33303//	min_x: The float value that the lowest quantized `x` value represents.
33304//	max_x: The float value that the highest quantized `x` value represents.
33305//	min_y: The float value that the lowest quantized `y` value represents.
33306//	max_y: The float value that the highest quantized `y` value represents.
33307//
33308// Returns:
33309//
33310//	z
33311//	min_z: The float value that the lowest quantized output value represents.
33312//	max_z: The float value that the highest quantized output value represents.
33313//
33314// *NOTE*: `QuantizedMul` supports limited forms of broadcasting. More about
33315// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
33316func QuantizedMul(scope *Scope, x tf.Output, y tf.Output, min_x tf.Output, max_x tf.Output, min_y tf.Output, max_y tf.Output, optional ...QuantizedMulAttr) (z tf.Output, min_z tf.Output, max_z tf.Output) {
33317	if scope.Err() != nil {
33318		return
33319	}
33320	attrs := map[string]interface{}{}
33321	for _, a := range optional {
33322		a(attrs)
33323	}
33324	opspec := tf.OpSpec{
33325		Type: "QuantizedMul",
33326		Input: []tf.Input{
33327			x, y, min_x, max_x, min_y, max_y,
33328		},
33329		Attrs: attrs,
33330	}
33331	op := scope.AddOperation(opspec)
33332	return op.Output(0), op.Output(1), op.Output(2)
33333}
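
// A minimal sketch, assuming both factors are quint8 with an illustrative
// [0, 1] range; Toutput stays at the wide qint32 default so that products of
// 8-bit values do not saturate.
//
//	s := NewScope()
//	x := Placeholder(s, tf.Quint8)
//	y := Placeholder(s, tf.Quint8)
//	z, minZ, maxZ := QuantizedMul(s, x, y,
//		Const(s, float32(0)), Const(s, float32(1)),
//		Const(s, float32(0)), Const(s, float32(1)),
//		QuantizedMulToutput(tf.Qint32))
//	_, _, _ = z, minZ, maxZ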
33334
33335// QuantizedReluAttr is an optional argument to QuantizedRelu.
33336type QuantizedReluAttr func(optionalAttr)
33337
33338// QuantizedReluOutType sets the optional out_type attribute to value.
33339// If not specified, defaults to DT_QUINT8
33340func QuantizedReluOutType(value tf.DataType) QuantizedReluAttr {
33341	return func(m optionalAttr) {
33342		m["out_type"] = value
33343	}
33344}
33345
33346// Computes Quantized Rectified Linear: `max(features, 0)`
33347//
33348// Arguments:
33349//
33350//	min_features: The float value that the lowest quantized value represents.
33351//	max_features: The float value that the highest quantized value represents.
33352//
33353// Returns:
33354//
33355//	activations: Has the same output shape as "features".
33356//	min_activations: The float value that the lowest quantized value represents.
33357//	max_activations: The float value that the highest quantized value represents.
33358func QuantizedRelu(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
33359	if scope.Err() != nil {
33360		return
33361	}
33362	attrs := map[string]interface{}{}
33363	for _, a := range optional {
33364		a(attrs)
33365	}
33366	opspec := tf.OpSpec{
33367		Type: "QuantizedRelu",
33368		Input: []tf.Input{
33369			features, min_features, max_features,
33370		},
33371		Attrs: attrs,
33372	}
33373	op := scope.AddOperation(opspec)
33374	return op.Output(0), op.Output(1), op.Output(2)
33375}
33376
33377// QuantizedRelu6Attr is an optional argument to QuantizedRelu6.
33378type QuantizedRelu6Attr func(optionalAttr)
33379
33380// QuantizedRelu6OutType sets the optional out_type attribute to value.
33381// If not specified, defaults to DT_QUINT8
33382func QuantizedRelu6OutType(value tf.DataType) QuantizedRelu6Attr {
33383	return func(m optionalAttr) {
33384		m["out_type"] = value
33385	}
33386}
33387
33388// Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)`
33389//
33390// Arguments:
33391//
33392//	min_features: The float value that the lowest quantized value represents.
33393//	max_features: The float value that the highest quantized value represents.
33394//
33395// Returns:
33396//
33397//	activations: Has the same output shape as "features".
33398//	min_activations: The float value that the lowest quantized value represents.
33399//	max_activations: The float value that the highest quantized value represents.
33400func QuantizedRelu6(scope *Scope, features tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedRelu6Attr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
33401	if scope.Err() != nil {
33402		return
33403	}
33404	attrs := map[string]interface{}{}
33405	for _, a := range optional {
33406		a(attrs)
33407	}
33408	opspec := tf.OpSpec{
33409		Type: "QuantizedRelu6",
33410		Input: []tf.Input{
33411			features, min_features, max_features,
33412		},
33413		Attrs: attrs,
33414	}
33415	op := scope.AddOperation(opspec)
33416	return op.Output(0), op.Output(1), op.Output(2)
33417}
33418
33419// QuantizedReluXAttr is an optional argument to QuantizedReluX.
33420type QuantizedReluXAttr func(optionalAttr)
33421
33422// QuantizedReluXOutType sets the optional out_type attribute to value.
33423// If not specified, defaults to DT_QUINT8
33424func QuantizedReluXOutType(value tf.DataType) QuantizedReluXAttr {
33425	return func(m optionalAttr) {
33426		m["out_type"] = value
33427	}
33428}
33429
33430// Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)`
33431//
33432// Arguments:
33433//
33434//	min_features: The float value that the lowest quantized value represents.
33435//	max_features: The float value that the highest quantized value represents.
33436//
33437// Returns:
33438//
33439//	activations: Has the same output shape as "features".
33440//	min_activations: The float value that the lowest quantized value represents.
33441//	max_activations: The float value that the highest quantized value represents.
33442func QuantizedReluX(scope *Scope, features tf.Output, max_value tf.Output, min_features tf.Output, max_features tf.Output, optional ...QuantizedReluXAttr) (activations tf.Output, min_activations tf.Output, max_activations tf.Output) {
33443	if scope.Err() != nil {
33444		return
33445	}
33446	attrs := map[string]interface{}{}
33447	for _, a := range optional {
33448		a(attrs)
33449	}
33450	opspec := tf.OpSpec{
33451		Type: "QuantizedReluX",
33452		Input: []tf.Input{
33453			features, max_value, min_features, max_features,
33454		},
33455		Attrs: attrs,
33456	}
33457	op := scope.AddOperation(opspec)
33458	return op.Output(0), op.Output(1), op.Output(2)
33459}
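
// A minimal sketch: QuantizedReluX generalizes QuantizedRelu6 by taking the
// clamp ceiling as a tensor input. The [0, 10] feature range here is an
// illustrative assumption.
//
//	s := NewScope()
//	features := Placeholder(s, tf.Quint8)
//	act, minAct, maxAct := QuantizedReluX(s, features,
//		Const(s, float32(6)),                        // max_value, as in Relu6
//		Const(s, float32(0)), Const(s, float32(10)), // min/max_features
//		QuantizedReluXOutType(tf.Quint8))
//	_, _, _ = act, minAct, maxAct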
33460
33461// Reshapes a quantized tensor as per the Reshape op.
//
33464//
33465// Arguments:
33466//
33467//	shape: Defines the shape of the output tensor.
33468//	input_min: The minimum value of the input.
33469//	input_max: The maximum value of the input.
33470//
33471// Returns:
33472//
33473//	output
33474//	output_min: This value is copied from input_min.
33475//	output_max: This value is copied from input_max.
33476func QuantizedReshape(scope *Scope, tensor tf.Output, shape tf.Output, input_min tf.Output, input_max tf.Output) (output tf.Output, output_min tf.Output, output_max tf.Output) {
33477	if scope.Err() != nil {
33478		return
33479	}
33480	opspec := tf.OpSpec{
33481		Type: "QuantizedReshape",
33482		Input: []tf.Input{
33483			tensor, shape, input_min, input_max,
33484		},
33485	}
33486	op := scope.AddOperation(opspec)
33487	return op.Output(0), op.Output(1), op.Output(2)
33488}
33489
33490// QuantizedResizeBilinearAttr is an optional argument to QuantizedResizeBilinear.
33491type QuantizedResizeBilinearAttr func(optionalAttr)
33492
33493// QuantizedResizeBilinearAlignCorners sets the optional align_corners attribute to value.
33494//
33495// value: If true, the centers of the 4 corner pixels of the input and output tensors are
33496// aligned, preserving the values at the corner pixels. Defaults to false.
33497// If not specified, defaults to false
33498func QuantizedResizeBilinearAlignCorners(value bool) QuantizedResizeBilinearAttr {
33499	return func(m optionalAttr) {
33500		m["align_corners"] = value
33501	}
33502}
33503
33504// QuantizedResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
33505// If not specified, defaults to false
33506func QuantizedResizeBilinearHalfPixelCenters(value bool) QuantizedResizeBilinearAttr {
33507	return func(m optionalAttr) {
33508		m["half_pixel_centers"] = value
33509	}
33510}
33511
33512// Resize quantized `images` to `size` using quantized bilinear interpolation.
33513//
33514// Input images and output images must be quantized types.
33515//
33516// Arguments:
33517//
33518//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
33520//
33521// new size for the images.
33522//
33523// Returns:
33524//
33525//	resized_images: 4-D with shape
33526//
33527// `[batch, new_height, new_width, channels]`.
33528//
33529//	out_min
33530//	out_max
33531func QuantizedResizeBilinear(scope *Scope, images tf.Output, size tf.Output, min tf.Output, max tf.Output, optional ...QuantizedResizeBilinearAttr) (resized_images tf.Output, out_min tf.Output, out_max tf.Output) {
33532	if scope.Err() != nil {
33533		return
33534	}
33535	attrs := map[string]interface{}{}
33536	for _, a := range optional {
33537		a(attrs)
33538	}
33539	opspec := tf.OpSpec{
33540		Type: "QuantizedResizeBilinear",
33541		Input: []tf.Input{
33542			images, size, min, max,
33543		},
33544		Attrs: attrs,
33545	}
33546	op := scope.AddOperation(opspec)
33547	return op.Output(0), op.Output(1), op.Output(2)
33548}
33549
33550// QueueCloseV2Attr is an optional argument to QueueCloseV2.
33551type QueueCloseV2Attr func(optionalAttr)
33552
33553// QueueCloseV2CancelPendingEnqueues sets the optional cancel_pending_enqueues attribute to value.
33554//
33555// value: If true, all pending enqueue requests that are
33556// blocked on the given queue will be canceled.
33557// If not specified, defaults to false
33558func QueueCloseV2CancelPendingEnqueues(value bool) QueueCloseV2Attr {
33559	return func(m optionalAttr) {
33560		m["cancel_pending_enqueues"] = value
33561	}
33562}
33563
33564// Closes the given queue.
33565//
33566// This operation signals that no more elements will be enqueued in the
33567// given queue. Subsequent Enqueue(Many) operations will fail.
33568// Subsequent Dequeue(Many) operations will continue to succeed if
33569// sufficient elements remain in the queue. Subsequent Dequeue(Many)
33570// operations that would block will fail immediately.
33571//
33572// Arguments:
33573//
33574//	handle: The handle to a queue.
33575//
33576// Returns the created operation.
33577func QueueCloseV2(scope *Scope, handle tf.Output, optional ...QueueCloseV2Attr) (o *tf.Operation) {
33578	if scope.Err() != nil {
33579		return
33580	}
33581	attrs := map[string]interface{}{}
33582	for _, a := range optional {
33583		a(attrs)
33584	}
33585	opspec := tf.OpSpec{
33586		Type: "QueueCloseV2",
33587		Input: []tf.Input{
33588			handle,
33589		},
33590		Attrs: attrs,
33591	}
33592	return scope.AddOperation(opspec)
33593}
33594
33595// QueueDequeueManyV2Attr is an optional argument to QueueDequeueManyV2.
33596type QueueDequeueManyV2Attr func(optionalAttr)
33597
33598// QueueDequeueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
33599//
33600// value: If the queue has fewer than n elements, this operation
33601// will block for up to timeout_ms milliseconds.
33602// Note: This option is not supported yet.
33603// If not specified, defaults to -1
33604func QueueDequeueManyV2TimeoutMs(value int64) QueueDequeueManyV2Attr {
33605	return func(m optionalAttr) {
33606		m["timeout_ms"] = value
33607	}
33608}
33609
33610// Dequeues `n` tuples of one or more tensors from the given queue.
33611//
33612// If the queue is closed and there are fewer than `n` elements, then an
33613// OutOfRange error is returned.
33614//
33615// This operation concatenates queue-element component tensors along the
33616// 0th dimension to make a single component tensor.  All of the components
33617// in the dequeued tuple will have size `n` in the 0th dimension.
33618//
33619// This operation has `k` outputs, where `k` is the number of components in
33620// the tuples stored in the given queue, and output `i` is the ith
33621// component of the dequeued tuple.
33622//
33623// N.B. If the queue is empty, this operation will block until `n` elements
33624// have been dequeued (or 'timeout_ms' elapses, if specified).
33625//
33626// Arguments:
33627//
33628//	handle: The handle to a queue.
33629//	n: The number of tuples to dequeue.
33630//	component_types: The type of each component in a tuple.
33631//
33632// Returns One or more tensors that were dequeued as a tuple.
33633func QueueDequeueManyV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueManyV2Attr) (components []tf.Output) {
33634	if scope.Err() != nil {
33635		return
33636	}
33637	attrs := map[string]interface{}{"component_types": component_types}
33638	for _, a := range optional {
33639		a(attrs)
33640	}
33641	opspec := tf.OpSpec{
33642		Type: "QueueDequeueManyV2",
33643		Input: []tf.Input{
33644			handle, n,
33645		},
33646		Attrs: attrs,
33647	}
33648	op := scope.AddOperation(opspec)
33649	if scope.Err() != nil {
33650		return
33651	}
33652	var idx int
33653	var err error
33654	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
33655		scope.UpdateErr("QueueDequeueManyV2", err)
33656		return
33657	}
33658	return components
33659}
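
// A minimal sketch of the optional-attr pattern with this op, assuming a
// float32 FIFO queue built with FIFOQueueV2 elsewhere in this package; note
// that the timeout_ms attr is documented above as not supported yet.
//
//	s := NewScope()
//	q := FIFOQueueV2(s, []tf.DataType{tf.Float})
//	components := QueueDequeueManyV2(s, q, Const(s, int32(4)),
//		[]tf.DataType{tf.Float},
//		QueueDequeueManyV2TimeoutMs(5000))
//	batch := components[0] // four queue elements stacked along dimension 0
//	_ = batch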
33660
33661// QueueDequeueUpToV2Attr is an optional argument to QueueDequeueUpToV2.
33662type QueueDequeueUpToV2Attr func(optionalAttr)
33663
33664// QueueDequeueUpToV2TimeoutMs sets the optional timeout_ms attribute to value.
33665//
33666// value: If the queue has fewer than n elements, this operation
33667// will block for up to timeout_ms milliseconds.
33668// Note: This option is not supported yet.
33669// If not specified, defaults to -1
33670func QueueDequeueUpToV2TimeoutMs(value int64) QueueDequeueUpToV2Attr {
33671	return func(m optionalAttr) {
33672		m["timeout_ms"] = value
33673	}
33674}
33675
33676// Dequeues `n` tuples of one or more tensors from the given queue.
33677//
33678// This operation is not supported by all queues.  If a queue does not support
33679// DequeueUpTo, then an Unimplemented error is returned.
33680//
// If the queue is closed and there are more than 0 but fewer than `n`
// elements remaining, then instead of returning an OutOfRange error like
// QueueDequeueMany, fewer than `n` elements are returned immediately.  If
33684// the queue is closed and there are 0 elements left in the queue, then
33685// an OutOfRange error is returned just like in QueueDequeueMany.
33686// Otherwise the behavior is identical to QueueDequeueMany:
33687//
33688// This operation concatenates queue-element component tensors along the
33689// 0th dimension to make a single component tensor.  All of the components
33690// in the dequeued tuple will have size n in the 0th dimension.
33691//
33692// This operation has `k` outputs, where `k` is the number of components in
33693// the tuples stored in the given queue, and output `i` is the ith
33694// component of the dequeued tuple.
33695//
33696// Arguments:
33697//
33698//	handle: The handle to a queue.
33699//	n: The number of tuples to dequeue.
33700//	component_types: The type of each component in a tuple.
33701//
33702// Returns One or more tensors that were dequeued as a tuple.
33703func QueueDequeueUpToV2(scope *Scope, handle tf.Output, n tf.Output, component_types []tf.DataType, optional ...QueueDequeueUpToV2Attr) (components []tf.Output) {
33704	if scope.Err() != nil {
33705		return
33706	}
33707	attrs := map[string]interface{}{"component_types": component_types}
33708	for _, a := range optional {
33709		a(attrs)
33710	}
33711	opspec := tf.OpSpec{
33712		Type: "QueueDequeueUpToV2",
33713		Input: []tf.Input{
33714			handle, n,
33715		},
33716		Attrs: attrs,
33717	}
33718	op := scope.AddOperation(opspec)
33719	if scope.Err() != nil {
33720		return
33721	}
33722	var idx int
33723	var err error
33724	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
33725		scope.UpdateErr("QueueDequeueUpToV2", err)
33726		return
33727	}
33728	return components
33729}
33730
33731// QueueDequeueV2Attr is an optional argument to QueueDequeueV2.
33732type QueueDequeueV2Attr func(optionalAttr)
33733
33734// QueueDequeueV2TimeoutMs sets the optional timeout_ms attribute to value.
33735//
33736// value: If the queue is empty, this operation will block for up to
33737// timeout_ms milliseconds.
33738// Note: This option is not supported yet.
33739// If not specified, defaults to -1
33740func QueueDequeueV2TimeoutMs(value int64) QueueDequeueV2Attr {
33741	return func(m optionalAttr) {
33742		m["timeout_ms"] = value
33743	}
33744}
33745
33746// Dequeues a tuple of one or more tensors from the given queue.
33747//
33748// This operation has k outputs, where k is the number of components
33749// in the tuples stored in the given queue, and output i is the ith
33750// component of the dequeued tuple.
33751//
33752// N.B. If the queue is empty, this operation will block until an element
33753// has been dequeued (or 'timeout_ms' elapses, if specified).
33754//
33755// Arguments:
33756//
33757//	handle: The handle to a queue.
33758//	component_types: The type of each component in a tuple.
33759//
33760// Returns One or more tensors that were dequeued as a tuple.
33761func QueueDequeueV2(scope *Scope, handle tf.Output, component_types []tf.DataType, optional ...QueueDequeueV2Attr) (components []tf.Output) {
33762	if scope.Err() != nil {
33763		return
33764	}
33765	attrs := map[string]interface{}{"component_types": component_types}
33766	for _, a := range optional {
33767		a(attrs)
33768	}
33769	opspec := tf.OpSpec{
33770		Type: "QueueDequeueV2",
33771		Input: []tf.Input{
33772			handle,
33773		},
33774		Attrs: attrs,
33775	}
33776	op := scope.AddOperation(opspec)
33777	if scope.Err() != nil {
33778		return
33779	}
33780	var idx int
33781	var err error
33782	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
33783		scope.UpdateErr("QueueDequeueV2", err)
33784		return
33785	}
33786	return components
33787}
33788
33789// QueueEnqueueManyV2Attr is an optional argument to QueueEnqueueManyV2.
33790type QueueEnqueueManyV2Attr func(optionalAttr)
33791
33792// QueueEnqueueManyV2TimeoutMs sets the optional timeout_ms attribute to value.
33793//
33794// value: If the queue is too full, this operation will block for up
33795// to timeout_ms milliseconds.
33796// Note: This option is not supported yet.
33797// If not specified, defaults to -1
33798func QueueEnqueueManyV2TimeoutMs(value int64) QueueEnqueueManyV2Attr {
33799	return func(m optionalAttr) {
33800		m["timeout_ms"] = value
33801	}
33802}
33803
33804// Enqueues zero or more tuples of one or more tensors in the given queue.
33805//
33806// This operation slices each component tensor along the 0th dimension to
33807// make multiple queue elements. All of the tuple components must have the
33808// same size in the 0th dimension.
33809//
33810// The components input has k elements, which correspond to the components of
33811// tuples stored in the given queue.
33812//
33813// N.B. If the queue is full, this operation will block until the given
33814// elements have been enqueued (or 'timeout_ms' elapses, if specified).
33815//
33816// Arguments:
33817//
33818//	handle: The handle to a queue.
33819//	components: One or more tensors from which the enqueued tensors should
33820//
33821// be taken.
33822//
33823// Returns the created operation.
33824func QueueEnqueueManyV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueManyV2Attr) (o *tf.Operation) {
33825	if scope.Err() != nil {
33826		return
33827	}
33828	attrs := map[string]interface{}{}
33829	for _, a := range optional {
33830		a(attrs)
33831	}
33832	opspec := tf.OpSpec{
33833		Type: "QueueEnqueueManyV2",
33834		Input: []tf.Input{
33835			handle, tf.OutputList(components),
33836		},
33837		Attrs: attrs,
33838	}
33839	return scope.AddOperation(opspec)
33840}
33841
33842// QueueEnqueueV2Attr is an optional argument to QueueEnqueueV2.
33843type QueueEnqueueV2Attr func(optionalAttr)
33844
33845// QueueEnqueueV2TimeoutMs sets the optional timeout_ms attribute to value.
33846//
33847// value: If the queue is full, this operation will block for up to
33848// timeout_ms milliseconds.
33849// Note: This option is not supported yet.
33850// If not specified, defaults to -1
33851func QueueEnqueueV2TimeoutMs(value int64) QueueEnqueueV2Attr {
33852	return func(m optionalAttr) {
33853		m["timeout_ms"] = value
33854	}
33855}
33856
33857// Enqueues a tuple of one or more tensors in the given queue.
33858//
33859// The components input has k elements, which correspond to the components of
33860// tuples stored in the given queue.
33861//
33862// N.B. If the queue is full, this operation will block until the given
33863// element has been enqueued (or 'timeout_ms' elapses, if specified).
33864//
33865// Arguments:
33866//
33867//	handle: The handle to a queue.
33868//	components: One or more tensors from which the enqueued tensors should be taken.
33869//
33870// Returns the created operation.
33871func QueueEnqueueV2(scope *Scope, handle tf.Output, components []tf.Output, optional ...QueueEnqueueV2Attr) (o *tf.Operation) {
33872	if scope.Err() != nil {
33873		return
33874	}
33875	attrs := map[string]interface{}{}
33876	for _, a := range optional {
33877		a(attrs)
33878	}
33879	opspec := tf.OpSpec{
33880		Type: "QueueEnqueueV2",
33881		Input: []tf.Input{
33882			handle, tf.OutputList(components),
33883		},
33884		Attrs: attrs,
33885	}
33886	return scope.AddOperation(opspec)
33887}
33888
33889// Returns true if queue is closed.
33890//
33891// This operation returns true if the queue is closed and false if the queue
33892// is open.
33893//
33894// Arguments:
33895//
33896//	handle: The handle to a queue.
33897func QueueIsClosedV2(scope *Scope, handle tf.Output) (is_closed tf.Output) {
33898	if scope.Err() != nil {
33899		return
33900	}
33901	opspec := tf.OpSpec{
33902		Type: "QueueIsClosedV2",
33903		Input: []tf.Input{
33904			handle,
33905		},
33906	}
33907	op := scope.AddOperation(opspec)
33908	return op.Output(0)
33909}
33910
33911// Computes the number of elements in the given queue.
33912//
33913// Arguments:
33914//
33915//	handle: The handle to a queue.
33916//
33917// Returns The number of elements in the given queue.
33918func QueueSizeV2(scope *Scope, handle tf.Output) (size tf.Output) {
33919	if scope.Err() != nil {
33920		return
33921	}
33922	opspec := tf.OpSpec{
33923		Type: "QueueSizeV2",
33924		Input: []tf.Input{
33925			handle,
33926		},
33927	}
33928	op := scope.AddOperation(opspec)
33929	return op.Output(0)
33930}
33931
33932// RFFTAttr is an optional argument to RFFT.
33933type RFFTAttr func(optionalAttr)
33934
33935// RFFTTcomplex sets the optional Tcomplex attribute to value.
33936// If not specified, defaults to DT_COMPLEX64
33937func RFFTTcomplex(value tf.DataType) RFFTAttr {
33938	return func(m optionalAttr) {
33939		m["Tcomplex"] = value
33940	}
33941}
33942
33943// Real-valued fast Fourier transform.
33944//
33945// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
33946// over the inner-most dimension of `input`.
33947//
33948// Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
33949// `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
33950// followed by the `fft_length / 2` positive-frequency terms.
33951//
33952// Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
33953// corresponding dimension of `input`, the dimension is cropped. If it is larger,
33954// the dimension is padded with zeros.
33955//
33956// Arguments:
33957//
33958//	input: A float32 tensor.
33959//	fft_length: An int32 tensor of shape [1]. The FFT length.
33960//
33961// Returns A complex64 tensor of the same rank as `input`. The inner-most
33962//
33963//	dimension of `input` is replaced with the `fft_length / 2 + 1` unique
33964//	frequency components of its 1D Fourier transform.
33965//
33966// @compatibility(numpy)
33967// Equivalent to np.fft.rfft
33968// @end_compatibility
33969func RFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFTAttr) (output tf.Output) {
33970	if scope.Err() != nil {
33971		return
33972	}
33973	attrs := map[string]interface{}{}
33974	for _, a := range optional {
33975		a(attrs)
33976	}
33977	opspec := tf.OpSpec{
33978		Type: "RFFT",
33979		Input: []tf.Input{
33980			input, fft_length,
33981		},
33982		Attrs: attrs,
33983	}
33984	op := scope.AddOperation(opspec)
33985	return op.Output(0)
33986}
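
// A minimal sketch: a length-512 real signal yields 512/2 + 1 = 257 unique
// complex64 frequency components. The signal length is an illustrative
// assumption.
//
//	s := NewScope()
//	signal := Placeholder(s, tf.Float) // e.g. shape [..., 512]
//	spectrum := RFFT(s, signal, Const(s, []int32{512}))
//	_ = spectrum // complex64, inner-most dimension 257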
33987
33988// RFFT2DAttr is an optional argument to RFFT2D.
33989type RFFT2DAttr func(optionalAttr)
33990
33991// RFFT2DTcomplex sets the optional Tcomplex attribute to value.
33992// If not specified, defaults to DT_COMPLEX64
33993func RFFT2DTcomplex(value tf.DataType) RFFT2DAttr {
33994	return func(m optionalAttr) {
33995		m["Tcomplex"] = value
33996	}
33997}
33998
33999// 2D real-valued fast Fourier transform.
34000//
34001// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
34002// over the inner-most 2 dimensions of `input`.
34003//
34004// Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
34005// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
34006// of `output`: the zero-frequency term, followed by the `fft_length / 2`
34007// positive-frequency terms.
34008//
34009// Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
34010// corresponding dimension of `input`, the dimension is cropped. If it is larger,
34011// the dimension is padded with zeros.
34012//
34013// Arguments:
34014//
34015//	input: A float32 tensor.
34016//	fft_length: An int32 tensor of shape [2]. The FFT length for each dimension.
34017//
34018// Returns A complex64 tensor of the same rank as `input`. The inner-most 2
34019//
34020//	dimensions of `input` are replaced with their 2D Fourier transform. The
34021//	inner-most dimension contains `fft_length / 2 + 1` unique frequency
34022//	components.
34023//
34024// @compatibility(numpy)
34025// Equivalent to np.fft.rfft2
34026// @end_compatibility
34027func RFFT2D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT2DAttr) (output tf.Output) {
34028	if scope.Err() != nil {
34029		return
34030	}
34031	attrs := map[string]interface{}{}
34032	for _, a := range optional {
34033		a(attrs)
34034	}
34035	opspec := tf.OpSpec{
34036		Type: "RFFT2D",
34037		Input: []tf.Input{
34038			input, fft_length,
34039		},
34040		Attrs: attrs,
34041	}
34042	op := scope.AddOperation(opspec)
34043	return op.Output(0)
34044}
34045
34046// RFFT3DAttr is an optional argument to RFFT3D.
34047type RFFT3DAttr func(optionalAttr)
34048
34049// RFFT3DTcomplex sets the optional Tcomplex attribute to value.
34050// If not specified, defaults to DT_COMPLEX64
34051func RFFT3DTcomplex(value tf.DataType) RFFT3DAttr {
34052	return func(m optionalAttr) {
34053		m["Tcomplex"] = value
34054	}
34055}
34056
34057// 3D real-valued fast Fourier transform.
34058//
34059// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
34060// over the inner-most 3 dimensions of `input`.
34061//
34062// Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
34063// `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
34064// of `output`: the zero-frequency term, followed by the `fft_length / 2`
34065// positive-frequency terms.
34066//
34067// Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
34068// corresponding dimension of `input`, the dimension is cropped. If it is larger,
34069// the dimension is padded with zeros.
34070//
34071// Arguments:
34072//
34073//	input: A float32 tensor.
34074//	fft_length: An int32 tensor of shape [3]. The FFT length for each dimension.
34075//
34076// Returns A complex64 tensor of the same rank as `input`. The inner-most 3
34077//
//	dimensions of `input` are replaced with their 3D Fourier transform. The
34079//	inner-most dimension contains `fft_length / 2 + 1` unique frequency
34080//	components.
34081//
34082// @compatibility(numpy)
34083// Equivalent to np.fft.rfftn with 3 dimensions.
34084// @end_compatibility
34085func RFFT3D(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFT3DAttr) (output tf.Output) {
34086	if scope.Err() != nil {
34087		return
34088	}
34089	attrs := map[string]interface{}{}
34090	for _, a := range optional {
34091		a(attrs)
34092	}
34093	opspec := tf.OpSpec{
34094		Type: "RFFT3D",
34095		Input: []tf.Input{
34096			input, fft_length,
34097		},
34098		Attrs: attrs,
34099	}
34100	op := scope.AddOperation(opspec)
34101	return op.Output(0)
34102}
34103
34104// Converts one or more images from RGB to HSV.
34105//
34106// Outputs a tensor of the same shape as the `images` tensor, containing the HSV
34107// value of the pixels. The output is only well defined if the value in `images`
34108// are in `[0,1]`.
34109//
34110// `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
34111// `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
34112// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
34113//
34114// Usage Example:
34115//
34116// >>> blue_image = tf.stack([
34117// ...    tf.zeros([5,5]),
34118// ...    tf.zeros([5,5]),
34119// ...    tf.ones([5,5])],
34120// ...    axis=-1)
34121// >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
34122// >>> blue_hsv_image[0,0].numpy()
34123// array([0.6666667, 1. , 1. ], dtype=float32)
34124//
34125// Arguments:
34126//
34127//	images: 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
34128//
34129// Returns `images` converted to HSV.
34130func RGBToHSV(scope *Scope, images tf.Output) (output tf.Output) {
34131	if scope.Err() != nil {
34132		return
34133	}
34134	opspec := tf.OpSpec{
34135		Type: "RGBToHSV",
34136		Input: []tf.Input{
34137			images,
34138		},
34139	}
34140	op := scope.AddOperation(opspec)
34141	return op.Output(0)
34142}
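
// A minimal sketch mirroring the usage example above: a single pure-blue
// pixel maps to hue 2/3, saturation 1, value 1.
//
//	s := NewScope()
//	rgb := Const(s, [][][]float32{{{0, 0, 1}}}) // shape [1, 1, 3], values in [0, 1]
//	hsv := RGBToHSV(s, rgb)
//	_ = hsv // expected values approx [2/3, 1, 1]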
34143
34144// RaggedBincountAttr is an optional argument to RaggedBincount.
34145type RaggedBincountAttr func(optionalAttr)
34146
34147// RaggedBincountBinaryOutput sets the optional binary_output attribute to value.
34148//
// value: bool; whether the kernel should count only the presence of each value (binary output) or the number of occurrences.
34150// If not specified, defaults to false
34151func RaggedBincountBinaryOutput(value bool) RaggedBincountAttr {
34152	return func(m optionalAttr) {
34153		m["binary_output"] = value
34154	}
34155}
34156
34157// Counts the number of occurrences of each value in an integer array.
34158//
34159// Outputs a vector with length `size` and the same dtype as `weights`. If
34160// `weights` are empty, then index `i` stores the number of times the value `i` is
34161// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
34162// the value in `weights` at each index where the corresponding value in `arr` is
34163// `i`.
34164//
34165// Values in `arr` outside of the range [0, size) are ignored.
34166//
34167// Arguments:
34168//
34169//	splits: 1D int64 `Tensor`.
34170//	values: 2D int `Tensor`.
34171//	size: non-negative int scalar `Tensor`.
//	weights: An int32, int64, float32, or float64 `Tensor` with the same
34173//
34174// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights
34175// equal to 1.
34176//
34177// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
34178// The counts or summed weights for each value in the range [0, size).
34179func RaggedBincount(scope *Scope, splits tf.Output, values tf.Output, size tf.Output, weights tf.Output, optional ...RaggedBincountAttr) (output tf.Output) {
34180	if scope.Err() != nil {
34181		return
34182	}
34183	attrs := map[string]interface{}{}
34184	for _, a := range optional {
34185		a(attrs)
34186	}
34187	opspec := tf.OpSpec{
34188		Type: "RaggedBincount",
34189		Input: []tf.Input{
34190			splits, values, size, weights,
34191		},
34192		Attrs: attrs,
34193	}
34194	op := scope.AddOperation(opspec)
34195	return op.Output(0)
34196}
34197
34198// RaggedCountSparseOutputAttr is an optional argument to RaggedCountSparseOutput.
34199type RaggedCountSparseOutputAttr func(optionalAttr)
34200
34201// RaggedCountSparseOutputMinlength sets the optional minlength attribute to value.
34202//
34203// value: Minimum value to count. Can be set to -1 for no minimum.
34204// If not specified, defaults to -1
34205//
34206// REQUIRES: value >= -1
34207func RaggedCountSparseOutputMinlength(value int64) RaggedCountSparseOutputAttr {
34208	return func(m optionalAttr) {
34209		m["minlength"] = value
34210	}
34211}
34212
34213// RaggedCountSparseOutputMaxlength sets the optional maxlength attribute to value.
34214//
34215// value: Maximum value to count. Can be set to -1 for no maximum.
34216// If not specified, defaults to -1
34217//
34218// REQUIRES: value >= -1
34219func RaggedCountSparseOutputMaxlength(value int64) RaggedCountSparseOutputAttr {
34220	return func(m optionalAttr) {
34221		m["maxlength"] = value
34222	}
34223}
34224
34225// Performs sparse-output bin counting for a ragged tensor input.
34226//
34227//	Counts the number of times each value occurs in the input.
34228//
34229// Arguments:
34230//
34231//	splits: Tensor containing the row splits of the ragged tensor to count.
34232//	values: Tensor containing values of the sparse tensor to count.
34233//	weights: A Tensor of the same shape as indices containing per-index weight values.
34234//
34235// May also be the empty tensor if no weights are used.
34236//
34237//	binary_output: Whether to output the number of occurrences of each value or 1.
34238//
34239// Returns:
34240//
34241//		output_indices: Indices tensor for the resulting sparse tensor object.
34242//		output_values: Values tensor for the resulting sparse tensor object.
34243//		output_dense_shape: Shape tensor for the resulting sparse tensor object.
34251func RaggedCountSparseOutput(scope *Scope, splits tf.Output, values tf.Output, weights tf.Output, binary_output bool, optional ...RaggedCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
34252	if scope.Err() != nil {
34253		return
34254	}
34255	attrs := map[string]interface{}{"binary_output": binary_output}
34256	for _, a := range optional {
34257		a(attrs)
34258	}
34259	opspec := tf.OpSpec{
34260		Type: "RaggedCountSparseOutput",
34261		Input: []tf.Input{
34262			splits, values, weights,
34263		},
34264		Attrs: attrs,
34265	}
34266	op := scope.AddOperation(opspec)
34267	return op.Output(0), op.Output(1), op.Output(2)
34268}
34269
34270// Generates a feature cross from a list of tensors, and returns it as a
34271// RaggedTensor.  See `tf.ragged.cross` for more details.
34272//
34273// Arguments:
34274//
34275//	ragged_values: The values tensor for each RaggedTensor input.
34276//	ragged_row_splits: The row_splits tensor for each RaggedTensor input.
34277//	sparse_indices: The indices tensor for each SparseTensor input.
34278//	sparse_values: The values tensor for each SparseTensor input.
34279//	sparse_shape: The dense_shape tensor for each SparseTensor input.
34280//	dense_inputs: The tf.Tensor inputs.
34281//	input_order: String specifying the tensor type for each input.  The `i`th character in
34282//
34283// this string specifies the type of the `i`th input, and is one of: 'R' (ragged),
34284// 'D' (dense), or 'S' (sparse).  This attr is used to ensure that the crossed
34285// values are combined in the order of the inputs from the call to tf.ragged.cross.
34286//
34287// Returns:
34288//
34289//	output_values: The `values` for the returned `RaggedTensor`.
34290//	output_row_splits: The `row_splits` for the returned `RaggedTensor`.
34291func RaggedCross(scope *Scope, ragged_values []tf.Output, ragged_row_splits []tf.Output, sparse_indices []tf.Output, sparse_values []tf.Output, sparse_shape []tf.Output, dense_inputs []tf.Output, input_order string, hashed_output bool, num_buckets int64, hash_key int64, out_values_type tf.DataType, out_row_splits_type tf.DataType) (output_values tf.Output, output_row_splits tf.Output) {
34292	if scope.Err() != nil {
34293		return
34294	}
34295	attrs := map[string]interface{}{"input_order": input_order, "hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_values_type": out_values_type, "out_row_splits_type": out_row_splits_type}
34296	opspec := tf.OpSpec{
34297		Type: "RaggedCross",
34298		Input: []tf.Input{
34299			tf.OutputList(ragged_values), tf.OutputList(ragged_row_splits), tf.OutputList(sparse_indices), tf.OutputList(sparse_values), tf.OutputList(sparse_shape), tf.OutputList(dense_inputs),
34300		},
34301		Attrs: attrs,
34302	}
34303	op := scope.AddOperation(opspec)
34304	return op.Output(0), op.Output(1)
34305}
34306
34307// Gather ragged slices from `params` axis `0` according to `indices`.
34308//
34309// Outputs a `RaggedTensor` output composed from `output_dense_values` and
34310// `output_nested_splits`, such that:
34311//
34312// ```python
34313// output.shape = indices.shape + params.shape[1:]
34314// output.ragged_rank = indices.shape.ndims + params.ragged_rank
34315// output[i...j, d0...dn] = params[indices[i...j], d0...dn]
34316// ```
34317//
34318// where
34319//
34320//   - `params =
34321//     ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
34322//     provides the values that should be gathered.
//   - `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
34324//     values should be gathered.
34325//   - `output =
34326//     ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
34327//     is the output tensor.
34328//
// (Note: This C++ op is used to implement the higher-level Python
34330// `tf.ragged.gather` op, which also supports ragged indices.)
34331//
34332// Arguments:
34333//
34334//	params_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
34335//
34336// `params` RaggedTensor input.
34337//
34338//	params_dense_values: The `flat_values` for the `params` RaggedTensor. There was a terminology change
34339//
34340// at the python level from dense_values to flat_values, so dense_values is the
34341// deprecated name.
34342//
34343//	indices: Indices in the outermost dimension of `params` of the values that should be
34344//
34345// gathered.
34346//
34347//	OUTPUT_RAGGED_RANK: The ragged rank of the output RaggedTensor. `output_nested_splits` will contain
34348//
34349// this number of `row_splits` tensors. This value should equal
34350// `indices.shape.ndims + params.ragged_rank - 1`.
34351//
34352// Returns:
34353//
34354//	output_nested_splits: The `nested_row_splits` tensors that define the row-partitioning for the
34355//
34356// returned RaggedTensor.
34357//
34358//	output_dense_values: The `flat_values` for the returned RaggedTensor.
34359func RaggedGather(scope *Scope, params_nested_splits []tf.Output, params_dense_values tf.Output, indices tf.Output, OUTPUT_RAGGED_RANK int64) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
34360	if scope.Err() != nil {
34361		return
34362	}
34363	attrs := map[string]interface{}{"OUTPUT_RAGGED_RANK": OUTPUT_RAGGED_RANK}
34364	opspec := tf.OpSpec{
34365		Type: "RaggedGather",
34366		Input: []tf.Input{
34367			tf.OutputList(params_nested_splits), params_dense_values, indices,
34368		},
34369		Attrs: attrs,
34370	}
34371	op := scope.AddOperation(opspec)
34372	if scope.Err() != nil {
34373		return
34374	}
34375	var idx int
34376	var err error
34377	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
34378		scope.UpdateErr("RaggedGather", err)
34379		return
34380	}
34381	output_dense_values = op.Output(idx)
34382	return output_nested_splits, output_dense_values
34383}
34384
34385// RaggedRangeAttr is an optional argument to RaggedRange.
34386type RaggedRangeAttr func(optionalAttr)
34387
34388// RaggedRangeTsplits sets the optional Tsplits attribute to value.
34389// If not specified, defaults to DT_INT64
34390func RaggedRangeTsplits(value tf.DataType) RaggedRangeAttr {
34391	return func(m optionalAttr) {
34392		m["Tsplits"] = value
34393	}
34394}
34395
34396// Returns a `RaggedTensor` containing the specified sequences of numbers.
34397//
34398// Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
34399// `rt_nested_splits`, such that
34400// `result[i] = range(starts[i], limits[i], deltas[i])`.
34401//
34402// ```python
34403// (rt_nested_splits, rt_dense_values) = ragged_range(
34404//
34405//	starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
34406//
34407// result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
34408// print(result)
34409// <tf.RaggedTensor [[2], [], [8, 9, 10, 11]] >
34410// ```
34411//
34412// The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
34413// The vector inputs must all have the same size.  Scalar inputs are broadcast
34414// to match the size of the vector inputs.
34415//
34416// Arguments:
34417//
34418//	starts: The starts of each range.
34419//	limits: The limits of each range.
34420//	deltas: The deltas of each range.
34421//
34422// Returns:
34423//
34424//	rt_nested_splits: The `row_splits` for the returned `RaggedTensor`.
34425//	rt_dense_values: The `flat_values` for the returned `RaggedTensor`.
34426func RaggedRange(scope *Scope, starts tf.Output, limits tf.Output, deltas tf.Output, optional ...RaggedRangeAttr) (rt_nested_splits tf.Output, rt_dense_values tf.Output) {
34427	if scope.Err() != nil {
34428		return
34429	}
34430	attrs := map[string]interface{}{}
34431	for _, a := range optional {
34432		a(attrs)
34433	}
34434	opspec := tf.OpSpec{
34435		Type: "RaggedRange",
34436		Input: []tf.Input{
34437			starts, limits, deltas,
34438		},
34439		Attrs: attrs,
34440	}
34441	op := scope.AddOperation(opspec)
34442	return op.Output(0), op.Output(1)
34443}
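
// A minimal sketch reproducing the documentation example above; the scalar
// delta broadcasts against the two vector inputs.
//
//	s := NewScope()
//	splits, values := RaggedRange(s,
//		Const(s, []int32{2, 5, 8}),  // starts
//		Const(s, []int32{3, 5, 12}), // limits
//		Const(s, int32(1)))          // deltas (scalar, broadcast)
//	_, _ = splits, values // splits: [0, 1, 1, 5]; values: [2, 8, 9, 10, 11]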
34444
34445// RaggedTensorFromVariantAttr is an optional argument to RaggedTensorFromVariant.
34446type RaggedTensorFromVariantAttr func(optionalAttr)
34447
34448// RaggedTensorFromVariantTsplits sets the optional Tsplits attribute to value.
34449// If not specified, defaults to DT_INT64
34450func RaggedTensorFromVariantTsplits(value tf.DataType) RaggedTensorFromVariantAttr {
34451	return func(m optionalAttr) {
34452		m["Tsplits"] = value
34453	}
34454}
34455
34456// Decodes a `variant` Tensor into a `RaggedTensor`.
34457//
34458// Decodes the given `variant` Tensor and returns a `RaggedTensor`. The input
34459// could be a scalar, meaning it encodes a single `RaggedTensor` with ragged_rank
34460// `output_ragged_rank`. It could also have an arbitrary rank, in which case each
34461// element is decoded into a `RaggedTensor` with ragged_rank `input_ragged_rank`
34462// and these are then stacked according to the input shape to output a single
34463// `RaggedTensor` with ragged_rank `output_ragged_rank`. Each `variant` element in
34464// the input Tensor is decoded by retrieving from the element a 1-D `variant`
34465// Tensor with `input_ragged_rank + 1` Tensors, corresponding to the splits and
34466// values of the decoded `RaggedTensor`. If `input_ragged_rank` is -1, then it is
34467// inferred as `output_ragged_rank` - `rank(encoded_ragged)`. See
34468// `RaggedTensorToVariant` for the corresponding encoding logic.
34469//
34470// Arguments:
34471//
34472//	encoded_ragged: A `variant` Tensor containing encoded `RaggedTensor`s.
34473//	input_ragged_rank: The ragged rank of each encoded `RaggedTensor` component in the input. If set to
34474//
34475// -1, this is inferred as `output_ragged_rank` - `rank(encoded_ragged)`
34476//
34477//	output_ragged_rank: The expected ragged rank of the output `RaggedTensor`. The following must hold:
34478//
34479// `output_ragged_rank = rank(encoded_ragged) + input_ragged_rank`.
34480//
34481// Returns:
34482//
34483//	output_nested_splits: A list of one or more Tensors representing the splits of the output
34484//
34485// `RaggedTensor`.
34486//
34487//	output_dense_values: A Tensor representing the values of the output `RaggedTensor`.
34488func RaggedTensorFromVariant(scope *Scope, encoded_ragged tf.Output, input_ragged_rank int64, output_ragged_rank int64, Tvalues tf.DataType, optional ...RaggedTensorFromVariantAttr) (output_nested_splits []tf.Output, output_dense_values tf.Output) {
34489	if scope.Err() != nil {
34490		return
34491	}
34492	attrs := map[string]interface{}{"input_ragged_rank": input_ragged_rank, "output_ragged_rank": output_ragged_rank, "Tvalues": Tvalues}
34493	for _, a := range optional {
34494		a(attrs)
34495	}
34496	opspec := tf.OpSpec{
34497		Type: "RaggedTensorFromVariant",
34498		Input: []tf.Input{
34499			encoded_ragged,
34500		},
34501		Attrs: attrs,
34502	}
34503	op := scope.AddOperation(opspec)
34504	if scope.Err() != nil {
34505		return
34506	}
34507	var idx int
34508	var err error
34509	if output_nested_splits, idx, err = makeOutputList(op, idx, "output_nested_splits"); err != nil {
34510		scope.UpdateErr("RaggedTensorFromVariant", err)
34511		return
34512	}
34513	output_dense_values = op.Output(idx)
34514	return output_nested_splits, output_dense_values
34515}
34516
34517// Converts a `RaggedTensor` into a `SparseTensor` with the same values.
34518//
34519// input=ragged.from_nested_row_splits(rt_dense_values, rt_nested_splits)
34520// output=SparseTensor(indices=sparse_indices, values=sparse_values,
34521//
34522//	dense_shape=sparse_dense_shape)
34523//
34524// Arguments:
34525//
34526//	rt_nested_splits: The `row_splits` for the `RaggedTensor`.
34527//	rt_dense_values: The `flat_values` for the `RaggedTensor`.
34528//
34529// Returns:
34530//
34531//	sparse_indices: The indices for the `SparseTensor`.
34532//	sparse_values: The values of the `SparseTensor`.
34533//	sparse_dense_shape: `sparse_dense_shape` is a tight bounding box of the input `RaggedTensor`.
34534func RaggedTensorToSparse(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output) (sparse_indices tf.Output, sparse_values tf.Output, sparse_dense_shape tf.Output) {
34535	if scope.Err() != nil {
34536		return
34537	}
34538	opspec := tf.OpSpec{
34539		Type: "RaggedTensorToSparse",
34540		Input: []tf.Input{
34541			tf.OutputList(rt_nested_splits), rt_dense_values,
34542		},
34543	}
34544	op := scope.AddOperation(opspec)
34545	return op.Output(0), op.Output(1), op.Output(2)
34546}
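
// A minimal sketch: a ragged tensor with rows [1 2], [], [3] converts to a
// sparse tensor whose dense shape is the tight bounding box [3, 2].
//
//	s := NewScope()
//	splits := Const(s, []int64{0, 2, 2, 3})
//	values := Const(s, []float32{1, 2, 3})
//	indices, vals, shape := RaggedTensorToSparse(s, []tf.Output{splits}, values)
//	_, _, _ = indices, vals, shape // indices: [[0 0] [0 1] [2 0]]; shape: [3 2]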
34547
34548// Create a dense tensor from a ragged tensor, possibly altering its shape.
34549//
34550// The `ragged_to_dense` op creates a dense tensor from a list of row partition
34551// tensors, a value vector, and default values. If the shape is unspecified, the
34552// minimal shape required to contain all the elements in the ragged tensor (the
34553// natural shape) will be used. If some dimensions are left unspecified, then the
34554// size of the natural shape is used in that dimension.
34555//
// The default_value will be broadcast to the output shape. After that, the values
// from the ragged tensor overwrite the default values. Note that the default_value
// must have fewer dimensions than the values tensor.
34559//
34560// The row partition tensors are in the order of the dimensions.
34561// At present, the types can be:
34562//   - "ROW_SPLITS": the row_splits tensor from the ragged tensor.
34563//   - "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
34564//   - "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
34565//     is preceded by "FIRST_DIM_SIZE".
34566//
34567// Arguments:
34568//
34569//	shape: The desired shape of the output tensor. If left unspecified (empty),
34570//
34571// the minimal shape required to contain all the elements in the ragged tensor
34572// (the natural shape) will be used. If some dimensions are left unspecified, then
34573// the size of the natural shape is used in that dimension.
34574//
34575// Note that dense dimensions cannot be modified by the shape argument. Trying to
34576// change the size of a dense dimension will cause the op to fail.
34577// Examples:
34578// natural shape: [4, 5, 6]
34579// shape: -1
34580// output shape: [4, 5, 6]
34581//
34582// natural shape: [4, 5, 6]
34583// shape: [3, -1, 2]
34584// output shape: [3, 5, 2]
34585//
34586// natural shape: [4, 5, 6]
34587// shape: [3, 7, 2]
34588// output shape: [3, 7, 2]
34589//
34590//	values: A 1D tensor representing the values of the ragged tensor.
34591//	default_value: The default_value when the shape is larger than the ragged tensor. The
34592//
34593// default_value is broadcast until it is the shape of the output tensor, and
34594// then overwritten by values in the ragged tensor. The default value must be
34595// compatible with this broadcast operation, and must have fewer dimensions than
34596// the value tensor.
34597//
34598//	row_partition_types: The types of the row partition tensors. At present, these can be:
34599//   - "ROW_SPLITS": the row_splits tensor from the ragged tensor.
34600//   - "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor.
34601//   - "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it
//     is preceded by "FIRST_DIM_SIZE".
34603//
34604// The tensors are in the order of the dimensions.
34605//
34606// Returns The resulting dense tensor.
34607func RaggedTensorToTensor(scope *Scope, shape tf.Output, values tf.Output, default_value tf.Output, row_partition_tensors []tf.Output, row_partition_types []string) (result tf.Output) {
34608	if scope.Err() != nil {
34609		return
34610	}
34611	attrs := map[string]interface{}{"row_partition_types": row_partition_types}
34612	opspec := tf.OpSpec{
34613		Type: "RaggedTensorToTensor",
34614		Input: []tf.Input{
34615			shape, values, default_value, tf.OutputList(row_partition_tensors),
34616		},
34617		Attrs: attrs,
34618	}
34619	op := scope.AddOperation(opspec)
34620	return op.Output(0)
34621}
34622
34623// Encodes a `RaggedTensor` into a `variant` Tensor.
34624//
34625// Encodes the given `RaggedTensor` and returns a `variant` Tensor. If
34626// `batched_input` is True, then input `RaggedTensor` is unbatched along the
34627// zero-th dimension, each component `RaggedTensor` is encoded into a scalar
34628// `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
34629// If `batched_input` is False, then the input `RaggedTensor` is encoded as is and
34630// a scalar `variant` Tensor is returned. A `RaggedTensor` is encoded by first
34631// creating a 1-D `variant` Tensor with `ragged_rank + 1` elements, containing the
34632// splits and values Tensors of the `RaggedTensor`. Then the 1-D `variant` Tensor
34633// is wrapped in a scalar `variant` Tensor. See `RaggedTensorFromVariant` for the
34634// corresponding decoding logic.
34635//
34636// Arguments:
34637//
34638//	rt_nested_splits: A list of one or more Tensors representing the splits of the input
34639//
34640// `RaggedTensor`.
34641//
34642//	rt_dense_values: A Tensor representing the values of the input `RaggedTensor`.
34643//	batched_input: A `bool` denoting whether the input is a batched `RaggedTensor`.
34644//
// Returns A `variant` Tensor containing the encoded `RaggedTensor`.
34646func RaggedTensorToVariant(scope *Scope, rt_nested_splits []tf.Output, rt_dense_values tf.Output, batched_input bool) (encoded_ragged tf.Output) {
34647	if scope.Err() != nil {
34648		return
34649	}
34650	attrs := map[string]interface{}{"batched_input": batched_input}
34651	opspec := tf.OpSpec{
34652		Type: "RaggedTensorToVariant",
34653		Input: []tf.Input{
34654			tf.OutputList(rt_nested_splits), rt_dense_values,
34655		},
34656		Attrs: attrs,
34657	}
34658	op := scope.AddOperation(opspec)
34659	return op.Output(0)
34660}
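
// Illustrative usage (editor's sketch, not part of the generated bindings):
// encode a single, non-batched ragged tensor into a scalar `variant`.
// Assumes a Scope s; names are hypothetical.
//
//	splits := Const(s.SubScope("splits"), []int64{0, 2, 3})
//	values := Const(s.SubScope("values"), []int32{1, 2, 3})
//	encoded := RaggedTensorToVariant(s, []tf.Output{splits}, values, false)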
34661
34662// Helper used to compute the gradient for `RaggedTensorToVariant`.
34663//
34664// Computes the gradient for the dense_values input to the RaggedTensorToVariant
34665// op, given the variant-encoded ragged gradients of the outputs, along with
34666// the outer row-splits and the shape of the dense-values that were provided as
34667// inputs to the RaggedTensorToVariant op.
34668//
34669// Arguments:
34670//
34671//	encoded_ragged_grad: A `variant` Tensor containing encoded `RaggedTensor` gradients.
34672//	row_splits: Outermost row-splits that were used as input to the RaggedTensorToVariant op.
34673//	dense_values_shape: Shape of the dense_values that was used as an input to the
34674//
34675// RaggedTensorToVariant op.
34676//
34677// Returns Gradient for the dense_values of the RaggedTensorToVariant op.
34678func RaggedTensorToVariantGradient(scope *Scope, encoded_ragged_grad tf.Output, row_splits tf.Output, dense_values_shape tf.Output, Tvalues tf.DataType) (dense_values_grad tf.Output) {
34679	if scope.Err() != nil {
34680		return
34681	}
34682	attrs := map[string]interface{}{"Tvalues": Tvalues}
34683	opspec := tf.OpSpec{
34684		Type: "RaggedTensorToVariantGradient",
34685		Input: []tf.Input{
34686			encoded_ragged_grad, row_splits, dense_values_shape,
34687		},
34688		Attrs: attrs,
34689	}
34690	op := scope.AddOperation(opspec)
34691	return op.Output(0)
34692}
34693
34694// RandomCropAttr is an optional argument to RandomCrop.
34695type RandomCropAttr func(optionalAttr)
34696
34697// RandomCropSeed sets the optional seed attribute to value.
34698//
34699// value: If either seed or seed2 are set to be non-zero, the random number
34700// generator is seeded by the given seed.  Otherwise, it is seeded by a
34701// random seed.
34702// If not specified, defaults to 0
34703func RandomCropSeed(value int64) RandomCropAttr {
34704	return func(m optionalAttr) {
34705		m["seed"] = value
34706	}
34707}
34708
34709// RandomCropSeed2 sets the optional seed2 attribute to value.
34710//
// value: A second seed to avoid seed collision.
34712// If not specified, defaults to 0
34713func RandomCropSeed2(value int64) RandomCropAttr {
34714	return func(m optionalAttr) {
34715		m["seed2"] = value
34716	}
34717}
34718
34719// Randomly crop `image`.
34720//
34721// DEPRECATED at GraphDef version 8: Random crop is now pure Python
34722//
34723// `size` is a 1-D int64 tensor with 2 elements representing the crop height and
// width.  The values must be non-negative.
34725//
34726// This Op picks a random location in `image` and crops a `height` by `width`
34727// rectangle from that location.  The random location is picked so the cropped
34728// area will fit inside the original image.
34729//
34730// Arguments:
34731//
34732//	image: 3-D of shape `[height, width, channels]`.
//	size: 1-D of length 2 containing: `crop_height`, `crop_width`.
34734//
// Returns 3-D of shape `[crop_height, crop_width, channels]`.
34736func RandomCrop(scope *Scope, image tf.Output, size tf.Output, optional ...RandomCropAttr) (output tf.Output) {
34737	if scope.Err() != nil {
34738		return
34739	}
34740	attrs := map[string]interface{}{}
34741	for _, a := range optional {
34742		a(attrs)
34743	}
34744	opspec := tf.OpSpec{
34745		Type: "RandomCrop",
34746		Input: []tf.Input{
34747			image, size,
34748		},
34749		Attrs: attrs,
34750	}
34751	op := scope.AddOperation(opspec)
34752	return op.Output(0)
34753}
34754
34755// RandomDatasetAttr is an optional argument to RandomDataset.
34756type RandomDatasetAttr func(optionalAttr)
34757
34758// RandomDatasetMetadata sets the optional metadata attribute to value.
34759// If not specified, defaults to ""
34760func RandomDatasetMetadata(value string) RandomDatasetAttr {
34761	return func(m optionalAttr) {
34762		m["metadata"] = value
34763	}
34764}
34765
34766// Creates a Dataset that returns pseudorandom numbers.
34767//
34768// Creates a Dataset that returns a stream of uniformly distributed
34769// pseudorandom 64-bit signed integers.
34770//
34771// In the TensorFlow Python API, you can instantiate this dataset via the
34772// class `tf.data.experimental.RandomDataset`.
34773//
34774// Instances of this dataset are also created as a result of the
34775// `hoist_random_uniform` static optimization. Whether this optimization is
34776// performed is determined by the `experimental_optimization.hoist_random_uniform`
34777// option of `tf.data.Options`.
34778//
34779// Arguments:
34780//
34781//	seed: A scalar seed for the random number generator. If either seed or
34782//
34783// seed2 is set to be non-zero, the random number generator is seeded
34784// by the given seed.  Otherwise, a random seed is used.
34785//
34786//	seed2: A second scalar seed to avoid seed collision.
34787func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RandomDatasetAttr) (handle tf.Output) {
34788	if scope.Err() != nil {
34789		return
34790	}
34791	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
34792	for _, a := range optional {
34793		a(attrs)
34794	}
34795	opspec := tf.OpSpec{
34796		Type: "RandomDataset",
34797		Input: []tf.Input{
34798			seed, seed2,
34799		},
34800		Attrs: attrs,
34801	}
34802	op := scope.AddOperation(opspec)
34803	return op.Output(0)
34804}
34805
34806// RandomGammaAttr is an optional argument to RandomGamma.
34807type RandomGammaAttr func(optionalAttr)
34808
34809// RandomGammaSeed sets the optional seed attribute to value.
34810//
34811// value: If either `seed` or `seed2` are set to be non-zero, the random number
34812// generator is seeded by the given seed.  Otherwise, it is seeded by a
34813// random seed.
34814// If not specified, defaults to 0
34815func RandomGammaSeed(value int64) RandomGammaAttr {
34816	return func(m optionalAttr) {
34817		m["seed"] = value
34818	}
34819}
34820
34821// RandomGammaSeed2 sets the optional seed2 attribute to value.
34822//
34823// value: A second seed to avoid seed collision.
34824// If not specified, defaults to 0
34825func RandomGammaSeed2(value int64) RandomGammaAttr {
34826	return func(m optionalAttr) {
34827		m["seed2"] = value
34828	}
34829}
34830
34831// Outputs random values from the Gamma distribution(s) described by alpha.
34832//
34833// This op uses the algorithm by Marsaglia et al. to acquire samples via
34834// transformation-rejection from pairs of uniform and normal random variables.
34835// See http://dl.acm.org/citation.cfm?id=358414
34836//
34837// Arguments:
34838//
34839//	shape: 1-D integer tensor. Shape of independent samples to draw from each
34840//
34841// distribution described by the shape parameters given in alpha.
34842//
34843//	alpha: A tensor in which each scalar is a "shape" parameter describing the
34844//
34845// associated gamma distribution.
34846//
34847// Returns A tensor with shape `shape + shape(alpha)`. Each slice
34848// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
34849// `alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.
34850func RandomGamma(scope *Scope, shape tf.Output, alpha tf.Output, optional ...RandomGammaAttr) (output tf.Output) {
34851	if scope.Err() != nil {
34852		return
34853	}
34854	attrs := map[string]interface{}{}
34855	for _, a := range optional {
34856		a(attrs)
34857	}
34858	opspec := tf.OpSpec{
34859		Type: "RandomGamma",
34860		Input: []tf.Input{
34861			shape, alpha,
34862		},
34863		Attrs: attrs,
34864	}
34865	op := scope.AddOperation(opspec)
34866	return op.Output(0)
34867}
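
// Illustrative usage (editor's sketch, not part of the generated bindings):
// draw 10 samples from each of two Gamma distributions with shape
// parameters 2 and 5; the result has shape [10, 2]. Assumes a Scope s;
// names are hypothetical.
//
//	shape := Const(s.SubScope("shape"), []int32{10})
//	alpha := Const(s.SubScope("alpha"), []float32{2, 5})
//	samples := RandomGamma(s, shape, alpha, RandomGammaSeed(7))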
34868
34869// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
34870func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
34871	if scope.Err() != nil {
34872		return
34873	}
34874	opspec := tf.OpSpec{
34875		Type: "RandomGammaGrad",
34876		Input: []tf.Input{
34877			alpha, sample,
34878		},
34879	}
34880	op := scope.AddOperation(opspec)
34881	return op.Output(0)
34882}
34883
34884// Outputs the position of `value` in a permutation of [0, ..., max_index].
34885//
// Output values are a bijection of the `index` for any fixed combination of `seed` and `max_index`.
34887//
// If multiple inputs are vectors (a matrix in the case of `seed`), then the size of the
34889// first dimension must match.
34890//
34891// The outputs are deterministic.
34892//
34893// Arguments:
34894//
34895//	index: A scalar tensor or a vector of dtype `dtype`. The index (or indices) to be shuffled. Must be within [0, max_index].
34896//	seed: A tensor of dtype `Tseed` and shape [3] or [n, 3]. The random seed.
34897//	max_index: A scalar tensor or vector of dtype `dtype`. The upper bound(s) of the interval (inclusive).
34898//
34899// Returns A scalar tensor of dtype `dtype`, within [0, max_index]. The randomly shuffled index.
34900func RandomIndexShuffle(scope *Scope, index tf.Output, seed tf.Output, max_index tf.Output) (output tf.Output) {
34901	if scope.Err() != nil {
34902		return
34903	}
34904	opspec := tf.OpSpec{
34905		Type: "RandomIndexShuffle",
34906		Input: []tf.Input{
34907			index, seed, max_index,
34908		},
34909	}
34910	op := scope.AddOperation(opspec)
34911	return op.Output(0)
34912}
34913
34914// RandomPoissonAttr is an optional argument to RandomPoisson.
34915type RandomPoissonAttr func(optionalAttr)
34916
34917// RandomPoissonSeed sets the optional seed attribute to value.
34918// If not specified, defaults to 0
34919func RandomPoissonSeed(value int64) RandomPoissonAttr {
34920	return func(m optionalAttr) {
34921		m["seed"] = value
34922	}
34923}
34924
34925// RandomPoissonSeed2 sets the optional seed2 attribute to value.
34926// If not specified, defaults to 0
34927func RandomPoissonSeed2(value int64) RandomPoissonAttr {
34928	return func(m optionalAttr) {
34929		m["seed2"] = value
34930	}
34931}
34932
34933// Use RandomPoissonV2 instead.
34934//
34935// DEPRECATED at GraphDef version 25: Replaced by RandomPoissonV2
34936func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonAttr) (output tf.Output) {
34937	if scope.Err() != nil {
34938		return
34939	}
34940	attrs := map[string]interface{}{}
34941	for _, a := range optional {
34942		a(attrs)
34943	}
34944	opspec := tf.OpSpec{
34945		Type: "RandomPoisson",
34946		Input: []tf.Input{
34947			shape, rate,
34948		},
34949		Attrs: attrs,
34950	}
34951	op := scope.AddOperation(opspec)
34952	return op.Output(0)
34953}
34954
34955// RandomPoissonV2Attr is an optional argument to RandomPoissonV2.
34956type RandomPoissonV2Attr func(optionalAttr)
34957
34958// RandomPoissonV2Seed sets the optional seed attribute to value.
34959//
34960// value: If either `seed` or `seed2` are set to be non-zero, the random number
34961// generator is seeded by the given seed.  Otherwise, it is seeded by a
34962// random seed.
34963// If not specified, defaults to 0
34964func RandomPoissonV2Seed(value int64) RandomPoissonV2Attr {
34965	return func(m optionalAttr) {
34966		m["seed"] = value
34967	}
34968}
34969
34970// RandomPoissonV2Seed2 sets the optional seed2 attribute to value.
34971//
34972// value: A second seed to avoid seed collision.
34973// If not specified, defaults to 0
34974func RandomPoissonV2Seed2(value int64) RandomPoissonV2Attr {
34975	return func(m optionalAttr) {
34976		m["seed2"] = value
34977	}
34978}
34979
34980// RandomPoissonV2Dtype sets the optional dtype attribute to value.
34981// If not specified, defaults to DT_INT64
34982func RandomPoissonV2Dtype(value tf.DataType) RandomPoissonV2Attr {
34983	return func(m optionalAttr) {
34984		m["dtype"] = value
34985	}
34986}
34987
34988// Outputs random values from the Poisson distribution(s) described by rate.
34989//
34990// This op uses two algorithms, depending on rate. If rate >= 10, then
34991// the algorithm by Hormann is used to acquire samples via
34992// transformation-rejection.
34993// See http://www.sciencedirect.com/science/article/pii/0167668793909974.
34994//
34995// Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
34996// random variables.
34997// See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
// Programming, Volume 2. Addison-Wesley.
34999//
35000// Arguments:
35001//
35002//	shape: 1-D integer tensor. Shape of independent samples to draw from each
35003//
35004// distribution described by the shape parameters given in rate.
35005//
35006//	rate: A tensor in which each scalar is a "rate" parameter describing the
35007//
// associated Poisson distribution.
35009//
35010// Returns A tensor with shape `shape + shape(rate)`. Each slice
35011// `[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
35012// `rate[i0, i1, ...iN]`.
35013func RandomPoissonV2(scope *Scope, shape tf.Output, rate tf.Output, optional ...RandomPoissonV2Attr) (output tf.Output) {
35014	if scope.Err() != nil {
35015		return
35016	}
35017	attrs := map[string]interface{}{}
35018	for _, a := range optional {
35019		a(attrs)
35020	}
35021	opspec := tf.OpSpec{
35022		Type: "RandomPoissonV2",
35023		Input: []tf.Input{
35024			shape, rate,
35025		},
35026		Attrs: attrs,
35027	}
35028	op := scope.AddOperation(opspec)
35029	return op.Output(0)
35030}
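
// Illustrative usage (editor's sketch, not part of the generated bindings):
// draw 4 samples from each of two Poisson distributions (rates 1.5 and 12),
// yielding an int64 tensor of shape [4, 2]. Assumes a Scope s; names are
// hypothetical.
//
//	shape := Const(s.SubScope("shape"), []int32{4})
//	rate := Const(s.SubScope("rate"), []float32{1.5, 12})
//	samples := RandomPoissonV2(s, shape, rate, RandomPoissonV2Seed(1))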
35031
35032// RandomShuffleAttr is an optional argument to RandomShuffle.
35033type RandomShuffleAttr func(optionalAttr)
35034
35035// RandomShuffleSeed sets the optional seed attribute to value.
35036//
35037// value: If either `seed` or `seed2` are set to be non-zero, the random number
35038// generator is seeded by the given seed.  Otherwise, it is seeded by a
35039// random seed.
35040// If not specified, defaults to 0
35041func RandomShuffleSeed(value int64) RandomShuffleAttr {
35042	return func(m optionalAttr) {
35043		m["seed"] = value
35044	}
35045}
35046
35047// RandomShuffleSeed2 sets the optional seed2 attribute to value.
35048//
35049// value: A second seed to avoid seed collision.
35050// If not specified, defaults to 0
35051func RandomShuffleSeed2(value int64) RandomShuffleAttr {
35052	return func(m optionalAttr) {
35053		m["seed2"] = value
35054	}
35055}
35056
35057// Randomly shuffles a tensor along its first dimension.
35058//
35059//	The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
35060//	to one and only one `output[i]`. For example, a mapping that might occur for a
35061//	3x2 tensor is:
35062//
// ```
// [[1, 2],       [[5, 6],
//  [3, 4],  ==>   [1, 2],
//  [5, 6]]        [3, 4]]
// ```
35070//
35071// Arguments:
35072//
35073//	value: The tensor to be shuffled.
35074//
35075// Returns A tensor of same shape and type as `value`, shuffled along its first
35076// dimension.
35077func RandomShuffle(scope *Scope, value tf.Output, optional ...RandomShuffleAttr) (output tf.Output) {
35078	if scope.Err() != nil {
35079		return
35080	}
35081	attrs := map[string]interface{}{}
35082	for _, a := range optional {
35083		a(attrs)
35084	}
35085	opspec := tf.OpSpec{
35086		Type: "RandomShuffle",
35087		Input: []tf.Input{
35088			value,
35089		},
35090		Attrs: attrs,
35091	}
35092	op := scope.AddOperation(opspec)
35093	return op.Output(0)
35094}
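
// Illustrative usage (editor's sketch, not part of the generated bindings):
// shuffle the rows of a 3x2 tensor with a fixed seed for reproducibility.
// Assumes a Scope s; names are hypothetical.
//
//	value := Const(s, [][]int32{{1, 2}, {3, 4}, {5, 6}})
//	shuffled := RandomShuffle(s, value, RandomShuffleSeed(1))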
35095
35096// RandomShuffleQueueV2Attr is an optional argument to RandomShuffleQueueV2.
35097type RandomShuffleQueueV2Attr func(optionalAttr)
35098
35099// RandomShuffleQueueV2Shapes sets the optional shapes attribute to value.
35100//
35101// value: The shape of each component in a value. The length of this attr must
35102// be either 0 or the same as the length of component_types. If the length of
35103// this attr is 0, the shapes of queue elements are not constrained, and
35104// only one element may be dequeued at a time.
35105// If not specified, defaults to {}
35106//
35107// REQUIRES: len(value) >= 0
35108func RandomShuffleQueueV2Shapes(value []tf.Shape) RandomShuffleQueueV2Attr {
35109	return func(m optionalAttr) {
35110		m["shapes"] = value
35111	}
35112}
35113
35114// RandomShuffleQueueV2Capacity sets the optional capacity attribute to value.
35115//
35116// value: The upper bound on the number of elements in this queue.
35117// Negative numbers mean no limit.
35118// If not specified, defaults to -1
35119func RandomShuffleQueueV2Capacity(value int64) RandomShuffleQueueV2Attr {
35120	return func(m optionalAttr) {
35121		m["capacity"] = value
35122	}
35123}
35124
35125// RandomShuffleQueueV2MinAfterDequeue sets the optional min_after_dequeue attribute to value.
35126//
35127// value: Dequeue will block unless there would be this
35128// many elements after the dequeue or the queue is closed. This
35129// ensures a minimum level of mixing of elements.
35130// If not specified, defaults to 0
35131func RandomShuffleQueueV2MinAfterDequeue(value int64) RandomShuffleQueueV2Attr {
35132	return func(m optionalAttr) {
35133		m["min_after_dequeue"] = value
35134	}
35135}
35136
35137// RandomShuffleQueueV2Seed sets the optional seed attribute to value.
35138//
35139// value: If either seed or seed2 is set to be non-zero, the random number
35140// generator is seeded by the given seed.  Otherwise, a random seed is used.
35141// If not specified, defaults to 0
35142func RandomShuffleQueueV2Seed(value int64) RandomShuffleQueueV2Attr {
35143	return func(m optionalAttr) {
35144		m["seed"] = value
35145	}
35146}
35147
35148// RandomShuffleQueueV2Seed2 sets the optional seed2 attribute to value.
35149//
35150// value: A second seed to avoid seed collision.
35151// If not specified, defaults to 0
35152func RandomShuffleQueueV2Seed2(value int64) RandomShuffleQueueV2Attr {
35153	return func(m optionalAttr) {
35154		m["seed2"] = value
35155	}
35156}
35157
35158// RandomShuffleQueueV2Container sets the optional container attribute to value.
35159//
35160// value: If non-empty, this queue is placed in the given container.
35161// Otherwise, a default container is used.
35162// If not specified, defaults to ""
35163func RandomShuffleQueueV2Container(value string) RandomShuffleQueueV2Attr {
35164	return func(m optionalAttr) {
35165		m["container"] = value
35166	}
35167}
35168
35169// RandomShuffleQueueV2SharedName sets the optional shared_name attribute to value.
35170//
35171// value: If non-empty, this queue will be shared under the given name
35172// across multiple sessions.
35173// If not specified, defaults to ""
35174func RandomShuffleQueueV2SharedName(value string) RandomShuffleQueueV2Attr {
35175	return func(m optionalAttr) {
35176		m["shared_name"] = value
35177	}
35178}
35179
35180// A queue that randomizes the order of elements.
35181//
35182// Arguments:
35183//
35184//	component_types: The type of each component in a value.
35185//
35186// Returns The handle to the queue.
35187func RandomShuffleQueueV2(scope *Scope, component_types []tf.DataType, optional ...RandomShuffleQueueV2Attr) (handle tf.Output) {
35188	if scope.Err() != nil {
35189		return
35190	}
35191	attrs := map[string]interface{}{"component_types": component_types}
35192	for _, a := range optional {
35193		a(attrs)
35194	}
35195	opspec := tf.OpSpec{
35196		Type: "RandomShuffleQueueV2",
35197
35198		Attrs: attrs,
35199	}
35200	op := scope.AddOperation(opspec)
35201	return op.Output(0)
35202}
35203
35204// RandomStandardNormalAttr is an optional argument to RandomStandardNormal.
35205type RandomStandardNormalAttr func(optionalAttr)
35206
35207// RandomStandardNormalSeed sets the optional seed attribute to value.
35208//
35209// value: If either `seed` or `seed2` are set to be non-zero, the random number
35210// generator is seeded by the given seed.  Otherwise, it is seeded by a
35211// random seed.
35212// If not specified, defaults to 0
35213func RandomStandardNormalSeed(value int64) RandomStandardNormalAttr {
35214	return func(m optionalAttr) {
35215		m["seed"] = value
35216	}
35217}
35218
35219// RandomStandardNormalSeed2 sets the optional seed2 attribute to value.
35220//
35221// value: A second seed to avoid seed collision.
35222// If not specified, defaults to 0
35223func RandomStandardNormalSeed2(value int64) RandomStandardNormalAttr {
35224	return func(m optionalAttr) {
35225		m["seed2"] = value
35226	}
35227}
35228
35229// Outputs random values from a normal distribution.
35230//
35231// The generated values will have mean 0 and standard deviation 1.
35232//
35233// Arguments:
35234//
35235//	shape: The shape of the output tensor.
35236//	dtype: The type of the output.
35237//
35238// Returns A tensor of the specified shape filled with random normal values.
35239func RandomStandardNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomStandardNormalAttr) (output tf.Output) {
35240	if scope.Err() != nil {
35241		return
35242	}
35243	attrs := map[string]interface{}{"dtype": dtype}
35244	for _, a := range optional {
35245		a(attrs)
35246	}
35247	opspec := tf.OpSpec{
35248		Type: "RandomStandardNormal",
35249		Input: []tf.Input{
35250			shape,
35251		},
35252		Attrs: attrs,
35253	}
35254	op := scope.AddOperation(opspec)
35255	return op.Output(0)
35256}
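
// Illustrative usage (editor's sketch, not part of the generated bindings):
// sample a 2x2 float32 tensor from a standard normal distribution. Assumes
// a Scope s; names are hypothetical.
//
//	shape := Const(s.SubScope("shape"), []int32{2, 2})
//	normals := RandomStandardNormal(s, shape, tf.Float, RandomStandardNormalSeed(42))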
35257
35258// RandomUniformAttr is an optional argument to RandomUniform.
35259type RandomUniformAttr func(optionalAttr)
35260
35261// RandomUniformSeed sets the optional seed attribute to value.
35262//
35263// value: If either `seed` or `seed2` are set to be non-zero, the random number
35264// generator is seeded by the given seed.  Otherwise, it is seeded by a
35265// random seed.
35266// If not specified, defaults to 0
35267func RandomUniformSeed(value int64) RandomUniformAttr {
35268	return func(m optionalAttr) {
35269		m["seed"] = value
35270	}
35271}
35272
35273// RandomUniformSeed2 sets the optional seed2 attribute to value.
35274//
35275// value: A second seed to avoid seed collision.
35276// If not specified, defaults to 0
35277func RandomUniformSeed2(value int64) RandomUniformAttr {
35278	return func(m optionalAttr) {
35279		m["seed2"] = value
35280	}
35281}
35282
35283// Outputs random values from a uniform distribution.
35284//
35285// The generated values follow a uniform distribution in the range `[0, 1)`. The
35286// lower bound 0 is included in the range, while the upper bound 1 is excluded.
35287//
35288// Arguments:
35289//
35290//	shape: The shape of the output tensor.
35291//	dtype: The type of the output.
35292//
35293// Returns A tensor of the specified shape filled with uniform random values.
35294func RandomUniform(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...RandomUniformAttr) (output tf.Output) {
35295	if scope.Err() != nil {
35296		return
35297	}
35298	attrs := map[string]interface{}{"dtype": dtype}
35299	for _, a := range optional {
35300		a(attrs)
35301	}
35302	opspec := tf.OpSpec{
35303		Type: "RandomUniform",
35304		Input: []tf.Input{
35305			shape,
35306		},
35307		Attrs: attrs,
35308	}
35309	op := scope.AddOperation(opspec)
35310	return op.Output(0)
35311}
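
// Illustrative end-to-end usage (editor's sketch, not part of the generated
// bindings): build and run a graph that fetches one uniform sample. Error
// handling is elided and names are hypothetical.
//
//	s := NewScope()
//	shape := Const(s.SubScope("shape"), []int32{2, 3})
//	u := RandomUniform(s, shape, tf.Float)
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	defer sess.Close()
//	out, _ := sess.Run(nil, []tf.Output{u}, nil)
//	// out[0].Value() is a 2x3 [][]float32 with entries in [0, 1)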
35312
35313// RandomUniformIntAttr is an optional argument to RandomUniformInt.
35314type RandomUniformIntAttr func(optionalAttr)
35315
35316// RandomUniformIntSeed sets the optional seed attribute to value.
35317//
35318// value: If either `seed` or `seed2` are set to be non-zero, the random number
35319// generator is seeded by the given seed.  Otherwise, it is seeded by a
35320// random seed.
35321// If not specified, defaults to 0
35322func RandomUniformIntSeed(value int64) RandomUniformIntAttr {
35323	return func(m optionalAttr) {
35324		m["seed"] = value
35325	}
35326}
35327
35328// RandomUniformIntSeed2 sets the optional seed2 attribute to value.
35329//
35330// value: A second seed to avoid seed collision.
35331// If not specified, defaults to 0
35332func RandomUniformIntSeed2(value int64) RandomUniformIntAttr {
35333	return func(m optionalAttr) {
35334		m["seed2"] = value
35335	}
35336}
35337
35338// Outputs random integers from a uniform distribution.
35339//
35340// The generated values are uniform integers in the range `[minval, maxval)`.
35341// The lower bound `minval` is included in the range, while the upper bound
35342// `maxval` is excluded.
35343//
35344// The random integers are slightly biased unless `maxval - minval` is an exact
35345// power of two.  The bias is small for values of `maxval - minval` significantly
35346// smaller than the range of the output (either `2^32` or `2^64`).
35347//
35348// Arguments:
35349//
35350//	shape: The shape of the output tensor.
35351//	minval: 0-D.  Inclusive lower bound on the generated integers.
35352//	maxval: 0-D.  Exclusive upper bound on the generated integers.
35353//
35354// Returns A tensor of the specified shape filled with uniform random integers.
35355func RandomUniformInt(scope *Scope, shape tf.Output, minval tf.Output, maxval tf.Output, optional ...RandomUniformIntAttr) (output tf.Output) {
35356	if scope.Err() != nil {
35357		return
35358	}
35359	attrs := map[string]interface{}{}
35360	for _, a := range optional {
35361		a(attrs)
35362	}
35363	opspec := tf.OpSpec{
35364		Type: "RandomUniformInt",
35365		Input: []tf.Input{
35366			shape, minval, maxval,
35367		},
35368		Attrs: attrs,
35369	}
35370	op := scope.AddOperation(opspec)
35371	return op.Output(0)
35372}
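
// Illustrative usage (editor's sketch, not part of the generated bindings):
// five uniform integers in [0, 10). minval and maxval must be scalars of
// the same integer dtype. Assumes a Scope s; names are hypothetical.
//
//	shape := Const(s.SubScope("shape"), []int32{5})
//	minval := Const(s.SubScope("min"), int64(0))
//	maxval := Const(s.SubScope("max"), int64(10))
//	ints := RandomUniformInt(s, shape, minval, maxval)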
35373
35374// Creates a sequence of numbers.
35375//
35376// This operation creates a sequence of numbers that begins at `start` and
35377// extends by increments of `delta` up to but not including `limit`.
35378//
35379// For example:
35380//
35381// ```
35382// # 'start' is 3
35383// # 'limit' is 18
35384// # 'delta' is 3
35385// tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
35386// ```
35387//
35388// Arguments:
35389//
35390//	start: 0-D (scalar). First entry in the sequence.
35391//	limit: 0-D (scalar). Upper limit of sequence, exclusive.
35392//	delta: 0-D (scalar). Optional. Default is 1. Number that increments `start`.
35393//
35394// Returns 1-D.
35395func Range(scope *Scope, start tf.Output, limit tf.Output, delta tf.Output) (output tf.Output) {
35396	if scope.Err() != nil {
35397		return
35398	}
35399	opspec := tf.OpSpec{
35400		Type: "Range",
35401		Input: []tf.Input{
35402			start, limit, delta,
35403		},
35404	}
35405	op := scope.AddOperation(opspec)
35406	return op.Output(0)
35407}
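
// Illustrative usage (editor's sketch, not part of the generated bindings):
// the example above expressed with this wrapper. Assumes a Scope s; names
// are hypothetical.
//
//	start := Const(s.SubScope("start"), int32(3))
//	limit := Const(s.SubScope("limit"), int32(18))
//	delta := Const(s.SubScope("delta"), int32(3))
//	seq := Range(s, start, limit, delta) // evaluates to [3, 6, 9, 12, 15]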
35408
35409// RangeDatasetAttr is an optional argument to RangeDataset.
35410type RangeDatasetAttr func(optionalAttr)
35411
35412// RangeDatasetMetadata sets the optional metadata attribute to value.
35413// If not specified, defaults to ""
35414func RangeDatasetMetadata(value string) RangeDatasetAttr {
35415	return func(m optionalAttr) {
35416		m["metadata"] = value
35417	}
35418}
35419
35420// RangeDatasetReplicateOnSplit sets the optional replicate_on_split attribute to value.
35421// If not specified, defaults to false
35422func RangeDatasetReplicateOnSplit(value bool) RangeDatasetAttr {
35423	return func(m optionalAttr) {
35424		m["replicate_on_split"] = value
35425	}
35426}
35427
// Creates a dataset with a range of values. Corresponds to Python's xrange.
35429//
35430// Arguments:
35431//
//	start: corresponds to start in Python's xrange().
//	stop: corresponds to stop in Python's xrange().
//	step: corresponds to step in Python's xrange().
35435func RangeDataset(scope *Scope, start tf.Output, stop tf.Output, step tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RangeDatasetAttr) (handle tf.Output) {
35436	if scope.Err() != nil {
35437		return
35438	}
35439	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
35440	for _, a := range optional {
35441		a(attrs)
35442	}
35443	opspec := tf.OpSpec{
35444		Type: "RangeDataset",
35445		Input: []tf.Input{
35446			start, stop, step,
35447		},
35448		Attrs: attrs,
35449	}
35450	op := scope.AddOperation(opspec)
35451	return op.Output(0)
35452}
35453
35454// Returns the rank of a tensor.
35455//
35456// This operation returns an integer representing the rank of `input`.
35457//
35458// For example:
35459//
35460// ```
35461// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
35462// # shape of tensor 't' is [2, 2, 3]
35463// rank(t) ==> 3
35464// ```
35465//
35466// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
35467// of a tensor is the number of indices required to uniquely select each element
35468// of the tensor. Rank is also known as "order", "degree", or "ndims."
35469func Rank(scope *Scope, input tf.Output) (output tf.Output) {
35470	if scope.Err() != nil {
35471		return
35472	}
35473	opspec := tf.OpSpec{
35474		Type: "Rank",
35475		Input: []tf.Input{
35476			input,
35477		},
35478	}
35479	op := scope.AddOperation(opspec)
35480	return op.Output(0)
35481}
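
// Illustrative usage (editor's sketch, not part of the generated bindings):
// the rank of a [2, 2, 3] tensor is 3, matching the example above. Assumes
// a Scope s; names are hypothetical.
//
//	t := Const(s, [][][]int32{{{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}})
//	r := Rank(s, t) // scalar int32 tensor holding 3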
35482
35483// Reads and outputs the entire contents of the input filename.
35484func ReadFile(scope *Scope, filename tf.Output) (contents tf.Output) {
35485	if scope.Err() != nil {
35486		return
35487	}
35488	opspec := tf.OpSpec{
35489		Type: "ReadFile",
35490		Input: []tf.Input{
35491			filename,
35492		},
35493	}
35494	op := scope.AddOperation(opspec)
35495	return op.Output(0)
35496}
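
// Illustrative usage (editor's sketch, not part of the generated bindings):
// read a file's bytes into a scalar string tensor. Assumes a Scope s; the
// path is hypothetical.
//
//	filename := Const(s, "/tmp/example.txt")
//	contents := ReadFile(s, filename)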
35497
35498// Reads the value of a variable.
35499//
35500// The tensor returned by this operation is immutable.
35501//
35502// The value returned by this operation is guaranteed to be influenced by all the
35503// writes on which this operation depends directly or indirectly, and to not be
35504// influenced by any of the writes which depend directly or indirectly on this
35505// operation.
35506//
35507// Arguments:
35508//
35509//	resource: handle to the resource in which to store the variable.
35510//	dtype: the dtype of the value.
35511func ReadVariableOp(scope *Scope, resource tf.Output, dtype tf.DataType) (value tf.Output) {
35512	if scope.Err() != nil {
35513		return
35514	}
35515	attrs := map[string]interface{}{"dtype": dtype}
35516	opspec := tf.OpSpec{
35517		Type: "ReadVariableOp",
35518		Input: []tf.Input{
35519			resource,
35520		},
35521		Attrs: attrs,
35522	}
35523	op := scope.AddOperation(opspec)
35524	return op.Output(0)
35525}
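
// Illustrative usage (editor's sketch, not part of the generated bindings):
// create a resource variable, assign it, then read it back. The assign
// operation must run before the read (e.g. pass it as a session target on
// an earlier Run call). Assumes a Scope s; names are hypothetical.
//
//	handle := VarHandleOp(s, tf.Float, tf.ScalarShape())
//	assign := AssignVariableOp(s, handle, Const(s.SubScope("v0"), float32(1)))
//	value := ReadVariableOp(s, handle, tf.Float)
//	_ = assign // run as a target before fetching value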
35526
35527// ReadVariableXlaSplitNDAttr is an optional argument to ReadVariableXlaSplitND.
35528type ReadVariableXlaSplitNDAttr func(optionalAttr)
35529
35530// ReadVariableXlaSplitNDPaddings sets the optional paddings attribute to value.
35531//
35532// value: Optional list of right paddings per dimension of input tensor to apply before
35533// splitting. This can be used to make a dimension evenly divisible.
35534// If not specified, defaults to {}
35535func ReadVariableXlaSplitNDPaddings(value []int64) ReadVariableXlaSplitNDAttr {
35536	return func(m optionalAttr) {
35537		m["paddings"] = value
35538	}
35539}
35540
35541// Splits resource variable input tensor across all dimensions.
35542//
35543// An op which splits the resource variable input tensor based on the given
// num_splits attribute, optionally pads the slices, and returns them. Slices
35545// are returned in row-major order.
35546//
35547// This op may be generated via the TPU bridge.
35548//
35549// For example, with `input` tensor:
// ```
// [[0, 1, 2],
//  [3, 4, 5],
//  [6, 7, 8]]
// ```
35557// `num_splits`:
35558// ```
35559// [2, 2]
35560// ```
35561// and `paddings`:
35562// ```
35563// [1, 1]
35564// ```
35565// the expected `outputs` is:
// ```
// [[0, 1],
//  [3, 4]]
//
// [[2, 0],
//  [5, 0]]
//
// [[6, 7],
//  [0, 0]]
//
// [[8, 0],
//  [0, 0]]
// ```
35584//
35585// Arguments:
35586//
//	resource: Resource variable of input tensor to split across all dimensions.
//	num_splits: Number of ways to split per dimension. Shape dimensions must be evenly
//
// divisible.
//
// Returns Output slices based on input and num_splits defined, in row-major order.
35598func ReadVariableXlaSplitND(scope *Scope, resource tf.Output, T tf.DataType, N int64, num_splits []int64, optional ...ReadVariableXlaSplitNDAttr) (outputs []tf.Output) {
35599	if scope.Err() != nil {
35600		return
35601	}
35602	attrs := map[string]interface{}{"T": T, "N": N, "num_splits": num_splits}
35603	for _, a := range optional {
35604		a(attrs)
35605	}
35606	opspec := tf.OpSpec{
35607		Type: "ReadVariableXlaSplitND",
35608		Input: []tf.Input{
35609			resource,
35610		},
35611		Attrs: attrs,
35612	}
35613	op := scope.AddOperation(opspec)
35614	if scope.Err() != nil {
35615		return
35616	}
35617	var idx int
35618	var err error
35619	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
35620		scope.UpdateErr("ReadVariableXlaSplitND", err)
35621		return
35622	}
35623	return outputs
35624}
35625
35626// Returns the number of records this Reader has produced.
35627//
35628// This is the same as the number of ReaderRead executions that have
35629// succeeded.
35630//
35631// Arguments:
35632//
35633//	reader_handle: Handle to a Reader.
35634func ReaderNumRecordsProducedV2(scope *Scope, reader_handle tf.Output) (records_produced tf.Output) {
35635	if scope.Err() != nil {
35636		return
35637	}
35638	opspec := tf.OpSpec{
35639		Type: "ReaderNumRecordsProducedV2",
35640		Input: []tf.Input{
35641			reader_handle,
35642		},
35643	}
35644	op := scope.AddOperation(opspec)
35645	return op.Output(0)
35646}
35647
35648// Returns the number of work units this Reader has finished processing.
35649//
35650// Arguments:
35651//
35652//	reader_handle: Handle to a Reader.
35653func ReaderNumWorkUnitsCompletedV2(scope *Scope, reader_handle tf.Output) (units_completed tf.Output) {
35654	if scope.Err() != nil {
35655		return
35656	}
35657	opspec := tf.OpSpec{
35658		Type: "ReaderNumWorkUnitsCompletedV2",
35659		Input: []tf.Input{
35660			reader_handle,
35661		},
35662	}
35663	op := scope.AddOperation(opspec)
35664	return op.Output(0)
35665}
35666
35667// Returns up to `num_records` (key, value) pairs produced by a Reader.
35668//
35669// Will dequeue from the input queue if necessary (e.g. when the
35670// Reader needs to start reading from a new file since it has finished
35671// with the previous file).
// It may return fewer than `num_records` even before the last batch.
35673//
35674// Arguments:
35675//
35676//	reader_handle: Handle to a `Reader`.
35677//	queue_handle: Handle to a `Queue`, with string work items.
35678//	num_records: number of records to read from `Reader`.
35679//
35680// Returns:
35681//
35682//	keys: A 1-D tensor.
35683//	values: A 1-D tensor.
35684func ReaderReadUpToV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output, num_records tf.Output) (keys tf.Output, values tf.Output) {
35685	if scope.Err() != nil {
35686		return
35687	}
35688	opspec := tf.OpSpec{
35689		Type: "ReaderReadUpToV2",
35690		Input: []tf.Input{
35691			reader_handle, queue_handle, num_records,
35692		},
35693	}
35694	op := scope.AddOperation(opspec)
35695	return op.Output(0), op.Output(1)
35696}
35697
35698// Returns the next record (key, value pair) produced by a Reader.
35699//
35700// Will dequeue from the input queue if necessary (e.g. when the
35701// Reader needs to start reading from a new file since it has finished
35702// with the previous file).
35703//
35704// Arguments:
35705//
35706//	reader_handle: Handle to a Reader.
35707//	queue_handle: Handle to a Queue, with string work items.
35708//
35709// Returns:
35710//
35711//	key: A scalar.
35712//	value: A scalar.
35713func ReaderReadV2(scope *Scope, reader_handle tf.Output, queue_handle tf.Output) (key tf.Output, value tf.Output) {
35714	if scope.Err() != nil {
35715		return
35716	}
35717	opspec := tf.OpSpec{
35718		Type: "ReaderReadV2",
35719		Input: []tf.Input{
35720			reader_handle, queue_handle,
35721		},
35722	}
35723	op := scope.AddOperation(opspec)
35724	return op.Output(0), op.Output(1)
35725}
35726
35727// Restore a Reader to its initial clean state.
35728//
35729// Arguments:
35730//
35731//	reader_handle: Handle to a Reader.
35732//
35733// Returns the created operation.
35734func ReaderResetV2(scope *Scope, reader_handle tf.Output) (o *tf.Operation) {
35735	if scope.Err() != nil {
35736		return
35737	}
35738	opspec := tf.OpSpec{
35739		Type: "ReaderResetV2",
35740		Input: []tf.Input{
35741			reader_handle,
35742		},
35743	}
35744	return scope.AddOperation(opspec)
35745}
35746
35747// Restore a reader to a previously saved state.
35748//
35749// Not all Readers support being restored, so this can produce an
35750// Unimplemented error.
35751//
35752// Arguments:
35753//
35754//	reader_handle: Handle to a Reader.
35755//	state: Result of a ReaderSerializeState of a Reader with type
35756//
35757// matching reader_handle.
35758//
35759// Returns the created operation.
35760func ReaderRestoreStateV2(scope *Scope, reader_handle tf.Output, state tf.Output) (o *tf.Operation) {
35761	if scope.Err() != nil {
35762		return
35763	}
35764	opspec := tf.OpSpec{
35765		Type: "ReaderRestoreStateV2",
35766		Input: []tf.Input{
35767			reader_handle, state,
35768		},
35769	}
35770	return scope.AddOperation(opspec)
35771}
35772
35773// Produce a string tensor that encodes the state of a Reader.
35774//
35775// Not all Readers support being serialized, so this can produce an
35776// Unimplemented error.
35777//
35778// Arguments:
35779//
35780//	reader_handle: Handle to a Reader.
35781func ReaderSerializeStateV2(scope *Scope, reader_handle tf.Output) (state tf.Output) {
35782	if scope.Err() != nil {
35783		return
35784	}
35785	opspec := tf.OpSpec{
35786		Type: "ReaderSerializeStateV2",
35787		Input: []tf.Input{
35788			reader_handle,
35789		},
35790	}
35791	op := scope.AddOperation(opspec)
35792	return op.Output(0)
35793}
35794
35795// RealAttr is an optional argument to Real.
35796type RealAttr func(optionalAttr)
35797
35798// RealTout sets the optional Tout attribute to value.
35799// If not specified, defaults to DT_FLOAT
35800func RealTout(value tf.DataType) RealAttr {
35801	return func(m optionalAttr) {
35802		m["Tout"] = value
35803	}
35804}
35805
35806// Returns the real part of a complex number.
35807//
35808// Given a tensor `input` of complex numbers, this operation returns a tensor of
35809// type `float` that is the real part of each element in `input`. All elements in
// `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
// part returned by this operation and *b* is the imaginary part.
35813//
35814// For example:
35815//
35816// ```
35817// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
35818// tf.real(input) ==> [-2.25, 3.25]
35819// ```
35820func Real(scope *Scope, input tf.Output, optional ...RealAttr) (output tf.Output) {
35821	if scope.Err() != nil {
35822		return
35823	}
35824	attrs := map[string]interface{}{}
35825	for _, a := range optional {
35826		a(attrs)
35827	}
35828	opspec := tf.OpSpec{
35829		Type: "Real",
35830		Input: []tf.Input{
35831			input,
35832		},
35833		Attrs: attrs,
35834	}
35835	op := scope.AddOperation(opspec)
35836	return op.Output(0)
35837}
35838
35839// Returns x / y element-wise for real types.
35840//
35841// If `x` and `y` are reals, this will return the floating-point division.
35842//
// *NOTE*: `RealDiv` supports broadcasting. More about broadcasting
35844// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
35845func RealDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
35846	if scope.Err() != nil {
35847		return
35848	}
35849	opspec := tf.OpSpec{
35850		Type: "RealDiv",
35851		Input: []tf.Input{
35852			x, y,
35853		},
35854	}
35855	op := scope.AddOperation(opspec)
35856	return op.Output(0)
35857}
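
// Illustrative usage (editor's sketch, not part of the generated bindings):
// element-wise floating-point division. Assumes a Scope s; names are
// hypothetical.
//
//	x := Const(s.SubScope("x"), []float32{3, 4})
//	y := Const(s.SubScope("y"), []float32{2, 2})
//	z := RealDiv(s, x, y) // evaluates to [1.5, 2]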
35858
35859// RebatchDatasetAttr is an optional argument to RebatchDataset.
35860type RebatchDatasetAttr func(optionalAttr)
35861
35862// RebatchDatasetUseFallback sets the optional use_fallback attribute to value.
35863// If not specified, defaults to true
35864func RebatchDatasetUseFallback(value bool) RebatchDatasetAttr {
35865	return func(m optionalAttr) {
35866		m["use_fallback"] = value
35867	}
35868}
35869
35870// Creates a dataset that changes the batch size.
35871//
// Creates a dataset that changes the batch size of the dataset to the current
// batch size floor-divided by `num_replicas`.
35874//
35875// Arguments:
35876//
35877//	input_dataset: A variant tensor representing the input dataset.
35878//	num_replicas: A scalar representing the number of replicas to distribute this batch across. As
35879//
// a result of this transformation, the current batch size ends up being
// divided by this parameter.
35882func RebatchDataset(scope *Scope, input_dataset tf.Output, num_replicas tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RebatchDatasetAttr) (handle tf.Output) {
35883	if scope.Err() != nil {
35884		return
35885	}
35886	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
35887	for _, a := range optional {
35888		a(attrs)
35889	}
35890	opspec := tf.OpSpec{
35891		Type: "RebatchDataset",
35892		Input: []tf.Input{
35893			input_dataset, num_replicas,
35894		},
35895		Attrs: attrs,
35896	}
35897	op := scope.AddOperation(opspec)
35898	return op.Output(0)
35899}
35900
35901// Creates a dataset that changes the batch size.
35902//
35903// Creates a dataset that rebatches elements from `input_dataset` into new batch
35904// sizes.
35905//
35906// Arguments:
35907//
35908//	input_dataset: A variant tensor representing the input dataset.
35909//	batch_sizes: A vector of integers representing the size of batches to produce. These values
35910//
35911// are cycled through in order.
35912func RebatchDatasetV2(scope *Scope, input_dataset tf.Output, batch_sizes tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
35913	if scope.Err() != nil {
35914		return
35915	}
35916	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
35917	opspec := tf.OpSpec{
35918		Type: "RebatchDatasetV2",
35919		Input: []tf.Input{
35920			input_dataset, batch_sizes, drop_remainder,
35921		},
35922		Attrs: attrs,
35923	}
35924	op := scope.AddOperation(opspec)
35925	return op.Output(0)
35926}
35927
35928// Computes the reciprocal of x element-wise.
35929//
35930// I.e., \\(y = 1 / x\\).
35931func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
35932	if scope.Err() != nil {
35933		return
35934	}
35935	opspec := tf.OpSpec{
35936		Type: "Reciprocal",
35937		Input: []tf.Input{
35938			x,
35939		},
35940	}
35941	op := scope.AddOperation(opspec)
35942	return op.Output(0)
35943}
35944
35945// Computes the gradient for the inverse of `x` wrt its input.
35946//
35947// Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
35948// is the corresponding input gradient.
35949func ReciprocalGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
35950	if scope.Err() != nil {
35951		return
35952	}
35953	opspec := tf.OpSpec{
35954		Type: "ReciprocalGrad",
35955		Input: []tf.Input{
35956			y, dy,
35957		},
35958	}
35959	op := scope.AddOperation(opspec)
35960	return op.Output(0)
35961}
35962
35963// RecordInputAttr is an optional argument to RecordInput.
35964type RecordInputAttr func(optionalAttr)
35965
35966// RecordInputFileRandomSeed sets the optional file_random_seed attribute to value.
35967//
35968// value: Random seeds used to produce randomized records.
35969// If not specified, defaults to 301
35970func RecordInputFileRandomSeed(value int64) RecordInputAttr {
35971	return func(m optionalAttr) {
35972		m["file_random_seed"] = value
35973	}
35974}
35975
35976// RecordInputFileShuffleShiftRatio sets the optional file_shuffle_shift_ratio attribute to value.
35977//
35978// value: Shifts the list of files after the list is randomly
35979// shuffled.
35980// If not specified, defaults to 0
35981func RecordInputFileShuffleShiftRatio(value float32) RecordInputAttr {
35982	return func(m optionalAttr) {
35983		m["file_shuffle_shift_ratio"] = value
35984	}
35985}
35986
35987// RecordInputFileBufferSize sets the optional file_buffer_size attribute to value.
35988//
35989// value: The randomization shuffling buffer.
35990// If not specified, defaults to 10000
35991func RecordInputFileBufferSize(value int64) RecordInputAttr {
35992	return func(m optionalAttr) {
35993		m["file_buffer_size"] = value
35994	}
35995}
35996
35997// RecordInputFileParallelism sets the optional file_parallelism attribute to value.
35998//
35999// value: How many sstables are opened and concurrently iterated over.
36000// If not specified, defaults to 16
36001func RecordInputFileParallelism(value int64) RecordInputAttr {
36002	return func(m optionalAttr) {
36003		m["file_parallelism"] = value
36004	}
36005}
36006
36007// RecordInputBatchSize sets the optional batch_size attribute to value.
36008//
36009// value: The batch size.
36010// If not specified, defaults to 32
36011func RecordInputBatchSize(value int64) RecordInputAttr {
36012	return func(m optionalAttr) {
36013		m["batch_size"] = value
36014	}
36015}
36016
36017// RecordInputCompressionType sets the optional compression_type attribute to value.
36018//
36019// value: The type of compression for the file. Currently ZLIB and
36020// GZIP are supported. Defaults to none.
36021// If not specified, defaults to ""
36022func RecordInputCompressionType(value string) RecordInputAttr {
36023	return func(m optionalAttr) {
36024		m["compression_type"] = value
36025	}
36026}
36027
36028// Emits randomized records.
36029//
36030// Arguments:
36031//
36032//	file_pattern: Glob pattern for the data files.
36033//
36034// Returns A tensor of shape [batch_size].
36035func RecordInput(scope *Scope, file_pattern string, optional ...RecordInputAttr) (records tf.Output) {
36036	if scope.Err() != nil {
36037		return
36038	}
36039	attrs := map[string]interface{}{"file_pattern": file_pattern}
36040	for _, a := range optional {
36041		a(attrs)
36042	}
36043	opspec := tf.OpSpec{
36044		Type: "RecordInput",
36045
36046		Attrs: attrs,
36047	}
36048	op := scope.AddOperation(opspec)
36049	return op.Output(0)
36050}
36051
36052// RecvAttr is an optional argument to Recv.
36053type RecvAttr func(optionalAttr)
36054
36055// RecvClientTerminated sets the optional client_terminated attribute to value.
36056//
36057// value: If set to true, this indicates that the node was added
36058// to the graph as a result of a client-side feed or fetch of Tensor data,
36059// in which case the corresponding send or recv is expected to be managed
36060// locally by the caller.
36061// If not specified, defaults to false
36062func RecvClientTerminated(value bool) RecvAttr {
36063	return func(m optionalAttr) {
36064		m["client_terminated"] = value
36065	}
36066}
36067
36068// Receives the named tensor from send_device on recv_device.
36069//
36070// Arguments:
36071//
36072//	tensor_name: The name of the tensor to receive.
36073//	send_device: The name of the device sending the tensor.
36074//	send_device_incarnation: The current incarnation of send_device.
36075//	recv_device: The name of the device receiving the tensor.
36076//
36077// Returns The tensor to receive.
36078func Recv(scope *Scope, tensor_type tf.DataType, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...RecvAttr) (tensor tf.Output) {
36079	if scope.Err() != nil {
36080		return
36081	}
36082	attrs := map[string]interface{}{"tensor_type": tensor_type, "tensor_name": tensor_name, "send_device": send_device, "send_device_incarnation": send_device_incarnation, "recv_device": recv_device}
36083	for _, a := range optional {
36084		a(attrs)
36085	}
36086	opspec := tf.OpSpec{
36087		Type: "Recv",
36088
36089		Attrs: attrs,
36090	}
36091	op := scope.AddOperation(opspec)
36092	return op.Output(0)
36093}
36094
36095// An op that receives embedding activations on the TPU.
36096//
36097// The TPU system performs the embedding lookups and aggregations specified by
36098// the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
// results of these aggregations are visible to the TensorFlow graph as the
36100// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
36101// one Tensor of activations per table specified in the model. There can be at
36102// most one RecvTPUEmbeddingActivations op in the TPU graph.
36103//
36104// Arguments:
36105//
36106//	num_outputs: The number of output activation tensors, equal to the number of
36107//
36108// embedding tables in the model.
36109//
36110//	config: Serialized TPUEmbeddingConfiguration proto.
36111//
36112// Returns A TensorList of embedding activations containing one Tensor per
36113// embedding table in the model.
36114func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) (outputs []tf.Output) {
36115	if scope.Err() != nil {
36116		return
36117	}
36118	attrs := map[string]interface{}{"num_outputs": num_outputs, "config": config}
36119	opspec := tf.OpSpec{
36120		Type: "RecvTPUEmbeddingActivations",
36121
36122		Attrs: attrs,
36123	}
36124	op := scope.AddOperation(opspec)
36125	if scope.Err() != nil {
36126		return
36127	}
36128	var idx int
36129	var err error
36130	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
36131		scope.UpdateErr("RecvTPUEmbeddingActivations", err)
36132		return
36133	}
36134	return outputs
36135}
36136
36137// ReduceJoinAttr is an optional argument to ReduceJoin.
36138type ReduceJoinAttr func(optionalAttr)
36139
36140// ReduceJoinKeepDims sets the optional keep_dims attribute to value.
36141//
36142// value: If `True`, retain reduced dimensions with length `1`.
36143// If not specified, defaults to false
36144func ReduceJoinKeepDims(value bool) ReduceJoinAttr {
36145	return func(m optionalAttr) {
36146		m["keep_dims"] = value
36147	}
36148}
36149
36150// ReduceJoinSeparator sets the optional separator attribute to value.
36151//
36152// value: The separator to use when joining.
36153// If not specified, defaults to ""
36154func ReduceJoinSeparator(value string) ReduceJoinAttr {
36155	return func(m optionalAttr) {
36156		m["separator"] = value
36157	}
36158}
36159
36160// Joins a string Tensor across the given dimensions.
36161//
36162// Computes the string join across dimensions in the given string Tensor of shape
36163// `[\\(d_0, d_1, ..., d_{n-1}\\)]`.  Returns a new Tensor created by joining the input
36164// strings with the given separator (default: empty string).  Negative indices are
36165// counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
36166// indices are not specified, joins across all dimensions beginning from `n - 1`
36167// through `0`.
36168//
36169// For example:
36170//
36171// ```python
36172// # tensor `a` is [["a", "b"], ["c", "d"]]
36173// tf.reduce_join(a, 0) ==> ["ac", "bd"]
36174// tf.reduce_join(a, 1) ==> ["ab", "cd"]
36175// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
36176// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
36177// tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
36178// tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
36179// tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
36180// tf.reduce_join(a, [0, 1]) ==> "acbd"
36181// tf.reduce_join(a, [1, 0]) ==> "abcd"
36182// tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
36183// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
36184// ```
36185//
36186// Arguments:
36187//
36188//	inputs: The input to be joined.  All reduced indices must have non-zero size.
36189//	reduction_indices: The dimensions to reduce over.  Dimensions are reduced in the
36190//
36191// order specified.  Omitting `reduction_indices` is equivalent to passing
36192// `[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.
36193//
36194// Returns Has shape equal to that of the input with reduced dimensions removed or
36195// set to `1` depending on `keep_dims`.
36196func ReduceJoin(scope *Scope, inputs tf.Output, reduction_indices tf.Output, optional ...ReduceJoinAttr) (output tf.Output) {
36197	if scope.Err() != nil {
36198		return
36199	}
36200	attrs := map[string]interface{}{}
36201	for _, a := range optional {
36202		a(attrs)
36203	}
36204	opspec := tf.OpSpec{
36205		Type: "ReduceJoin",
36206		Input: []tf.Input{
36207			inputs, reduction_indices,
36208		},
36209		Attrs: attrs,
36210	}
36211	op := scope.AddOperation(opspec)
36212	return op.Output(0)
36213}
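
// Illustrative usage (editor's sketch, not part of the generated bindings):
// join a 2x2 string tensor along dimension 0 with a "." separator, as in
// the Python examples above. Assumes a Scope s; names are hypothetical.
//
//	a := Const(s, [][]string{{"a", "b"}, {"c", "d"}})
//	axis := Const(s.SubScope("axis"), []int32{0})
//	joined := ReduceJoin(s, a, axis, ReduceJoinSeparator(".")) // ["a.c", "b.d"]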
36214
36215// Check if the input matches the regex pattern.
36216//
36217// The input is a string tensor of any shape. The pattern is a scalar
36218// string tensor which is applied to every element of the input tensor.
36219// The boolean values (True or False) of the output tensor indicate
36220// if the input matches the regex pattern provided.
36221//
36222// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
36223//
36224// Examples:
36225//
36226// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$")
36227// <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
36228// >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$")
36229// <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>
36230//
36231// Arguments:
36232//
36233//	input: A string tensor of the text to be processed.
36234//	pattern: A scalar string tensor containing the regular expression to match the input.
36235//
36236// Returns A bool tensor with the same shape as `input`.
36237func RegexFullMatch(scope *Scope, input tf.Output, pattern tf.Output) (output tf.Output) {
36238	if scope.Err() != nil {
36239		return
36240	}
36241	opspec := tf.OpSpec{
36242		Type: "RegexFullMatch",
36243		Input: []tf.Input{
36244			input, pattern,
36245		},
36246	}
36247	op := scope.AddOperation(opspec)
36248	return op.Output(0)
36249}
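
// Illustrative usage (editor's sketch, not part of the generated bindings):
// the Go equivalent of the Python example above. Assumes a Scope s; names
// are hypothetical.
//
//	input := Const(s, []string{"TF lib", "lib TF"})
//	pattern := Const(s.SubScope("pattern"), ".*lib$")
//	matches := RegexFullMatch(s, input, pattern) // evaluates to [true, false]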

// RegexReplaceAttr is an optional argument to RegexReplace.
type RegexReplaceAttr func(optionalAttr)

// RegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
//
// value: If True, the replacement is global (that is, all matches of the `pattern` regular
// expression in each input string are rewritten), otherwise the `rewrite`
// substitution is only made for the first `pattern` match.
// If not specified, defaults to true
func RegexReplaceReplaceGlobal(value bool) RegexReplaceAttr {
	return func(m optionalAttr) {
		m["replace_global"] = value
	}
}

// Replaces matches of the `pattern` regular expression in `input` with the
// replacement string provided in `rewrite`.
//
// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
//
// Arguments:
//
//	input: The text to be processed.
//	pattern: The regular expression to be matched in the `input` strings.
//	rewrite: The rewrite string to be substituted for the `pattern` expression
//	where it is matched in the `input` strings.
//
// Returns The text after applying pattern match and rewrite substitution.
func RegexReplace(scope *Scope, input tf.Output, pattern tf.Output, rewrite tf.Output, optional ...RegexReplaceAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RegexReplace",
		Input: []tf.Input{
			input, pattern, rewrite,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
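
// Editor's note: a hedged sketch of RegexReplace with the optional
// replace_global attribute disabled, assuming the NewScope/Const helpers
// from this package.
//
//	s := NewScope()
//	text := Const(s, []string{"aaa"})
//	out := RegexReplace(s, text, Const(s, "a"), Const(s, "b"),
//		RegexReplaceReplaceGlobal(false)) // first match only: ["baa"]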

// RegisterDatasetAttr is an optional argument to RegisterDataset.
type RegisterDatasetAttr func(optionalAttr)

// RegisterDatasetElementSpec sets the optional element_spec attribute to value.
// If not specified, defaults to ""
func RegisterDatasetElementSpec(value string) RegisterDatasetAttr {
	return func(m optionalAttr) {
		m["element_spec"] = value
	}
}

// RegisterDatasetMetadata sets the optional metadata attribute to value.
// If not specified, defaults to ""
func RegisterDatasetMetadata(value string) RegisterDatasetAttr {
	return func(m optionalAttr) {
		m["metadata"] = value
	}
}

// Registers a dataset with the tf.data service.
func RegisterDataset(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, external_state_policy int64, optional ...RegisterDatasetAttr) (dataset_id tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"external_state_policy": external_state_policy}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RegisterDataset",
		Input: []tf.Input{
			dataset, address, protocol,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RegisterDatasetV2Attr is an optional argument to RegisterDatasetV2.
type RegisterDatasetV2Attr func(optionalAttr)

// RegisterDatasetV2ElementSpec sets the optional element_spec attribute to value.
// If not specified, defaults to ""
func RegisterDatasetV2ElementSpec(value string) RegisterDatasetV2Attr {
	return func(m optionalAttr) {
		m["element_spec"] = value
	}
}

// RegisterDatasetV2RequestedDatasetId sets the optional requested_dataset_id attribute to value.
// If not specified, defaults to ""
func RegisterDatasetV2RequestedDatasetId(value string) RegisterDatasetV2Attr {
	return func(m optionalAttr) {
		m["requested_dataset_id"] = value
	}
}

// RegisterDatasetV2Metadata sets the optional metadata attribute to value.
// If not specified, defaults to ""
func RegisterDatasetV2Metadata(value string) RegisterDatasetV2Attr {
	return func(m optionalAttr) {
		m["metadata"] = value
	}
}

// Registers a dataset with the tf.data service.
func RegisterDatasetV2(scope *Scope, dataset tf.Output, address tf.Output, protocol tf.Output, external_state_policy int64, optional ...RegisterDatasetV2Attr) (dataset_id tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"external_state_policy": external_state_policy}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RegisterDatasetV2",
		Input: []tf.Input{
			dataset, address, protocol,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes rectified linear: `max(features, 0)`.
//
// See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
// Example usage:
// >>> tf.nn.relu([-2., 0., 3.]).numpy()
// array([0., 0., 3.], dtype=float32)
func Relu(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Relu",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes rectified linear 6: `min(max(features, 0), 6)`.
func Relu6(scope *Scope, features tf.Output) (activations tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Relu6",
		Input: []tf.Input{
			features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
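
// Editor's note: a worked example for illustration only. Relu6 applied
// element-wise to [-2, 0, 3, 8] yields [0, 0, 3, 6]; values are clamped to
// the range [0, 6].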

// Computes rectified linear 6 gradients for a Relu6 operation.
//
// Arguments:
//
//	gradients: The backpropagated gradients to the corresponding Relu6 operation.
//	features: The features passed as input to the corresponding Relu6 operation, or
//	its output; using either one produces the same result.
//
// Returns The gradients:
// `gradients * (features > 0) * (features < 6)`.
func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Relu6Grad",
		Input: []tf.Input{
			gradients, features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes rectified linear gradients for a Relu operation.
//
// Arguments:
//
//	gradients: The backpropagated gradients to the corresponding Relu operation.
//	features: The features passed as input to the corresponding Relu operation, or
//	the outputs of that operation (both work equivalently).
//
// Returns `gradients * (features > 0)`.
func ReluGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ReluGrad",
		Input: []tf.Input{
			gradients, features,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// RepeatDatasetAttr is an optional argument to RepeatDataset.
type RepeatDatasetAttr func(optionalAttr)

// RepeatDatasetMetadata sets the optional metadata attribute to value.
// If not specified, defaults to ""
func RepeatDatasetMetadata(value string) RepeatDatasetAttr {
	return func(m optionalAttr) {
		m["metadata"] = value
	}
}

// Creates a dataset that emits the outputs of `input_dataset` `count` times.
//
// Arguments:
//
//	count: A scalar representing the number of times that `input_dataset` should
//	be repeated. A value of `-1` indicates that it should be repeated infinitely.
func RepeatDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...RepeatDatasetAttr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RepeatDataset",
		Input: []tf.Input{
			input_dataset, count,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes a range that covers the actual values present in a quantized tensor.
//
// Given a quantized tensor described by `(input, input_min, input_max)`, outputs a
// range that covers the actual values present in that tensor. This op is typically
// used to produce the `requested_output_min` and `requested_output_max` for
// `Requantize`.
//
// Arguments:
//
//	input_min: The float value that the minimum quantized input value represents.
//	input_max: The float value that the maximum quantized input value represents.
//
// Returns:
//
//	output_min: The computed min output.
//	output_max: The computed max output.
func RequantizationRange(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output) (output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "RequantizationRange",
		Input: []tf.Input{
			input, input_min, input_max,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Computes requantization range per channel.
//
// Arguments:
//
//	input: The original input tensor.
//	input_min: The minimum value of the input tensor.
//	input_max: The maximum value of the input tensor.
//	clip_value_max: The maximum value of the output that needs to be clipped.
//	Example: set this to 6 for Relu6.
//
// Returns:
//
//	output_min: The minimum value of the final output tensor.
//	output_max: The maximum value of the final output tensor.
func RequantizationRangePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, clip_value_max float32) (output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"clip_value_max": clip_value_max}
	opspec := tf.OpSpec{
		Type: "RequantizationRangePerChannel",
		Input: []tf.Input{
			input, input_min, input_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Converts the quantized `input` tensor into a lower-precision `output`.
//
// Converts the quantized `input` tensor into a lower-precision `output`, using the
// output range specified with `requested_output_min` and `requested_output_max`.
//
// `[input_min, input_max]` are scalar floats that specify the range for the float
// interpretation of the `input` data. For example, if `input_min` is -1.0f and
// `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0
// value in the 16-bit data should be interpreted as -1.0f, and 65535 means 1.0f.
//
// Arguments:
//
//	input_min: The float value that the minimum quantized input value represents.
//	input_max: The float value that the maximum quantized input value represents.
//	requested_output_min: The float value that the minimum quantized output value represents.
//	requested_output_max: The float value that the maximum quantized output value represents.
//	out_type: The type of the output. Should be a lower bit depth than Tinput.
//
// Returns:
//
//	output
//	output_min: The requested_output_min value is copied into this output.
//	output_max: The requested_output_max value is copied into this output.
func Requantize(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, out_type tf.DataType) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"out_type": out_type}
	opspec := tf.OpSpec{
		Type: "Requantize",
		Input: []tf.Input{
			input, input_min, input_max, requested_output_min, requested_output_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
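
// Editor's note: a worked example of the range interpretation above, for
// illustration only. With input_min = -1.0, input_max = 1.0, and quint16
// data, a stored integer q corresponds to the float value
//
//	input_min + (input_max-input_min) * q/65535
//
// which is consistent with the statement that q = 0 reads as -1.0f and
// q = 65535 reads as 1.0f (exact rounding behavior is implementation-defined).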

// RequantizePerChannelAttr is an optional argument to RequantizePerChannel.
type RequantizePerChannelAttr func(optionalAttr)

// RequantizePerChannelOutType sets the optional out_type attribute to value.
//
// value: The quantized type of output tensor that needs to be converted.
// If not specified, defaults to DT_QUINT8
func RequantizePerChannelOutType(value tf.DataType) RequantizePerChannelAttr {
	return func(m optionalAttr) {
		m["out_type"] = value
	}
}

// Requantizes input with min and max values known per channel.
//
// Arguments:
//
//	input: The original input tensor.
//	input_min: The minimum value of the input tensor.
//	input_max: The maximum value of the input tensor.
//	requested_output_min: The minimum value of the output tensor requested.
//	requested_output_max: The maximum value of the output tensor requested.
//
// Returns:
//
//	output: Output tensor.
//	output_min: The minimum value of the final output tensor.
//	output_max: The maximum value of the final output tensor.
func RequantizePerChannel(scope *Scope, input tf.Output, input_min tf.Output, input_max tf.Output, requested_output_min tf.Output, requested_output_max tf.Output, optional ...RequantizePerChannelAttr) (output tf.Output, output_min tf.Output, output_max tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "RequantizePerChannel",
		Input: []tf.Input{
			input, input_min, input_max, requested_output_min, requested_output_max,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Reshapes a tensor.
//
// Given `tensor`, this operation returns a tensor that has the same values
// as `tensor` with shape `shape`.
//
// If one component of 1-D tensor `shape` is the special value -1, the size of that
// dimension is computed so that the total size remains constant.  In particular, a
// `shape` of `[-1]` flattens into 1-D.  At most one component of `shape` may be
// unknown.
//
// The `shape` must be 1-D and the operation returns a tensor with shape
// `shape` filled with the values of `tensor`. In this case, the number of elements
// implied by `shape` must be the same as the number of elements in `tensor`.
//
// It is an error if `shape` is not 1-D.
//
// For example:
//
// ```
// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
// # tensor 't' has shape [9]
// reshape(t, [3, 3]) ==> [[1, 2, 3],
//                         [4, 5, 6],
//                         [7, 8, 9]]
//
// # tensor 't' is [[[1, 1], [2, 2]],
// #                [[3, 3], [4, 4]]]
// # tensor 't' has shape [2, 2, 2]
// reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
//                         [3, 3, 4, 4]]
//
// # tensor 't' is [[[1, 1, 1],
// #                 [2, 2, 2]],
// #                [[3, 3, 3],
// #                 [4, 4, 4]],
// #                [[5, 5, 5],
// #                 [6, 6, 6]]]
// # tensor 't' has shape [3, 2, 3]
// # pass '[-1]' to flatten 't'
// reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
//
// # -1 can also be used to infer the shape
//
// # -1 is inferred to be 9:
// reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
//
// # -1 is inferred to be 2:
// reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
//                          [4, 4, 4, 5, 5, 5, 6, 6, 6]]
//
// # -1 is inferred to be 3:
// reshape(t, [2, -1, 3]) ==> [[[1, 1, 1],
//                              [2, 2, 2],
//                              [3, 3, 3]],
//                             [[4, 4, 4],
//                              [5, 5, 5],
//                              [6, 6, 6]]]
//
// # tensor 't' is [7]
// # shape `[]` reshapes to a scalar
// reshape(t, []) ==> 7
// ```
//
// Arguments:
//
//	shape: Defines the shape of the output tensor.
func Reshape(scope *Scope, tensor tf.Output, shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Reshape",
		Input: []tf.Input{
			tensor, shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
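
// Editor's note: a minimal Go sketch of the -1 inference described above,
// assuming the hand-written NewScope/Const helpers from this package.
//
//	s := NewScope()
//	t := Const(s, []int32{1, 2, 3, 4, 5, 6})
//	r := Reshape(s, t, Const(s, []int32{2, -1})) // -1 inferred as 3 => shape [2, 3]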

// ResizeAreaAttr is an optional argument to ResizeArea.
type ResizeAreaAttr func(optionalAttr)

// ResizeAreaAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeAreaAlignCorners(value bool) ResizeAreaAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// Resize `images` to `size` using area interpolation.
//
// Input images can be of different types but output images are always float.
//
// The range of pixel values for the output image might be slightly different
// from the range for the input image because of limited numerical precision.
// To guarantee an output range, for example `[0.0, 1.0]`, apply
// `tf.clip_by_value` to the output.
//
// Each output pixel is computed by first transforming the pixel's footprint into
// the input tensor and then averaging the pixels that intersect the footprint. An
// input pixel's contribution to the average is weighted by the fraction of its
// area that intersects the footprint.  This is the same as OpenCV's INTER_AREA.
//
// Arguments:
//
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
//	new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeArea(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeAreaAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeArea",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBicubicAttr is an optional argument to ResizeBicubic.
type ResizeBicubicAttr func(optionalAttr)

// ResizeBicubicAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeBicubicAlignCorners(value bool) ResizeBicubicAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBicubicHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBicubicHalfPixelCenters(value bool) ResizeBicubicAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize `images` to `size` using bicubic interpolation.
//
// Input images can be of different types but output images are always float.
//
// Arguments:
//
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
//	new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBicubic(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBicubicAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBicubic",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBicubicGradAttr is an optional argument to ResizeBicubicGrad.
type ResizeBicubicGradAttr func(optionalAttr)

// ResizeBicubicGradAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
// aligned. Defaults to false.
// If not specified, defaults to false
func ResizeBicubicGradAlignCorners(value bool) ResizeBicubicGradAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBicubicGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBicubicGradHalfPixelCenters(value bool) ResizeBicubicGradAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Computes the gradient of bicubic interpolation.
//
// Arguments:
//
//	grads: 4-D with shape `[batch, height, width, channels]`.
//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`.
//	The image tensor that was resized.
//
// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
// Gradients with respect to the input image. The input image must have been
// of float or double type.
func ResizeBicubicGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBicubicGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBicubicGrad",
		Input: []tf.Input{
			grads, original_image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeBilinearAttr is an optional argument to ResizeBilinear.
type ResizeBilinearAttr func(optionalAttr)

// ResizeBilinearAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeBilinearAlignCorners(value bool) ResizeBilinearAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBilinearHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBilinearHalfPixelCenters(value bool) ResizeBilinearAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize `images` to `size` using bilinear interpolation.
//
// Input images can be of different types but output images are always float.
//
// Arguments:
//
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
//	new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeBilinear(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeBilinearAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBilinear",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
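
// Editor's note: a hedged usage sketch, assuming the NewScope/Const helpers
// from this package and a 4-D float32 tensor `images` of shape
// [batch, height, width, channels] produced elsewhere in the graph.
//
//	s := NewScope()
//	size := Const(s, []int32{224, 224}) // new_height, new_width
//	resized := ResizeBilinear(s, images, size,
//		ResizeBilinearHalfPixelCenters(true)) // shape [batch, 224, 224, channels]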

// ResizeBilinearGradAttr is an optional argument to ResizeBilinearGrad.
type ResizeBilinearGradAttr func(optionalAttr)

// ResizeBilinearGradAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
// aligned. Defaults to false.
// If not specified, defaults to false
func ResizeBilinearGradAlignCorners(value bool) ResizeBilinearGradAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeBilinearGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeBilinearGradHalfPixelCenters(value bool) ResizeBilinearGradAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Computes the gradient of bilinear interpolation.
//
// Arguments:
//
//	grads: 4-D with shape `[batch, height, width, channels]`.
//	original_image: 4-D with shape `[batch, orig_height, orig_width, channels]`.
//	The image tensor that was resized.
//
// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`.
// Gradients with respect to the input image. The input image must have been
// of float or double type.
func ResizeBilinearGrad(scope *Scope, grads tf.Output, original_image tf.Output, optional ...ResizeBilinearGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeBilinearGrad",
		Input: []tf.Input{
			grads, original_image,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeNearestNeighborAttr is an optional argument to ResizeNearestNeighbor.
type ResizeNearestNeighborAttr func(optionalAttr)

// ResizeNearestNeighborAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and output tensors are
// aligned, preserving the values at the corner pixels. Defaults to false.
// If not specified, defaults to false
func ResizeNearestNeighborAlignCorners(value bool) ResizeNearestNeighborAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeNearestNeighborHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeNearestNeighborHalfPixelCenters(value bool) ResizeNearestNeighborAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Resize `images` to `size` using nearest neighbor interpolation.
//
// Arguments:
//
//	images: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The
//	new size for the images.
//
// Returns 4-D with shape
// `[batch, new_height, new_width, channels]`.
func ResizeNearestNeighbor(scope *Scope, images tf.Output, size tf.Output, optional ...ResizeNearestNeighborAttr) (resized_images tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeNearestNeighbor",
		Input: []tf.Input{
			images, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// ResizeNearestNeighborGradAttr is an optional argument to ResizeNearestNeighborGrad.
type ResizeNearestNeighborGradAttr func(optionalAttr)

// ResizeNearestNeighborGradAlignCorners sets the optional align_corners attribute to value.
//
// value: If true, the centers of the 4 corner pixels of the input and grad tensors are
// aligned. Defaults to false.
// If not specified, defaults to false
func ResizeNearestNeighborGradAlignCorners(value bool) ResizeNearestNeighborGradAttr {
	return func(m optionalAttr) {
		m["align_corners"] = value
	}
}

// ResizeNearestNeighborGradHalfPixelCenters sets the optional half_pixel_centers attribute to value.
// If not specified, defaults to false
func ResizeNearestNeighborGradHalfPixelCenters(value bool) ResizeNearestNeighborGradAttr {
	return func(m optionalAttr) {
		m["half_pixel_centers"] = value
	}
}

// Computes the gradient of nearest neighbor interpolation.
//
// Arguments:
//
//	grads: 4-D with shape `[batch, height, width, channels]`.
//	size: A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
//	original input size.
//
// Returns 4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
// with respect to the input image.
func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, optional ...ResizeNearestNeighborGradAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResizeNearestNeighborGrad",
		Input: []tf.Input{
			grads, size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Applies a gradient to a given accumulator.
//
// Does not add if local_step is less than the accumulator's global_step.
//
// Arguments:
//
//	handle: The handle to an accumulator.
//	local_step: The local_step value at which the gradient was computed.
//	gradient: A tensor of the gradient to be accumulated.
//
// Returns the created operation.
func ResourceAccumulatorApplyGradient(scope *Scope, handle tf.Output, local_step tf.Output, gradient tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorApplyGradient",
		Input: []tf.Input{
			handle, local_step, gradient,
		},
	}
	return scope.AddOperation(opspec)
}

// Returns the number of gradients aggregated in the given accumulators.
//
// Arguments:
//
//	handle: The handle to an accumulator.
//
// Returns The number of gradients aggregated in the given accumulator.
func ResourceAccumulatorNumAccumulated(scope *Scope, handle tf.Output) (num_accumulated tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorNumAccumulated",
		Input: []tf.Input{
			handle,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Updates the accumulator with a new value for global_step.
//
// Logs a warning if the accumulator's value is already higher than
// new_global_step.
//
// Arguments:
//
//	handle: The handle to an accumulator.
//	new_global_step: The new global_step value to set.
//
// Returns the created operation.
func ResourceAccumulatorSetGlobalStep(scope *Scope, handle tf.Output, new_global_step tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorSetGlobalStep",
		Input: []tf.Input{
			handle, new_global_step,
		},
	}
	return scope.AddOperation(opspec)
}

// Extracts the average gradient in the given ConditionalAccumulator.
//
// The op blocks until sufficient (i.e., more than num_required)
// gradients have been accumulated.  If the accumulator has already
// aggregated more than num_required gradients, it returns the average of
// the accumulated gradients.  Also automatically increments the recorded
// global_step in the accumulator by 1, and resets the aggregate to 0.
//
// Arguments:
//
//	handle: The handle to an accumulator.
//	num_required: Number of gradients required before we return an aggregate.
//	dtype: The data type of accumulated gradients. Needs to correspond to the type
//	of the accumulator.
//
// Returns The average of the accumulated gradients.
func ResourceAccumulatorTakeGradient(scope *Scope, handle tf.Output, num_required tf.Output, dtype tf.DataType) (average tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "ResourceAccumulatorTakeGradient",
		Input: []tf.Input{
			handle, num_required,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
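
// Editor's note: a hedged sketch of the accumulator round trip described
// above; it assumes `s` is a Scope, `handle` an existing accumulator resource
// handle, and `step`/`grad` precomputed outputs. The ops still have to be run
// via a Session.
//
//	apply := ResourceAccumulatorApplyGradient(s, handle, step, grad)
//	_ = apply
//	avg := ResourceAccumulatorTakeGradient(s, handle, Const(s, int32(1)), tf.Float)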

// ResourceApplyAdaMaxAttr is an optional argument to ResourceApplyAdaMax.
type ResourceApplyAdaMaxAttr func(optionalAttr)

// ResourceApplyAdaMaxUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, m, and v tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdaMaxUseLocking(value bool) ResourceApplyAdaMaxAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the AdaMax algorithm.
//
// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
// v_t <- max(beta2 * v_{t-1}, abs(g))
// variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
//
// Arguments:
//
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	v: Should be from a Variable().
//	beta1_power: Must be a scalar.
//	lr: Scaling factor. Must be a scalar.
//	beta1: Momentum factor. Must be a scalar.
//	beta2: Momentum factor. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdaMax(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdaMaxAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdaMax",
		Input: []tf.Input{
			var_, m, v, beta1_power, lr, beta1, beta2, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdadeltaAttr is an optional argument to ResourceApplyAdadelta.
type ResourceApplyAdadeltaAttr func(optionalAttr)

// ResourceApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var, accum and update_accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyAdadeltaUseLocking(value bool) ResourceApplyAdadeltaAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the adadelta scheme.
//
// accum = rho() * accum + (1 - rho()) * grad.square();
// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
// update_accum = rho() * update_accum + (1 - rho()) * update.square();
// var -= update;
//
// Arguments:
//
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	accum_update: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay factor. Must be a scalar.
//	epsilon: Constant factor. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdadeltaAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdadelta",
		Input: []tf.Input{
			var_, accum, accum_update, lr, rho, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdagradAttr is an optional argument to ResourceApplyAdagrad.
type ResourceApplyAdagradAttr func(optionalAttr)

// ResourceApplyAdagradUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdagradUseLocking(value bool) ResourceApplyAdagradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
// If not specified, defaults to true
func ResourceApplyAdagradUpdateSlots(value bool) ResourceApplyAdagradAttr {
	return func(m optionalAttr) {
		m["update_slots"] = value
	}
}

// Update '*var' according to the adagrad scheme.
//
// accum += grad * grad
// var -= lr * grad * (1 / sqrt(accum))
//
// Arguments:
//
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, optional ...ResourceApplyAdagradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdagrad",
		Input: []tf.Input{
			var_, accum, lr, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdagradDAAttr is an optional argument to ResourceApplyAdagradDA.
type ResourceApplyAdagradDAAttr func(optionalAttr)

// ResourceApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
//
// value: If True, updating of the var and accum tensors will be protected by
// a lock; otherwise the behavior is undefined, but may exhibit less contention.
// If not specified, defaults to false
func ResourceApplyAdagradDAUseLocking(value bool) ResourceApplyAdagradDAAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the proximal adagrad scheme.
//
// Arguments:
//
//	var_: Should be from a Variable().
//	gradient_accumulator: Should be from a Variable().
//	gradient_squared_accumulator: Should be from a Variable().
//	grad: The gradient.
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	global_step: Training step number. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceApplyAdagradDAAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdagradDA",
		Input: []tf.Input{
			var_, gradient_accumulator, gradient_squared_accumulator, grad, lr, l1, l2, global_step,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdagradV2Attr is an optional argument to ResourceApplyAdagradV2.
type ResourceApplyAdagradV2Attr func(optionalAttr)

// ResourceApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdagradV2UseLocking(value bool) ResourceApplyAdagradV2Attr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value.
// If not specified, defaults to true
func ResourceApplyAdagradV2UpdateSlots(value bool) ResourceApplyAdagradV2Attr {
	return func(m optionalAttr) {
		m["update_slots"] = value
	}
}

// Update '*var' according to the adagrad scheme.
//
// accum += grad * grad
// var -= lr * grad * (1 / (sqrt(accum) + epsilon))
//
// Arguments:
//
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	epsilon: Constant factor. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdagradV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdagradV2",
		Input: []tf.Input{
			var_, accum, lr, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAdamAttr is an optional argument to ResourceApplyAdam.
type ResourceApplyAdamAttr func(optionalAttr)

// ResourceApplyAdamUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, m, and v tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdamUseLocking(value bool) ResourceApplyAdamAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyAdamUseNesterov sets the optional use_nesterov attribute to value.
//
// value: If `True`, uses the nesterov update.
// If not specified, defaults to false
func ResourceApplyAdamUseNesterov(value bool) ResourceApplyAdamAttr {
	return func(m optionalAttr) {
		m["use_nesterov"] = value
	}
}

// Update '*var' according to the Adam algorithm.
//
// $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$
// $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$
// $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$
// $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\  \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$
//
// Arguments:
//
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	v: Should be from a Variable().
//	beta1_power: Must be a scalar.
//	beta2_power: Must be a scalar.
//	lr: Scaling factor. Must be a scalar.
//	beta1: Momentum factor. Must be a scalar.
//	beta2: Momentum factor. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdam",
		Input: []tf.Input{
			var_, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
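
// Editor's note: a plain-Go restatement of the scalar Adam update above
// (non-Nesterov branch), for illustration only; the op applies this
// element-wise to resource variables. Uses the standard math package.
//
//	func adamStep(variable, m, v, g, lr, beta1, beta2, eps, beta1Pow, beta2Pow float64) (float64, float64, float64) {
//		lrT := lr * math.Sqrt(1-beta2Pow) / (1 - beta1Pow)
//		m = beta1*m + (1-beta1)*g
//		v = beta2*v + (1-beta2)*g*g
//		return variable - m*lrT/(math.Sqrt(v)+eps), m, v
//	}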

// ResourceApplyAdamWithAmsgradAttr is an optional argument to ResourceApplyAdamWithAmsgrad.
type ResourceApplyAdamWithAmsgradAttr func(optionalAttr)

// ResourceApplyAdamWithAmsgradUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, m, and v tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAdamWithAmsgradUseLocking(value bool) ResourceApplyAdamWithAmsgradAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the Adam algorithm.
//
// $$\text{lr}_t := \mathrm{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
// $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
// $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
// $$\hat{v}_t := \max(\hat{v}_{t-1}, v_t)$$
// $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$
//
// Arguments:
//
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	v: Should be from a Variable().
//	vhat: Should be from a Variable().
//	beta1_power: Must be a scalar.
//	beta2_power: Must be a scalar.
//	lr: Scaling factor. Must be a scalar.
//	beta1: Momentum factor. Must be a scalar.
//	beta2: Momentum factor. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAdamWithAmsgrad(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, vhat tf.Output, beta1_power tf.Output, beta2_power tf.Output, lr tf.Output, beta1 tf.Output, beta2 tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyAdamWithAmsgradAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAdamWithAmsgrad",
		Input: []tf.Input{
			var_, m, v, vhat, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyAddSignAttr is an optional argument to ResourceApplyAddSign.
type ResourceApplyAddSignAttr func(optionalAttr)

// ResourceApplyAddSignUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and m tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyAddSignUseLocking(value bool) ResourceApplyAddSignAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the AddSign update.
//
// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
// update <- (alpha + sign_decay * sign(g) * sign(m)) * g
// variable <- variable - lr_t * update
//
// Arguments:
//
//	var_: Should be from a Variable().
//	m: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	alpha: Must be a scalar.
//	sign_decay: Must be a scalar.
//	beta: Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, alpha tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyAddSignAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyAddSign",
		Input: []tf.Input{
			var_, m, lr, alpha, sign_decay, beta, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyCenteredRMSPropAttr is an optional argument to ResourceApplyCenteredRMSProp.
type ResourceApplyCenteredRMSPropAttr func(optionalAttr)

// ResourceApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var, mg, ms, and mom tensors is
// protected by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyCenteredRMSPropUseLocking(value bool) ResourceApplyCenteredRMSPropAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// Update '*var' according to the centered RMSProp algorithm.
//
// The centered RMSProp algorithm uses an estimate of the centered second moment
// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
// uses the (uncentered) second moment. This often helps with training, but is
// slightly more expensive in terms of computation and memory.
//
// Note that in the dense implementation of this algorithm, mg, ms, and mom will
// update even if the grad is zero, but in the sparse implementation, mg, ms,
// and mom will not update in iterations during which the grad is zero.
//
// mean_square = decay * mean_square + (1-decay) * gradient ** 2
// mean_grad = decay * mean_grad + (1-decay) * gradient
//
// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
//
// mg <- rho * mg_{t-1} + (1-rho) * grad
// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
// var <- var - mom
//
// Arguments:
//
//	var_: Should be from a Variable().
//	mg: Should be from a Variable().
//	ms: Should be from a Variable().
//	mom: Should be from a Variable().
//	lr: Scaling factor. Must be a scalar.
//	rho: Decay rate. Must be a scalar.
//	momentum: Momentum scale. Must be a scalar.
//	epsilon: Ridge term. Must be a scalar.
//	grad: The gradient.
//
// Returns the created operation.
func ResourceApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyCenteredRMSPropAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyCenteredRMSProp",
		Input: []tf.Input{
			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyFtrlAttr is an optional argument to ResourceApplyFtrl.
type ResourceApplyFtrlAttr func(optionalAttr)

// ResourceApplyFtrlUseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyFtrlUseLocking(value bool) ResourceApplyFtrlAttr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
// If not specified, defaults to false
func ResourceApplyFtrlMultiplyLinearByLr(value bool) ResourceApplyFtrlAttr {
	return func(m optionalAttr) {
		m["multiply_linear_by_lr"] = value
	}
}

// Update '*var' according to the Ftrl-proximal scheme.
//
// accum_new = accum + grad * grad
// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
// accum = accum_new
//
// Arguments:
//
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	linear: Should be from a Variable().
//	grad: The gradient.
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 regularization. Must be a scalar.
//	lr_power: Scaling factor. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyFtrl",
		Input: []tf.Input{
			var_, accum, linear, grad, lr, l1, l2, lr_power,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// ResourceApplyFtrlV2Attr is an optional argument to ResourceApplyFtrlV2.
type ResourceApplyFtrlV2Attr func(optionalAttr)

// ResourceApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
//
// value: If `True`, updating of the var and accum tensors will be protected
// by a lock; otherwise the behavior is undefined, but may exhibit less
// contention.
// If not specified, defaults to false
func ResourceApplyFtrlV2UseLocking(value bool) ResourceApplyFtrlV2Attr {
	return func(m optionalAttr) {
		m["use_locking"] = value
	}
}

// ResourceApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
// If not specified, defaults to false
func ResourceApplyFtrlV2MultiplyLinearByLr(value bool) ResourceApplyFtrlV2Attr {
	return func(m optionalAttr) {
		m["multiply_linear_by_lr"] = value
	}
}

// Update '*var' according to the Ftrl-proximal scheme.
//
// accum_new = accum + grad * grad
// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
// linear += grad_with_shrinkage +
//	(accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
// accum = accum_new
//
// Arguments:
//
//	var_: Should be from a Variable().
//	accum: Should be from a Variable().
//	linear: Should be from a Variable().
//	grad: The gradient.
//	lr: Scaling factor. Must be a scalar.
//	l1: L1 regularization. Must be a scalar.
//	l2: L2 shrinkage regularization. Must be a scalar.
//	lr_power: Scaling factor. Must be a scalar.
//
// Returns the created operation.
func ResourceApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceApplyFtrlV2Attr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "ResourceApplyFtrlV2",
		Input: []tf.Input{
			var_, accum, linear, grad, lr, l1, l2, l2_shrinkage, lr_power,
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}
37835
37836// ResourceApplyGradientDescentAttr is an optional argument to ResourceApplyGradientDescent.
37837type ResourceApplyGradientDescentAttr func(optionalAttr)
37838
37839// ResourceApplyGradientDescentUseLocking sets the optional use_locking attribute to value.
37840//
37841// value: If `True`, the subtraction will be protected by a lock;
37842// otherwise the behavior is undefined, but may exhibit less contention.
37843// If not specified, defaults to false
37844func ResourceApplyGradientDescentUseLocking(value bool) ResourceApplyGradientDescentAttr {
37845	return func(m optionalAttr) {
37846		m["use_locking"] = value
37847	}
37848}
37849
37850// Update '*var' by subtracting 'alpha' * 'delta' from it.
37851//
37852// Arguments:
37853//
37854//	var_: Should be from a Variable().
37855//	alpha: Scaling factor. Must be a scalar.
37856//	delta: The change.
37857//
37858// Returns the created operation.
37859func ResourceApplyGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, delta tf.Output, optional ...ResourceApplyGradientDescentAttr) (o *tf.Operation) {
37860	if scope.Err() != nil {
37861		return
37862	}
37863	attrs := map[string]interface{}{}
37864	for _, a := range optional {
37865		a(attrs)
37866	}
37867	opspec := tf.OpSpec{
37868		Type: "ResourceApplyGradientDescent",
37869		Input: []tf.Input{
37870			var_, alpha, delta,
37871		},
37872		Attrs: attrs,
37873	}
37874	return scope.AddOperation(opspec)
37875}
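
// For reference, a minimal end-to-end sketch of driving this wrapper from user
// code. The variable plumbing (VarHandleOp, AssignVariableOp, ReadVariableOp)
// and all literal values are illustrative assumptions, not part of this op:
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	s := op.NewScope()
// 	// A float32 resource variable with two elements.
// 	v := op.VarHandleOp(s, tf.Float, tf.MakeShape(2))
// 	init := op.AssignVariableOp(s, v, op.Const(s, []float32{1, 2}))
// 	// One SGD step: var -= alpha * delta.
// 	step := op.ResourceApplyGradientDescent(s, v,
// 		op.Const(s, float32(0.1)),      // alpha
// 		op.Const(s, []float32{10, 20})) // delta (a stand-in gradient)
// 	read := op.ReadVariableOp(s, v, tf.Float)
//
// 	graph, err := s.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	defer sess.Close()
// 	// Run the initializer, then the update, then read the result back.
// 	for _, target := range []*tf.Operation{init, step} {
// 		if _, err := sess.Run(nil, nil, []*tf.Operation{target}); err != nil {
// 			panic(err)
// 		}
// 	}
// 	out, err := sess.Run(nil, []tf.Output{read}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [0 0] for the values above
// }
// ```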
37876
37877// ResourceApplyKerasMomentumAttr is an optional argument to ResourceApplyKerasMomentum.
37878type ResourceApplyKerasMomentumAttr func(optionalAttr)
37879
37880// ResourceApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
37881//
37882// value: If `True`, updating of the var and accum tensors will be protected
37883// by a lock; otherwise the behavior is undefined, but may exhibit less
37884// contention.
37885// If not specified, defaults to false
37886func ResourceApplyKerasMomentumUseLocking(value bool) ResourceApplyKerasMomentumAttr {
37887	return func(m optionalAttr) {
37888		m["use_locking"] = value
37889	}
37890}
37891
37892// ResourceApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
37893//
37894// value: If `True`, the tensor passed to compute grad will be
37895// var + momentum * accum, so in the end, the var you get is actually
37896// var + momentum * accum.
37897// If not specified, defaults to false
37898func ResourceApplyKerasMomentumUseNesterov(value bool) ResourceApplyKerasMomentumAttr {
37899	return func(m optionalAttr) {
37900		m["use_nesterov"] = value
37901	}
37902}
37903
37904// Update '*var' according to the momentum scheme.
37905//
37906// Set use_nesterov = True if you want to use Nesterov momentum.
37907//
37908// accum = accum * momentum - lr * grad
37909// var += accum
37910//
37911// Arguments:
37912//
37913//	var_: Should be from a Variable().
37914//	accum: Should be from a Variable().
37915//	lr: Scaling factor. Must be a scalar.
37916//	grad: The gradient.
37917//	momentum: Momentum. Must be a scalar.
37918//
37919// Returns the created operation.
37920func ResourceApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyKerasMomentumAttr) (o *tf.Operation) {
37921	if scope.Err() != nil {
37922		return
37923	}
37924	attrs := map[string]interface{}{}
37925	for _, a := range optional {
37926		a(attrs)
37927	}
37928	opspec := tf.OpSpec{
37929		Type: "ResourceApplyKerasMomentum",
37930		Input: []tf.Input{
37931			var_, accum, lr, grad, momentum,
37932		},
37933		Attrs: attrs,
37934	}
37935	return scope.AddOperation(opspec)
37936}
37937
37938// ResourceApplyMomentumAttr is an optional argument to ResourceApplyMomentum.
37939type ResourceApplyMomentumAttr func(optionalAttr)
37940
37941// ResourceApplyMomentumUseLocking sets the optional use_locking attribute to value.
37942//
37943// value: If `True`, updating of the var and accum tensors will be protected
37944// by a lock; otherwise the behavior is undefined, but may exhibit less
37945// contention.
37946// If not specified, defaults to false
37947func ResourceApplyMomentumUseLocking(value bool) ResourceApplyMomentumAttr {
37948	return func(m optionalAttr) {
37949		m["use_locking"] = value
37950	}
37951}
37952
37953// ResourceApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
37954//
37955// value: If `True`, the tensor passed to compute grad will be
37956// var - lr * momentum * accum, so in the end, the var you get is actually
37957// var - lr * momentum * accum.
37958// If not specified, defaults to false
37959func ResourceApplyMomentumUseNesterov(value bool) ResourceApplyMomentumAttr {
37960	return func(m optionalAttr) {
37961		m["use_nesterov"] = value
37962	}
37963}
37964
37965// Update '*var' according to the momentum scheme.
37966//
37967// Set use_nesterov = True if you want to use Nesterov momentum.
37968//
37969// accum = accum * momentum + grad
37970// var -= lr * accum
37971//
37972// Arguments:
37973//
37974//	var_: Should be from a Variable().
37975//	accum: Should be from a Variable().
37976//	lr: Scaling factor. Must be a scalar.
37977//	grad: The gradient.
37978//	momentum: Momentum. Must be a scalar.
37979//
37980// Returns the created operation.
37981func ResourceApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, momentum tf.Output, optional ...ResourceApplyMomentumAttr) (o *tf.Operation) {
37982	if scope.Err() != nil {
37983		return
37984	}
37985	attrs := map[string]interface{}{}
37986	for _, a := range optional {
37987		a(attrs)
37988	}
37989	opspec := tf.OpSpec{
37990		Type: "ResourceApplyMomentum",
37991		Input: []tf.Input{
37992			var_, accum, lr, grad, momentum,
37993		},
37994		Attrs: attrs,
37995	}
37996	return scope.AddOperation(opspec)
37997}
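
// A hypothetical graph-construction helper for this wrapper (imports and
// Session boilerplate as in the ResourceApplyGradientDescent sketch above;
// the helper name, shapes, and values are assumptions for illustration):
//
// ```go
// // buildMomentumStep wires one Nesterov-momentum update into scope s.
// func buildMomentumStep(s *op.Scope) (initV, initA, step *tf.Operation) {
// 	v := op.VarHandleOp(s, tf.Float, tf.MakeShape(2), op.VarHandleOpSharedName("v"))
// 	accum := op.VarHandleOp(s, tf.Float, tf.MakeShape(2), op.VarHandleOpSharedName("accum"))
// 	initV = op.AssignVariableOp(s, v, op.Const(s, []float32{1, 2}))
// 	initA = op.AssignVariableOp(s, accum, op.Const(s, []float32{0, 0}))
// 	// accum = accum * momentum + grad; var -= lr * accum.
// 	step = op.ResourceApplyMomentum(s, v, accum,
// 		op.Const(s, float32(0.01)),       // lr
// 		op.Const(s, []float32{0.5, 0.5}), // grad
// 		op.Const(s, float32(0.9)),        // momentum
// 		op.ResourceApplyMomentumUseNesterov(true))
// 	return initV, initA, step
// }
// ```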
37998
37999// ResourceApplyPowerSignAttr is an optional argument to ResourceApplyPowerSign.
38000type ResourceApplyPowerSignAttr func(optionalAttr)
38001
38002// ResourceApplyPowerSignUseLocking sets the optional use_locking attribute to value.
38003//
38004// value: If `True`, updating of the var and m tensors is
38005// protected by a lock; otherwise the behavior is undefined, but may exhibit less
38006// contention.
38007// If not specified, defaults to false
38008func ResourceApplyPowerSignUseLocking(value bool) ResourceApplyPowerSignAttr {
38009	return func(m optionalAttr) {
38010		m["use_locking"] = value
38011	}
38012}
38013
38014// Update '*var' according to the PowerSign update.
38015//
38016// m_t <- beta1 * m_{t-1} + (1 - beta1) * g
38017// update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
38018// variable <- variable - lr_t * update
38019//
38020// Arguments:
38021//
38022//	var_: Should be from a Variable().
38023//	m: Should be from a Variable().
38024//	lr: Scaling factor. Must be a scalar.
38025//	logbase: Must be a scalar.
38026//	sign_decay: Must be a scalar.
38027//	beta: Must be a scalar.
38028//	grad: The gradient.
38029//
38030// Returns the created operation.
38031func ResourceApplyPowerSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Output, logbase tf.Output, sign_decay tf.Output, beta tf.Output, grad tf.Output, optional ...ResourceApplyPowerSignAttr) (o *tf.Operation) {
38032	if scope.Err() != nil {
38033		return
38034	}
38035	attrs := map[string]interface{}{}
38036	for _, a := range optional {
38037		a(attrs)
38038	}
38039	opspec := tf.OpSpec{
38040		Type: "ResourceApplyPowerSign",
38041		Input: []tf.Input{
38042			var_, m, lr, logbase, sign_decay, beta, grad,
38043		},
38044		Attrs: attrs,
38045	}
38046	return scope.AddOperation(opspec)
38047}
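
// A hypothetical helper showing how the scalar hyperparameters map onto this
// wrapper's inputs (values are illustrative; imports as in the earlier sketch):
//
// ```go
// // powerSignStep wires one PowerSign update for existing var/m handles.
// func powerSignStep(s *op.Scope, v, m, grad tf.Output) *tf.Operation {
// 	return op.ResourceApplyPowerSign(s, v, m,
// 		op.Const(s, float32(0.1)),        // lr
// 		op.Const(s, float32(0.69314718)), // logbase (ln 2, so the base is 2)
// 		op.Const(s, float32(0.99)),       // sign_decay
// 		op.Const(s, float32(0.9)),        // beta
// 		grad)
// }
// ```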
38048
38049// ResourceApplyProximalAdagradAttr is an optional argument to ResourceApplyProximalAdagrad.
38050type ResourceApplyProximalAdagradAttr func(optionalAttr)
38051
38052// ResourceApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
38053//
38054// value: If True, updating of the var and accum tensors will be protected by
38055// a lock; otherwise the behavior is undefined, but may exhibit less contention.
38056// If not specified, defaults to false
38057func ResourceApplyProximalAdagradUseLocking(value bool) ResourceApplyProximalAdagradAttr {
38058	return func(m optionalAttr) {
38059		m["use_locking"] = value
38060	}
38061}
38062
38063// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
38064//
38065// accum += grad * grad
38066// prox_v = var - lr * grad * (1 / sqrt(accum))
38067// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
38068//
38069// Arguments:
38070//
38071//	var_: Should be from a Variable().
38072//	accum: Should be from a Variable().
38073//	lr: Scaling factor. Must be a scalar.
38074//	l1: L1 regularization. Must be a scalar.
38075//	l2: L2 regularization. Must be a scalar.
38076//	grad: The gradient.
38077//
38078// Returns the created operation.
38079func ResourceApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, optional ...ResourceApplyProximalAdagradAttr) (o *tf.Operation) {
38080	if scope.Err() != nil {
38081		return
38082	}
38083	attrs := map[string]interface{}{}
38084	for _, a := range optional {
38085		a(attrs)
38086	}
38087	opspec := tf.OpSpec{
38088		Type: "ResourceApplyProximalAdagrad",
38089		Input: []tf.Input{
38090			var_, accum, lr, l1, l2, grad,
38091		},
38092		Attrs: attrs,
38093	}
38094	return scope.AddOperation(opspec)
38095}
38096
38097// ResourceApplyProximalGradientDescentAttr is an optional argument to ResourceApplyProximalGradientDescent.
38098type ResourceApplyProximalGradientDescentAttr func(optionalAttr)
38099
38100// ResourceApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
38101//
38102// value: If True, the subtraction will be protected by a lock;
38103// otherwise the behavior is undefined, but may exhibit less contention.
38104// If not specified, defaults to false
38105func ResourceApplyProximalGradientDescentUseLocking(value bool) ResourceApplyProximalGradientDescentAttr {
38106	return func(m optionalAttr) {
38107		m["use_locking"] = value
38108	}
38109}
38110
38111// Update '*var' according to the FOBOS algorithm with a fixed learning rate.
38112//
38113// prox_v = var - alpha * delta
38114// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
38115//
38116// Arguments:
38117//
38118//	var_: Should be from a Variable().
38119//	alpha: Scaling factor. Must be a scalar.
38120//	l1: L1 regularization. Must be a scalar.
38121//	l2: L2 regularization. Must be a scalar.
38122//	delta: The change.
38123//
38124// Returns the created operation.
38125func ResourceApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, delta tf.Output, optional ...ResourceApplyProximalGradientDescentAttr) (o *tf.Operation) {
38126	if scope.Err() != nil {
38127		return
38128	}
38129	attrs := map[string]interface{}{}
38130	for _, a := range optional {
38131		a(attrs)
38132	}
38133	opspec := tf.OpSpec{
38134		Type: "ResourceApplyProximalGradientDescent",
38135		Input: []tf.Input{
38136			var_, alpha, l1, l2, delta,
38137		},
38138		Attrs: attrs,
38139	}
38140	return scope.AddOperation(opspec)
38141}
38142
38143// ResourceApplyRMSPropAttr is an optional argument to ResourceApplyRMSProp.
38144type ResourceApplyRMSPropAttr func(optionalAttr)
38145
38146// ResourceApplyRMSPropUseLocking sets the optional use_locking attribute to value.
38147//
38148// value: If `True`, updating of the var, ms, and mom tensors is protected
38149// by a lock; otherwise the behavior is undefined, but may exhibit less
38150// contention.
38151// If not specified, defaults to false
38152func ResourceApplyRMSPropUseLocking(value bool) ResourceApplyRMSPropAttr {
38153	return func(m optionalAttr) {
38154		m["use_locking"] = value
38155	}
38156}
38157
38158// Update '*var' according to the RMSProp algorithm.
38159//
38160// Note that in the dense implementation of this algorithm, ms and mom will
38161// update even if the grad is zero, but in the sparse implementation, ms
38162// and mom will not update in iterations during which the grad is zero.
38163//
38164// mean_square = decay * mean_square + (1-decay) * gradient ** 2
38165// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
38166//
38167// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
38168// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
38169// var <- var - mom
38170//
38171// Arguments:
38172//
38173//	var_: Should be from a Variable().
38174//	ms: Should be from a Variable().
38175//	mom: Should be from a Variable().
38176//	lr: Scaling factor. Must be a scalar.
38177//	rho: Decay rate. Must be a scalar.
38178//	momentum: Momentum. Must be a scalar.
38179//	epsilon: Ridge term. Must be a scalar.
38180//	grad: The gradient.
38181//
38182// Returns the created operation.
38183func ResourceApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, optional ...ResourceApplyRMSPropAttr) (o *tf.Operation) {
38184	if scope.Err() != nil {
38185		return
38186	}
38187	attrs := map[string]interface{}{}
38188	for _, a := range optional {
38189		a(attrs)
38190	}
38191	opspec := tf.OpSpec{
38192		Type: "ResourceApplyRMSProp",
38193		Input: []tf.Input{
38194			var_, ms, mom, lr, rho, momentum, epsilon, grad,
38195		},
38196		Attrs: attrs,
38197	}
38198	return scope.AddOperation(opspec)
38199}
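
// A hypothetical helper mapping the RMSProp hyperparameters onto this
// wrapper's inputs (the var/ms/mom handles come from VarHandleOp elsewhere;
// all values are illustrative assumptions):
//
// ```go
// // rmspropStep wires one RMSProp update for existing var/ms/mom handles.
// func rmspropStep(s *op.Scope, v, ms, mom, grad tf.Output) *tf.Operation {
// 	return op.ResourceApplyRMSProp(s, v, ms, mom,
// 		op.Const(s, float32(0.001)), // lr
// 		op.Const(s, float32(0.9)),   // rho (decay rate)
// 		op.Const(s, float32(0.0)),   // momentum
// 		op.Const(s, float32(1e-7)),  // epsilon (ridge term)
// 		grad)
// }
// ```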
38200
38201// ResourceConditionalAccumulatorAttr is an optional argument to ResourceConditionalAccumulator.
38202type ResourceConditionalAccumulatorAttr func(optionalAttr)
38203
38204// ResourceConditionalAccumulatorContainer sets the optional container attribute to value.
38205//
38206// value: If non-empty, this accumulator is placed in the given container.
38207// Otherwise, a default container is used.
38208// If not specified, defaults to ""
38209func ResourceConditionalAccumulatorContainer(value string) ResourceConditionalAccumulatorAttr {
38210	return func(m optionalAttr) {
38211		m["container"] = value
38212	}
38213}
38214
38215// ResourceConditionalAccumulatorSharedName sets the optional shared_name attribute to value.
38216//
38217// value: If non-empty, this accumulator will be shared under the
38218// given name across multiple sessions.
38219// If not specified, defaults to ""
38220func ResourceConditionalAccumulatorSharedName(value string) ResourceConditionalAccumulatorAttr {
38221	return func(m optionalAttr) {
38222		m["shared_name"] = value
38223	}
38224}
38225
38226// ResourceConditionalAccumulatorReductionType sets the optional reduction_type attribute to value.
38227// If not specified, defaults to "MEAN"
38228func ResourceConditionalAccumulatorReductionType(value string) ResourceConditionalAccumulatorAttr {
38229	return func(m optionalAttr) {
38230		m["reduction_type"] = value
38231	}
38232}
38233
38234// A conditional accumulator for aggregating gradients.
38235//
38236// The accumulator accepts gradients marked with local_step greater than or
38237// equal to the most recent global_step known to the accumulator. The
38238// average can be extracted from the accumulator, provided sufficient
38239// gradients have been accumulated. Extracting the average automatically
38240// resets the aggregate to 0, and increments the global_step recorded by
38241// the accumulator.
38242// This is a resource version of ConditionalAccumulator that will work in TF2.0
38243// with tf.cond version 2.
38244//
38245// Arguments:
38246//
38247//	dtype: The type of the value being accumulated.
38248//	shape: The shape of the values; can be [], in which case the shape is unknown.
38249//
38250// Returns The handle to the accumulator.
38251func ResourceConditionalAccumulator(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...ResourceConditionalAccumulatorAttr) (handle tf.Output) {
38252	if scope.Err() != nil {
38253		return
38254	}
38255	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
38256	for _, a := range optional {
38257		a(attrs)
38258	}
38259	opspec := tf.OpSpec{
38260		Type: "ResourceConditionalAccumulator",
38261
38262		Attrs: attrs,
38263	}
38264	op := scope.AddOperation(opspec)
38265	return op.Output(0)
38266}
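
// A minimal construction sketch (the shared name, shape, and helper name are
// illustrative assumptions; imports as in the earlier sketch):
//
// ```go
// // makeAccumulator creates a float32 accumulator for shape-[2] gradients.
// func makeAccumulator(s *op.Scope) tf.Output {
// 	return op.ResourceConditionalAccumulator(s, tf.Float, tf.MakeShape(2),
// 		op.ResourceConditionalAccumulatorSharedName("grad_acc"),
// 		op.ResourceConditionalAccumulatorReductionType("MEAN"))
// }
// ```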
38267
38268// Increments variable pointed to by 'resource' until it reaches 'limit'.
38269//
38270// Arguments:
38271//
38272//	resource: Should be from a scalar `Variable` node.
38273//	limit: If incrementing ref would bring it above limit, instead generates an
38274//	'OutOfRange' error.
38276//
38277// Returns A copy of the input before increment. If nothing else modifies the
38278// input, the values produced will all be distinct.
38279func ResourceCountUpTo(scope *Scope, resource tf.Output, limit int64, T tf.DataType) (output tf.Output) {
38280	if scope.Err() != nil {
38281		return
38282	}
38283	attrs := map[string]interface{}{"limit": limit, "T": T}
38284	opspec := tf.OpSpec{
38285		Type: "ResourceCountUpTo",
38286		Input: []tf.Input{
38287			resource,
38288		},
38289		Attrs: attrs,
38290	}
38291	op := scope.AddOperation(opspec)
38292	return op.Output(0)
38293}
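
// A hypothetical counter built on this wrapper (imports and Session
// boilerplate as in the earlier sketch; the limit and dtype are illustrative):
//
// ```go
// // buildCounter returns an initializer and a fetchable pre-increment value.
// func buildCounter(s *op.Scope) (init *tf.Operation, next tf.Output) {
// 	// A scalar int64 resource variable used as the counter.
// 	c := op.VarHandleOp(s, tf.Int64, tf.ScalarShape())
// 	init = op.AssignVariableOp(s, c, op.Const(s, int64(0)))
// 	// Each Session.Run fetching `next` yields 0, 1, 2, then fails with an
// 	// OutOfRange error once the variable would pass limit=3.
// 	next = op.ResourceCountUpTo(s, c, 3, tf.Int64)
// 	return init, next
// }
// ```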
38294
38295// ResourceGatherAttr is an optional argument to ResourceGather.
38296type ResourceGatherAttr func(optionalAttr)
38297
38298// ResourceGatherBatchDims sets the optional batch_dims attribute to value.
38299// If not specified, defaults to 0
38300func ResourceGatherBatchDims(value int64) ResourceGatherAttr {
38301	return func(m optionalAttr) {
38302		m["batch_dims"] = value
38303	}
38304}
38305
38306// ResourceGatherValidateIndices sets the optional validate_indices attribute to value.
38307// If not specified, defaults to true
38308func ResourceGatherValidateIndices(value bool) ResourceGatherAttr {
38309	return func(m optionalAttr) {
38310		m["validate_indices"] = value
38311	}
38312}
38313
38314// Gather slices from the variable pointed to by `resource` according to `indices`.
38315//
38316// `indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
38317// Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
38318//
38319// ```python
38320//
38321//	# Scalar indices
38322//	output[:, ..., :] = params[indices, :, ... :]
38323//
38324//	# Vector indices
38325//	output[i, :, ..., :] = params[indices[i], :, ... :]
38326//
38327//	# Higher rank indices
38328//	output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
38329//
38330// ```
38331func ResourceGather(scope *Scope, resource tf.Output, indices tf.Output, dtype tf.DataType, optional ...ResourceGatherAttr) (output tf.Output) {
38332	if scope.Err() != nil {
38333		return
38334	}
38335	attrs := map[string]interface{}{"dtype": dtype}
38336	for _, a := range optional {
38337		a(attrs)
38338	}
38339	opspec := tf.OpSpec{
38340		Type: "ResourceGather",
38341		Input: []tf.Input{
38342			resource, indices,
38343		},
38344		Attrs: attrs,
38345	}
38346	op := scope.AddOperation(opspec)
38347	return op.Output(0)
38348}
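
// A small sketch of the shape rule above (the values and helper name are
// illustrative assumptions; imports as in the earlier sketch):
//
// ```go
// // gatherRows gathers rows 2 and 0 of a [4, 2] variable: the result has
// // shape indices.shape + params.shape[1:] = [2, 2].
// func gatherRows(s *op.Scope) (init *tf.Operation, rows tf.Output) {
// 	params := op.VarHandleOp(s, tf.Float, tf.MakeShape(4, 2))
// 	init = op.AssignVariableOp(s, params,
// 		op.Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}, {7, 8}}))
// 	rows = op.ResourceGather(s, params, op.Const(s, []int32{2, 0}), tf.Float)
// 	return init, rows
// }
// ```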
38349
38350// Adds sparse updates to the variable referenced by `resource`.
38351//
38352// This operation computes
38353//
38354//	# Scalar indices
38355//	ref[indices, ...] += updates[...]
38356//
38357//	# Vector indices (for each i)
38358//	ref[indices[i], ...] += updates[i, ...]
38359//
38360//	# High rank indices (for each i, ..., j)
38361//	ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
38362//
38363// Duplicate entries are handled correctly: if multiple `indices` reference
38364// the same location, their contributions add.
38365//
38366// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
38367//
38368// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
38369// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
38370// </div>
38371//
38372// Arguments:
38373//
38374//	resource: Should be from a `Variable` node.
38375//	indices: A tensor of indices into the first dimension of `ref`.
38376//	updates: A tensor of updated values to add to `ref`.
38377//
38378// Returns the created operation.
38379func ResourceScatterAdd(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38380	if scope.Err() != nil {
38381		return
38382	}
38383	opspec := tf.OpSpec{
38384		Type: "ResourceScatterAdd",
38385		Input: []tf.Input{
38386			resource, indices, updates,
38387		},
38388	}
38389	return scope.AddOperation(opspec)
38390}
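
// A sketch of the duplicate-index behavior described above (values are
// illustrative assumptions; imports as in the earlier sketch):
//
// ```go
// // scatterAddExample adds into rows 1, 1 and 3; the two contributions to
// // row 1 accumulate, leaving the variable at [0, 30, 0, 5].
// func scatterAddExample(s *op.Scope) (init, add *tf.Operation) {
// 	ref := op.VarHandleOp(s, tf.Float, tf.MakeShape(4))
// 	init = op.AssignVariableOp(s, ref, op.Const(s, []float32{0, 0, 0, 0}))
// 	add = op.ResourceScatterAdd(s, ref,
// 		op.Const(s, []int32{1, 1, 3}),     // indices (note the duplicate)
// 		op.Const(s, []float32{10, 20, 5})) // updates, shape = indices.shape
// 	return init, add
// }
// ```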
38391
38392// Divides the variable referenced by `resource` by sparse updates.
38393//
38394// This operation computes
38395//
38396//	# Scalar indices
38397//	ref[indices, ...] /= updates[...]
38398//
38399//	# Vector indices (for each i)
38400//	ref[indices[i], ...] /= updates[i, ...]
38401//
38402//	# High rank indices (for each i, ..., j)
38403//	ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
38404//
38405// Duplicate entries are handled correctly: if multiple `indices` reference
38406// the same location, the value is divided by each corresponding update in
38406// turn (the divisors combine multiplicatively).
38407//
38408// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
38409//
38410// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
38411// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
38412// </div>
38413//
38414// Arguments:
38415//
38416//	resource: Should be from a `Variable` node.
38417//	indices: A tensor of indices into the first dimension of `ref`.
38418//	updates: A tensor of values by which to divide `ref`.
38419//
38420// Returns the created operation.
38421func ResourceScatterDiv(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38422	if scope.Err() != nil {
38423		return
38424	}
38425	opspec := tf.OpSpec{
38426		Type: "ResourceScatterDiv",
38427		Input: []tf.Input{
38428			resource, indices, updates,
38429		},
38430	}
38431	return scope.AddOperation(opspec)
38432}
38433
38434// Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
38435//
38436// This operation computes
38437//
38438//	# Scalar indices
38439//	ref[indices, ...] = max(ref[indices, ...], updates[...])
38440//
38441//	# Vector indices (for each i)
38442//	ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
38443//
38444//	# High rank indices (for each i, ..., j)
38445//	ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
38446//
38447// Duplicate entries are handled correctly: if multiple `indices` reference
38448// the same location, their contributions are combined.
38449//
38450// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
38451//
38452// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
38453// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
38454// </div>
38455//
38456// Arguments:
38457//
38458//	resource: Should be from a `Variable` node.
38459//	indices: A tensor of indices into the first dimension of `ref`.
38460//	updates: A tensor of values to reduce into `ref` using the `max` operation.
38461//
38462// Returns the created operation.
38463func ResourceScatterMax(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38464	if scope.Err() != nil {
38465		return
38466	}
38467	opspec := tf.OpSpec{
38468		Type: "ResourceScatterMax",
38469		Input: []tf.Input{
38470			resource, indices, updates,
38471		},
38472	}
38473	return scope.AddOperation(opspec)
38474}
38475
38476// Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
38477//
38478// This operation computes
38479//
38480//	# Scalar indices
38481//	ref[indices, ...] = min(ref[indices, ...], updates[...])
38482//
38483//	# Vector indices (for each i)
38484//	ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
38485//
38486//	# High rank indices (for each i, ..., j)
38487//	ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
38488//
38489// Duplicate entries are handled correctly: if multiple `indices` reference
38490// the same location, their contributions are combined.
38491//
38492// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
38493//
38494// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
38495// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
38496// </div>
38497//
38498// Arguments:
38499//
38500//	resource: Should be from a `Variable` node.
38501//	indices: A tensor of indices into the first dimension of `ref`.
38502//	updates: A tensor of values to reduce into `ref` using the `min` operation.
38503//
38504// Returns the created operation.
38505func ResourceScatterMin(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38506	if scope.Err() != nil {
38507		return
38508	}
38509	opspec := tf.OpSpec{
38510		Type: "ResourceScatterMin",
38511		Input: []tf.Input{
38512			resource, indices, updates,
38513		},
38514	}
38515	return scope.AddOperation(opspec)
38516}
38517
38518// Multiplies sparse updates into the variable referenced by `resource`.
38519//
38520// This operation computes
38521//
38522//	# Scalar indices
38523//	ref[indices, ...] *= updates[...]
38524//
38525//	# Vector indices (for each i)
38526//	ref[indices[i], ...] *= updates[i, ...]
38527//
38528//	# High rank indices (for each i, ..., j)
38529//	ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
38530//
38531// Duplicate entries are handled correctly: if multiple `indices` reference
38532// the same location, their contributions multiply.
38533//
38534// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
38535//
38536// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
38537// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
38538// </div>
38539//
38540// Arguments:
38541//
38542//	resource: Should be from a `Variable` node.
38543//	indices: A tensor of indices into the first dimension of `ref`.
38544//	updates: A tensor of values to multiply into `ref`.
38545//
38546// Returns the created operation.
38547func ResourceScatterMul(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38548	if scope.Err() != nil {
38549		return
38550	}
38551	opspec := tf.OpSpec{
38552		Type: "ResourceScatterMul",
38553		Input: []tf.Input{
38554			resource, indices, updates,
38555		},
38556	}
38557	return scope.AddOperation(opspec)
38558}
38559
38560// ResourceScatterNdAddAttr is an optional argument to ResourceScatterNdAdd.
38561type ResourceScatterNdAddAttr func(optionalAttr)
38562
38563// ResourceScatterNdAddUseLocking sets the optional use_locking attribute to value.
38564//
38565// value: An optional bool. Defaults to True. If True, the assignment will
38566// be protected by a lock; otherwise the behavior is undefined,
38567// but may exhibit less contention.
38568// If not specified, defaults to true
38569func ResourceScatterNdAddUseLocking(value bool) ResourceScatterNdAddAttr {
38570	return func(m optionalAttr) {
38571		m["use_locking"] = value
38572	}
38573}
38574
38575// Applies sparse addition to individual values or slices in a Variable.
38576//
38577// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
38578//
38579// `indices` must be an integer tensor containing indices into `ref`.
38580// Its shape must be `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
38581//
38582// The innermost dimension of `indices` (with length `K`) corresponds to
38583// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
38584// dimension of `ref`.
38585//
38586// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
38587//
38588// ```
38589// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
38590// ```
38591//
38592// For example, say we want to add 4 scattered elements to a rank-1 tensor
38593// with 8 elements. In Python, that addition would look like this:
38594//
38595// ```python
38596// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
38597// indices = tf.constant([[4], [3], [1], [7]])
38598// updates = tf.constant([9, 10, 11, 12])
38599// add = tf.scatter_nd_add(ref, indices, updates)
38600// with tf.Session() as sess:
38601//     print(sess.run(add))
38604// ```
38605//
38606// The resulting update to ref would look like this:
38607//
38608//	[1, 13, 3, 14, 14, 6, 7, 20]
38609//
38610// See `tf.scatter_nd` for more details about how to make updates to
38611// slices.
38612//
38613// Arguments:
38614//
38615//	ref: A resource handle. Must be from a VarHandleOp.
38616//	indices: A Tensor. Must be one of the following types: int32, int64.
38617//	A tensor of indices into ref.
38618//	updates: A Tensor. Must have the same type as ref. A tensor of
38619//	values to add to ref.
38623//
38624// Returns the created operation.
38625func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdAddAttr) (o *tf.Operation) {
38626	if scope.Err() != nil {
38627		return
38628	}
38629	attrs := map[string]interface{}{}
38630	for _, a := range optional {
38631		a(attrs)
38632	}
38633	opspec := tf.OpSpec{
38634		Type: "ResourceScatterNdAdd",
38635		Input: []tf.Input{
38636			ref, indices, updates,
38637		},
38638		Attrs: attrs,
38639	}
38640	return scope.AddOperation(opspec)
38641}
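
// The same example in Go, as a hypothetical sketch (imports and Session
// boilerplate as in the earlier sketch):
//
// ```go
// // scatterNdAddExample reproduces the Python example above: afterwards the
// // variable holds [1, 13, 3, 14, 14, 6, 7, 20].
// func scatterNdAddExample(s *op.Scope) (init, add *tf.Operation) {
// 	ref := op.VarHandleOp(s, tf.Int32, tf.MakeShape(8))
// 	init = op.AssignVariableOp(s, ref,
// 		op.Const(s, []int32{1, 2, 3, 4, 5, 6, 7, 8}))
// 	// indices has shape [4, 1] (Q = 2, K = 1 = P), so updates has rank
// 	// Q-1+P-K = 1 and addresses four individual elements.
// 	add = op.ResourceScatterNdAdd(s, ref,
// 		op.Const(s, [][]int32{{4}, {3}, {1}, {7}}),
// 		op.Const(s, []int32{9, 10, 11, 12}))
// 	return init, add
// }
// ```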
38642
38643// ResourceScatterNdSubAttr is an optional argument to ResourceScatterNdSub.
38644type ResourceScatterNdSubAttr func(optionalAttr)
38645
38646// ResourceScatterNdSubUseLocking sets the optional use_locking attribute to value.
38647//
38648// value: An optional bool. Defaults to True. If True, the assignment will
38649// be protected by a lock; otherwise the behavior is undefined,
38650// but may exhibit less contention.
38651// If not specified, defaults to true
38652func ResourceScatterNdSubUseLocking(value bool) ResourceScatterNdSubAttr {
38653	return func(m optionalAttr) {
38654		m["use_locking"] = value
38655	}
38656}
38657
38658// Applies sparse subtraction to individual values or slices in a Variable.
38659//
38660// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
38661//
38662// `indices` must be an integer tensor containing indices into `ref`.
38663// Its shape must be `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
38664//
38665// The innermost dimension of `indices` (with length `K`) corresponds to
38666// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
38667// dimension of `ref`.
38668//
38669// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
38670//
38671// ```
38672// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
38673// ```
38674//
38675// For example, say we want to subtract 4 scattered elements from a rank-1 tensor
38676// with 8 elements. In Python, that subtraction would look like this:
38677//
38678// ```python
38679// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
38680// indices = tf.constant([[4], [3], [1], [7]])
38681// updates = tf.constant([9, 10, 11, 12])
38682// sub = tf.scatter_nd_sub(ref, indices, updates)
38683// with tf.Session() as sess:
38684//     print(sess.run(sub))
38687// ```
38688//
38689// The resulting update to ref would look like this:
38690//
38691//	[1, -9, 3, -6, -4, 6, 7, -4]
38692//
38693// See `tf.scatter_nd` for more details about how to make updates to
38694// slices.
38695//
38696// Arguments:
38697//
38698//	ref: A resource handle. Must be from a VarHandleOp.
38699//	indices: A Tensor. Must be one of the following types: int32, int64.
38700//	A tensor of indices into ref.
38701//	updates: A Tensor. Must have the same type as ref. A tensor of
38702//	values to subtract from ref.
38706//
38707// Returns the created operation.
38708func ResourceScatterNdSub(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdSubAttr) (o *tf.Operation) {
38709	if scope.Err() != nil {
38710		return
38711	}
38712	attrs := map[string]interface{}{}
38713	for _, a := range optional {
38714		a(attrs)
38715	}
38716	opspec := tf.OpSpec{
38717		Type: "ResourceScatterNdSub",
38718		Input: []tf.Input{
38719			ref, indices, updates,
38720		},
38721		Attrs: attrs,
38722	}
38723	return scope.AddOperation(opspec)
38724}
38725
38726// ResourceScatterNdUpdateAttr is an optional argument to ResourceScatterNdUpdate.
38727type ResourceScatterNdUpdateAttr func(optionalAttr)
38728
38729// ResourceScatterNdUpdateUseLocking sets the optional use_locking attribute to value.
38730//
38731// value: An optional bool. Defaults to True. If True, the assignment will
38732// be protected by a lock; otherwise the behavior is undefined,
38733// but may exhibit less contention.
38734// If not specified, defaults to true
38735func ResourceScatterNdUpdateUseLocking(value bool) ResourceScatterNdUpdateAttr {
38736	return func(m optionalAttr) {
38737		m["use_locking"] = value
38738	}
38739}
38740
38741// Applies sparse `updates` to individual values or slices within a given
38742// variable according to `indices`.
38744//
38745// `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
38746//
38747// `indices` must be an integer tensor containing indices into `ref`.
38748// Its shape must be `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
38749//
38750// The innermost dimension of `indices` (with length `K`) corresponds to
38751// indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
38752// dimension of `ref`.
38753//
38754// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
38755//
38756// ```
38757// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
38758// ```
38759//
38760// For example, say we want to update 4 scattered elements in a rank-1 tensor
38761// with 8 elements. In Python, that update would look like this:
38762//
38763// ```python
38764//
38765//	ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
38766//	indices = tf.constant([[4], [3], [1], [7]])
38767//	updates = tf.constant([9, 10, 11, 12])
38768//	update = tf.scatter_nd_update(ref, indices, updates)
38769//	with tf.Session() as sess:
38770//	  print(sess.run(update))
38771//
38772// ```
38773//
38774// The resulting update to ref would look like this:
38775//
38776//	[1, 11, 3, 10, 9, 6, 7, 12]
38777//
38778// See `tf.scatter_nd` for more details about how to make updates to
38779// slices.
38780//
38781// Arguments:
38782//
38783//	ref: A resource handle. Must be from a VarHandleOp.
38784//	indices: A Tensor. Must be one of the following types: int32, int64.
38785//	A tensor of indices into ref.
38786//	updates: A Tensor. Must have the same type as ref. A tensor of updated
38787//	values to assign to ref.
38791//
38792// Returns the created operation.
38793func ResourceScatterNdUpdate(scope *Scope, ref tf.Output, indices tf.Output, updates tf.Output, optional ...ResourceScatterNdUpdateAttr) (o *tf.Operation) {
38794	if scope.Err() != nil {
38795		return
38796	}
38797	attrs := map[string]interface{}{}
38798	for _, a := range optional {
38799		a(attrs)
38800	}
38801	opspec := tf.OpSpec{
38802		Type: "ResourceScatterNdUpdate",
38803		Input: []tf.Input{
38804			ref, indices, updates,
38805		},
38806		Attrs: attrs,
38807	}
38808	return scope.AddOperation(opspec)
38809}
38810
38811// Subtracts sparse updates from the variable referenced by `resource`.
38812//
38813// This operation computes
38814//
38815//	# Scalar indices
38816//	ref[indices, ...] -= updates[...]
38817//
38818//	# Vector indices (for each i)
38819//	ref[indices[i], ...] -= updates[i, ...]
38820//
38821//	# High rank indices (for each i, ..., j)
38822//	ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
38823//
38824// Duplicate entries are handled correctly: if multiple `indices` reference
38825// the same location, their contributions are summed before being subtracted.
38826//
38827// Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
38828//
38829// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
38830// <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
38831// </div>
38832//
38833// Arguments:
38834//
38835//	resource: Should be from a `Variable` node.
38836//	indices: A tensor of indices into the first dimension of `ref`.
38837//	updates: A tensor of values to subtract from `ref`.
38838//
38839// Returns the created operation.
38840func ResourceScatterSub(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38841	if scope.Err() != nil {
38842		return
38843	}
38844	opspec := tf.OpSpec{
38845		Type: "ResourceScatterSub",
38846		Input: []tf.Input{
38847			resource, indices, updates,
38848		},
38849	}
38850	return scope.AddOperation(opspec)
38851}
38852
38853// Assigns sparse updates to the variable referenced by `resource`.
38854//
38855// This operation computes
38856//
38857//	# Scalar indices
38858//	ref[indices, ...] = updates[...]
38859//
38860//	# Vector indices (for each i)
38861//	ref[indices[i], ...] = updates[i, ...]
38862//
38863//	# High rank indices (for each i, ..., j)
38864//	ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
38865//
38866// Arguments:
38867//
38868//	resource: Should be from a `Variable` node.
38869//	indices: A tensor of indices into the first dimension of `ref`.
38870//	updates: A tensor of updated values to assign to `ref`.
38871//
38872// Returns the created operation.
38873func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) {
38874	if scope.Err() != nil {
38875		return
38876	}
38877	opspec := tf.OpSpec{
38878		Type: "ResourceScatterUpdate",
38879		Input: []tf.Input{
38880			resource, indices, updates,
38881		},
38882	}
38883	return scope.AddOperation(opspec)
38884}
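
// A short sketch of the assignment semantics (values are illustrative
// assumptions; imports as in the earlier sketch):
//
// ```go
// // scatterUpdateExample overwrites rows 0 and 2, leaving [5, 1, 7].
// func scatterUpdateExample(s *op.Scope) (init, upd *tf.Operation) {
// 	ref := op.VarHandleOp(s, tf.Float, tf.MakeShape(3))
// 	init = op.AssignVariableOp(s, ref, op.Const(s, []float32{1, 1, 1}))
// 	upd = op.ResourceScatterUpdate(s, ref,
// 		op.Const(s, []int32{0, 2}),
// 		op.Const(s, []float32{5, 7}))
// 	return init, upd
// }
// ```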
38885
38886// ResourceSparseApplyAdadeltaAttr is an optional argument to ResourceSparseApplyAdadelta.
38887type ResourceSparseApplyAdadeltaAttr func(optionalAttr)
38888
38889// ResourceSparseApplyAdadeltaUseLocking sets the optional use_locking attribute to value.
38890//
38891// value: If True, updating of the var and accum tensors will be protected by
38892// a lock; otherwise the behavior is undefined, but may exhibit less contention.
38893// If not specified, defaults to false
38894func ResourceSparseApplyAdadeltaUseLocking(value bool) ResourceSparseApplyAdadeltaAttr {
38895	return func(m optionalAttr) {
38896		m["use_locking"] = value
38897	}
38898}
38899
38900// Update relevant entries in '*var' and '*accum' according to the adadelta scheme.
38901//
38902// Arguments:
38903//
38904//	var_: Should be from a Variable().
38905//	accum: Should be from a Variable().
38905//	accum_update: Should be from a Variable().
38906//	lr: Learning rate. Must be a scalar.
38907//	rho: Decay factor. Must be a scalar.
38908//	epsilon: Constant factor. Must be a scalar.
38909//	grad: The gradient.
38910//	indices: A vector of indices into the first dimension of var and accum.
38911//
38912// Returns the created operation.
38913func ResourceSparseApplyAdadelta(scope *Scope, var_ tf.Output, accum tf.Output, accum_update tf.Output, lr tf.Output, rho tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdadeltaAttr) (o *tf.Operation) {
38914	if scope.Err() != nil {
38915		return
38916	}
38917	attrs := map[string]interface{}{}
38918	for _, a := range optional {
38919		a(attrs)
38920	}
38921	opspec := tf.OpSpec{
38922		Type: "ResourceSparseApplyAdadelta",
38923		Input: []tf.Input{
38924			var_, accum, accum_update, lr, rho, epsilon, grad, indices,
38925		},
38926		Attrs: attrs,
38927	}
38928	return scope.AddOperation(opspec)
38929}
38930
38931// ResourceSparseApplyAdagradAttr is an optional argument to ResourceSparseApplyAdagrad.
38932type ResourceSparseApplyAdagradAttr func(optionalAttr)
38933
38934// ResourceSparseApplyAdagradUseLocking sets the optional use_locking attribute to value.
38935//
38936// value: If `True`, updating of the var and accum tensors will be protected
38937// by a lock; otherwise the behavior is undefined, but may exhibit less
38938// contention.
38939// If not specified, defaults to false
38940func ResourceSparseApplyAdagradUseLocking(value bool) ResourceSparseApplyAdagradAttr {
38941	return func(m optionalAttr) {
38942		m["use_locking"] = value
38943	}
38944}
38945
38946// ResourceSparseApplyAdagradUpdateSlots sets the optional update_slots attribute to value.
38947// If not specified, defaults to true
38948func ResourceSparseApplyAdagradUpdateSlots(value bool) ResourceSparseApplyAdagradAttr {
38949	return func(m optionalAttr) {
38950		m["update_slots"] = value
38951	}
38952}
38953
38954// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
38955//
38956// That is, for rows we have grad for, we update var and accum as follows:
38957// accum += grad * grad
38958// var -= lr * grad * (1 / sqrt(accum))
38959//
38960// Arguments:
38961//
38962//	var_: Should be from a Variable().
38963//	accum: Should be from a Variable().
38964//	lr: Learning rate. Must be a scalar.
38965//	grad: The gradient.
38966//	indices: A vector of indices into the first dimension of var and accum.
38967//
38968// Returns the created operation.
38969func ResourceSparseApplyAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradAttr) (o *tf.Operation) {
38970	if scope.Err() != nil {
38971		return
38972	}
38973	attrs := map[string]interface{}{}
38974	for _, a := range optional {
38975		a(attrs)
38976	}
38977	opspec := tf.OpSpec{
38978		Type: "ResourceSparseApplyAdagrad",
38979		Input: []tf.Input{
38980			var_, accum, lr, grad, indices,
38981		},
38982		Attrs: attrs,
38983	}
38984	return scope.AddOperation(opspec)
38985}
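
// A hypothetical helper showing the row-wise pairing of grad and indices
// (the var/accum handles come from VarHandleOp elsewhere; all values are
// illustrative assumptions):
//
// ```go
// // sparseAdagradStep updates only rows 0 and 3 of var/accum; grad row i
// // applies to row indices[i] of the variables.
// func sparseAdagradStep(s *op.Scope, v, accum tf.Output) *tf.Operation {
// 	return op.ResourceSparseApplyAdagrad(s, v, accum,
// 		op.Const(s, float32(0.1)),                    // lr
// 		op.Const(s, [][]float32{{0.5, 0.5}, {1, 1}}), // grad, shape [2, 2]
// 		op.Const(s, []int64{0, 3}),                   // indices into dim 0
// 		op.ResourceSparseApplyAdagradUpdateSlots(true))
// }
// ```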
38986
38987// ResourceSparseApplyAdagradDAAttr is an optional argument to ResourceSparseApplyAdagradDA.
38988type ResourceSparseApplyAdagradDAAttr func(optionalAttr)
38989
38990// ResourceSparseApplyAdagradDAUseLocking sets the optional use_locking attribute to value.
38991//
38992// value: If True, updating of the var and accum tensors will be protected by
38993// a lock; otherwise the behavior is undefined, but may exhibit less contention.
38994// If not specified, defaults to false
38995func ResourceSparseApplyAdagradDAUseLocking(value bool) ResourceSparseApplyAdagradDAAttr {
38996	return func(m optionalAttr) {
38997		m["use_locking"] = value
38998	}
38999}
39000
39001// Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
39002//
39003// Arguments:
39004//
39005//	var_: Should be from a Variable().
39006//	gradient_accumulator: Should be from a Variable().
39007//	gradient_squared_accumulator: Should be from a Variable().
39008//	grad: The gradient.
39009//	indices: A vector of indices into the first dimension of var and accum.
39010//	lr: Learning rate. Must be a scalar.
39011//	l1: L1 regularization. Must be a scalar.
39012//	l2: L2 regularization. Must be a scalar.
39013//	global_step: Training step number. Must be a scalar.
39014//
39015// Returns the created operation.
39016func ResourceSparseApplyAdagradDA(scope *Scope, var_ tf.Output, gradient_accumulator tf.Output, gradient_squared_accumulator tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, global_step tf.Output, optional ...ResourceSparseApplyAdagradDAAttr) (o *tf.Operation) {
39017	if scope.Err() != nil {
39018		return
39019	}
39020	attrs := map[string]interface{}{}
39021	for _, a := range optional {
39022		a(attrs)
39023	}
39024	opspec := tf.OpSpec{
39025		Type: "ResourceSparseApplyAdagradDA",
39026		Input: []tf.Input{
39027			var_, gradient_accumulator, gradient_squared_accumulator, grad, indices, lr, l1, l2, global_step,
39028		},
39029		Attrs: attrs,
39030	}
39031	return scope.AddOperation(opspec)
39032}
39033
39034// ResourceSparseApplyAdagradV2Attr is an optional argument to ResourceSparseApplyAdagradV2.
39035type ResourceSparseApplyAdagradV2Attr func(optionalAttr)
39036
39037// ResourceSparseApplyAdagradV2UseLocking sets the optional use_locking attribute to value.
39038//
39039// value: If `True`, updating of the var and accum tensors will be protected
39040// by a lock; otherwise the behavior is undefined, but may exhibit less
39041// contention.
39042// If not specified, defaults to false
39043func ResourceSparseApplyAdagradV2UseLocking(value bool) ResourceSparseApplyAdagradV2Attr {
39044	return func(m optionalAttr) {
39045		m["use_locking"] = value
39046	}
39047}
39048
39049// ResourceSparseApplyAdagradV2UpdateSlots sets the optional update_slots attribute to value.
39050// If not specified, defaults to true
39051func ResourceSparseApplyAdagradV2UpdateSlots(value bool) ResourceSparseApplyAdagradV2Attr {
39052	return func(m optionalAttr) {
39053		m["update_slots"] = value
39054	}
39055}
39056
39057// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
39058//
39059// That is, for rows we have grad for, we update var and accum as follows:
39060// accum += grad * grad
39061// var -= lr * grad * (1 / sqrt(accum))
39062//
39063// Arguments:
39064//
39065//	var_: Should be from a Variable().
39066//	accum: Should be from a Variable().
39067//	lr: Learning rate. Must be a scalar.
39068//	epsilon: Constant factor. Must be a scalar.
39069//	grad: The gradient.
39070//	indices: A vector of indices into the first dimension of var and accum.
39071//
39072// Returns the created operation.
39073func ResourceSparseApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyAdagradV2Attr) (o *tf.Operation) {
39074	if scope.Err() != nil {
39075		return
39076	}
39077	attrs := map[string]interface{}{}
39078	for _, a := range optional {
39079		a(attrs)
39080	}
39081	opspec := tf.OpSpec{
39082		Type: "ResourceSparseApplyAdagradV2",
39083		Input: []tf.Input{
39084			var_, accum, lr, epsilon, grad, indices,
39085		},
39086		Attrs: attrs,
39087	}
39088	return scope.AddOperation(opspec)
39089}
39090
39091// ResourceSparseApplyCenteredRMSPropAttr is an optional argument to ResourceSparseApplyCenteredRMSProp.
39092type ResourceSparseApplyCenteredRMSPropAttr func(optionalAttr)
39093
39094// ResourceSparseApplyCenteredRMSPropUseLocking sets the optional use_locking attribute to value.
39095//
39096// value: If `True`, updating of the var, mg, ms, and mom tensors is
39097// protected by a lock; otherwise the behavior is undefined, but may exhibit less
39098// contention.
39099// If not specified, defaults to false
39100func ResourceSparseApplyCenteredRMSPropUseLocking(value bool) ResourceSparseApplyCenteredRMSPropAttr {
39101	return func(m optionalAttr) {
39102		m["use_locking"] = value
39103	}
39104}
39105
39106// Update '*var' according to the centered RMSProp algorithm.
39107//
39108// The centered RMSProp algorithm uses an estimate of the centered second moment
39109// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
39110// uses the (uncentered) second moment. This often helps with training, but is
39111// slightly more expensive in terms of computation and memory.
39112//
39113// Note that in the dense implementation of this algorithm, mg, ms, and mom will
39114// update even if the grad is zero, but in this sparse implementation, mg, ms,
39115// and mom will not update in iterations during which the grad is zero.
39116//
39117// mean_square = decay * mean_square + (1-decay) * gradient ** 2
39118// mean_grad = decay * mean_grad + (1-decay) * gradient
39119// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
39120//
39121// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
39122// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
39123// var <- var - mom
39124//
39125// Arguments:
39126//
39127//	var_: Should be from a Variable().
39128//	mg: Should be from a Variable().
39129//	ms: Should be from a Variable().
39130//	mom: Should be from a Variable().
39131//	lr: Scaling factor. Must be a scalar.
39132//	rho: Decay rate. Must be a scalar.
39133//	momentum: Momentum. Must be a scalar.
39134//	epsilon: Ridge term. Must be a scalar.
39135//	grad: The gradient.
39136//	indices: A vector of indices into the first dimension of var, ms and mom.
39137//
39138// Returns the created operation.
39139func ResourceSparseApplyCenteredRMSProp(scope *Scope, var_ tf.Output, mg tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyCenteredRMSPropAttr) (o *tf.Operation) {
39140	if scope.Err() != nil {
39141		return
39142	}
39143	attrs := map[string]interface{}{}
39144	for _, a := range optional {
39145		a(attrs)
39146	}
39147	opspec := tf.OpSpec{
39148		Type: "ResourceSparseApplyCenteredRMSProp",
39149		Input: []tf.Input{
39150			var_, mg, ms, mom, lr, rho, momentum, epsilon, grad, indices,
39151		},
39152		Attrs: attrs,
39153	}
39154	return scope.AddOperation(opspec)
39155}
39156
39157// ResourceSparseApplyFtrlAttr is an optional argument to ResourceSparseApplyFtrl.
39158type ResourceSparseApplyFtrlAttr func(optionalAttr)
39159
39160// ResourceSparseApplyFtrlUseLocking sets the optional use_locking attribute to value.
39161//
39162// value: If `True`, updating of the var and accum tensors will be protected
39163// by a lock; otherwise the behavior is undefined, but may exhibit less
39164// contention.
39165// If not specified, defaults to false
39166func ResourceSparseApplyFtrlUseLocking(value bool) ResourceSparseApplyFtrlAttr {
39167	return func(m optionalAttr) {
39168		m["use_locking"] = value
39169	}
39170}
39171
39172// ResourceSparseApplyFtrlMultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
39173// If not specified, defaults to false
39174func ResourceSparseApplyFtrlMultiplyLinearByLr(value bool) ResourceSparseApplyFtrlAttr {
39175	return func(m optionalAttr) {
39176		m["multiply_linear_by_lr"] = value
39177	}
39178}
39179
39180// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
39181//
39182// That is for rows we have grad for, we update var, accum and linear as follows:
39183// accum_new = accum + grad * grad
39184// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
39185// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
39186// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
39187// accum = accum_new
39188//
39189// Arguments:
39190//
39191//	var_: Should be from a Variable().
39192//	accum: Should be from a Variable().
39193//	linear: Should be from a Variable().
39194//	grad: The gradient.
39195//	indices: A vector of indices into the first dimension of var and accum.
39196//	lr: Scaling factor. Must be a scalar.
39197//	l1: L1 regularization. Must be a scalar.
39198//	l2: L2 regularization. Must be a scalar.
39199//	lr_power: Scaling factor. Must be a scalar.
39200//
39201// Returns the created operation.
39202func ResourceSparseApplyFtrl(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlAttr) (o *tf.Operation) {
39203	if scope.Err() != nil {
39204		return
39205	}
39206	attrs := map[string]interface{}{}
39207	for _, a := range optional {
39208		a(attrs)
39209	}
39210	opspec := tf.OpSpec{
39211		Type: "ResourceSparseApplyFtrl",
39212		Input: []tf.Input{
39213			var_, accum, linear, grad, indices, lr, l1, l2, lr_power,
39214		},
39215		Attrs: attrs,
39216	}
39217	return scope.AddOperation(opspec)
39218}
39219
39220// ResourceSparseApplyFtrlV2Attr is an optional argument to ResourceSparseApplyFtrlV2.
39221type ResourceSparseApplyFtrlV2Attr func(optionalAttr)
39222
39223// ResourceSparseApplyFtrlV2UseLocking sets the optional use_locking attribute to value.
39224//
39225// value: If `True`, updating of the var and accum tensors will be protected
39226// by a lock; otherwise the behavior is undefined, but may exhibit less
39227// contention.
39228// If not specified, defaults to false
39229func ResourceSparseApplyFtrlV2UseLocking(value bool) ResourceSparseApplyFtrlV2Attr {
39230	return func(m optionalAttr) {
39231		m["use_locking"] = value
39232	}
39233}
39234
39235// ResourceSparseApplyFtrlV2MultiplyLinearByLr sets the optional multiply_linear_by_lr attribute to value.
39236// If not specified, defaults to false
39237func ResourceSparseApplyFtrlV2MultiplyLinearByLr(value bool) ResourceSparseApplyFtrlV2Attr {
39238	return func(m optionalAttr) {
39239		m["multiply_linear_by_lr"] = value
39240	}
39241}
39242
39243// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
39244//
39245// That is, for rows we have grad for, we update var, accum and linear as follows:
39246// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
39247// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
39248// linear += grad_with_shrinkage - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
39252// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
39253// var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
39254// accum = accum_new
39255//
39256// Arguments:
39257//
39258//	var_: Should be from a Variable().
39259//	accum: Should be from a Variable().
39260//	linear: Should be from a Variable().
39261//	grad: The gradient.
39262//	indices: A vector of indices into the first dimension of var and accum.
39263//	lr: Scaling factor. Must be a scalar.
39264//	l1: L1 regularization. Must be a scalar.
39265//	l2: L2 regularization. Must be a scalar.
39266//	l2_shrinkage: L2 shrinkage regularization. Must be a scalar.
39267//	lr_power: Scaling factor. Must be a scalar.
39268//
39269// Returns the created operation.
39270func ResourceSparseApplyFtrlV2(scope *Scope, var_ tf.Output, accum tf.Output, linear tf.Output, grad tf.Output, indices tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, l2_shrinkage tf.Output, lr_power tf.Output, optional ...ResourceSparseApplyFtrlV2Attr) (o *tf.Operation) {
39271	if scope.Err() != nil {
39272		return
39273	}
39274	attrs := map[string]interface{}{}
39275	for _, a := range optional {
39276		a(attrs)
39277	}
39278	opspec := tf.OpSpec{
39279		Type: "ResourceSparseApplyFtrlV2",
39280		Input: []tf.Input{
39281			var_, accum, linear, grad, indices, lr, l1, l2, l2_shrinkage, lr_power,
39282		},
39283		Attrs: attrs,
39284	}
39285	return scope.AddOperation(opspec)
39286}
39287
39288// ResourceSparseApplyKerasMomentumAttr is an optional argument to ResourceSparseApplyKerasMomentum.
39289type ResourceSparseApplyKerasMomentumAttr func(optionalAttr)
39290
39291// ResourceSparseApplyKerasMomentumUseLocking sets the optional use_locking attribute to value.
39292//
39293// value: If `True`, updating of the var and accum tensors will be protected
39294// by a lock; otherwise the behavior is undefined, but may exhibit less
39295// contention.
39296// If not specified, defaults to false
39297func ResourceSparseApplyKerasMomentumUseLocking(value bool) ResourceSparseApplyKerasMomentumAttr {
39298	return func(m optionalAttr) {
39299		m["use_locking"] = value
39300	}
39301}
39302
39303// ResourceSparseApplyKerasMomentumUseNesterov sets the optional use_nesterov attribute to value.
39304//
39305// value: If `True`, the tensor passed to compute grad will be
39306// var + momentum * accum, so in the end, the var you get is actually
39307// var + momentum * accum.
39308// If not specified, defaults to false
39309func ResourceSparseApplyKerasMomentumUseNesterov(value bool) ResourceSparseApplyKerasMomentumAttr {
39310	return func(m optionalAttr) {
39311		m["use_nesterov"] = value
39312	}
39313}
39314
39315// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
39316//
39317// Set use_nesterov = True if you want to use Nesterov momentum.
39318//
39319// That is, for rows we have grad for, we update var and accum as follows:
39320//
39321// accum = accum * momentum - lr * grad
39322// var += accum
39323//
39324// Arguments:
39325//
39326//	var_: Should be from a Variable().
39327//	accum: Should be from a Variable().
39328//	lr: Learning rate. Must be a scalar.
39329//	grad: The gradient.
39330//	indices: A vector of indices into the first dimension of var and accum.
39331//	momentum: Momentum. Must be a scalar.
39332//
39333// Returns the created operation.
39334func ResourceSparseApplyKerasMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyKerasMomentumAttr) (o *tf.Operation) {
39335	if scope.Err() != nil {
39336		return
39337	}
39338	attrs := map[string]interface{}{}
39339	for _, a := range optional {
39340		a(attrs)
39341	}
39342	opspec := tf.OpSpec{
39343		Type: "ResourceSparseApplyKerasMomentum",
39344		Input: []tf.Input{
39345			var_, accum, lr, grad, indices, momentum,
39346		},
39347		Attrs: attrs,
39348	}
39349	return scope.AddOperation(opspec)
39350}
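
// An illustrative sketch (not generated code) of the per-row update documented
// above for ResourceSparseApplyKerasMomentum: the learning rate is applied
// when the gradient is folded into the accumulator.
func kerasMomentumRowUpdate(variable, accum, grad []float64, lr, momentum float64) {
	for i := range variable {
		accum[i] = accum[i]*momentum - lr*grad[i]
		variable[i] += accum[i]
	}
}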
39351
39352// ResourceSparseApplyMomentumAttr is an optional argument to ResourceSparseApplyMomentum.
39353type ResourceSparseApplyMomentumAttr func(optionalAttr)
39354
39355// ResourceSparseApplyMomentumUseLocking sets the optional use_locking attribute to value.
39356//
39357// value: If `True`, updating of the var and accum tensors will be protected
39358// by a lock; otherwise the behavior is undefined, but may exhibit less
39359// contention.
39360// If not specified, defaults to false
39361func ResourceSparseApplyMomentumUseLocking(value bool) ResourceSparseApplyMomentumAttr {
39362	return func(m optionalAttr) {
39363		m["use_locking"] = value
39364	}
39365}
39366
39367// ResourceSparseApplyMomentumUseNesterov sets the optional use_nesterov attribute to value.
39368//
39369// value: If `True`, the tensor passed to compute grad will be
39370// var - lr * momentum * accum, so the value you read back from var is
39371// effectively var - lr * momentum * accum.
39372// If not specified, defaults to false
39373func ResourceSparseApplyMomentumUseNesterov(value bool) ResourceSparseApplyMomentumAttr {
39374	return func(m optionalAttr) {
39375		m["use_nesterov"] = value
39376	}
39377}
39378
39379// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
39380//
39381// Set use_nesterov = True if you want to use Nesterov momentum.
39382//
39383// That is, for rows we have grad for, we update var and accum as follows:
39384//
39385// accum = accum * momentum + grad
39386// var -= lr * accum
39387//
39388// Arguments:
39389//
39390//	var_: Should be from a Variable().
39391//	accum: Should be from a Variable().
39392//	lr: Learning rate. Must be a scalar.
39393//	grad: The gradient.
39394//	indices: A vector of indices into the first dimension of var and accum.
39395//	momentum: Momentum. Must be a scalar.
39396//
39397// Returns the created operation.
39398func ResourceSparseApplyMomentum(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, grad tf.Output, indices tf.Output, momentum tf.Output, optional ...ResourceSparseApplyMomentumAttr) (o *tf.Operation) {
39399	if scope.Err() != nil {
39400		return
39401	}
39402	attrs := map[string]interface{}{}
39403	for _, a := range optional {
39404		a(attrs)
39405	}
39406	opspec := tf.OpSpec{
39407		Type: "ResourceSparseApplyMomentum",
39408		Input: []tf.Input{
39409			var_, accum, lr, grad, indices, momentum,
39410		},
39411		Attrs: attrs,
39412	}
39413	return scope.AddOperation(opspec)
39414}
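
// For contrast with the Keras variant above, an illustrative sketch (not
// generated code) of the classic momentum update used by
// ResourceSparseApplyMomentum: here the learning rate scales the whole
// accumulator at apply time rather than the gradient at accumulate time.
func momentumRowUpdate(variable, accum, grad []float64, lr, momentum float64) {
	for i := range variable {
		accum[i] = accum[i]*momentum + grad[i]
		variable[i] -= lr * accum[i]
	}
}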
39415
39416// ResourceSparseApplyProximalAdagradAttr is an optional argument to ResourceSparseApplyProximalAdagrad.
39417type ResourceSparseApplyProximalAdagradAttr func(optionalAttr)
39418
39419// ResourceSparseApplyProximalAdagradUseLocking sets the optional use_locking attribute to value.
39420//
39421// value: If True, updating of the var and accum tensors will be protected by
39422// a lock; otherwise the behavior is undefined, but may exhibit less contention.
39423// If not specified, defaults to false
39424func ResourceSparseApplyProximalAdagradUseLocking(value bool) ResourceSparseApplyProximalAdagradAttr {
39425	return func(m optionalAttr) {
39426		m["use_locking"] = value
39427	}
39428}
39429
39430// Sparse update entries in '*var' and '*accum' according to the FOBOS algorithm.
39431//
39432// That is, for rows we have grad for, we update var and accum as follows:
39433// accum += grad * grad
39434// prox_v = var
39435// prox_v -= lr * grad * (1 / sqrt(accum))
39436// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
39437//
39438// Arguments:
39439//
39440//	var_: Should be from a Variable().
39441//	accum: Should be from a Variable().
39442//	lr: Learning rate. Must be a scalar.
39443//	l1: L1 regularization. Must be a scalar.
39444//	l2: L2 regularization. Must be a scalar.
39445//	grad: The gradient.
39446//	indices: A vector of indices into the first dimension of var and accum.
39447//
39448// Returns the created operation.
39449func ResourceSparseApplyProximalAdagrad(scope *Scope, var_ tf.Output, accum tf.Output, lr tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalAdagradAttr) (o *tf.Operation) {
39450	if scope.Err() != nil {
39451		return
39452	}
39453	attrs := map[string]interface{}{}
39454	for _, a := range optional {
39455		a(attrs)
39456	}
39457	opspec := tf.OpSpec{
39458		Type: "ResourceSparseApplyProximalAdagrad",
39459		Input: []tf.Input{
39460			var_, accum, lr, l1, l2, grad, indices,
39461		},
39462		Attrs: attrs,
39463	}
39464	return scope.AddOperation(opspec)
39465}
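
// An illustrative sketch (not generated code) of the FOBOS-with-Adagrad row
// update documented above; assumes "math" is imported.
func proximalAdagradRowUpdate(variable, accum, grad []float64, lr, l1, l2 float64) {
	for i := range variable {
		accum[i] += grad[i] * grad[i]
		proxV := variable[i] - lr*grad[i]/math.Sqrt(accum[i])
		// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1, 0}
		shrunk := math.Max(math.Abs(proxV)-lr*l1, 0)
		if proxV < 0 {
			shrunk = -shrunk
		}
		variable[i] = shrunk / (1 + lr*l2)
	}
}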
39466
39467// ResourceSparseApplyProximalGradientDescentAttr is an optional argument to ResourceSparseApplyProximalGradientDescent.
39468type ResourceSparseApplyProximalGradientDescentAttr func(optionalAttr)
39469
39470// ResourceSparseApplyProximalGradientDescentUseLocking sets the optional use_locking attribute to value.
39471//
39472// value: If True, the subtraction will be protected by a lock;
39473// otherwise the behavior is undefined, but may exhibit less contention.
39474// If not specified, defaults to false
39475func ResourceSparseApplyProximalGradientDescentUseLocking(value bool) ResourceSparseApplyProximalGradientDescentAttr {
39476	return func(m optionalAttr) {
39477		m["use_locking"] = value
39478	}
39479}
39480
39481// Sparse update of '*var' using the FOBOS algorithm with a fixed learning rate.
39482//
39483// That is, for rows we have grad for, we update var as follows:
39484// prox_v = var - alpha * grad
39485// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
39486//
39487// Arguments:
39488//
39489//	var_: Should be from a Variable().
39490//	alpha: Scaling factor. Must be a scalar.
39491//	l1: L1 regularization. Must be a scalar.
39492//	l2: L2 regularization. Must be a scalar.
39493//	grad: The gradient.
39494//	indices: A vector of indices into the first dimension of var.
39495//
39496// Returns the created operation.
39497func ResourceSparseApplyProximalGradientDescent(scope *Scope, var_ tf.Output, alpha tf.Output, l1 tf.Output, l2 tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyProximalGradientDescentAttr) (o *tf.Operation) {
39498	if scope.Err() != nil {
39499		return
39500	}
39501	attrs := map[string]interface{}{}
39502	for _, a := range optional {
39503		a(attrs)
39504	}
39505	opspec := tf.OpSpec{
39506		Type: "ResourceSparseApplyProximalGradientDescent",
39507		Input: []tf.Input{
39508			var_, alpha, l1, l2, grad, indices,
39509		},
39510		Attrs: attrs,
39511	}
39512	return scope.AddOperation(opspec)
39513}
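
// The same FOBOS step with a fixed learning rate and no accumulator, matching
// the equations above; again an illustrative sketch assuming "math" is imported.
func proximalSGDRowUpdate(variable, grad []float64, alpha, l1, l2 float64) {
	for i := range variable {
		proxV := variable[i] - alpha*grad[i]
		shrunk := math.Max(math.Abs(proxV)-alpha*l1, 0)
		if proxV < 0 {
			shrunk = -shrunk
		}
		variable[i] = shrunk / (1 + alpha*l2)
	}
}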
39514
39515// ResourceSparseApplyRMSPropAttr is an optional argument to ResourceSparseApplyRMSProp.
39516type ResourceSparseApplyRMSPropAttr func(optionalAttr)
39517
39518// ResourceSparseApplyRMSPropUseLocking sets the optional use_locking attribute to value.
39519//
39520// value: If `True`, updating of the var, ms, and mom tensors is protected
39521// by a lock; otherwise the behavior is undefined, but may exhibit less
39522// contention.
39523// If not specified, defaults to false
39524func ResourceSparseApplyRMSPropUseLocking(value bool) ResourceSparseApplyRMSPropAttr {
39525	return func(m optionalAttr) {
39526		m["use_locking"] = value
39527	}
39528}
39529
39530// Update '*var' according to the RMSProp algorithm.
39531//
39532// Note that in the dense implementation of this algorithm, ms and mom will
39533// update even if the grad is zero, but in this sparse implementation, ms
39534// and mom will not update in iterations during which the grad is zero.
39535//
39536// mean_square = decay * mean_square + (1-decay) * gradient ** 2
39537// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
39538//
39539// ms <- rho * ms_{t-1} + (1-rho) * grad * grad
39540// mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
39541// var <- var - mom
39542//
39543// Arguments:
39544//
39545//	var_: Should be from a Variable().
39546//	ms: Should be from a Variable().
39547//	mom: Should be from a Variable().
39548//	lr: Scaling factor. Must be a scalar.
39549//	rho: Decay rate. Must be a scalar.
39550//	momentum: Momentum. Must be a scalar.
39551//	epsilon: Ridge term. Must be a scalar.
39552//	grad: The gradient.
39553//	indices: A vector of indices into the first dimension of var, ms and mom.
39554//
39555// Returns the created operation.
39556func ResourceSparseApplyRMSProp(scope *Scope, var_ tf.Output, ms tf.Output, mom tf.Output, lr tf.Output, rho tf.Output, momentum tf.Output, epsilon tf.Output, grad tf.Output, indices tf.Output, optional ...ResourceSparseApplyRMSPropAttr) (o *tf.Operation) {
39557	if scope.Err() != nil {
39558		return
39559	}
39560	attrs := map[string]interface{}{}
39561	for _, a := range optional {
39562		a(attrs)
39563	}
39564	opspec := tf.OpSpec{
39565		Type: "ResourceSparseApplyRMSProp",
39566		Input: []tf.Input{
39567			var_, ms, mom, lr, rho, momentum, epsilon, grad, indices,
39568		},
39569		Attrs: attrs,
39570	}
39571	return scope.AddOperation(opspec)
39572}
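
// An illustrative sketch (not generated code) of the sparse RMSProp row update
// documented above; assumes "math" is imported.
func rmsPropRowUpdate(variable, ms, mom, grad []float64, lr, rho, momentum, epsilon float64) {
	for i := range variable {
		ms[i] = rho*ms[i] + (1-rho)*grad[i]*grad[i]
		mom[i] = momentum*mom[i] + lr*grad[i]/math.Sqrt(ms[i]+epsilon)
		variable[i] -= mom[i]
	}
}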
39573
39574// ResourceStridedSliceAssignAttr is an optional argument to ResourceStridedSliceAssign.
39575type ResourceStridedSliceAssignAttr func(optionalAttr)
39576
39577// ResourceStridedSliceAssignBeginMask sets the optional begin_mask attribute to value.
39578// If not specified, defaults to 0
39579func ResourceStridedSliceAssignBeginMask(value int64) ResourceStridedSliceAssignAttr {
39580	return func(m optionalAttr) {
39581		m["begin_mask"] = value
39582	}
39583}
39584
39585// ResourceStridedSliceAssignEndMask sets the optional end_mask attribute to value.
39586// If not specified, defaults to 0
39587func ResourceStridedSliceAssignEndMask(value int64) ResourceStridedSliceAssignAttr {
39588	return func(m optionalAttr) {
39589		m["end_mask"] = value
39590	}
39591}
39592
39593// ResourceStridedSliceAssignEllipsisMask sets the optional ellipsis_mask attribute to value.
39594// If not specified, defaults to 0
39595func ResourceStridedSliceAssignEllipsisMask(value int64) ResourceStridedSliceAssignAttr {
39596	return func(m optionalAttr) {
39597		m["ellipsis_mask"] = value
39598	}
39599}
39600
39601// ResourceStridedSliceAssignNewAxisMask sets the optional new_axis_mask attribute to value.
39602// If not specified, defaults to 0
39603func ResourceStridedSliceAssignNewAxisMask(value int64) ResourceStridedSliceAssignAttr {
39604	return func(m optionalAttr) {
39605		m["new_axis_mask"] = value
39606	}
39607}
39608
39609// ResourceStridedSliceAssignShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
39610// If not specified, defaults to 0
39611func ResourceStridedSliceAssignShrinkAxisMask(value int64) ResourceStridedSliceAssignAttr {
39612	return func(m optionalAttr) {
39613		m["shrink_axis_mask"] = value
39614	}
39615}
39616
39617// Assign `value` to the sliced l-value reference of `ref`.
39618//
39619// The values of `value` are assigned to the positions in the variable
39620// `ref` that are selected by the slice parameters. The slice parameters
39621// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
39622//
39623// NOTE: this op currently does not support broadcasting, so `value`'s
39624// shape must be exactly the shape produced by the slice of `ref`.
39625//
39626// Returns the created operation.
39627func ResourceStridedSliceAssign(scope *Scope, ref tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...ResourceStridedSliceAssignAttr) (o *tf.Operation) {
39628	if scope.Err() != nil {
39629		return
39630	}
39631	attrs := map[string]interface{}{}
39632	for _, a := range optional {
39633		a(attrs)
39634	}
39635	opspec := tf.OpSpec{
39636		Type: "ResourceStridedSliceAssign",
39637		Input: []tf.Input{
39638			ref, begin, end, strides, value,
39639		},
39640		Attrs: attrs,
39641	}
39642	return scope.AddOperation(opspec)
39643}
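
// As a rough 1-D intuition for the op above (an illustrative sketch, not
// generated code), assigning through a strided slice scatters value into
// ref at begin, begin+stride, ..., assuming a positive stride:
func stridedAssign1D(ref []float64, begin, end, stride int, value []float64) {
	j := 0
	for i := begin; i < end; i += stride {
		ref[i] = value[j] // no broadcasting: len(value) must equal the slice length
		j++
	}
}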
39644
39645// RestoreAttr is an optional argument to Restore.
39646type RestoreAttr func(optionalAttr)
39647
39648// RestorePreferredShard sets the optional preferred_shard attribute to value.
39649//
39650// value: Index of file to open first if multiple files match
39651// `file_pattern`.
39652// If not specified, defaults to -1
39653func RestorePreferredShard(value int64) RestoreAttr {
39654	return func(m optionalAttr) {
39655		m["preferred_shard"] = value
39656	}
39657}
39658
39659// Restores a tensor from checkpoint files.
39660//
39661// Reads a tensor stored in one or several files. If there are several files (for
39662// instance because a tensor was saved as slices), `file_pattern` may contain
39663// wildcard symbols (`*` and `?`) in the filename portion only, not in the
39664// directory portion.
39665//
39666// If a `file_pattern` matches several files, `preferred_shard` can be used to hint
39667// in which file the requested tensor is likely to be found. This op will first
39668// open the file at index `preferred_shard` in the list of matching files and try
39669// to restore tensors from that file.  Only if some tensors or tensor slices are
39670// not found in that first file, then the Op opens all the files. Setting
39671// `preferred_shard` to match the value passed as the `shard` input
39672// of a matching `Save` Op may speed up Restore.  This attribute only affects
39673// performance, not correctness.  The default value -1 means files are processed in
39674// order.
39675//
39676// See also `RestoreSlice`.
39677//
39678// Arguments:
39679//
39680//	file_pattern: Must have a single element. The pattern of the files from
39681//
39682// which we read the tensor.
39683//
39684//	tensor_name: Must have a single element. The name of the tensor to be
39685//
39686// restored.
39687//
39688//	dt: The type of the tensor to be restored.
39689//
39690// Returns The restored tensor.
39691func Restore(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, dt tf.DataType, optional ...RestoreAttr) (tensor tf.Output) {
39692	if scope.Err() != nil {
39693		return
39694	}
39695	attrs := map[string]interface{}{"dt": dt}
39696	for _, a := range optional {
39697		a(attrs)
39698	}
39699	opspec := tf.OpSpec{
39700		Type: "Restore",
39701		Input: []tf.Input{
39702			file_pattern, tensor_name,
39703		},
39704		Attrs: attrs,
39705	}
39706	op := scope.AddOperation(opspec)
39707	return op.Output(0)
39708}
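
// A hypothetical graph-construction sketch for Restore; the checkpoint
// pattern, tensor name and shard index below are placeholders:
func exampleRestoreGraph() (*tf.Graph, error) {
	s := NewScope()
	filePattern := Const(s, "ckpt/model.ckpt-*") // placeholder pattern
	tensorName := Const(s, "weights")            // placeholder tensor name
	restored := Restore(s, filePattern, tensorName, tf.Float,
		RestorePreferredShard(0)) // hint: look in shard 0 first
	_ = restored
	return s.Finalize()
}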
39709
39710// RestoreSliceAttr is an optional argument to RestoreSlice.
39711type RestoreSliceAttr func(optionalAttr)
39712
39713// RestoreSlicePreferredShard sets the optional preferred_shard attribute to value.
39714//
39715// value: Index of file to open first if multiple files match
39716// `file_pattern`. See the documentation for `Restore`.
39717// If not specified, defaults to -1
39718func RestoreSlicePreferredShard(value int64) RestoreSliceAttr {
39719	return func(m optionalAttr) {
39720		m["preferred_shard"] = value
39721	}
39722}
39723
39724// Restores a tensor from checkpoint files.
39725//
39726// This is like `Restore` except that the restored tensor can be listed as filling
39727// only a slice of a larger tensor.  `shape_and_slice` specifies the shape of the
39728// larger tensor and the slice that the restored tensor covers.
39729//
39730// The `shape_and_slice` input has the same format as the
39731// elements of the `shapes_and_slices` input of the `SaveSlices` op.
39732//
39733// Arguments:
39734//
39735//	file_pattern: Must have a single element. The pattern of the files from
39736//
39737// which we read the tensor.
39738//
39739//	tensor_name: Must have a single element. The name of the tensor to be
39740//
39741// restored.
39742//
39743//	shape_and_slice: Scalar. The shapes and slice specifications to use when
39744//
39745// restoring the tensor.
39746//
39747//	dt: The type of the tensor to be restored.
39748//
39749// Returns The restored tensor.
39750func RestoreSlice(scope *Scope, file_pattern tf.Output, tensor_name tf.Output, shape_and_slice tf.Output, dt tf.DataType, optional ...RestoreSliceAttr) (tensor tf.Output) {
39751	if scope.Err() != nil {
39752		return
39753	}
39754	attrs := map[string]interface{}{"dt": dt}
39755	for _, a := range optional {
39756		a(attrs)
39757	}
39758	opspec := tf.OpSpec{
39759		Type: "RestoreSlice",
39760		Input: []tf.Input{
39761			file_pattern, tensor_name, shape_and_slice,
39762		},
39763		Attrs: attrs,
39764	}
39765	op := scope.AddOperation(opspec)
39766	return op.Output(0)
39767}
39768
39769// Restores tensors from a V2 checkpoint.
39770//
39771// For backward compatibility with the V1 format, this Op currently allows
39772// restoring from a V1 checkpoint as well:
39773//   - This Op first attempts to find the V2 index file pointed to by "prefix", and
39774//     if found proceed to read it as a V2 checkpoint;
39775//     if found, proceeds to read it as a V2 checkpoint;
39776//
39777// Relying on this behavior is not recommended, as the ability to fall back to read
39778// V1 might be deprecated and eventually removed.
39779//
39780// By default, restores the named tensors in full.  If the caller wishes to restore
39781// specific slices of stored tensors, "shape_and_slices" should be non-empty
39782// strings and correspondingly well-formed.
39783//
39784// Callers must ensure all the named tensors are indeed stored in the checkpoint.
39785//
39786// Arguments:
39787//
39788//	prefix: Must have a single element.  The prefix of a V2 checkpoint.
39789//	tensor_names: shape {N}.  The names of the tensors to be restored.
39790//	shape_and_slices: shape {N}.  The slice specs of the tensors to be restored.
39791//
39792// Empty strings indicate that they are non-partitioned tensors.
39793//
39794//	dtypes: shape {N}.  The list of expected dtype for the tensors.  Must match
39795//
39796// those stored in the checkpoint.
39797//
39798// Returns shape {N}.  The restored tensors, whose shapes are read from the
39799// checkpoint directly.
39800func RestoreV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, dtypes []tf.DataType) (tensors []tf.Output) {
39801	if scope.Err() != nil {
39802		return
39803	}
39804	attrs := map[string]interface{}{"dtypes": dtypes}
39805	opspec := tf.OpSpec{
39806		Type: "RestoreV2",
39807		Input: []tf.Input{
39808			prefix, tensor_names, shape_and_slices,
39809		},
39810		Attrs: attrs,
39811	}
39812	op := scope.AddOperation(opspec)
39813	if scope.Err() != nil {
39814		return
39815	}
39816	var idx int
39817	var err error
39818	if tensors, idx, err = makeOutputList(op, idx, "tensors"); err != nil {
39819		scope.UpdateErr("RestoreV2", err)
39820		return
39821	}
39822	return tensors
39823}
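
// A hypothetical sketch of wiring up RestoreV2; the prefix, tensor names and
// dtypes below are placeholders, and empty slice specs request full tensors:
func exampleRestoreV2Graph() (*tf.Graph, error) {
	s := NewScope()
	prefix := Const(s, "ckpt/model.ckpt") // placeholder V2 checkpoint prefix
	names := Const(s, []string{"w", "b"})
	sliceSpecs := Const(s, []string{"", ""}) // "" = restore each tensor in full
	tensors := RestoreV2(s, prefix, names, sliceSpecs,
		[]tf.DataType{tf.Float, tf.Float})
	_ = tensors
	return s.Finalize()
}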
39824
39825// An op that retrieves optimization parameters from embedding to host memory.
39826//
39828// Must be preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
39829// embedding table configuration. For example, this op is used to retrieve updated
39830// parameters before saving a checkpoint.  For Adagrad, auxiliary1 will contain the
39831// accumulators after running this op. For SGD, all of the auxiliary* values will
39832// be empty (0x0 tensors for that table). For FTRL, auxiliary1 will contain the
39833// accumulators and auxiliary2 will contain the linear terms. For ADAM, auxiliary1
39834// will contain the momenta and auxiliary2 will contain the velocities.
39835//
39836// Arguments:
39837//
39838//	NumTables: The number of embedding tables.
39839//	config: A TPUEmbeddingConfiguration proto describing the
39840//
39841// table parameters being loaded, serialized to a string.
39842//
39843//	num_shards: Number of shards into which the embedding tables are divided.
39844//	shard_id: Identifier of shard for this operation.
39845//
39846// Returns:
39847//
39848//	parameters:  A list of tensors, one for each embedding table, containing the
39849//
39850// stored embedding table parameters.
39851//
39852//	auxiliary1: A list of tensors, one for each embedding table, containing the
39853//
39854// first auxiliary optimization parameter stored. Elements are
39855// present in the list, but have zero size, for unused optimization parameters
39856// (based on the algorithm in use for each table).
39857//
39858//	auxiliary2: A list of tensors, one for each embedding table, containing the
39859//
39860// second auxiliary optimization parameter stored. Elements are
39861// present in the list, but have zero size, for unused optimization parameters
39862// (based on the algorithm in use for each table).
39863//
39864//	auxiliary3: A list of tensors, one for each embedding table, containing the
39865//
39866// third auxiliary optimization parameter stored. Elements are
39867// present in the list, but have zero size, for unused optimization parameters
39868// (based on the algorithm in use for each table).
39869//
39870//	auxiliary4: A list of tensors, one for each embedding table, containing the
39871//
39872// fourth auxiliary optimization parameter stored. Elements are
39873// present in the list, but have zero size, for unused optimization parameters
39874// (based on the algorithm in use for each table).
39875//
39876//	auxiliary5: A list of tensors, one for each embedding table, containing the
39877//
39878// fifth auxiliary optimization parameter stored. Elements are
39879// present in the list, but have zero size, for unused optimization parameters
39880// (based on the algorithm in use for each table).
39881//
39882//	auxiliary6: A list of tensors, one for each embedding table, containing the
39883//
39884// sixth auxiliary optimization parameter stored. Elements are
39885// present in the list, but have zero size, for unused optimization parameters
39886// (based on the algorithm in use for each table).
39887//
39888//	auxiliary7: A list of tensors, one for each embedding table, containing the
39889//
39890// seventh auxiliary optimization parameter stored. Elements are
39891// present in the list, but have zero size, for unused optimization parameters
39892// (based on the algorithm in use for each table).
39893func RetrieveAllTPUEmbeddingParameters(scope *Scope, NumTables int64, config string, num_shards int64, shard_id int64) (parameters []tf.Output, auxiliary1 []tf.Output, auxiliary2 []tf.Output, auxiliary3 []tf.Output, auxiliary4 []tf.Output, auxiliary5 []tf.Output, auxiliary6 []tf.Output, auxiliary7 []tf.Output) {
39894	if scope.Err() != nil {
39895		return
39896	}
39897	attrs := map[string]interface{}{"NumTables": NumTables, "config": config, "num_shards": num_shards, "shard_id": shard_id}
39898	opspec := tf.OpSpec{
39899		Type: "RetrieveAllTPUEmbeddingParameters",
39900
39901		Attrs: attrs,
39902	}
39903	op := scope.AddOperation(opspec)
39904	if scope.Err() != nil {
39905		return
39906	}
39907	var idx int
39908	var err error
39909	if parameters, idx, err = makeOutputList(op, idx, "parameters"); err != nil {
39910		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39911		return
39912	}
39913	if auxiliary1, idx, err = makeOutputList(op, idx, "auxiliary1"); err != nil {
39914		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39915		return
39916	}
39917	if auxiliary2, idx, err = makeOutputList(op, idx, "auxiliary2"); err != nil {
39918		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39919		return
39920	}
39921	if auxiliary3, idx, err = makeOutputList(op, idx, "auxiliary3"); err != nil {
39922		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39923		return
39924	}
39925	if auxiliary4, idx, err = makeOutputList(op, idx, "auxiliary4"); err != nil {
39926		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39927		return
39928	}
39929	if auxiliary5, idx, err = makeOutputList(op, idx, "auxiliary5"); err != nil {
39930		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39931		return
39932	}
39933	if auxiliary6, idx, err = makeOutputList(op, idx, "auxiliary6"); err != nil {
39934		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39935		return
39936	}
39937	if auxiliary7, idx, err = makeOutputList(op, idx, "auxiliary7"); err != nil {
39938		scope.UpdateErr("RetrieveAllTPUEmbeddingParameters", err)
39939		return
39940	}
39941	return parameters, auxiliary1, auxiliary2, auxiliary3, auxiliary4, auxiliary5, auxiliary6, auxiliary7
39942}
39943
39944// RetrieveTPUEmbeddingADAMParametersAttr is an optional argument to RetrieveTPUEmbeddingADAMParameters.
39945type RetrieveTPUEmbeddingADAMParametersAttr func(optionalAttr)
39946
39947// RetrieveTPUEmbeddingADAMParametersTableId sets the optional table_id attribute to value.
39948// If not specified, defaults to -1
39949func RetrieveTPUEmbeddingADAMParametersTableId(value int64) RetrieveTPUEmbeddingADAMParametersAttr {
39950	return func(m optionalAttr) {
39951		m["table_id"] = value
39952	}
39953}
39954
39955// RetrieveTPUEmbeddingADAMParametersTableName sets the optional table_name attribute to value.
39956// If not specified, defaults to ""
39957func RetrieveTPUEmbeddingADAMParametersTableName(value string) RetrieveTPUEmbeddingADAMParametersAttr {
39958	return func(m optionalAttr) {
39959		m["table_name"] = value
39960	}
39961}
39962
39963// RetrieveTPUEmbeddingADAMParametersConfig sets the optional config attribute to value.
39964// If not specified, defaults to ""
39965func RetrieveTPUEmbeddingADAMParametersConfig(value string) RetrieveTPUEmbeddingADAMParametersAttr {
39966	return func(m optionalAttr) {
39967		m["config"] = value
39968	}
39969}
39970
39971// Retrieve ADAM embedding parameters.
39972//
39973// An op that retrieves optimization parameters from embedding to host
39974// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
39975// the correct embedding table configuration. For example, this op is
39976// used to retrieve updated parameters before saving a checkpoint.
39977//
39978// Returns:
39979//
39980//	parameters: Parameter parameters updated by the ADAM optimization algorithm.
39981//	momenta: Parameter momenta updated by the ADAM optimization algorithm.
39982//	velocities: Parameter velocities updated by the ADAM optimization algorithm.
39983func RetrieveTPUEmbeddingADAMParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingADAMParametersAttr) (parameters tf.Output, momenta tf.Output, velocities tf.Output) {
39984	if scope.Err() != nil {
39985		return
39986	}
39987	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
39988	for _, a := range optional {
39989		a(attrs)
39990	}
39991	opspec := tf.OpSpec{
39992		Type: "RetrieveTPUEmbeddingADAMParameters",
39993
39994		Attrs: attrs,
39995	}
39996	op := scope.AddOperation(opspec)
39997	return op.Output(0), op.Output(1), op.Output(2)
39998}
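
// A hypothetical sketch of the retrieval pattern shared by the
// RetrieveTPUEmbedding* ops in this file, using the ADAM variant; the table
// name and shard layout are placeholders, and a real program must first have
// configured the TPU embedding system:
func exampleRetrieveADAMGraph() (*tf.Graph, error) {
	s := NewScope()
	numShards, shardID := int64(1), int64(0) // placeholder shard layout
	parameters, momenta, velocities := RetrieveTPUEmbeddingADAMParameters(s,
		numShards, shardID,
		RetrieveTPUEmbeddingADAMParametersTableName("table_0"))
	_, _, _ = parameters, momenta, velocities
	return s.Finalize()
}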
39999
40000// RetrieveTPUEmbeddingAdadeltaParametersAttr is an optional argument to RetrieveTPUEmbeddingAdadeltaParameters.
40001type RetrieveTPUEmbeddingAdadeltaParametersAttr func(optionalAttr)
40002
40003// RetrieveTPUEmbeddingAdadeltaParametersTableId sets the optional table_id attribute to value.
40004// If not specified, defaults to -1
40005func RetrieveTPUEmbeddingAdadeltaParametersTableId(value int64) RetrieveTPUEmbeddingAdadeltaParametersAttr {
40006	return func(m optionalAttr) {
40007		m["table_id"] = value
40008	}
40009}
40010
40011// RetrieveTPUEmbeddingAdadeltaParametersTableName sets the optional table_name attribute to value.
40012// If not specified, defaults to ""
40013func RetrieveTPUEmbeddingAdadeltaParametersTableName(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
40014	return func(m optionalAttr) {
40015		m["table_name"] = value
40016	}
40017}
40018
40019// RetrieveTPUEmbeddingAdadeltaParametersConfig sets the optional config attribute to value.
40020// If not specified, defaults to ""
40021func RetrieveTPUEmbeddingAdadeltaParametersConfig(value string) RetrieveTPUEmbeddingAdadeltaParametersAttr {
40022	return func(m optionalAttr) {
40023		m["config"] = value
40024	}
40025}
40026
40027// Retrieve Adadelta embedding parameters.
40028//
40029// An op that retrieves optimization parameters from embedding to host
40030// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40031// the correct embedding table configuration. For example, this op is
40032// used to retrieve updated parameters before saving a checkpoint.
40033//
40034// Returns:
40035//
40036//	parameters: Parameter parameters updated by the Adadelta optimization algorithm.
40037//	accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
40038//	updates: Parameter updates updated by the Adadelta optimization algorithm.
40039func RetrieveTPUEmbeddingAdadeltaParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdadeltaParametersAttr) (parameters tf.Output, accumulators tf.Output, updates tf.Output) {
40040	if scope.Err() != nil {
40041		return
40042	}
40043	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40044	for _, a := range optional {
40045		a(attrs)
40046	}
40047	opspec := tf.OpSpec{
40048		Type: "RetrieveTPUEmbeddingAdadeltaParameters",
40049
40050		Attrs: attrs,
40051	}
40052	op := scope.AddOperation(opspec)
40053	return op.Output(0), op.Output(1), op.Output(2)
40054}
40055
40056// RetrieveTPUEmbeddingAdagradMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradMomentumParameters.
40057type RetrieveTPUEmbeddingAdagradMomentumParametersAttr func(optionalAttr)
40058
40059// RetrieveTPUEmbeddingAdagradMomentumParametersTableId sets the optional table_id attribute to value.
40060// If not specified, defaults to -1
40061func RetrieveTPUEmbeddingAdagradMomentumParametersTableId(value int64) RetrieveTPUEmbeddingAdagradMomentumParametersAttr {
40062	return func(m optionalAttr) {
40063		m["table_id"] = value
40064	}
40065}
40066
40067// RetrieveTPUEmbeddingAdagradMomentumParametersTableName sets the optional table_name attribute to value.
40068// If not specified, defaults to ""
40069func RetrieveTPUEmbeddingAdagradMomentumParametersTableName(value string) RetrieveTPUEmbeddingAdagradMomentumParametersAttr {
40070	return func(m optionalAttr) {
40071		m["table_name"] = value
40072	}
40073}
40074
40075// RetrieveTPUEmbeddingAdagradMomentumParametersConfig sets the optional config attribute to value.
40076// If not specified, defaults to ""
40077func RetrieveTPUEmbeddingAdagradMomentumParametersConfig(value string) RetrieveTPUEmbeddingAdagradMomentumParametersAttr {
40078	return func(m optionalAttr) {
40079		m["config"] = value
40080	}
40081}
40082
40083// Retrieve Adagrad Momentum embedding parameters.
40084//
40085// An op that retrieves optimization parameters from embedding to host
40086// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40087// the correct embedding table configuration. For example, this op is
40088// used to retrieve updated parameters before saving a checkpoint.
40089//
40090// Returns:
40091//
40092//	parameters: Parameter parameters updated by the Adagrad Momentum optimization algorithm.
40093//	accumulators: Parameter accumulators updated by the Adagrad Momentum optimization algorithm.
40094//	momenta: Parameter momenta updated by the Adagrad Momentum optimization algorithm.
40095func RetrieveTPUEmbeddingAdagradMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradMomentumParametersAttr) (parameters tf.Output, accumulators tf.Output, momenta tf.Output) {
40096	if scope.Err() != nil {
40097		return
40098	}
40099	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40100	for _, a := range optional {
40101		a(attrs)
40102	}
40103	opspec := tf.OpSpec{
40104		Type: "RetrieveTPUEmbeddingAdagradMomentumParameters",
40105
40106		Attrs: attrs,
40107	}
40108	op := scope.AddOperation(opspec)
40109	return op.Output(0), op.Output(1), op.Output(2)
40110}
40111
40112// RetrieveTPUEmbeddingAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingAdagradParameters.
40113type RetrieveTPUEmbeddingAdagradParametersAttr func(optionalAttr)
40114
40115// RetrieveTPUEmbeddingAdagradParametersTableId sets the optional table_id attribute to value.
40116// If not specified, defaults to -1
40117func RetrieveTPUEmbeddingAdagradParametersTableId(value int64) RetrieveTPUEmbeddingAdagradParametersAttr {
40118	return func(m optionalAttr) {
40119		m["table_id"] = value
40120	}
40121}
40122
40123// RetrieveTPUEmbeddingAdagradParametersTableName sets the optional table_name attribute to value.
40124// If not specified, defaults to ""
40125func RetrieveTPUEmbeddingAdagradParametersTableName(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
40126	return func(m optionalAttr) {
40127		m["table_name"] = value
40128	}
40129}
40130
40131// RetrieveTPUEmbeddingAdagradParametersConfig sets the optional config attribute to value.
40132// If not specified, defaults to ""
40133func RetrieveTPUEmbeddingAdagradParametersConfig(value string) RetrieveTPUEmbeddingAdagradParametersAttr {
40134	return func(m optionalAttr) {
40135		m["config"] = value
40136	}
40137}
40138
40139// Retrieve Adagrad embedding parameters.
40140//
40141// An op that retrieves optimization parameters from embedding to host
40142// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40143// the correct embedding table configuration. For example, this op is
40144// used to retrieve updated parameters before saving a checkpoint.
40145//
40146// Returns:
40147//
40148//	parameters: Parameter parameters updated by the Adagrad optimization algorithm.
40149//	accumulators: Parameter accumulators updated by the Adagrad optimization algorithm.
40150func RetrieveTPUEmbeddingAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
40151	if scope.Err() != nil {
40152		return
40153	}
40154	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40155	for _, a := range optional {
40156		a(attrs)
40157	}
40158	opspec := tf.OpSpec{
40159		Type: "RetrieveTPUEmbeddingAdagradParameters",
40160
40161		Attrs: attrs,
40162	}
40163	op := scope.AddOperation(opspec)
40164	return op.Output(0), op.Output(1)
40165}
40166
40167// RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
40168type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
40169
40170// RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
40171// If not specified, defaults to -1
40172func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
40173	return func(m optionalAttr) {
40174		m["table_id"] = value
40175	}
40176}
40177
40178// RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
40179// If not specified, defaults to ""
40180func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
40181	return func(m optionalAttr) {
40182		m["table_name"] = value
40183	}
40184}
40185
40186// RetrieveTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
40187// If not specified, defaults to ""
40188func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
40189	return func(m optionalAttr) {
40190		m["config"] = value
40191	}
40192}
40193
40194// Retrieve centered RMSProp embedding parameters.
40195//
40196// An op that retrieves optimization parameters from embedding to host
40197// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40198// the correct embedding table configuration. For example, this op is
40199// used to retrieve updated parameters before saving a checkpoint.
40200//
40201// Returns:
40202//
40203//	parameters: Parameter parameters updated by the centered RMSProp optimization algorithm.
40204//	ms: Parameter ms updated by the centered RMSProp optimization algorithm.
40205//	mom: Parameter mom updated by the centered RMSProp optimization algorithm.
40206//	mg: Parameter mg updated by the centered RMSProp optimization algorithm.
40207func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output) {
40208	if scope.Err() != nil {
40209		return
40210	}
40211	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40212	for _, a := range optional {
40213		a(attrs)
40214	}
40215	opspec := tf.OpSpec{
40216		Type: "RetrieveTPUEmbeddingCenteredRMSPropParameters",
40217
40218		Attrs: attrs,
40219	}
40220	op := scope.AddOperation(opspec)
40221	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
40222}
40223
40224// RetrieveTPUEmbeddingFTRLParametersAttr is an optional argument to RetrieveTPUEmbeddingFTRLParameters.
40225type RetrieveTPUEmbeddingFTRLParametersAttr func(optionalAttr)
40226
40227// RetrieveTPUEmbeddingFTRLParametersTableId sets the optional table_id attribute to value.
40228// If not specified, defaults to -1
40229func RetrieveTPUEmbeddingFTRLParametersTableId(value int64) RetrieveTPUEmbeddingFTRLParametersAttr {
40230	return func(m optionalAttr) {
40231		m["table_id"] = value
40232	}
40233}
40234
40235// RetrieveTPUEmbeddingFTRLParametersTableName sets the optional table_name attribute to value.
40236// If not specified, defaults to ""
40237func RetrieveTPUEmbeddingFTRLParametersTableName(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
40238	return func(m optionalAttr) {
40239		m["table_name"] = value
40240	}
40241}
40242
40243// RetrieveTPUEmbeddingFTRLParametersConfig sets the optional config attribute to value.
40244// If not specified, defaults to ""
40245func RetrieveTPUEmbeddingFTRLParametersConfig(value string) RetrieveTPUEmbeddingFTRLParametersAttr {
40246	return func(m optionalAttr) {
40247		m["config"] = value
40248	}
40249}
40250
40251// Retrieve FTRL embedding parameters.
40252//
40253// An op that retrieves optimization parameters from embedding to host
40254// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40255// the correct embedding table configuration. For example, this op is
40256// used to retrieve updated parameters before saving a checkpoint.
40257//
40258// Returns:
40259//
40260//	parameters: Parameter parameters updated by the FTRL optimization algorithm.
40261//	accumulators: Parameter accumulators updated by the FTRL optimization algorithm.
40262//	linears: Parameter linears updated by the FTRL optimization algorithm.
40263func RetrieveTPUEmbeddingFTRLParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFTRLParametersAttr) (parameters tf.Output, accumulators tf.Output, linears tf.Output) {
40264	if scope.Err() != nil {
40265		return
40266	}
40267	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40268	for _, a := range optional {
40269		a(attrs)
40270	}
40271	opspec := tf.OpSpec{
40272		Type: "RetrieveTPUEmbeddingFTRLParameters",
40273
40274		Attrs: attrs,
40275	}
40276	op := scope.AddOperation(opspec)
40277	return op.Output(0), op.Output(1), op.Output(2)
40278}
40279
40280// RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr is an optional argument to RetrieveTPUEmbeddingFrequencyEstimatorParameters.
40281type RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr func(optionalAttr)
40282
40283// RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId sets the optional table_id attribute to value.
40284// If not specified, defaults to -1
40285func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableId(value int64) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
40286	return func(m optionalAttr) {
40287		m["table_id"] = value
40288	}
40289}
40290
40291// RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName sets the optional table_name attribute to value.
40292// If not specified, defaults to ""
40293func RetrieveTPUEmbeddingFrequencyEstimatorParametersTableName(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
40294	return func(m optionalAttr) {
40295		m["table_name"] = value
40296	}
40297}
40298
40299// RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig sets the optional config attribute to value.
40300// If not specified, defaults to ""
40301func RetrieveTPUEmbeddingFrequencyEstimatorParametersConfig(value string) RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr {
40302	return func(m optionalAttr) {
40303		m["config"] = value
40304	}
40305}
40306
40307// Retrieve frequency estimator embedding parameters.
40308//
40309// An op that retrieves optimization parameters from embedding to host
40310// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40311// the correct embedding table configuration. For example, this op is
40312// used to retrieve updated parameters before saving a checkpoint.
40313//
40314// Returns:
40315//
40316//	parameters: Parameter parameters updated by the frequency estimator optimization algorithm.
40317//	last_hit_step: Parameter last_hit_step updated by the frequency estimator optimization
40318//
40319// algorithm.
40320func RetrieveTPUEmbeddingFrequencyEstimatorParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingFrequencyEstimatorParametersAttr) (parameters tf.Output, last_hit_step tf.Output) {
40321	if scope.Err() != nil {
40322		return
40323	}
40324	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40325	for _, a := range optional {
40326		a(attrs)
40327	}
40328	opspec := tf.OpSpec{
40329		Type: "RetrieveTPUEmbeddingFrequencyEstimatorParameters",
40330
40331		Attrs: attrs,
40332	}
40333	op := scope.AddOperation(opspec)
40334	return op.Output(0), op.Output(1)
40335}
40336
40337// RetrieveTPUEmbeddingMDLAdagradLightParametersAttr is an optional argument to RetrieveTPUEmbeddingMDLAdagradLightParameters.
40338type RetrieveTPUEmbeddingMDLAdagradLightParametersAttr func(optionalAttr)
40339
40340// RetrieveTPUEmbeddingMDLAdagradLightParametersTableId sets the optional table_id attribute to value.
40341// If not specified, defaults to -1
40342func RetrieveTPUEmbeddingMDLAdagradLightParametersTableId(value int64) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
40343	return func(m optionalAttr) {
40344		m["table_id"] = value
40345	}
40346}
40347
40348// RetrieveTPUEmbeddingMDLAdagradLightParametersTableName sets the optional table_name attribute to value.
40349// If not specified, defaults to ""
40350func RetrieveTPUEmbeddingMDLAdagradLightParametersTableName(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
40351	return func(m optionalAttr) {
40352		m["table_name"] = value
40353	}
40354}
40355
40356// RetrieveTPUEmbeddingMDLAdagradLightParametersConfig sets the optional config attribute to value.
40357// If not specified, defaults to ""
40358func RetrieveTPUEmbeddingMDLAdagradLightParametersConfig(value string) RetrieveTPUEmbeddingMDLAdagradLightParametersAttr {
40359	return func(m optionalAttr) {
40360		m["config"] = value
40361	}
40362}
40363
40364// Retrieve MDL Adagrad Light embedding parameters.
40365//
40366// An op that retrieves optimization parameters from embedding to host
40367// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40368// the correct embedding table configuration. For example, this op is
40369// used to retrieve updated parameters before saving a checkpoint.
40370//
40371// Returns:
40372//
40373//	parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm.
40374//	accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.
40375//	weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm.
40376//	benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm.
40377func RetrieveTPUEmbeddingMDLAdagradLightParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMDLAdagradLightParametersAttr) (parameters tf.Output, accumulators tf.Output, weights tf.Output, benefits tf.Output) {
40378	if scope.Err() != nil {
40379		return
40380	}
40381	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40382	for _, a := range optional {
40383		a(attrs)
40384	}
40385	opspec := tf.OpSpec{
40386		Type: "RetrieveTPUEmbeddingMDLAdagradLightParameters",
40387
40388		Attrs: attrs,
40389	}
40390	op := scope.AddOperation(opspec)
40391	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
40392}
40393
40394// RetrieveTPUEmbeddingMomentumParametersAttr is an optional argument to RetrieveTPUEmbeddingMomentumParameters.
40395type RetrieveTPUEmbeddingMomentumParametersAttr func(optionalAttr)
40396
40397// RetrieveTPUEmbeddingMomentumParametersTableId sets the optional table_id attribute to value.
40398// If not specified, defaults to -1
40399func RetrieveTPUEmbeddingMomentumParametersTableId(value int64) RetrieveTPUEmbeddingMomentumParametersAttr {
40400	return func(m optionalAttr) {
40401		m["table_id"] = value
40402	}
40403}
40404
40405// RetrieveTPUEmbeddingMomentumParametersTableName sets the optional table_name attribute to value.
40406// If not specified, defaults to ""
40407func RetrieveTPUEmbeddingMomentumParametersTableName(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
40408	return func(m optionalAttr) {
40409		m["table_name"] = value
40410	}
40411}
40412
40413// RetrieveTPUEmbeddingMomentumParametersConfig sets the optional config attribute to value.
40414// If not specified, defaults to ""
40415func RetrieveTPUEmbeddingMomentumParametersConfig(value string) RetrieveTPUEmbeddingMomentumParametersAttr {
40416	return func(m optionalAttr) {
40417		m["config"] = value
40418	}
40419}
40420
40421// Retrieve Momentum embedding parameters.
40422//
40423// An op that retrieves optimization parameters from embedding to host
40424// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40425// the correct embedding table configuration. For example, this op is
40426// used to retrieve updated parameters before saving a checkpoint.
40427//
40428// Returns:
40429//
40430//	parameters: Parameter parameters updated by the Momentum optimization algorithm.
40431//	momenta: Parameter momenta updated by the Momentum optimization algorithm.
40432func RetrieveTPUEmbeddingMomentumParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingMomentumParametersAttr) (parameters tf.Output, momenta tf.Output) {
40433	if scope.Err() != nil {
40434		return
40435	}
40436	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40437	for _, a := range optional {
40438		a(attrs)
40439	}
40440	opspec := tf.OpSpec{
40441		Type: "RetrieveTPUEmbeddingMomentumParameters",
40442
40443		Attrs: attrs,
40444	}
40445	op := scope.AddOperation(opspec)
40446	return op.Output(0), op.Output(1)
40447}
40448
40449// RetrieveTPUEmbeddingProximalAdagradParametersAttr is an optional argument to RetrieveTPUEmbeddingProximalAdagradParameters.
40450type RetrieveTPUEmbeddingProximalAdagradParametersAttr func(optionalAttr)
40451
40452// RetrieveTPUEmbeddingProximalAdagradParametersTableId sets the optional table_id attribute to value.
40453// If not specified, defaults to -1
40454func RetrieveTPUEmbeddingProximalAdagradParametersTableId(value int64) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
40455	return func(m optionalAttr) {
40456		m["table_id"] = value
40457	}
40458}
40459
40460// RetrieveTPUEmbeddingProximalAdagradParametersTableName sets the optional table_name attribute to value.
40461// If not specified, defaults to ""
40462func RetrieveTPUEmbeddingProximalAdagradParametersTableName(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
40463	return func(m optionalAttr) {
40464		m["table_name"] = value
40465	}
40466}
40467
40468// RetrieveTPUEmbeddingProximalAdagradParametersConfig sets the optional config attribute to value.
40469// If not specified, defaults to ""
40470func RetrieveTPUEmbeddingProximalAdagradParametersConfig(value string) RetrieveTPUEmbeddingProximalAdagradParametersAttr {
40471	return func(m optionalAttr) {
40472		m["config"] = value
40473	}
40474}
40475
40476// Retrieve proximal Adagrad embedding parameters.
40477//
40478// An op that retrieves optimization parameters from embedding to host
40479// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40480// the correct embedding table configuration. For example, this op is
40481// used to retrieve updated parameters before saving a checkpoint.
40482//
40483// Returns:
40484//
40485//	parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm.
40486//	accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm.
40487func RetrieveTPUEmbeddingProximalAdagradParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingProximalAdagradParametersAttr) (parameters tf.Output, accumulators tf.Output) {
40488	if scope.Err() != nil {
40489		return
40490	}
40491	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40492	for _, a := range optional {
40493		a(attrs)
40494	}
40495	opspec := tf.OpSpec{
40496		Type: "RetrieveTPUEmbeddingProximalAdagradParameters",
40497
40498		Attrs: attrs,
40499	}
40500	op := scope.AddOperation(opspec)
40501	return op.Output(0), op.Output(1)
40502}
40503
40504// RetrieveTPUEmbeddingRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingRMSPropParameters.
40505type RetrieveTPUEmbeddingRMSPropParametersAttr func(optionalAttr)
40506
40507// RetrieveTPUEmbeddingRMSPropParametersTableId sets the optional table_id attribute to value.
40508// If not specified, defaults to -1
40509func RetrieveTPUEmbeddingRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingRMSPropParametersAttr {
40510	return func(m optionalAttr) {
40511		m["table_id"] = value
40512	}
40513}
40514
40515// RetrieveTPUEmbeddingRMSPropParametersTableName sets the optional table_name attribute to value.
40516// If not specified, defaults to ""
40517func RetrieveTPUEmbeddingRMSPropParametersTableName(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
40518	return func(m optionalAttr) {
40519		m["table_name"] = value
40520	}
40521}
40522
40523// RetrieveTPUEmbeddingRMSPropParametersConfig sets the optional config attribute to value.
40524// If not specified, defaults to ""
40525func RetrieveTPUEmbeddingRMSPropParametersConfig(value string) RetrieveTPUEmbeddingRMSPropParametersAttr {
40526	return func(m optionalAttr) {
40527		m["config"] = value
40528	}
40529}
40530
40531// Retrieve RMSProp embedding parameters.
40532//
40533// An op that retrieves optimization parameters from embedding to host
40534// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40535// the correct embedding table configuration. For example, this op is
40536// used to retrieve updated parameters before saving a checkpoint.
40537//
40538// Returns:
40539//
40540//	parameters: Parameter parameters updated by the RMSProp optimization algorithm.
40541//	ms: Parameter ms updated by the RMSProp optimization algorithm.
40542//	mom: Parameter mom updated by the RMSProp optimization algorithm.
40543func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output) {
40544	if scope.Err() != nil {
40545		return
40546	}
40547	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40548	for _, a := range optional {
40549		a(attrs)
40550	}
40551	opspec := tf.OpSpec{
40552		Type: "RetrieveTPUEmbeddingRMSPropParameters",
40553
40554		Attrs: attrs,
40555	}
40556	op := scope.AddOperation(opspec)
40557	return op.Output(0), op.Output(1), op.Output(2)
40558}
40559
40560// RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr is an optional argument to RetrieveTPUEmbeddingStochasticGradientDescentParameters.
40561type RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr func(optionalAttr)
40562
40563// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId sets the optional table_id attribute to value.
40564// If not specified, defaults to -1
40565func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableId(value int64) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
40566	return func(m optionalAttr) {
40567		m["table_id"] = value
40568	}
40569}
40570
40571// RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName sets the optional table_name attribute to value.
40572// If not specified, defaults to ""
40573func RetrieveTPUEmbeddingStochasticGradientDescentParametersTableName(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
40574	return func(m optionalAttr) {
40575		m["table_name"] = value
40576	}
40577}
40578
40579// RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig sets the optional config attribute to value.
40580// If not specified, defaults to ""
40581func RetrieveTPUEmbeddingStochasticGradientDescentParametersConfig(value string) RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr {
40582	return func(m optionalAttr) {
40583		m["config"] = value
40584	}
40585}
40586
40587// Retrieve SGD embedding parameters.
40588//
40589// An op that retrieves optimization parameters from embedding to host
40590// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
40591// the correct embedding table configuration. For example, this op is
40592// used to retrieve updated parameters before saving a checkpoint.
40593//
40594// Returns Parameter parameters updated by the stochastic gradient descent optimization algorithm.
40595func RetrieveTPUEmbeddingStochasticGradientDescentParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingStochasticGradientDescentParametersAttr) (parameters tf.Output) {
40596	if scope.Err() != nil {
40597		return
40598	}
40599	attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
40600	for _, a := range optional {
40601		a(attrs)
40602	}
40603	opspec := tf.OpSpec{
40604		Type: "RetrieveTPUEmbeddingStochasticGradientDescentParameters",
40605
40606		Attrs: attrs,
40607	}
40608	op := scope.AddOperation(opspec)
40609	return op.Output(0)
40610}
40611
40612// Reverses specific dimensions of a tensor.
40613//
40614// Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
40615// of `tensor`, this operation reverses each dimension i of `tensor` where
40616// `dims[i]` is `True`.
40617//
40618// `tensor` can have up to 8 dimensions. The number of dimensions
40619// of `tensor` must equal the number of elements in `dims`. In other words:
40620//
40621// `rank(tensor) = size(dims)`
40622//
40623// For example:
40624//
40625// ```
40626// # tensor 't' is [[[[ 0,  1,  2,  3],
40627// #                  [ 4,  5,  6,  7],
40628// #                  [ 8,  9, 10, 11]],
40629// #                 [[12, 13, 14, 15],
40630// #                  [16, 17, 18, 19],
40631// #                  [20, 21, 22, 23]]]]
40632// # tensor 't' shape is [1, 2, 3, 4]
40633//
40634// # 'dims' is [False, False, False, True]
40635// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
40636//                         [ 7,  6,  5,  4],
40637//                         [11, 10,  9,  8]],
40638//                        [[15, 14, 13, 12],
40639//                         [19, 18, 17, 16],
40640//                         [23, 22, 21, 20]]]]
40641//
40642// # 'dims' is [False, True, False, False]
40643// reverse(t, dims) ==> [[[[12, 13, 14, 15],
40644//                         [16, 17, 18, 19],
40645//                         [20, 21, 22, 23]],
40646//                        [[ 0,  1,  2,  3],
40647//                         [ 4,  5,  6,  7],
40648//                         [ 8,  9, 10, 11]]]]
40649//
40650// # 'dims' is [False, False, True, False]
40651// reverse(t, dims) ==> [[[[ 8,  9, 10, 11],
40652//                         [ 4,  5,  6,  7],
40653//                         [ 0,  1,  2,  3]],
40654//                        [[20, 21, 22, 23],
40655//                         [16, 17, 18, 19],
40656//                         [12, 13, 14, 15]]]]
40660//
40661// ```
40662//
40663// Arguments:
40664//
40665//	tensor: Up to 8-D.
40666//	dims: 1-D. The dimensions to reverse.
40667//
40668// Returns The same shape as `tensor`.
40669func Reverse(scope *Scope, tensor tf.Output, dims tf.Output) (output tf.Output) {
40670	if scope.Err() != nil {
40671		return
40672	}
40673	opspec := tf.OpSpec{
40674		Type: "Reverse",
40675		Input: []tf.Input{
40676			tensor, dims,
40677		},
40678	}
40679	op := scope.AddOperation(opspec)
40680	return op.Output(0)
40681}
40682
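// Example (editorial sketch, not generated code): building and running
// Reverse through the Go bindings. The scope/session plumbing below is the
// standard Go API; variable names are illustrative.
//
// ```go
// package main
//
// import (
// 	"fmt"
//
// 	tf "github.com/tensorflow/tensorflow/tensorflow/go"
// 	"github.com/tensorflow/tensorflow/tensorflow/go/op"
// )
//
// func main() {
// 	root := op.NewScope()
// 	t := op.Const(root.SubScope("t"), [][]int32{{1, 2, 3}, {4, 5, 6}})
// 	// dims[i] == true means dimension i is reversed; here only dim 0.
// 	dims := op.Const(root.SubScope("dims"), []bool{true, false})
// 	rev := op.Reverse(root, t, dims)
//
// 	graph, err := root.Finalize()
// 	if err != nil {
// 		panic(err)
// 	}
// 	sess, err := tf.NewSession(graph, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	out, err := sess.Run(nil, []tf.Output{rev}, nil)
// 	if err != nil {
// 		panic(err)
// 	}
// 	fmt.Println(out[0].Value()) // [[4 5 6] [1 2 3]]
// }
// ```
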
40683// ReverseSequenceAttr is an optional argument to ReverseSequence.
40684type ReverseSequenceAttr func(optionalAttr)
40685
40686// ReverseSequenceBatchDim sets the optional batch_dim attribute to value.
40687//
40688// value: The dimension along which reversal is performed.
40689// If not specified, defaults to 0
40690func ReverseSequenceBatchDim(value int64) ReverseSequenceAttr {
40691	return func(m optionalAttr) {
40692		m["batch_dim"] = value
40693	}
40694}
40695
40696// Reverses variable length slices.
40697//
40698// This op first slices `input` along the dimension `batch_dim`, and for each
40699// slice `i`, reverses the first `seq_lengths[i]` elements along
40700// the dimension `seq_dim`.
40701//
40702// The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
40703// and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
40704//
40705// The output slice `i` along dimension `batch_dim` is then given by input
40706// slice `i`, with the first `seq_lengths[i]` slices along dimension
40707// `seq_dim` reversed.
40708//
40709// For example:
40710//
40711// ```
40712// # Given this:
40713// batch_dim = 0
40714// seq_dim = 1
40715// input.dims = (4, 8, ...)
40716// seq_lengths = [7, 2, 3, 5]
40717//
40718// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
40719// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
40720// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
40721// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
40722// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
40723//
40724// # while entries past seq_lens are copied through:
40725// output[0, 7:, :, ...] = input[0, 7:, :, ...]
40726// output[1, 2:, :, ...] = input[1, 2:, :, ...]
40727// output[2, 3:, :, ...] = input[2, 3:, :, ...]
40728// output[3, 5:, :, ...] = input[3, 5:, :, ...]
40729// ```
40730//
40731// In contrast, if:
40732//
40733// ```
40734// # Given this:
40735// batch_dim = 2
40736// seq_dim = 0
40737// input.dims = (8, ?, 4, ...)
40738// seq_lengths = [7, 2, 3, 5]
40739//
40740// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
40741// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
40742// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
40743// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
40744// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
40745//
40746// # while entries past seq_lens are copied through:
40747// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
40748// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
40749// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
40750// output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
40751// ```
40752//
40753// Arguments:
40754//
40755//	input: The input to reverse.
40756//	seq_lengths: 1-D with length `input.dims(batch_dim)` and
40757//
40758// `max(seq_lengths) <= input.dims(seq_dim)`
40759//
40760//	seq_dim: The dimension which is partially reversed.
40761//
40762// Returns The partially reversed input. It has the same shape as `input`.
40763func ReverseSequence(scope *Scope, input tf.Output, seq_lengths tf.Output, seq_dim int64, optional ...ReverseSequenceAttr) (output tf.Output) {
40764	if scope.Err() != nil {
40765		return
40766	}
40767	attrs := map[string]interface{}{"seq_dim": seq_dim}
40768	for _, a := range optional {
40769		a(attrs)
40770	}
40771	opspec := tf.OpSpec{
40772		Type: "ReverseSequence",
40773		Input: []tf.Input{
40774			input, seq_lengths,
40775		},
40776		Attrs: attrs,
40777	}
40778	op := scope.AddOperation(opspec)
40779	return op.Output(0)
40780}
40781
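// Example (editorial sketch): reversing only the first `seq_lengths[i]`
// elements of each batch row. Assumes a `root` scope and the session plumbing
// from the Reverse example above.
//
// ```go
// input := op.Const(root.SubScope("in"), [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}})
// lens := op.Const(root.SubScope("lens"), []int64{3, 2})
// // batch_dim defaults to 0; seq_dim = 1 reverses within each row.
// out := op.ReverseSequence(root, input, lens, 1)
// // row 0: first 3 elements reversed => [3 2 1 4]
// // row 1: first 2 elements reversed => [6 5 7 8]
// ```
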
40782// Reverses specific dimensions of a tensor.
40783//
40784// Given a `tensor` and an `int32` tensor `axis` representing the set of
40785// dimensions of `tensor` to reverse, this operation reverses each dimension
40786// `i` for which there exists `j` such that `axis[j] == i`.
40787//
40788// `tensor` can have up to 8 dimensions. `axis` may specify zero or more
40789// dimensions to reverse. If an index is specified more than once, an
40790// InvalidArgument error is raised.
40791//
40792// For example:
40793//
40794// ```
40795// # tensor 't' is [[[[ 0,  1,  2,  3],
40796// #                  [ 4,  5,  6,  7],
40797// #                  [ 8,  9, 10, 11]],
40798// #                 [[12, 13, 14, 15],
40799// #                  [16, 17, 18, 19],
40800// #                  [20, 21, 22, 23]]]]
40801// # tensor 't' shape is [1, 2, 3, 4]
40802//
40803// # 'dims' is [3] or 'dims' is [-1]
40804// reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
40805//
40806//	 [ 7,  6,  5,  4],
40807//	 [ 11, 10, 9, 8]],
40808//	[[15, 14, 13, 12],
40809//	 [19, 18, 17, 16],
40810//	 [23, 22, 21, 20]]]]
40811//
40812// # 'dims' is '[1]' (or 'dims' is '[-3]')
40813// reverse(t, dims) ==> [[[[12, 13, 14, 15],
40814//
40815//	 [16, 17, 18, 19],
40816//	 [20, 21, 22, 23]],
40817//	[[ 0,  1,  2,  3],
40818//	 [ 4,  5,  6,  7],
40819//	 [ 8,  9, 10, 11]]]]
40820//
40821// # 'dims' is '[2]' (or 'dims' is '[-2]')
40822// reverse(t, dims) ==> [[[[8, 9, 10, 11],
40823//
40824//	 [4, 5, 6, 7],
40825//	 [0, 1, 2, 3]],
40826//	[[20, 21, 22, 23],
40827//	 [16, 17, 18, 19],
40828//	 [12, 13, 14, 15]]]]
40829//
40830// ```
40831//
40832// Arguments:
40833//
40834//	tensor: Up to 8-D.
40835//	axis: 1-D. The indices of the dimensions to reverse. Must be in the range
40836//
40837// `[-rank(tensor), rank(tensor))`.
40838//
40839// Returns The same shape as `tensor`.
40840func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {
40841	if scope.Err() != nil {
40842		return
40843	}
40844	opspec := tf.OpSpec{
40845		Type: "ReverseV2",
40846		Input: []tf.Input{
40847			tensor, axis,
40848		},
40849	}
40850	op := scope.AddOperation(opspec)
40851	return op.Output(0)
40852}
40853
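// Example (editorial sketch): the V2 form takes integer axis indices instead
// of a bool mask, and negative indices count from the end. Assumes a `root`
// scope as above.
//
// ```go
// t := op.Const(root.SubScope("t2"), [][]int32{{1, 2, 3}, {4, 5, 6}})
// axis := op.Const(root.SubScope("axis"), []int32{-1}) // reverse the last dimension
// out := op.ReverseV2(root, t, axis) // => [[3 2 1] [6 5 4]]
// ```
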
40854// Elementwise computes the bitwise right-shift of `x` and `y`.
40855//
40856// Performs a logical shift for unsigned integer types, and an arithmetic shift
40857// for signed integer types.
40858//
40859// If `y` is negative, or greater than or equal to the width of `x` in bits,
40860// the result is implementation defined.
40861//
40862// Example:
40863//
40864// ```python
40865// import tensorflow as tf
40866// from tensorflow.python.ops import bitwise_ops
40867// import numpy as np
40868// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
40869//
40870// for dtype in dtype_list:
40871//
40872//	lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
40873//	rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
40874//
40875//	right_shift_result = bitwise_ops.right_shift(lhs, rhs)
40876//
40877//	print(right_shift_result)
40878//
40879// # This will print:
40880// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
40881// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
40882// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
40883// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
40884//
40885// lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
40886// rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
40887// bitwise_ops.right_shift(lhs, rhs)
40888// # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
40889// ```
40890func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
40891	if scope.Err() != nil {
40892		return
40893	}
40894	opspec := tf.OpSpec{
40895		Type: "RightShift",
40896		Input: []tf.Input{
40897			x, y,
40898		},
40899	}
40900	op := scope.AddOperation(opspec)
40901	return op.Output(0)
40902}
40903
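// Example (editorial sketch): the Go counterpart of the Python snippet above,
// assuming a `root` scope. For signed types the shift is arithmetic, so the
// sign bit is propagated.
//
// ```go
// lhs := op.Const(root.SubScope("lhs"), []int8{-1, -5, -3, -14})
// rhs := op.Const(root.SubScope("rhs"), []int8{5, 0, 7, 11})
// z := op.RightShift(root, lhs, rhs) // => [-1 -5 -1 -1]
// ```
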
40904// Returns element-wise integer closest to x.
40905//
40906// If the result is midway between two representable values,
40907// the even representable value is chosen.
40908// For example:
40909//
40910// ```
40911// rint(-1.5) ==> -2.0
40912// rint(0.5000001) ==> 1.0
40913// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
40914// ```
40915func Rint(scope *Scope, x tf.Output) (y tf.Output) {
40916	if scope.Err() != nil {
40917		return
40918	}
40919	opspec := tf.OpSpec{
40920		Type: "Rint",
40921		Input: []tf.Input{
40922			x,
40923		},
40924	}
40925	op := scope.AddOperation(opspec)
40926	return op.Output(0)
40927}
40928
40929// Returns x + y element-wise.
40930//
40931// *NOTE*: `RiscAdd` does not support broadcasting.
40932//
40933// Given two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.
40934//
40935// Both input and output have a range `(-inf, inf)`.
40936func RiscAdd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
40937	if scope.Err() != nil {
40938		return
40939	}
40940	opspec := tf.OpSpec{
40941		Type: "RiscAdd",
40942		Input: []tf.Input{
40943			x, y,
40944		},
40945	}
40946	op := scope.AddOperation(opspec)
40947	return op.Output(0)
40948}
40949
40950// Returns max(x, y) element-wise.
40951//
40952// *NOTE*: `RiscMax` does not support broadcasting.
40953//
40954// Given two input tensors, the `tf.risc_max` operation computes the maximum for every element in the tensor.
40955func RiscMax(scope *Scope, x tf.Output, y tf.Output) (max tf.Output) {
40956	if scope.Err() != nil {
40957		return
40958	}
40959	opspec := tf.OpSpec{
40960		Type: "RiscMax",
40961		Input: []tf.Input{
40962			x, y,
40963		},
40964	}
40965	op := scope.AddOperation(opspec)
40966	return op.Output(0)
40967}
40968
40969// Advance the counter of a counter-based RNG.
40970//
40971// The state of the RNG after
40972// `rng_read_and_skip(n)` will be the same as that after `uniform([n])`
40973// (or any other distribution). The actual increment added to the
40974// counter is an unspecified implementation choice.
40975//
40976// Arguments:
40977//
40978//	resource: The handle of the resource variable that stores the state of the RNG.
40979//	alg: The RNG algorithm.
40980//	delta: The amount of advancement.
40981//
40982// Returns The old value of the resource variable, before incrementing. Since state size is algorithm-dependent, this output will be right-padded with zeros to reach shape int64[3] (the current maximal state size among algorithms).
40983func RngReadAndSkip(scope *Scope, resource tf.Output, alg tf.Output, delta tf.Output) (value tf.Output) {
40984	if scope.Err() != nil {
40985		return
40986	}
40987	opspec := tf.OpSpec{
40988		Type: "RngReadAndSkip",
40989		Input: []tf.Input{
40990			resource, alg, delta,
40991		},
40992	}
40993	op := scope.AddOperation(opspec)
40994	return op.Output(0)
40995}
40996
40997// Advance the counter of a counter-based RNG.
40998//
40999// The state of the RNG after
41000// `rng_skip(n)` will be the same as that after `stateful_uniform([n])`
41001// (or any other distribution). The actual increment added to the
41002// counter is an unspecified implementation detail.
41003//
41004// Arguments:
41005//
41006//	resource: The handle of the resource variable that stores the state of the RNG.
41007//	algorithm: The RNG algorithm.
41008//	delta: The amount of advancement.
41009//
41010// Returns the created operation.
41011func RngSkip(scope *Scope, resource tf.Output, algorithm tf.Output, delta tf.Output) (o *tf.Operation) {
41012	if scope.Err() != nil {
41013		return
41014	}
41015	opspec := tf.OpSpec{
41016		Type: "RngSkip",
41017		Input: []tf.Input{
41018			resource, algorithm, delta,
41019		},
41020	}
41021	return scope.AddOperation(opspec)
41022}
41023
41024// Rolls the elements of a tensor along an axis.
41025//
41026// The elements are shifted positively (towards larger indices) by the offset of
41027// `shift` along the dimension of `axis`. Negative `shift` values will shift
41028// elements in the opposite direction. Elements that roll past the last position
41029// will wrap around to the first and vice versa. Multiple shifts along multiple
41030// axes may be specified.
41031//
41032// For example:
41033//
41034// ```
41035// # 't' is [0, 1, 2, 3, 4]
41036// roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
41037//
41038// # shifting along multiple dimensions
41039// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
41040// roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
41041//
41042// # shifting along the same axis multiple times
41043// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
41044// roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
41045// ```
41046//
41047// Arguments:
41048//
41049//	shift: Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
41050//
41051// elements are shifted positively (towards larger indices) along the dimension
41052// specified by `axis[i]`. Negative shifts will roll the elements in the opposite
41053// direction.
41054//
41055//	axis: Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the shift
41056//
41057// `shift[i]` should occur. If the same axis is referenced more than once, the
41058// total shift for that axis will be the sum of all the shifts that belong to that
41059// axis.
41060//
41061// Returns Has the same shape and size as the input. The elements are shifted
41062// positively (towards larger indices) by the offsets of `shift` along the
41063// dimensions of `axis`.
41064func Roll(scope *Scope, input tf.Output, shift tf.Output, axis tf.Output) (output tf.Output) {
41065	if scope.Err() != nil {
41066		return
41067	}
41068	opspec := tf.OpSpec{
41069		Type: "Roll",
41070		Input: []tf.Input{
41071			input, shift, axis,
41072		},
41073	}
41074	op := scope.AddOperation(opspec)
41075	return op.Output(0)
41076}
41077
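// Example (editorial sketch): the first case from the comment above, written
// against the Go API; assumes a `root` scope. `shift` and `axis` may be 0-D
// scalars, as here.
//
// ```go
// t := op.Const(root.SubScope("roll_in"), []int32{0, 1, 2, 3, 4})
// shift := op.Const(root.SubScope("shift"), int32(2))
// axis := op.Const(root.SubScope("axis0"), int32(0))
// rolled := op.Roll(root, t, shift, axis) // => [3 4 0 1 2]
// ```
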
41078// Rounds the values of a tensor to the nearest integer, element-wise.
41079//
41080// Rounds half to even.  Also known as banker's rounding. If you want to round
41081// according to the current system rounding mode, use std::rint.
41082func Round(scope *Scope, x tf.Output) (y tf.Output) {
41083	if scope.Err() != nil {
41084		return
41085	}
41086	opspec := tf.OpSpec{
41087		Type: "Round",
41088		Input: []tf.Input{
41089			x,
41090		},
41091	}
41092	op := scope.AddOperation(opspec)
41093	return op.Output(0)
41094}
41095
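// Example (editorial sketch): banker's rounding on a small vector, assuming a
// `root` scope. Note that both 1.5 and 2.5 round to 2, the even neighbor.
//
// ```go
// x := op.Const(root.SubScope("x"), []float32{-1.5, -0.5, 0.5, 1.5, 2.5})
// y := op.Round(root, x) // => [-2 -0 0 2 2]
// ```
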
41096// Computes reciprocal of square root of x element-wise.
41097//
41098// I.e., \\(y = 1 / \sqrt{x}\\).
41099func Rsqrt(scope *Scope, x tf.Output) (y tf.Output) {
41100	if scope.Err() != nil {
41101		return
41102	}
41103	opspec := tf.OpSpec{
41104		Type: "Rsqrt",
41105		Input: []tf.Input{
41106			x,
41107		},
41108	}
41109	op := scope.AddOperation(opspec)
41110	return op.Output(0)
41111}
41112
41113// Computes the gradient for the rsqrt of `x` wrt its input.
41114//
41115// Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
41116// is the corresponding input gradient.
41117func RsqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
41118	if scope.Err() != nil {
41119		return
41120	}
41121	opspec := tf.OpSpec{
41122		Type: "RsqrtGrad",
41123		Input: []tf.Input{
41124			y, dy,
41125		},
41126	}
41127	op := scope.AddOperation(opspec)
41128	return op.Output(0)
41129}
41130
41131// SampleDistortedBoundingBoxAttr is an optional argument to SampleDistortedBoundingBox.
41132type SampleDistortedBoundingBoxAttr func(optionalAttr)
41133
41134// SampleDistortedBoundingBoxSeed sets the optional seed attribute to value.
41135//
41136// value: If either `seed` or `seed2` are set to non-zero, the random number
41137// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
41138// seed.
41139// If not specified, defaults to 0
41140func SampleDistortedBoundingBoxSeed(value int64) SampleDistortedBoundingBoxAttr {
41141	return func(m optionalAttr) {
41142		m["seed"] = value
41143	}
41144}
41145
41146// SampleDistortedBoundingBoxSeed2 sets the optional seed2 attribute to value.
41147//
41148// value: A second seed to avoid seed collision.
41149// If not specified, defaults to 0
41150func SampleDistortedBoundingBoxSeed2(value int64) SampleDistortedBoundingBoxAttr {
41151	return func(m optionalAttr) {
41152		m["seed2"] = value
41153	}
41154}
41155
41156// SampleDistortedBoundingBoxMinObjectCovered sets the optional min_object_covered attribute to value.
41157//
41158// value: The cropped area of the image must contain at least this
41159// fraction of any bounding box supplied. The value of this parameter should be
41160// non-negative. In the case of 0, the cropped area does not need to overlap
41161// any of the bounding boxes supplied.
41162// If not specified, defaults to 0.1
41163func SampleDistortedBoundingBoxMinObjectCovered(value float32) SampleDistortedBoundingBoxAttr {
41164	return func(m optionalAttr) {
41165		m["min_object_covered"] = value
41166	}
41167}
41168
41169// SampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
41170//
41171// value: The cropped area of the image must have an aspect ratio =
41172// width / height within this range.
41173// If not specified, defaults to {f:0.75 f:1.33}
41174func SampleDistortedBoundingBoxAspectRatioRange(value []float32) SampleDistortedBoundingBoxAttr {
41175	return func(m optionalAttr) {
41176		m["aspect_ratio_range"] = value
41177	}
41178}
41179
41180// SampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
41181//
41182// value: The cropped area of the image must contain a fraction of the
41183// supplied image within this range.
41184// If not specified, defaults to {f:0.05 f:1}
41185func SampleDistortedBoundingBoxAreaRange(value []float32) SampleDistortedBoundingBoxAttr {
41186	return func(m optionalAttr) {
41187		m["area_range"] = value
41188	}
41189}
41190
41191// SampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
41192//
41193// value: Number of attempts at generating a cropped region of the image that
41194// satisfies the specified constraints. After `max_attempts` failures, return the entire
41195// image.
41196// If not specified, defaults to 100
41197func SampleDistortedBoundingBoxMaxAttempts(value int64) SampleDistortedBoundingBoxAttr {
41198	return func(m optionalAttr) {
41199		m["max_attempts"] = value
41200	}
41201}
41202
41203// SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
41204//
41205// value: Controls behavior if no bounding boxes are supplied.
41206// If true, assume an implicit bounding box covering the whole input. If false,
41207// raise an error.
41208// If not specified, defaults to false
41209func SampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxAttr {
41210	return func(m optionalAttr) {
41211		m["use_image_if_no_bounding_boxes"] = value
41212	}
41213}
41214
41215// Generate a single randomly distorted bounding box for an image.
41216//
41217// Bounding box annotations are often supplied in addition to ground-truth labels
41218// in image recognition or object localization tasks. A common technique for
41219// training such a system is to randomly distort an image while preserving
41220// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
41221// localization of an object, i.e. bounding box, given an `image_size`,
41222// `bounding_boxes` and a series of constraints.
41223//
41224// The output of this Op is a single bounding box that may be used to crop the
41225// original image. The output is returned as 3 tensors: `begin`, `size` and
41226// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
41227// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
41228// what the bounding box looks like.
41229//
41230// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
41231// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
41232// height of the underlying image.
41233//
41234// For example,
41235//
41236// ```python
41237//
41238//	# Generate a single distorted bounding box.
41239//	begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
41240//	    tf.shape(image),
41241//	    bounding_boxes=bounding_boxes)
41242//
41243//	# Draw the bounding box in an image summary.
41244//	image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
41245//	                                              bbox_for_draw)
41246//	tf.summary.image('images_with_box', image_with_box)
41247//
41248//	# Employ the bounding box to distort the image.
41249//	distorted_image = tf.slice(image, begin, size)
41250//
41251// ```
41252//
41253// Note that if no bounding box information is available, setting
41254// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
41255// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
41256// false and no bounding boxes are supplied, an error is raised.
41257//
41258// Arguments:
41259//
41260//	image_size: 1-D, containing `[height, width, channels]`.
41261//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
41262//
41263// associated with the image.
41264//
41265// Returns:
41266//
41267//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
41268//
41269// `tf.slice`.
41270//
41271//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
41272//
41273// `tf.slice`.
41274//
41275//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
41276//
41277// Provide as input to `tf.image.draw_bounding_boxes`.
41278func SampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, optional ...SampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
41279	if scope.Err() != nil {
41280		return
41281	}
41282	attrs := map[string]interface{}{}
41283	for _, a := range optional {
41284		a(attrs)
41285	}
41286	opspec := tf.OpSpec{
41287		Type: "SampleDistortedBoundingBox",
41288		Input: []tf.Input{
41289			image_size, bounding_boxes,
41290		},
41291		Attrs: attrs,
41292	}
41293	op := scope.AddOperation(opspec)
41294	return op.Output(0), op.Output(1), op.Output(2)
41295}
41296
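// Example (editorial sketch): the optional attributes use the functional-option
// pattern shared by every *Attr type in this package. Assumes a `root` scope;
// the image size and box coordinates are illustrative.
//
// ```go
// imageSize := op.Const(root.SubScope("sz"), []int32{480, 640, 3})
// boxes := op.Const(root.SubScope("boxes"),
// 	[][][]float32{{{0.1, 0.1, 0.9, 0.9}}}) // shape [batch=1, N=1, 4]
// begin, size, bbox := op.SampleDistortedBoundingBox(root, imageSize, boxes,
// 	op.SampleDistortedBoundingBoxSeed(7),
// 	op.SampleDistortedBoundingBoxMinObjectCovered(0.5))
// // begin and size can be fed to op.Slice to crop the decoded image;
// // bbox can be visualized with draw_bounding_boxes on the Python side.
// _, _, _ = begin, size, bbox
// ```
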
41297// SampleDistortedBoundingBoxV2Attr is an optional argument to SampleDistortedBoundingBoxV2.
41298type SampleDistortedBoundingBoxV2Attr func(optionalAttr)
41299
41300// SampleDistortedBoundingBoxV2Seed sets the optional seed attribute to value.
41301//
41302// value: If either `seed` or `seed2` are set to non-zero, the random number
41303// generator is seeded by the given `seed`.  Otherwise, it is seeded by a random
41304// seed.
41305// If not specified, defaults to 0
41306func SampleDistortedBoundingBoxV2Seed(value int64) SampleDistortedBoundingBoxV2Attr {
41307	return func(m optionalAttr) {
41308		m["seed"] = value
41309	}
41310}
41311
41312// SampleDistortedBoundingBoxV2Seed2 sets the optional seed2 attribute to value.
41313//
41314// value: A second seed to avoid seed collision.
41315// If not specified, defaults to 0
41316func SampleDistortedBoundingBoxV2Seed2(value int64) SampleDistortedBoundingBoxV2Attr {
41317	return func(m optionalAttr) {
41318		m["seed2"] = value
41319	}
41320}
41321
41322// SampleDistortedBoundingBoxV2AspectRatioRange sets the optional aspect_ratio_range attribute to value.
41323//
41324// value: The cropped area of the image must have an aspect ratio =
41325// width / height within this range.
41326// If not specified, defaults to {f:0.75 f:1.33}
41327func SampleDistortedBoundingBoxV2AspectRatioRange(value []float32) SampleDistortedBoundingBoxV2Attr {
41328	return func(m optionalAttr) {
41329		m["aspect_ratio_range"] = value
41330	}
41331}
41332
41333// SampleDistortedBoundingBoxV2AreaRange sets the optional area_range attribute to value.
41334//
41335// value: The cropped area of the image must contain a fraction of the
41336// supplied image within this range.
41337// If not specified, defaults to {f:0.05 f:1}
41338func SampleDistortedBoundingBoxV2AreaRange(value []float32) SampleDistortedBoundingBoxV2Attr {
41339	return func(m optionalAttr) {
41340		m["area_range"] = value
41341	}
41342}
41343
41344// SampleDistortedBoundingBoxV2MaxAttempts sets the optional max_attempts attribute to value.
41345//
41346// value: Number of attempts at generating a cropped region of the image that
41347// satisfies the specified constraints. After `max_attempts` failures, return the entire
41348// image.
41349// If not specified, defaults to 100
41350func SampleDistortedBoundingBoxV2MaxAttempts(value int64) SampleDistortedBoundingBoxV2Attr {
41351	return func(m optionalAttr) {
41352		m["max_attempts"] = value
41353	}
41354}
41355
41356// SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
41357//
41358// value: Controls behavior if no bounding boxes are supplied.
41359// If true, assume an implicit bounding box covering the whole input. If false,
41360// raise an error.
41361// If not specified, defaults to false
41362func SampleDistortedBoundingBoxV2UseImageIfNoBoundingBoxes(value bool) SampleDistortedBoundingBoxV2Attr {
41363	return func(m optionalAttr) {
41364		m["use_image_if_no_bounding_boxes"] = value
41365	}
41366}
41367
41368// Generate a single randomly distorted bounding box for an image.
41369//
41370// Bounding box annotations are often supplied in addition to ground-truth labels
41371// in image recognition or object localization tasks. A common technique for
41372// training such a system is to randomly distort an image while preserving
41373// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
41374// localization of an object, i.e. bounding box, given an `image_size`,
41375// `bounding_boxes` and a series of constraints.
41376//
41377// The output of this Op is a single bounding box that may be used to crop the
41378// original image. The output is returned as 3 tensors: `begin`, `size` and
41379// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
41380// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
41381// what the bounding box looks like.
41382//
41383// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
41384// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
41385// height of the underlying image.
41386//
41387// For example,
41388//
41389// ```python
41390//
41391//	# Generate a single distorted bounding box.
41392//	begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
41393//	    tf.shape(image),
41394//	    bounding_boxes=bounding_boxes)
41395//
41396//	# Draw the bounding box in an image summary.
41397//	image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
41398//	                                              bbox_for_draw)
41399//	tf.summary.image('images_with_box', image_with_box)
41400//
41401//	# Employ the bounding box to distort the image.
41402//	distorted_image = tf.slice(image, begin, size)
41403//
41404// ```
41405//
41406// Note that if no bounding box information is available, setting
41407// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
41408// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
41409// false and no bounding boxes are supplied, an error is raised.
41410//
41411// Arguments:
41412//
41413//	image_size: 1-D, containing `[height, width, channels]`.
41414//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
41415//
41416// associated with the image.
41417//
41418//	min_object_covered: The cropped area of the image must contain at least this
41419//
41420// fraction of any bounding box supplied. The value of this parameter should be
41421// non-negative. In the case of 0, the cropped area does not need to overlap
41422// any of the bounding boxes supplied.
41423//
41424// Returns:
41425//
41426//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
41427//
41428// `tf.slice`.
41429//
41430//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
41431//
41432// `tf.slice`.
41433//
41434//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
41435//
41436// Provide as input to `tf.image.draw_bounding_boxes`.
41437func SampleDistortedBoundingBoxV2(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, optional ...SampleDistortedBoundingBoxV2Attr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
41438	if scope.Err() != nil {
41439		return
41440	}
41441	attrs := map[string]interface{}{}
41442	for _, a := range optional {
41443		a(attrs)
41444	}
41445	opspec := tf.OpSpec{
41446		Type: "SampleDistortedBoundingBoxV2",
41447		Input: []tf.Input{
41448			image_size, bounding_boxes, min_object_covered,
41449		},
41450		Attrs: attrs,
41451	}
41452	op := scope.AddOperation(opspec)
41453	return op.Output(0), op.Output(1), op.Output(2)
41454}
41455
41456// Creates a dataset that takes a Bernoulli sample of the contents of another dataset.
41457//
41458// There is no transformation in the `tf.data` Python API for creating this dataset.
41459// Instead, it is created as a result of the `filter_with_random_uniform_fusion`
41460// static optimization. Whether this optimization is performed is determined by the
41461// `experimental_optimization.filter_with_random_uniform_fusion` option of
41462// `tf.data.Options`.
41463//
41464// Arguments:
41465//
41466//	rate: A scalar representing the sample rate. Each element of `input_dataset` is
41467//
41468// retained with this probability, independent of all other elements.
41469//
41470//	seed: A scalar representing the seed of the random number generator.
41471//	seed2: A scalar representing the second seed of the random number generator.
41472func SamplingDataset(scope *Scope, input_dataset tf.Output, rate tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
41473	if scope.Err() != nil {
41474		return
41475	}
41476	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
41477	opspec := tf.OpSpec{
41478		Type: "SamplingDataset",
41479		Input: []tf.Input{
41480			input_dataset, rate, seed, seed2,
41481		},
41482		Attrs: attrs,
41483	}
41484	op := scope.AddOperation(opspec)
41485	return op.Output(0)
41486}
41487
41488// Saves the input tensors to disk.
41489//
41490// The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
41491// is written to `filename` with name `tensor_names[i]`.
41492//
41493// See also `SaveSlices`.
41494//
41495// Arguments:
41496//
41497//	filename: Must have a single element. The name of the file to which we write
41498//
41499// the tensor.
41500//
41501//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
41502//	data: `N` tensors to save.
41503//
41504// Returns the created operation.
41505func Save(scope *Scope, filename tf.Output, tensor_names tf.Output, data []tf.Output) (o *tf.Operation) {
41506	if scope.Err() != nil {
41507		return
41508	}
41509	opspec := tf.OpSpec{
41510		Type: "Save",
41511		Input: []tf.Input{
41512			filename, tensor_names, tf.OutputList(data),
41513		},
41514	}
41515	return scope.AddOperation(opspec)
41516}
41517
41518// Saves input tensors slices to disk.
41519//
41520// This is like `Save` except that tensors can be listed in the saved file as being
41521// a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
41522// larger tensor and the slice that this tensor covers. `shapes_and_slices` must
41523// have as many elements as `tensor_names`.
41524//
41525// Elements of the `shapes_and_slices` input must either be:
41526//
41527//   - The empty string, in which case the corresponding tensor is
41528//     saved normally.
41529//   - A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
41530//     `dimI` are the dimensions of the larger tensor and `slice-spec`
41531//     specifies what part is covered by the tensor to save.
41532//
41533// `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
41534// where each `sliceI` is either:
41535//
41536//   - The string `-` meaning that the slice covers all indices of this dimension
41537//   - `start,length` where `start` and `length` are integers.  In that
41538//     case the slice covers `length` indices starting at `start`.
41539//
41540// See also `Save`.
41541//
41542// Arguments:
41543//
41544//	filename: Must have a single element. The name of the file to which we write the
41545//
41546// tensor.
41547//
41548//	tensor_names: Shape `[N]`. The names of the tensors to be saved.
41549//	shapes_and_slices: Shape `[N]`.  The shapes and slice specifications to use when
41550//
41551// saving the tensors.
41552//
41553//	data: `N` tensors to save.
41554//
41555// Returns the created operation.
41556func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes_and_slices tf.Output, data []tf.Output) (o *tf.Operation) {
41557	if scope.Err() != nil {
41558		return
41559	}
41560	opspec := tf.OpSpec{
41561		Type: "SaveSlices",
41562		Input: []tf.Input{
41563			filename, tensor_names, shapes_and_slices, tf.OutputList(data),
41564		},
41565	}
41566	return scope.AddOperation(opspec)
41567}
41568
41569// Saves tensors in V2 checkpoint format.
41570//
41571// By default, saves the named tensors in full.  If the caller wishes to save
41572// specific slices of full tensors, "shape_and_slices" should be non-empty strings
41573// and correspondingly well-formed.
41574//
41575// Arguments:
41576//
41577//	prefix: Must have a single element. The prefix of the V2 checkpoint to which we
41578//
41579// write the tensors.
41580//
41581//	tensor_names: shape {N}. The names of the tensors to be saved.
41582//	shape_and_slices: shape {N}.  The slice specs of the tensors to be saved.
41583//
41584// Empty strings indicate that they are non-partitioned tensors.
41585//
41586//	tensors: `N` tensors to save.
41587//
41588// Returns the created operation.
41589func SaveV2(scope *Scope, prefix tf.Output, tensor_names tf.Output, shape_and_slices tf.Output, tensors []tf.Output) (o *tf.Operation) {
41590	if scope.Err() != nil {
41591		return
41592	}
41593	opspec := tf.OpSpec{
41594		Type: "SaveV2",
41595		Input: []tf.Input{
41596			prefix, tensor_names, shape_and_slices, tf.OutputList(tensors),
41597		},
41598	}
41599	return scope.AddOperation(opspec)
41600}
41601
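// Example (editorial sketch): saving two small tensors in full (empty slice
// specs). Assumes a `root` scope and the session plumbing shown earlier; the
// checkpoint prefix path is hypothetical.
//
// ```go
// prefix := op.Const(root.SubScope("prefix"), "/tmp/ckpt/model")
// names := op.Const(root.SubScope("names"), []string{"w", "b"})
// slices := op.Const(root.SubScope("slices"), []string{"", ""}) // "" => save in full
// w := op.Const(root.SubScope("w"), [][]float32{{1, 2}, {3, 4}})
// b := op.Const(root.SubScope("b"), []float32{0.5, 0.5})
// save := op.SaveV2(root, prefix, names, slices, []tf.Output{w, b})
// // SaveV2 returns a *tf.Operation, so run it as a target:
// //   _, err = sess.Run(nil, nil, []*tf.Operation{save})
// ```
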
41602// Outputs a `Summary` protocol buffer with scalar values.
41603//
41604// The input `tags` and `values` must have the same shape.  The generated summary
41605// has a summary value for each tag-value pair in `tags` and `values`.
41606//
41607// Arguments:
41608//
41609//	tags: Tags for the summary.
41610//	values: Same shape as `tags`.  Values for the summary.
41611//
41612// Returns Scalar.  Serialized `Summary` protocol buffer.
41613func ScalarSummary(scope *Scope, tags tf.Output, values tf.Output) (summary tf.Output) {
41614	if scope.Err() != nil {
41615		return
41616	}
41617	opspec := tf.OpSpec{
41618		Type: "ScalarSummary",
41619		Input: []tf.Input{
41620			tags, values,
41621		},
41622	}
41623	op := scope.AddOperation(opspec)
41624	return op.Output(0)
41625}
41626
41627// Scatters `updates` into a tensor of shape `shape` according to `indices`.
41628//
41629// Scatter sparse `updates` according to individual values at the specified
41630// `indices`. This op returns an output tensor with the `shape` you specify. This
41631// op is the inverse of the `tf.gather_nd` operator which extracts values or slices
41632// from a given tensor.
41633//
41634// This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor
41635// is zero-initialized. Calling `tf.scatter_nd(indices, updates, shape)`
41636// is identical to calling
41637// `tf.tensor_scatter_nd_add(tf.zeros(shape, updates.dtype), indices, updates)`
41638//
41639// If `indices` contains duplicates, the associated `updates` are accumulated
41640// (summed) into the output tensor.
41641//
41642// **WARNING**: For floating-point data types, the output may be nondeterministic.
41643// This is because the order in which the updates are applied is nondeterministic
41644// and when floating-point numbers are added in different orders the resulting
41645// numerical approximation error can be slightly different. However, the output
41646// will be deterministic if op determinism is enabled via
41647// `tf.config.experimental.enable_op_determinism`.
41648//
41649// `indices` is an integer tensor containing indices into the output tensor. The
41650// last dimension of `indices` can be at most the rank of `shape`:
41651//
41652//	indices.shape[-1] <= shape.rank
41653//
41654// The last dimension of `indices` corresponds to indices of elements
41655// (if `indices.shape[-1] = shape.rank`) or slices
41656// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
41657// `shape`.
41658//
41659// `updates` is a tensor with shape:
41660//
41661//	indices.shape[:-1] + shape[indices.shape[-1]:]
41662//
41663// The simplest form of the scatter op is to insert individual elements in
41664// a tensor by index. Consider an example where you want to insert 4 scattered
41665// elements in a rank-1 tensor with 8 elements.
41666//
41667// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
41668// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
41669// </div>
41670//
41671// In Python, this scatter operation would look like this:
41672//
41673// ```python
41674//
41675//	indices = tf.constant([[4], [3], [1], [7]])
41676//	updates = tf.constant([9, 10, 11, 12])
41677//	shape = tf.constant([8])
41678//	scatter = tf.scatter_nd(indices, updates, shape)
41679//	print(scatter)
41680//
41681// ```
41682//
41683// The resulting tensor would look like this:
41684//
41685//	[0, 11, 0, 10, 9, 0, 0, 12]
41686//
41687// You can also insert entire slices of a higher rank tensor all at once. For
41688// example, you can insert two slices in the first dimension of a rank-3 tensor
41689// with two matrices of new values.
41690//
41691// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
41692// <img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
41693// </div>
41694//
41695// In Python, this scatter operation would look like this:
41696//
41697// ```python
41698//
41699//	indices = tf.constant([[0], [2]])
41700//	updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
41701//	                        [7, 7, 7, 7], [8, 8, 8, 8]],
41702//	                       [[5, 5, 5, 5], [6, 6, 6, 6],
41703//	                        [7, 7, 7, 7], [8, 8, 8, 8]]])
41704//	shape = tf.constant([4, 4, 4])
41705//	scatter = tf.scatter_nd(indices, updates, shape)
41706//	print(scatter)
41707//
41708// ```
41709//
41710// The resulting tensor would look like this:
41711//
41712//	[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
41713//	 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
41714//	 [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
41715//	 [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
41716//
41717// Note that on CPU, if an out-of-bounds index is found, an error is returned.
41718// On GPU, if an out-of-bounds index is found, the index is ignored.
41719//
41720// Arguments:
41721//
41722//	indices: Tensor of indices.
41723//	updates: Values to scatter into the output tensor.
41724//	shape: 1-D. The shape of the output tensor.
41725//
41726// Returns A new tensor with the given shape and updates applied according
41727// to the indices.
41728func ScatterNd(scope *Scope, indices tf.Output, updates tf.Output, shape tf.Output) (output tf.Output) {
41729	if scope.Err() != nil {
41730		return
41731	}
41732	opspec := tf.OpSpec{
41733		Type: "ScatterNd",
41734		Input: []tf.Input{
41735			indices, updates, shape,
41736		},
41737	}
41738	op := scope.AddOperation(opspec)
41739	return op.Output(0)
41740}
41741
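// Example (editorial sketch): the first Python snippet above, translated to
// the Go API; assumes a `root` scope.
//
// ```go
// indices := op.Const(root.SubScope("idx"), [][]int32{{4}, {3}, {1}, {7}})
// updates := op.Const(root.SubScope("upd"), []int32{9, 10, 11, 12})
// shape := op.Const(root.SubScope("shape"), []int32{8})
// scatter := op.ScatterNd(root, indices, updates, shape)
// // => [0 11 0 10 9 0 0 12]
// ```
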
41742// Applies sparse addition to `input` using individual values or slices
41743//
41744// from `updates` according to indices `indices`.  The updates are non-aliasing:
41745// `input` is only modified in-place if no other operations will use it.
41746// Otherwise, a copy of `input` is made.  This operation has a gradient with
41747// respect to both `input` and `updates`.
41748//
41749// `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
41750//
41751// `indices` must be an integer tensor containing indices into `input`.
41752// It must be of shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`.
41753//
41754// The innermost dimension of `indices` (with length `K`) corresponds to
41755// indices into elements (if `K = P`) or `(P-K)`-dimensional slices
41756// (if `K < P`) along the `K`th dimension of `input`.
41757//
41758// `updates` is a `Tensor` of rank `Q-1+P-K` with shape:
41759//
41760// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
41761//
41762// For example, say we want to add 4 scattered elements to a rank-1 tensor with 8
41763// elements. In Python, that addition would look like this:
41764//
41765//	input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
41766//	indices = tf.constant([[4], [3], [1], [7]])
41767//	updates = tf.constant([9, 10, 11, 12])
41768//	output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
41769//	with tf.Session() as sess:
41770//	  print(sess.run(output))
41771//
41772// The resulting value `output` would look like this:
41773//
41774//	[1, 13, 3, 14, 14, 6, 7, 20]
41775//
41776// See `tf.scatter_nd` for more details about how to make updates to slices.
41777//
41778// Arguments:
41779//
41780//	input: A Tensor.
41781//	indices: A Tensor. Must be one of the following types: `int32`, `int64`.
41782//
41783// A tensor of indices into `input`.
41784//
41785//	updates: A Tensor. Must have the same type as `input`. A tensor of updated values
41786//
41787// to add to `input`.
41788//
41789// Returns A `Tensor` with the same shape as `input`, containing values of `input`
41790// updated with `updates`.
41791func ScatterNdNonAliasingAdd(scope *Scope, input tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
41792	if scope.Err() != nil {
41793		return
41794	}
41795	opspec := tf.OpSpec{
41796		Type: "ScatterNdNonAliasingAdd",
41797		Input: []tf.Input{
41798			input, indices, updates,
41799		},
41800	}
41801	op := scope.AddOperation(opspec)
41802	return op.Output(0)
41803}
41804
41805// Computes fingerprints of the input strings.
41806//
41807// Arguments:
41808//
41809//	input: vector of strings to compute fingerprints on.
41810//
41811// Returns a (N,2) shaped matrix where N is the number of elements in the input
41812// vector. Each row contains the low and high parts of the fingerprint.
41813func SdcaFprint(scope *Scope, input tf.Output) (output tf.Output) {
41814	if scope.Err() != nil {
41815		return
41816	}
41817	opspec := tf.OpSpec{
41818		Type: "SdcaFprint",
41819		Input: []tf.Input{
41820			input,
41821		},
41822	}
41823	op := scope.AddOperation(opspec)
41824	return op.Output(0)
41825}
41826
41827// SdcaOptimizerAttr is an optional argument to SdcaOptimizer.
41828type SdcaOptimizerAttr func(optionalAttr)
41829
41830// SdcaOptimizerAdaptative sets the optional adaptative attribute to value.
41831//
41832// value: Whether to use Adaptive SDCA for the inner loop.
41833// If not specified, defaults to true
41834func SdcaOptimizerAdaptative(value bool) SdcaOptimizerAttr {
41835	return func(m optionalAttr) {
41836		m["adaptative"] = value
41837	}
41838}
41839
41840// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
41841//
41842// linear models with L1 + L2 regularization. As the global optimization objective is
41843// strongly convex, the optimizer optimizes the dual objective at each step. The
41844// optimizer applies each update one example at a time. Examples are sampled
41845// uniformly, and the optimizer is learning-rate free and enjoys a linear convergence
41846// rate.
41847//
41848// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
41849// Shai Shalev-Shwartz, Tong Zhang. 2012
41850//
41851// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
41852//
41853// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
41854// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
41855// Peter Richtarik, Martin Takac. 2015
41856//
41857// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
41858// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
41859//
41860// Arguments:
41861//
41862//	sparse_example_indices: a list of vectors which contain example indices.
41863//	sparse_feature_indices: a list of vectors which contain feature indices.
41864//	sparse_feature_values: a list of vectors which contain the feature values
41865//
41866// associated with each feature group.
41867//
41868//	dense_features: a list of matrices which contain the dense feature values.
41869//	example_weights: a vector which contains the weight associated with each
41870//
41871// example.
41872//
41873//	example_labels: a vector which contains the label/target associated with each
41874//
41875// example.
41876//
41877//	sparse_indices: a list of vectors where each value is the indices which have
41878//
41879// corresponding weights in sparse_weights. This field may be omitted for the
41880// dense approach.
41881//
41882//	sparse_weights: a list of vectors where each value is the weight associated with
41883//
41884// a sparse feature group.
41885//
41886//	dense_weights: a list of vectors where the values are the weights associated
41887//
41888// with a dense feature group.
41889//
41890//	example_state_data: a list of vectors containing the example state data.
41891//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
41892//
41893// squared and hinge losses.
41894//
41895//	l1: Symmetric l1 regularization strength.
41896//	l2: Symmetric l2 regularization strength.
41897//	num_loss_partitions: Number of partitions of the global loss function.
41898//	num_inner_iterations: Number of iterations per mini-batch.
41899//
41900// Returns:
41901//
41902//	out_example_state_data: a list of vectors containing the updated example state
41903//
41904// data.
41905//
41906//	out_delta_sparse_weights: a list of vectors where each value is the delta
41907//
41908// weights associated with a sparse feature group.
41909//
41910//	out_delta_dense_weights: a list of vectors where the values are the delta
41911//
41912// weights associated with a dense feature group.
41913func SdcaOptimizer(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerAttr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
41914	if scope.Err() != nil {
41915		return
41916	}
41917	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
41918	for _, a := range optional {
41919		a(attrs)
41920	}
41921	opspec := tf.OpSpec{
41922		Type: "SdcaOptimizer",
41923		Input: []tf.Input{
41924			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
41925		},
41926		Attrs: attrs,
41927	}
41928	op := scope.AddOperation(opspec)
41929	if scope.Err() != nil {
41930		return
41931	}
41932	var idx int
41933	var err error
41934	out_example_state_data = op.Output(idx)
41935	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
41936		scope.UpdateErr("SdcaOptimizer", err)
41937		return
41938	}
41939	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
41940		scope.UpdateErr("SdcaOptimizer", err)
41941		return
41942	}
41943	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
41944}
41945
41946// SdcaOptimizerV2Attr is an optional argument to SdcaOptimizerV2.
41947type SdcaOptimizerV2Attr func(optionalAttr)
41948
41949// SdcaOptimizerV2Adaptive sets the optional adaptive attribute to value.
41950//
41951// value: Whether to use Adaptive SDCA for the inner loop.
41952// If not specified, defaults to true
41953func SdcaOptimizerV2Adaptive(value bool) SdcaOptimizerV2Attr {
41954	return func(m optionalAttr) {
41955		m["adaptive"] = value
41956	}
41957}
41958
41959// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
41960//
41961// linear models with L1 + L2 regularization. As the global optimization objective is
41962// strongly convex, the optimizer optimizes the dual objective at each step. The
41963// optimizer applies each update one example at a time. Examples are sampled
41964// uniformly, and the optimizer is learning-rate free and enjoys a linear convergence
41965// rate.
41966//
41967// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).<br>
41968// Shai Shalev-Shwartz, Tong Zhang. 2012
41969//
41970// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
41971//
41972// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).<br>
41973// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
41974// Peter Richtarik, Martin Takac. 2015
41975//
41976// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).<br>
41977// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
41978//
41979// Arguments:
41980//
41981//	sparse_example_indices: a list of vectors which contain example indices.
41982//	sparse_feature_indices: a list of vectors which contain feature indices.
41983//	sparse_feature_values: a list of vectors which contain the feature values
41984//
41985// associated with each feature group.
41986//
41987//	dense_features: a list of matrices which contain the dense feature values.
41988//	example_weights: a vector which contains the weight associated with each
41989//
41990// example.
41991//
41992//	example_labels: a vector which contains the label/target associated with each
41993//
41994// example.
41995//
41996//	sparse_indices: a list of vectors where each value is the indices which have
41997//
41998// corresponding weights in sparse_weights. This field may be omitted for the
41999// dense approach.
42000//
42001//	sparse_weights: a list of vectors where each value is the weight associated with
42002//
42003// a sparse feature group.
42004//
42005//	dense_weights: a list of vectors where the values are the weights associated
42006//
42007// with a dense feature group.
42008//
42009//	example_state_data: a list of vectors containing the example state data.
42010//	loss_type: Type of the primal loss. Currently SdcaSolver supports logistic,
42011//
42012// squared and hinge losses.
42013//
42014//	l1: Symmetric l1 regularization strength.
42015//	l2: Symmetric l2 regularization strength.
42016//	num_loss_partitions: Number of partitions of the global loss function.
42017//	num_inner_iterations: Number of iterations per mini-batch.
42018//
42019// Returns:
42020//
42021//	out_example_state_data: a list of vectors containing the updated example state
42022//
42023// data.
42024//
42025//	out_delta_sparse_weights: a list of vectors where each value is the delta
42026//
42027// weights associated with a sparse feature group.
42028//
42029//	out_delta_dense_weights: a list of vectors where the values are the delta
42030//
42031// weights associated with a dense feature group.
42032func SdcaOptimizerV2(scope *Scope, sparse_example_indices []tf.Output, sparse_feature_indices []tf.Output, sparse_feature_values []tf.Output, dense_features []tf.Output, example_weights tf.Output, example_labels tf.Output, sparse_indices []tf.Output, sparse_weights []tf.Output, dense_weights []tf.Output, example_state_data tf.Output, loss_type string, l1 float32, l2 float32, num_loss_partitions int64, num_inner_iterations int64, optional ...SdcaOptimizerV2Attr) (out_example_state_data tf.Output, out_delta_sparse_weights []tf.Output, out_delta_dense_weights []tf.Output) {
42033	if scope.Err() != nil {
42034		return
42035	}
42036	attrs := map[string]interface{}{"loss_type": loss_type, "l1": l1, "l2": l2, "num_loss_partitions": num_loss_partitions, "num_inner_iterations": num_inner_iterations}
42037	for _, a := range optional {
42038		a(attrs)
42039	}
42040	opspec := tf.OpSpec{
42041		Type: "SdcaOptimizerV2",
42042		Input: []tf.Input{
42043			tf.OutputList(sparse_example_indices), tf.OutputList(sparse_feature_indices), tf.OutputList(sparse_feature_values), tf.OutputList(dense_features), example_weights, example_labels, tf.OutputList(sparse_indices), tf.OutputList(sparse_weights), tf.OutputList(dense_weights), example_state_data,
42044		},
42045		Attrs: attrs,
42046	}
42047	op := scope.AddOperation(opspec)
42048	if scope.Err() != nil {
42049		return
42050	}
42051	var idx int
42052	var err error
42053	out_example_state_data = op.Output(idx)
42054	if out_delta_sparse_weights, idx, err = makeOutputList(op, idx, "out_delta_sparse_weights"); err != nil {
42055		scope.UpdateErr("SdcaOptimizerV2", err)
42056		return
42057	}
42058	if out_delta_dense_weights, idx, err = makeOutputList(op, idx, "out_delta_dense_weights"); err != nil {
42059		scope.UpdateErr("SdcaOptimizerV2", err)
42060		return
42061	}
42062	return out_example_state_data, out_delta_sparse_weights, out_delta_dense_weights
42063}
42064
42065// Computes the maximum along segments of a tensor.
42066//
42067// Read
42068// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
42069// for an explanation of segments.
42070//
42071// Computes a tensor such that
42072// \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
42073// that `segment_ids[j] == i`.
42074//
42075// If the max is empty for a given segment ID `i`, `output[i] = 0`.
42076//
42077// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
42078// and an error is thrown for indices that are not increasing. On GPU, this
42079// does not throw an error for unsorted indices. On GPU, out-of-order indices
42080// result in safe but unspecified behavior, which may include treating
42081// out-of-order indices as the same as a smaller following index.
42082//
42083// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
42084// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
42085// </div>
42086//
42087// For example:
42088//
42089// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
42090// >>> tf.math.segment_max(c, tf.constant([0, 0, 1])).numpy()
42091// array([[4, 3, 3, 4],
42092//
42093//	[5, 6, 7, 8]], dtype=int32)
42094//
42095// Arguments:
42096//
42097//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
42098//
42099// first dimension.  Values should be sorted and can be repeated.
42100//
42101// Caution: The values are always validated to be sorted on CPU, never validated
42102// on GPU.
42103//
42104// Returns Has same shape as data, except for dimension 0 which
42105// has size `k`, the number of segments.
42106func SegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
42107	if scope.Err() != nil {
42108		return
42109	}
42110	opspec := tf.OpSpec{
42111		Type: "SegmentMax",
42112		Input: []tf.Input{
42113			data, segment_ids,
42114		},
42115	}
42116	op := scope.AddOperation(opspec)
42117	return op.Output(0)
42118}
42119
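// Example (editorial sketch): the doctest above via the Go API; assumes a
// `root` scope. `segment_ids` must be sorted in increasing order.
//
// ```go
// c := op.Const(root.SubScope("c"), [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
// ids := op.Const(root.SubScope("ids"), []int32{0, 0, 1})
// m := op.SegmentMax(root, c, ids) // => [[4 3 3 4] [5 6 7 8]]
// ```
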
42120// Computes the mean along segments of a tensor.
42121//
42122// Read
42123// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
42124// for an explanation of segments.
42125//
42126// Computes a tensor such that
42127// \\(output_i = \frac{\sum_j data_j}{N}\\) where the mean is
42128// over `j` such that `segment_ids[j] == i` and `N` is the total number of
42129// values summed.
42130//
42131// If the mean is empty for a given segment ID `i`, `output[i] = 0`.
42132//
42133// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
42134// and an error is thrown for indices that are not increasing. On GPU, this
42135// does not throw an error for unsorted indices. On GPU, out-of-order indices
42136// result in safe but unspecified behavior, which may include treating
42137// out-of-order indices as a smaller following index when computing the numerator
42138// of the mean.
42139//
42140// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
42141// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
42142// </div>
42143//
42144// For example:
42145//
42146// >>> c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
42147// >>> tf.math.segment_mean(c, tf.constant([0, 0, 1])).numpy()
42148// array([[2.5, 2.5, 2.5, 2.5],
42149//        [5., 6., 7., 8.]], dtype=float32)
42151//
42152// Arguments:
42153//
42154//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
42155//
42156// first dimension.  Values should be sorted and can be repeated.
42157//
42158// Caution: The values are always validated to be sorted on CPU, never validated
42159// on GPU.
42160//
42161// Returns Has same shape as data, except for dimension 0 which
42162// has size `k`, the number of segments.
42163func SegmentMean(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
42164	if scope.Err() != nil {
42165		return
42166	}
42167	opspec := tf.OpSpec{
42168		Type: "SegmentMean",
42169		Input: []tf.Input{
42170			data, segment_ids,
42171		},
42172	}
42173	op := scope.AddOperation(opspec)
42174	return op.Output(0)
42175}
42176
42177// Computes the minimum along segments of a tensor.
42178//
42179// Read
42180// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
42181// for an explanation of segments.
42182//
42183// Computes a tensor such that
42184// \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
42185// that `segment_ids[j] == i`.
42186//
42187// If the min is empty for a given segment ID `i`, `output[i] = 0`.
42188//
42189// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
42190// and an error is thrown for indices that are not increasing. On GPU, this
42191// does not throw an error for unsorted indices. On GPU, out-of-order indices
42192// result in safe but unspecified behavior, which may include treating
42193// out-of-order indices as the same as a smaller following index.
42194//
42195// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
42196// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
42197// </div>
42198//
42199// For example:
42200//
42201// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
42202// >>> tf.math.segment_min(c, tf.constant([0, 0, 1])).numpy()
42203// array([[1, 2, 2, 1],
42204//        [5, 6, 7, 8]], dtype=int32)
42206//
42207// Arguments:
42208//
42209//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
42210//
42211// first dimension.  Values should be sorted and can be repeated.
42212//
42213// Caution: The values are always validated to be sorted on CPU, never validated
42214// on GPU.
42215//
42216// Returns Has same shape as data, except for dimension 0 which
42217// has size `k`, the number of segments.
42218func SegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
42219	if scope.Err() != nil {
42220		return
42221	}
42222	opspec := tf.OpSpec{
42223		Type: "SegmentMin",
42224		Input: []tf.Input{
42225			data, segment_ids,
42226		},
42227	}
42228	op := scope.AddOperation(opspec)
42229	return op.Output(0)
42230}
42231
42232// Computes the product along segments of a tensor.
42233//
42234// Read
42235// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
42236// for an explanation of segments.
42237//
42238// Computes a tensor such that
42239// \\(output_i = \prod_j data_j\\) where the product is over `j` such
42240// that `segment_ids[j] == i`.
42241//
42242// If the product is empty for a given segment ID `i`, `output[i] = 1`.
42243//
42244// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
42245// and an error is thrown for indices that are not increasing. On GPU, this
42246// does not throw an error for unsorted indices. On GPU, out-of-order indices
42247// result in safe but unspecified behavior, which may include treating
42248// out-of-order indices as the same as a smaller following index.
42249//
42250// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
42251// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
42252// </div>
42253//
42254// For example:
42255//
42256// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
42257// >>> tf.math.segment_prod(c, tf.constant([0, 0, 1])).numpy()
42258// array([[4, 6, 6, 4],
42259//        [5, 6, 7, 8]], dtype=int32)
42261//
42262// Arguments:
42263//
42264//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
42265//
42266// first dimension.  Values should be sorted and can be repeated.
42267//
42268// Caution: The values are always validated to be sorted on CPU, never validated
42269// on GPU.
42270//
42271// Returns Has same shape as data, except for dimension 0 which
42272// has size `k`, the number of segments.
42273func SegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
42274	if scope.Err() != nil {
42275		return
42276	}
42277	opspec := tf.OpSpec{
42278		Type: "SegmentProd",
42279		Input: []tf.Input{
42280			data, segment_ids,
42281		},
42282	}
42283	op := scope.AddOperation(opspec)
42284	return op.Output(0)
42285}
42286
42287// Computes the sum along segments of a tensor.
42288//
42289// Read
42290// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
42291// for an explanation of segments.
42292//
42293// Computes a tensor such that
42294// \\(output_i = \sum_j data_j\\) where the sum is over `j` such
42295// that `segment_ids[j] == i`.
42296//
42297// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
42298//
42299// Caution: On CPU, values in `segment_ids` are always validated to be sorted,
42300// and an error is thrown for indices that are not increasing. On GPU, this
42301// does not throw an error for unsorted indices. On GPU, out-of-order indices
42302// result in safe but unspecified behavior, which may include treating
42303// out-of-order indices as the same as a smaller following index.
42304//
42305// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
42306// <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
42307// </div>
42308//
42309// For example:
42310//
42311// >>> c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
42312// >>> tf.math.segment_sum(c, tf.constant([0, 0, 1])).numpy()
42313// array([[5, 5, 5, 5],
42314//        [5, 6, 7, 8]], dtype=int32)
42316//
42317// Arguments:
42318//
42319//	segment_ids: A 1-D tensor whose size is equal to the size of `data`'s
42320//
42321// first dimension.  Values should be sorted and can be repeated.
42322//
42323// Caution: The values are always validated to be sorted on CPU, never validated
42324// on GPU.
42325//
42326// Returns Has same shape as data, except for dimension 0 which
42327// has size `k`, the number of segments.
42328func SegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output) (output tf.Output) {
42329	if scope.Err() != nil {
42330		return
42331	}
42332	opspec := tf.OpSpec{
42333		Type: "SegmentSum",
42334		Input: []tf.Input{
42335			data, segment_ids,
42336		},
42337	}
42338	op := scope.AddOperation(opspec)
42339	return op.Output(0)
42340}
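
// exampleSegmentSumRun is a hand-written, end-to-end sketch, not generator
// output. It assumes this package's NewScope/Const/Finalize helpers and the
// tf.NewSession / Session.Run API from the parent tensorflow package.
func exampleSegmentSumRun() ([][]int32, error) {
	s := NewScope()
	data := Const(s.SubScope("data"), [][]int32{{1, 2, 3, 4}, {4, 3, 2, 1}, {5, 6, 7, 8}})
	ids := Const(s.SubScope("ids"), []int32{0, 0, 1})
	sum := SegmentSum(s, data, ids)
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	fetched, err := sess.Run(nil, []tf.Output{sum}, nil)
	if err != nil {
		return nil, err
	}
	// Expected value: [[5, 5, 5, 5], [5, 6, 7, 8]], as in the example above.
	return fetched[0].Value().([][]int32), nil
}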
42341
42342// Selects elements from `x` or `y`, depending on `condition`.
42343//
42344// The `x` and `y` tensors must have the same shape, and the
42345// output will also have that shape.
42346//
42347// The `condition` tensor must be a scalar if `x` and `y` are scalars.
42348// If `x` and `y` are vectors or higher rank, then `condition` must be either a
42349// scalar, a vector with size matching the first dimension of `x`, or a tensor
42350// with the same shape as `x`.
42351//
42352// The `condition` tensor acts as a mask that chooses, based on the value at each
42353// element, whether the corresponding element / row in the output should be
42354// taken from `x` (if true) or `y` (if false).
42355//
42356// If `condition` is a vector and `x` and `y` are higher rank matrices, then
42357// it chooses which row (outer dimension) to copy from `x` and `y`.
42358// If `condition` has the same shape as `x` and `y`, then it chooses which
42359// element to copy from `x` and `y`.
42360//
42361// For example:
42362//
42363// ```python
42364// # 'condition' tensor is [[True,  False]
42365// #                        [False, True]]
42366// # 't' is [[1, 2],
42367// #         [3, 4]]
42368// # 'e' is [[5, 6],
42369// #         [7, 8]]
42370// select(condition, t, e)  # => [[1, 6], [7, 4]]
42371//
42372// # 'condition' tensor is [True, False]
42373// # 't' is [[1, 2],
42374// #         [3, 4]]
42375// # 'e' is [[5, 6],
42376// #         [7, 8]]
42377// select(condition, t, e) ==> [[1, 2],
42378//                              [7, 8]]
42380//
42381// ```
42382//
42383// Arguments:
42384//
42385//	x: A `Tensor` which may have the same shape as `condition`.
42386//	If `condition` is rank 1, `x` may have higher rank, but its first
42387//	dimension must match the size of `condition`.
42388//
42389//	y: A `Tensor` with the same type and shape as `x`.
42391//
42392// Returns a `Tensor` with the same type and shape as `x` and `y`.
42393func Select(scope *Scope, condition tf.Output, x tf.Output, y tf.Output) (output tf.Output) {
42394	if scope.Err() != nil {
42395		return
42396	}
42397	opspec := tf.OpSpec{
42398		Type: "Select",
42399		Input: []tf.Input{
42400			condition, x, y,
42401		},
42402	}
42403	op := scope.AddOperation(opspec)
42404	return op.Output(0)
42405}
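
// exampleSelectSketch is a hand-written usage sketch, not generator output.
// With a rank-1 condition, Select picks whole rows: row 0 from x, row 1 from
// y, mirroring the second python example above.
func exampleSelectSketch(s *Scope) tf.Output {
	cond := Const(s.SubScope("cond"), []bool{true, false})
	x := Const(s.SubScope("x"), [][]int32{{1, 2}, {3, 4}})
	y := Const(s.SubScope("y"), [][]int32{{5, 6}, {7, 8}})
	// Evaluates to [[1, 2], [7, 8]].
	return Select(s, cond, x, y)
}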
42406
42407// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
42408//
42409// DEPRECATED at GraphDef version 11: Use SelfAdjointEigV2 instead.
42410//
42411// The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
42412// form square matrices, with the same constraints as the single matrix
42413// SelfAdjointEig.
42414//
42415// The result is a [..., M+1, M] matrix with [..., 0,:] containing the
42416// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues
42417// are sorted in non-decreasing order.
42418//
42419// Arguments:
42420//
42421//	input: Shape is `[..., M, M]`.
42422//
42423// Returns Shape is `[..., M+1, M]`.
42424func SelfAdjointEig(scope *Scope, input tf.Output) (output tf.Output) {
42425	if scope.Err() != nil {
42426		return
42427	}
42428	opspec := tf.OpSpec{
42429		Type: "SelfAdjointEig",
42430		Input: []tf.Input{
42431			input,
42432		},
42433	}
42434	op := scope.AddOperation(opspec)
42435	return op.Output(0)
42436}
42437
42438// SelfAdjointEigV2Attr is an optional argument to SelfAdjointEigV2.
42439type SelfAdjointEigV2Attr func(optionalAttr)
42440
42441// SelfAdjointEigV2ComputeV sets the optional compute_v attribute to value.
42442//
42443// value: If `True` then eigenvectors will be computed and returned in `v`.
42444// Otherwise, only the eigenvalues will be computed.
42445// If not specified, defaults to true
42446func SelfAdjointEigV2ComputeV(value bool) SelfAdjointEigV2Attr {
42447	return func(m optionalAttr) {
42448		m["compute_v"] = value
42449	}
42450}
42451
42452// Computes the eigen decomposition of one or more square self-adjoint matrices.
42453//
42454// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
42455// `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
42456// are sorted in non-decreasing order.
42457//
42458// ```python
42459// # a is a tensor.
42460// # e is a tensor of eigenvalues.
42461// # v is a tensor of eigenvectors.
42462// e, v = self_adjoint_eig(a)
42463// e = self_adjoint_eig(a, compute_v=False)
42464// ```
42465//
42466// Arguments:
42467//
42468//	input: `Tensor` input of shape `[N, N]`.
42469//
42470// Returns:
42471//
42472//	e: Eigenvalues. Shape is `[N]`.
42473//	v: Eigenvectors. Shape is `[N, N]`.
42474func SelfAdjointEigV2(scope *Scope, input tf.Output, optional ...SelfAdjointEigV2Attr) (e tf.Output, v tf.Output) {
42475	if scope.Err() != nil {
42476		return
42477	}
42478	attrs := map[string]interface{}{}
42479	for _, a := range optional {
42480		a(attrs)
42481	}
42482	opspec := tf.OpSpec{
42483		Type: "SelfAdjointEigV2",
42484		Input: []tf.Input{
42485			input,
42486		},
42487		Attrs: attrs,
42488	}
42489	op := scope.AddOperation(opspec)
42490	return op.Output(0), op.Output(1)
42491}
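
// exampleSelfAdjointEigV2Sketch is a hand-written usage sketch, not
// generator output. It decomposes a 2x2 symmetric matrix; passing the
// functional option SelfAdjointEigV2ComputeV(false) would skip eigenvector
// computation when only eigenvalues are needed.
func exampleSelfAdjointEigV2Sketch(s *Scope) (e, v tf.Output) {
	a := Const(s, [][]float32{{2, 1}, {1, 2}}) // self-adjoint input
	// Eigenvalues, in non-decreasing order: [1, 3].
	return SelfAdjointEigV2(s, a)
}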
42492
42493// Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
42494// if `features < 0`, `scale * features` otherwise.
42496//
42497// To be used together with
42498// `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
42499// For correct dropout, use `tf.contrib.nn.alpha_dropout`.
42500//
42501// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
42502func Selu(scope *Scope, features tf.Output) (activations tf.Output) {
42503	if scope.Err() != nil {
42504		return
42505	}
42506	opspec := tf.OpSpec{
42507		Type: "Selu",
42508		Input: []tf.Input{
42509			features,
42510		},
42511	}
42512	op := scope.AddOperation(opspec)
42513	return op.Output(0)
42514}
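
// exampleSeluSketch is a hand-written usage sketch, not generator output.
// SELU uses fixed constants (scale ~ 1.0507, alpha ~ 1.6733), so negative
// inputs map to scale*alpha*(exp(x)-1) and non-negative inputs to scale*x.
func exampleSeluSketch(s *Scope) tf.Output {
	x := Const(s, []float32{-1, 0, 1})
	// Evaluates to approximately [-1.1113, 0, 1.0507].
	return Selu(s, x)
}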
42515
42516// Computes gradients for the scaled exponential linear (Selu) operation.
42517//
42518// Arguments:
42519//
42520//	gradients: The backpropagated gradients to the corresponding Selu operation.
42521//	outputs: The outputs of the corresponding Selu operation.
42522//
42523// Returns The gradients: `gradients * (outputs + scale * alpha)`
42524// if outputs < 0, `scale * gradients` otherwise.
42525func SeluGrad(scope *Scope, gradients tf.Output, outputs tf.Output) (backprops tf.Output) {
42526	if scope.Err() != nil {
42527		return
42528	}
42529	opspec := tf.OpSpec{
42530		Type: "SeluGrad",
42531		Input: []tf.Input{
42532			gradients, outputs,
42533		},
42534	}
42535	op := scope.AddOperation(opspec)
42536	return op.Output(0)
42537}
42538
42539// SendAttr is an optional argument to Send.
42540type SendAttr func(optionalAttr)
42541
42542// SendClientTerminated sets the optional client_terminated attribute to value.
42543//
42544// value: If set to true, this indicates that the node was added
42545// to the graph as a result of a client-side feed or fetch of Tensor data,
42546// in which case the corresponding send or recv is expected to be managed
42547// locally by the caller.
42548// If not specified, defaults to false
42549func SendClientTerminated(value bool) SendAttr {
42550	return func(m optionalAttr) {
42551		m["client_terminated"] = value
42552	}
42553}
42554
42555// Sends the named tensor from send_device to recv_device.
42556//
42557// Arguments:
42558//
42559//	tensor: The tensor to send.
42560//	tensor_name: The name of the tensor to send.
42561//	send_device: The name of the device sending the tensor.
42562//	send_device_incarnation: The current incarnation of send_device.
42563//	recv_device: The name of the device receiving the tensor.
42564//
42565// Returns the created operation.
42566func Send(scope *Scope, tensor tf.Output, tensor_name string, send_device string, send_device_incarnation int64, recv_device string, optional ...SendAttr) (o *tf.Operation) {
42567	if scope.Err() != nil {
42568		return
42569	}
42570	attrs := map[string]interface{}{"tensor_name": tensor_name, "send_device": send_device, "send_device_incarnation": send_device_incarnation, "recv_device": recv_device}
42571	for _, a := range optional {
42572		a(attrs)
42573	}
42574	opspec := tf.OpSpec{
42575		Type: "Send",
42576		Input: []tf.Input{
42577			tensor,
42578		},
42579		Attrs: attrs,
42580	}
42581	return scope.AddOperation(opspec)
42582}
42583
42584// Performs gradient updates of embedding tables.
42585//
42586// Arguments:
42587//
42588//	inputs: A TensorList of gradients with which to update embedding tables.
42589//
42590// This argument has the same length and shapes as the return value of
42591// RecvTPUEmbeddingActivations, but contains gradients of the model's loss
42592// with respect to the embedding activations. The embedding tables are updated
42593// from these gradients via the optimizer specified in the TPU embedding
42594// configuration given to tpu.initialize_system.
42595//
42596//	learning_rates: A TensorList of float32 scalars, one for each dynamic learning
42597//
42598// rate tag: see the comments in
42599// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
42600// Multiple tables can share the same dynamic learning rate tag as specified
42601// in the configuration. If the learning rates for all tables are constant,
42602// this list should be empty.
42603//
42604//	config: Serialized TPUEmbeddingConfiguration proto.
42605//
42606// Returns the created operation.
42607func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation) {
42608	if scope.Err() != nil {
42609		return
42610	}
42611	attrs := map[string]interface{}{"config": config}
42612	opspec := tf.OpSpec{
42613		Type: "SendTPUEmbeddingGradients",
42614		Input: []tf.Input{
42615			tf.OutputList(inputs), tf.OutputList(learning_rates),
42616		},
42617		Attrs: attrs,
42618	}
42619	return scope.AddOperation(opspec)
42620}
42621
42622// SerializeIteratorAttr is an optional argument to SerializeIterator.
42623type SerializeIteratorAttr func(optionalAttr)
42624
42625// SerializeIteratorExternalStatePolicy sets the optional external_state_policy attribute to value.
42626// If not specified, defaults to 0
42627func SerializeIteratorExternalStatePolicy(value int64) SerializeIteratorAttr {
42628	return func(m optionalAttr) {
42629		m["external_state_policy"] = value
42630	}
42631}
42632
42633// Converts the given `resource_handle` representing an iterator to a variant tensor.
42634//
42635// Arguments:
42636//
42637//	resource_handle: A handle to an iterator resource.
42638//
42639// Returns A variant tensor storing the state of the iterator contained in the
42640// resource.
42641func SerializeIterator(scope *Scope, resource_handle tf.Output, optional ...SerializeIteratorAttr) (serialized tf.Output) {
42642	if scope.Err() != nil {
42643		return
42644	}
42645	attrs := map[string]interface{}{}
42646	for _, a := range optional {
42647		a(attrs)
42648	}
42649	opspec := tf.OpSpec{
42650		Type: "SerializeIterator",
42651		Input: []tf.Input{
42652			resource_handle,
42653		},
42654		Attrs: attrs,
42655	}
42656	op := scope.AddOperation(opspec)
42657	return op.Output(0)
42658}
42659
42660// SerializeManySparseAttr is an optional argument to SerializeManySparse.
42661type SerializeManySparseAttr func(optionalAttr)
42662
42663// SerializeManySparseOutType sets the optional out_type attribute to value.
42664//
42665// value: The `dtype` to use for serialization; the supported types are `string`
42666// (default) and `variant`.
42667// If not specified, defaults to DT_STRING
42668func SerializeManySparseOutType(value tf.DataType) SerializeManySparseAttr {
42669	return func(m optionalAttr) {
42670		m["out_type"] = value
42671	}
42672}
42673
42674// Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.
42675//
42676// The `SparseTensor` must have rank `R` greater than 1, and the first dimension
42677// is treated as the minibatch dimension.  Elements of the `SparseTensor`
42678// must be sorted in increasing order of this first dimension.  The serialized
42679// `SparseTensor` objects going into each row of `serialized_sparse` will have
42680// rank `R-1`.
42681//
42682// The minibatch size `N` is extracted from `sparse_shape[0]`.
42683//
42684// Arguments:
42685//
42686//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
42687//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
42688//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
42689func SerializeManySparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeManySparseAttr) (serialized_sparse tf.Output) {
42690	if scope.Err() != nil {
42691		return
42692	}
42693	attrs := map[string]interface{}{}
42694	for _, a := range optional {
42695		a(attrs)
42696	}
42697	opspec := tf.OpSpec{
42698		Type: "SerializeManySparse",
42699		Input: []tf.Input{
42700			sparse_indices, sparse_values, sparse_shape,
42701		},
42702		Attrs: attrs,
42703	}
42704	op := scope.AddOperation(opspec)
42705	return op.Output(0)
42706}
42707
42708// SerializeSparseAttr is an optional argument to SerializeSparse.
42709type SerializeSparseAttr func(optionalAttr)
42710
42711// SerializeSparseOutType sets the optional out_type attribute to value.
42712//
42713// value: The `dtype` to use for serialization; the supported types are `string`
42714// (default) and `variant`.
42715// If not specified, defaults to DT_STRING
42716func SerializeSparseOutType(value tf.DataType) SerializeSparseAttr {
42717	return func(m optionalAttr) {
42718		m["out_type"] = value
42719	}
42720}
42721
42722// Serialize a `SparseTensor` into a `[3]` `Tensor` object.
42723//
42724// Arguments:
42725//
42726//	sparse_indices: 2-D.  The `indices` of the `SparseTensor`.
42727//	sparse_values: 1-D.  The `values` of the `SparseTensor`.
42728//	sparse_shape: 1-D.  The `shape` of the `SparseTensor`.
42729func SerializeSparse(scope *Scope, sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output, optional ...SerializeSparseAttr) (serialized_sparse tf.Output) {
42730	if scope.Err() != nil {
42731		return
42732	}
42733	attrs := map[string]interface{}{}
42734	for _, a := range optional {
42735		a(attrs)
42736	}
42737	opspec := tf.OpSpec{
42738		Type: "SerializeSparse",
42739		Input: []tf.Input{
42740			sparse_indices, sparse_values, sparse_shape,
42741		},
42742		Attrs: attrs,
42743	}
42744	op := scope.AddOperation(opspec)
42745	return op.Output(0)
42746}
42747
42748// Transforms a Tensor into a serialized TensorProto proto.
42749//
42750// Arguments:
42751//
42752//	tensor: A Tensor of type `T`.
42753//
42754// Returns A serialized TensorProto proto of the input tensor.
42755func SerializeTensor(scope *Scope, tensor tf.Output) (serialized tf.Output) {
42756	if scope.Err() != nil {
42757		return
42758	}
42759	opspec := tf.OpSpec{
42760		Type: "SerializeTensor",
42761		Input: []tf.Input{
42762			tensor,
42763		},
42764	}
42765	op := scope.AddOperation(opspec)
42766	return op.Output(0)
42767}
42768
42769// SetSizeAttr is an optional argument to SetSize.
42770type SetSizeAttr func(optionalAttr)
42771
42772// SetSizeValidateIndices sets the optional validate_indices attribute to value.
42773// If not specified, defaults to true
42774func SetSizeValidateIndices(value bool) SetSizeAttr {
42775	return func(m optionalAttr) {
42776		m["validate_indices"] = value
42777	}
42778}
42779
42780// Number of unique elements along last dimension of input `set`.
42781//
42782// Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
42783// and `set_shape`. The last dimension contains values in a set, duplicates are
42784// allowed but ignored.
42785//
42786// If `validate_indices` is `True`, this op validates the order and range of `set`
42787// indices.
42788//
42789// Arguments:
42790//
42791//	set_indices: 2D `Tensor`, indices of a `SparseTensor`.
42792//	set_values: 1D `Tensor`, values of a `SparseTensor`.
42793//	set_shape: 1D `Tensor`, shape of a `SparseTensor`.
42794//
42795// Returns For `set` ranked `n`, this is a `Tensor` with rank `n-1`, and the same 1st
42796// `n-1` dimensions as `set`. Each value is the number of unique elements in
42797// the corresponding `[0...n-1]` dimension of `set`.
42798func SetSize(scope *Scope, set_indices tf.Output, set_values tf.Output, set_shape tf.Output, optional ...SetSizeAttr) (size tf.Output) {
42799	if scope.Err() != nil {
42800		return
42801	}
42802	attrs := map[string]interface{}{}
42803	for _, a := range optional {
42804		a(attrs)
42805	}
42806	opspec := tf.OpSpec{
42807		Type: "SetSize",
42808		Input: []tf.Input{
42809			set_indices, set_values, set_shape,
42810		},
42811		Attrs: attrs,
42812	}
42813	op := scope.AddOperation(opspec)
42814	return op.Output(0)
42815}
42816
42817// ShapeAttr is an optional argument to Shape.
42818type ShapeAttr func(optionalAttr)
42819
42820// ShapeOutType sets the optional out_type attribute to value.
42821// If not specified, defaults to DT_INT32
42822func ShapeOutType(value tf.DataType) ShapeAttr {
42823	return func(m optionalAttr) {
42824		m["out_type"] = value
42825	}
42826}
42827
42828// Returns the shape of a tensor.
42829//
42830// This operation returns a 1-D integer tensor representing the shape of `input`.
42831//
42832// For example:
42833//
42834// ```
42835// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
42836// shape(t) ==> [2, 2, 3]
42837// ```
42838func Shape(scope *Scope, input tf.Output, optional ...ShapeAttr) (output tf.Output) {
42839	if scope.Err() != nil {
42840		return
42841	}
42842	attrs := map[string]interface{}{}
42843	for _, a := range optional {
42844		a(attrs)
42845	}
42846	opspec := tf.OpSpec{
42847		Type: "Shape",
42848		Input: []tf.Input{
42849			input,
42850		},
42851		Attrs: attrs,
42852	}
42853	op := scope.AddOperation(opspec)
42854	return op.Output(0)
42855}
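
// exampleShapeSketch is a hand-written usage sketch, not generator output.
// It also shows the functional-option pattern used by every *Attr helper in
// this package: ShapeOutType requests an int64 shape vector.
func exampleShapeSketch(s *Scope) tf.Output {
	t := Const(s, [][][]int32{{{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}})
	// Evaluates to [2, 2, 3], as in the example above.
	return Shape(s, t, ShapeOutType(tf.Int64))
}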
42856
42857// ShapeNAttr is an optional argument to ShapeN.
42858type ShapeNAttr func(optionalAttr)
42859
42860// ShapeNOutType sets the optional out_type attribute to value.
42861// If not specified, defaults to DT_INT32
42862func ShapeNOutType(value tf.DataType) ShapeNAttr {
42863	return func(m optionalAttr) {
42864		m["out_type"] = value
42865	}
42866}
42867
42868// Returns the shapes of tensors.
42869//
42870// This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
42871func ShapeN(scope *Scope, input []tf.Output, optional ...ShapeNAttr) (output []tf.Output) {
42872	if scope.Err() != nil {
42873		return
42874	}
42875	attrs := map[string]interface{}{}
42876	for _, a := range optional {
42877		a(attrs)
42878	}
42879	opspec := tf.OpSpec{
42880		Type: "ShapeN",
42881		Input: []tf.Input{
42882			tf.OutputList(input),
42883		},
42884		Attrs: attrs,
42885	}
42886	op := scope.AddOperation(opspec)
42887	if scope.Err() != nil {
42888		return
42889	}
42890	var idx int
42891	var err error
42892	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
42893		scope.UpdateErr("ShapeN", err)
42894		return
42895	}
42896	return output
42897}
42898
42899// ShardDatasetAttr is an optional argument to ShardDataset.
42900type ShardDatasetAttr func(optionalAttr)
42901
42902// ShardDatasetRequireNonEmpty sets the optional require_non_empty attribute to value.
42903// If not specified, defaults to false
42904func ShardDatasetRequireNonEmpty(value bool) ShardDatasetAttr {
42905	return func(m optionalAttr) {
42906		m["require_non_empty"] = value
42907	}
42908}
42909
42910// ShardDatasetMetadata sets the optional metadata attribute to value.
42911// If not specified, defaults to ""
42912func ShardDatasetMetadata(value string) ShardDatasetAttr {
42913	return func(m optionalAttr) {
42914		m["metadata"] = value
42915	}
42916}
42917
42918// Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
42919//
42920// Arguments:
42921//
42922//	num_shards: An integer representing the number of shards operating in parallel.
42923//	index: An integer representing the current worker index.
42924func ShardDataset(scope *Scope, input_dataset tf.Output, num_shards tf.Output, index tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShardDatasetAttr) (handle tf.Output) {
42925	if scope.Err() != nil {
42926		return
42927	}
42928	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
42929	for _, a := range optional {
42930		a(attrs)
42931	}
42932	opspec := tf.OpSpec{
42933		Type: "ShardDataset",
42934		Input: []tf.Input{
42935			input_dataset, num_shards, index,
42936		},
42937		Attrs: attrs,
42938	}
42939	op := scope.AddOperation(opspec)
42940	return op.Output(0)
42941}
42942
42943// Generate a sharded filename. The filename is printf formatted as
42944//
42945//	%s-%05d-of-%05d, basename, shard, num_shards.
42946func ShardedFilename(scope *Scope, basename tf.Output, shard tf.Output, num_shards tf.Output) (filename tf.Output) {
42947	if scope.Err() != nil {
42948		return
42949	}
42950	opspec := tf.OpSpec{
42951		Type: "ShardedFilename",
42952		Input: []tf.Input{
42953			basename, shard, num_shards,
42954		},
42955	}
42956	op := scope.AddOperation(opspec)
42957	return op.Output(0)
42958}
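
// exampleShardedFilenameSketch is a hand-written usage sketch, not generator
// output, formatting shard 2 of 10 for a hypothetical checkpoint basename.
func exampleShardedFilenameSketch(s *Scope) tf.Output {
	basename := Const(s.SubScope("base"), "model.ckpt")
	shard := Const(s.SubScope("shard"), int32(2))
	num := Const(s.SubScope("num"), int32(10))
	// Evaluates to "model.ckpt-00002-of-00010".
	return ShardedFilename(s, basename, shard, num)
}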
42959
42960// Generate a glob pattern matching all sharded file names.
42961func ShardedFilespec(scope *Scope, basename tf.Output, num_shards tf.Output) (filename tf.Output) {
42962	if scope.Err() != nil {
42963		return
42964	}
42965	opspec := tf.OpSpec{
42966		Type: "ShardedFilespec",
42967		Input: []tf.Input{
42968			basename, num_shards,
42969		},
42970	}
42971	op := scope.AddOperation(opspec)
42972	return op.Output(0)
42973}
42974
42975// ShuffleAndRepeatDatasetAttr is an optional argument to ShuffleAndRepeatDataset.
42976type ShuffleAndRepeatDatasetAttr func(optionalAttr)
42977
42978// ShuffleAndRepeatDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
42979// If not specified, defaults to true
42980func ShuffleAndRepeatDatasetReshuffleEachIteration(value bool) ShuffleAndRepeatDatasetAttr {
42981	return func(m optionalAttr) {
42982		m["reshuffle_each_iteration"] = value
42983	}
42984}
42985
42986// ShuffleAndRepeatDatasetMetadata sets the optional metadata attribute to value.
42987// If not specified, defaults to ""
42988func ShuffleAndRepeatDatasetMetadata(value string) ShuffleAndRepeatDatasetAttr {
42989	return func(m optionalAttr) {
42990		m["metadata"] = value
42991	}
42992}
42993
42994// Creates a dataset that shuffles and repeats elements from `input_dataset` pseudorandomly.
42997//
42998// Arguments:
42999//
43000//	buffer_size: The number of output elements to buffer in an iterator over
43001//
43002// this dataset. Compare with the `min_after_dequeue` attr when creating a
43003// `RandomShuffleQueue`.
43004//
43005//	seed: A scalar seed for the random number generator. If either `seed` or
43006//
43007// `seed2` is set to be non-zero, the random number generator is seeded
43008// by the given seed.  Otherwise, a random seed is used.
43009//
43010//	seed2: A second scalar seed to avoid seed collision.
43011//	count: A scalar representing the number of times the underlying dataset
43012//
43013// should be repeated. The default is `-1`, which results in infinite repetition.
43014func ShuffleAndRepeatDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleAndRepeatDatasetAttr) (handle tf.Output) {
43015	if scope.Err() != nil {
43016		return
43017	}
43018	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
43019	for _, a := range optional {
43020		a(attrs)
43021	}
43022	opspec := tf.OpSpec{
43023		Type: "ShuffleAndRepeatDataset",
43024		Input: []tf.Input{
43025			input_dataset, buffer_size, seed, seed2, count,
43026		},
43027		Attrs: attrs,
43028	}
43029	op := scope.AddOperation(opspec)
43030	return op.Output(0)
43031}
43032
43033// ShuffleDatasetAttr is an optional argument to ShuffleDataset.
43034type ShuffleDatasetAttr func(optionalAttr)
43035
43036// ShuffleDatasetReshuffleEachIteration sets the optional reshuffle_each_iteration attribute to value.
43037//
43038// value: If true, each iterator over this dataset will be given
43039// a different pseudorandomly generated seed, based on a sequence seeded by the
43040// `seed` and `seed2` inputs. If false, each iterator will be given the same
43041// seed, and repeated iteration over this dataset will yield the exact same
43042// sequence of results.
43043// If not specified, defaults to true
43044func ShuffleDatasetReshuffleEachIteration(value bool) ShuffleDatasetAttr {
43045	return func(m optionalAttr) {
43046		m["reshuffle_each_iteration"] = value
43047	}
43048}
43049
43050// ShuffleDatasetMetadata sets the optional metadata attribute to value.
43051// If not specified, defaults to ""
43052func ShuffleDatasetMetadata(value string) ShuffleDatasetAttr {
43053	return func(m optionalAttr) {
43054		m["metadata"] = value
43055	}
43056}
43057
43058// Creates a dataset that shuffles elements from `input_dataset` pseudorandomly.
43059//
43060// Arguments:
43061//
43062//	buffer_size: The number of output elements to buffer in an iterator over
43063//
43064// this dataset. Compare with the `min_after_dequeue` attr when creating a
43065// `RandomShuffleQueue`.
43066//
43067//	seed: A scalar seed for the random number generator. If either `seed` or
43068//
43069// `seed2` is set to be non-zero, the random number generator is seeded
43070// by the given seed.  Otherwise, a random seed is used.
43071//
43072//	seed2: A second scalar seed to avoid seed collision.
43073func ShuffleDataset(scope *Scope, input_dataset tf.Output, buffer_size tf.Output, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ShuffleDatasetAttr) (handle tf.Output) {
43074	if scope.Err() != nil {
43075		return
43076	}
43077	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
43078	for _, a := range optional {
43079		a(attrs)
43080	}
43081	opspec := tf.OpSpec{
43082		Type: "ShuffleDataset",
43083		Input: []tf.Input{
43084			input_dataset, buffer_size, seed, seed2,
43085		},
43086		Attrs: attrs,
43087	}
43088	op := scope.AddOperation(opspec)
43089	return op.Output(0)
43090}
43091
43092// Shuts down a running distributed TPU system.
43093//
43094// The op returns an error if no system is running.
43095//
43096// Returns the created operation.
43097func ShutdownDistributedTPU(scope *Scope) (o *tf.Operation) {
43098	if scope.Err() != nil {
43099		return
43100	}
43101	opspec := tf.OpSpec{
43102		Type: "ShutdownDistributedTPU",
43103	}
43104	return scope.AddOperation(opspec)
43105}
43106
43107// An op that shuts down the TPU system.
43108//
43109// Returns A boolean that indicates whether the shutdown succeeded.
43110func ShutdownTPUSystem(scope *Scope) (success tf.Output) {
43111	if scope.Err() != nil {
43112		return
43113	}
43114	opspec := tf.OpSpec{
43115		Type: "ShutdownTPUSystem",
43116	}
43117	op := scope.AddOperation(opspec)
43118	return op.Output(0)
43119}
43120
43121// Computes sigmoid of `x` element-wise.
43122//
43123// Specifically, `y = 1 / (1 + exp(-x))`.
43124func Sigmoid(scope *Scope, x tf.Output) (y tf.Output) {
43125	if scope.Err() != nil {
43126		return
43127	}
43128	opspec := tf.OpSpec{
43129		Type: "Sigmoid",
43130		Input: []tf.Input{
43131			x,
43132		},
43133	}
43134	op := scope.AddOperation(opspec)
43135	return op.Output(0)
43136}
43137
43138// Computes the gradient of the sigmoid of `x` wrt its input.
43139//
43140// Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
43141// `dy` is the corresponding input gradient.
43142func SigmoidGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
43143	if scope.Err() != nil {
43144		return
43145	}
43146	opspec := tf.OpSpec{
43147		Type: "SigmoidGrad",
43148		Input: []tf.Input{
43149			y, dy,
43150		},
43151	}
43152	op := scope.AddOperation(opspec)
43153	return op.Output(0)
43154}
43155
43156// Returns an element-wise indication of the sign of a number.
43157//
43158// `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
43159//
43160// For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
43161//
43162// Example usage:
43163// >>> tf.math.sign([0., 2., -3.])
43164// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
43165func Sign(scope *Scope, x tf.Output) (y tf.Output) {
43166	if scope.Err() != nil {
43167		return
43168	}
43169	opspec := tf.OpSpec{
43170		Type: "Sign",
43171		Input: []tf.Input{
43172			x,
43173		},
43174	}
43175	op := scope.AddOperation(opspec)
43176	return op.Output(0)
43177}
43178
43179// Computes sine of x element-wise.
43180//
43181//	Given an input tensor, this function computes sine of every
43182//	element in the tensor. Input range is `(-inf, inf)` and
43183//	output range is `[-1,1]`.
43184//
43185//	```python
43186//	x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
43187//	tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
43188//	```
43189func Sin(scope *Scope, x tf.Output) (y tf.Output) {
43190	if scope.Err() != nil {
43191		return
43192	}
43193	opspec := tf.OpSpec{
43194		Type: "Sin",
43195		Input: []tf.Input{
43196			x,
43197		},
43198	}
43199	op := scope.AddOperation(opspec)
43200	return op.Output(0)
43201}
43202
43203// Computes hyperbolic sine of x element-wise.
43204//
43205//	Given an input tensor, this function computes hyperbolic sine of every
43206//	element in the tensor. Input range is `[-inf,inf]` and output range
43207//	is `[-inf,inf]`.
43208//
43209//	```python
43210//	x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
43211//	tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
43212//	```
43213func Sinh(scope *Scope, x tf.Output) (y tf.Output) {
43214	if scope.Err() != nil {
43215		return
43216	}
43217	opspec := tf.OpSpec{
43218		Type: "Sinh",
43219		Input: []tf.Input{
43220			x,
43221		},
43222	}
43223	op := scope.AddOperation(opspec)
43224	return op.Output(0)
43225}
43226
43227// SizeAttr is an optional argument to Size.
43228type SizeAttr func(optionalAttr)
43229
43230// SizeOutType sets the optional out_type attribute to value.
43231// If not specified, defaults to DT_INT32
43232func SizeOutType(value tf.DataType) SizeAttr {
43233	return func(m optionalAttr) {
43234		m["out_type"] = value
43235	}
43236}
43237
43238// Returns the size of a tensor.
43239//
43240// This operation returns an integer representing the number of elements in
43241// `input`.
43242//
43243// For example:
43244//
43245// ```
43246// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
43247// size(t) ==> 12
43248// ```
43249func Size(scope *Scope, input tf.Output, optional ...SizeAttr) (output tf.Output) {
43250	if scope.Err() != nil {
43251		return
43252	}
43253	attrs := map[string]interface{}{}
43254	for _, a := range optional {
43255		a(attrs)
43256	}
43257	opspec := tf.OpSpec{
43258		Type: "Size",
43259		Input: []tf.Input{
43260			input,
43261		},
43262		Attrs: attrs,
43263	}
43264	op := scope.AddOperation(opspec)
43265	return op.Output(0)
43266}
43267
43268// SkipDatasetAttr is an optional argument to SkipDataset.
43269type SkipDatasetAttr func(optionalAttr)
43270
43271// SkipDatasetMetadata sets the optional metadata attribute to value.
43272// If not specified, defaults to ""
43273func SkipDatasetMetadata(value string) SkipDatasetAttr {
43274	return func(m optionalAttr) {
43275		m["metadata"] = value
43276	}
43277}
43278
43279// Creates a dataset that skips `count` elements from the `input_dataset`.
43280//
43281// Arguments:
43282//
43283//	count: A scalar representing the number of elements from the `input_dataset`
43284//
43285// that should be skipped.  If count is -1, skips everything.
43286func SkipDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SkipDatasetAttr) (handle tf.Output) {
43287	if scope.Err() != nil {
43288		return
43289	}
43290	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
43291	for _, a := range optional {
43292		a(attrs)
43293	}
43294	opspec := tf.OpSpec{
43295		Type: "SkipDataset",
43296		Input: []tf.Input{
43297			input_dataset, count,
43298		},
43299		Attrs: attrs,
43300	}
43301	op := scope.AddOperation(opspec)
43302	return op.Output(0)
43303}
43304
43305// SkipgramAttr is an optional argument to Skipgram.
43306type SkipgramAttr func(optionalAttr)
43307
43308// SkipgramWindowSize sets the optional window_size attribute to value.
43309//
43310// value: The number of words to predict to the left and right of the target.
43311// If not specified, defaults to 5
43312func SkipgramWindowSize(value int64) SkipgramAttr {
43313	return func(m optionalAttr) {
43314		m["window_size"] = value
43315	}
43316}
43317
43318// SkipgramMinCount sets the optional min_count attribute to value.
43319//
43320// value: The minimum number of word occurrences for it to be included in the
43321// vocabulary.
43322// If not specified, defaults to 5
43323func SkipgramMinCount(value int64) SkipgramAttr {
43324	return func(m optionalAttr) {
43325		m["min_count"] = value
43326	}
43327}
43328
43329// SkipgramSubsample sets the optional subsample attribute to value.
43330//
43331// value: Threshold for word occurrence. Words that appear with higher
43332// frequency will be randomly down-sampled. Set to 0 to disable.
43333// If not specified, defaults to 0.001
43334func SkipgramSubsample(value float32) SkipgramAttr {
43335	return func(m optionalAttr) {
43336		m["subsample"] = value
43337	}
43338}
43339
43340// Parses a text file and creates a batch of examples.
43341//
43342// DEPRECATED at GraphDef version 19: Moving word2vec into tensorflow_models/tutorials and deprecating its ops here as a result
43343//
43344// Arguments:
43345//
43346//	filename: The corpus's text file name.
43347//	batch_size: The size of produced batch.
43348//
43349// Returns:
43350//
43351//	vocab_word: A vector of words in the corpus.
43352//	vocab_freq: Frequencies of words. Sorted in non-ascending order.
43353//	words_per_epoch: Number of words per epoch in the data file.
43354//	current_epoch: The current epoch number.
43355//	total_words_processed: The total number of words processed so far.
43356//	examples: A vector of word ids.
43357//	labels: A vector of word ids.
43358func Skipgram(scope *Scope, filename string, batch_size int64, optional ...SkipgramAttr) (vocab_word tf.Output, vocab_freq tf.Output, words_per_epoch tf.Output, current_epoch tf.Output, total_words_processed tf.Output, examples tf.Output, labels tf.Output) {
43359	if scope.Err() != nil {
43360		return
43361	}
43362	attrs := map[string]interface{}{"filename": filename, "batch_size": batch_size}
43363	for _, a := range optional {
43364		a(attrs)
43365	}
43366	opspec := tf.OpSpec{
43367		Type: "Skipgram",
43368
43369		Attrs: attrs,
43370	}
43371	op := scope.AddOperation(opspec)
43372	return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4), op.Output(5), op.Output(6)
43373}
43374
43375// Return a slice from 'input'.
43376//
43377// The output tensor is a tensor with dimensions described by 'size'
43378// whose values are extracted from 'input' starting at the offsets in
43379// 'begin'.
43380//
43381// *Requirements*:
43382//
43383//	0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
43384//
43385// Arguments:
43386//
43387//	begin: begin[i] specifies the offset into the 'i'th dimension of
43388//
43389// 'input' to slice from.
43390//
43391//	size: size[i] specifies the number of elements of the 'i'th dimension
43392//
43393// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
43394// i are included in the slice (i.e. this is equivalent to setting
43395// size[i] = input.dim_size(i) - begin[i]).
43396func Slice(scope *Scope, input tf.Output, begin tf.Output, size tf.Output) (output tf.Output) {
43397	if scope.Err() != nil {
43398		return
43399	}
43400	opspec := tf.OpSpec{
43401		Type: "Slice",
43402		Input: []tf.Input{
43403			input, begin, size,
43404		},
43405	}
43406	op := scope.AddOperation(opspec)
43407	return op.Output(0)
43408}
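
// exampleSliceSketch is a hand-written usage sketch, not generator output,
// showing the size[i] = -1 convention described above.
func exampleSliceSketch(s *Scope) tf.Output {
	input := Const(s.SubScope("in"), [][]int32{{1, 2, 3}, {4, 5, 6}})
	begin := Const(s.SubScope("begin"), []int32{1, 0})
	size := Const(s.SubScope("size"), []int32{1, -1}) // -1: take the rest of dim 1
	// Evaluates to [[4, 5, 6]].
	return Slice(s, input, begin, size)
}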
43409
43410// SlidingWindowDatasetAttr is an optional argument to SlidingWindowDataset.
43411type SlidingWindowDatasetAttr func(optionalAttr)
43412
43413// SlidingWindowDatasetDropRemainder sets the optional drop_remainder attribute to value.
43414// If not specified, defaults to true
43415func SlidingWindowDatasetDropRemainder(value bool) SlidingWindowDatasetAttr {
43416	return func(m optionalAttr) {
43417		m["drop_remainder"] = value
43418	}
43419}
43420
43421// Creates a dataset that passes a sliding window over `input_dataset`.
43422//
43423// Arguments:
43424//
43425//	window_size: A scalar representing the number of elements in the
43426//
43427// sliding window.
43428//
43429//	window_shift: A scalar representing the steps moving the sliding window
43430//
43431// forward in one iteration. It must be positive.
43432//
43433//	window_stride: A scalar representing the stride of the input elements of the sliding window.
43434//
43435// It must be positive.
43436func SlidingWindowDataset(scope *Scope, input_dataset tf.Output, window_size tf.Output, window_shift tf.Output, window_stride tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SlidingWindowDatasetAttr) (handle tf.Output) {
43437	if scope.Err() != nil {
43438		return
43439	}
43440	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
43441	for _, a := range optional {
43442		a(attrs)
43443	}
43444	opspec := tf.OpSpec{
43445		Type: "SlidingWindowDataset",
43446		Input: []tf.Input{
43447			input_dataset, window_size, window_shift, window_stride,
43448		},
43449		Attrs: attrs,
43450	}
43451	op := scope.AddOperation(opspec)
43452	return op.Output(0)
43453}
43454
43455// Returns a copy of the input tensor.
43456func Snapshot(scope *Scope, input tf.Output) (output tf.Output) {
43457	if scope.Err() != nil {
43458		return
43459	}
43460	opspec := tf.OpSpec{
43461		Type: "Snapshot",
43462		Input: []tf.Input{
43463			input,
43464		},
43465	}
43466	op := scope.AddOperation(opspec)
43467	return op.Output(0)
43468}
43469
43470// SnapshotDatasetAttr is an optional argument to SnapshotDataset.
43471type SnapshotDatasetAttr func(optionalAttr)
43472
43473// SnapshotDatasetCompression sets the optional compression attribute to value.
43474// If not specified, defaults to ""
43475func SnapshotDatasetCompression(value string) SnapshotDatasetAttr {
43476	return func(m optionalAttr) {
43477		m["compression"] = value
43478	}
43479}
43480
43481// SnapshotDatasetReaderPathPrefix sets the optional reader_path_prefix attribute to value.
43482// If not specified, defaults to ""
43483func SnapshotDatasetReaderPathPrefix(value string) SnapshotDatasetAttr {
43484	return func(m optionalAttr) {
43485		m["reader_path_prefix"] = value
43486	}
43487}
43488
43489// SnapshotDatasetWriterPathPrefix sets the optional writer_path_prefix attribute to value.
43490// If not specified, defaults to ""
43491func SnapshotDatasetWriterPathPrefix(value string) SnapshotDatasetAttr {
43492	return func(m optionalAttr) {
43493		m["writer_path_prefix"] = value
43494	}
43495}
43496
43497// SnapshotDatasetShardSizeBytes sets the optional shard_size_bytes attribute to value.
43498// If not specified, defaults to 10737418240
43499func SnapshotDatasetShardSizeBytes(value int64) SnapshotDatasetAttr {
43500	return func(m optionalAttr) {
43501		m["shard_size_bytes"] = value
43502	}
43503}
43504
43505// SnapshotDatasetPendingSnapshotExpirySeconds sets the optional pending_snapshot_expiry_seconds attribute to value.
43506// If not specified, defaults to 86400
43507func SnapshotDatasetPendingSnapshotExpirySeconds(value int64) SnapshotDatasetAttr {
43508	return func(m optionalAttr) {
43509		m["pending_snapshot_expiry_seconds"] = value
43510	}
43511}
43512
43513// SnapshotDatasetNumReaderThreads sets the optional num_reader_threads attribute to value.
43514// If not specified, defaults to 1
43515func SnapshotDatasetNumReaderThreads(value int64) SnapshotDatasetAttr {
43516	return func(m optionalAttr) {
43517		m["num_reader_threads"] = value
43518	}
43519}
43520
43521// SnapshotDatasetReaderBufferSize sets the optional reader_buffer_size attribute to value.
43522// If not specified, defaults to 1
43523func SnapshotDatasetReaderBufferSize(value int64) SnapshotDatasetAttr {
43524	return func(m optionalAttr) {
43525		m["reader_buffer_size"] = value
43526	}
43527}
43528
43529// SnapshotDatasetNumWriterThreads sets the optional num_writer_threads attribute to value.
43530// If not specified, defaults to 1
43531func SnapshotDatasetNumWriterThreads(value int64) SnapshotDatasetAttr {
43532	return func(m optionalAttr) {
43533		m["num_writer_threads"] = value
43534	}
43535}
43536
43537// SnapshotDatasetWriterBufferSize sets the optional writer_buffer_size attribute to value.
43538// If not specified, defaults to 1
43539func SnapshotDatasetWriterBufferSize(value int64) SnapshotDatasetAttr {
43540	return func(m optionalAttr) {
43541		m["writer_buffer_size"] = value
43542	}
43543}
43544
43545// SnapshotDatasetShuffleOnRead sets the optional shuffle_on_read attribute to value.
43546// If not specified, defaults to false
43547func SnapshotDatasetShuffleOnRead(value bool) SnapshotDatasetAttr {
43548	return func(m optionalAttr) {
43549		m["shuffle_on_read"] = value
43550	}
43551}
43552
43553// SnapshotDatasetSeed sets the optional seed attribute to value.
43554// If not specified, defaults to 0
43555func SnapshotDatasetSeed(value int64) SnapshotDatasetAttr {
43556	return func(m optionalAttr) {
43557		m["seed"] = value
43558	}
43559}
43560
43561// SnapshotDatasetSeed2 sets the optional seed2 attribute to value.
43562// If not specified, defaults to 0
43563func SnapshotDatasetSeed2(value int64) SnapshotDatasetAttr {
43564	return func(m optionalAttr) {
43565		m["seed2"] = value
43566	}
43567}
43568
43569// SnapshotDatasetMode sets the optional mode attribute to value.
43570// If not specified, defaults to "auto"
43571func SnapshotDatasetMode(value string) SnapshotDatasetAttr {
43572	return func(m optionalAttr) {
43573		m["mode"] = value
43574	}
43575}
43576
43577// SnapshotDatasetSnapshotName sets the optional snapshot_name attribute to value.
43578// If not specified, defaults to ""
43579func SnapshotDatasetSnapshotName(value string) SnapshotDatasetAttr {
43580	return func(m optionalAttr) {
43581		m["snapshot_name"] = value
43582	}
43583}
43584
43585// Creates a dataset that will write to / read from a snapshot.
43586//
43587// This dataset attempts to determine whether a valid snapshot exists at the
43588// `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`.
43589// If not, it will run the preprocessing pipeline as usual, and write out a
43590// snapshot of the data processed for future use.
43591//
43592// Arguments:
43593//
43594//	input_dataset: A variant tensor representing the input dataset.
43595//	path: The path we should write snapshots to / read snapshots from.
43596func SnapshotDataset(scope *Scope, input_dataset tf.Output, path tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...SnapshotDatasetAttr) (handle tf.Output) {
43597	if scope.Err() != nil {
43598		return
43599	}
43600	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
43601	for _, a := range optional {
43602		a(attrs)
43603	}
43604	opspec := tf.OpSpec{
43605		Type: "SnapshotDataset",
43606		Input: []tf.Input{
43607			input_dataset, path,
43608		},
43609		Attrs: attrs,
43610	}
43611	op := scope.AddOperation(opspec)
43612	return op.Output(0)
43613}
43614
43615// SobolSampleAttr is an optional argument to SobolSample.
43616type SobolSampleAttr func(optionalAttr)
43617
43618// SobolSampleDtype sets the optional dtype attribute to value.
43619//
43620// value: The type of the sample. One of: `float32` or `float64`.
43621// If not specified, defaults to DT_FLOAT
43622func SobolSampleDtype(value tf.DataType) SobolSampleAttr {
43623	return func(m optionalAttr) {
43624		m["dtype"] = value
43625	}
43626}
43627
43628// Generates points from the Sobol sequence.
43629//
43630// Creates a Sobol sequence with `num_results` samples. Each sample has dimension
43631// `dim`. Skips the first `skip` samples.
43632//
43633// Arguments:
43634//
43635//	dim: Positive scalar `Tensor` representing each sample's dimension.
43636//	num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol points to return
43637//
43638// in the output.
43639//
43640//	skip: Positive scalar `Tensor` of dtype int32. The number of initial points of the
43641//
43642// Sobol sequence to skip.
43643//
43644// Returns a `Tensor` of samples from the Sobol sequence with shape `[num_results, dim]`.
43645func SobolSample(scope *Scope, dim tf.Output, num_results tf.Output, skip tf.Output, optional ...SobolSampleAttr) (samples tf.Output) {
43646	if scope.Err() != nil {
43647		return
43648	}
43649	attrs := map[string]interface{}{}
43650	for _, a := range optional {
43651		a(attrs)
43652	}
43653	opspec := tf.OpSpec{
43654		Type: "SobolSample",
43655		Input: []tf.Input{
43656			dim, num_results, skip,
43657		},
43658		Attrs: attrs,
43659	}
43660	op := scope.AddOperation(opspec)
43661	return op.Output(0)
43662}
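
// exampleSobolSampleSketch is a hand-written usage sketch, not generator
// output. SobolSampleDtype(tf.Double) would request float64 samples instead
// of the default float32.
func exampleSobolSampleSketch(s *Scope) tf.Output {
	dim := Const(s.SubScope("dim"), int32(2))
	numResults := Const(s.SubScope("n"), int32(5))
	skip := Const(s.SubScope("skip"), int32(1)) // skip the first point
	// Evaluates to a [5, 2] float32 tensor of low-discrepancy points.
	return SobolSample(s, dim, numResults, skip)
}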
43663
43664// Computes softmax activations.
43665//
43666// For each batch `i` and class `j` we have
43667//
43668//	$$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
43669//
43670// Arguments:
43671//
43672//	logits: 2-D with shape `[batch_size, num_classes]`.
43673//
43674// Returns Same shape as `logits`.
43675func Softmax(scope *Scope, logits tf.Output) (softmax tf.Output) {
43676	if scope.Err() != nil {
43677		return
43678	}
43679	opspec := tf.OpSpec{
43680		Type: "Softmax",
43681		Input: []tf.Input{
43682			logits,
43683		},
43684	}
43685	op := scope.AddOperation(opspec)
43686	return op.Output(0)
43687}
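
// exampleSoftmaxSketch is a hand-written usage sketch, not generator output.
// Each row of the result sums to 1 per the formula above.
func exampleSoftmaxSketch(s *Scope) tf.Output {
	logits := Const(s, [][]float32{{1, 2, 3}})
	// Evaluates to approximately [[0.0900, 0.2447, 0.6652]].
	return Softmax(s, logits)
}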
43688
43689// Computes softmax cross entropy cost and gradients to backpropagate.
43690//
43691// Inputs are the logits, not probabilities.
43692//
43693// Arguments:
43694//
43695//	features: batch_size x num_classes matrix
43696//	labels: batch_size x num_classes matrix
43697//
43698// The caller must ensure that each batch of labels represents a valid
43699// probability distribution.
43700//
43701// Returns:
43702//
43703//	loss: Per example loss (batch_size vector).
43704//	backprop: backpropagated gradients (batch_size x num_classes matrix).
43705func SoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
43706	if scope.Err() != nil {
43707		return
43708	}
43709	opspec := tf.OpSpec{
43710		Type: "SoftmaxCrossEntropyWithLogits",
43711		Input: []tf.Input{
43712			features, labels,
43713		},
43714	}
43715	op := scope.AddOperation(opspec)
43716	return op.Output(0), op.Output(1)
43717}
43718
43719// Computes softplus gradients for a softplus operation.
43720//
43721// Arguments:
43722//
43723//	gradients: The backpropagated gradients to the corresponding softplus operation.
43724//	features: The features passed as input to the corresponding softplus operation.
43725//
43726// Returns The gradients: `gradients / (1 + exp(-features))`.
43727func SoftplusGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
43728	if scope.Err() != nil {
43729		return
43730	}
43731	opspec := tf.OpSpec{
43732		Type: "SoftplusGrad",
43733		Input: []tf.Input{
43734			gradients, features,
43735		},
43736	}
43737	op := scope.AddOperation(opspec)
43738	return op.Output(0)
43739}
43740
43741// Computes softsign: `features / (abs(features) + 1)`.
43742func Softsign(scope *Scope, features tf.Output) (activations tf.Output) {
43743	if scope.Err() != nil {
43744		return
43745	}
43746	opspec := tf.OpSpec{
43747		Type: "Softsign",
43748		Input: []tf.Input{
43749			features,
43750		},
43751	}
43752	op := scope.AddOperation(opspec)
43753	return op.Output(0)
43754}
43755
43756// Computes softsign gradients for a softsign operation.
43757//
43758// Arguments:
43759//
43760//	gradients: The backpropagated gradients to the corresponding softsign operation.
43761//	features: The features passed as input to the corresponding softsign operation.
43762//
43763// Returns The gradients: `gradients / (1 + abs(features)) ** 2`.
43764func SoftsignGrad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) {
43765	if scope.Err() != nil {
43766		return
43767	}
43768	opspec := tf.OpSpec{
43769		Type: "SoftsignGrad",
43770		Input: []tf.Input{
43771			gradients, features,
43772		},
43773	}
43774	op := scope.AddOperation(opspec)
43775	return op.Output(0)
43776}
43777
43778// SpaceToBatch for 4-D tensors of type T.
43779//
43780// This is a legacy version of the more general SpaceToBatchND.
43781//
43782// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
43783// More specifically, this op outputs a copy of the input tensor where values from
43784// the `height` and `width` dimensions are moved to the `batch` dimension. After
43785// the zero-padding, both `height` and `width` of the input must be divisible by the
43786// block size.
43787//
43788// The attr `block_size` must be greater than one. It indicates the block size.
43789//
//   - Non-overlapping blocks of size `block_size x block_size` in the height and
43791//     width dimensions are rearranged into the batch dimension at each location.
43792//   - The batch of the output tensor is `batch * block_size * block_size`.
43793//   - Both height_pad and width_pad must be divisible by block_size.
43794//
43795// The shape of the output will be:
43796//
43797//	[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
43798//	 depth]
43799//
43800// Some examples:
43801//
43802// (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
43803//
43804// ```
43805// x = [[[[1], [2]], [[3], [4]]]]
43806// ```
43807//
43808// The output tensor has shape `[4, 1, 1, 1]` and value:
43809//
43810// ```
43811// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
43812// ```
43813//
43814// (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
43815//
43816// ```
43817// x = [[[[1, 2, 3], [4, 5, 6]],
43818//
43819//	[[7, 8, 9], [10, 11, 12]]]]
43820//
43821// ```
43822//
43823// The output tensor has shape `[4, 1, 1, 3]` and value:
43824//
43825// ```
43826// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
43827// ```
43828//
43829// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
43830//
43831// ```
43832// x = [[[[1],   [2],  [3],  [4]],
43833//
43834//	[[5],   [6],  [7],  [8]],
43835//	[[9],  [10], [11],  [12]],
43836//	[[13], [14], [15],  [16]]]]
43837//
43838// ```
43839//
43840// The output tensor has shape `[4, 2, 2, 1]` and value:
43841//
43842// ```
43843// x = [[[[1], [3]], [[9], [11]]],
43844//
43845//	[[[2], [4]], [[10], [12]]],
43846//	[[[5], [7]], [[13], [15]]],
43847//	[[[6], [8]], [[14], [16]]]]
43848//
43849// ```
43850//
43851// (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
43852//
43853// ```
43854// x = [[[[1],   [2],  [3],  [4]],
43855//
43856//	 [[5],   [6],  [7],  [8]]],
43857//	[[[9],  [10], [11],  [12]],
43858//	 [[13], [14], [15],  [16]]]]
43859//
43860// ```
43861//
43862// The output tensor has shape `[8, 1, 2, 1]` and value:
43863//
43864// ```
43865// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
43866//
43867//	[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
43868//
43869// ```
43870//
43871// Among others, this operation is useful for reducing atrous convolution into
43872// regular convolution.
43873//
43874// Arguments:
43875//
43876//		input: 4-D with shape `[batch, height, width, depth]`.
43877//		paddings: 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
43878//	  the padding of the input with zeros across the spatial dimensions as follows:
43879//
43880//	      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
43881//
43882//	  The effective spatial dimensions of the zero-padded input tensor will be:
43883//
43884//	      height_pad = pad_top + height + pad_bottom
43885//	      width_pad = pad_left + width + pad_right
43886func SpaceToBatch(scope *Scope, input tf.Output, paddings tf.Output, block_size int64) (output tf.Output) {
43887	if scope.Err() != nil {
43888		return
43889	}
43890	attrs := map[string]interface{}{"block_size": block_size}
43891	opspec := tf.OpSpec{
43892		Type: "SpaceToBatch",
43893		Input: []tf.Input{
43894			input, paddings,
43895		},
43896		Attrs: attrs,
43897	}
43898	op := scope.AddOperation(opspec)
43899	return op.Output(0)
43900}
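
// Example (hand-written sketch, not part of the machine-generated wrappers):
// SpaceToBatch on a constant 4-D input, matching example (1) above. Zero
// paddings leave the spatial dimensions unpadded. Const and Scope.Finalize
// are this package's helpers; the example function itself is ours.
func exampleSpaceToBatchUsage() (*tf.Graph, error) {
	s := NewScope()
	// x has shape [1, 2, 2, 1].
	x := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}})
	paddings := Const(s, [][]int32{{0, 0}, {0, 0}})
	_ = SpaceToBatch(s, x, paddings, 2) // output shape [4, 1, 1, 1]
	return s.Finalize()
}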
43901
43902// SpaceToBatch for N-D tensors of type T.
43903//
43904// This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
43905// grid of blocks of shape `block_shape`, and interleaves these blocks with the
43906// "batch" dimension (0) such that in the output, the spatial dimensions
43907// `[1, ..., M]` correspond to the position within the grid, and the batch
43908// dimension combines both the position within a spatial block and the original
43909// batch position.  Prior to division into blocks, the spatial dimensions of the
43910// input are optionally zero padded according to `paddings`. See below for a
43911// precise description.
43912//
43913// This operation is equivalent to the following steps:
43914//
43915//  1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
43916//     input according to `paddings` to produce `padded` of shape `padded_shape`.
43917//
43918// 2. Reshape `padded` to `reshaped_padded` of shape:
43919//
43920//	[batch] +
43921//	[padded_shape[1] / block_shape[0],
43922//	  block_shape[0],
43923//	 ...,
43924//	 padded_shape[M] / block_shape[M-1],
43925//	 block_shape[M-1]] +
43926//	remaining_shape
43927//
43928//  3. Permute dimensions of `reshaped_padded` to produce
43929//     `permuted_reshaped_padded` of shape:
43930//
43931//     block_shape +
43932//     [batch] +
43933//     [padded_shape[1] / block_shape[0],
43934//     ...,
43935//     padded_shape[M] / block_shape[M-1]] +
43936//     remaining_shape
43937//
43938//  4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
43939//     dimension, producing an output tensor of shape:
43940//
43941//     [batch * prod(block_shape)] +
43942//     [padded_shape[1] / block_shape[0],
43943//     ...,
43944//     padded_shape[M] / block_shape[M-1]] +
43945//     remaining_shape
43946//
43947// Some examples:
43948//
43949// (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
43950//
43951//	`paddings = [[0, 0], [0, 0]]`:
43952//
43953// ```
43954// x = [[[[1], [2]], [[3], [4]]]]
43955// ```
43956//
43957// The output tensor has shape `[4, 1, 1, 1]` and value:
43958//
43959// ```
43960// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
43961// ```
43962//
43963// (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
43964//
43965//	`paddings = [[0, 0], [0, 0]]`:
43966//
43967// ```
43968// x = [[[[1, 2, 3], [4, 5, 6]],
43969//
43970//	[[7, 8, 9], [10, 11, 12]]]]
43971//
43972// ```
43973//
43974// The output tensor has shape `[4, 1, 1, 3]` and value:
43975//
43976// ```
43977// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
43978// ```
43979//
43980// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
43981//
43982//	`paddings = [[0, 0], [0, 0]]`:
43983//
43984// ```
43985// x = [[[[1],   [2],  [3],  [4]],
43986//
43987//	[[5],   [6],  [7],  [8]],
43988//	[[9],  [10], [11],  [12]],
43989//	[[13], [14], [15],  [16]]]]
43990//
43991// ```
43992//
43993// The output tensor has shape `[4, 2, 2, 1]` and value:
43994//
43995// ```
43996// x = [[[[1], [3]], [[9], [11]]],
43997//
43998//	[[[2], [4]], [[10], [12]]],
43999//	[[[5], [7]], [[13], [15]]],
44000//	[[[6], [8]], [[14], [16]]]]
44001//
44002// ```
44003//
44004// (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
44005//
44006//	paddings = `[[0, 0], [2, 0]]`:
44007//
44008// ```
44009// x = [[[[1],   [2],  [3],  [4]],
44010//
44011//	 [[5],   [6],  [7],  [8]]],
44012//	[[[9],  [10], [11],  [12]],
44013//	 [[13], [14], [15],  [16]]]]
44014//
44015// ```
44016//
44017// The output tensor has shape `[8, 1, 3, 1]` and value:
44018//
44019// ```
44020// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
44021//
44022//	[[[0], [2], [4]]], [[[0], [10], [12]]],
44023//	[[[0], [5], [7]]], [[[0], [13], [15]]],
44024//	[[[0], [6], [8]]], [[[0], [14], [16]]]]
44025//
44026// ```
44027//
44028// Among others, this operation is useful for reducing atrous convolution into
44029// regular convolution.
44030//
44031// Arguments:
44032//
44033//	input: N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
44034//
44035// where spatial_shape has `M` dimensions.
44036//
44037//		block_shape: 1-D with shape `[M]`, all values must be >= 1.
44038//		paddings: 2-D with shape `[M, 2]`, all values must be >= 0.
44039//	  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
44040//	  `i + 1`, which corresponds to spatial dimension `i`.  It is required that
44041//	  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
44042func SpaceToBatchND(scope *Scope, input tf.Output, block_shape tf.Output, paddings tf.Output) (output tf.Output) {
44043	if scope.Err() != nil {
44044		return
44045	}
44046	opspec := tf.OpSpec{
44047		Type: "SpaceToBatchND",
44048		Input: []tf.Input{
44049			input, block_shape, paddings,
44050		},
44051	}
44052	op := scope.AddOperation(opspec)
44053	return op.Output(0)
44054}
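
// Example (hand-written sketch, not part of the machine-generated wrappers):
// the N-D variant takes block_shape and paddings as tensors rather than
// attrs, so they could also be computed inside the graph. This reproduces
// example (1) above.
func exampleSpaceToBatchNDUsage() (*tf.Graph, error) {
	s := NewScope()
	x := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [1, 2, 2, 1]
	blockShape := Const(s, []int64{2, 2})
	paddings := Const(s, [][]int64{{0, 0}, {0, 0}})
	_ = SpaceToBatchND(s, x, blockShape, paddings) // output shape [4, 1, 1, 1]
	return s.Finalize()
}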
44055
44056// SpaceToDepthAttr is an optional argument to SpaceToDepth.
44057type SpaceToDepthAttr func(optionalAttr)
44058
44059// SpaceToDepthDataFormat sets the optional data_format attribute to value.
44060// If not specified, defaults to "NHWC"
44061func SpaceToDepthDataFormat(value string) SpaceToDepthAttr {
44062	return func(m optionalAttr) {
44063		m["data_format"] = value
44064	}
44065}
44066
44067// SpaceToDepth for tensors of type T.
44068//
// Rearranges blocks of spatial data into depth. More specifically,
44070// this op outputs a copy of the input tensor where values from the `height`
44071// and `width` dimensions are moved to the `depth` dimension.
44072// The attr `block_size` indicates the input block size.
44073//
//   - Non-overlapping blocks of size `block_size x block_size` are rearranged
44075//     into depth at each location.
44076//   - The depth of the output tensor is `block_size * block_size * input_depth`.
44077//   - The Y, X coordinates within each block of the input become the high order
44078//     component of the output channel index.
44079//   - The input tensor's height and width must be divisible by block_size.
44080//
44081// The `data_format` attr specifies the layout of the input and output tensors
44082// with the following options:
44083//
44084//	"NHWC": `[ batch, height, width, channels ]`
44085//	"NCHW": `[ batch, channels, height, width ]`
44086//	"NCHW_VECT_C":
44087//	    `qint8 [ batch, channels / 4, height, width, 4 ]`
44088//
44089// It is useful to consider the operation as transforming a 6-D Tensor.
// For example, for data_format = NHWC:
44091//
44092//	Each element in the input tensor can be specified via 6 coordinates,
44093//	ordered by decreasing memory layout significance as:
44094//	n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
44095//	                   within the output image, bX, bY means coordinates
44096//	                   within the input block, iC means input channels).
44097//	The output would be a transpose to the following layout:
44098//	n,oY,oX,bY,bX,iC
44099//
44100// This operation is useful for resizing the activations between convolutions
44101// (but keeping all data), e.g. instead of pooling. It is also useful for training
44102// purely convolutional models.
44103//
44104// For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
44105// block_size = 2:
44106//
44107// ```
44108// x = [[[[1], [2]],
44109//
44110//	[[3], [4]]]]
44111//
44112// ```
44113//
44114// This operation will output a tensor of shape `[1, 1, 1, 4]`:
44115//
44116// ```
44117// [[[[1, 2, 3, 4]]]]
44118// ```
44119//
44120// Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
44121// the corresponding output will have a single element (i.e. width and height are
44122// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
44123// The output element shape is `[1, 1, 4]`.
44124//
44125// For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
44126//
44127// ```
44128// x = [[[[1, 2, 3], [4, 5, 6]],
44129//
44130//	[[7, 8, 9], [10, 11, 12]]]]
44131//
44132// ```
44133//
44134// This operation, for block_size of 2, will return the following tensor of shape
44135// `[1, 1, 1, 12]`
44136//
44137// ```
44138// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
44139// ```
44140//
// Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
44142//
44143// ```
44144// x = [[[[1],   [2],  [5],  [6]],
44145//
44146//	[[3],   [4],  [7],  [8]],
44147//	[[9],  [10], [13],  [14]],
44148//	[[11], [12], [15],  [16]]]]
44149//
44150// ```
44151//
// the operator will return the following tensor of shape `[1, 2, 2, 4]`:
44153//
44154// ```
44155// x = [[[[1, 2, 3, 4],
44156//
44157//	 [5, 6, 7, 8]],
44158//	[[9, 10, 11, 12],
44159//	 [13, 14, 15, 16]]]]
44160//
44161// ```
44162//
44163// Arguments:
44164//
44165//	block_size: The size of the spatial block.
44166func SpaceToDepth(scope *Scope, input tf.Output, block_size int64, optional ...SpaceToDepthAttr) (output tf.Output) {
44167	if scope.Err() != nil {
44168		return
44169	}
44170	attrs := map[string]interface{}{"block_size": block_size}
44171	for _, a := range optional {
44172		a(attrs)
44173	}
44174	opspec := tf.OpSpec{
44175		Type: "SpaceToDepth",
44176		Input: []tf.Input{
44177			input,
44178		},
44179		Attrs: attrs,
44180	}
44181	op := scope.AddOperation(opspec)
44182	return op.Output(0)
44183}
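
// Example (hand-written sketch, not part of the machine-generated wrappers):
// SpaceToDepth with the optional data_format attribute supplied through its
// functional-option setter, the pattern used by all *Attr helpers in this
// package.
func exampleSpaceToDepthUsage() (*tf.Graph, error) {
	s := NewScope()
	x := Const(s, [][][][]float32{{{{1}, {2}}, {{3}, {4}}}}) // [1, 2, 2, 1], NHWC
	_ = SpaceToDepth(s, x, 2, SpaceToDepthDataFormat("NHWC")) // -> [1, 1, 1, 4]
	return s.Finalize()
}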
44184
44185// Adds two `SparseTensor` objects to produce another `SparseTensor`.
44186//
44187// The input `SparseTensor` objects' indices are assumed ordered in standard
44188// lexicographic order.  If this is not the case, before this step run
44189// `SparseReorder` to restore index ordering.
44190//
44191// By default, if two values sum to zero at some index, the output `SparseTensor`
44192// would still include that particular location in its index, storing a zero in the
44193// corresponding value slot.  To override this, callers can specify `thresh`,
44194// indicating that if the sum has a magnitude strictly smaller than `thresh`, its
44195// corresponding value and index would then not be included.  In particular,
44196// `thresh == 0` (default) means everything is kept and actual thresholding happens
44197// only for a positive value.
44198//
44199// In the following shapes, `nnz` is the count after taking `thresh` into account.
44200//
44201// Arguments:
44202//
44203//	a_indices: 2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
44204//	a_values: 1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.
44205//	a_shape: 1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
44206//	b_indices: 2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
44207//	b_values: 1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.
44208//	b_shape: 1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
44209//	thresh: 0-D.  The magnitude threshold that determines if an output value/index
44210//
44211// pair takes space.
44212func SparseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output, thresh tf.Output) (sum_indices tf.Output, sum_values tf.Output, sum_shape tf.Output) {
44213	if scope.Err() != nil {
44214		return
44215	}
44216	opspec := tf.OpSpec{
44217		Type: "SparseAdd",
44218		Input: []tf.Input{
44219			a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
44220		},
44221	}
44222	op := scope.AddOperation(opspec)
44223	return op.Output(0), op.Output(1), op.Output(2)
44224}
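
// Example (hand-written sketch, not part of the machine-generated wrappers):
// adding two 3x3 SparseTensors. With thresh == 0 every summed entry is kept,
// including entries that cancel to exactly zero.
func exampleSparseAddUsage() (*tf.Graph, error) {
	s := NewScope()
	aIndices := Const(s, [][]int64{{0, 0}, {1, 2}})
	aValues := Const(s, []float32{1, 2})
	aShape := Const(s, []int64{3, 3})
	bIndices := Const(s, [][]int64{{0, 0}, {2, 1}})
	bValues := Const(s, []float32{-1, 4})
	bShape := Const(s, []int64{3, 3})
	thresh := Const(s, float32(0))
	sumIndices, sumValues, sumShape := SparseAdd(s,
		aIndices, aValues, aShape, bIndices, bValues, bShape, thresh)
	_, _, _ = sumIndices, sumValues, sumShape
	return s.Finalize()
}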
44225
44226// The gradient operator for the SparseAdd op.
44227//
44228// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
44229// as `SparseTensor` objects.  This op takes in the upstream gradient w.r.t.
44230// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
44231// values of A and B.
44232//
44233// Arguments:
44234//
44235//	backprop_val_grad: 1-D with shape `[nnz(sum)]`.  The gradient with respect to
44236//
44237// the non-empty values of the sum.
44238//
44239//	a_indices: 2-D.  The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
44240//	b_indices: 2-D.  The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
44241//	sum_indices: 2-D.  The `indices` of the sum `SparseTensor`, size
44242//
44243// `[nnz(sum), ndims]`.
44244//
44245// Returns:
44246//
44247//	a_val_grad: 1-D with shape `[nnz(A)]`. The gradient with respect to the
44248//
44249// non-empty values of A.
44250//
44251//	b_val_grad: 1-D with shape `[nnz(B)]`. The gradient with respect to the
44252//
44253// non-empty values of B.
44254func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Output, b_indices tf.Output, sum_indices tf.Output) (a_val_grad tf.Output, b_val_grad tf.Output) {
44255	if scope.Err() != nil {
44256		return
44257	}
44258	opspec := tf.OpSpec{
44259		Type: "SparseAddGrad",
44260		Input: []tf.Input{
44261			backprop_val_grad, a_indices, b_indices, sum_indices,
44262		},
44263	}
44264	op := scope.AddOperation(opspec)
44265	return op.Output(0), op.Output(1)
44266}
44267
44268// SparseBincountAttr is an optional argument to SparseBincount.
44269type SparseBincountAttr func(optionalAttr)
44270
44271// SparseBincountBinaryOutput sets the optional binary_output attribute to value.
44272//
// value: bool; whether the kernel should record only the presence of each value
// (binary output) or the number of occurrences.
44274// If not specified, defaults to false
44275func SparseBincountBinaryOutput(value bool) SparseBincountAttr {
44276	return func(m optionalAttr) {
44277		m["binary_output"] = value
44278	}
44279}
44280
44281// Counts the number of occurrences of each value in an integer array.
44282//
44283// Outputs a vector with length `size` and the same dtype as `weights`. If
44284// `weights` are empty, then index `i` stores the number of times the value `i` is
44285// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
44286// the value in `weights` at each index where the corresponding value in `arr` is
44287// `i`.
44288//
44289// Values in `arr` outside of the range [0, size) are ignored.
44290//
44291// Arguments:
44292//
44293//	indices: 2D int64 `Tensor`.
44294//	values: 1D int `Tensor`.
44295//	dense_shape: 1D int64 `Tensor`.
44296//	size: non-negative int scalar `Tensor`.
44297//	weights: is an int32, int64, float32, or float64 `Tensor` with the same
44298//
44299// shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights
44300// equal to 1.
44301//
44302// Returns 1D `Tensor` with length equal to `size` or 2D `Tensor` with [batch_size, `size`].
44303// The counts or summed weights for each value in the range [0, size).
44304func SparseBincount(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, size tf.Output, weights tf.Output, optional ...SparseBincountAttr) (output tf.Output) {
44305	if scope.Err() != nil {
44306		return
44307	}
44308	attrs := map[string]interface{}{}
44309	for _, a := range optional {
44310		a(attrs)
44311	}
44312	opspec := tf.OpSpec{
44313		Type: "SparseBincount",
44314		Input: []tf.Input{
44315			indices, values, dense_shape, size, weights,
44316		},
44317		Attrs: attrs,
44318	}
44319	op := scope.AddOperation(opspec)
44320	return op.Output(0)
44321}
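
// Example (hand-written sketch, not part of the machine-generated wrappers):
// counting the values of a 1-D sparse tensor of length 6 holding the values
// 1, 1, 2. Unit weights make the output plain counts.
func exampleSparseBincountUsage() (*tf.Graph, error) {
	s := NewScope()
	indices := Const(s, [][]int64{{0}, {3}, {4}})
	values := Const(s, []int32{1, 1, 2})
	denseShape := Const(s, []int64{6})
	size := Const(s, int32(4))
	weights := Const(s, []int32{1, 1, 1}) // unit weights: output is [0, 2, 1, 0]
	_ = SparseBincount(s, indices, values, denseShape, size, weights)
	return s.Finalize()
}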
44322
44323// Concatenates a list of `SparseTensor` along the specified dimension.
44324//
44325// Concatenation is with respect to the dense versions of these sparse tensors.
44326// It is assumed that each input is a `SparseTensor` whose elements are ordered
44327// along increasing dimension number.
44328//
44329// All inputs' shapes must match, except for the concat dimension.  The
44330// `indices`, `values`, and `shapes` lists must have the same length.
44331//
44332// The output shape is identical to the inputs', except along the concat
44333// dimension, where it is the sum of the inputs' sizes along that dimension.
44334//
44335// The output elements will be resorted to preserve the sort order along
44336// increasing dimension number.
44337//
44338// This op runs in `O(M log M)` time, where `M` is the total number of non-empty
44339// values across all inputs. This is due to the need for an internal sort in
44340// order to concatenate efficiently across an arbitrary dimension.
44341//
44342// For example, if `concat_dim = 1` and the inputs are
44343//
44344//	sp_inputs[0]: shape = [2, 3]
44345//	[0, 2]: "a"
44346//	[1, 0]: "b"
44347//	[1, 1]: "c"
44348//
44349//	sp_inputs[1]: shape = [2, 4]
44350//	[0, 1]: "d"
44351//	[0, 2]: "e"
44352//
44353// then the output will be
44354//
44355//	shape = [2, 7]
44356//	[0, 2]: "a"
44357//	[0, 4]: "d"
44358//	[0, 5]: "e"
44359//	[1, 0]: "b"
44360//	[1, 1]: "c"
44361//
44362// Graphically this is equivalent to doing
44363//
44364//	[    a] concat [  d e  ] = [    a   d e  ]
44365//	[b c  ]        [       ]   [b c          ]
44366//
44367// Arguments:
44368//
44369//	indices: 2-D.  Indices of each input `SparseTensor`.
44370//	values: 1-D.  Non-empty values of each `SparseTensor`.
44371//	shapes: 1-D.  Shapes of each `SparseTensor`.
44372//	concat_dim: Dimension to concatenate along. Must be in range [-rank, rank),
44373//
44374// where rank is the number of dimensions in each input `SparseTensor`.
44375//
44376// Returns:
44377//
44378//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
44379//	output_values: 1-D.  Non-empty values of the concatenated `SparseTensor`.
44380//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
44381func SparseConcat(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, concat_dim int64) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
44382	if scope.Err() != nil {
44383		return
44384	}
44385	attrs := map[string]interface{}{"concat_dim": concat_dim}
44386	opspec := tf.OpSpec{
44387		Type: "SparseConcat",
44388		Input: []tf.Input{
44389			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes),
44390		},
44391		Attrs: attrs,
44392	}
44393	op := scope.AddOperation(opspec)
44394	return op.Output(0), op.Output(1), op.Output(2)
44395}
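
// Example (hand-written sketch, not part of the machine-generated wrappers):
// concatenating the two string SparseTensors from the example above along
// dimension 1. The indices, values, and shapes lists must have equal length.
func exampleSparseConcatUsage() (*tf.Graph, error) {
	s := NewScope()
	indices := []tf.Output{
		Const(s, [][]int64{{0, 2}, {1, 0}, {1, 1}}),
		Const(s, [][]int64{{0, 1}, {0, 2}}),
	}
	values := []tf.Output{
		Const(s, []string{"a", "b", "c"}),
		Const(s, []string{"d", "e"}),
	}
	shapes := []tf.Output{
		Const(s, []int64{2, 3}),
		Const(s, []int64{2, 4}),
	}
	outI, outV, outS := SparseConcat(s, indices, values, shapes, 1)
	_, _, _ = outI, outV, outS
	return s.Finalize()
}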
44396
44397// SparseCountSparseOutputAttr is an optional argument to SparseCountSparseOutput.
44398type SparseCountSparseOutputAttr func(optionalAttr)
44399
44400// SparseCountSparseOutputMinlength sets the optional minlength attribute to value.
44401//
44402// value: Minimum value to count. Can be set to -1 for no minimum.
44403// If not specified, defaults to -1
44404//
44405// REQUIRES: value >= -1
44406func SparseCountSparseOutputMinlength(value int64) SparseCountSparseOutputAttr {
44407	return func(m optionalAttr) {
44408		m["minlength"] = value
44409	}
44410}
44411
44412// SparseCountSparseOutputMaxlength sets the optional maxlength attribute to value.
44413//
44414// value: Maximum value to count. Can be set to -1 for no maximum.
44415// If not specified, defaults to -1
44416//
44417// REQUIRES: value >= -1
44418func SparseCountSparseOutputMaxlength(value int64) SparseCountSparseOutputAttr {
44419	return func(m optionalAttr) {
44420		m["maxlength"] = value
44421	}
44422}
44423
44424// Performs sparse-output bin counting for a sparse tensor input.
44425//
44426//	Counts the number of times each value occurs in the input.
44427//
44428// Arguments:
44429//
44430//	indices: Tensor containing the indices of the sparse tensor to count.
44431//	values: Tensor containing values of the sparse tensor to count.
44432//	dense_shape: Tensor containing the dense shape of the sparse tensor to count.
44433//	weights: A Tensor of the same shape as indices containing per-index weight values.
44434//
44435// May also be the empty tensor if no weights are used.
44436//
44437//	binary_output: Whether to output the number of occurrences of each value or 1.
44438//
44439// Returns:
44440//
44441//	output_indices: Indices tensor for the resulting sparse tensor object.
44442//	output_values: Values tensor for the resulting sparse tensor object.
44443//	output_dense_shape: Shape tensor for the resulting sparse tensor object.
44444func SparseCountSparseOutput(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, weights tf.Output, binary_output bool, optional ...SparseCountSparseOutputAttr) (output_indices tf.Output, output_values tf.Output, output_dense_shape tf.Output) {
44445	if scope.Err() != nil {
44446		return
44447	}
44448	attrs := map[string]interface{}{"binary_output": binary_output}
44449	for _, a := range optional {
44450		a(attrs)
44451	}
44452	opspec := tf.OpSpec{
44453		Type: "SparseCountSparseOutput",
44454		Input: []tf.Input{
44455			indices, values, dense_shape, weights,
44456		},
44457		Attrs: attrs,
44458	}
44459	op := scope.AddOperation(opspec)
44460	return op.Output(0), op.Output(1), op.Output(2)
44461}
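
// Example (hand-written sketch, not part of the machine-generated wrappers):
// sparse-output counting over a 2x2 sparse tensor, with the optional
// maxlength attribute set through its functional-option setter.
func exampleSparseCountSparseOutputUsage() (*tf.Graph, error) {
	s := NewScope()
	indices := Const(s, [][]int64{{0, 0}, {0, 1}, {1, 0}})
	values := Const(s, []int64{1, 1, 5})
	denseShape := Const(s, []int64{2, 2})
	weights := Const(s, []int64{1, 1, 1}) // unit weights: plain counts
	outI, outV, outShape := SparseCountSparseOutput(s, indices, values,
		denseShape, weights, false,
		SparseCountSparseOutputMaxlength(10))
	_, _, _ = outI, outV, outShape
	return s.Finalize()
}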
44462
44463// Generates sparse cross from a list of sparse and dense tensors.
44464//
44465// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
44466// representing features of one feature column. It outputs a 2D `SparseTensor` with
44467// the batchwise crosses of these features.
44468//
44469// For example, if the inputs are
44470//
44471//	inputs[0]: SparseTensor with shape = [2, 2]
44472//	[0, 0]: "a"
44473//	[1, 0]: "b"
44474//	[1, 1]: "c"
44475//
44476//	inputs[1]: SparseTensor with shape = [2, 1]
44477//	[0, 0]: "d"
44478//	[1, 0]: "e"
44479//
44480//	inputs[2]: Tensor [["f"], ["g"]]
44481//
44482// then the output will be
44483//
44484//	shape = [2, 2]
44485//	[0, 0]: "a_X_d_X_f"
44486//	[1, 0]: "b_X_e_X_g"
44487//	[1, 1]: "c_X_e_X_g"
44488//
44489// if hashed_output=true then the output will be
44490//
44491//	shape = [2, 2]
44492//	[0, 0]: FingerprintCat64(
44493//	            Fingerprint64("f"), FingerprintCat64(
44494//	                Fingerprint64("d"), Fingerprint64("a")))
44495//	[1, 0]: FingerprintCat64(
44496//	            Fingerprint64("g"), FingerprintCat64(
44497//	                Fingerprint64("e"), Fingerprint64("b")))
44498//	[1, 1]: FingerprintCat64(
44499//	            Fingerprint64("g"), FingerprintCat64(
44500//	                Fingerprint64("e"), Fingerprint64("c")))
44501//
44502// Arguments:
44503//
44504//	indices: 2-D.  Indices of each input `SparseTensor`.
44505//	values: 1-D.   values of each `SparseTensor`.
44506//	shapes: 1-D.   Shapes of each `SparseTensor`.
44507//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
44508//	hashed_output: If true, returns the hash of the cross instead of the string.
44509//
// This allows avoiding string manipulations.
44511//
44512//	num_buckets: It is used if hashed_output is true.
44513//
44514// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
44515//
44516//	hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
44517//
44518// function to combine the crosses fingerprints.
44519//
44520// Returns:
44521//
44522//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
44523//	output_values: 1-D.  Non-empty values of the concatenated or hashed
44524//
44525// `SparseTensor`.
44526//
44527//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
44528func SparseCross(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, hashed_output bool, num_buckets int64, hash_key int64, out_type tf.DataType, internal_type tf.DataType) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
44529	if scope.Err() != nil {
44530		return
44531	}
44532	attrs := map[string]interface{}{"hashed_output": hashed_output, "num_buckets": num_buckets, "hash_key": hash_key, "out_type": out_type, "internal_type": internal_type}
44533	opspec := tf.OpSpec{
44534		Type: "SparseCross",
44535		Input: []tf.Input{
44536			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs),
44537		},
44538		Attrs: attrs,
44539	}
44540	op := scope.AddOperation(opspec)
44541	return op.Output(0), op.Output(1), op.Output(2)
44542}
44543
44544// Generates sparse cross from a list of sparse and dense tensors.
44545//
44546// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
44547// representing features of one feature column. It outputs a 2D `SparseTensor` with
44548// the batchwise crosses of these features.
44549//
44550// For example, if the inputs are
44551//
44552//	inputs[0]: SparseTensor with shape = [2, 2]
44553//	[0, 0]: "a"
44554//	[1, 0]: "b"
44555//	[1, 1]: "c"
44556//
44557//	inputs[1]: SparseTensor with shape = [2, 1]
44558//	[0, 0]: "d"
44559//	[1, 0]: "e"
44560//
44561//	inputs[2]: Tensor [["f"], ["g"]]
44562//
44563// then the output will be
44564//
44565//	shape = [2, 2]
44566//	[0, 0]: "a_X_d_X_f"
44567//	[1, 0]: "b_X_e_X_g"
44568//	[1, 1]: "c_X_e_X_g"
44569//
44570// if hashed_output=true then the output will be
44571//
44572//	shape = [2, 2]
44573//	[0, 0]: FingerprintCat64(
44574//	            Fingerprint64("f"), FingerprintCat64(
44575//	                Fingerprint64("d"), Fingerprint64("a")))
44576//	[1, 0]: FingerprintCat64(
44577//	            Fingerprint64("g"), FingerprintCat64(
44578//	                Fingerprint64("e"), Fingerprint64("b")))
44579//	[1, 1]: FingerprintCat64(
44580//	            Fingerprint64("g"), FingerprintCat64(
44581//	                Fingerprint64("e"), Fingerprint64("c")))
44582//
44583// Arguments:
44584//
44585//	indices: 2-D.  Indices of each input `SparseTensor`.
44586//	values: 1-D.   values of each `SparseTensor`.
44587//	shapes: 1-D.   Shapes of each `SparseTensor`.
44588//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
44589//	num_buckets: It is used if hashed_output is true.
44590//
44591// output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
44592//
44593//	strong_hash: boolean, if true, siphash with salt will be used instead of farmhash.
44594//	salt: Specify the salt that will be used by the siphash function.
44595//
44596// Returns:
44597//
44598//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
44599//	output_values: 1-D.  Non-empty values of the concatenated or hashed
44600//
44601// `SparseTensor`.
44602//
44603//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
44604func SparseCrossHashed(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, num_buckets tf.Output, strong_hash tf.Output, salt tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
44605	if scope.Err() != nil {
44606		return
44607	}
44608	opspec := tf.OpSpec{
44609		Type: "SparseCrossHashed",
44610		Input: []tf.Input{
44611			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs), num_buckets, strong_hash, salt,
44612		},
44613	}
44614	op := scope.AddOperation(opspec)
44615	return op.Output(0), op.Output(1), op.Output(2)
44616}
44617
44618// Generates sparse cross from a list of sparse and dense tensors.
44619//
44620// The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
44621// representing features of one feature column. It outputs a 2D `SparseTensor` with
44622// the batchwise crosses of these features.
44623//
44624// For example, if the inputs are
44625//
44626//	inputs[0]: SparseTensor with shape = [2, 2]
44627//	[0, 0]: "a"
44628//	[1, 0]: "b"
44629//	[1, 1]: "c"
44630//
44631//	inputs[1]: SparseTensor with shape = [2, 1]
44632//	[0, 0]: "d"
44633//	[1, 0]: "e"
44634//
44635//	inputs[2]: Tensor [["f"], ["g"]]
44636//
44637// then the output will be
44638//
44639//	shape = [2, 2]
44640//	[0, 0]: "a_X_d_X_f"
44641//	[1, 0]: "b_X_e_X_g"
44642//	[1, 1]: "c_X_e_X_g"
44643//
44644// if hashed_output=true then the output will be
44645//
44646//	shape = [2, 2]
44647//	[0, 0]: FingerprintCat64(
44648//	            Fingerprint64("f"), FingerprintCat64(
44649//	                Fingerprint64("d"), Fingerprint64("a")))
44650//	[1, 0]: FingerprintCat64(
44651//	            Fingerprint64("g"), FingerprintCat64(
44652//	                Fingerprint64("e"), Fingerprint64("b")))
44653//	[1, 1]: FingerprintCat64(
44654//	            Fingerprint64("g"), FingerprintCat64(
44655//	                Fingerprint64("e"), Fingerprint64("c")))
44656//
44657// Arguments:
44658//
44659//	indices: 2-D.  Indices of each input `SparseTensor`.
44660//	values: 1-D.   values of each `SparseTensor`.
44661//	shapes: 1-D.   Shapes of each `SparseTensor`.
44662//	dense_inputs: 2-D.    Columns represented by dense `Tensor`.
//	sep: string used when joining a list of string inputs; can be used as a separator later.
44664//
44665// Returns:
44666//
44667//	output_indices: 2-D.  Indices of the concatenated `SparseTensor`.
44668//	output_values: 1-D.  Non-empty values of the concatenated or hashed
44669//
44670// `SparseTensor`.
44671//
44672//	output_shape: 1-D.  Shape of the concatenated `SparseTensor`.
44673func SparseCrossV2(scope *Scope, indices []tf.Output, values []tf.Output, shapes []tf.Output, dense_inputs []tf.Output, sep tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
44674	if scope.Err() != nil {
44675		return
44676	}
44677	opspec := tf.OpSpec{
44678		Type: "SparseCrossV2",
44679		Input: []tf.Input{
44680			tf.OutputList(indices), tf.OutputList(values), tf.OutputList(shapes), tf.OutputList(dense_inputs), sep,
44681		},
44682	}
44683	op := scope.AddOperation(opspec)
44684	return op.Output(0), op.Output(1), op.Output(2)
44685}
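
// Example (hand-written sketch, not part of the machine-generated wrappers):
// crossing one string SparseTensor with one dense string column, as in the
// example above, using "_X_" as the join separator.
func exampleSparseCrossV2Usage() (*tf.Graph, error) {
	s := NewScope()
	indices := []tf.Output{Const(s, [][]int64{{0, 0}, {1, 0}, {1, 1}})}
	values := []tf.Output{Const(s, []string{"a", "b", "c"})}
	shapes := []tf.Output{Const(s, []int64{2, 2})}
	dense := []tf.Output{Const(s, [][]string{{"f"}, {"g"}})}
	sep := Const(s, "_X_")
	outI, outV, outS := SparseCrossV2(s, indices, values, shapes, dense, sep)
	_, _, _ = outI, outV, outS
	return s.Finalize()
}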
44686
44687// Adds up a SparseTensor and a dense Tensor, using these special rules:
44688//
44689// (1) Broadcasts the dense side to have the same shape as the sparse side, if
44690//
44691//	eligible;
44692//
44693// (2) Then, only the dense values pointed to by the indices of the SparseTensor
44694//
44695//	participate in the cwise addition.
44696//
44697// By these rules, the result is a logical SparseTensor with exactly the same
44698// indices and shape, but possibly with different non-zero values.  The output of
44699// this Op is the resultant non-zero values.
44700//
44701// Arguments:
44702//
44703//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
44704//
44705// SparseTensor, possibly not in canonical ordering.
44706//
44707//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
44708//	sp_shape: 1-D.  Shape of the input SparseTensor.
44709//	dense: `R`-D.  The dense Tensor operand.
44710//
44711// Returns 1-D.  The `N` values that are operated on.
44712func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
44713	if scope.Err() != nil {
44714		return
44715	}
44716	opspec := tf.OpSpec{
44717		Type: "SparseDenseCwiseAdd",
44718		Input: []tf.Input{
44719			sp_indices, sp_values, sp_shape, dense,
44720		},
44721	}
44722	op := scope.AddOperation(opspec)
44723	return op.Output(0)
44724}
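
// Example (hand-written sketch, not part of the machine-generated wrappers):
// adding a dense matrix to a 2x3 SparseTensor. Only the dense values at the
// sparse indices participate; the result keeps the sparse operand's indices
// and shape.
func exampleSparseDenseCwiseAddUsage() (*tf.Graph, error) {
	s := NewScope()
	spIndices := Const(s, [][]int64{{0, 0}, {1, 2}})
	spValues := Const(s, []float32{1, 2})
	spShape := Const(s, []int64{2, 3})
	dense := Const(s, [][]float32{{10, 20, 30}, {40, 50, 60}})
	_ = SparseDenseCwiseAdd(s, spIndices, spValues, spShape, dense) // [11, 62]
	return s.Finalize()
}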
44725
44726// Component-wise divides a SparseTensor by a dense Tensor.
44727//
44728// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
44729// the other direction.
44730//
44731// Arguments:
44732//
44733//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
44734//
44735// SparseTensor, possibly not in canonical ordering.
44736//
44737//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
44738//	sp_shape: 1-D.  Shape of the input SparseTensor.
44739//	dense: `R`-D.  The dense Tensor operand.
44740//
44741// Returns 1-D.  The `N` values that are operated on.
44742func SparseDenseCwiseDiv(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
44743	if scope.Err() != nil {
44744		return
44745	}
44746	opspec := tf.OpSpec{
44747		Type: "SparseDenseCwiseDiv",
44748		Input: []tf.Input{
44749			sp_indices, sp_values, sp_shape, dense,
44750		},
44751	}
44752	op := scope.AddOperation(opspec)
44753	return op.Output(0)
44754}
44755
44756// Component-wise multiplies a SparseTensor by a dense Tensor.
44757//
44758// The output locations corresponding to the implicitly zero elements in the sparse
44759// tensor will be zero (i.e., will not take up storage space), regardless of the
// contents of the dense tensor (even if it is +/-Inf, and despite the fact
// that Inf*0 == NaN).
44761//
44762// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
44763// the other direction.
44764//
44765// Arguments:
44766//
44767//	sp_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
44768//
44769// SparseTensor, possibly not in canonical ordering.
44770//
44771//	sp_values: 1-D.  `N` non-empty values corresponding to `sp_indices`.
44772//	sp_shape: 1-D.  Shape of the input SparseTensor.
44773//	dense: `R`-D.  The dense Tensor operand.
44774//
44775// Returns 1-D.  The `N` values that are operated on.
44776func SparseDenseCwiseMul(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output, dense tf.Output) (output tf.Output) {
44777	if scope.Err() != nil {
44778		return
44779	}
44780	opspec := tf.OpSpec{
44781		Type: "SparseDenseCwiseMul",
44782		Input: []tf.Input{
44783			sp_indices, sp_values, sp_shape, dense,
44784		},
44785	}
44786	op := scope.AddOperation(opspec)
44787	return op.Output(0)
44788}
44789
44790// Fills empty rows in the input 2-D `SparseTensor` with a default value.
44791//
44792// The input `SparseTensor` is represented via the tuple of inputs
44793// (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
44794// same `dense_shape` but with indices `output_indices` and values
44795// `output_values`.
44796//
44797// This op inserts a single entry for every row that doesn't have any values.
44798// The index is created as `[row, 0, ..., 0]` and the inserted value
44799// is `default_value`.
44800//
44801// For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
44802//
44803//	[0, 1]: a
44804//	[0, 3]: b
44805//	[2, 0]: c
44806//	[3, 1]: d
44807//
44808// Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
44809//
44810//	[0, 1]: a
44811//	[0, 3]: b
44812//	[1, 0]: default_value
44813//	[2, 0]: c
44814//	[3, 1]: d
44815//	[4, 0]: default_value
44816//
44817// The output `SparseTensor` will be in row-major order and will have the
44818// same shape as the input.
44819//
44820// This op also returns an indicator vector shaped `[dense_shape[0]]` such that
44821//
44822//	empty_row_indicator[i] = True iff row i was an empty row.
44823//
44824// And a reverse index map vector shaped `[indices.shape[0]]` that is used during
44825// backpropagation,
44826//
44827//	reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
44828//
44829// Arguments:
44830//
44831//		indices: 2-D. the indices of the sparse tensor.
44832//		values: 1-D. the values of the sparse tensor.
44833//		dense_shape: 1-D. the shape of the sparse tensor.
44834//		default_value: 0-D. default value to insert into location `[row, 0, ..., 0]`
44835//	  for rows missing from the input sparse tensor.
44836//
// Returns:
//
//	output_indices: 2-D. the indices of the filled sparse tensor.
44842//	output_values: 1-D. the values of the filled sparse tensor.
44843//	empty_row_indicator: 1-D. whether the dense row was missing in the
44844//
44845// input sparse tensor.
44846//
44847//	reverse_index_map: 1-D. a map from the input indices to the output indices.
44848func SparseFillEmptyRows(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output, default_value tf.Output) (output_indices tf.Output, output_values tf.Output, empty_row_indicator tf.Output, reverse_index_map tf.Output) {
44849	if scope.Err() != nil {
44850		return
44851	}
44852	opspec := tf.OpSpec{
44853		Type: "SparseFillEmptyRows",
44854		Input: []tf.Input{
44855			indices, values, dense_shape, default_value,
44856		},
44857	}
44858	op := scope.AddOperation(opspec)
44859	return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
44860}
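
// Example (hand-written sketch, not part of the machine-generated wrappers):
// filling the empty rows of the 5x6 SparseTensor from the example above with
// a default value of 0.
func exampleSparseFillEmptyRowsUsage() (*tf.Graph, error) {
	s := NewScope()
	indices := Const(s, [][]int64{{0, 1}, {0, 3}, {2, 0}, {3, 1}})
	values := Const(s, []float32{1, 2, 3, 4})
	denseShape := Const(s, []int64{5, 6})
	defaultValue := Const(s, float32(0))
	outI, outV, emptyRow, reverseMap := SparseFillEmptyRows(s,
		indices, values, denseShape, defaultValue)
	_, _, _, _ = outI, outV, emptyRow, reverseMap
	return s.Finalize()
}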
44861
44862// The gradient of SparseFillEmptyRows.
44863//
44864// Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
44865// shaped `[N_full]`, where `N_full >= N` and copies data into either
44866// `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
44867// `d_default_value` is a scalar.
44868//
44869//	d_values[j] = grad_values[reverse_index_map[j]]
44870//	d_default_value = sum_{k : 0 .. N_full - 1} (
44871//	   grad_values[k] * 1{k not in reverse_index_map})
44872//
44873// Arguments:
44874//
44875//	reverse_index_map: 1-D.  The reverse index map from SparseFillEmptyRows.
44876//	grad_values: 1-D.  The gradients from backprop.
44877//
44878// Returns:
44879//
44880//	d_values: 1-D.  The backprop into values.
44881//	d_default_value: 0-D.  The backprop into default_value.
44882func SparseFillEmptyRowsGrad(scope *Scope, reverse_index_map tf.Output, grad_values tf.Output) (d_values tf.Output, d_default_value tf.Output) {
44883	if scope.Err() != nil {
44884		return
44885	}
44886	opspec := tf.OpSpec{
44887		Type: "SparseFillEmptyRowsGrad",
44888		Input: []tf.Input{
44889			reverse_index_map, grad_values,
44890		},
44891	}
44892	op := scope.AddOperation(opspec)
44893	return op.Output(0), op.Output(1)
44894}
44895
44896// SparseMatMulAttr is an optional argument to SparseMatMul.
44897type SparseMatMulAttr func(optionalAttr)
44898
44899// SparseMatMulTransposeA sets the optional transpose_a attribute to value.
44900// If not specified, defaults to false
44901func SparseMatMulTransposeA(value bool) SparseMatMulAttr {
44902	return func(m optionalAttr) {
44903		m["transpose_a"] = value
44904	}
44905}
44906
44907// SparseMatMulTransposeB sets the optional transpose_b attribute to value.
44908// If not specified, defaults to false
44909func SparseMatMulTransposeB(value bool) SparseMatMulAttr {
44910	return func(m optionalAttr) {
44911		m["transpose_b"] = value
44912	}
44913}
44914
44915// SparseMatMulAIsSparse sets the optional a_is_sparse attribute to value.
44916// If not specified, defaults to false
44917func SparseMatMulAIsSparse(value bool) SparseMatMulAttr {
44918	return func(m optionalAttr) {
44919		m["a_is_sparse"] = value
44920	}
44921}
44922
44923// SparseMatMulBIsSparse sets the optional b_is_sparse attribute to value.
44924// If not specified, defaults to false
44925func SparseMatMulBIsSparse(value bool) SparseMatMulAttr {
44926	return func(m optionalAttr) {
44927		m["b_is_sparse"] = value
44928	}
44929}
44930
44931// Multiply matrix "a" by matrix "b".
44932//
44933// The inputs must be two-dimensional matrices and the inner dimension of "a" must
44934// match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
44935// `SparseTensor`s.  This op is optimized for the case where at least one of "a" or
44936// "b" is sparse, in the sense that they have a large proportion of zero values.
44937// The breakeven for using this versus a dense matrix multiply on one platform was
44938// 30% zero values in the sparse matrix.
44939//
44940// The gradient computation of this operation will only take advantage of sparsity
44941// in the input gradient when that gradient comes from a Relu.
44942func SparseMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatMulAttr) (product tf.Output) {
44943	if scope.Err() != nil {
44944		return
44945	}
44946	attrs := map[string]interface{}{}
44947	for _, a := range optional {
44948		a(attrs)
44949	}
44950	opspec := tf.OpSpec{
44951		Type: "SparseMatMul",
44952		Input: []tf.Input{
44953			a, b,
44954		},
44955		Attrs: attrs,
44956	}
44957	op := scope.AddOperation(opspec)
44958	return op.Output(0)
44959}
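
// Example (hand-written sketch, not part of the machine-generated wrappers):
// SparseMatMul on two dense Tensors, hinting via the a_is_sparse
// functional option that "a" is mostly zeros.
func exampleSparseMatMulUsage() (*tf.Graph, error) {
	s := NewScope()
	a := Const(s, [][]float32{{0, 0, 1}, {0, 0, 0}}) // mostly zero
	b := Const(s, [][]float32{{1, 2}, {3, 4}, {5, 6}})
	_ = SparseMatMul(s, a, b, SparseMatMulAIsSparse(true)) // 2x2 product
	return s.Finalize()
}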
44960
44961// Sparse addition of two CSR matrices, C = alpha * A + beta * B.
44962//
44963// The gradients of SparseMatrixAdd outputs with respect to alpha and beta are not
44964// currently defined (TensorFlow will return zeros for these entries).
44965//
44966// Arguments:
44967//
44968//	a: A CSRSparseMatrix.
44969//	b: A CSRSparseMatrix.
44970//	alpha: A constant scalar.
44971//	beta: A constant scalar.
44972//
44973// Returns A CSRSparseMatrix.
44974func SparseMatrixAdd(scope *Scope, a tf.Output, b tf.Output, alpha tf.Output, beta tf.Output) (c tf.Output) {
44975	if scope.Err() != nil {
44976		return
44977	}
44978	opspec := tf.OpSpec{
44979		Type: "SparseMatrixAdd",
44980		Input: []tf.Input{
44981			a, b, alpha, beta,
44982		},
44983	}
44984	op := scope.AddOperation(opspec)
44985	return op.Output(0)
44986}
44987
44988// SparseMatrixMatMulAttr is an optional argument to SparseMatrixMatMul.
44989type SparseMatrixMatMulAttr func(optionalAttr)
44990
44991// SparseMatrixMatMulTransposeA sets the optional transpose_a attribute to value.
44992//
44993// value: Indicates whether `a` should be transposed.
44994// If not specified, defaults to false
44995func SparseMatrixMatMulTransposeA(value bool) SparseMatrixMatMulAttr {
44996	return func(m optionalAttr) {
44997		m["transpose_a"] = value
44998	}
44999}
45000
45001// SparseMatrixMatMulTransposeB sets the optional transpose_b attribute to value.
45002//
45003// value: Indicates whether `b` should be transposed.
45004// If not specified, defaults to false
45005func SparseMatrixMatMulTransposeB(value bool) SparseMatrixMatMulAttr {
45006	return func(m optionalAttr) {
45007		m["transpose_b"] = value
45008	}
45009}
45010
45011// SparseMatrixMatMulAdjointA sets the optional adjoint_a attribute to value.
45012//
45013// value: Indicates whether `a` should be conjugate-transposed.
45014// If not specified, defaults to false
45015func SparseMatrixMatMulAdjointA(value bool) SparseMatrixMatMulAttr {
45016	return func(m optionalAttr) {
45017		m["adjoint_a"] = value
45018	}
45019}
45020
45021// SparseMatrixMatMulAdjointB sets the optional adjoint_b attribute to value.
45022//
45023// value: Indicates whether `b` should be conjugate-transposed.
45024// If not specified, defaults to false
45025func SparseMatrixMatMulAdjointB(value bool) SparseMatrixMatMulAttr {
45026	return func(m optionalAttr) {
45027		m["adjoint_b"] = value
45028	}
45029}
45030
45031// SparseMatrixMatMulTransposeOutput sets the optional transpose_output attribute to value.
45032//
45033// value: Transposes the product of `a` and `b`.
45034// If not specified, defaults to false
45035func SparseMatrixMatMulTransposeOutput(value bool) SparseMatrixMatMulAttr {
45036	return func(m optionalAttr) {
45037		m["transpose_output"] = value
45038	}
45039}
45040
45041// SparseMatrixMatMulConjugateOutput sets the optional conjugate_output attribute to value.
45042//
45043// value: Conjugates the product of `a` and `b`.
45044// If not specified, defaults to false
45045func SparseMatrixMatMulConjugateOutput(value bool) SparseMatrixMatMulAttr {
45046	return func(m optionalAttr) {
45047		m["conjugate_output"] = value
45048	}
45049}
45050
45051// Matrix-multiplies a sparse matrix with a dense matrix.
45052//
45053// Returns a dense matrix.
// For inputs A and B, where A is CSR and B is dense, this op returns a dense C.
//
// If transpose_output is `false`, returns:
45057// ```
45058//
45059//	C = A . B
45060//
45061// ```
45062//
45063// If transpose_output is `true`, returns:
45064// ```
45065//
45066//	C = transpose(A . B) = transpose(B) . transpose(A)
45067//
45068// ```
45069// where the transposition is performed along the two innermost (matrix)
45070// dimensions.
45071//
45072// If conjugate_output is `true`, returns:
45073// ```
45074//
45075//	C = conjugate(A . B) = conjugate(A) . conjugate(B)
45076//
45077// ```
45078//
45079// If both conjugate_output and transpose_output are `true`, returns:
45080// ```
45081//
45082//	C = conjugate(transpose(A . B)) = conjugate(transpose(B)) .
45083//	                                  conjugate(transpose(A))
45084//
45085// ```
45086//
45087// Arguments:
45088//
45089//	a: A CSRSparseMatrix.
45090//	b: A dense tensor.
45091//
45092// Returns A dense output tensor.
45093func SparseMatrixMatMul(scope *Scope, a tf.Output, b tf.Output, optional ...SparseMatrixMatMulAttr) (output tf.Output) {
45094	if scope.Err() != nil {
45095		return
45096	}
45097	attrs := map[string]interface{}{}
45098	for _, a := range optional {
45099		a(attrs)
45100	}
45101	opspec := tf.OpSpec{
45102		Type: "SparseMatrixMatMul",
45103		Input: []tf.Input{
45104			a, b,
45105		},
45106		Attrs: attrs,
45107	}
45108	op := scope.AddOperation(opspec)
45109	return op.Output(0)
45110}
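
// Example (hand-written sketch, not part of the machine-generated wrappers):
// multiplying a CSR sparse matrix by a dense matrix. It assumes the
// SparseTensorToCSRSparseMatrix wrapper generated elsewhere in this file for
// building the CSR operand from COO components.
func exampleSparseMatrixMatMulUsage() (*tf.Graph, error) {
	s := NewScope()
	aIndices := Const(s, [][]int64{{0, 0}, {1, 1}})
	aValues := Const(s, []float32{1, 2})
	aShape := Const(s, []int64{2, 2})
	a := SparseTensorToCSRSparseMatrix(s, aIndices, aValues, aShape)
	b := Const(s, [][]float32{{1, 2}, {3, 4}})
	_ = SparseMatrixMatMul(s, a, b) // dense 2x2 result
	return s.Finalize()
}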
45111
45112// Element-wise multiplication of a sparse matrix with a dense tensor.
45113//
45114// Returns a sparse matrix.
45115//
// The dense tensor `b` may be a scalar; otherwise `a` must be a rank-3
// `SparseMatrix`, in which case `b` must be shaped `[batch_size, 1, 1]` and the
// multiply operation broadcasts.
45119//
45120// **NOTE** even if `b` is zero, the sparsity structure of the output does not
45121// change.
45122//
45123// Arguments:
45124//
45125//	a: A CSRSparseMatrix.
45126//	b: A dense tensor.
45127//
// Returns A CSRSparseMatrix.
45129func SparseMatrixMul(scope *Scope, a tf.Output, b tf.Output) (output tf.Output) {
45130	if scope.Err() != nil {
45131		return
45132	}
45133	opspec := tf.OpSpec{
45134		Type: "SparseMatrixMul",
45135		Input: []tf.Input{
45136			a, b,
45137		},
45138	}
45139	op := scope.AddOperation(opspec)
45140	return op.Output(0)
45141}
45142
45143// Returns the number of nonzeroes of `sparse_matrix`.
45144//
45145// Arguments:
45146//
45147//	sparse_matrix: A CSRSparseMatrix.
45148//
45149// Returns The number of nonzeroes of `sparse_matrix`.
45150func SparseMatrixNNZ(scope *Scope, sparse_matrix tf.Output) (nnz tf.Output) {
45151	if scope.Err() != nil {
45152		return
45153	}
45154	opspec := tf.OpSpec{
45155		Type: "SparseMatrixNNZ",
45156		Input: []tf.Input{
45157			sparse_matrix,
45158		},
45159	}
45160	op := scope.AddOperation(opspec)
45161	return op.Output(0)
45162}
45163
45164// Computes the Approximate Minimum Degree (AMD) ordering of `input`.
45165//
45166// Computes the Approximate Minimum Degree (AMD) ordering for a sparse matrix.
45167//
45168// The returned permutation may be used to permute the rows and columns of the
// given sparse matrix. This typically results in the sparse Cholesky (or
// other) decomposition of the permuted matrix having less fill-in than the
// decomposition of the original matrix.
45172//
// The input sparse matrix may have rank 2 or rank 3. The output Tensor
// representing the permutation would then have rank 1 or 2 respectively, with
// the same batch shape as the input.
45176//
45177// Each component of the input sparse matrix must represent a square symmetric
45178// matrix; only the lower triangular part of the matrix is read. The values of the
// sparse matrix do not affect the returned permutation; only the sparsity
// pattern of the sparse matrix is used. Hence, a single AMD ordering may be
45181// reused for the Cholesky decompositions of sparse matrices with the same sparsity
45182// pattern but with possibly different values.
45183//
45184// Each batch component of the output permutation represents a permutation of `N`
45185// elements, where the input sparse matrix components each have `N` rows. That is,
45186// the component contains each of the integers `{0, .. N-1}` exactly once. The
45187// `i`th element represents the row index that the `i`th row maps to.
45188//
45189// Usage example:
45190//
45191// ```python
45192//
45193//	from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
45194//
45195//	a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
45196//	a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
45197//	a_dense_shape = [4, 4]
45198//
45199//	with tf.Session() as sess:
45200//	  # Define (COO format) SparseTensor over Numpy array.
45201//	  a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
45202//
45203//	  # Convert SparseTensors to CSR SparseMatrix.
45204//	  a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
45205//	      a_st.indices, a_st.values, a_st.dense_shape)
45206//
45207//	  # Obtain the AMD Ordering for the CSR SparseMatrix.
//	  ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
45209//
45210//	  ordering_amd_value = sess.run(ordering_amd)
45211//
45212// ```
45213//
45214// `ordering_amd_value` stores the AMD ordering: `[1 2 3 0]`.
45215//
45218// Arguments:
45219//
45220//	input: A `CSRSparseMatrix`.
45221//
45222// Returns The Approximate Minimum Degree (AMD) ordering of `input`.
45223func SparseMatrixOrderingAMD(scope *Scope, input tf.Output) (output tf.Output) {
45224	if scope.Err() != nil {
45225		return
45226	}
45227	opspec := tf.OpSpec{
45228		Type: "SparseMatrixOrderingAMD",
45229		Input: []tf.Input{
45230			input,
45231		},
45232	}
45233	op := scope.AddOperation(opspec)
45234	return op.Output(0)
45235}
45236
45237// Calculates the softmax of a CSRSparseMatrix.
45238//
45239// Calculate the softmax of the innermost dimensions of a SparseMatrix.
45240//
45241// Missing values are treated as `-inf` (i.e., logits of zero probability); and
45242// the output has the same sparsity structure as the input (though missing values
45243// in the output may now be treated as having probability zero).
45244//
45245// Arguments:
45246//
45247//	logits: A CSRSparseMatrix.
45248//
45249// Returns A CSRSparseMatrix.
45250func SparseMatrixSoftmax(scope *Scope, logits tf.Output, type_ tf.DataType) (softmax tf.Output) {
45251	if scope.Err() != nil {
45252		return
45253	}
45254	attrs := map[string]interface{}{"type": type_}
45255	opspec := tf.OpSpec{
45256		Type: "SparseMatrixSoftmax",
45257		Input: []tf.Input{
45258			logits,
45259		},
45260		Attrs: attrs,
45261	}
45262	op := scope.AddOperation(opspec)
45263	return op.Output(0)
45264}
45265
45266// Calculates the gradient of the SparseMatrixSoftmax op.
45267//
45268// Arguments:
45269//
45270//	softmax: A CSRSparseMatrix.
45271//	grad_softmax: The gradient of `softmax`.
45272//
45273// Returns The output gradient.
45274func SparseMatrixSoftmaxGrad(scope *Scope, softmax tf.Output, grad_softmax tf.Output, type_ tf.DataType) (gradient tf.Output) {
45275	if scope.Err() != nil {
45276		return
45277	}
45278	attrs := map[string]interface{}{"type": type_}
45279	opspec := tf.OpSpec{
45280		Type: "SparseMatrixSoftmaxGrad",
45281		Input: []tf.Input{
45282			softmax, grad_softmax,
45283		},
45284		Attrs: attrs,
45285	}
45286	op := scope.AddOperation(opspec)
45287	return op.Output(0)
45288}
45289
45290// Computes the sparse Cholesky decomposition of `input`.
45291//
45292// Computes the Sparse Cholesky decomposition of a sparse matrix, with the given
45293// fill-in reducing permutation.
45294//
45295// The input sparse matrix and the fill-in reducing permutation `permutation` must
// have compatible shapes. If the sparse matrix has rank 3, with batch
// dimension `B`, then `permutation` must have rank 2, with the same batch
// dimension `B`. There is no support for broadcasting.
45299//
45300// Furthermore, each component vector of `permutation` must be of length `N`,
45301// containing each of the integers {0, 1, ..., N - 1} exactly once, where `N` is
45302// the number of rows of each component of the sparse matrix.
45303//
45304// Each component of the input sparse matrix must represent a symmetric positive
// definite (SPD) matrix, although only the lower triangular part of the matrix is
// read. If any individual component is not SPD, then an InvalidArgument error is
// thrown.
//
// The returned sparse matrix has the same dense shape as the input sparse matrix.
// For each component `A` of the input sparse matrix, the corresponding output
// sparse matrix represents `L`, the lower triangular Cholesky factor satisfying
// the following identity:
//
// ```
//
//	A = L * Lt
//
// ```
//
// where Lt denotes the transpose of L (or its conjugate transpose, if `type` is
// `complex64` or `complex128`).
//
// The `type` parameter denotes the type of the matrix elements. The supported
// types are: `float32`, `float64`, `complex64` and `complex128`.
//
// Usage example:
//
// ```python
//
//	from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
//
//	a_indices = np.array([[0, 0], [1, 1], [2, 1], [2, 2], [3, 3]])
//	a_values = np.array([1.0, 2.0, 1.0, 3.0, 4.0], np.float32)
//	a_dense_shape = [4, 4]
//
//	with tf.Session() as sess:
//	  # Define (COO format) SparseTensor over Numpy array.
//	  a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
//
//	  # Convert SparseTensors to CSR SparseMatrix.
//	  a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
//	      a_st.indices, a_st.values, a_st.dense_shape)
//
//	  # Obtain the Sparse Cholesky factor using AMD Ordering for reducing zero
//	  # fill-in (number of structural non-zeros in the sparse Cholesky factor).
//	  ordering_amd = sparse_csr_matrix_ops.sparse_matrix_ordering_amd(a_sm)
//	  cholesky_sparse_matrices = (
//	      sparse_csr_matrix_ops.sparse_matrix_sparse_cholesky(
//	          a_sm, ordering_amd, type=tf.float32))
//
//	  # Convert the CSRSparseMatrix Cholesky factor to a dense Tensor
//	  dense_cholesky = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
//	      cholesky_sparse_matrices, tf.float32)
//
//	  # Evaluate the dense Tensor value.
//	  dense_cholesky_value = sess.run(dense_cholesky)
//
// ```
//
// `dense_cholesky_value` stores the dense Cholesky factor:
//
// ```
//
//	[[  1.  0.    0.    0.]
//	 [  0.  1.41  0.    0.]
//	 [  0.  0.70  1.58  0.]
//	 [  0.  0.    0.    2.]]
//
// ```
//
// input: A `CSRSparseMatrix`.
// permutation: A `Tensor`.
// type: The type of `input`.
//
// Arguments:
//
//	input: A `CSRSparseMatrix`.
//	permutation: A fill-in reducing permutation matrix.
//
// Returns The sparse Cholesky decomposition of `input`.
func SparseMatrixSparseCholesky(scope *Scope, input tf.Output, permutation tf.Output, type_ tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	opspec := tf.OpSpec{
		Type: "SparseMatrixSparseCholesky",
		Input: []tf.Input{
			input, permutation,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
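
// A hedged Go counterpart to the Python example above. Illustrative only;
// it assumes the CSRSparseMatrixToDense wrapper defined earlier in this
// file, and builds the same 4x4 SPD matrix:
//
// ```go
//
//	s := op.NewScope()
//	indices := op.Const(s.SubScope("indices"), [][]int64{{0, 0}, {1, 1}, {2, 1}, {2, 2}, {3, 3}})
//	values := op.Const(s.SubScope("values"), []float32{1, 2, 1, 3, 4})
//	shape := op.Const(s.SubScope("shape"), []int64{4, 4})
//	aSM := op.SparseTensorToCSRSparseMatrix(s, indices, values, shape)
//	// AMD ordering reduces fill-in in the Cholesky factor.
//	ordering := op.SparseMatrixOrderingAMD(s, aSM)
//	cholesky := op.SparseMatrixSparseCholesky(s, aSM, ordering, tf.Float)
//	dense := op.CSRSparseMatrixToDense(s, cholesky, tf.Float)
//	_ = dense
//
// ```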

// SparseMatrixSparseMatMulAttr is an optional argument to SparseMatrixSparseMatMul.
type SparseMatrixSparseMatMulAttr func(optionalAttr)

// SparseMatrixSparseMatMulTransposeA sets the optional transpose_a attribute to value.
//
// value: Indicates whether `a` should be transposed.
// If not specified, defaults to false
func SparseMatrixSparseMatMulTransposeA(value bool) SparseMatrixSparseMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_a"] = value
	}
}

// SparseMatrixSparseMatMulTransposeB sets the optional transpose_b attribute to value.
//
// value: Indicates whether `b` should be transposed.
// If not specified, defaults to false
func SparseMatrixSparseMatMulTransposeB(value bool) SparseMatrixSparseMatMulAttr {
	return func(m optionalAttr) {
		m["transpose_b"] = value
	}
}

// SparseMatrixSparseMatMulAdjointA sets the optional adjoint_a attribute to value.
//
// value: Indicates whether `a` should be conjugate-transposed.
// If not specified, defaults to false
func SparseMatrixSparseMatMulAdjointA(value bool) SparseMatrixSparseMatMulAttr {
	return func(m optionalAttr) {
		m["adjoint_a"] = value
	}
}

// SparseMatrixSparseMatMulAdjointB sets the optional adjoint_b attribute to value.
//
// value: Indicates whether `b` should be conjugate-transposed.
// If not specified, defaults to false
func SparseMatrixSparseMatMulAdjointB(value bool) SparseMatrixSparseMatMulAttr {
	return func(m optionalAttr) {
		m["adjoint_b"] = value
	}
}

// Sparse-matrix-multiplies two CSR matrices `a` and `b`.
//
// Performs a matrix multiplication of a sparse matrix `a` with a sparse matrix
// `b`; returns a sparse matrix `a * b`, unless either `a` or `b` is transposed or
// adjointed.
//
// Each matrix may be transposed or adjointed (conjugated and transposed)
// according to the Boolean parameters `transpose_a`, `adjoint_a`, `transpose_b`
// and `adjoint_b`. At most one of `transpose_a` or `adjoint_a` may be True.
// Similarly, at most one of `transpose_b` or `adjoint_b` may be True.
//
// The inputs must have compatible shapes. That is, the inner dimension of `a`
// must be equal to the outer dimension of `b`. This requirement is adjusted
// according to whether either `a` or `b` is transposed or adjointed.
//
// The `type` parameter denotes the type of the matrix elements. Both `a` and `b`
// must have the same type. The supported types are: `float32`, `float64`,
// `complex64` and `complex128`.
//
// Both `a` and `b` must have the same rank. Broadcasting is not supported. If they
// have rank 3, each batch of 2D CSRSparseMatrices within `a` and `b` must have the
// same dense shape.
//
// The sparse matrix product may have numeric (non-structural) zeros.
// TODO(anudhyan): Consider adding a boolean attribute to control whether to prune
// zeros.
//
// Usage example:
//
// ```python
//
//	from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
//
//	a_indices = np.array([[0, 0], [2, 3], [2, 4], [3, 0]])
//	a_values = np.array([1.0, 5.0, -1.0, -2.0], np.float32)
//	a_dense_shape = [4, 5]
//
//	b_indices = np.array([[0, 0], [3, 0], [3, 1]])
//	b_values = np.array([2.0, 7.0, 8.0], np.float32)
//	b_dense_shape = [5, 3]
//
//	with tf.Session() as sess:
//	  # Define (COO format) Sparse Tensors over Numpy arrays
//	  a_st = tf.sparse.SparseTensor(a_indices, a_values, a_dense_shape)
//	  b_st = tf.sparse.SparseTensor(b_indices, b_values, b_dense_shape)
//
//	  # Convert SparseTensors to CSR SparseMatrix
//	  a_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
//	      a_st.indices, a_st.values, a_st.dense_shape)
//	  b_sm = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
//	      b_st.indices, b_st.values, b_st.dense_shape)
//
//	  # Compute the CSR SparseMatrix matrix multiplication
//	  c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
//	      a=a_sm, b=b_sm, type=tf.float32)
//
//	  # Convert the CSR SparseMatrix product to a dense Tensor
//	  c_sm_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
//	      c_sm, tf.float32)
//	  # Evaluate the dense Tensor value
//	  c_sm_dense_value = sess.run(c_sm_dense)
//
// ```
//
// `c_sm_dense_value` stores the dense matrix product:
//
// ```
//
//	[[  2.   0.   0.]
//	 [  0.   0.   0.]
//	 [ 35.  40.   0.]
//	 [ -4.   0.   0.]]
//
// ```
//
// a: A `CSRSparseMatrix`.
// b: A `CSRSparseMatrix` with the same type and rank as `a`.
// type: The type of both `a` and `b`.
// transpose_a: If True, `a` is transposed before multiplication.
// transpose_b: If True, `b` is transposed before multiplication.
// adjoint_a: If True, `a` is adjointed before multiplication.
// adjoint_b: If True, `b` is adjointed before multiplication.
//
// Arguments:
//
//	a: A CSRSparseMatrix.
//	b: A CSRSparseMatrix.
//
// Returns A CSRSparseMatrix.
func SparseMatrixSparseMatMul(scope *Scope, a tf.Output, b tf.Output, type_ tf.DataType, optional ...SparseMatrixSparseMatMulAttr) (c tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseMatrixSparseMatMul",
		Input: []tf.Input{
			a, b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
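
// A hedged Go sketch mirroring the Python example above. `s`, `aSM` and
// `bSM` stand for a scope and CSRSparseMatrix values built as in the
// SparseMatrixSparseCholesky example; only the multiply itself is shown:
//
// ```go
//
//	c := op.SparseMatrixSparseMatMul(s, aSM, bSM, tf.Float)
//	// Optional attributes use the functional-option pattern, e.g. to
//	// compute a * transpose(b) instead:
//	cT := op.SparseMatrixSparseMatMul(s, aSM, bSM, tf.Float,
//		op.SparseMatrixSparseMatMulTransposeB(true))
//	_, _ = c, cT
//
// ```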

// SparseMatrixTransposeAttr is an optional argument to SparseMatrixTranspose.
type SparseMatrixTransposeAttr func(optionalAttr)

// SparseMatrixTransposeConjugate sets the optional conjugate attribute to value.
//
// value: Indicates whether `input` should be conjugated.
// If not specified, defaults to false
func SparseMatrixTransposeConjugate(value bool) SparseMatrixTransposeAttr {
	return func(m optionalAttr) {
		m["conjugate"] = value
	}
}

// Transposes the inner (matrix) dimensions of a CSRSparseMatrix.
//
// Transposes the inner (matrix) dimensions of a SparseMatrix and optionally
// conjugates its values.
//
// Arguments:
//
//	input: A CSRSparseMatrix.
//
// Returns A CSRSparseMatrix.
func SparseMatrixTranspose(scope *Scope, input tf.Output, type_ tf.DataType, optional ...SparseMatrixTransposeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseMatrixTranspose",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Creates an all-zeros CSRSparseMatrix with shape `dense_shape`.
//
// Arguments:
//
//	dense_shape: The desired matrix shape.
//
// Returns An empty CSR matrix with shape `dense_shape`.
func SparseMatrixZeros(scope *Scope, dense_shape tf.Output, type_ tf.DataType) (sparse_matrix tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"type": type_}
	opspec := tf.OpSpec{
		Type: "SparseMatrixZeros",
		Input: []tf.Input{
			dense_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SparseReduceMaxAttr is an optional argument to SparseReduceMax.
type SparseReduceMaxAttr func(optionalAttr)

// SparseReduceMaxKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceMaxKeepDims(value bool) SparseReduceMaxAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the max of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
// instead of a sparse one.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python indexing rules.
//
// Arguments:
//
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, possibly not in canonical ordering.
//
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
//
// Returns `R-K`-D.  The reduced Tensor.
func SparseReduceMax(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceMax",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
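
// A hedged usage sketch (illustrative values). The functional option shows
// how `keep_dims` is threaded through to the op's attributes:
//
// ```go
//
//	s := op.NewScope()
//	indices := op.Const(s.SubScope("indices"), [][]int64{{0, 0}, {1, 2}})
//	values := op.Const(s.SubScope("values"), []float32{3, 5})
//	shape := op.Const(s.SubScope("shape"), []int64{2, 3})
//	axes := op.Const(s.SubScope("axes"), []int32{1})
//	// Dense result of max-reducing the sparse input along axis 1.
//	reduced := op.SparseReduceMax(s, indices, values, shape, axes,
//		op.SparseReduceMaxKeepDims(true))
//	_ = reduced
//
// ```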

// SparseReduceMaxSparseAttr is an optional argument to SparseReduceMaxSparse.
type SparseReduceMaxSparseAttr func(optionalAttr)

// SparseReduceMaxSparseKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceMaxSparseKeepDims(value bool) SparseReduceMaxSparseAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the max of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
// SparseTensor.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python indexing rules.
//
// Arguments:
//
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, possibly not in canonical ordering.
//
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
func SparseReduceMaxSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceMaxSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceMaxSparse",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// SparseReduceSumAttr is an optional argument to SparseReduceSum.
type SparseReduceSumAttr func(optionalAttr)

// SparseReduceSumKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceSumKeepDims(value bool) SparseReduceSumAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the sum of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
// instead of a sparse one.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python indexing rules.
//
// Arguments:
//
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, possibly not in canonical ordering.
//
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
//
// Returns `R-K`-D.  The reduced Tensor.
func SparseReduceSum(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceSum",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SparseReduceSumSparseAttr is an optional argument to SparseReduceSumSparse.
type SparseReduceSumSparseAttr func(optionalAttr)

// SparseReduceSumSparseKeepDims sets the optional keep_dims attribute to value.
//
// value: If true, retain reduced dimensions with length 1.
// If not specified, defaults to false
func SparseReduceSumSparseKeepDims(value bool) SparseReduceSumSparseAttr {
	return func(m optionalAttr) {
		m["keep_dims"] = value
	}
}

// Computes the sum of elements across dimensions of a SparseTensor.
//
// This Op takes a SparseTensor and is the sparse counterpart to
// `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
// SparseTensor.
//
// Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
// `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
// with length 1.
//
// If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
// with a single element is returned.  Additionally, the axes can be negative,
// in which case they are interpreted according to Python indexing rules.
//
// Arguments:
//
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, possibly not in canonical ordering.
//
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//	reduction_axes: 1-D.  Length-`K` vector containing the reduction axes.
func SparseReduceSumSparse(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output, reduction_axes tf.Output, optional ...SparseReduceSumSparseAttr) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseReduceSumSparse",
		Input: []tf.Input{
			input_indices, input_values, input_shape, reduction_axes,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Reorders a SparseTensor into the canonical, row-major ordering.
//
// Note that by convention, all sparse ops preserve the canonical ordering along
// increasing dimension number. The only time ordering can be violated is during
// manual manipulation of the indices and values vectors to add entries.
//
// Reordering does not affect the shape of the SparseTensor.
//
// If the tensor has rank `R` and `N` non-empty values, `input_indices` has
// shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.
//
// Arguments:
//
//	input_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, possibly not in canonical ordering.
//
//	input_values: 1-D.  `N` non-empty values corresponding to `input_indices`.
//	input_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns:
//
//	output_indices: 2-D.  `N x R` matrix with the same indices as input_indices, but
//
// in canonical row-major ordering.
//
//	output_values: 1-D.  `N` non-empty values corresponding to `output_indices`.
func SparseReorder(scope *Scope, input_indices tf.Output, input_values tf.Output, input_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseReorder",
		Input: []tf.Input{
			input_indices, input_values, input_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
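
// A hedged sketch (illustrative values): out-of-order COO components go in,
// canonically ordered components come out.
//
// ```go
//
//	s := op.NewScope()
//	indices := op.Const(s.SubScope("indices"), [][]int64{{1, 0}, {0, 1}}) // not canonical
//	values := op.Const(s.SubScope("values"), []float32{20, 10})
//	shape := op.Const(s.SubScope("shape"), []int64{2, 2})
//	outIdx, outVals := op.SparseReorder(s, indices, values, shape)
//	// When run, outIdx is [[0, 1], [1, 0]] and outVals is [10, 20].
//	_, _ = outIdx, outVals
//
// ```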

// Reshapes a SparseTensor to represent values in a new dense shape.
//
// This operation has the same semantics as reshape on the represented dense
// tensor.  The `input_indices` are recomputed based on the requested `new_shape`.
//
// If one component of `new_shape` is the special value -1, the size of that
// dimension is computed so that the total dense size remains constant.  At
// most one component of `new_shape` can be -1.  The number of dense elements
// implied by `new_shape` must be the same as the number of dense elements
// originally implied by `input_shape`.
//
// Reshaping does not affect the order of values in the SparseTensor.
//
// If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
// has length `R_out`, then `input_indices` has shape `[N, R_in]`,
// `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
// `output_shape` has length `R_out`.
//
// Arguments:
//
//	input_indices: 2-D.  `N x R_in` matrix with the indices of non-empty values in a
//
// SparseTensor.
//
//	input_shape: 1-D.  `R_in` vector with the input SparseTensor's dense shape.
//	new_shape: 1-D.  `R_out` vector with the requested new dense shape.
//
// Returns:
//
//	output_indices: 2-D.  `N x R_out` matrix with the updated indices of non-empty
//
// values in the output SparseTensor.
//
//	output_shape: 1-D.  `R_out` vector with the full dense shape of the output
//
// SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
// filled in.
func SparseReshape(scope *Scope, input_indices tf.Output, input_shape tf.Output, new_shape tf.Output) (output_indices tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseReshape",
		Input: []tf.Input{
			input_indices, input_shape, new_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}
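
// A hedged sketch (illustrative values) of reshaping a sparse [2, 3] tensor
// to [3, -1], letting the op infer the final dimension:
//
// ```go
//
//	s := op.NewScope()
//	indices := op.Const(s.SubScope("indices"), [][]int64{{0, 0}, {1, 2}})
//	inShape := op.Const(s.SubScope("in_shape"), []int64{2, 3})
//	newShape := op.Const(s.SubScope("new_shape"), []int64{3, -1})
//	outIdx, outShape := op.SparseReshape(s, indices, inShape, newShape)
//	// outShape evaluates to [3, 2]; outIdx holds the recomputed indices.
//	_, _ = outIdx, outShape
//
// ```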

// Computes the mean along sparse segments of a tensor.
//
// See `tf.sparse.segment_sum` for usage examples.
//
// Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentMean(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentMean",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes gradients for SparseSegmentMean.
//
// Returns tensor "output" with same shape as grad, except for dimension 0 whose
// value is output_dim0.
//
// Arguments:
//
//	grad: gradient propagated to the SparseSegmentMean op.
//	indices: indices passed to the corresponding SparseSegmentMean op.
//	segment_ids: segment_ids passed to the corresponding SparseSegmentMean op.
//	output_dim0: dimension 0 of "data" passed to SparseSegmentMean op.
func SparseSegmentMeanGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentMeanGrad",
		Input: []tf.Input{
			grad, indices, segment_ids, output_dim0,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the mean along sparse segments of a tensor.
//
// Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which has size
// `num_segments`.
func SparseSegmentMeanWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentMeanWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
//
// N is the size of the segment being reduced.
//
// See `tf.sparse.segment_sum` for usage examples.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentSqrtN(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSqrtN",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes gradients for SparseSegmentSqrtN.
//
// Returns tensor "output" with same shape as grad, except for dimension 0 whose
// value is output_dim0.
//
// Arguments:
//
//	grad: gradient propagated to the SparseSegmentSqrtN op.
//	indices: indices passed to the corresponding SparseSegmentSqrtN op.
//	segment_ids: segment_ids passed to the corresponding SparseSegmentSqrtN op.
//	output_dim0: dimension 0 of "data" passed to SparseSegmentSqrtN op.
func SparseSegmentSqrtNGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSqrtNGrad",
		Input: []tf.Input{
			grad, indices, segment_ids, output_dim0,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
//
// N is the size of the segment being reduced.
//
// Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentSqrtNWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSqrtNWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
// for an explanation of segments.
//
// Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
// dimension, selecting a subset of dimension 0, specified by `indices`.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// # Select two rows, one segment.
// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
// # => [[0 0 0 0]]
//
// # Select two rows, two segments.
// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
// # => [[ 1  2  3  4]
// #     [-1 -2 -3 -4]]
//
// # Select all rows, two segments.
// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
// # => [[0 0 0 0]
// #     [5 6 7 8]]
//
// # Which is equivalent to:
// tf.segment_sum(c, tf.constant([0, 0, 1]))
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `k`, the number of segments.
func SparseSegmentSum(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSum",
		Input: []tf.Input{
			data, indices, segment_ids,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
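
// A hedged Go counterpart to the first Python snippet above (illustrative
// values; running the graph is elided):
//
// ```go
//
//	s := op.NewScope()
//	c := op.Const(s.SubScope("c"), [][]int32{{1, 2, 3, 4}, {-1, -2, -3, -4}, {5, 6, 7, 8}})
//	indices := op.Const(s.SubScope("indices"), []int32{0, 1})
//	segmentIDs := op.Const(s.SubScope("segment_ids"), []int32{0, 0})
//	// Rows 0 and 1 are selected and summed into one segment, giving
//	// [[0 0 0 0]] as in the example above.
//	sum := op.SparseSegmentSum(s, c, indices, segmentIDs)
//	_ = sum
//
// ```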

// Computes gradients for SparseSegmentSum.
//
// Returns tensor "output" with same shape as grad, except for dimension 0 whose
// value is output_dim0.
//
// Arguments:
//
//	grad: gradient propagated to the SparseSegmentSum op.
//	indices: indices passed to the corresponding SparseSegmentSum op.
//	segment_ids: segment_ids passed to the corresponding SparseSegmentSum op.
//	output_dim0: dimension 0 of "data" passed to SparseSegmentSum op.
func SparseSegmentSumGrad(scope *Scope, grad tf.Output, indices tf.Output, segment_ids tf.Output, output_dim0 tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSumGrad",
		Input: []tf.Input{
			grad, indices, segment_ids, output_dim0,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the sum along sparse segments of a tensor.
//
// Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
// missing, the `output` tensor at that position will be zeroed.
//
// Read
// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation)
// for an explanation of segments.
//
// For example:
//
// ```python
// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
//
// tf.sparse_segment_sum_with_num_segments(
//
//	c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
//
// # => [[0 0 0 0]
// #     [0 0 0 0]
// #     [0 0 0 0]]
//
// tf.sparse_segment_sum_with_num_segments(c,
//
//	tf.constant([0, 1]),
//	tf.constant([0, 2]),
//	num_segments=4)
//
// # => [[ 1  2  3  4]
// #     [ 0  0  0  0]
// #     [-1 -2 -3 -4]
// #     [ 0  0  0  0]]
// ```
//
// Arguments:
//
//	indices: A 1-D tensor. Has same rank as `segment_ids`.
//	segment_ids: A 1-D tensor. Values should be sorted and can be repeated.
//	num_segments: Should equal the number of distinct segment IDs.
//
// Returns Has same shape as data, except for dimension 0 which
// has size `num_segments`.
func SparseSegmentSumWithNumSegments(scope *Scope, data tf.Output, indices tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSegmentSumWithNumSegments",
		Input: []tf.Input{
			data, indices, segment_ids, num_segments,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Slice a `SparseTensor` based on the `start` and `size`.
//
// For example, if the input is
//
//	input_tensor = shape = [2, 7]
//	[    a   d e  ]
//	[b c          ]
//
// Graphically the output tensors are:
//
//	sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
//	[    a  ]
//	[b c    ]
//
//	sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
//	[ d e  ]
//	[      ]
//
// Arguments:
//
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	start: 1-D tensor representing the start of the slice.
//	size: 1-D tensor representing the size of the slice.
//
// Returns:
//
//	output_indices: 2-D.  The `indices` of the sliced `SparseTensor`.
//	output_values: 1-D.  The `values` of the sliced `SparseTensor`.
//	output_shape: 1-D.  The `shape` of the sliced `SparseTensor`.
func SparseSlice(scope *Scope, indices tf.Output, values tf.Output, shape tf.Output, start tf.Output, size tf.Output) (output_indices tf.Output, output_values tf.Output, output_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSlice",
		Input: []tf.Input{
			indices, values, shape, start, size,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}
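
// A hedged sketch matching the figure above: slicing the [2, 7] sparse
// tensor down to its first four columns (illustrative values only):
//
// ```go
//
//	s := op.NewScope()
//	indices := op.Const(s.SubScope("indices"), [][]int64{{0, 2}, {0, 4}, {0, 5}, {1, 0}, {1, 1}})
//	values := op.Const(s.SubScope("values"), []float32{1, 2, 3, 4, 5}) // a, d, e, b, c
//	shape := op.Const(s.SubScope("shape"), []int64{2, 7})
//	start := op.Const(s.SubScope("start"), []int64{0, 0})
//	size := op.Const(s.SubScope("size"), []int64{2, 4})
//	// The [2, 4] slice keeps a, b and c; d and e fall outside it.
//	outIdx, outVals, outShape := op.SparseSlice(s, indices, values, shape, start, size)
//	_, _, _ = outIdx, outVals, outShape
//
// ```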

// The gradient operator for the SparseSlice op.
//
// This op takes in the upstream gradient w.r.t. non-empty values of
// the sliced `SparseTensor`, and outputs the gradients w.r.t.
// the non-empty values of input `SparseTensor`.
//
// Arguments:
//
//	backprop_val_grad: 1-D. The gradient with respect to
//
// the non-empty values of the sliced `SparseTensor`.
//
//	input_indices: 2-D.  The `indices` of the input `SparseTensor`.
//	input_start: 1-D tensor representing the start of the slice.
//	output_indices: 2-D.  The `indices` of the sliced `SparseTensor`.
//
// Returns 1-D. The gradient with respect to the non-empty values of input `SparseTensor`.
func SparseSliceGrad(scope *Scope, backprop_val_grad tf.Output, input_indices tf.Output, input_start tf.Output, output_indices tf.Output) (val_grad tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSliceGrad",
		Input: []tf.Input{
			backprop_val_grad, input_indices, input_start, output_indices,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Applies softmax to a batched N-D `SparseTensor`.
//
// The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
// (where `N >= 2`), and with indices sorted in the canonical lexicographic order.
//
// This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
// logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
// zero elements do not participate*.  Specifically, the algorithm is equivalent
// to the following:
//
//	(1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
//	    with shape `[B, C]`, along the size-C dimension;
//	(2) Masks out the original implicitly-zero locations;
//	(3) Renormalizes the remaining elements.
//
// Hence, the `SparseTensor` result has exactly the same non-zero indices and
// shape.
//
// Arguments:
//
//	sp_indices: 2-D.  `NNZ x R` matrix with the indices of non-empty values in a
//
// SparseTensor, in canonical ordering.
//
//	sp_values: 1-D.  `NNZ` non-empty values corresponding to `sp_indices`.
//	sp_shape: 1-D.  Shape of the input SparseTensor.
//
// Returns 1-D.  The `NNZ` values for the result `SparseTensor`.
func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_shape tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSoftmax",
		Input: []tf.Input{
			sp_indices, sp_values, sp_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
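
// A hedged usage sketch (illustrative values; the indices are already in
// canonical order, which SparseReorder above can guarantee):
//
// ```go
//
//	s := op.NewScope()
//	indices := op.Const(s.SubScope("indices"), [][]int64{{0, 0}, {0, 2}, {1, 1}})
//	values := op.Const(s.SubScope("values"), []float32{1, 3, 2})
//	shape := op.Const(s.SubScope("shape"), []int64{2, 3})
//	// Softmax over each row; the implicit zeros do not participate.
//	outVals := op.SparseSoftmax(s, indices, values, shape)
//	_ = outVals
//
// ```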

// Computes softmax cross entropy cost and gradients to backpropagate.
//
// Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
// a matrix of label probabilities, but rather a single label per row
// of features.  This label is considered to have probability 1.0 for the
// given row.
//
// Inputs are the logits, not probabilities.
//
// Arguments:
//
//	features: batch_size x num_classes matrix
//	labels: batch_size vector with values in [0, num_classes).
//
// This is the label for the given minibatch entry.
//
// Returns:
//
//	loss: Per example loss (batch_size vector).
//	backprop: backpropagated gradients (batch_size x num_classes matrix).
func SparseSoftmaxCrossEntropyWithLogits(scope *Scope, features tf.Output, labels tf.Output) (loss tf.Output, backprop tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSoftmaxCrossEntropyWithLogits",
		Input: []tf.Input{
			features, labels,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Returns the element-wise max of two SparseTensors.
//
// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
// Arguments:
//
//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, in the canonical lexicographic ordering.
//
//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
//	a_shape: 1-D.  Shape of the input SparseTensor.
//	b_indices: counterpart to `a_indices` for the other operand.
//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
// Returns:
//
//	output_indices: 2-D.  The indices of the output SparseTensor.
//	output_values: 1-D.  The values of the output SparseTensor.
func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSparseMaximum",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Returns the element-wise min of two SparseTensors.
//
// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
//
// Arguments:
//
//	a_indices: 2-D.  `N x R` matrix with the indices of non-empty values in a
//
// SparseTensor, in the canonical lexicographic ordering.
//
//	a_values: 1-D.  `N` non-empty values corresponding to `a_indices`.
//	a_shape: 1-D.  Shape of the input SparseTensor.
//	b_indices: counterpart to `a_indices` for the other operand.
//	b_values: counterpart to `a_values` for the other operand; must be of the same dtype.
//	b_shape: counterpart to `a_shape` for the other operand; the two shapes must be equal.
//
// Returns:
//
//	output_indices: 2-D.  The indices of the output SparseTensor.
//	output_values: 1-D.  The values of the output SparseTensor.
func SparseSparseMinimum(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b_indices tf.Output, b_values tf.Output, b_shape tf.Output) (output_indices tf.Output, output_values tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseSparseMinimum",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b_indices, b_values, b_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

// Split a `SparseTensor` into `num_split` tensors along one dimension.
//
// If `shape[split_dim]` is not an integer multiple of `num_split`, slices
// `[0 : shape[split_dim] % num_split]` get one extra dimension.
// For example, if `split_dim = 1` and `num_split = 2` and the input is
//
//	input_tensor = shape = [2, 7]
//	[    a   d e  ]
//	[b c          ]
//
// Graphically the output tensors are:
//
//	output_tensor[0] = shape = [2, 4]
//	[    a  ]
//	[b c    ]
//
//	output_tensor[1] = shape = [2, 3]
//	[ d e  ]
//	[      ]
//
// Arguments:
//
//	split_dim: 0-D.  The dimension along which to split.  Must be in the range
//
// `[0, rank(shape))`.
//
//	indices: 2-D tensor representing the indices of the sparse tensor.
//	values: 1-D tensor representing the values of the sparse tensor.
//	shape: 1-D tensor representing the shape of the sparse tensor.
//	num_split: The number of ways to split.
//
// Returns:
//
//	output_indices: A list of 2-D tensors representing the indices of the output
//
// sparse tensors.
//
//	output_values: A list of 1-D tensors representing the values of the output
//
// sparse tensors.
//
//	output_shape: A list of 1-D tensors representing the shape of the output
//
// sparse tensors.
func SparseSplit(scope *Scope, split_dim tf.Output, indices tf.Output, values tf.Output, shape tf.Output, num_split int64) (output_indices []tf.Output, output_values []tf.Output, output_shape []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "SparseSplit",
		Input: []tf.Input{
			split_dim, indices, values, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output_indices, idx, err = makeOutputList(op, idx, "output_indices"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	if output_values, idx, err = makeOutputList(op, idx, "output_values"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	if output_shape, idx, err = makeOutputList(op, idx, "output_shape"); err != nil {
		scope.UpdateErr("SparseSplit", err)
		return
	}
	return output_indices, output_values, output_shape
}

// Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.
//
// This Op does not require `a_indices` be sorted in standard lexicographic order.
//
// Arguments:
//
//	a_indices: 2-D.  The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
//	a_values: 1-D.  The `values` of the `SparseTensor`, with shape `[nnz]`.
//	a_shape: 1-D.  The `shape` of the `SparseTensor`, with shape `[ndims]`.
//	b: `ndims`-D Tensor.  With shape `a_shape`.
func SparseTensorDenseAdd(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorDenseAdd",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul.
type SparseTensorDenseMatMulAttr func(optionalAttr)

// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value.
//
// value: Use the adjoint of A in the matrix multiply.  If A is complex, this
// is transpose(conj(A)).  Otherwise it's transpose(A).
// If not specified, defaults to false
func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr {
	return func(m optionalAttr) {
		m["adjoint_a"] = value
	}
}

// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value.
//
// value: Use the adjoint of B in the matrix multiply.  If B is complex, this
// is transpose(conj(B)).  Otherwise it's transpose(B).
// If not specified, defaults to false
func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr {
	return func(m optionalAttr) {
		m["adjoint_b"] = value
	}
}

// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
//
// No validity checking is performed on the indices of A.  However, the following
// input format is recommended for optimal behavior:
//
// if adjoint_a == false:
//
//	A should be sorted in lexicographically increasing order.  Use SparseReorder
//	if you're not sure.
//
// if adjoint_a == true:
//
//	A should be sorted in order of increasing dimension 1 (i.e., "column major"
//	order instead of "row major" order).
//
// Arguments:
//
//	a_indices: 2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
//	a_values: 1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
//	a_shape: 1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
//	b: 2-D.  A dense Matrix.
func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorDenseMatMul",
		Input: []tf.Input{
			a_indices, a_values, a_shape, b,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
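
// A hedged sketch following the recommendation above: reorder A first, then
// multiply by a dense B (illustrative values):
//
// ```go
//
//	s := op.NewScope()
//	aIdx := op.Const(s.SubScope("a_indices"), [][]int64{{1, 0}, {0, 1}})
//	aVals := op.Const(s.SubScope("a_values"), []float32{2, 3})
//	aShape := op.Const(s.SubScope("a_shape"), []int64{2, 2})
//	// Ensure lexicographic ordering before the multiply.
//	aIdxOrd, aValsOrd := op.SparseReorder(s, aIdx, aVals, aShape)
//	b := op.Const(s.SubScope("b"), [][]float32{{1, 0}, {0, 1}})
//	product := op.SparseTensorDenseMatMul(s, aIdxOrd, aValsOrd, aShape, b)
//	_ = product
//
// ```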

// Creates a dataset that splits a SparseTensor into elements row-wise.
func SparseTensorSliceDataset(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorSliceDataset",
		Input: []tf.Input{
			indices, values, dense_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Converts a SparseTensor to a (possibly batched) CSRSparseMatrix.
//
// Arguments:
//
//	indices: SparseTensor indices.
//	values: SparseTensor values.
//	dense_shape: SparseTensor dense shape.
//
// Returns A (possibly batched) CSRSparseMatrix.
func SparseTensorToCSRSparseMatrix(scope *Scope, indices tf.Output, values tf.Output, dense_shape tf.Output) (sparse_matrix tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SparseTensorToCSRSparseMatrix",
		Input: []tf.Input{
			indices, values, dense_shape,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// SparseToDenseAttr is an optional argument to SparseToDense.
type SparseToDenseAttr func(optionalAttr)

// SparseToDenseValidateIndices sets the optional validate_indices attribute to value.
//
// value: If true, indices are checked to make sure they are sorted in
// lexicographic order and that there are no repeats.
// If not specified, defaults to true
func SparseToDenseValidateIndices(value bool) SparseToDenseAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Converts a sparse representation into a dense tensor.
//
// Builds an array `dense` with shape `output_shape` such that
//
// ```
// # If sparse_indices is scalar
// dense[i] = (i == sparse_indices ? sparse_values : default_value)
//
// # If sparse_indices is a vector, then for each i
// dense[sparse_indices[i]] = sparse_values[i]
//
// # If sparse_indices is an n by d matrix, then for each i in [0, n)
// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
// ```
//
// All other values in `dense` are set to `default_value`.  If `sparse_values` is a
// scalar, all sparse indices are set to this single value.
//
// Indices should be sorted in lexicographic order, and indices must not
// contain any repeats. If `validate_indices` is true, these properties
// are checked during execution.
//
// Arguments:
//
//	sparse_indices: 0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
//
// index where `sparse_values[i]` will be placed.
//
//	output_shape: 1-D.  Shape of the dense output tensor.
//	sparse_values: 1-D.  Values corresponding to each row of `sparse_indices`,
//
// or a scalar value to be used for all sparse indices.
//
//	default_value: Scalar value to set for indices not specified in
//
// `sparse_indices`.
//
// Returns Dense output tensor of shape `output_shape`.
func SparseToDense(scope *Scope, sparse_indices tf.Output, output_shape tf.Output, sparse_values tf.Output, default_value tf.Output, optional ...SparseToDenseAttr) (dense tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseToDense",
		Input: []tf.Input{
			sparse_indices, output_shape, sparse_values, default_value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}
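
// A hedged sketch of the scatter semantics described above (illustrative
// values): two positions of a length-5 vector receive values, the rest get
// the default.
//
// ```go
//
//	s := op.NewScope()
//	sparseIndices := op.Const(s.SubScope("sparse_indices"), []int64{1, 3})
//	outputShape := op.Const(s.SubScope("output_shape"), []int64{5})
//	sparseValues := op.Const(s.SubScope("sparse_values"), []float32{10, 30})
//	defaultValue := op.Const(s.SubScope("default_value"), float32(0))
//	// dense evaluates to [0, 10, 0, 30, 0].
//	dense := op.SparseToDense(s, sparseIndices, outputShape, sparseValues, defaultValue)
//	_ = dense
//
// ```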

// SparseToSparseSetOperationAttr is an optional argument to SparseToSparseSetOperation.
type SparseToSparseSetOperationAttr func(optionalAttr)

// SparseToSparseSetOperationValidateIndices sets the optional validate_indices attribute to value.
// If not specified, defaults to true
func SparseToSparseSetOperationValidateIndices(value bool) SparseToSparseSetOperationAttr {
	return func(m optionalAttr) {
		m["validate_indices"] = value
	}
}

// Applies set operation along last dimension of 2 `SparseTensor` inputs.
//
// See SetOperationOp::SetOperationFromContext for values of `set_operation`.
//
// If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
// order and range of `set1` and `set2` indices.
//
// Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
// and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
// as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
// ignored.
//
// Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
// and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
// as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
// ignored.
//
46771// Output `result` is a `SparseTensor` represented by `result_indices`,
46772// `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
46773// has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
46774// dimension contains the result of `set_operation` applied to the corresponding
46775// `[0...n-1]` dimension of `set`.
46776//
46777// Arguments:
46778//
46779//	set1_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
46780//
46781// order.
46782//
46783//	set1_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
46784//
46785// order.
46786//
46787//	set1_shape: 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
46788//
46789// be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
46790// max set size across `0...n-1` dimensions.
46791//
46792//	set2_indices: 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
46793//
46794// order.
46795//
46796//	set2_values: 1D `Tensor`, values of a `SparseTensor`. Must be in row-major
46797//
46798// order.
46799//
46800//	set2_shape: 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
46801//
46802// be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
46803// max set size across `0...n-1` dimensions.
46804//
46805// Returns:
46806//
46807//	result_indices: 2D indices of a `SparseTensor`.
46808//	result_values: 1D values of a `SparseTensor`.
46809//	result_shape: 1D `Tensor` shape of a `SparseTensor`. `result_shape[0...n-1]` is
46810//
46811// the same as the 1st `n-1` dimensions of `set1` and `set2`, `result_shape[n]`
46812// is the max result set size across all `0...n-1` dimensions.
func SparseToSparseSetOperation(scope *Scope, set1_indices tf.Output, set1_values tf.Output, set1_shape tf.Output, set2_indices tf.Output, set2_values tf.Output, set2_shape tf.Output, set_operation string, optional ...SparseToSparseSetOperationAttr) (result_indices tf.Output, result_values tf.Output, result_shape tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"set_operation": set_operation}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "SparseToSparseSetOperation",
		Input: []tf.Input{
			set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
//
//	axis: 0-D.  The dimension along which to split.  Must be in the range
//
// `[-rank(value), rank(value))`.
//
//	value: The tensor to split.
//	num_split: The number of ways to split.  Must evenly divide
//
// `value.shape[axis]`.
//
// Returns Tensors whose shape matches that of `value`
// except along `axis`, where each has size
// `value.shape[axis] / num_split`.
func Split(scope *Scope, axis tf.Output, value tf.Output, num_split int64) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "Split",
		Input: []tf.Input{
			axis, value,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("Split", err)
		return
	}
	return output
}

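// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it shows the usual wiring for Split, with the split
// axis supplied as a scalar int32 constant. It assumes the Const helper
// defined elsewhere in this package.
func exampleSplit(s *Scope, value tf.Output) []tf.Output {
	axis := Const(s.SubScope("axis"), int32(0)) // split along the first dimension
	// value's first dimension must be evenly divisible by num_split (here, 3).
	return Split(s, axis, value, 3)
}
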
// Splits a tensor into `num_split` tensors along one dimension.
//
// Arguments:
//
//	value: The tensor to split.
//	size_splits: list containing the sizes of each output tensor along the split
//
// dimension. Must sum to the dimension of `value` along `axis`.
// Can contain one -1 indicating that dimension is to be inferred.
//
//	axis: 0-D.  The dimension along which to split.  Must be in the range
//
// `[-rank(value), rank(value))`.
//
// Returns Tensors whose shape matches that of `value`
// except along `axis`, where their sizes are
// `size_splits[i]`.
func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, axis tf.Output, num_split int64) (output []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"num_split": num_split}
	opspec := tf.OpSpec{
		Type: "SplitV",
		Input: []tf.Input{
			value, size_splits, axis,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
		scope.UpdateErr("SplitV", err)
		return
	}
	return output
}

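// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it shows uneven splits with SplitV, where one entry
// of size_splits may be -1 so that its size is inferred. It assumes the Const
// helper defined elsewhere in this package.
func exampleSplitV(s *Scope, value tf.Output) []tf.Output {
	sizes := Const(s.SubScope("sizes"), []int32{2, -1}) // second slice size is inferred
	axis := Const(s.SubScope("axis"), int32(0))
	return SplitV(s, value, sizes, axis, 2) // num_split must match len(size_splits)
}
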
// Creates a dataset that executes a SQL query and emits rows of the result set.
//
// Arguments:
//
//	driver_name: The database type. Currently, the only supported type is 'sqlite'.
//	data_source_name: A connection string to connect to the database.
//	query: A SQL query to execute.
func SqlDataset(scope *Scope, driver_name tf.Output, data_source_name tf.Output, query tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
	opspec := tf.OpSpec{
		Type: "SqlDataset",
		Input: []tf.Input{
			driver_name, data_source_name, query,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

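// The sketch below is an illustrative example added by the editor, not part
// of the generated API: all three SqlDataset inputs are graph-level string
// constants. The database path and query here are hypothetical, and the Const
// and tf.ScalarShape helpers are assumed from this package and the tf package.
func exampleSqlDataset(s *Scope) tf.Output {
	driver := Const(s.SubScope("driver"), "sqlite")
	source := Const(s.SubScope("source"), "/tmp/example.db")
	query := Const(s.SubScope("query"), "SELECT id FROM t")
	return SqlDataset(s, driver, source, query,
		[]tf.DataType{tf.Int64},      // one int64 column per row
		[]tf.Shape{tf.ScalarShape()}, // each emitted component is a scalar
	)
}
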
// Computes square root of x element-wise.
//
// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
func Sqrt(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Sqrt",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes the gradient for the sqrt of `x` w.r.t. its input.
//
// Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
// is the corresponding input gradient.
func SqrtGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SqrtGrad",
		Input: []tf.Input{
			y, dy,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Computes square of x element-wise.
//
// I.e., \\(y = x * x = x^2\\).
func Square(scope *Scope, x tf.Output) (y tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Square",
		Input: []tf.Input{
			x,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Returns conj(x - y)(x - y) element-wise.
//
// *NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
func SquaredDifference(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "SquaredDifference",
		Input: []tf.Input{
			x, y,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

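// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it demonstrates the broadcasting noted above by
// combining a [2, 3] tensor with a scalar. It assumes the Const helper
// defined elsewhere in this package.
func exampleSquaredDifference(s *Scope) tf.Output {
	x := Const(s.SubScope("x"), [][]float32{{1, 2, 3}, {4, 5, 6}})
	y := Const(s.SubScope("y"), float32(2)) // broadcast against every element of x
	return SquaredDifference(s, x, y)       // (x - 2)^2, element-wise
}
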
// SqueezeAttr is an optional argument to Squeeze.
type SqueezeAttr func(optionalAttr)

// SqueezeAxis sets the optional axis attribute to value.
//
// value: If specified, only squeezes the dimensions listed. The dimension
// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
// be in the range `[-rank(input), rank(input))`.
// If not specified, defaults to {}
//
// REQUIRES: len(value) >= 0
func SqueezeAxis(value []int64) SqueezeAttr {
	return func(m optionalAttr) {
		m["squeeze_dims"] = value
	}
}

// Removes dimensions of size 1 from the shape of a tensor.
//
// Given a tensor `input`, this operation returns a tensor of the same type with
// all dimensions of size 1 removed. If you don't want to remove all size 1
// dimensions, you can remove specific size 1 dimensions by specifying
// `axis`.
//
// For example:
//
// ```
// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
// shape(squeeze(t)) ==> [2, 3]
// ```
//
// Or, to remove specific size 1 dimensions:
//
// ```
// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
// shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
// ```
//
// Arguments:
//
//	input: The `input` to squeeze.
//
// Returns Contains the same data as `input`, but has one or more dimensions of
// size 1 removed.
func Squeeze(scope *Scope, input tf.Output, optional ...SqueezeAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Squeeze",
		Input: []tf.Input{
			input,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

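// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it shows the functional-option pattern used
// throughout this package, passing SqueezeAxis as a trailing optional
// attribute.
func exampleSqueeze(s *Scope, input tf.Output) tf.Output {
	// Squeeze only dimensions 2 and 4; any other size-1 dimensions are kept.
	return Squeeze(s, input, SqueezeAxis([]int64{2, 4}))
}
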
// Delete the stack from its resource container.
//
// Arguments:
//
//	handle: The handle to a stack.
//
// Returns the created operation.
func StackCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StackCloseV2",
		Input: []tf.Input{
			handle,
		},
	}
	return scope.AddOperation(opspec)
}

// Pop the element at the top of the stack.
//
// Arguments:
//
//	handle: The handle to a stack.
//	elem_type: The type of the elem that is popped.
//
// Returns The tensor that is popped from the top of the stack.
func StackPopV2(scope *Scope, handle tf.Output, elem_type tf.DataType) (elem tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	opspec := tf.OpSpec{
		Type: "StackPopV2",
		Input: []tf.Input{
			handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StackPushV2Attr is an optional argument to StackPushV2.
type StackPushV2Attr func(optionalAttr)

// StackPushV2SwapMemory sets the optional swap_memory attribute to value.
//
// value: Swap `elem` to CPU.
// If not specified, defaults to false
func StackPushV2SwapMemory(value bool) StackPushV2Attr {
	return func(m optionalAttr) {
		m["swap_memory"] = value
	}
}

// Push an element onto the stack.
//
// Arguments:
//
//	handle: The handle to a stack.
//	elem: The tensor to be pushed onto the stack.
//
// Returns The same tensor as the input 'elem'.
func StackPushV2(scope *Scope, handle tf.Output, elem tf.Output, optional ...StackPushV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StackPushV2",
		Input: []tf.Input{
			handle, elem,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StackV2Attr is an optional argument to StackV2.
type StackV2Attr func(optionalAttr)

// StackV2StackName sets the optional stack_name attribute to value.
//
// value: Overrides the name used for the temporary stack resource. Default
// value is the name of the 'Stack' op (which is guaranteed unique).
// If not specified, defaults to ""
func StackV2StackName(value string) StackV2Attr {
	return func(m optionalAttr) {
		m["stack_name"] = value
	}
}

// A stack that produces elements in first-in last-out order.
//
// Arguments:
//
//	max_size: The maximum size of the stack if non-negative. If negative, the stack
//
// size is unlimited.
//
//	elem_type: The type of the elements on the stack.
//
// Returns The handle to the stack.
func StackV2(scope *Scope, max_size tf.Output, elem_type tf.DataType, optional ...StackV2Attr) (handle tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"elem_type": elem_type}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StackV2",
		Input: []tf.Input{
			max_size,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

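// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it wires StackV2, StackPushV2, and StackPopV2
// together, using a control dependency so the pop runs after the push. It
// assumes the Const helper and Scope.WithControlDependencies from this
// package; the stack name "example_stack" is hypothetical.
func exampleStack(s *Scope, elem tf.Output) tf.Output {
	maxSize := Const(s.SubScope("max_size"), int32(-1)) // negative: unbounded stack
	handle := StackV2(s, maxSize, tf.Float, StackV2StackName("example_stack"))
	pushed := StackPushV2(s, handle, elem)
	// Pop on a scope that depends on the push, so ordering is guaranteed.
	return StackPopV2(s.WithControlDependencies(pushed.Op), handle, tf.Float)
}
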
// StageAttr is an optional argument to Stage.
type StageAttr func(optionalAttr)

// StageCapacity sets the optional capacity attribute to value.
//
// value: Maximum number of elements in the Staging Area. If > 0, inserts
// on the container will block when the capacity is reached.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageCapacity(value int64) StageAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageMemoryLimit sets the optional memory_limit attribute to value.
//
// value: The maximum number of bytes allowed for Tensors in the Staging Area.
// If > 0, inserts will block until sufficient space is available.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageMemoryLimit(value int64) StageAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageContainer sets the optional container attribute to value.
//
// value: If non-empty, this queue is placed in the given container. Otherwise,
// a default container is used.
// If not specified, defaults to ""
func StageContainer(value string) StageAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageSharedName sets the optional shared_name attribute to value.
//
// value: It is necessary to match this name to the matching Unstage Op.
// If not specified, defaults to ""
func StageSharedName(value string) StageAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Stage values similar to a lightweight Enqueue.
//
// The basic functionality of this Op is similar to a queue with many
// fewer capabilities and options.  This Op is optimized for performance.
//
// Arguments:
//
//	values: a list of tensors
//	dtypes: A list of data types that inserted values should adhere to.
//
// Returns the created operation.
func Stage(scope *Scope, values []tf.Output, optional ...StageAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "Stage",
		Input: []tf.Input{
			tf.OutputList(values),
		},
		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

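// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it stages one value into a bounded staging area,
// setting capacity and a shared name through the functional options above.
// The shared name "example_area" is hypothetical.
func exampleStage(s *Scope, v tf.Output) *tf.Operation {
	return Stage(s, []tf.Output{v},
		StageCapacity(10),               // block inserts once 10 elements are staged
		StageSharedName("example_area"), // must match the corresponding Unstage op
	)
}
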
// StageClearAttr is an optional argument to StageClear.
type StageClearAttr func(optionalAttr)

// StageClearCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageClearCapacity(value int64) StageClearAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageClearMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageClearMemoryLimit(value int64) StageClearAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageClearContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StageClearContainer(value string) StageClearAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageClearSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StageClearSharedName(value string) StageClearAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op removes all elements in the underlying container.
//
// Returns the created operation.
func StageClear(scope *Scope, dtypes []tf.DataType, optional ...StageClearAttr) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StageClear",

		Attrs: attrs,
	}
	return scope.AddOperation(opspec)
}

// StagePeekAttr is an optional argument to StagePeek.
type StagePeekAttr func(optionalAttr)

// StagePeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StagePeekCapacity(value int64) StagePeekAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StagePeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StagePeekMemoryLimit(value int64) StagePeekAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StagePeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StagePeekContainer(value string) StagePeekAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StagePeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StagePeekSharedName(value string) StagePeekAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op peeks at the values at the specified index.  If the
//
// underlying container does not contain sufficient elements
// this op will block until it does.   This Op is optimized for
// performance.
func StagePeek(scope *Scope, index tf.Output, dtypes []tf.DataType, optional ...StagePeekAttr) (values []tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StagePeek",
		Input: []tf.Input{
			index,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	if scope.Err() != nil {
		return
	}
	var idx int
	var err error
	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
		scope.UpdateErr("StagePeek", err)
		return
	}
	return values
}

// StageSizeAttr is an optional argument to StageSize.
type StageSizeAttr func(optionalAttr)

// StageSizeCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageSizeCapacity(value int64) StageSizeAttr {
	return func(m optionalAttr) {
		m["capacity"] = value
	}
}

// StageSizeMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func StageSizeMemoryLimit(value int64) StageSizeAttr {
	return func(m optionalAttr) {
		m["memory_limit"] = value
	}
}

// StageSizeContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func StageSizeContainer(value string) StageSizeAttr {
	return func(m optionalAttr) {
		m["container"] = value
	}
}

// StageSizeSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func StageSizeSharedName(value string) StageSizeAttr {
	return func(m optionalAttr) {
		m["shared_name"] = value
	}
}

// Op returns the number of elements in the underlying container.
func StageSize(scope *Scope, dtypes []tf.DataType, optional ...StageSizeAttr) (size tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtypes": dtypes}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StageSize",

		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

47465
47466// StatefulStandardNormalAttr is an optional argument to StatefulStandardNormal.
47467type StatefulStandardNormalAttr func(optionalAttr)
47468
47469// StatefulStandardNormalDtype sets the optional dtype attribute to value.
47470//
47471// value: The type of the output.
47472// If not specified, defaults to DT_FLOAT
47473func StatefulStandardNormalDtype(value tf.DataType) StatefulStandardNormalAttr {
47474	return func(m optionalAttr) {
47475		m["dtype"] = value
47476	}
47477}
47478
// Outputs random values from a normal distribution. This op is deprecated in favor of op 'StatefulStandardNormalV2'.
//
// DEPRECATED at GraphDef version 29: Use StatefulStandardNormalV2 instead.
//
// The generated values will have mean 0 and standard deviation 1.
//
// Arguments:
//
//	resource: The handle of the resource variable that stores the state of the RNG.
//	shape: The shape of the output tensor.
//
// Returns A tensor of the specified shape filled with random normal values.
func StatefulStandardNormal(scope *Scope, resource tf.Output, shape tf.Output, optional ...StatefulStandardNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulStandardNormal",
		Input: []tf.Input{
			resource, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulStandardNormalV2Attr is an optional argument to StatefulStandardNormalV2.
type StatefulStandardNormalV2Attr func(optionalAttr)

// StatefulStandardNormalV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatefulStandardNormalV2Dtype(value tf.DataType) StatefulStandardNormalV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// Arguments:
//
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns A tensor of the specified shape filled with random normal values.
func StatefulStandardNormalV2(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulStandardNormalV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulStandardNormalV2",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulTruncatedNormalAttr is an optional argument to StatefulTruncatedNormal.
type StatefulTruncatedNormalAttr func(optionalAttr)

// StatefulTruncatedNormalDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatefulTruncatedNormalDtype(value tf.DataType) StatefulTruncatedNormalAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from a truncated normal distribution.
//
// The generated values follow a normal distribution with mean 0 and standard
// deviation 1, except that values whose magnitude is more than 2 standard
// deviations from the mean are dropped and re-picked.
//
// Arguments:
//
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns Random values with specified shape.
func StatefulTruncatedNormal(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulTruncatedNormalAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulTruncatedNormal",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulUniformAttr is an optional argument to StatefulUniform.
type StatefulUniformAttr func(optionalAttr)

// StatefulUniformDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatefulUniformDtype(value tf.DataType) StatefulUniformAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// Arguments:
//
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns Random values with specified shape.
func StatefulUniform(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulUniform",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatefulUniformFullIntAttr is an optional argument to StatefulUniformFullInt.
type StatefulUniformFullIntAttr func(optionalAttr)

// StatefulUniformFullIntDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_UINT64
func StatefulUniformFullIntDtype(value tf.DataType) StatefulUniformFullIntAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs random integers from a uniform distribution.
//
// The generated values are uniform integers covering the whole range of `dtype`.
//
// Arguments:
//
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//
// Returns Random values with specified shape.
func StatefulUniformFullInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, optional ...StatefulUniformFullIntAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatefulUniformFullInt",
		Input: []tf.Input{
			resource, algorithm, shape,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs random integers from a uniform distribution.
//
// The generated values are uniform integers in the range `[minval, maxval)`.
// The lower bound `minval` is included in the range, while the upper bound
// `maxval` is excluded.
//
// The random integers are slightly biased unless `maxval - minval` is an exact
// power of two.  The bias is small for values of `maxval - minval` significantly
// smaller than the range of the output (either `2^32` or `2^64`).
//
// Arguments:
//
//	resource: The handle of the resource variable that stores the state of the RNG.
//	algorithm: The RNG algorithm.
//	shape: The shape of the output tensor.
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatefulUniformInt(scope *Scope, resource tf.Output, algorithm tf.Output, shape tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatefulUniformInt",
		Input: []tf.Input{
			resource, algorithm, shape, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessMultinomialAttr is an optional argument to StatelessMultinomial.
type StatelessMultinomialAttr func(optionalAttr)

// StatelessMultinomialOutputDtype sets the optional output_dtype attribute to value.
// If not specified, defaults to DT_INT64
func StatelessMultinomialOutputDtype(value tf.DataType) StatelessMultinomialAttr {
	return func(m optionalAttr) {
		m["output_dtype"] = value
	}
}

// Draws samples from a multinomial distribution.
//
// Arguments:
//
//	logits: 2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
//
// represents the unnormalized log probabilities for all classes.
//
//	num_samples: 0-D.  Number of independent samples to draw for each row slice.
//	seed: 2 seeds (shape [2]).
//
// Returns 2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
// contains the drawn class labels with range `[0, num_classes)`.
func StatelessMultinomial(scope *Scope, logits tf.Output, num_samples tf.Output, seed tf.Output, optional ...StatelessMultinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessMultinomial",
		Input: []tf.Input{
			logits, num_samples, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

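// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it draws four class samples per batch row from
// unnormalized log-probabilities. It assumes the Const helper defined
// elsewhere in this package.
func exampleStatelessMultinomial(s *Scope, logits tf.Output) tf.Output {
	numSamples := Const(s.SubScope("num_samples"), int32(4))
	seed := Const(s.SubScope("seed"), []int64{5, 6}) // shape [2]
	return StatelessMultinomial(s, logits, numSamples, seed)
}
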
// StatelessRandomBinomialAttr is an optional argument to StatelessRandomBinomial.
type StatelessRandomBinomialAttr func(optionalAttr)

// StatelessRandomBinomialDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_INT64
func StatelessRandomBinomialDtype(value tf.DataType) StatelessRandomBinomialAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom numbers from a binomial distribution.
//
// Outputs random values from a binomial distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	counts: The counts of the binomial distribution. Must be broadcastable with `probs`,
//
// and broadcastable with the rightmost dimensions of `shape`.
//
//	probs: The probability of success for the binomial distribution. Must be broadcastable
//
// with `counts` and broadcastable with the rightmost dimensions of `shape`.
//
// Returns Random values with specified shape.
func StatelessRandomBinomial(scope *Scope, shape tf.Output, seed tf.Output, counts tf.Output, probs tf.Output, optional ...StatelessRandomBinomialAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomBinomial",
		Input: []tf.Input{
			shape, seed, counts, probs,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs deterministic pseudorandom numbers from a gamma distribution.
//
// Outputs random values from a gamma distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	alpha: The concentration of the gamma distribution. Shape must match the rightmost
//
// dimensions of `shape`.
//
// Returns Random values with specified shape.
func StatelessRandomGammaV2(scope *Scope, shape tf.Output, seed tf.Output, alpha tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGammaV2",
		Input: []tf.Input{
			shape, seed, alpha,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Picks the best counter-based RNG algorithm based on device.
//
// This op picks the best counter-based RNG algorithm based on device.
//
// Returns The RNG algorithm (shape int32[]).
func StatelessRandomGetAlg(scope *Scope) (alg tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGetAlg",
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Scrambles seed into key and counter, using the best algorithm based on device.
//
// This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
//
// Arguments:
//
//	seed: 2 seeds (shape [2]).
//
// Returns:
//
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
func StatelessRandomGetKeyCounter(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomGetKeyCounter",
		Input: []tf.Input{
			seed,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1)
}

47882
47883// Picks the best algorithm based on device, and scrambles seed into key and counter.
47884//
47885// This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seed results in different key/counter pair (which will in turn result in different random numbers).
47886//
47887// Arguments:
47888//
47889//	seed: 2 seeds (shape [2]).
47890//
47891// Returns:
47892//
47893//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
47894//	counter: Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).
47895//	alg: The RNG algorithm (shape int32[]).
47896func StatelessRandomGetKeyCounterAlg(scope *Scope, seed tf.Output) (key tf.Output, counter tf.Output, alg tf.Output) {
47897	if scope.Err() != nil {
47898		return
47899	}
47900	opspec := tf.OpSpec{
47901		Type: "StatelessRandomGetKeyCounterAlg",
47902		Input: []tf.Input{
47903			seed,
47904		},
47905	}
47906	op := scope.AddOperation(opspec)
47907	return op.Output(0), op.Output(1), op.Output(2)
47908}
47909
47910// StatelessRandomNormalAttr is an optional argument to StatelessRandomNormal.
47911type StatelessRandomNormalAttr func(optionalAttr)
47912
47913// StatelessRandomNormalDtype sets the optional dtype attribute to value.
47914//
47915// value: The type of the output.
47916// If not specified, defaults to DT_FLOAT
47917func StatelessRandomNormalDtype(value tf.DataType) StatelessRandomNormalAttr {
47918	return func(m optionalAttr) {
47919		m["dtype"] = value
47920	}
47921}
47922
47923// Outputs deterministic pseudorandom values from a normal distribution.
47924//
47925// The generated values will have mean 0 and standard deviation 1.
47926//
47927// The outputs are a deterministic function of `shape` and `seed`.
47928//
47929// Arguments:
47930//
47931//	shape: The shape of the output tensor.
47932//	seed: 2 seeds (shape [2]).
47933//
47934// Returns Random values with specified shape.
47935func StatelessRandomNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomNormalAttr) (output tf.Output) {
47936	if scope.Err() != nil {
47937		return
47938	}
47939	attrs := map[string]interface{}{}
47940	for _, a := range optional {
47941		a(attrs)
47942	}
47943	opspec := tf.OpSpec{
47944		Type: "StatelessRandomNormal",
47945		Input: []tf.Input{
47946			shape, seed,
47947		},
47948		Attrs: attrs,
47949	}
47950	op := scope.AddOperation(opspec)
47951	return op.Output(0)
47952}
47953
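// The sketch below is an illustrative example added by the editor, not part
// of the generated API: with a fixed shape and seed, StatelessRandomNormal
// always produces the same values. It assumes the Const helper defined
// elsewhere in this package.
func exampleStatelessNormal(s *Scope) tf.Output {
	shape := Const(s.SubScope("shape"), []int32{2, 3})
	seed := Const(s.SubScope("seed"), []int64{42, 7}) // shape [2]
	return StatelessRandomNormal(s, shape, seed, StatelessRandomNormalDtype(tf.Double))
}
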
// StatelessRandomNormalV2Attr is an optional argument to StatelessRandomNormalV2.
type StatelessRandomNormalV2Attr func(optionalAttr)

// StatelessRandomNormalV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomNormalV2Dtype(value tf.DataType) StatelessRandomNormalV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a normal distribution.
//
// The generated values will have mean 0 and standard deviation 1.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomNormalV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomNormalV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs deterministic pseudorandom numbers from a Poisson distribution.
//
// Outputs random values from a Poisson distribution.
//
// The outputs are a deterministic function of `shape`, `seed`, and `lam`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	lam: The rate of the Poisson distribution. Shape must match the rightmost dimensions
//
// of `shape`.
//
//	dtype: The type of the output.
//
// Returns Random values with specified shape.
func StatelessRandomPoisson(scope *Scope, shape tf.Output, seed tf.Output, lam tf.Output, dtype tf.DataType) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"dtype": dtype}
	opspec := tf.OpSpec{
		Type: "StatelessRandomPoisson",
		Input: []tf.Input{
			shape, seed, lam,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformAttr is an optional argument to StatelessRandomUniform.
type StatelessRandomUniformAttr func(optionalAttr)

// StatelessRandomUniformDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformDtype(value tf.DataType) StatelessRandomUniformAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomUniform(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniform",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformFullIntAttr is an optional argument to StatelessRandomUniformFullInt.
type StatelessRandomUniformFullIntAttr func(optionalAttr)

// StatelessRandomUniformFullIntDtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_UINT64
func StatelessRandomUniformFullIntDtype(value tf.DataType) StatelessRandomUniformFullIntAttr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values are uniform integers covering the whole range of `dtype`.
//
// The outputs are a deterministic function of `shape` and `seed`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformFullInt(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessRandomUniformFullIntAttr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformFullInt",
		Input: []tf.Input{
			shape, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformFullIntV2Attr is an optional argument to StatelessRandomUniformFullIntV2.
type StatelessRandomUniformFullIntV2Attr func(optionalAttr)

// StatelessRandomUniformFullIntV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_UINT64
func StatelessRandomUniformFullIntV2Dtype(value tf.DataType) StatelessRandomUniformFullIntV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values are uniform integers covering the whole range of `dtype`.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformFullIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformFullIntV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformFullIntV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[minval, maxval)`.
//
// The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	seed: 2 seeds (shape [2]).
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatelessRandomUniformInt(scope *Scope, shape tf.Output, seed tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformInt",
		Input: []tf.Input{
			shape, seed, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

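// The sketch below is an illustrative example added by the editor, not part
// of the generated API: minval and maxval are scalar tensors whose dtype
// determines the output dtype. It assumes the Const helper defined elsewhere
// in this package.
func exampleStatelessUniformInt(s *Scope) tf.Output {
	shape := Const(s.SubScope("shape"), []int32{5})
	seed := Const(s.SubScope("seed"), []int64{3, 4})
	lo := Const(s.SubScope("lo"), int64(0))
	hi := Const(s.SubScope("hi"), int64(100)) // exclusive upper bound
	return StatelessRandomUniformInt(s, shape, seed, lo, hi)
}
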
// Outputs deterministic pseudorandom integers from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[minval, maxval)`.
//
// The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//	minval: Minimum value (inclusive, scalar).
//	maxval: Maximum value (exclusive, scalar).
//
// Returns Random values with specified shape.
func StatelessRandomUniformIntV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, minval tf.Output, maxval tf.Output) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformIntV2",
		Input: []tf.Input{
			shape, key, counter, alg, minval, maxval,
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// StatelessRandomUniformV2Attr is an optional argument to StatelessRandomUniformV2.
type StatelessRandomUniformV2Attr func(optionalAttr)

// StatelessRandomUniformV2Dtype sets the optional dtype attribute to value.
//
// value: The type of the output.
// If not specified, defaults to DT_FLOAT
func StatelessRandomUniformV2Dtype(value tf.DataType) StatelessRandomUniformV2Attr {
	return func(m optionalAttr) {
		m["dtype"] = value
	}
}

// Outputs deterministic pseudorandom values from a uniform distribution.
//
// The generated values follow a uniform distribution in the range `[0, 1)`. The
// lower bound 0 is included in the range, while the upper bound 1 is excluded.
//
// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
//
// Arguments:
//
//	shape: The shape of the output tensor.
//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
//	alg: The RNG algorithm (shape int32[]).
//
// Returns Random values with specified shape.
func StatelessRandomUniformV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessRandomUniformV2Attr) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessRandomUniformV2",
		Input: []tf.Input{
			shape, key, counter, alg,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

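// The sketch below is an illustrative example added by the editor, not part
// of the generated API: it shows the V2 stateless pipeline, deriving key,
// counter, and algorithm from a seed and feeding them into
// StatelessRandomUniformV2. It assumes the Const helper defined elsewhere in
// this package.
func exampleStatelessUniformV2(s *Scope) tf.Output {
	seed := Const(s.SubScope("seed"), []int64{1, 2})
	key, counter, alg := StatelessRandomGetKeyCounterAlg(s, seed)
	shape := Const(s.SubScope("shape"), []int32{4})
	return StatelessRandomUniformV2(s, shape, key, counter, alg)
}
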
// StatelessSampleDistortedBoundingBoxAttr is an optional argument to StatelessSampleDistortedBoundingBox.
type StatelessSampleDistortedBoundingBoxAttr func(optionalAttr)

// StatelessSampleDistortedBoundingBoxAspectRatioRange sets the optional aspect_ratio_range attribute to value.
//
// value: The cropped area of the image must have an aspect ratio =
// width / height within this range.
// If not specified, defaults to {f:0.75 f:1.33}
func StatelessSampleDistortedBoundingBoxAspectRatioRange(value []float32) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["aspect_ratio_range"] = value
	}
}

// StatelessSampleDistortedBoundingBoxAreaRange sets the optional area_range attribute to value.
//
// value: The cropped area of the image must contain a fraction of the
// supplied image within this range.
// If not specified, defaults to {f:0.05 f:1}
func StatelessSampleDistortedBoundingBoxAreaRange(value []float32) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["area_range"] = value
	}
}

// StatelessSampleDistortedBoundingBoxMaxAttempts sets the optional max_attempts attribute to value.
//
// value: Number of attempts at generating a cropped region of the image
// of the specified constraints. After `max_attempts` failures, return the entire
// image.
// If not specified, defaults to 100
func StatelessSampleDistortedBoundingBoxMaxAttempts(value int64) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["max_attempts"] = value
	}
}

// StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes sets the optional use_image_if_no_bounding_boxes attribute to value.
//
// value: Controls behavior if no bounding boxes supplied.
// If true, assume an implicit bounding box covering the whole input. If false,
// raise an error.
// If not specified, defaults to false
func StatelessSampleDistortedBoundingBoxUseImageIfNoBoundingBoxes(value bool) StatelessSampleDistortedBoundingBoxAttr {
	return func(m optionalAttr) {
		m["use_image_if_no_bounding_boxes"] = value
	}
}

// Generate a randomly distorted bounding box for an image deterministically.
//
// Bounding box annotations are often supplied in addition to ground-truth labels
// in image recognition or object localization tasks. A common technique for
// training such a system is to randomly distort an image while preserving its
// content, i.e. *data augmentation*. This Op, given the same `seed`,
// deterministically outputs a randomly distorted localization of an object, i.e.
// bounding box, given an `image_size`, `bounding_boxes` and a series of
// constraints.
//
// The output of this Op is a single bounding box that may be used to crop the
// original image. The output is returned as 3 tensors: `begin`, `size` and
// `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the
// image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize
// what the bounding box looks like.
//
// Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The
// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
// the height of the underlying image.
//
// The output of this Op is guaranteed to be the same given the same `seed` and is
// independent of how many times the function is called, and independent of global
// seed settings (e.g. `tf.random.set_seed`).
//
// Example usage:
//
// >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]])
// >>> bbox = tf.constant(
// ...   [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
// >>> seed = (1, 2)
// >>> # Generate a single distorted bounding box.
// >>> bbox_begin, bbox_size, bbox_draw = (
// ...   tf.image.stateless_sample_distorted_bounding_box(
// ...     tf.shape(image), bounding_boxes=bbox, seed=seed))
// >>> # Employ the bounding box to distort the image.
// >>> tf.slice(image, bbox_begin, bbox_size)
// <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy=
// array([[[1],
//
//	 [2]],
//	[[4],
//	 [5]]])>
//
// >>> # Draw the bounding box in an image summary.
// >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
// >>> tf.image.draw_bounding_boxes(
// ...   tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors)
// <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy=
// array([[[[1.],
//
//	 [1.],
//	 [3.]],
//	[[1.],
//	 [1.],
//	 [6.]],
//	[[7.],
//	 [8.],
//	 [9.]]]], dtype=float32)>
//
// Note that if no bounding box information is available, setting
// `use_image_if_no_bounding_boxes = true` will assume there is a single implicit
// bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is
// false and no bounding boxes are supplied, an error is raised.
//
// Arguments:
//
//	image_size: 1-D, containing `[height, width, channels]`.
//	bounding_boxes: 3-D with shape `[batch, N, 4]` describing the N bounding boxes
//
// associated with the image.
//
//	min_object_covered: The cropped area of the image must contain at least this
//
// fraction of any bounding box supplied. The value of this parameter should be
// non-negative. In the case of 0, the cropped area does not need to overlap
// any of the bounding boxes supplied.
//
//	seed: 1-D with shape `[2]`. The seed to the random number generator. Must have dtype
//
// `int32` or `int64`. (When using XLA, only `int32` is allowed.)
//
// Returns:
//
//	begin: 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to
//
// `tf.slice`.
//
//	size: 1-D, containing `[target_height, target_width, -1]`. Provide as input to
//
// `tf.slice`.
//
//	bboxes: 3-D with shape `[1, 1, 4]` containing the distorted bounding box.
//
// Provide as input to `tf.image.draw_bounding_boxes`.
func StatelessSampleDistortedBoundingBox(scope *Scope, image_size tf.Output, bounding_boxes tf.Output, min_object_covered tf.Output, seed tf.Output, optional ...StatelessSampleDistortedBoundingBoxAttr) (begin tf.Output, size tf.Output, bboxes tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{}
	for _, a := range optional {
		a(attrs)
	}
	opspec := tf.OpSpec{
		Type: "StatelessSampleDistortedBoundingBox",
		Input: []tf.Input{
			image_size, bounding_boxes, min_object_covered, seed,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0), op.Output(1), op.Output(2)
}

48435// Randomly and deterministically shuffles a tensor along its first dimension.
48436//
48437// The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
48438// to one and only one `output[i]`. For example, a mapping that might occur for a
48439// 3x2 tensor is:
48440//
48441// ```
48442// [[1, 2],       [[5, 6],
48443//  [3, 4],  ==>   [1, 2],
48444//  [5, 6]]        [3, 4]]
48445// ```
48448//
48449// The outputs are a deterministic function of `value`, `key`, `counter` and `alg`.
48450//
48451// Arguments:
48452//
48453//	value: The tensor to be shuffled.
48454//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
48455//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
48456//	alg: The RNG algorithm (shape int32[]).
48457//
48458// Returns A tensor of same shape and type as `value`, shuffled along its first
48459// dimension.
48460func StatelessShuffle(scope *Scope, value tf.Output, key tf.Output, counter tf.Output, alg tf.Output) (output tf.Output) {
48461	if scope.Err() != nil {
48462		return
48463	}
48464	opspec := tf.OpSpec{
48465		Type: "StatelessShuffle",
48466		Input: []tf.Input{
48467			value, key, counter, alg,
48468		},
48469	}
48470	op := scope.AddOperation(opspec)
48471	return op.Output(0)
48472}
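
// A minimal sketch of wiring up StatelessShuffle (values are illustrative;
// the algorithm enum passed via `alg` is an assumption, 1 = Philox in current
// TensorFlow, and op.Const/op.NewScope come from this package):
//
// ```
// s := op.NewScope()
// value := op.Const(s, [][]int32{{1, 2}, {3, 4}, {5, 6}})
// key := op.Const(s, []uint64{42})
// counter := op.Const(s, []uint64{0, 0})
// alg := op.Const(s, int32(1)) // assumed Philox; see TF RNG algorithm enums
// shuffled := op.StatelessShuffle(s, value, key, counter, alg)
// ```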
48473
48474// StatelessTruncatedNormalAttr is an optional argument to StatelessTruncatedNormal.
48475type StatelessTruncatedNormalAttr func(optionalAttr)
48476
48477// StatelessTruncatedNormalDtype sets the optional dtype attribute to value.
48478//
48479// value: The type of the output.
48480// If not specified, defaults to DT_FLOAT
48481func StatelessTruncatedNormalDtype(value tf.DataType) StatelessTruncatedNormalAttr {
48482	return func(m optionalAttr) {
48483		m["dtype"] = value
48484	}
48485}
48486
48487// Outputs deterministic pseudorandom values from a truncated normal distribution.
48488//
48489// The generated values follow a normal distribution with mean 0 and standard
48490// deviation 1, except that values whose magnitude is more than 2 standard
48491// deviations from the mean are dropped and re-picked.
48492//
48493// The outputs are a deterministic function of `shape` and `seed`.
48494//
48495// Arguments:
48496//
48497//	shape: The shape of the output tensor.
48498//	seed: 2 seeds (shape [2]).
48499//
48500// Returns Random values with specified shape.
48501func StatelessTruncatedNormal(scope *Scope, shape tf.Output, seed tf.Output, optional ...StatelessTruncatedNormalAttr) (output tf.Output) {
48502	if scope.Err() != nil {
48503		return
48504	}
48505	attrs := map[string]interface{}{}
48506	for _, a := range optional {
48507		a(attrs)
48508	}
48509	opspec := tf.OpSpec{
48510		Type: "StatelessTruncatedNormal",
48511		Input: []tf.Input{
48512			shape, seed,
48513		},
48514		Attrs: attrs,
48515	}
48516	op := scope.AddOperation(opspec)
48517	return op.Output(0)
48518}
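
// Sketch (illustrative): requesting a non-default dtype through the
// functional-option attribute defined above; tf.Double is the DataType
// constant from the tensorflow/go package.
//
// ```
// s := op.NewScope()
// shape := op.Const(s, []int32{2, 3})
// seed := op.Const(s, []int64{1, 2})
// vals := op.StatelessTruncatedNormal(s, shape, seed,
// 	op.StatelessTruncatedNormalDtype(tf.Double))
// ```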
48519
48520// StatelessTruncatedNormalV2Attr is an optional argument to StatelessTruncatedNormalV2.
48521type StatelessTruncatedNormalV2Attr func(optionalAttr)
48522
48523// StatelessTruncatedNormalV2Dtype sets the optional dtype attribute to value.
48524//
48525// value: The type of the output.
48526// If not specified, defaults to DT_FLOAT
48527func StatelessTruncatedNormalV2Dtype(value tf.DataType) StatelessTruncatedNormalV2Attr {
48528	return func(m optionalAttr) {
48529		m["dtype"] = value
48530	}
48531}
48532
48533// Outputs deterministic pseudorandom values from a truncated normal distribution.
48534//
48535// The generated values follow a normal distribution with mean 0 and standard
48536// deviation 1, except that values whose magnitude is more than 2 standard
48537// deviations from the mean are dropped and re-picked.
48538//
48539// The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
48540//
48541// Arguments:
48542//
48543//	shape: The shape of the output tensor.
48544//	key: Key for the counter-based RNG algorithm (shape uint64[1]).
48545//	counter: Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.
48546//	alg: The RNG algorithm (shape int32[]).
48547//
48548// Returns Random values with specified shape.
48549func StatelessTruncatedNormalV2(scope *Scope, shape tf.Output, key tf.Output, counter tf.Output, alg tf.Output, optional ...StatelessTruncatedNormalV2Attr) (output tf.Output) {
48550	if scope.Err() != nil {
48551		return
48552	}
48553	attrs := map[string]interface{}{}
48554	for _, a := range optional {
48555		a(attrs)
48556	}
48557	opspec := tf.OpSpec{
48558		Type: "StatelessTruncatedNormalV2",
48559		Input: []tf.Input{
48560			shape, key, counter, alg,
48561		},
48562		Attrs: attrs,
48563	}
48564	op := scope.AddOperation(opspec)
48565	return op.Output(0)
48566}
48567
48568// Check if the input matches the regex pattern.
48569//
48570// The input is a string tensor of any shape. The pattern is the
48571// regular expression to be matched with every element of the input tensor.
48572// The boolean values (True or False) of the output tensor indicate
48573// if the input matches the regex pattern provided.
48574//
48575// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
48576//
48577// Arguments:
48578//
48579//	input: A string tensor of the text to be processed.
48580//	pattern: The regular expression to match the input.
48581//
48582// Returns A bool tensor with the same shape as `input`.
48583func StaticRegexFullMatch(scope *Scope, input tf.Output, pattern string) (output tf.Output) {
48584	if scope.Err() != nil {
48585		return
48586	}
48587	attrs := map[string]interface{}{"pattern": pattern}
48588	opspec := tf.OpSpec{
48589		Type: "StaticRegexFullMatch",
48590		Input: []tf.Input{
48591			input,
48592		},
48593		Attrs: attrs,
48594	}
48595	op := scope.AddOperation(opspec)
48596	return op.Output(0)
48597}
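
// Sketch (illustrative values): full-match means the whole string must match
// the pattern, so only "abc123" matches `[a-z]+[0-9]+` below.
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"abc123", "abc", "123"})
// matches := op.StaticRegexFullMatch(s, input, `[a-z]+[0-9]+`)
// // matches evaluates to [true, false, false] when the graph is run.
// ```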
48598
48599// StaticRegexReplaceAttr is an optional argument to StaticRegexReplace.
48600type StaticRegexReplaceAttr func(optionalAttr)
48601
48602// StaticRegexReplaceReplaceGlobal sets the optional replace_global attribute to value.
48603//
48604// value: If True, the replacement is global, otherwise the replacement
48605// is done only on the first match.
48606// If not specified, defaults to true
48607func StaticRegexReplaceReplaceGlobal(value bool) StaticRegexReplaceAttr {
48608	return func(m optionalAttr) {
48609		m["replace_global"] = value
48610	}
48611}
48612
48613// Replaces the match of pattern in input with rewrite.
48614//
48615// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
48616//
48617// Arguments:
48618//
48619//	input: The text to be processed.
48620//	pattern: The regular expression to match the input.
48621//	rewrite: The rewrite to be applied to the matched expression.
48622//
48623// Returns The text after applying pattern and rewrite.
48624func StaticRegexReplace(scope *Scope, input tf.Output, pattern string, rewrite string, optional ...StaticRegexReplaceAttr) (output tf.Output) {
48625	if scope.Err() != nil {
48626		return
48627	}
48628	attrs := map[string]interface{}{"pattern": pattern, "rewrite": rewrite}
48629	for _, a := range optional {
48630		a(attrs)
48631	}
48632	opspec := tf.OpSpec{
48633		Type: "StaticRegexReplace",
48634		Input: []tf.Input{
48635			input,
48636		},
48637		Attrs: attrs,
48638	}
48639	op := scope.AddOperation(opspec)
48640	return op.Output(0)
48641}
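
// Sketch (illustrative): with replace_global set to false only the first
// match is rewritten, mirroring the attribute documented above.
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"aaa bbb aaa"})
// out := op.StaticRegexReplace(s, input, "a+", "x",
// 	op.StaticRegexReplaceReplaceGlobal(false)) // yields "x bbb aaa"
// ```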
48642
48643// StatsAggregatorHandleAttr is an optional argument to StatsAggregatorHandle.
48644type StatsAggregatorHandleAttr func(optionalAttr)
48645
48646// StatsAggregatorHandleContainer sets the optional container attribute to value.
48647// If not specified, defaults to ""
48648func StatsAggregatorHandleContainer(value string) StatsAggregatorHandleAttr {
48649	return func(m optionalAttr) {
48650		m["container"] = value
48651	}
48652}
48653
48654// StatsAggregatorHandleSharedName sets the optional shared_name attribute to value.
48655// If not specified, defaults to ""
48656func StatsAggregatorHandleSharedName(value string) StatsAggregatorHandleAttr {
48657	return func(m optionalAttr) {
48658		m["shared_name"] = value
48659	}
48660}
48661
48662// Creates a statistics manager resource.
48663func StatsAggregatorHandle(scope *Scope, optional ...StatsAggregatorHandleAttr) (handle tf.Output) {
48664	if scope.Err() != nil {
48665		return
48666	}
48667	attrs := map[string]interface{}{}
48668	for _, a := range optional {
48669		a(attrs)
48670	}
48671	opspec := tf.OpSpec{
48672		Type: "StatsAggregatorHandle",
48673
48674		Attrs: attrs,
48675	}
48676	op := scope.AddOperation(opspec)
48677	return op.Output(0)
48678}
48679
48680// Set a summary_writer_interface to record statistics using given stats_aggregator.
48681//
48682// Returns the created operation.
48683func StatsAggregatorSetSummaryWriter(scope *Scope, stats_aggregator tf.Output, summary tf.Output) (o *tf.Operation) {
48684	if scope.Err() != nil {
48685		return
48686	}
48687	opspec := tf.OpSpec{
48688		Type: "StatsAggregatorSetSummaryWriter",
48689		Input: []tf.Input{
48690			stats_aggregator, summary,
48691		},
48692	}
48693	return scope.AddOperation(opspec)
48694}
48695
48696// Produces a summary of any statistics recorded by the given statistics manager.
48697func StatsAggregatorSummary(scope *Scope, iterator tf.Output) (summary tf.Output) {
48698	if scope.Err() != nil {
48699		return
48700	}
48701	opspec := tf.OpSpec{
48702		Type: "StatsAggregatorSummary",
48703		Input: []tf.Input{
48704			iterator,
48705		},
48706	}
48707	op := scope.AddOperation(opspec)
48708	return op.Output(0)
48709}
48710
48711// Stops gradient computation.
48712//
48713// When executed in a graph, this op outputs its input tensor as-is.
48714//
48715// When building ops to compute gradients, this op prevents the contribution of
48716// its inputs from being taken into account.  Normally, the gradient generator
48717// adds ops to a graph to compute the derivatives of a specified 'loss' by
48718// recursively finding the inputs that contributed to its computation.  If you
48719// insert this op in the graph, its inputs are masked from the gradient
48720// generator.  They are not taken into account for computing gradients.
48721//
48722// This is useful any time you want to compute a value with TensorFlow but need
48723// to pretend that the value was a constant. For example, the softmax function
48724// for a vector x can be written as
48725//
48726// ```python
48727//
48728//	def softmax(x):
48729//	  numerator = tf.exp(x)
48730//	  denominator = tf.reduce_sum(numerator)
48731//	  return numerator / denominator
48732//
48733// ```
48734//
48735// This, however, is susceptible to overflow if the values in x are large. An
48736// alternative, more stable approach is to subtract the maximum of x from each
48737// of the values.
48738//
48739// ```python
48740//
48741//	def stable_softmax(x):
48742//	  z = x - tf.reduce_max(x)
48743//	  numerator = tf.exp(z)
48744//	  denominator = tf.reduce_sum(numerator)
48745//	  return numerator / denominator
48746//
48747// ```
48748//
48749// However, when we backprop through the softmax to x, we don't want to backprop
48750// through the `tf.reduce_max(x)` calculation (if the max values are not unique,
48751// the gradient could flow to the wrong input); we want to treat it as a
48752// constant. Therefore, we should write this out as
48753//
48754// ```python
48755//
48756//	def stable_softmax(x):
48757//	  z = x - tf.stop_gradient(tf.reduce_max(x))
48758//	  numerator = tf.exp(z)
48759//	  denominator = tf.reduce_sum(numerator)
48760//	  return numerator / denominator
48761//
48762// ```
48763//
48764// Some other examples include:
48765//
48766//   - The *EM* algorithm where the *M-step* should not involve backpropagation
48767//     through the output of the *E-step*.
48768//   - Contrastive divergence training of Boltzmann machines where, when
48769//     differentiating the energy function, the training must not backpropagate
48770//     through the graph that generated the samples from the model.
48771//   - Adversarial training, where no backprop should happen through the adversarial
48772//     example generation process.
48773func StopGradient(scope *Scope, input tf.Output) (output tf.Output) {
48774	if scope.Err() != nil {
48775		return
48776	}
48777	opspec := tf.OpSpec{
48778		Type: "StopGradient",
48779		Input: []tf.Input{
48780			input,
48781		},
48782	}
48783	op := scope.AddOperation(opspec)
48784	return op.Output(0)
48785}
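
// The stable-softmax trick above, sketched with the Go wrappers from this
// package (illustrative; op.Max, op.Exp, op.Sum, op.Sub and op.Div are
// sibling generated wrappers):
//
// ```
// s := op.NewScope()
// x := op.Const(s, []float32{1, 2, 3})
// axis := op.Const(s, int32(0))
// z := op.Sub(s, x, op.StopGradient(s, op.Max(s, x, axis)))
// num := op.Exp(s, z)
// softmax := op.Div(s, num, op.Sum(s, num, axis))
// ```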
48786
48787// StridedSliceAttr is an optional argument to StridedSlice.
48788type StridedSliceAttr func(optionalAttr)
48789
48790// StridedSliceBeginMask sets the optional begin_mask attribute to value.
48791//
48792// value: a bitmask where a bit i being 1 means to ignore the begin
48793// value and instead use the largest interval possible. At runtime
48794// begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
48795// `[-1, n-1]` if `stride[i] < 0`
48796// If not specified, defaults to 0
48797func StridedSliceBeginMask(value int64) StridedSliceAttr {
48798	return func(m optionalAttr) {
48799		m["begin_mask"] = value
48800	}
48801}
48802
48803// StridedSliceEndMask sets the optional end_mask attribute to value.
48804//
48805// value: analogous to `begin_mask`
48806// If not specified, defaults to 0
48807func StridedSliceEndMask(value int64) StridedSliceAttr {
48808	return func(m optionalAttr) {
48809		m["end_mask"] = value
48810	}
48811}
48812
48813// StridedSliceEllipsisMask sets the optional ellipsis_mask attribute to value.
48814//
48815// value: a bitmask where bit `i` being 1 means the `i`th
48816// position is actually an ellipsis. One bit at most can be 1.
48817// If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
48818// is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
48819// implicitly creates as many range specifications as necessary to fully
48820// specify the sliced range for every dimension. For example for a 4-dimensional
48821// tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
48822// If not specified, defaults to 0
48823func StridedSliceEllipsisMask(value int64) StridedSliceAttr {
48824	return func(m optionalAttr) {
48825		m["ellipsis_mask"] = value
48826	}
48827}
48828
48829// StridedSliceNewAxisMask sets the optional new_axis_mask attribute to value.
48830//
48831// value: a bitmask where bit `i` being 1 means the `i`th
48832// specification creates a new dimension of size 1. For example
48833// `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
48834// If not specified, defaults to 0
48835func StridedSliceNewAxisMask(value int64) StridedSliceAttr {
48836	return func(m optionalAttr) {
48837		m["new_axis_mask"] = value
48838	}
48839}
48840
48841// StridedSliceShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
48842//
48843// value: a bitmask where bit `i` implies that the `i`th
48844// specification should shrink the dimensionality. begin and end
48845// must imply a slice of size 1 in the dimension. For example in
48846// python one might do `foo[:, 3, :]` which would result in
48847// `shrink_axis_mask` being 2.
48848// If not specified, defaults to 0
48849func StridedSliceShrinkAxisMask(value int64) StridedSliceAttr {
48850	return func(m optionalAttr) {
48851		m["shrink_axis_mask"] = value
48852	}
48853}
48854
48855// Return a strided slice from `input`.
48856//
48857// Note, most Python users will want to use the Python `Tensor.__getitem__`
48858// or `Variable.__getitem__` rather than this op directly.
48859//
48860// The goal of this op is to produce a new tensor with a subset of
48861// the elements from the `n` dimensional `input` tensor. The subset is chosen using
48862// a sequence of `m` sparse range specifications encoded into the arguments
48863// of this function. Note, in some cases
48864// `m` could be equal to `n`, but this need not be the case. Each
48865// range specification entry can be one of the following:
48866//
48867//   - An ellipsis (...). Ellipses are used to imply zero or more
48868//     dimensions of full-dimension selection and are produced using
48869//     `ellipsis_mask`. For example, `foo[...]` is the identity slice.
48870//
48871//   - A new axis. This is used to insert a new shape=1 dimension and is
48872//     produced using `new_axis_mask`. For example, `foo[:, ...]` where
48873//     `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
48874//
48875//   - A range `begin:end:stride`. This is used to specify how much to choose from
48876//     a given dimension. `stride` can be any integer but 0.  `begin` is an integer
48877//     which represents the index of the first value to select while `end` represents
48878//     the index of the last value to select. The number of values selected in each
48879//     dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
48880//     `begin` and `end` can be negative where `-1` is the last element, `-2` is
48881//     the second to last. `begin_mask` controls whether to replace the explicitly
48882//     given `begin` with an implicit effective value of `0` if `stride > 0` and
48883//     `-1` if `stride < 0`. `end_mask` is analogous but produces the number
48884//     required to create the largest open interval. For example, given a shape
48885//     `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
48886//     not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
48887//     and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
48888//     first dimension of a tensor while dropping the last two (in the original
48889//     order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.
48890//
48891//   - A single index. This is used to keep only elements that have a given
48892//     index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
48893//     shape `(6,)` tensor. This is encoded in `begin` and `end` and
48894//     `shrink_axis_mask`.
48895//
48896// Each conceptual range specification is encoded in the op's argument. This
48897// encoding is best understood by considering a non-trivial example. In
48898// particular,
48899// `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
48900//
48901// ```
48902// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
48903// end = [2, 4, x, x, -3, x]
48904// strides = [1, 1, x, x, -1, 1]
48905// begin_mask = 1<<4 | 1<<5 = 48
48906// end_mask = 1<<5 = 32
48907// ellipsis_mask = 1<<3 = 8
48908// new_axis_mask = 1<<2 = 4
48909// shrink_axis_mask = 1<<0 = 1
48910// ```
48911//
48912// In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
48913// the slice becomes (2, 1, 5, 5, 2, 5).
48914// Let us walk step by step through each argument specification.
48915//
48916// 1.  The first argument in the example slice is turned into `begin = 1` and
48917// `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
48918// also set the appropriate bit in `shrink_axis_mask`.
48919//
48920// 2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
48921// zero bits contributed.
48922//
48923// 3. None is a synonym for `tf.newaxis`. This means insert a dimension of
48924// size 1 in the final shape. Dummy values are contributed to begin,
48925// end and stride, while the new_axis_mask bit is set.
48926//
48927// 4. `...` grabs the full ranges from as many dimensions as needed to
48928// fully specify a slice for every dimension of the input shape.
48929//
48930// 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
48931// with a dimension that has shape `s` is converted to a positive index
48932// `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
48933// is done internally so begin, end and strides receive x, -3, and -1.
48934// The appropriate begin_mask bit is set to indicate the start range is the
48935// full range (ignoring the x).
48936//
48937// 6. `:` indicates that the entire contents of the corresponding dimension
48938// are selected. This is equivalent to `::` or `0::1`. begin, end, and strides
48939// receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
48940// `end_mask` are also set.
48941//
48942// *Requirements*:
48943//
48944//	`0 != strides[i] for i in [0, m)`
48945//	`ellipsis_mask must be a power of two (only one ellipsis)`
48946//
48947// Arguments:
48948//
48949//	begin: `begin[k]` specifies the offset into the `k`th range specification.
48950//
48951// The exact dimension this corresponds to will be determined by context.
48952// Out-of-bounds values will be silently clamped. If the `k`th bit of
48953// `begin_mask` is set, then `begin[k]` is ignored and the full range of the
48954// appropriate dimension is used instead. Negative values cause indexing
48955// to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
48956//
48957//	end: `end[i]` is like `begin` with the exception that `end_mask` is
48958//
48959// used to determine full ranges.
48960//
48961//	strides: `strides[i]` specifies the increment in the `i`th specification
48962//
48963// after extracting a given element. Negative indices will reverse
48964// the original order. Out-of-range values are
48965// clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`
48966func StridedSlice(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, optional ...StridedSliceAttr) (output tf.Output) {
48967	if scope.Err() != nil {
48968		return
48969	}
48970	attrs := map[string]interface{}{}
48971	for _, a := range optional {
48972		a(attrs)
48973	}
48974	opspec := tf.OpSpec{
48975		Type: "StridedSlice",
48976		Input: []tf.Input{
48977			input, begin, end, strides,
48978		},
48979		Attrs: attrs,
48980	}
48981	op := scope.AddOperation(opspec)
48982	return op.Output(0)
48983}
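
// Sketch of the simplest case (illustrative values, no masks): slicing
// rows 0:2 and columns 1:3 of a 2x3 constant.
//
// ```
// s := op.NewScope()
// input := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
// begin := op.Const(s, []int32{0, 1})
// end := op.Const(s, []int32{2, 3})
// strides := op.Const(s, []int32{1, 1})
// out := op.StridedSlice(s, input, begin, end, strides) // [[2, 3], [5, 6]]
// ```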
48984
48985// StridedSliceGradAttr is an optional argument to StridedSliceGrad.
48986type StridedSliceGradAttr func(optionalAttr)
48987
48988// StridedSliceGradBeginMask sets the optional begin_mask attribute to value.
48989// If not specified, defaults to 0
48990func StridedSliceGradBeginMask(value int64) StridedSliceGradAttr {
48991	return func(m optionalAttr) {
48992		m["begin_mask"] = value
48993	}
48994}
48995
48996// StridedSliceGradEndMask sets the optional end_mask attribute to value.
48997// If not specified, defaults to 0
48998func StridedSliceGradEndMask(value int64) StridedSliceGradAttr {
48999	return func(m optionalAttr) {
49000		m["end_mask"] = value
49001	}
49002}
49003
49004// StridedSliceGradEllipsisMask sets the optional ellipsis_mask attribute to value.
49005// If not specified, defaults to 0
49006func StridedSliceGradEllipsisMask(value int64) StridedSliceGradAttr {
49007	return func(m optionalAttr) {
49008		m["ellipsis_mask"] = value
49009	}
49010}
49011
49012// StridedSliceGradNewAxisMask sets the optional new_axis_mask attribute to value.
49013// If not specified, defaults to 0
49014func StridedSliceGradNewAxisMask(value int64) StridedSliceGradAttr {
49015	return func(m optionalAttr) {
49016		m["new_axis_mask"] = value
49017	}
49018}
49019
49020// StridedSliceGradShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
49021// If not specified, defaults to 0
49022func StridedSliceGradShrinkAxisMask(value int64) StridedSliceGradAttr {
49023	return func(m optionalAttr) {
49024		m["shrink_axis_mask"] = value
49025	}
49026}
49027
49028// Returns the gradient of `StridedSlice`.
49029//
49030// Since `StridedSlice` cuts out pieces of its `input` which is of size
49031// `shape`, its gradient will have the same shape (which is passed here
49032// as `shape`). The gradient will be zero in any element that the slice
49033// does not select.
49034//
49035// Arguments are the same as StridedSlice, with the exception that
49036// `dy` is the input gradient to be propagated and `shape` is the
49037// shape of `StridedSlice`'s `input`.
49038func StridedSliceGrad(scope *Scope, shape tf.Output, begin tf.Output, end tf.Output, strides tf.Output, dy tf.Output, optional ...StridedSliceGradAttr) (output tf.Output) {
49039	if scope.Err() != nil {
49040		return
49041	}
49042	attrs := map[string]interface{}{}
49043	for _, a := range optional {
49044		a(attrs)
49045	}
49046	opspec := tf.OpSpec{
49047		Type: "StridedSliceGrad",
49048		Input: []tf.Input{
49049			shape, begin, end, strides, dy,
49050		},
49051		Attrs: attrs,
49052	}
49053	op := scope.AddOperation(opspec)
49054	return op.Output(0)
49055}
49056
49057// StringFormatAttr is an optional argument to StringFormat.
49058type StringFormatAttr func(optionalAttr)
49059
49060// StringFormatTemplate sets the optional template attribute to value.
49061//
49062// value: A string, the template to format tensor summaries into.
49063// If not specified, defaults to "%s"
49064func StringFormatTemplate(value string) StringFormatAttr {
49065	return func(m optionalAttr) {
49066		m["template"] = value
49067	}
49068}
49069
49070// StringFormatPlaceholder sets the optional placeholder attribute to value.
49071//
49072// value: A string, at each placeholder in the template a subsequent tensor summary will be inserted.
49073// If not specified, defaults to "%s"
49074func StringFormatPlaceholder(value string) StringFormatAttr {
49075	return func(m optionalAttr) {
49076		m["placeholder"] = value
49077	}
49078}
49079
49080// StringFormatSummarize sets the optional summarize attribute to value.
49081//
49082// value: When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.
49083// If not specified, defaults to 3
49084func StringFormatSummarize(value int64) StringFormatAttr {
49085	return func(m optionalAttr) {
49086		m["summarize"] = value
49087	}
49088}
49089
49090// Formats a string template using a list of tensors.
49091//
49092// Formats a string template using a list of tensors, pretty-printing tensor summaries.
49093//
49094// Arguments:
49095//
49096//	inputs: The list of tensors to format into the placeholder string.
49097//
49098// Returns The resulting string scalar.
49099func StringFormat(scope *Scope, inputs []tf.Output, optional ...StringFormatAttr) (output tf.Output) {
49100	if scope.Err() != nil {
49101		return
49102	}
49103	attrs := map[string]interface{}{}
49104	for _, a := range optional {
49105		a(attrs)
49106	}
49107	opspec := tf.OpSpec{
49108		Type: "StringFormat",
49109		Input: []tf.Input{
49110			tf.OutputList(inputs),
49111		},
49112		Attrs: attrs,
49113	}
49114	op := scope.AddOperation(opspec)
49115	return op.Output(0)
49116}
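
// Sketch (illustrative): the inputs are passed as a []tf.Output list and
// spliced into the template at each placeholder.
//
// ```
// s := op.NewScope()
// t := op.Const(s, []int32{1, 2, 3})
// msg := op.StringFormat(s, []tf.Output{t},
// 	op.StringFormatTemplate("tensor: %s"))
// ```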
49117
49118// StringJoinAttr is an optional argument to StringJoin.
49119type StringJoinAttr func(optionalAttr)
49120
49121// StringJoinSeparator sets the optional separator attribute to value.
49122//
49123// value: string, an optional join separator.
49124// If not specified, defaults to ""
49125func StringJoinSeparator(value string) StringJoinAttr {
49126	return func(m optionalAttr) {
49127		m["separator"] = value
49128	}
49129}
49130
49131// Joins the strings in the given list of string tensors into one tensor;
49132//
49133// with the given separator (default is an empty separator).
49134//
49135// Examples:
49136//
49137// >>> s = ["hello", "world", "tensorflow"]
49138// >>> tf.strings.join(s, " ")
49139// <tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
49140//
49141// Arguments:
49142//
49143//	inputs: A list of string tensors.  The tensors must all have the same shape,
49144//
49145// or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
49146// of non-scalar inputs.
49147func StringJoin(scope *Scope, inputs []tf.Output, optional ...StringJoinAttr) (output tf.Output) {
49148	if scope.Err() != nil {
49149		return
49150	}
49151	attrs := map[string]interface{}{}
49152	for _, a := range optional {
49153		a(attrs)
49154	}
49155	opspec := tf.OpSpec{
49156		Type: "StringJoin",
49157		Input: []tf.Input{
49158			tf.OutputList(inputs),
49159		},
49160		Attrs: attrs,
49161	}
49162	op := scope.AddOperation(opspec)
49163	return op.Output(0)
49164}
49165
49166// StringLengthAttr is an optional argument to StringLength.
49167type StringLengthAttr func(optionalAttr)
49168
49169// StringLengthUnit sets the optional unit attribute to value.
49170//
49171// value: The unit that is counted to compute string length.  One of: `"BYTE"` (for
49172// the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8
49173// encoded Unicode code points in each string).  Results are undefined
49174// if `unit=UTF8_CHAR` and the `input` strings do not contain structurally
49175// valid UTF-8.
49176// If not specified, defaults to "BYTE"
49177func StringLengthUnit(value string) StringLengthAttr {
49178	return func(m optionalAttr) {
49179		m["unit"] = value
49180	}
49181}
49182
49183// String lengths of `input`.
49184//
49185// Computes the length of each string given in the input tensor.
49186//
49187// >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642'])
49188// >>> tf.strings.length(strings).numpy() # default counts bytes
49189// array([ 5, 10, 4], dtype=int32)
49190// >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy()
49191// array([ 5, 10, 1], dtype=int32)
49192//
49193// Arguments:
49194//
49195//	input: The strings for which to compute the length for each element.
49196//
49197// Returns Integer tensor that has the same shape as `input`. The output contains the
49198// element-wise string lengths of `input`.
49199func StringLength(scope *Scope, input tf.Output, optional ...StringLengthAttr) (output tf.Output) {
49200	if scope.Err() != nil {
49201		return
49202	}
49203	attrs := map[string]interface{}{}
49204	for _, a := range optional {
49205		a(attrs)
49206	}
49207	opspec := tf.OpSpec{
49208		Type: "StringLength",
49209		Input: []tf.Input{
49210			input,
49211		},
49212		Attrs: attrs,
49213	}
49214	op := scope.AddOperation(opspec)
49215	return op.Output(0)
49216}
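
// Sketch mirroring the Python example above (illustrative; for pure-ASCII
// strings the BYTE and UTF8_CHAR lengths coincide):
//
// ```
// s := op.NewScope()
// strs := op.Const(s, []string{"Hello", "TensorFlow"})
// byteLen := op.StringLength(s, strs) // [5, 10]
// charLen := op.StringLength(s, strs, op.StringLengthUnit("UTF8_CHAR"))
// ```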
49217
49218// StringLowerAttr is an optional argument to StringLower.
49219type StringLowerAttr func(optionalAttr)
49220
49221// StringLowerEncoding sets the optional encoding attribute to value.
49222//
49223// value: Character encoding of `input`. Allowed values are '' and 'utf-8'.
49224// Value '' is interpreted as ASCII.
49225// If not specified, defaults to ""
49226func StringLowerEncoding(value string) StringLowerAttr {
49227	return func(m optionalAttr) {
49228		m["encoding"] = value
49229	}
49230}
49231
49232// Converts all uppercase characters into their respective lowercase replacements.
49233//
49234// Example:
49235//
49236// >>> tf.strings.lower("CamelCase string and ALL CAPS")
49237// <tf.Tensor: shape=(), dtype=string, numpy=b'camelcase string and all caps'>
49238//
49239// Arguments:
49240//
49241//	input: The input to be lower-cased.
49242func StringLower(scope *Scope, input tf.Output, optional ...StringLowerAttr) (output tf.Output) {
49243	if scope.Err() != nil {
49244		return
49245	}
49246	attrs := map[string]interface{}{}
49247	for _, a := range optional {
49248		a(attrs)
49249	}
49250	opspec := tf.OpSpec{
49251		Type: "StringLower",
49252		Input: []tf.Input{
49253			input,
49254		},
49255		Attrs: attrs,
49256	}
49257	op := scope.AddOperation(opspec)
49258	return op.Output(0)
49259}
49260
49261// Creates ngrams from ragged string data.
49262//
49263// This op accepts a ragged tensor with 1 ragged dimension containing only
49264// strings and outputs a ragged tensor with 1 ragged dimension containing ngrams
49265// of that string, joined along the innermost axis.
49266//
49267// Arguments:
49268//
49269//	data: The values tensor of the ragged string tensor to make ngrams out of. Must be a
49270//
49271// 1D string tensor.
49272//
49273//	data_splits: The splits tensor of the ragged string tensor to make ngrams out of.
49274//	separator: The string to append between elements of the token. Use "" for no separator.
49275//	ngram_widths: The sizes of the ngrams to create.
49276//	left_pad: The string to use to pad the left side of the ngram sequence. Only used if
49277//
49278// pad_width != 0.
49279//
49280//	right_pad: The string to use to pad the right side of the ngram sequence. Only used if
49281//
49282// pad_width != 0.
49283//
49284//	pad_width: The number of padding elements to add to each side of each
49285//
49286// sequence. Note that padding will never be greater than 'ngram_widths'-1
49287// regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1`
49288// elements.
49289//
49290// Returns:
49291//
49292//	ngrams: The values tensor of the output ngrams ragged tensor.
49293//	ngrams_splits: The splits tensor of the output ngrams ragged tensor.
49294func StringNGrams(scope *Scope, data tf.Output, data_splits tf.Output, separator string, ngram_widths []int64, left_pad string, right_pad string, pad_width int64, preserve_short_sequences bool) (ngrams tf.Output, ngrams_splits tf.Output) {
49295	if scope.Err() != nil {
49296		return
49297	}
49298	attrs := map[string]interface{}{"separator": separator, "ngram_widths": ngram_widths, "left_pad": left_pad, "right_pad": right_pad, "pad_width": pad_width, "preserve_short_sequences": preserve_short_sequences}
49299	opspec := tf.OpSpec{
49300		Type: "StringNGrams",
49301		Input: []tf.Input{
49302			data, data_splits,
49303		},
49304		Attrs: attrs,
49305	}
49306	op := scope.AddOperation(opspec)
49307	return op.Output(0), op.Output(1)
49308}
49309
49310// StringSplitAttr is an optional argument to StringSplit.
49311type StringSplitAttr func(optionalAttr)
49312
49313// StringSplitSkipEmpty sets the optional skip_empty attribute to value.
49314//
49315// value: A `bool`. If `True`, skip the empty strings from the result.
49316// If not specified, defaults to true
49317func StringSplitSkipEmpty(value bool) StringSplitAttr {
49318	return func(m optionalAttr) {
49319		m["skip_empty"] = value
49320	}
49321}
49322
49323// Split elements of `input` based on `delimiter` into a `SparseTensor`.
49324//
49325// Let N be the size of source (typically N will be the batch size). Split each
49326// element of `input` based on `delimiter` and return a `SparseTensor`
49327// containing the split tokens. Empty tokens are ignored.
49328//
49329// `delimiter` can be empty, or a string of split characters. If `delimiter` is an
49330//
49331//	empty string, each element of `input` is split into individual single-byte
49332//	character strings, including splitting of UTF-8 multibyte sequences. Otherwise
49333//	every character of `delimiter` is a potential split point.
49334//
49335// For example:
49336//
49337//	N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
49338//	will be
49339//
49340//	indices = [0, 0;
49341//	           0, 1;
49342//	           1, 0;
49343//	           1, 1;
49344//	           1, 2]
49345//	shape = [2, 3]
49346//	values = ['hello', 'world', 'a', 'b', 'c']
49347//
49348// Arguments:
49349//
49350//	input: 1-D. Strings to split.
49351//	delimiter: 0-D. Delimiter characters (bytes), or empty string.
49352//
49353// Returns:
49354//
49355//	indices: A dense matrix of int64 representing the indices of the sparse tensor.
49356//	values: A vector of strings corresponding to the split values.
49357//	shape: a length-2 vector of int64 representing the shape of the sparse
49358//
49359// tensor, where the first value is N and the second value is the maximum number
49360// of tokens in a single input entry.
49361func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) {
49362	if scope.Err() != nil {
49363		return
49364	}
49365	attrs := map[string]interface{}{}
49366	for _, a := range optional {
49367		a(attrs)
49368	}
49369	opspec := tf.OpSpec{
49370		Type: "StringSplit",
49371		Input: []tf.Input{
49372			input, delimiter,
49373		},
49374		Attrs: attrs,
49375	}
49376	op := scope.AddOperation(opspec)
49377	return op.Output(0), op.Output(1), op.Output(2)
49378}
49379
49380// StringSplitV2Attr is an optional argument to StringSplitV2.
49381type StringSplitV2Attr func(optionalAttr)
49382
49383// StringSplitV2Maxsplit sets the optional maxsplit attribute to value.
49384//
49385// value: An `int`. If `maxsplit > 0`, limits the number of splits in the result.
49386// If not specified, defaults to -1
49387func StringSplitV2Maxsplit(value int64) StringSplitV2Attr {
49388	return func(m optionalAttr) {
49389		m["maxsplit"] = value
49390	}
49391}
49392
49393// Split elements of `source` based on `sep` into a `SparseTensor`.
49394//
49395// Let N be the size of source (typically N will be the batch size). Split each
49396// element of `source` based on `sep` and return a `SparseTensor`
49397// containing the split tokens. Empty tokens are ignored.
49398//
49399// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
49400// then the output will be
49401// ```
49402// st.indices = [0, 0;
49403//               0, 1;
49404//               1, 0;
49405//               1, 1;
49406//               1, 2]
49408//
49409// st.shape = [2, 3]
49410// st.values = ['hello', 'world', 'a', 'b', 'c']
49411// ```
49412//
49413// If `sep` is given, consecutive delimiters are not grouped together and are
49414// deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and
49415// sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty
49416// string, consecutive whitespace is regarded as a single separator, and the
49417// result will contain no empty strings at the start or end if the string has
49418// leading or trailing whitespace.
49419//
49420// Note that the above mentioned behavior matches python's str.split.
49421//
49422// Arguments:
49423//
49424//	input: `1-D` string `Tensor`, the strings to split.
49425//	sep: `0-D` string `Tensor`, the delimiter character.
49426func StringSplitV2(scope *Scope, input tf.Output, sep tf.Output, optional ...StringSplitV2Attr) (indices tf.Output, values tf.Output, shape tf.Output) {
49427	if scope.Err() != nil {
49428		return
49429	}
49430	attrs := map[string]interface{}{}
49431	for _, a := range optional {
49432		a(attrs)
49433	}
49434	opspec := tf.OpSpec{
49435		Type: "StringSplitV2",
49436		Input: []tf.Input{
49437			input, sep,
49438		},
49439		Attrs: attrs,
49440	}
49441	op := scope.AddOperation(opspec)
49442	return op.Output(0), op.Output(1), op.Output(2)
49443}
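
// Sketch (illustrative values) producing the sparse result documented above:
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"hello world", "a b c"})
// sep := op.Const(s, " ")
// indices, values, shape := op.StringSplitV2(s, input, sep)
// // values: ['hello', 'world', 'a', 'b', 'c'], shape: [2, 3]
// ```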
49444
49445// Strip leading and trailing whitespaces from the Tensor.
49446//
49447// Examples:
49448//
49449// >>> tf.strings.strip(["\nTensorFlow", "     The python library    "]).numpy()
49450// array([b'TensorFlow', b'The python library'], dtype=object)
49451//
49452// Arguments:
49453//
49454//	input: A string `Tensor` of any shape.
49455//
49456// Returns A string `Tensor` of the same shape as the input.
49457func StringStrip(scope *Scope, input tf.Output) (output tf.Output) {
49458	if scope.Err() != nil {
49459		return
49460	}
49461	opspec := tf.OpSpec{
49462		Type: "StringStrip",
49463		Input: []tf.Input{
49464			input,
49465		},
49466	}
49467	op := scope.AddOperation(opspec)
49468	return op.Output(0)
49469}
49470
49471// Converts each string in the input Tensor to its hash mod by a number of buckets.
49472//
49473// The hash function is deterministic on the content of the string within the
49474// process.
49475//
49476// Note that the hash function may change from time to time.
49477// This functionality will be deprecated and it's recommended to use
49478// `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`.
49479//
49480// Arguments:
49481//
49482//	num_buckets: The number of buckets.
49483//
49484// Returns A Tensor of the same shape as the input `string_tensor`.
49485func StringToHashBucket(scope *Scope, string_tensor tf.Output, num_buckets int64) (output tf.Output) {
49486	if scope.Err() != nil {
49487		return
49488	}
49489	attrs := map[string]interface{}{"num_buckets": num_buckets}
49490	opspec := tf.OpSpec{
49491		Type: "StringToHashBucket",
49492		Input: []tf.Input{
49493			string_tensor,
49494		},
49495		Attrs: attrs,
49496	}
49497	op := scope.AddOperation(opspec)
49498	return op.Output(0)
49499}
49500
49501// Converts each string in the input Tensor to its hash mod by a number of buckets.
49502//
49503// The hash function is deterministic on the content of the string within the
49504// process and will never change. However, it is not suitable for cryptography.
49505// This function may be used when CPU time is scarce and inputs are trusted or
49506// unimportant. There is a risk of adversaries constructing inputs that all hash
49507// to the same bucket. To prevent this problem, use a strong hash function with
49508// `tf.string_to_hash_bucket_strong`.
49509//
49510// Examples:
49511//
49512// >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
49513// array([0, 2, 2])
49514//
49515// Arguments:
49516//
49517//	input: The strings to assign a hash bucket.
49518//	num_buckets: The number of buckets.
49519//
49520// Returns A Tensor of the same shape as the input `string_tensor`.
49521func StringToHashBucketFast(scope *Scope, input tf.Output, num_buckets int64) (output tf.Output) {
49522	if scope.Err() != nil {
49523		return
49524	}
49525	attrs := map[string]interface{}{"num_buckets": num_buckets}
49526	opspec := tf.OpSpec{
49527		Type: "StringToHashBucketFast",
49528		Input: []tf.Input{
49529			input,
49530		},
49531		Attrs: attrs,
49532	}
49533	op := scope.AddOperation(opspec)
49534	return op.Output(0)
49535}
49536
49537// Converts each string in the input Tensor to its hash mod by a number of buckets.
49538//
49539// The hash function is deterministic on the content of the string within the
49540// process. The hash function is a keyed hash function, where attribute `key`
49541// defines the key of the hash function. `key` is an array of 2 elements.
49542//
49543// A strong hash is important when inputs may be malicious, e.g. URLs with
49544// additional components. Adversaries could try to make their inputs hash to the
49545// same bucket for a denial-of-service attack or to skew the results. A strong
49546// hash can be used to make it difficult to find inputs with a skewed hash value
49547// distribution over buckets. This requires that the hash function is
49548// seeded by a high-entropy (random) "key" unknown to the adversary.
49549//
49550// The additional robustness comes at a cost of roughly 4x higher compute
49551// time than `tf.string_to_hash_bucket_fast`.
49552//
49553// Examples:
49554//
49555// >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy()
49556// array([2, 0])
49557//
49558// Arguments:
49559//
49560//	input: The strings to assign a hash bucket.
49561//	num_buckets: The number of buckets.
49562//	key: The key used to seed the hash function, passed as a list of two uint64
49563//
49564// elements.
49565//
49566// Returns A Tensor of the same shape as the input `string_tensor`.
49567func StringToHashBucketStrong(scope *Scope, input tf.Output, num_buckets int64, key []int64) (output tf.Output) {
49568	if scope.Err() != nil {
49569		return
49570	}
49571	attrs := map[string]interface{}{"num_buckets": num_buckets, "key": key}
49572	opspec := tf.OpSpec{
49573		Type: "StringToHashBucketStrong",
49574		Input: []tf.Input{
49575			input,
49576		},
49577		Attrs: attrs,
49578	}
49579	op := scope.AddOperation(opspec)
49580	return op.Output(0)
49581}
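
// Sketch mirroring the Python example above (the two-element key is the
// required uint64 seed pair; values are illustrative):
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"Hello", "TF"})
// buckets := op.StringToHashBucketStrong(s, input, 3, []int64{1, 2})
// ```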
49582
49583// StringToNumberAttr is an optional argument to StringToNumber.
49584type StringToNumberAttr func(optionalAttr)
49585
49586// StringToNumberOutType sets the optional out_type attribute to value.
49587//
49588// value: The numeric type to interpret each string in `string_tensor` as.
49589// If not specified, defaults to DT_FLOAT
49590func StringToNumberOutType(value tf.DataType) StringToNumberAttr {
49591	return func(m optionalAttr) {
49592		m["out_type"] = value
49593	}
49594}
49595
49596// Converts each string in the input Tensor to the specified numeric type.
49597//
49598// (Note that int32 overflow results in an error while float overflow
49599// results in a rounded value.)
49600//
49601// Example:
49602//
49603// >>> strings = ["5.0", "3.0", "7.0"]
49604// >>> tf.strings.to_number(strings)
49605// <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)>
49606//
49607// Returns A Tensor of the same shape as the input `string_tensor`.
49608func StringToNumber(scope *Scope, string_tensor tf.Output, optional ...StringToNumberAttr) (output tf.Output) {
49609	if scope.Err() != nil {
49610		return
49611	}
49612	attrs := map[string]interface{}{}
49613	for _, a := range optional {
49614		a(attrs)
49615	}
49616	opspec := tf.OpSpec{
49617		Type: "StringToNumber",
49618		Input: []tf.Input{
49619			string_tensor,
49620		},
49621		Attrs: attrs,
49622	}
49623	op := scope.AddOperation(opspec)
49624	return op.Output(0)
49625}
49626
49627// StringUpperAttr is an optional argument to StringUpper.
49628type StringUpperAttr func(optionalAttr)
49629
49630// StringUpperEncoding sets the optional encoding attribute to value.
49631//
49632// value: Character encoding of `input`. Allowed values are '' and 'utf-8'.
49633// Value '' is interpreted as ASCII.
49634// If not specified, defaults to ""
49635func StringUpperEncoding(value string) StringUpperAttr {
49636	return func(m optionalAttr) {
49637		m["encoding"] = value
49638	}
49639}
49640
49641// Converts all lowercase characters into their respective uppercase replacements.
49642//
49643// Example:
49644//
49645// >>> tf.strings.upper("CamelCase string and ALL CAPS")
49646// <tf.Tensor: shape=(), dtype=string, numpy=b'CAMELCASE STRING AND ALL CAPS'>
49647//
49648// Arguments:
49649//
49650//	input: The input to be upper-cased.
49651func StringUpper(scope *Scope, input tf.Output, optional ...StringUpperAttr) (output tf.Output) {
49652	if scope.Err() != nil {
49653		return
49654	}
49655	attrs := map[string]interface{}{}
49656	for _, a := range optional {
49657		a(attrs)
49658	}
49659	opspec := tf.OpSpec{
49660		Type: "StringUpper",
49661		Input: []tf.Input{
49662			input,
49663		},
49664		Attrs: attrs,
49665	}
49666	op := scope.AddOperation(opspec)
49667	return op.Output(0)
49668}
49669
49670// Returns x - y element-wise.
49671//
49672// *NOTE*: `Sub` supports broadcasting. More about broadcasting
49673// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
49674func Sub(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
49675	if scope.Err() != nil {
49676		return
49677	}
49678	opspec := tf.OpSpec{
49679		Type: "Sub",
49680		Input: []tf.Input{
49681			x, y,
49682		},
49683	}
49684	op := scope.AddOperation(opspec)
49685	return op.Output(0)
49686}
49687
49688// SubstrAttr is an optional argument to Substr.
49689type SubstrAttr func(optionalAttr)
49690
49691// SubstrUnit sets the optional unit attribute to value.
49692//
49693// value: The unit that is used to create the substring.  One of: `"BYTE"` (for
49694// defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8
49695// encoded Unicode code points).  The default is `"BYTE"`. Results are undefined if
49696// `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid
49697// UTF-8.
49698// If not specified, defaults to "BYTE"
49699func SubstrUnit(value string) SubstrAttr {
49700	return func(m optionalAttr) {
49701		m["unit"] = value
49702	}
49703}
49704
49705// Return substrings from `Tensor` of strings.
49706//
49707// For each string in the input `Tensor`, creates a substring starting at index
49708// `pos` with a total length of `len`.
49709//
49710// If `len` defines a substring that would extend beyond the length of the input
49711// string, or if `len` is negative, then as many characters as possible are used.
49712//
49713// A negative `pos` indicates distance within the string backwards from the end.
49714//
49715// If `pos` specifies an index which is out of range for any of the input strings,
49716// then an `InvalidArgumentError` is thrown.
49717//
49718// `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
49719// Op creation.
49720//
49721// *NOTE*: `Substr` supports broadcasting up to two dimensions. More about
49722// broadcasting
49723// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
49724//
49725// ---
49726//
49727// # Examples
49728//
49729// Using scalar `pos` and `len`:
49730//
49731// ```python
49732// input = [b'Hello', b'World']
49733// position = 1
49734// length = 3
49735//
49736// output = [b'ell', b'orl']
49737// ```
49738//
49739// Using `pos` and `len` with same shape as `input`:
49740//
49741// ```python
49742// input = [[b'ten', b'eleven', b'twelve'],
49743//          [b'thirteen', b'fourteen', b'fifteen'],
49744//          [b'sixteen', b'seventeen', b'eighteen']]
49745//
49746// position = [[1, 2, 3],
49747//             [1, 2, 3],
49748//             [1, 2, 3]]
49749//
49750// length =   [[2, 3, 4],
49751//             [4, 3, 2],
49752//             [5, 5, 5]]
49753//
49754// output = [[b'en', b'eve', b'lve'],
49755//           [b'hirt', b'urt', b'te'],
49756//           [b'ixtee', b'vente', b'hteen']]
49761//
49762// ```
49763//
49764// Broadcasting `pos` and `len` onto `input`:
49765//
49766// ```
49767// input = [[b'ten', b'eleven', b'twelve'],
49768//          [b'thirteen', b'fourteen', b'fifteen'],
49769//          [b'sixteen', b'seventeen', b'eighteen'],
49770//          [b'nineteen', b'twenty', b'twentyone']]
49771//
49772// position = [1, 2, 3]
49773// length =   [1, 2, 3]
49774//
49775// output = [[b'e', b'ev', b'lve'],
49776//           [b'h', b'ur', b'tee'],
49777//           [b'i', b've', b'hte'],
49778//           [b'i', b'en', b'nty']]
49781//
49782// ```
49783//
49784// Broadcasting `input` onto `pos` and `len`:
49785//
49786// ```
49787// input = b'thirteen'
49788// position = [1, 5, 7]
49789// length =   [3, 2, 1]
49790//
49791// output = [b'hir', b'ee', b'n']
49792// ```
49793//
49794// Raises:
49795//
49796//   - `ValueError`: If the first argument cannot be converted to a
49797//     Tensor of `dtype string`.
49798//   - `InvalidArgumentError`: If indices are out of range.
49799//   - `ValueError`: If `pos` and `len` are not the same shape.
49800//
49801// Arguments:
49802//
49803//	input: Tensor of strings
49804//	pos: Scalar defining the position of first character in each substring
49805//	len: Scalar defining the number of characters to include in each substring
49806//
49807// Returns Tensor of substrings
49808func Substr(scope *Scope, input tf.Output, pos tf.Output, len tf.Output, optional ...SubstrAttr) (output tf.Output) {
49809	if scope.Err() != nil {
49810		return
49811	}
49812	attrs := map[string]interface{}{}
49813	for _, a := range optional {
49814		a(attrs)
49815	}
49816	opspec := tf.OpSpec{
49817		Type: "Substr",
49818		Input: []tf.Input{
49819			input, pos, len,
49820		},
49821		Attrs: attrs,
49822	}
49823	op := scope.AddOperation(opspec)
49824	return op.Output(0)
49825}
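
// Sketch of the scalar pos/len case from the examples above (illustrative):
//
// ```
// s := op.NewScope()
// input := op.Const(s, []string{"Hello", "World"})
// pos := op.Const(s, int32(1))
// length := op.Const(s, int32(3))
// out := op.Substr(s, input, pos, length) // ["ell", "orl"]
// ```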
49826
49827// SumAttr is an optional argument to Sum.
49828type SumAttr func(optionalAttr)
49829
49830// SumKeepDims sets the optional keep_dims attribute to value.
49831//
49832// value: If true, retain reduced dimensions with length 1.
49833// If not specified, defaults to false
49834func SumKeepDims(value bool) SumAttr {
49835	return func(m optionalAttr) {
49836		m["keep_dims"] = value
49837	}
49838}
49839
49840// Computes the sum of elements across dimensions of a tensor.
49841//
49842// Reduces `input` along the dimensions given in `axis`. Unless
49843// `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
49844// `axis`. If `keep_dims` is true, the reduced dimensions are
49845// retained with length 1.
49846//
49847// Arguments:
49848//
49849//	input: The tensor to reduce.
49850//	axis: The dimensions to reduce. Must be in the range
49851//
49852// `[-rank(input), rank(input))`.
49853//
49854// Returns The reduced tensor.
49855func Sum(scope *Scope, input tf.Output, axis tf.Output, optional ...SumAttr) (output tf.Output) {
49856	if scope.Err() != nil {
49857		return
49858	}
49859	attrs := map[string]interface{}{}
49860	for _, a := range optional {
49861		a(attrs)
49862	}
49863	opspec := tf.OpSpec{
49864		Type: "Sum",
49865		Input: []tf.Input{
49866			input, axis,
49867		},
49868		Attrs: attrs,
49869	}
49870	op := scope.AddOperation(opspec)
49871	return op.Output(0)
49872}
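
// Sketch (illustrative): reducing over axis 0 with and without keep_dims.
//
// ```
// s := op.NewScope()
// x := op.Const(s, [][]float32{{1, 2}, {3, 4}})
// axis := op.Const(s, int32(0))
// colSums := op.Sum(s, x, axis)                    // [4, 6]
// kept := op.Sum(s, x, axis, op.SumKeepDims(true)) // [[4, 6]]
// ```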
49873
49874// SvdAttr is an optional argument to Svd.
49875type SvdAttr func(optionalAttr)
49876
49877// SvdComputeUv sets the optional compute_uv attribute to value.
49878//
49879// value: If true, left and right singular vectors will be
49880// computed and returned in `u` and `v`, respectively.
49881// If false, `u` and `v` are not set and should never be referenced.
49882// If not specified, defaults to true
49883func SvdComputeUv(value bool) SvdAttr {
49884	return func(m optionalAttr) {
49885		m["compute_uv"] = value
49886	}
49887}
49888
49889// SvdFullMatrices sets the optional full_matrices attribute to value.
49890//
49891// value: If true, compute full-sized `u` and `v`. If false
49892// (the default), compute only the leading `P` singular vectors.
49893// Ignored if `compute_uv` is `False`.
49894// If not specified, defaults to false
49895func SvdFullMatrices(value bool) SvdAttr {
49896	return func(m optionalAttr) {
49897		m["full_matrices"] = value
49898	}
49899}
49900
49901// Computes the singular value decompositions of one or more matrices.
49902//
49903// Computes the SVD of each inner matrix in `input` such that
49904// `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`
49905//
49906// ```python
49907// # a is a tensor containing a batch of matrices.
49908// # s is a tensor of singular values for each matrix.
49909// # u is the tensor containing the left singular vectors for each matrix.
49910// # v is the tensor containing the right singular vectors for each matrix.
49911// s, u, v = svd(a)
49912// s, _, _ = svd(a, compute_uv=False)
49913// ```
49914//
49915// Arguments:
49916//
49917//	input: A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
49918//
49919// form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
49920//
49921// Returns:
49922//
49923//	s: Singular values. Shape is `[..., P]`.
49924//	u: Left singular vectors. If `full_matrices` is `False` then shape is
49925//
49926// `[..., M, P]`; if `full_matrices` is `True` then shape is
49927// `[..., M, M]`. Undefined if `compute_uv` is `False`.
49928//
49929//	v: Right singular vectors. If `full_matrices` is `False` then shape is
49930//
49931// `[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
49932// Undefined if `compute_uv` is false.
49933func Svd(scope *Scope, input tf.Output, optional ...SvdAttr) (s tf.Output, u tf.Output, v tf.Output) {
49934	if scope.Err() != nil {
49935		return
49936	}
49937	attrs := map[string]interface{}{}
49938	for _, a := range optional {
49939		a(attrs)
49940	}
49941	opspec := tf.OpSpec{
49942		Type: "Svd",
49943		Input: []tf.Input{
49944			input,
49945		},
49946		Attrs: attrs,
49947	}
49948	op := scope.AddOperation(opspec)
49949	return op.Output(0), op.Output(1), op.Output(2)
49950}
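
// Sketch (illustrative; the scope variable is named to avoid clashing with
// the singular-values output):
//
// ```
// scope := op.NewScope()
// a := op.Const(scope, [][]float32{{2, 0}, {0, 1}})
// sigma, u, v := op.Svd(scope, a, op.SvdFullMatrices(false))
// // sigma: singular values [2, 1]; u, v: left/right singular vectors
// ```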
49951
49952// Forwards `data` to the output port determined by `pred`.
49953//
49954// If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise,
49955// the data goes to `output_false`.
49956//
49957// See also `RefSwitch` and `Merge`.
49958//
49959// Arguments:
49960//
49961//	data: The tensor to be forwarded to the appropriate output.
49962//	pred: A scalar that specifies which output port will receive data.
49963//
49964// Returns:
49965//
49966//	output_false: If `pred` is false, data will be forwarded to this output.
49967//	output_true: If `pred` is true, data will be forwarded to this output.
49968func Switch(scope *Scope, data tf.Output, pred tf.Output) (output_false tf.Output, output_true tf.Output) {
49969	if scope.Err() != nil {
49970		return
49971	}
49972	opspec := tf.OpSpec{
49973		Type: "Switch",
49974		Input: []tf.Input{
49975			data, pred,
49976		},
49977	}
49978	op := scope.AddOperation(opspec)
49979	return op.Output(0), op.Output(1)
49980}
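
// Sketch (illustrative): only the port selected by pred carries data at
// runtime; the other output is dead, which TensorFlow's control-flow
// constructs rely on.
//
// ```
// s := op.NewScope()
// data := op.Const(s, int32(7))
// pred := op.Const(s, true)
// outFalse, outTrue := op.Switch(s, data, pred)
// ```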
49981
49982// TFRecordDatasetAttr is an optional argument to TFRecordDataset.
49983type TFRecordDatasetAttr func(optionalAttr)
49984
49985// TFRecordDatasetMetadata sets the optional metadata attribute to value.
49986// If not specified, defaults to ""
49987func TFRecordDatasetMetadata(value string) TFRecordDatasetAttr {
49988	return func(m optionalAttr) {
49989		m["metadata"] = value
49990	}
49991}
49992
49993// Creates a dataset that emits the records from one or more TFRecord files.
49994//
49995// Arguments:
49996//
49997//	filenames: A scalar or vector containing the name(s) of the file(s) to be
49998//
49999// read.
50000//
50001//	compression_type: A scalar containing either (i) the empty string (no
50002//
50003// compression), (ii) "ZLIB", or (iii) "GZIP".
50004//
50005//	buffer_size: A scalar representing the number of bytes to buffer. A value of
50006//
50007// 0 means no buffering will be performed.
50008func TFRecordDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output, optional ...TFRecordDatasetAttr) (handle tf.Output) {
50009	if scope.Err() != nil {
50010		return
50011	}
50012	attrs := map[string]interface{}{}
50013	for _, a := range optional {
50014		a(attrs)
50015	}
50016	opspec := tf.OpSpec{
50017		Type: "TFRecordDataset",
50018		Input: []tf.Input{
50019			filenames, compression_type, buffer_size,
50020		},
50021		Attrs: attrs,
50022	}
50023	op := scope.AddOperation(opspec)
50024	return op.Output(0)
50025}
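
// Illustrative use of TFRecordDataset (a sketch; the file path is
// hypothetical). The three inputs are ordinary scalar/vector tensors built
// with this package's `Const` wrapper, and the optional metadata attribute
// is threaded through as a functional option.
//
// ```
// s := NewScope()
// filenames := Const(s, []string{"/tmp/data.tfrecord"}) // hypothetical path
// compression := Const(s, "")                           // no compression
// bufferSize := Const(s, int64(0))                      // no buffering
// handle := TFRecordDataset(s, filenames, compression, bufferSize,
// 	TFRecordDatasetMetadata(""))
// _ = handle
// ```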
50026
50027// TFRecordReaderV2Attr is an optional argument to TFRecordReaderV2.
50028type TFRecordReaderV2Attr func(optionalAttr)
50029
50030// TFRecordReaderV2Container sets the optional container attribute to value.
50031//
50032// value: If non-empty, this reader is placed in the given container.
50033// Otherwise, a default container is used.
50034// If not specified, defaults to ""
50035func TFRecordReaderV2Container(value string) TFRecordReaderV2Attr {
50036	return func(m optionalAttr) {
50037		m["container"] = value
50038	}
50039}
50040
50041// TFRecordReaderV2SharedName sets the optional shared_name attribute to value.
50042//
50043// value: If non-empty, this reader is named in the given bucket
50044// with this shared_name. Otherwise, the node name is used instead.
50045// If not specified, defaults to ""
50046func TFRecordReaderV2SharedName(value string) TFRecordReaderV2Attr {
50047	return func(m optionalAttr) {
50048		m["shared_name"] = value
50049	}
50050}
50051
50052// TFRecordReaderV2CompressionType sets the optional compression_type attribute to value.
50053// If not specified, defaults to ""
50054func TFRecordReaderV2CompressionType(value string) TFRecordReaderV2Attr {
50055	return func(m optionalAttr) {
50056		m["compression_type"] = value
50057	}
50058}
50059
50060// A Reader that outputs the records from a TensorFlow Records file.
50061//
50062// Returns The handle to reference the Reader.
50063func TFRecordReaderV2(scope *Scope, optional ...TFRecordReaderV2Attr) (reader_handle tf.Output) {
50064	if scope.Err() != nil {
50065		return
50066	}
50067	attrs := map[string]interface{}{}
50068	for _, a := range optional {
50069		a(attrs)
50070	}
50071	opspec := tf.OpSpec{
50072		Type: "TFRecordReaderV2",
50073
50074		Attrs: attrs,
50075	}
50076	op := scope.AddOperation(opspec)
50077	return op.Output(0)
50078}
50079
50080// Returns the result of a TPU compilation.
50081//
50082// This operation returns the result of a TPU compilation as a serialized
50083// CompilationResultProto, which holds a status and an error message if an error
50084// occurred during compilation.
50085func TPUCompilationResult(scope *Scope) (output tf.Output) {
50086	if scope.Err() != nil {
50087		return
50088	}
50089	opspec := tf.OpSpec{
50090		Type: "TPUCompilationResult",
50091	}
50092	op := scope.AddOperation(opspec)
50093	return op.Output(0)
50094}
50095
50096// Asserts that compilation succeeded.
50097//
50098// This op produces no output and closes the device on failure to ensure all
50099// pending device interactions fail.
50100//
50101// 'compilation_status' is a serialized CompilationResultProto.
50102//
50103// Returns the created operation.
50104func TPUCompileSucceededAssert(scope *Scope, compilation_status tf.Output) (o *tf.Operation) {
50105	if scope.Err() != nil {
50106		return
50107	}
50108	opspec := tf.OpSpec{
50109		Type: "TPUCompileSucceededAssert",
50110		Input: []tf.Input{
50111			compilation_status,
50112		},
50113	}
50114	return scope.AddOperation(opspec)
50115}
50116
50117// An op enabling differentiation of TPU Embeddings.
50118//
50119// This op simply returns its first input, which is assumed to have been sliced
50120// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
50121// this op, and its first argument being a trainable Variable, enables automatic
50122// differentiation of graphs containing embeddings via the TPU Embedding Python
50123// libraries.
50124//
50125// Arguments:
50126//
50127//	embedding_variable: A trainable variable, enabling optimizers to find this op.
50128//	sliced_activations: The embedding activations Tensor to return.
50129//	table_id: The id of the table in the embedding layer configuration from which
50130//
50131// these activations were computed.
50132//
50133//	lookup_id: Identifier of the set of embedding indices which produced these
50134//
50135// activations.
50136func TPUEmbeddingActivations(scope *Scope, embedding_variable tf.Output, sliced_activations tf.Output, table_id int64, lookup_id int64) (output tf.Output) {
50137	if scope.Err() != nil {
50138		return
50139	}
50140	attrs := map[string]interface{}{"table_id": table_id, "lookup_id": lookup_id}
50141	opspec := tf.OpSpec{
50142		Type: "TPUEmbeddingActivations",
50143		Input: []tf.Input{
50144			embedding_variable, sliced_activations,
50145		},
50146		Attrs: attrs,
50147	}
50148	op := scope.AddOperation(opspec)
50149	return op.Output(0)
50150}
50151
50152// Op that loads and executes a TPU program on a TPU device.
50153//
50154// For the internal use of the distributed TPU compiler.
50155func TPUExecute(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType) (results []tf.Output) {
50156	if scope.Err() != nil {
50157		return
50158	}
50159	attrs := map[string]interface{}{"Tresults": Tresults}
50160	opspec := tf.OpSpec{
50161		Type: "TPUExecute",
50162		Input: []tf.Input{
50163			tf.OutputList(args), key,
50164		},
50165		Attrs: attrs,
50166	}
50167	op := scope.AddOperation(opspec)
50168	if scope.Err() != nil {
50169		return
50170	}
50171	var idx int
50172	var err error
50173	if results, idx, err = makeOutputList(op, idx, "results"); err != nil {
50174		scope.UpdateErr("TPUExecute", err)
50175		return
50176	}
50177	return results
50178}
50179
50180// Op that executes a program with optional in-place variable updates.
50181//
50182// It (optionally) reads device variables, loads and executes a TPU program on a
50183// TPU device, and then (optionally) in-place updates variables using the program
50184// outputs, as specified in attributes device_var_reads_indices (program input
50185// indices from directly reading variables) and device_var_updates_indices (program
50186// output indices used to update variables, -1 means no-update/read-only). Such
50187// program outputs consumed by these variables will not appear in the op
50188// output. For the internal use of the distributed TPU compiler.
50189func TPUExecuteAndUpdateVariables(scope *Scope, args []tf.Output, key tf.Output, Tresults []tf.DataType, device_var_reads_indices []int64, device_var_updates_indices []int64) (results []tf.Output) {
50190	if scope.Err() != nil {
50191		return
50192	}
50193	attrs := map[string]interface{}{"Tresults": Tresults, "device_var_reads_indices": device_var_reads_indices, "device_var_updates_indices": device_var_updates_indices}
50194	opspec := tf.OpSpec{
50195		Type: "TPUExecuteAndUpdateVariables",
50196		Input: []tf.Input{
50197			tf.OutputList(args), key,
50198		},
50199		Attrs: attrs,
50200	}
50201	op := scope.AddOperation(opspec)
50202	if scope.Err() != nil {
50203		return
50204	}
50205	var idx int
50206	var err error
50207	if results, idx, err = makeOutputList(op, idx, "results"); err != nil {
50208		scope.UpdateErr("TPUExecuteAndUpdateVariables", err)
50209		return
50210	}
50211	return results
50212}
50213
50214// A TPU core selector Op.
50215//
50216// This Op produces a set of TPU cores (for warm-up) or a single TPU core
50217// (for regular inference) to execute the TPU program on. The output is
50218// consumed by TPUPartitionedCall.
50219//
50220// Returns A vector of 1 or more TPU cores.
50221func TPUOrdinalSelector(scope *Scope) (device_ordinals tf.Output) {
50222	if scope.Err() != nil {
50223		return
50224	}
50225	opspec := tf.OpSpec{
50226		Type: "TPUOrdinalSelector",
50227	}
50228	op := scope.AddOperation(opspec)
50229	return op.Output(0)
50230}
50231
50232// TPUPartitionedInputAttr is an optional argument to TPUPartitionedInput.
50233type TPUPartitionedInputAttr func(optionalAttr)
50234
50235// TPUPartitionedInputPartitionDim sets the optional partition_dim attribute to value.
50236//
50237// value: An integer describes which dimension is partitioned. -1 means
50238// those inputs are replicated.
50239// If not specified, defaults to 0
50240func TPUPartitionedInputPartitionDim(value int64) TPUPartitionedInputAttr {
50241	return func(m optionalAttr) {
50242		m["partition_dim"] = value
50243	}
50244}
50245
50246// An op that groups a list of partitioned inputs together.
50247//
50248// Arguments:
50249//
50250//	inputs: A list of partitioned inputs which must have the same shape.
50251//
50252// Returns A handle which represents the full shape of partitioned tensors.
50253func TPUPartitionedInput(scope *Scope, inputs []tf.Output, optional ...TPUPartitionedInputAttr) (output tf.Output) {
50254	if scope.Err() != nil {
50255		return
50256	}
50257	attrs := map[string]interface{}{}
50258	for _, a := range optional {
50259		a(attrs)
50260	}
50261	opspec := tf.OpSpec{
50262		Type: "TPUPartitionedInput",
50263		Input: []tf.Input{
50264			tf.OutputList(inputs),
50265		},
50266		Attrs: attrs,
50267	}
50268	op := scope.AddOperation(opspec)
50269	return op.Output(0)
50270}
50271
50272// TPUPartitionedOutputAttr is an optional argument to TPUPartitionedOutput.
50273type TPUPartitionedOutputAttr func(optionalAttr)
50274
50275// TPUPartitionedOutputPartitionDim sets the optional partition_dim attribute to value.
50276//
50277// value: An integer describes which dimension is partitioned.
50278// If not specified, defaults to 0
50279func TPUPartitionedOutputPartitionDim(value int64) TPUPartitionedOutputAttr {
50280	return func(m optionalAttr) {
50281		m["partition_dim"] = value
50282	}
50283}
50284
50285// An op that demultiplexes a tensor to be sharded by XLA to a list of
50286// partitioned outputs outside the XLA computation.
50287//
50289// Arguments:
50290//
50291//	inputs: A tensor which represents the full shape of partitioned tensors.
50292//
50293// Returns A list of partitioned outputs which must have the same shape.
50294func TPUPartitionedOutput(scope *Scope, inputs tf.Output, num_splits int64, optional ...TPUPartitionedOutputAttr) (output []tf.Output) {
50295	if scope.Err() != nil {
50296		return
50297	}
50298	attrs := map[string]interface{}{"num_splits": num_splits}
50299	for _, a := range optional {
50300		a(attrs)
50301	}
50302	opspec := tf.OpSpec{
50303		Type: "TPUPartitionedOutput",
50304		Input: []tf.Input{
50305			inputs,
50306		},
50307		Attrs: attrs,
50308	}
50309	op := scope.AddOperation(opspec)
50310	if scope.Err() != nil {
50311		return
50312	}
50313	var idx int
50314	var err error
50315	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
50316		scope.UpdateErr("TPUPartitionedOutput", err)
50317		return
50318	}
50319	return output
50320}
50321
50322// TPUReplicateMetadataAttr is an optional argument to TPUReplicateMetadata.
50323type TPUReplicateMetadataAttr func(optionalAttr)
50324
50325// TPUReplicateMetadataNumCoresPerReplica sets the optional num_cores_per_replica attribute to value.
50326//
50327// value: Number of cores per replica. Used for model parallelism.
50328// If not specified, defaults to 1
50329func TPUReplicateMetadataNumCoresPerReplica(value int64) TPUReplicateMetadataAttr {
50330	return func(m optionalAttr) {
50331		m["num_cores_per_replica"] = value
50332	}
50333}
50334
50335// TPUReplicateMetadataTopology sets the optional topology attribute to value.
50336//
50337// value: TopologyProto indicating the topology of the TPU pod slice.
50338// If not specified, defaults to ""
50339func TPUReplicateMetadataTopology(value string) TPUReplicateMetadataAttr {
50340	return func(m optionalAttr) {
50341		m["topology"] = value
50342	}
50343}
50344
50345// TPUReplicateMetadataUseTpu sets the optional use_tpu attribute to value.
50346//
50347// value: Whether to place the computation on the TPU.
50348// If not specified, defaults to true
50349func TPUReplicateMetadataUseTpu(value bool) TPUReplicateMetadataAttr {
50350	return func(m optionalAttr) {
50351		m["use_tpu"] = value
50352	}
50353}
50354
50355// TPUReplicateMetadataDeviceAssignment sets the optional device_assignment attribute to value.
50356//
50357// value: The assignment of devices for the computation.
50358// If not specified, defaults to {}
50359func TPUReplicateMetadataDeviceAssignment(value []int64) TPUReplicateMetadataAttr {
50360	return func(m optionalAttr) {
50361		m["device_assignment"] = value
50362	}
50363}
50364
50365// TPUReplicateMetadataComputationShape sets the optional computation_shape attribute to value.
50366//
50367// value: DEPRECATED. Use num_cores_per_replica instead.
50368// If not specified, defaults to {}
50369func TPUReplicateMetadataComputationShape(value []int64) TPUReplicateMetadataAttr {
50370	return func(m optionalAttr) {
50371		m["computation_shape"] = value
50372	}
50373}
50374
50375// TPUReplicateMetadataHostComputeCore sets the optional host_compute_core attribute to value.
50376// If not specified, defaults to {}
50377func TPUReplicateMetadataHostComputeCore(value []string) TPUReplicateMetadataAttr {
50378	return func(m optionalAttr) {
50379		m["host_compute_core"] = value
50380	}
50381}
50382
50383// TPUReplicateMetadataPaddingMap sets the optional padding_map attribute to value.
50384// If not specified, defaults to {}
50385func TPUReplicateMetadataPaddingMap(value []string) TPUReplicateMetadataAttr {
50386	return func(m optionalAttr) {
50387		m["padding_map"] = value
50388	}
50389}
50390
50391// TPUReplicateMetadataStepMarkerLocation sets the optional step_marker_location attribute to value.
50392// If not specified, defaults to "STEP_MARK_AT_ENTRY"
50393func TPUReplicateMetadataStepMarkerLocation(value string) TPUReplicateMetadataAttr {
50394	return func(m optionalAttr) {
50395		m["step_marker_location"] = value
50396	}
50397}
50398
50399// TPUReplicateMetadataAllowSoftPlacement sets the optional allow_soft_placement attribute to value.
50400// If not specified, defaults to false
50401func TPUReplicateMetadataAllowSoftPlacement(value bool) TPUReplicateMetadataAttr {
50402	return func(m optionalAttr) {
50403		m["allow_soft_placement"] = value
50404	}
50405}
50406
50407// TPUReplicateMetadataUseSpmdForXlaPartitioning sets the optional use_spmd_for_xla_partitioning attribute to value.
50408// If not specified, defaults to false
50409func TPUReplicateMetadataUseSpmdForXlaPartitioning(value bool) TPUReplicateMetadataAttr {
50410	return func(m optionalAttr) {
50411		m["use_spmd_for_xla_partitioning"] = value
50412	}
50413}
50414
50415// TPUReplicateMetadataTpuCompileOptionsProto sets the optional tpu_compile_options_proto attribute to value.
50416// If not specified, defaults to ""
50417func TPUReplicateMetadataTpuCompileOptionsProto(value string) TPUReplicateMetadataAttr {
50418	return func(m optionalAttr) {
50419		m["tpu_compile_options_proto"] = value
50420	}
50421}
50422
50423// Metadata indicating how the TPU computation should be replicated.
50424//
50425// This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
50426//
50427// Arguments:
50428//
50429//	num_replicas: Number of replicas of the computation
50430//
50431// Returns the created operation.
50432func TPUReplicateMetadata(scope *Scope, num_replicas int64, optional ...TPUReplicateMetadataAttr) (o *tf.Operation) {
50433	if scope.Err() != nil {
50434		return
50435	}
50436	attrs := map[string]interface{}{"num_replicas": num_replicas}
50437	for _, a := range optional {
50438		a(attrs)
50439	}
50440	opspec := tf.OpSpec{
50441		Type: "TPUReplicateMetadata",
50442
50443		Attrs: attrs,
50444	}
50445	return scope.AddOperation(opspec)
50446}
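
// The optional attributes above compose as functional options. This sketch
// is for illustration only (the op is intended for internal TPU tooling)
// and shows several options applied in one call.
//
// ```
// s := NewScope()
// meta := TPUReplicateMetadata(s, 2,
// 	TPUReplicateMetadataNumCoresPerReplica(1),
// 	TPUReplicateMetadataUseTpu(true))
// _ = meta
// ```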
50447
50448// TPUReplicatedInputAttr is an optional argument to TPUReplicatedInput.
50449type TPUReplicatedInputAttr func(optionalAttr)
50450
50451// TPUReplicatedInputIsMirroredVariable sets the optional is_mirrored_variable attribute to value.
50452// If not specified, defaults to false
50453func TPUReplicatedInputIsMirroredVariable(value bool) TPUReplicatedInputAttr {
50454	return func(m optionalAttr) {
50455		m["is_mirrored_variable"] = value
50456	}
50457}
50458
50459// TPUReplicatedInputIndex sets the optional index attribute to value.
50460// If not specified, defaults to -1
50461func TPUReplicatedInputIndex(value int64) TPUReplicatedInputAttr {
50462	return func(m optionalAttr) {
50463		m["index"] = value
50464	}
50465}
50466
50467// TPUReplicatedInputIsPacked sets the optional is_packed attribute to value.
50468// If not specified, defaults to false
50469func TPUReplicatedInputIsPacked(value bool) TPUReplicatedInputAttr {
50470	return func(m optionalAttr) {
50471		m["is_packed"] = value
50472	}
50473}
50474
50475// Connects N inputs to an N-way replicated TPU computation.
50476//
50477// This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
50478// Each replicated input has the same shape and type as the output.
50479//
50480// For example:
50481// ```
50482// %a = "tf.opA"()
50483// %b = "tf.opB"()
50484// %replicated_input = "tf.TPUReplicatedInput"(%a, %b)
50485// %computation = "tf.Computation"(%replicated_input)
50486// ```
50487// The above computation has a replicated input of two replicas.
50488func TPUReplicatedInput(scope *Scope, inputs []tf.Output, optional ...TPUReplicatedInputAttr) (output tf.Output) {
50489	if scope.Err() != nil {
50490		return
50491	}
50492	attrs := map[string]interface{}{}
50493	for _, a := range optional {
50494		a(attrs)
50495	}
50496	opspec := tf.OpSpec{
50497		Type: "TPUReplicatedInput",
50498		Input: []tf.Input{
50499			tf.OutputList(inputs),
50500		},
50501		Attrs: attrs,
50502	}
50503	op := scope.AddOperation(opspec)
50504	return op.Output(0)
50505}
50506
50507// Connects N outputs from an N-way replicated TPU computation.
50508//
50509// This operation holds a replicated output from a `tpu.replicate()` computation subgraph.
50510// Each replicated output has the same shape and type as the input.
50511//
50512// For example:
50513// ```
50514// %computation = "tf.Computation"()
50515// %replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
50516// ```
50517// The above computation has a replicated output of two replicas.
50518func TPUReplicatedOutput(scope *Scope, input tf.Output, num_replicas int64) (outputs []tf.Output) {
50519	if scope.Err() != nil {
50520		return
50521	}
50522	attrs := map[string]interface{}{"num_replicas": num_replicas}
50523	opspec := tf.OpSpec{
50524		Type: "TPUReplicatedOutput",
50525		Input: []tf.Input{
50526			input,
50527		},
50528		Attrs: attrs,
50529	}
50530	op := scope.AddOperation(opspec)
50531	if scope.Err() != nil {
50532		return
50533	}
50534	var idx int
50535	var err error
50536	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
50537		scope.UpdateErr("TPUReplicatedOutput", err)
50538		return
50539	}
50540	return outputs
50541}
50542
50543// Op that reshards on-device TPU variables to specified state.
50544//
50545// Internal use only.
50546//
50547// The sharding state is represented as the key of the compilation that generated
50548// the sharding/unsharding programs along with the main program. new_format_key
50549// specifies the desired state, and format_state_var is the current state of the
50550// variables.
50551//
50552// Returns the created operation.
50553func TPUReshardVariables(scope *Scope, vars []tf.Output, new_format_key tf.Output, format_state_var tf.Output) (o *tf.Operation) {
50554	if scope.Err() != nil {
50555		return
50556	}
50557	opspec := tf.OpSpec{
50558		Type: "TPUReshardVariables",
50559		Input: []tf.Input{
50560			tf.OutputList(vars), new_format_key, format_state_var,
50561		},
50562	}
50563	return scope.AddOperation(opspec)
50564}
50565
50566// Round-robin load balancing on TPU cores.
50567//
50568// A load balancing op that round-robins among TPU cores.
50569//
50570// This op round-robins between the integers in [0, NumTPUCoresVisiblePerHost]. It
50571// is useful for interfacing with TensorFlow ops that take as input a TPU core on
50572// which to execute computations, such as `TPUPartitionedCall`.
50573//
50574// device_ordinal: An integer in [0, NumTPUCoresVisiblePerHost].
50575func TPURoundRobin(scope *Scope) (device_ordinal tf.Output) {
50576	if scope.Err() != nil {
50577		return
50578	}
50579	opspec := tf.OpSpec{
50580		Type: "TPURoundRobin",
50581	}
50582	op := scope.AddOperation(opspec)
50583	return op.Output(0)
50584}
50585
50586// TakeDatasetAttr is an optional argument to TakeDataset.
50587type TakeDatasetAttr func(optionalAttr)
50588
50589// TakeDatasetMetadata sets the optional metadata attribute to value.
50590// If not specified, defaults to ""
50591func TakeDatasetMetadata(value string) TakeDatasetAttr {
50592	return func(m optionalAttr) {
50593		m["metadata"] = value
50594	}
50595}
50596
50597// Creates a dataset that contains `count` elements from the `input_dataset`.
50598//
50599// Arguments:
50600//
50601//	count: A scalar representing the number of elements from the `input_dataset`
50602//
50603// that should be taken. A value of `-1` indicates that all of `input_dataset`
50604// is taken.
50605func TakeDataset(scope *Scope, input_dataset tf.Output, count tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...TakeDatasetAttr) (handle tf.Output) {
50606	if scope.Err() != nil {
50607		return
50608	}
50609	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
50610	for _, a := range optional {
50611		a(attrs)
50612	}
50613	opspec := tf.OpSpec{
50614		Type: "TakeDataset",
50615		Input: []tf.Input{
50616			input_dataset, count,
50617		},
50618		Attrs: attrs,
50619	}
50620	op := scope.AddOperation(opspec)
50621	return op.Output(0)
50622}
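
// Dataset wrappers chain by passing the variant handle of one dataset as
// the input of the next. A sketch (with a hypothetical path and
// illustrative output types/shapes) taking the first 10 records of a
// TFRecord dataset:
//
// ```
// s := NewScope()
// files := Const(s, []string{"/tmp/data.tfrecord"}) // hypothetical path
// records := TFRecordDataset(s, files, Const(s, ""), Const(s, int64(0)))
// taken := TakeDataset(s, records, Const(s, int64(10)),
// 	[]tf.DataType{tf.String}, []tf.Shape{tf.ScalarShape()})
// _ = taken
// ```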
50623
50624// TakeManySparseFromTensorsMapAttr is an optional argument to TakeManySparseFromTensorsMap.
50625type TakeManySparseFromTensorsMapAttr func(optionalAttr)
50626
50627// TakeManySparseFromTensorsMapContainer sets the optional container attribute to value.
50628//
50629// value: The container name for the `SparseTensorsMap` read by this op.
50630// If not specified, defaults to ""
50631func TakeManySparseFromTensorsMapContainer(value string) TakeManySparseFromTensorsMapAttr {
50632	return func(m optionalAttr) {
50633		m["container"] = value
50634	}
50635}
50636
50637// TakeManySparseFromTensorsMapSharedName sets the optional shared_name attribute to value.
50638//
50639// value: The shared name for the `SparseTensorsMap` read by this op.
50640// It should not be blank; rather the `shared_name` or unique Operation name
50641// of the Op that created the original `SparseTensorsMap` should be used.
50642// If not specified, defaults to ""
50643func TakeManySparseFromTensorsMapSharedName(value string) TakeManySparseFromTensorsMapAttr {
50644	return func(m optionalAttr) {
50645		m["shared_name"] = value
50646	}
50647}
50648
50649// Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.
50650//
50651// The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
50652// `N` is the minibatch size and the rows correspond to the output handles of
50653// `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
50654// original `SparseTensor` objects that went into the given input ops must all
50655// match.  When the final `SparseTensor` is created, it has rank one
50656// higher than the ranks of the incoming `SparseTensor` objects
50657// (they have been concatenated along a new row dimension on the left).
50658//
50659// The output `SparseTensor` object's shape values for all dimensions but the
50660// first are the max across the input `SparseTensor` objects' shape values
50661// for the corresponding dimensions.  Its first shape value is `N`, the minibatch
50662// size.
50663//
50664// The input `SparseTensor` objects' indices are assumed ordered in
50665// standard lexicographic order.  If this is not the case, after this
50666// step run `SparseReorder` to restore index ordering.
50667//
50668// For example, if the handles represent an input, which is a `[2, 3]` matrix
50669// representing two original `SparseTensor` objects:
50670//
50671// ```
50672//
50673//	index = [ 0]
50674//	        [10]
50675//	        [20]
50676//	values = [1, 2, 3]
50677//	shape = [50]
50678//
50679// ```
50680//
50681// and
50682//
50683// ```
50684//
50685//	index = [ 2]
50686//	        [10]
50687//	values = [4, 5]
50688//	shape = [30]
50689//
50690// ```
50691//
50692// then the final `SparseTensor` will be:
50693//
50694// ```
50695//
50696//	index = [0  0]
50697//	        [0 10]
50698//	        [0 20]
50699//	        [1  2]
50700//	        [1 10]
50701//	values = [1, 2, 3, 4, 5]
50702//	shape = [2 50]
50703//
50704// ```
50705//
50706// Arguments:
50707//
50708//	sparse_handles: 1-D, The `N` serialized `SparseTensor` objects.
50709//
50710// Shape: `[N]`.
50711//
50712//	dtype: The `dtype` of the `SparseTensor` objects stored in the
50713//
50714// `SparseTensorsMap`.
50715//
50716// Returns:
50717//
50718//	sparse_indices: 2-D.  The `indices` of the minibatch `SparseTensor`.
50719//	sparse_values: 1-D.  The `values` of the minibatch `SparseTensor`.
50720//	sparse_shape: 1-D.  The `shape` of the minibatch `SparseTensor`.
50721func TakeManySparseFromTensorsMap(scope *Scope, sparse_handles tf.Output, dtype tf.DataType, optional ...TakeManySparseFromTensorsMapAttr) (sparse_indices tf.Output, sparse_values tf.Output, sparse_shape tf.Output) {
50722	if scope.Err() != nil {
50723		return
50724	}
50725	attrs := map[string]interface{}{"dtype": dtype}
50726	for _, a := range optional {
50727		a(attrs)
50728	}
50729	opspec := tf.OpSpec{
50730		Type: "TakeManySparseFromTensorsMap",
50731		Input: []tf.Input{
50732			sparse_handles,
50733		},
50734		Attrs: attrs,
50735	}
50736	op := scope.AddOperation(opspec)
50737	return op.Output(0), op.Output(1), op.Output(2)
50738}
50739
50740// Computes tan of x element-wise.
50741//
50742//	Given an input tensor, this function computes the tangent of every
50743//	element in the tensor. Input range is `(-inf, inf)` and
50744//	output range is `(-inf, inf)`. For non-finite inputs (`-inf` or `inf`),
50745//	`nan` is returned.
50746//
50747//	```python
50748//	x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
50749//	tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
50750//	```
50751func Tan(scope *Scope, x tf.Output) (y tf.Output) {
50752	if scope.Err() != nil {
50753		return
50754	}
50755	opspec := tf.OpSpec{
50756		Type: "Tan",
50757		Input: []tf.Input{
50758			x,
50759		},
50760	}
50761	op := scope.AddOperation(opspec)
50762	return op.Output(0)
50763}
50764
50765// Computes hyperbolic tangent of `x` element-wise.
50766//
50767//	Given an input tensor, this function computes the hyperbolic tangent of every
50768//	element in the tensor. Input range is `[-inf, inf]` and
50769//	output range is `[-1,1]`.
50770//
50771//	>>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
50772//	>>> tf.math.tanh(x)
50773//	<tf.Tensor: shape=(8,), dtype=float32, numpy=
50774//	array([-1.0, -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
50775//	        0.9640276 ,  0.9950547 ,  1.0], dtype=float32)>
50776func Tanh(scope *Scope, x tf.Output) (y tf.Output) {
50777	if scope.Err() != nil {
50778		return
50779	}
50780	opspec := tf.OpSpec{
50781		Type: "Tanh",
50782		Input: []tf.Input{
50783			x,
50784		},
50785	}
50786	op := scope.AddOperation(opspec)
50787	return op.Output(0)
50788}
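
// End-to-end sketch for an element-wise op such as Tanh: build the graph,
// finalize the scope, and run a session. `Scope.Finalize`, `tf.NewSession`,
// and `tf.NewTensor` are assumed from the surrounding Go bindings; error
// handling is elided for brevity.
//
// ```
// s := NewScope()
// x := Placeholder(s, tf.Float)
// y := Tanh(s, x)
// graph, _ := s.Finalize()
// sess, _ := tf.NewSession(graph, nil)
// in, _ := tf.NewTensor([]float32{-1, 0, 1})
// out, _ := sess.Run(map[tf.Output]*tf.Tensor{x: in}, []tf.Output{y}, nil)
// _ = out
// ```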
50789
50790// Computes the gradient for the tanh of `x` wrt its input.
50791//
50792// Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
50793// is the corresponding input gradient.
50794func TanhGrad(scope *Scope, y tf.Output, dy tf.Output) (z tf.Output) {
50795	if scope.Err() != nil {
50796		return
50797	}
50798	opspec := tf.OpSpec{
50799		Type: "TanhGrad",
50800		Input: []tf.Input{
50801			y, dy,
50802		},
50803	}
50804	op := scope.AddOperation(opspec)
50805	return op.Output(0)
50806}
50807
50808// Deprecated. Use TensorArrayCloseV3
50809//
50810// DEPRECATED at GraphDef version 26: Use TensorArrayCloseV3
50811//
50812// Returns the created operation.
50813func TensorArrayCloseV2(scope *Scope, handle tf.Output) (o *tf.Operation) {
50814	if scope.Err() != nil {
50815		return
50816	}
50817	opspec := tf.OpSpec{
50818		Type: "TensorArrayCloseV2",
50819		Input: []tf.Input{
50820			handle,
50821		},
50822	}
50823	return scope.AddOperation(opspec)
50824}
50825
50826// Delete the TensorArray from its resource container.
50827//
50828// This enables the user to close and release the resource in the middle
50829// of a step/run.
50830//
50831// Arguments:
50832//
50833//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
50834//
50835// Returns the created operation.
50836func TensorArrayCloseV3(scope *Scope, handle tf.Output) (o *tf.Operation) {
50837	if scope.Err() != nil {
50838		return
50839	}
50840	opspec := tf.OpSpec{
50841		Type: "TensorArrayCloseV3",
50842		Input: []tf.Input{
50843			handle,
50844		},
50845	}
50846	return scope.AddOperation(opspec)
50847}
50848
50849// TensorArrayConcatV2Attr is an optional argument to TensorArrayConcatV2.
50850type TensorArrayConcatV2Attr func(optionalAttr)
50851
50852// TensorArrayConcatV2ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
50853// If not specified, defaults to {unknown_rank:true}
50854func TensorArrayConcatV2ElementShapeExcept0(value tf.Shape) TensorArrayConcatV2Attr {
50855	return func(m optionalAttr) {
50856		m["element_shape_except0"] = value
50857	}
50858}
50859
50860// Deprecated. Use TensorArrayConcatV3
50861func TensorArrayConcatV2(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV2Attr) (value tf.Output, lengths tf.Output) {
50862	if scope.Err() != nil {
50863		return
50864	}
50865	attrs := map[string]interface{}{"dtype": dtype}
50866	for _, a := range optional {
50867		a(attrs)
50868	}
50869	opspec := tf.OpSpec{
50870		Type: "TensorArrayConcatV2",
50871		Input: []tf.Input{
50872			handle, flow_in,
50873		},
50874		Attrs: attrs,
50875	}
50876	op := scope.AddOperation(opspec)
50877	return op.Output(0), op.Output(1)
50878}
50879
50880// TensorArrayConcatV3Attr is an optional argument to TensorArrayConcatV3.
50881type TensorArrayConcatV3Attr func(optionalAttr)
50882
50883// TensorArrayConcatV3ElementShapeExcept0 sets the optional element_shape_except0 attribute to value.
50884//
50885// value: The expected shape of an element, if known,
50886// excluding the first dimension. Used to validate the shapes of
50887// TensorArray elements. If this shape is not fully specified, concatenating
50888// zero-size TensorArrays is an error.
50889// If not specified, defaults to {unknown_rank:true}
50890func TensorArrayConcatV3ElementShapeExcept0(value tf.Shape) TensorArrayConcatV3Attr {
50891	return func(m optionalAttr) {
50892		m["element_shape_except0"] = value
50893	}
50894}
50895
50896// Concatenate the elements from the TensorArray into output `value`.
50897//
50898// Takes `T` elements of shapes
50899//
50900//	```
50901//	(n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
50902//	```
50903//
50904// and concatenates them into a Tensor of shape:
50905//
50906//	```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```
50907//
50908// All elements must have the same shape (excepting the first dimension).
50909//
50910// Arguments:
50911//
50912//	handle: The handle to a TensorArray.
50913//	flow_in: A float scalar that enforces proper chaining of operations.
50914//	dtype: The type of the element that is returned.
50915//
50916// Returns:
50917//
50918//	value: All of the elements in the TensorArray, concatenated along the first
50919//
50920// axis.
50921//
50922//	lengths: A vector of the row sizes of the original T elements in the
50923//
50924// value output.  In the example above, this would be the values:
50925// `(n0, n1, ..., n(T-1))`.
50926func TensorArrayConcatV3(scope *Scope, handle tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayConcatV3Attr) (value tf.Output, lengths tf.Output) {
50927	if scope.Err() != nil {
50928		return
50929	}
50930	attrs := map[string]interface{}{"dtype": dtype}
50931	for _, a := range optional {
50932		a(attrs)
50933	}
50934	opspec := tf.OpSpec{
50935		Type: "TensorArrayConcatV3",
50936		Input: []tf.Input{
50937			handle, flow_in,
50938		},
50939		Attrs: attrs,
50940	}
50941	op := scope.AddOperation(opspec)
50942	return op.Output(0), op.Output(1)
50943}
50944
50945// TensorArrayGatherV2Attr is an optional argument to TensorArrayGatherV2.
50946type TensorArrayGatherV2Attr func(optionalAttr)
50947
50948// TensorArrayGatherV2ElementShape sets the optional element_shape attribute to value.
50949// If not specified, defaults to {unknown_rank:true}
50950func TensorArrayGatherV2ElementShape(value tf.Shape) TensorArrayGatherV2Attr {
50951	return func(m optionalAttr) {
50952		m["element_shape"] = value
50953	}
50954}
50955
50956// Deprecated. Use TensorArrayGatherV3
50957//
50958// DEPRECATED at GraphDef version 26: Use TensorArrayGatherV3
50959func TensorArrayGatherV2(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV2Attr) (value tf.Output) {
50960	if scope.Err() != nil {
50961		return
50962	}
50963	attrs := map[string]interface{}{"dtype": dtype}
50964	for _, a := range optional {
50965		a(attrs)
50966	}
50967	opspec := tf.OpSpec{
50968		Type: "TensorArrayGatherV2",
50969		Input: []tf.Input{
50970			handle, indices, flow_in,
50971		},
50972		Attrs: attrs,
50973	}
50974	op := scope.AddOperation(opspec)
50975	return op.Output(0)
50976}
50977
50978// TensorArrayGatherV3Attr is an optional argument to TensorArrayGatherV3.
50979type TensorArrayGatherV3Attr func(optionalAttr)
50980
50981// TensorArrayGatherV3ElementShape sets the optional element_shape attribute to value.
50982//
50983// value: The expected shape of an element, if known. Used to
50984// validate the shapes of TensorArray elements. If this shape is not
50985// fully specified, gathering zero-size TensorArrays is an error.
50986// If not specified, defaults to {unknown_rank:true}
50987func TensorArrayGatherV3ElementShape(value tf.Shape) TensorArrayGatherV3Attr {
50988	return func(m optionalAttr) {
50989		m["element_shape"] = value
50990	}
50991}
50992
50993// Gather specific elements from the TensorArray into output `value`.
50994//
50995// All elements selected by `indices` must have the same shape.
50996//
50997// Arguments:
50998//
50999//	handle: The handle to a TensorArray.
51000//	indices: The locations in the TensorArray from which to read tensor elements.
51001//	flow_in: A float scalar that enforces proper chaining of operations.
51002//	dtype: The type of the element that is returned.
51003//
51004// Returns All of the elements in the TensorArray, concatenated along a new
51005// axis (the new dimension 0).
51006func TensorArrayGatherV3(scope *Scope, handle tf.Output, indices tf.Output, flow_in tf.Output, dtype tf.DataType, optional ...TensorArrayGatherV3Attr) (value tf.Output) {
51007	if scope.Err() != nil {
51008		return
51009	}
51010	attrs := map[string]interface{}{"dtype": dtype}
51011	for _, a := range optional {
51012		a(attrs)
51013	}
51014	opspec := tf.OpSpec{
51015		Type: "TensorArrayGatherV3",
51016		Input: []tf.Input{
51017			handle, indices, flow_in,
51018		},
51019		Attrs: attrs,
51020	}
51021	op := scope.AddOperation(opspec)
51022	return op.Output(0)
51023}
51024
51025// Deprecated. Use TensorArrayGradV3
51026//
51027// DEPRECATED at GraphDef version 26: Use TensorArrayGradV3
51028func TensorArrayGradV2(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output) {
51029	if scope.Err() != nil {
51030		return
51031	}
51032	attrs := map[string]interface{}{"source": source}
51033	opspec := tf.OpSpec{
51034		Type: "TensorArrayGradV2",
51035		Input: []tf.Input{
51036			handle, flow_in,
51037		},
51038		Attrs: attrs,
51039	}
51040	op := scope.AddOperation(opspec)
51041	return op.Output(0)
51042}
51043
51044// Creates a TensorArray for storing the gradients of values in the given handle.
51045//
51046// If the given TensorArray gradient already exists, returns a reference to it.
51047//
51048// Locks the size of the original TensorArray by disabling its dynamic size flag.
51049//
51050// **A note about the input flow_in:**
51051//
51052// The handle flow_in forces the execution of the gradient lookup to occur
51053// only after certain other operations have occurred.  For example, when
51054// the forward TensorArray is dynamically sized, writes to this TensorArray
51055// may resize the object.  The gradient TensorArray is statically sized based
51056// on the size of the forward TensorArray when this operation executes.
51057// Furthermore, the size of the forward TensorArray is frozen by this call.
51058// As a result, the flow is used to ensure that the call to generate the gradient
51059// TensorArray only happens after all writes are executed.
51060//
51061// In the case of dynamically sized TensorArrays, gradient computation should
51062// only be performed on read operations that have themselves been chained via
51063// flow to occur only after all writes have executed. That way the final size
51064// of the forward TensorArray is known when this operation is called.
51065//
51066// **A note about the source attribute:**
51067//
51068// TensorArray gradient calls use an accumulator TensorArray object.  If
51069// multiple gradients are calculated and run in the same session, the multiple
51070// gradient nodes may accidentally flow through the same accumulator TensorArray.
51071// This double counts and generally breaks the TensorArray gradient flow.
51072//
51073// The solution is to identify which gradient call this particular
51074// TensorArray gradient is being called in.  This is performed by identifying
51075// a unique string (e.g. "gradients", "gradients_1", ...) from the input
51076// gradient Tensor's name.  This string is used as a suffix when creating
51077// the TensorArray gradient object here (the attribute `source`).
51078//
51079// The attribute `source` is added as a suffix to the forward TensorArray's
51080// name when performing the creation / lookup, so that each separate gradient
51081// calculation gets its own TensorArray accumulator.
51082//
51083// Arguments:
51084//
51085//	handle: The handle to the forward TensorArray.
51086//	flow_in: A float scalar that enforces proper chaining of operations.
51087//	source: The gradient source string, used to decide which gradient TensorArray
51088//
51089// to return.
51090func TensorArrayGradV3(scope *Scope, handle tf.Output, flow_in tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
51091	if scope.Err() != nil {
51092		return
51093	}
51094	attrs := map[string]interface{}{"source": source}
51095	opspec := tf.OpSpec{
51096		Type: "TensorArrayGradV3",
51097		Input: []tf.Input{
51098			handle, flow_in,
51099		},
51100		Attrs: attrs,
51101	}
51102	op := scope.AddOperation(opspec)
51103	return op.Output(0), op.Output(1)
51104}
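
// Sketch (continuing from a scope `s` that already holds a forward
// TensorArray `handle` and its `flow`): the `source` string keeps separate
// gradient computations on separate accumulators, as described above.
//
// ```
// gradHandle, gradFlow := TensorArrayGradV3(s, handle, flow, "gradients")
// _, _ = gradHandle, gradFlow
// ```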
51105
51106// Creates a TensorArray for storing multiple gradients of values in the given handle.
51107//
51108// Similar to TensorArrayGradV3. However, it creates an accumulator with an
51109// expanded shape compared to the input TensorArray whose gradient is being
51110// computed. This enables multiple gradients for the same TensorArray to be
51111// calculated using the same accumulator.
51112//
51113// Arguments:
51114//
51115//	handle: The handle to the forward TensorArray.
51116//	flow_in: A float scalar that enforces proper chaining of operations.
51117//	shape_to_prepend: An int32 vector representing a shape. Elements in the gradient accumulator will
51118//
51119// have shape which is this shape_to_prepend value concatenated with shape of the
51120// elements in the TensorArray corresponding to the input handle.
51121//
51122//	source: The gradient source string, used to decide which gradient TensorArray
51123//
51124// to return.
51125func TensorArrayGradWithShape(scope *Scope, handle tf.Output, flow_in tf.Output, shape_to_prepend tf.Output, source string) (grad_handle tf.Output, flow_out tf.Output) {
51126	if scope.Err() != nil {
51127		return
51128	}
51129	attrs := map[string]interface{}{"source": source}
51130	opspec := tf.OpSpec{
51131		Type: "TensorArrayGradWithShape",
51132		Input: []tf.Input{
51133			handle, flow_in, shape_to_prepend,
51134		},
51135		Attrs: attrs,
51136	}
51137	op := scope.AddOperation(opspec)
51138	return op.Output(0), op.Output(1)
51139}
51140
51141// Deprecated. Use TensorArrayReadV3
51142//
51143// DEPRECATED at GraphDef version 26: Use TensorArrayReadV3
51144func TensorArrayReadV2(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
51145	if scope.Err() != nil {
51146		return
51147	}
51148	attrs := map[string]interface{}{"dtype": dtype}
51149	opspec := tf.OpSpec{
51150		Type: "TensorArrayReadV2",
51151		Input: []tf.Input{
51152			handle, index, flow_in,
51153		},
51154		Attrs: attrs,
51155	}
51156	op := scope.AddOperation(opspec)
51157	return op.Output(0)
51158}
51159
51160// Read an element from the TensorArray into output `value`.
51161//
51162// Arguments:
51163//
51164//	handle: The handle to a TensorArray.
51165//
51166//	flow_in: A float scalar that enforces proper chaining of operations.
51167//	dtype: The type of the element that is returned.
51168//
51169// Returns The tensor that is read from the TensorArray.
51170func TensorArrayReadV3(scope *Scope, handle tf.Output, index tf.Output, flow_in tf.Output, dtype tf.DataType) (value tf.Output) {
51171	if scope.Err() != nil {
51172		return
51173	}
51174	attrs := map[string]interface{}{"dtype": dtype}
51175	opspec := tf.OpSpec{
51176		Type: "TensorArrayReadV3",
51177		Input: []tf.Input{
51178			handle, index, flow_in,
51179		},
51180		Attrs: attrs,
51181	}
51182	op := scope.AddOperation(opspec)
51183	return op.Output(0)
51184}
51185
51186// Deprecated. Use TensorArrayScatterV3
51187//
51188// DEPRECATED at GraphDef version 26: Use TensorArrayScatterV3
51189func TensorArrayScatterV2(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
51190	if scope.Err() != nil {
51191		return
51192	}
51193	opspec := tf.OpSpec{
51194		Type: "TensorArrayScatterV2",
51195		Input: []tf.Input{
51196			handle, indices, value, flow_in,
51197		},
51198	}
51199	op := scope.AddOperation(opspec)
51200	return op.Output(0)
51201}
51202
51203// Scatter the data from the input value into specific TensorArray elements.
51204//
51205// `indices` must be a vector; its length must match the first dim of `value`.
51206//
51207// Arguments:
51208//
51209//	handle: The handle to a TensorArray.
51210//	indices: The locations at which to write the tensor elements.
51211//	value: The concatenated tensor to write to the TensorArray.
51212//	flow_in: A float scalar that enforces proper chaining of operations.
51213//
51214// Returns A float scalar that enforces proper chaining of operations.
51215func TensorArrayScatterV3(scope *Scope, handle tf.Output, indices tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
51216	if scope.Err() != nil {
51217		return
51218	}
51219	opspec := tf.OpSpec{
51220		Type: "TensorArrayScatterV3",
51221		Input: []tf.Input{
51222			handle, indices, value, flow_in,
51223		},
51224	}
51225	op := scope.AddOperation(opspec)
51226	return op.Output(0)
51227}
51228
51229// Deprecated. Use TensorArraySizeV3
51230//
51231// DEPRECATED at GraphDef version 26: Use TensorArraySizeV3
51232func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
51233	if scope.Err() != nil {
51234		return
51235	}
51236	opspec := tf.OpSpec{
51237		Type: "TensorArraySizeV2",
51238		Input: []tf.Input{
51239			handle, flow_in,
51240		},
51241	}
51242	op := scope.AddOperation(opspec)
51243	return op.Output(0)
51244}
51245
51246// Get the current size of the TensorArray.
51247//
51248// Arguments:
51249//
51250//	handle: The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
51251//	flow_in: A float scalar that enforces proper chaining of operations.
51252//
51253// Returns The current size of the TensorArray.
51254func TensorArraySizeV3(scope *Scope, handle tf.Output, flow_in tf.Output) (size tf.Output) {
51255	if scope.Err() != nil {
51256		return
51257	}
51258	opspec := tf.OpSpec{
51259		Type: "TensorArraySizeV3",
51260		Input: []tf.Input{
51261			handle, flow_in,
51262		},
51263	}
51264	op := scope.AddOperation(opspec)
51265	return op.Output(0)
51266}
51267
51268// Deprecated. Use TensorArraySplitV3
51269//
51270// DEPRECATED at GraphDef version 26: Use TensorArraySplitV3
51271func TensorArraySplitV2(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
51272	if scope.Err() != nil {
51273		return
51274	}
51275	opspec := tf.OpSpec{
51276		Type: "TensorArraySplitV2",
51277		Input: []tf.Input{
51278			handle, value, lengths, flow_in,
51279		},
51280	}
51281	op := scope.AddOperation(opspec)
51282	return op.Output(0)
51283}
51284
51285// Split the data from the input value into TensorArray elements.
51286//
51287// Assuming that `lengths` takes on values
51288//
51289//	```(n0, n1, ..., n(T-1))```
51290//
51291// and that `value` has shape
51292//
51293//	```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,
51294//
51295// this splits values into a TensorArray with T tensors.
51296//
51297// TensorArray index t will be the subtensor of values with starting position
51298//
51299//	```(n0 + n1 + ... + n(t-1), 0, 0, ...)```
51300//
51301// and having size
51302//
51303//	```nt x d0 x d1 x ...```
51304//
51305// Arguments:
51306//
51307//	handle: The handle to a TensorArray.
51308//	value: The concatenated tensor to write to the TensorArray.
51309//	lengths: The vector of lengths specifying how to split the rows of value into the
51310//
51311// TensorArray.
51312//
51313//	flow_in: A float scalar that enforces proper chaining of operations.
51314//
51315// Returns A float scalar that enforces proper chaining of operations.
51316func TensorArraySplitV3(scope *Scope, handle tf.Output, value tf.Output, lengths tf.Output, flow_in tf.Output) (flow_out tf.Output) {
51317	if scope.Err() != nil {
51318		return
51319	}
51320	opspec := tf.OpSpec{
51321		Type: "TensorArraySplitV3",
51322		Input: []tf.Input{
51323			handle, value, lengths, flow_in,
51324		},
51325	}
51326	op := scope.AddOperation(opspec)
51327	return op.Output(0)
51328}
51329
51330// TensorArrayV2Attr is an optional argument to TensorArrayV2.
51331type TensorArrayV2Attr func(optionalAttr)
51332
51333// TensorArrayV2ElementShape sets the optional element_shape attribute to value.
51334// If not specified, defaults to {unknown_rank:true}
51335func TensorArrayV2ElementShape(value tf.Shape) TensorArrayV2Attr {
51336	return func(m optionalAttr) {
51337		m["element_shape"] = value
51338	}
51339}
51340
51341// TensorArrayV2DynamicSize sets the optional dynamic_size attribute to value.
51342// If not specified, defaults to false
51343func TensorArrayV2DynamicSize(value bool) TensorArrayV2Attr {
51344	return func(m optionalAttr) {
51345		m["dynamic_size"] = value
51346	}
51347}
51348
51349// TensorArrayV2ClearAfterRead sets the optional clear_after_read attribute to value.
51350// If not specified, defaults to true
51351func TensorArrayV2ClearAfterRead(value bool) TensorArrayV2Attr {
51352	return func(m optionalAttr) {
51353		m["clear_after_read"] = value
51354	}
51355}
51356
51357// TensorArrayV2TensorArrayName sets the optional tensor_array_name attribute to value.
51358// If not specified, defaults to ""
51359func TensorArrayV2TensorArrayName(value string) TensorArrayV2Attr {
51360	return func(m optionalAttr) {
51361		m["tensor_array_name"] = value
51362	}
51363}
51364
51365// Deprecated. Use TensorArrayV3
51366//
51367// DEPRECATED at GraphDef version 26: Use TensorArrayV3
51368func TensorArrayV2(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV2Attr) (handle tf.Output) {
51369	if scope.Err() != nil {
51370		return
51371	}
51372	attrs := map[string]interface{}{"dtype": dtype}
51373	for _, a := range optional {
51374		a(attrs)
51375	}
51376	opspec := tf.OpSpec{
51377		Type: "TensorArrayV2",
51378		Input: []tf.Input{
51379			size,
51380		},
51381		Attrs: attrs,
51382	}
51383	op := scope.AddOperation(opspec)
51384	return op.Output(0)
51385}
51386
51387// TensorArrayV3Attr is an optional argument to TensorArrayV3.
51388type TensorArrayV3Attr func(optionalAttr)
51389
51390// TensorArrayV3ElementShape sets the optional element_shape attribute to value.
51391//
51392// value: The expected shape of an element, if known. Used to
51393// validate the shapes of TensorArray elements. If this shape is not
51394// fully specified, gathering zero-size TensorArrays is an error.
51395// If not specified, defaults to {unknown_rank:true}
51396func TensorArrayV3ElementShape(value tf.Shape) TensorArrayV3Attr {
51397	return func(m optionalAttr) {
51398		m["element_shape"] = value
51399	}
51400}
51401
51402// TensorArrayV3DynamicSize sets the optional dynamic_size attribute to value.
51403//
51404// value: A boolean that determines whether writes to the TensorArray
51405// are allowed to grow the size.  By default, this is not allowed.
51406// If not specified, defaults to false
51407func TensorArrayV3DynamicSize(value bool) TensorArrayV3Attr {
51408	return func(m optionalAttr) {
51409		m["dynamic_size"] = value
51410	}
51411}
51412
51413// TensorArrayV3ClearAfterRead sets the optional clear_after_read attribute to value.
51414//
51415// value: If true (default), Tensors in the TensorArray are cleared
51416// after being read.  This disables multiple read semantics but allows early
51417// release of memory.
51418// If not specified, defaults to true
51419func TensorArrayV3ClearAfterRead(value bool) TensorArrayV3Attr {
51420	return func(m optionalAttr) {
51421		m["clear_after_read"] = value
51422	}
51423}
51424
51425// TensorArrayV3IdenticalElementShapes sets the optional identical_element_shapes attribute to value.
51426//
51427// value: If true (default is false), then all
51428// elements in the TensorArray will be expected to have identical shapes.
51429// This allows certain behaviors, like dynamically checking for
51430// consistent shapes on write, and being able to fill in properly
51431// shaped zero tensors on stack -- even if the element_shape attribute
51432// is not fully defined.
51433// If not specified, defaults to false
51434func TensorArrayV3IdenticalElementShapes(value bool) TensorArrayV3Attr {
51435	return func(m optionalAttr) {
51436		m["identical_element_shapes"] = value
51437	}
51438}
51439
51440// TensorArrayV3TensorArrayName sets the optional tensor_array_name attribute to value.
51441//
51442// value: Overrides the name used for the temporary tensor_array
51443// resource. Default value is the name of the 'TensorArray' op (which
51444// is guaranteed unique).
51445// If not specified, defaults to ""
51446func TensorArrayV3TensorArrayName(value string) TensorArrayV3Attr {
51447	return func(m optionalAttr) {
51448		m["tensor_array_name"] = value
51449	}
51450}
51451
51452// An array of Tensors of given size.
51453//
51454// Write data via Write and read via Read or Pack.
51455//
51456// Arguments:
51457//
51458//	size: The size of the array.
51459//	dtype: The type of the elements on the tensor_array.
51460//
51461// Returns:
51462//
51463//	handle: The handle to the TensorArray.
51464//	flow: A scalar used to control gradient flow.
51465func TensorArrayV3(scope *Scope, size tf.Output, dtype tf.DataType, optional ...TensorArrayV3Attr) (handle tf.Output, flow tf.Output) {
51466	if scope.Err() != nil {
51467		return
51468	}
51469	attrs := map[string]interface{}{"dtype": dtype}
51470	for _, a := range optional {
51471		a(attrs)
51472	}
51473	opspec := tf.OpSpec{
51474		Type: "TensorArrayV3",
51475		Input: []tf.Input{
51476			size,
51477		},
51478		Attrs: attrs,
51479	}
51480	op := scope.AddOperation(opspec)
51481	return op.Output(0), op.Output(1)
51482}
51483
51484// Deprecated. Use TensorArrayWriteV3
51485//
51486// DEPRECATED at GraphDef version 26: Use TensorArrayWriteV3
51487func TensorArrayWriteV2(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
51488	if scope.Err() != nil {
51489		return
51490	}
51491	opspec := tf.OpSpec{
51492		Type: "TensorArrayWriteV2",
51493		Input: []tf.Input{
51494			handle, index, value, flow_in,
51495		},
51496	}
51497	op := scope.AddOperation(opspec)
51498	return op.Output(0)
51499}
51500
51501// Push an element onto the tensor_array.
51502//
51503// Arguments:
51504//
51505//	handle: The handle to a TensorArray.
51506//	index: The position to write to inside the TensorArray.
51507//	value: The tensor to write to the TensorArray.
51508//	flow_in: A float scalar that enforces proper chaining of operations.
51509//
51510// Returns A float scalar that enforces proper chaining of operations.
51511func TensorArrayWriteV3(scope *Scope, handle tf.Output, index tf.Output, value tf.Output, flow_in tf.Output) (flow_out tf.Output) {
51512	if scope.Err() != nil {
51513		return
51514	}
51515	opspec := tf.OpSpec{
51516		Type: "TensorArrayWriteV3",
51517		Input: []tf.Input{
51518			handle, index, value, flow_in,
51519		},
51520	}
51521	op := scope.AddOperation(opspec)
51522	return op.Output(0)
51523}
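
// The flow scalars documented above are what serialize TensorArray
// operations. A sketch of a write followed by a read, threading the write's
// `flow_out` into the read so the read is guaranteed to observe the write:
//
// ```
// s := NewScope()
// handle, flow := TensorArrayV3(s, Const(s, int32(2)), tf.Float)
// flow2 := TensorArrayWriteV3(s, handle, Const(s, int32(0)),
// 	Const(s, []float32{1, 2}), flow)
// elem := TensorArrayReadV3(s, handle, Const(s, int32(0)), flow2, tf.Float)
// _ = elem
// ```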
51524
51525// TensorDatasetAttr is an optional argument to TensorDataset.
51526type TensorDatasetAttr func(optionalAttr)
51527
51528// TensorDatasetMetadata sets the optional metadata attribute to value.
51529// If not specified, defaults to ""
51530func TensorDatasetMetadata(value string) TensorDatasetAttr {
51531	return func(m optionalAttr) {
51532		m["metadata"] = value
51533	}
51534}
51535
51536// Creates a dataset that emits `components` as a tuple of tensors once.
51537func TensorDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape, optional ...TensorDatasetAttr) (handle tf.Output) {
51538	if scope.Err() != nil {
51539		return
51540	}
51541	attrs := map[string]interface{}{"output_shapes": output_shapes}
51542	for _, a := range optional {
51543		a(attrs)
51544	}
51545	opspec := tf.OpSpec{
51546		Type: "TensorDataset",
51547		Input: []tf.Input{
51548			tf.OutputList(components),
51549		},
51550		Attrs: attrs,
51551	}
51552	op := scope.AddOperation(opspec)
51553	return op.Output(0)
51554}
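
// Sketch: emitting a pair of constant tensors once as a single dataset
// element. `tf.MakeShape` and `tf.ScalarShape` are assumed from the
// surrounding Go bindings; the component values are illustrative.
//
// ```
// s := NewScope()
// c1 := Const(s, []int64{1, 2, 3})
// c2 := Const(s, "label")
// ds := TensorDataset(s, []tf.Output{c1, c2},
// 	[]tf.Shape{tf.MakeShape(3), tf.ScalarShape()})
// _ = ds
// ```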
51555
51556// TensorListConcatAttr is an optional argument to TensorListConcat.
51557type TensorListConcatAttr func(optionalAttr)
51558
51559// TensorListConcatElementShape sets the optional element_shape attribute to value.
51560// If not specified, defaults to {unknown_rank:true}
51561func TensorListConcatElementShape(value tf.Shape) TensorListConcatAttr {
51562	return func(m optionalAttr) {
51563		m["element_shape"] = value
51564	}
51565}
51566
51567// Concatenates all tensors in the list along the 0th dimension.
51568//
51569// Requires that all tensors have the same shape except the first dimension.
51570//
51571// input_handle: The input list.
51572// tensor: The concatenated result.
51573// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
51574func TensorListConcat(scope *Scope, input_handle tf.Output, element_dtype tf.DataType, optional ...TensorListConcatAttr) (tensor tf.Output, lengths tf.Output) {
51575	if scope.Err() != nil {
51576		return
51577	}
51578	attrs := map[string]interface{}{"element_dtype": element_dtype}
51579	for _, a := range optional {
51580		a(attrs)
51581	}
51582	opspec := tf.OpSpec{
51583		Type: "TensorListConcat",
51584		Input: []tf.Input{
51585			input_handle,
51586		},
51587		Attrs: attrs,
51588	}
51589	op := scope.AddOperation(opspec)
51590	return op.Output(0), op.Output(1)
51591}
51592
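// The sketch below is illustrative and not part of the generated API: it
// builds a list from the rows of a tensor (TensorListFromTensor, defined
// later in this file) and concatenates it back along dimension 0.
func exampleTensorListConcat(s *Scope) (tensor, lengths tf.Output) {
	t := Const(s.SubScope("t"), [][]float32{{1, 2}, {3, 4}})
	// Each list element is one row of t, i.e. a vector of shape [2].
	list := TensorListFromTensor(s, t, Const(s.SubScope("shape"), []int32{2}))
	return TensorListConcat(s, list, tf.Float)
}
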
51593// Concatenates all tensors in the list along the 0th dimension.
51594//
51595// Requires that all tensors have the same shape except the first dimension.
51596//
51597// input_handle: The input list.
51598// element_shape: The shape of the uninitialized elements in the list. If the first
51599//
51600//	dimension is not -1, it is assumed that all list elements have the same
51601//	leading dim.
51602//
51603// leading_dims: The list of leading dims of uninitialized list elements. Used if
51604//
51605//	the leading dim of input_handle.element_shape or the element_shape input arg
51606//	is not already set.
51607//
51608// tensor: The concatenated result.
51609// lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
51610func TensorListConcatV2(scope *Scope, input_handle tf.Output, element_shape tf.Output, leading_dims tf.Output, element_dtype tf.DataType) (tensor tf.Output, lengths tf.Output) {
51611	if scope.Err() != nil {
51612		return
51613	}
51614	attrs := map[string]interface{}{"element_dtype": element_dtype}
51615	opspec := tf.OpSpec{
51616		Type: "TensorListConcatV2",
51617		Input: []tf.Input{
51618			input_handle, element_shape, leading_dims,
51619		},
51620		Attrs: attrs,
51621	}
51622	op := scope.AddOperation(opspec)
51623	return op.Output(0), op.Output(1)
51624}
51625
51626// The shape of the elements of the given list, as a tensor.
51627//
51628//	input_handle: the list
51629//	element_shape: the shape of elements of the list
51630func TensorListElementShape(scope *Scope, input_handle tf.Output, shape_type tf.DataType) (element_shape tf.Output) {
51631	if scope.Err() != nil {
51632		return
51633	}
51634	attrs := map[string]interface{}{"shape_type": shape_type}
51635	opspec := tf.OpSpec{
51636		Type: "TensorListElementShape",
51637		Input: []tf.Input{
51638			input_handle,
51639		},
51640		Attrs: attrs,
51641	}
51642	op := scope.AddOperation(opspec)
51643	return op.Output(0)
51644}
51645
51646// Creates a TensorList which, when stacked, has the value of `tensor`.
51647//
51648// Each tensor in the result list corresponds to one row of the input tensor.
51649//
51650// tensor: The input tensor.
51651// output_handle: The list.
51652func TensorListFromTensor(scope *Scope, tensor tf.Output, element_shape tf.Output) (output_handle tf.Output) {
51653	if scope.Err() != nil {
51654		return
51655	}
51656	opspec := tf.OpSpec{
51657		Type: "TensorListFromTensor",
51658		Input: []tf.Input{
51659			tensor, element_shape,
51660		},
51661	}
51662	op := scope.AddOperation(opspec)
51663	return op.Output(0)
51664}
51665
51666// Creates a Tensor by indexing into the TensorList.
51667//
51668// Each row in the produced Tensor corresponds to the element in the TensorList
51669// specified by the given index (see `tf.gather`).
51670//
51671// input_handle: The input tensor list.
51672// indices: The indices used to index into the list.
51673// values: The tensor.
51674func TensorListGather(scope *Scope, input_handle tf.Output, indices tf.Output, element_shape tf.Output, element_dtype tf.DataType) (values tf.Output) {
51675	if scope.Err() != nil {
51676		return
51677	}
51678	attrs := map[string]interface{}{"element_dtype": element_dtype}
51679	opspec := tf.OpSpec{
51680		Type: "TensorListGather",
51681		Input: []tf.Input{
51682			input_handle, indices, element_shape,
51683		},
51684		Attrs: attrs,
51685	}
51686	op := scope.AddOperation(opspec)
51687	return op.Output(0)
51688}
51689
51690// Returns the item in the list with the given index.
51691//
51692// input_handle: the list
51693// index: the position in the list from which an element will be retrieved
51694// item: the element at that position
51695func TensorListGetItem(scope *Scope, input_handle tf.Output, index tf.Output, element_shape tf.Output, element_dtype tf.DataType) (item tf.Output) {
51696	if scope.Err() != nil {
51697		return
51698	}
51699	attrs := map[string]interface{}{"element_dtype": element_dtype}
51700	opspec := tf.OpSpec{
51701		Type: "TensorListGetItem",
51702		Input: []tf.Input{
51703			input_handle, index, element_shape,
51704		},
51705		Attrs: attrs,
51706	}
51707	op := scope.AddOperation(opspec)
51708	return op.Output(0)
51709}
51710
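// The sketch below is illustrative and not part of the generated API: it
// reserves a list (TensorListReserve, defined later in this file), writes one
// element with TensorListSetItem, and reads it back with TensorListGetItem.
func exampleTensorListSetGet(s *Scope) tf.Output {
	shape := Const(s.SubScope("shape"), []int32{2})
	list := TensorListReserve(s, shape, Const(s.SubScope("n"), int32(4)), tf.Float)
	list = TensorListSetItem(s,
		list,
		Const(s.SubScope("index"), int32(1)),
		Const(s.SubScope("item"), []float32{5, 6}))
	return TensorListGetItem(s, list, Const(s.SubScope("read"), int32(1)), shape, tf.Float)
}
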
51711// Returns the number of tensors in the input tensor list.
51712//
51713// input_handle: the input list
51714// length: the number of tensors in the list
51715func TensorListLength(scope *Scope, input_handle tf.Output) (length tf.Output) {
51716	if scope.Err() != nil {
51717		return
51718	}
51719	opspec := tf.OpSpec{
51720		Type: "TensorListLength",
51721		Input: []tf.Input{
51722			input_handle,
51723		},
51724	}
51725	op := scope.AddOperation(opspec)
51726	return op.Output(0)
51727}
51728
51729// Returns the last element of the input list as well as a list with all but that element.
51730//
51731// Fails if the list is empty.
51732//
51733// input_handle: the input list
51734// tensor: the last element of the list, which is removed from it
51735// element_dtype: the type of elements in the list
51736// element_shape: the shape of the output tensor
51737func TensorListPopBack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType) (output_handle tf.Output, tensor tf.Output) {
51738	if scope.Err() != nil {
51739		return
51740	}
51741	attrs := map[string]interface{}{"element_dtype": element_dtype}
51742	opspec := tf.OpSpec{
51743		Type: "TensorListPopBack",
51744		Input: []tf.Input{
51745			input_handle, element_shape,
51746		},
51747		Attrs: attrs,
51748	}
51749	op := scope.AddOperation(opspec)
51750	return op.Output(0), op.Output(1)
51751}
51752
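// The sketch below is illustrative and not part of the generated API: with
// TensorListPushBack and TensorListPopBack the list behaves like a stack,
// PopBack returning both the shortened list and the removed element. It
// assumes the EmptyTensorList wrapper generated elsewhere in this file.
func exampleTensorListPushPop(s *Scope) (output_handle, tensor tf.Output) {
	// An empty int32 vector as element_shape denotes scalar elements;
	// a max_num_elements of -1 leaves the list unbounded.
	shape := Const(s.SubScope("shape"), []int32{})
	list := EmptyTensorList(s, shape, Const(s.SubScope("max"), int32(-1)), tf.Float)
	list = TensorListPushBack(s, list, Const(s.SubScope("x"), float32(7)))
	return TensorListPopBack(s, list, shape, tf.Float)
}
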
51753// Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
51754//
51755// tensor: The tensor to put on the list.
51756// input_handle: The old list.
51757// output_handle: A list with the elements of the old list followed by tensor.
51758// element_dtype: the type of elements in the list.
51759// element_shape: a shape compatible with that of elements in the list.
51760func TensorListPushBack(scope *Scope, input_handle tf.Output, tensor tf.Output) (output_handle tf.Output) {
51761	if scope.Err() != nil {
51762		return
51763	}
51764	opspec := tf.OpSpec{
51765		Type: "TensorListPushBack",
51766		Input: []tf.Input{
51767			input_handle, tensor,
51768		},
51769	}
51770	op := scope.AddOperation(opspec)
51771	return op.Output(0)
51772}
51773
51774// Creates a list of the given size with uninitialized elements.
51775//
51776// element_shape: the shape of the future elements of the list
51777// num_elements: the number of elements to reserve
51778// handle: the output list
51779// element_dtype: the desired type of elements in the list.
51780func TensorListReserve(scope *Scope, element_shape tf.Output, num_elements tf.Output, element_dtype tf.DataType) (handle tf.Output) {
51781	if scope.Err() != nil {
51782		return
51783	}
51784	attrs := map[string]interface{}{"element_dtype": element_dtype}
51785	opspec := tf.OpSpec{
51786		Type: "TensorListReserve",
51787		Input: []tf.Input{
51788			element_shape, num_elements,
51789		},
51790		Attrs: attrs,
51791	}
51792	op := scope.AddOperation(opspec)
51793	return op.Output(0)
51794}
51795
51796// Resizes the list.
51797//
51798// input_handle: the input list
51799// size: size of the output list
51800func TensorListResize(scope *Scope, input_handle tf.Output, size tf.Output) (output_handle tf.Output) {
51801	if scope.Err() != nil {
51802		return
51803	}
51804	opspec := tf.OpSpec{
51805		Type: "TensorListResize",
51806		Input: []tf.Input{
51807			input_handle, size,
51808		},
51809	}
51810	op := scope.AddOperation(opspec)
51811	return op.Output(0)
51812}
51813
51814// Creates a TensorList by indexing into a Tensor.
51815//
51816// Each member of the TensorList corresponds to one row of the input tensor,
51817// specified by the given index (see `tf.gather`).
51818//
51819// tensor: The input tensor.
51820// indices: The indices used to index into the list.
51821// element_shape: The shape of the elements in the list (can be less specified than
51822//
51823//	the shape of the tensor).
51824//
51825// output_handle: The TensorList.
51826func TensorListScatter(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output) (output_handle tf.Output) {
51827	if scope.Err() != nil {
51828		return
51829	}
51830	opspec := tf.OpSpec{
51831		Type: "TensorListScatter",
51832		Input: []tf.Input{
51833			tensor, indices, element_shape,
51834		},
51835	}
51836	op := scope.AddOperation(opspec)
51837	return op.Output(0)
51838}
51839
51840// Scatters tensor at indices in an input list.
51841//
51842// Each member of the TensorList corresponds to one row of the input tensor,
51843// specified by the given index (see `tf.gather`).
51844//
51845// input_handle: The list to scatter into.
51846// tensor: The input tensor.
51847// indices: The indices used to index into the list.
51848// output_handle: The TensorList.
51849func TensorListScatterIntoExistingList(scope *Scope, input_handle tf.Output, tensor tf.Output, indices tf.Output) (output_handle tf.Output) {
51850	if scope.Err() != nil {
51851		return
51852	}
51853	opspec := tf.OpSpec{
51854		Type: "TensorListScatterIntoExistingList",
51855		Input: []tf.Input{
51856			input_handle, tensor, indices,
51857		},
51858	}
51859	op := scope.AddOperation(opspec)
51860	return op.Output(0)
51861}
51862
51863// Creates a TensorList by indexing into a Tensor.
51864//
51865// Each member of the TensorList corresponds to one row of the input tensor,
51866// specified by the given index (see `tf.gather`).
51867//
51868// tensor: The input tensor.
51869// indices: The indices used to index into the list.
51870// element_shape: The shape of the elements in the list (can be less specified than
51871//
51872//	the shape of the tensor).
51873//
51874// num_elements: The size of the output list. Must be large enough to accommodate
51875//
51876//	the largest index in indices. If -1, the list is just large enough to include
51877//	the largest index in indices.
51878//
51879// output_handle: The TensorList.
51880func TensorListScatterV2(scope *Scope, tensor tf.Output, indices tf.Output, element_shape tf.Output, num_elements tf.Output) (output_handle tf.Output) {
51881	if scope.Err() != nil {
51882		return
51883	}
51884	opspec := tf.OpSpec{
51885		Type: "TensorListScatterV2",
51886		Input: []tf.Input{
51887			tensor, indices, element_shape, num_elements,
51888		},
51889	}
51890	op := scope.AddOperation(opspec)
51891	return op.Output(0)
51892}
51893
51894// Sets the index-th position of the list to contain the given tensor.
51895//
51896// input_handle: the list
51897// index: the position in the list to which the tensor will be assigned
51898// item: the element to be assigned to that position
51899// output_handle: the new list, with the element in the proper position
51900func TensorListSetItem(scope *Scope, input_handle tf.Output, index tf.Output, item tf.Output) (output_handle tf.Output) {
51901	if scope.Err() != nil {
51902		return
51903	}
51904	opspec := tf.OpSpec{
51905		Type: "TensorListSetItem",
51906		Input: []tf.Input{
51907			input_handle, index, item,
51908		},
51909	}
51910	op := scope.AddOperation(opspec)
51911	return op.Output(0)
51912}
51913
51914// Splits a tensor into a list.
51915//
51916// list[i] corresponds to lengths[i] tensors from the input tensor.
51917// The tensor must have rank at least 1, and its first dimension must equal sum(lengths).
51918//
51919// tensor: The input tensor.
51920// element_shape: A shape compatible with that of elements in the tensor.
51921// lengths: Vector of sizes of the 0th dimension of tensors in the list.
51922// output_handle: The list.
51923func TensorListSplit(scope *Scope, tensor tf.Output, element_shape tf.Output, lengths tf.Output) (output_handle tf.Output) {
51924	if scope.Err() != nil {
51925		return
51926	}
51927	opspec := tf.OpSpec{
51928		Type: "TensorListSplit",
51929		Input: []tf.Input{
51930			tensor, element_shape, lengths,
51931		},
51932	}
51933	op := scope.AddOperation(opspec)
51934	return op.Output(0)
51935}
51936
51937// TensorListStackAttr is an optional argument to TensorListStack.
51938type TensorListStackAttr func(optionalAttr)
51939
51940// TensorListStackNumElements sets the optional num_elements attribute to value.
51941// If not specified, defaults to -1
51942func TensorListStackNumElements(value int64) TensorListStackAttr {
51943	return func(m optionalAttr) {
51944		m["num_elements"] = value
51945	}
51946}
51947
51948// Stacks all tensors in the list.
51949//
51950// Requires that all tensors have the same shape.
51951//
51952// input_handle: the input list
51953// tensor: the stacked result
51954// num_elements: optional. If not -1, the number of elements in the list.
51955func TensorListStack(scope *Scope, input_handle tf.Output, element_shape tf.Output, element_dtype tf.DataType, optional ...TensorListStackAttr) (tensor tf.Output) {
51956	if scope.Err() != nil {
51957		return
51958	}
51959	attrs := map[string]interface{}{"element_dtype": element_dtype}
51960	for _, a := range optional {
51961		a(attrs)
51962	}
51963	opspec := tf.OpSpec{
51964		Type: "TensorListStack",
51965		Input: []tf.Input{
51966			input_handle, element_shape,
51967		},
51968		Attrs: attrs,
51969	}
51970	op := scope.AddOperation(opspec)
51971	return op.Output(0)
51972}
51973
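// The sketch below is illustrative and not part of the generated API: it
// stacks a 3-element list back into a [3, 2] tensor, passing the optional
// num_elements attribute so the leading dimension is known statically.
func exampleTensorListStack(s *Scope) tf.Output {
	t := Const(s.SubScope("t"), [][]float32{{1, 2}, {3, 4}, {5, 6}})
	shape := Const(s.SubScope("shape"), []int32{2})
	list := TensorListFromTensor(s, t, shape)
	return TensorListStack(s, list, shape, tf.Float, TensorListStackNumElements(3))
}
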
51974// Returns a tensor map with item from given key erased.
51975//
51976// input_handle: the original map
51977// output_handle: the map with value from given key removed
51978// key: the key of the value to be erased
51979func TensorMapErase(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (output_handle tf.Output) {
51980	if scope.Err() != nil {
51981		return
51982	}
51983	attrs := map[string]interface{}{"value_dtype": value_dtype}
51984	opspec := tf.OpSpec{
51985		Type: "TensorMapErase",
51986		Input: []tf.Input{
51987			input_handle, key,
51988		},
51989		Attrs: attrs,
51990	}
51991	op := scope.AddOperation(opspec)
51992	return op.Output(0)
51993}
51994
51995// Returns whether the given key exists in the map.
51996//
51997// input_handle: the input map
51998// key: the key to check
51999// has_key: whether the key is already in the map or not
52000func TensorMapHasKey(scope *Scope, input_handle tf.Output, key tf.Output) (has_key tf.Output) {
52001	if scope.Err() != nil {
52002		return
52003	}
52004	opspec := tf.OpSpec{
52005		Type: "TensorMapHasKey",
52006		Input: []tf.Input{
52007			input_handle, key,
52008		},
52009	}
52010	op := scope.AddOperation(opspec)
52011	return op.Output(0)
52012}
52013
52014// Returns a map that is the 'input_handle' with the given key-value pair inserted.
52015//
52016// input_handle: the original map
52017// output_handle: the map with key and value inserted
52018// key: the key to be inserted
52019// value: the value to be inserted
52020func TensorMapInsert(scope *Scope, input_handle tf.Output, key tf.Output, value tf.Output) (output_handle tf.Output) {
52021	if scope.Err() != nil {
52022		return
52023	}
52024	opspec := tf.OpSpec{
52025		Type: "TensorMapInsert",
52026		Input: []tf.Input{
52027			input_handle, key, value,
52028		},
52029	}
52030	op := scope.AddOperation(opspec)
52031	return op.Output(0)
52032}
52033
52034// Returns the value from a given key in a tensor map.
52035//
52036// input_handle: the input map
52037// key: the key to be looked up
52038// value: the value found from the given key
52039func TensorMapLookup(scope *Scope, input_handle tf.Output, key tf.Output, value_dtype tf.DataType) (value tf.Output) {
52040	if scope.Err() != nil {
52041		return
52042	}
52043	attrs := map[string]interface{}{"value_dtype": value_dtype}
52044	opspec := tf.OpSpec{
52045		Type: "TensorMapLookup",
52046		Input: []tf.Input{
52047			input_handle, key,
52048		},
52049		Attrs: attrs,
52050	}
52051	op := scope.AddOperation(opspec)
52052	return op.Output(0)
52053}
52054
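// The sketch below is illustrative and not part of the generated API: it
// threads a map handle through insert, lookup, and erase, assuming the
// EmptyTensorMap wrapper generated elsewhere in this file.
func exampleTensorMap(s *Scope) (afterErase, value tf.Output) {
	m := EmptyTensorMap(s)
	key := Const(s.SubScope("key"), int32(1))
	m = TensorMapInsert(s, m, key, Const(s.SubScope("value"), float32(2)))
	value = TensorMapLookup(s, m, key, tf.Float)
	return TensorMapErase(s, m, key, tf.Float), value
}
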
52055// Returns the number of tensors in the input tensor map.
52056//
52057// input_handle: the input map
52058// size: the number of tensors in the map
52059func TensorMapSize(scope *Scope, input_handle tf.Output) (size tf.Output) {
52060	if scope.Err() != nil {
52061		return
52062	}
52063	opspec := tf.OpSpec{
52064		Type: "TensorMapSize",
52065		Input: []tf.Input{
52066			input_handle,
52067		},
52068	}
52069	op := scope.AddOperation(opspec)
52070	return op.Output(0)
52071}
52072
52073// Returns a Tensor stack of all keys in a tensor map.
52074//
52075// input_handle: the input map
52076// keys: the returned Tensor of all keys in the map
52077func TensorMapStackKeys(scope *Scope, input_handle tf.Output, key_dtype tf.DataType) (keys tf.Output) {
52078	if scope.Err() != nil {
52079		return
52080	}
52081	attrs := map[string]interface{}{"key_dtype": key_dtype}
52082	opspec := tf.OpSpec{
52083		Type: "TensorMapStackKeys",
52084		Input: []tf.Input{
52085			input_handle,
52086		},
52087		Attrs: attrs,
52088	}
52089	op := scope.AddOperation(opspec)
52090	return op.Output(0)
52091}
52092
52093// Adds sparse `updates` to an existing tensor according to `indices`.
52094//
52095// This operation creates a new tensor by adding sparse `updates` to the passed
52096// in `tensor`.
52097// This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the
52098// updates are added onto an existing tensor (as opposed to a variable). If the
52099// memory for the existing tensor cannot be re-used, a copy is made and updated.
52100//
52101// `indices` is an integer tensor containing indices into a new tensor of shape
52102// `tensor.shape`.  The last dimension of `indices` can be at most the rank of
52103// `tensor.shape`:
52104//
52105// ```
52106// indices.shape[-1] <= tensor.shape.rank
52107// ```
52108//
52109// The last dimension of `indices` corresponds to indices into elements
52110// (if `indices.shape[-1] = tensor.shape.rank`) or slices
52111// (if `indices.shape[-1] < tensor.shape.rank`) along dimension
52112// `indices.shape[-1]` of `tensor.shape`.  `updates` is a tensor with shape
52113//
52114// ```
52115// indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
52116// ```
52117//
52118// The simplest form of `tensor_scatter_nd_add` is to add individual elements to a
52119// tensor by index. For example, say we want to add 4 elements in a rank-1
52120// tensor with 8 elements.
52121//
52122// In Python, this scatter add operation would look like this:
52123//
52124// >>> indices = tf.constant([[4], [3], [1], [7]])
52125// >>> updates = tf.constant([9, 10, 11, 12])
52126// >>> tensor = tf.ones([8], dtype=tf.int32)
52127// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
52128// >>> updated
52129// <tf.Tensor: shape=(8,), dtype=int32,
52130// numpy=array([ 1, 12,  1, 11, 10,  1,  1, 13], dtype=int32)>
52131//
52132// We can also insert entire slices of a higher rank tensor all at once. For
52133// example, we can insert two slices in the first dimension of a
52134// rank-3 tensor with two matrices of new values.
52135//
52136// In Python, this scatter add operation would look like this:
52137//
52138// >>> indices = tf.constant([[0], [2]])
52139// >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
52140// ...                         [7, 7, 7, 7], [8, 8, 8, 8]],
52141// ...                        [[5, 5, 5, 5], [6, 6, 6, 6],
52142// ...                         [7, 7, 7, 7], [8, 8, 8, 8]]])
52143// >>> tensor = tf.ones([4, 4, 4],dtype=tf.int32)
52144// >>> updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
52145// >>> updated
52146// <tf.Tensor: shape=(4, 4, 4), dtype=int32,
52147// numpy=array([[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
52148//
52149//	[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
52150//	[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
52151//	[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]], dtype=int32)>
52152//
52153// Note: on CPU, if an out-of-bounds index is found, an error is returned.
52154// On GPU, if an out-of-bounds index is found, the index is ignored.
52155//
52156// Arguments:
52157//
52158//	tensor: Tensor to copy/update.
52159//	indices: Index tensor.
52160//	updates: Updates to scatter into output.
52161//
52162// Returns A new tensor copied from tensor and updates added according to the indices.
52163func TensorScatterAdd(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
52164	if scope.Err() != nil {
52165		return
52166	}
52167	opspec := tf.OpSpec{
52168		Type: "TensorScatterAdd",
52169		Input: []tf.Input{
52170			tensor, indices, updates,
52171		},
52172	}
52173	op := scope.AddOperation(opspec)
52174	return op.Output(0)
52175}
52176
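// The sketch below is illustrative and not part of the generated API: it is
// a Go rendering of the first Python example above and, when run, would
// produce [1, 12, 1, 11, 10, 1, 1, 13].
func exampleTensorScatterAdd(s *Scope) tf.Output {
	indices := Const(s.SubScope("indices"), [][]int32{{4}, {3}, {1}, {7}})
	updates := Const(s.SubScope("updates"), []int32{9, 10, 11, 12})
	tensor := Const(s.SubScope("tensor"), []int32{1, 1, 1, 1, 1, 1, 1, 1})
	return TensorScatterAdd(s, tensor, indices, updates)
}
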
52177// Apply a sparse update to a tensor taking the element-wise maximum.
52178//
52179// Returns a new tensor copied from `tensor` whose values are the element-wise
52180// maximum of `tensor` and `updates` at the given indices.
52181//
52182// >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]
52183// >>> indices = [[1], [4], [5]]
52184// >>> updates = [1, -1, 1]
52185// >>> tf.tensor_scatter_nd_max(tensor, indices, updates).numpy()
52186// array([0, 1, 0, 0, 0, 1, 0, 0], dtype=int32)
52187//
52188// Refer to `tf.tensor_scatter_nd_update` for more details.
52189//
52190// Arguments:
52191//
52192//	tensor: Tensor to update.
52193//	indices: Index tensor.
52194//	updates: Updates to scatter into output.
52195//
52196// Returns A new tensor copied from tensor, whose values are the element-wise maximum of tensor and updates at the given indices.
52197func TensorScatterMax(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
52198	if scope.Err() != nil {
52199		return
52200	}
52201	opspec := tf.OpSpec{
52202		Type: "TensorScatterMax",
52203		Input: []tf.Input{
52204			tensor, indices, updates,
52205		},
52206	}
52207	op := scope.AddOperation(opspec)
52208	return op.Output(0)
52209}
52210
52211// Subtracts sparse `updates` from an existing tensor according to `indices`.
52212//
52213// This operation creates a new tensor by subtracting sparse `updates` from the
52214// passed in `tensor`.
52215// This operation is very similar to `tf.scatter_nd_sub`, except that the updates
52216// are subtracted from an existing tensor (as opposed to a variable). If the memory
52217// for the existing tensor cannot be re-used, a copy is made and updated.
52218//
52219// `indices` is an integer tensor containing indices into a new tensor of shape
52220// `shape`.  The last dimension of `indices` can be at most the rank of `shape`:
52221//
52222//	indices.shape[-1] <= shape.rank
52223//
52224// The last dimension of `indices` corresponds to indices into elements
52225// (if `indices.shape[-1] = shape.rank`) or slices
52226// (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
52227// `shape`.  `updates` is a tensor with shape
52228//
52229//	indices.shape[:-1] + shape[indices.shape[-1]:]
52230//
52231// The simplest form of tensor_scatter_sub is to subtract individual elements
52232// from a tensor by index. For example, say we want to subtract 4 scattered elements
52233// from a rank-1 tensor with 8 elements.
52234//
52235// In Python, this scatter subtract operation would look like this:
52236//
52237// ```python
52238//
52239//	indices = tf.constant([[4], [3], [1], [7]])
52240//	updates = tf.constant([9, 10, 11, 12])
52241//	tensor = tf.ones([8], dtype=tf.int32)
52242//	updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
52243//	print(updated)
52244//
52245// ```
52246//
52247// The resulting tensor would look like this:
52248//
52249//	[1, -10, 1, -9, -8, 1, 1, -11]
52250//
52251// We can also subtract entire slices of a higher rank tensor all at once. For
52252// example, we can update two slices in the first dimension of a
52253// rank-3 tensor with two matrices of new values.
52254//
52255// In Python, this scatter subtract operation would look like this:
52256//
52257// ```python
52258//
52259//	indices = tf.constant([[0], [2]])
52260//	updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
52261//	                        [7, 7, 7, 7], [8, 8, 8, 8]],
52262//	                       [[5, 5, 5, 5], [6, 6, 6, 6],
52263//	                        [7, 7, 7, 7], [8, 8, 8, 8]]])
52264//	tensor = tf.ones([4, 4, 4],dtype=tf.int32)
52265//	updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
52266//	print(updated)
52267//
52268// ```
52269//
52270// The resulting tensor would look like this:
52271//
52272//	[[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
52273//	 [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
52274//	 [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
52275//	 [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]
52276//
52277// Note that on CPU, if an out-of-bounds index is found, an error is returned.
52278// On GPU, if an out-of-bounds index is found, the index is ignored.
52279//
52280// Arguments:
52281//
52282//	tensor: Tensor to copy/update.
52283//	indices: Index tensor.
52284//	updates: Updates to scatter into output.
52285//
52286// Returns A new tensor copied from tensor and updates subtracted according to the indices.
52287func TensorScatterSub(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
52288	if scope.Err() != nil {
52289		return
52290	}
52291	opspec := tf.OpSpec{
52292		Type: "TensorScatterSub",
52293		Input: []tf.Input{
52294			tensor, indices, updates,
52295		},
52296	}
52297	op := scope.AddOperation(opspec)
52298	return op.Output(0)
52299}
52300
52301// Scatter `updates` into an existing tensor according to `indices`.
52302//
52303// This operation creates a new tensor by applying sparse `updates` to the passed
52304// in `tensor`.
52305// This operation is very similar to `tf.scatter_nd`, except that the updates are
52306// scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
52307// for the existing tensor cannot be re-used, a copy is made and updated.
52308//
52309// If `indices` contains duplicates, then we pick the last update for the index.
52310//
52311// If an out-of-bounds index is found on CPU, an error is returned.
52312//
52313// **WARNING**: There are some GPU-specific semantics for this operation.
52314// - If an out-of-bounds index is found, the index is ignored.
52315// - The order in which updates are applied is nondeterministic, so the output
52316// will be nondeterministic if `indices` contains duplicates.
52317//
52318// `indices` is an integer tensor containing indices into a new tensor of shape
52319// `shape`.
52320//
52321//   - `indices` must have at least 2 axes: `(num_updates, index_depth)`.
52322//   - The last axis of `indices` is how deep to index into `tensor`, so this index
52323//     depth must be at most the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`
52324//
52325// If `indices.shape[-1] = tensor.rank`, this Op indexes and updates scalar elements.
52326// If `indices.shape[-1] < tensor.rank`, it indexes and updates slices of the input
52327// `tensor`.
52328//
52329// Each `update` has a rank of `tensor.rank - indices.shape[-1]`.
52330// The overall shape of `updates` is:
52331//
52332// ```
52333// indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
52334// ```
52335//
52336// For usage examples, see the Python [tf.tensor_scatter_nd_update](
52337// https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function.
52338//
52339// Arguments:
52340//
52341//	tensor: Tensor to copy/update.
52342//	indices: Index tensor.
52343//	updates: Updates to scatter into output.
52344//
52345// Returns A new tensor with the given shape and updates applied according
52346// to the indices.
52347func TensorScatterUpdate(scope *Scope, tensor tf.Output, indices tf.Output, updates tf.Output) (output tf.Output) {
52348	if scope.Err() != nil {
52349		return
52350	}
52351	opspec := tf.OpSpec{
52352		Type: "TensorScatterUpdate",
52353		Input: []tf.Input{
52354			tensor, indices, updates,
52355		},
52356	}
52357	op := scope.AddOperation(opspec)
52358	return op.Output(0)
52359}
52360
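// The sketch below is illustrative and not part of the generated API: it
// builds and runs a small scatter-update graph end to end; the expected
// result is [0, 9, 0, 0, 10].
func exampleTensorScatterUpdate() ([]*tf.Tensor, error) {
	s := NewScope()
	out := TensorScatterUpdate(s,
		Const(s.SubScope("tensor"), []int32{0, 0, 0, 0, 0}),
		Const(s.SubScope("indices"), [][]int32{{1}, {4}}),
		Const(s.SubScope("updates"), []int32{9, 10}))
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	return sess.Run(nil, []tf.Output{out}, nil)
}
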
52361// TensorSliceDatasetAttr is an optional argument to TensorSliceDataset.
52362type TensorSliceDatasetAttr func(optionalAttr)
52363
52364// TensorSliceDatasetIsFiles sets the optional is_files attribute to value.
52365// If not specified, defaults to false
52366func TensorSliceDatasetIsFiles(value bool) TensorSliceDatasetAttr {
52367	return func(m optionalAttr) {
52368		m["is_files"] = value
52369	}
52370}
52371
52372// TensorSliceDatasetMetadata sets the optional metadata attribute to value.
52373// If not specified, defaults to ""
52374func TensorSliceDatasetMetadata(value string) TensorSliceDatasetAttr {
52375	return func(m optionalAttr) {
52376		m["metadata"] = value
52377	}
52378}
52379
52380// TensorSliceDatasetReplicateOnSplit sets the optional replicate_on_split attribute to value.
52381// If not specified, defaults to false
52382func TensorSliceDatasetReplicateOnSplit(value bool) TensorSliceDatasetAttr {
52383	return func(m optionalAttr) {
52384		m["replicate_on_split"] = value
52385	}
52386}
52387
52388// Creates a dataset that emits each dim-0 slice of `components` once.
52389func TensorSliceDataset(scope *Scope, components []tf.Output, output_shapes []tf.Shape, optional ...TensorSliceDatasetAttr) (handle tf.Output) {
52390	if scope.Err() != nil {
52391		return
52392	}
52393	attrs := map[string]interface{}{"output_shapes": output_shapes}
52394	for _, a := range optional {
52395		a(attrs)
52396	}
52397	opspec := tf.OpSpec{
52398		Type: "TensorSliceDataset",
52399		Input: []tf.Input{
52400			tf.OutputList(components),
52401		},
52402		Attrs: attrs,
52403	}
52404	op := scope.AddOperation(opspec)
52405	return op.Output(0)
52406}
52407
52408// TensorStridedSliceUpdateAttr is an optional argument to TensorStridedSliceUpdate.
52409type TensorStridedSliceUpdateAttr func(optionalAttr)
52410
52411// TensorStridedSliceUpdateBeginMask sets the optional begin_mask attribute to value.
52412// If not specified, defaults to 0
52413func TensorStridedSliceUpdateBeginMask(value int64) TensorStridedSliceUpdateAttr {
52414	return func(m optionalAttr) {
52415		m["begin_mask"] = value
52416	}
52417}
52418
52419// TensorStridedSliceUpdateEndMask sets the optional end_mask attribute to value.
52420// If not specified, defaults to 0
52421func TensorStridedSliceUpdateEndMask(value int64) TensorStridedSliceUpdateAttr {
52422	return func(m optionalAttr) {
52423		m["end_mask"] = value
52424	}
52425}
52426
52427// TensorStridedSliceUpdateEllipsisMask sets the optional ellipsis_mask attribute to value.
52428// If not specified, defaults to 0
52429func TensorStridedSliceUpdateEllipsisMask(value int64) TensorStridedSliceUpdateAttr {
52430	return func(m optionalAttr) {
52431		m["ellipsis_mask"] = value
52432	}
52433}
52434
52435// TensorStridedSliceUpdateNewAxisMask sets the optional new_axis_mask attribute to value.
52436// If not specified, defaults to 0
52437func TensorStridedSliceUpdateNewAxisMask(value int64) TensorStridedSliceUpdateAttr {
52438	return func(m optionalAttr) {
52439		m["new_axis_mask"] = value
52440	}
52441}
52442
52443// TensorStridedSliceUpdateShrinkAxisMask sets the optional shrink_axis_mask attribute to value.
52444// If not specified, defaults to 0
52445func TensorStridedSliceUpdateShrinkAxisMask(value int64) TensorStridedSliceUpdateAttr {
52446	return func(m optionalAttr) {
52447		m["shrink_axis_mask"] = value
52448	}
52449}
52450
52451// Assign `value` to the sliced l-value reference of `input`.
52452//
52453// The values of `value` are assigned to the positions in the tensor `input` that
52454// are selected by the slice parameters. The slice parameters `begin` `end`
52455// `strides` etc. work exactly as in `StridedSlice`.
52456//
52457// NOTE: this op currently does not support broadcasting, so `value`'s shape
52458// must be exactly the shape produced by the slice of `input`.
52459func TensorStridedSliceUpdate(scope *Scope, input tf.Output, begin tf.Output, end tf.Output, strides tf.Output, value tf.Output, optional ...TensorStridedSliceUpdateAttr) (output tf.Output) {
52460	if scope.Err() != nil {
52461		return
52462	}
52463	attrs := map[string]interface{}{}
52464	for _, a := range optional {
52465		a(attrs)
52466	}
52467	opspec := tf.OpSpec{
52468		Type: "TensorStridedSliceUpdate",
52469		Input: []tf.Input{
52470			input, begin, end, strides, value,
52471		},
52472		Attrs: attrs,
52473	}
52474	op := scope.AddOperation(opspec)
52475	return op.Output(0)
52476}
52477
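// The sketch below is illustrative and not part of the generated API: it
// assigns [9, 9] to positions 1 and 2 of a 4-vector, the graph analogue of
// the Python slice assignment x[1:3] = value, yielding [0, 9, 9, 0].
func exampleStridedSliceUpdate(s *Scope) tf.Output {
	return TensorStridedSliceUpdate(s,
		Const(s.SubScope("input"), []int32{0, 0, 0, 0}),
		Const(s.SubScope("begin"), []int32{1}),
		Const(s.SubScope("end"), []int32{3}),
		Const(s.SubScope("strides"), []int32{1}),
		Const(s.SubScope("value"), []int32{9, 9}))
}
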
52478// TensorSummaryAttr is an optional argument to TensorSummary.
52479type TensorSummaryAttr func(optionalAttr)
52480
52481// TensorSummaryDescription sets the optional description attribute to value.
52482//
52483// value: A json-encoded SummaryDescription proto.
52484// If not specified, defaults to ""
52485func TensorSummaryDescription(value string) TensorSummaryAttr {
52486	return func(m optionalAttr) {
52487		m["description"] = value
52488	}
52489}
52490
52491// TensorSummaryLabels sets the optional labels attribute to value.
52492//
52493// value: An unused list of strings.
52494// If not specified, defaults to {}
52495func TensorSummaryLabels(value []string) TensorSummaryAttr {
52496	return func(m optionalAttr) {
52497		m["labels"] = value
52498	}
52499}
52500
52501// TensorSummaryDisplayName sets the optional display_name attribute to value.
52502//
52503// value: An unused string.
52504// If not specified, defaults to ""
52505func TensorSummaryDisplayName(value string) TensorSummaryAttr {
52506	return func(m optionalAttr) {
52507		m["display_name"] = value
52508	}
52509}
52510
52511// Outputs a `Summary` protocol buffer with a tensor.
52512//
52513// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
52514// a tag as well as a serialized SummaryMetadata proto string that contains
52515// plugin-specific data. We will keep this op to maintain backwards compatibility.
52516//
52517// Arguments:
52518//
52519//	tensor: A tensor to serialize.
52520func TensorSummary(scope *Scope, tensor tf.Output, optional ...TensorSummaryAttr) (summary tf.Output) {
52521	if scope.Err() != nil {
52522		return
52523	}
52524	attrs := map[string]interface{}{}
52525	for _, a := range optional {
52526		a(attrs)
52527	}
52528	opspec := tf.OpSpec{
52529		Type: "TensorSummary",
52530		Input: []tf.Input{
52531			tensor,
52532		},
52533		Attrs: attrs,
52534	}
52535	op := scope.AddOperation(opspec)
52536	return op.Output(0)
52537}
52538
52539// Outputs a `Summary` protocol buffer with a tensor and per-plugin data.
52540//
52541// Arguments:
52542//
52543//	tag: A string attached to this summary. Used for organization in TensorBoard.
52544//	tensor: A tensor to serialize.
52545//	serialized_summary_metadata: A serialized SummaryMetadata proto. Contains plugin
52546//
52547// data.
52548func TensorSummaryV2(scope *Scope, tag tf.Output, tensor tf.Output, serialized_summary_metadata tf.Output) (summary tf.Output) {
52549	if scope.Err() != nil {
52550		return
52551	}
52552	opspec := tf.OpSpec{
52553		Type: "TensorSummaryV2",
52554		Input: []tf.Input{
52555			tag, tensor, serialized_summary_metadata,
52556		},
52557	}
52558	op := scope.AddOperation(opspec)
52559	return op.Output(0)
52560}
52561
52562// TextLineDatasetAttr is an optional argument to TextLineDataset.
52563type TextLineDatasetAttr func(optionalAttr)
52564
52565// TextLineDatasetMetadata sets the optional metadata attribute to value.
52566// If not specified, defaults to ""
52567func TextLineDatasetMetadata(value string) TextLineDatasetAttr {
52568	return func(m optionalAttr) {
52569		m["metadata"] = value
52570	}
52571}
52572
52573// Creates a dataset that emits the lines of one or more text files.
52574//
52575// Arguments:
52576//
52577//	filenames: A scalar or a vector containing the name(s) of the file(s) to be
52578//
52579// read.
52580//
52581//	compression_type: A scalar containing either (i) the empty string (no
52582//
52583// compression), (ii) "ZLIB", or (iii) "GZIP".
52584//
52585//	buffer_size: A scalar containing the number of bytes to buffer.
52586func TextLineDataset(scope *Scope, filenames tf.Output, compression_type tf.Output, buffer_size tf.Output, optional ...TextLineDatasetAttr) (handle tf.Output) {
52587	if scope.Err() != nil {
52588		return
52589	}
52590	attrs := map[string]interface{}{}
52591	for _, a := range optional {
52592		a(attrs)
52593	}
52594	opspec := tf.OpSpec{
52595		Type: "TextLineDataset",
52596		Input: []tf.Input{
52597			filenames, compression_type, buffer_size,
52598		},
52599		Attrs: attrs,
52600	}
52601	op := scope.AddOperation(opspec)
52602	return op.Output(0)
52603}
52604
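// The sketch below is illustrative and not part of the generated API; the
// file path is a hypothetical placeholder. An empty compression_type reads
// the files as plain text.
func exampleTextLineDataset(s *Scope) tf.Output {
	return TextLineDataset(s,
		Const(s.SubScope("filenames"), []string{"/tmp/input.txt"}), // hypothetical path
		Const(s.SubScope("compression"), ""),
		Const(s.SubScope("buffer"), int64(256*1024)))
}
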
52605// TextLineReaderV2Attr is an optional argument to TextLineReaderV2.
52606type TextLineReaderV2Attr func(optionalAttr)
52607
52608// TextLineReaderV2SkipHeaderLines sets the optional skip_header_lines attribute to value.
52609//
52610// value: Number of lines to skip from the beginning of every file.
52611// If not specified, defaults to 0
52612func TextLineReaderV2SkipHeaderLines(value int64) TextLineReaderV2Attr {
52613	return func(m optionalAttr) {
52614		m["skip_header_lines"] = value
52615	}
52616}
52617
52618// TextLineReaderV2Container sets the optional container attribute to value.
52619//
52620// value: If non-empty, this reader is placed in the given container.
52621// Otherwise, a default container is used.
52622// If not specified, defaults to ""
52623func TextLineReaderV2Container(value string) TextLineReaderV2Attr {
52624	return func(m optionalAttr) {
52625		m["container"] = value
52626	}
52627}
52628
52629// TextLineReaderV2SharedName sets the optional shared_name attribute to value.
52630//
52631// value: If non-empty, this reader is named in the given bucket
52632// with this shared_name. Otherwise, the node name is used instead.
52633// If not specified, defaults to ""
52634func TextLineReaderV2SharedName(value string) TextLineReaderV2Attr {
52635	return func(m optionalAttr) {
52636		m["shared_name"] = value
52637	}
52638}
52639
52640// A Reader that outputs the lines of a file delimited by '\n'.
52641//
52642// Returns The handle to reference the Reader.
52643func TextLineReaderV2(scope *Scope, optional ...TextLineReaderV2Attr) (reader_handle tf.Output) {
52644	if scope.Err() != nil {
52645		return
52646	}
52647	attrs := map[string]interface{}{}
52648	for _, a := range optional {
52649		a(attrs)
52650	}
52651	opspec := tf.OpSpec{
52652		Type: "TextLineReaderV2",
52653
52654		Attrs: attrs,
52655	}
52656	op := scope.AddOperation(opspec)
52657	return op.Output(0)
52658}
52659
52660// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
52661//
52662// Arguments:
52663//
52664//	thread_pool: A resource produced by the ThreadPoolHandle op.
52665func ThreadPoolDataset(scope *Scope, input_dataset tf.Output, thread_pool tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
52666	if scope.Err() != nil {
52667		return
52668	}
52669	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
52670	opspec := tf.OpSpec{
52671		Type: "ThreadPoolDataset",
52672		Input: []tf.Input{
52673			input_dataset, thread_pool,
52674		},
52675		Attrs: attrs,
52676	}
52677	op := scope.AddOperation(opspec)
52678	return op.Output(0)
52679}
52680
52681// ThreadPoolHandleAttr is an optional argument to ThreadPoolHandle.
52682type ThreadPoolHandleAttr func(optionalAttr)
52683
52684// ThreadPoolHandleMaxIntraOpParallelism sets the optional max_intra_op_parallelism attribute to value.
52685//
52686// value: The maximum degree of parallelism to use within operations that execute on this
52687// threadpool.
52688// If not specified, defaults to 1
52689func ThreadPoolHandleMaxIntraOpParallelism(value int64) ThreadPoolHandleAttr {
52690	return func(m optionalAttr) {
52691		m["max_intra_op_parallelism"] = value
52692	}
52693}
52694
52695// ThreadPoolHandleContainer sets the optional container attribute to value.
52696// If not specified, defaults to ""
52697func ThreadPoolHandleContainer(value string) ThreadPoolHandleAttr {
52698	return func(m optionalAttr) {
52699		m["container"] = value
52700	}
52701}
52702
52703// ThreadPoolHandleSharedName sets the optional shared_name attribute to value.
52704// If not specified, defaults to ""
52705func ThreadPoolHandleSharedName(value string) ThreadPoolHandleAttr {
52706	return func(m optionalAttr) {
52707		m["shared_name"] = value
52708	}
52709}
52710
52711// Creates a dataset that uses a custom thread pool to compute `input_dataset`.
52712//
52713// Arguments:
52714//
52715//	num_threads: The number of threads in the thread pool.
52716//	display_name: A human-readable name for the threads that may be visible in some
52717//
52718// visualizations.
52720//
52721// Returns A resource that can be consumed by one or more ExperimentalThreadPoolDataset
52722// ops.
52723func ThreadPoolHandle(scope *Scope, num_threads int64, display_name string, optional ...ThreadPoolHandleAttr) (handle tf.Output) {
52724	if scope.Err() != nil {
52725		return
52726	}
52727	attrs := map[string]interface{}{"num_threads": num_threads, "display_name": display_name}
52728	for _, a := range optional {
52729		a(attrs)
52730	}
52731	opspec := tf.OpSpec{
52732		Type: "ThreadPoolHandle",
52733
52734		Attrs: attrs,
52735	}
52736	op := scope.AddOperation(opspec)
52737	return op.Output(0)
52738}
52739
52740// ThreadUnsafeUnigramCandidateSamplerAttr is an optional argument to ThreadUnsafeUnigramCandidateSampler.
52741type ThreadUnsafeUnigramCandidateSamplerAttr func(optionalAttr)
52742
52743// ThreadUnsafeUnigramCandidateSamplerSeed sets the optional seed attribute to value.
52744//
52745// value: If either seed or seed2 is set to be non-zero, the random number
52746// generator is seeded by the given seed.  Otherwise, it is seeded by a
52747// random seed.
52748// If not specified, defaults to 0
52749func ThreadUnsafeUnigramCandidateSamplerSeed(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
52750	return func(m optionalAttr) {
52751		m["seed"] = value
52752	}
52753}
52754
52755// ThreadUnsafeUnigramCandidateSamplerSeed2 sets the optional seed2 attribute to value.
52756//
52757// value: A second seed to avoid seed collision.
52758// If not specified, defaults to 0
52759func ThreadUnsafeUnigramCandidateSamplerSeed2(value int64) ThreadUnsafeUnigramCandidateSamplerAttr {
52760	return func(m optionalAttr) {
52761		m["seed2"] = value
52762	}
52763}
52764
52765// Generates labels for candidate sampling with a learned unigram distribution.
52766//
52767// See explanations of candidate sampling and the data formats at
52768// go/candidate-sampling.
52769//
52770// For each batch, this op picks a single set of sampled candidate labels.
52771//
52772// The advantages of sampling candidates per-batch are simplicity and the
52773// possibility of efficient dense matrix multiplication. The disadvantage is that
52774// the sampled candidates must be chosen independently of the context and of the
52775// true labels.
52776//
52777// Arguments:
52778//
52779//	true_classes: A batch_size * num_true matrix, in which each row contains the
52780//
52781// IDs of the num_true target_classes in the corresponding original label.
52782//
52783//	num_true: Number of true labels per context.
52784//	num_sampled: Number of candidates to randomly sample.
52785//	unique: If unique is true, we sample with rejection, so that all sampled
52786//
52787// candidates in a batch are unique. This requires some approximation to
52788// estimate the post-rejection sampling probabilities.
52789//
52790//	range_max: The sampler will sample integers from the interval [0, range_max).
52791//
52792// Returns:
52793//
52794//	sampled_candidates: A vector of length num_sampled, in which each element is
52795//
52796// the ID of a sampled candidate.
52797//
52798//	true_expected_count: A batch_size * num_true matrix, representing
52799//
52800// the number of times each candidate is expected to occur in a batch
52801// of sampled candidates. If unique=true, then this is a probability.
52802//
52803//	sampled_expected_count: A vector of length num_sampled, for each sampled
52804//
52805// candidate representing the number of times the candidate is expected
52806// to occur in a batch of sampled candidates.  If unique=true, then this is a
52807// probability.
52808func ThreadUnsafeUnigramCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...ThreadUnsafeUnigramCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
52809	if scope.Err() != nil {
52810		return
52811	}
52812	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
52813	for _, a := range optional {
52814		a(attrs)
52815	}
52816	opspec := tf.OpSpec{
52817		Type: "ThreadUnsafeUnigramCandidateSampler",
52818		Input: []tf.Input{
52819			true_classes,
52820		},
52821		Attrs: attrs,
52822	}
52823	op := scope.AddOperation(opspec)
52824	return op.Output(0), op.Output(1), op.Output(2)
52825}
52826
52827// Constructs a tensor by tiling a given tensor.
52828//
52829// This operation creates a new tensor by replicating `input` `multiples` times.
52830// The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
52831// and the values of `input` are replicated `multiples[i]` times along the 'i'th
52832// dimension. For example, tiling `[a b c d]` by `[2]` produces
52833// `[a b c d a b c d]`.
52834//
52835// >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
52836// >>> b = tf.constant([1,2], tf.int32)
52837// >>> tf.tile(a, b)
52838// <tf.Tensor: shape=(2, 6), dtype=int32, numpy=
52839// array([[1, 2, 3, 1, 2, 3],
52840//
52841//	[4, 5, 6, 4, 5, 6]], dtype=int32)>
52842//
52843// >>> c = tf.constant([2,1], tf.int32)
52844// >>> tf.tile(a, c)
52845// <tf.Tensor: shape=(4, 3), dtype=int32, numpy=
52846// array([[1, 2, 3],
52847//
52848//	[4, 5, 6],
52849//	[1, 2, 3],
52850//	[4, 5, 6]], dtype=int32)>
52851//
52852// >>> d = tf.constant([2,2], tf.int32)
52853// >>> tf.tile(a, d)
52854// <tf.Tensor: shape=(4, 6), dtype=int32, numpy=
52855// array([[1, 2, 3, 1, 2, 3],
52856//
52857//	[4, 5, 6, 4, 5, 6],
52858//	[1, 2, 3, 1, 2, 3],
52859//	[4, 5, 6, 4, 5, 6]], dtype=int32)>
52860//
52861// Arguments:
52862//
52863//	input: 1-D or higher.
52864//	multiples: 1-D. Length must be the same as the number of dimensions in `input`
52865func Tile(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
52866	if scope.Err() != nil {
52867		return
52868	}
52869	opspec := tf.OpSpec{
52870		Type: "Tile",
52871		Input: []tf.Input{
52872			input, multiples,
52873		},
52874	}
52875	op := scope.AddOperation(opspec)
52876	return op.Output(0)
52877}
52878
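// The sketch below is illustrative and not part of the generated API: it is
// the Go analogue of `tf.tile(a, b)` from the first doctest above and
// produces a [2, 6] tensor.
func exampleTile(s *Scope) tf.Output {
	a := Const(s.SubScope("a"), [][]int32{{1, 2, 3}, {4, 5, 6}})
	b := Const(s.SubScope("b"), []int32{1, 2})
	return Tile(s, a, b)
}
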
52879// Returns the gradient of `Tile`.
52880//
52881// DEPRECATED at GraphDef version 3: TileGrad has been replaced with reduce_sum
52882//
52883// Since `Tile` takes an input and repeats the input `multiples` times
52884// along each dimension, `TileGrad` takes in `multiples` and aggregates
52885// each repeated tile of `input` into `output`.
52886func TileGrad(scope *Scope, input tf.Output, multiples tf.Output) (output tf.Output) {
52887	if scope.Err() != nil {
52888		return
52889	}
52890	opspec := tf.OpSpec{
52891		Type: "TileGrad",
52892		Input: []tf.Input{
52893			input, multiples,
52894		},
52895	}
52896	op := scope.AddOperation(opspec)
52897	return op.Output(0)
52898}
52899
52900// Provides the time since epoch in seconds.
52901//
52902// Returns the timestamp as a `float64` for seconds since the Unix epoch.
52903//
52904// Note: the timestamp is computed when the op is executed, not when it is added
52905// to the graph.
52906func Timestamp(scope *Scope) (ts tf.Output) {
52907	if scope.Err() != nil {
52908		return
52909	}
52910	opspec := tf.OpSpec{
52911		Type: "Timestamp",
52912	}
52913	op := scope.AddOperation(opspec)
52914	return op.Output(0)
52915}
52916
52917// Converts a tensor to a scalar predicate.
52918//
52919// Converts a tensor to a scalar predicate with the following rules:
52920//
52921//   - For 0D tensors, truthiness is determined by comparing against a "zero"
52922//     value. For numerical types it is the obvious zero. For strings it is the
52923//     empty string.
52924//
52925//   - For >0D tensors, truthiness is determined by looking at the number of
52926//     elements. If it has zero elements, the result is false; otherwise the
52927//     result is true.
52928//
52929// This matches the behavior of If and While for determining if a tensor counts
52930// as true/false for a branch condition.
52931func ToBool(scope *Scope, input tf.Output) (output tf.Output) {
52932	if scope.Err() != nil {
52933		return
52934	}
52935	opspec := tf.OpSpec{
52936		Type: "ToBool",
52937		Input: []tf.Input{
52938			input,
52939		},
52940	}
52941	op := scope.AddOperation(opspec)
52942	return op.Output(0)
52943}
52944
52945// TopKAttr is an optional argument to TopK.
52946type TopKAttr func(optionalAttr)
52947
52948// TopKSorted sets the optional sorted attribute to value.
52949//
52950// value: If true the resulting `k` elements will be sorted by the values in
52951// descending order.
52952// If not specified, defaults to true
52953func TopKSorted(value bool) TopKAttr {
52954	return func(m optionalAttr) {
52955		m["sorted"] = value
52956	}
52957}
52958
52959// Finds values and indices of the `k` largest elements for the last dimension.
52960//
52961// DEPRECATED at GraphDef version 7: Use TopKV2 instead
52962//
52963// If the input is a vector (rank-1), finds the `k` largest entries in the vector
52964// and outputs their values and indices as vectors.  Thus `values[j]` is the
52965// `j`-th largest entry in `input`, and its index is `indices[j]`.
52966//
52967// For matrices (resp. higher rank input), computes the top `k` entries in each
52968// row (resp. vector along the last dimension).  Thus,
52969//
52970//	values.shape = indices.shape = input.shape[:-1] + [k]
52971//
52972// If two elements are equal, the lower-index element appears first.
52973//
52974// If `k` varies dynamically, use `TopKV2` below.
52975//
52976// Arguments:
52977//
52978//	input: 1-D or higher with last dimension at least `k`.
52979//	k: Number of top elements to look for along the last dimension (along each
52980//
52981// row for matrices).
52982//
52983// Returns:
52984//
52985//	values: The `k` largest elements along each last dimensional slice.
52986//	indices: The indices of `values` within the last dimension of `input`.
52987func TopK(scope *Scope, input tf.Output, k int64, optional ...TopKAttr) (values tf.Output, indices tf.Output) {
52988	if scope.Err() != nil {
52989		return
52990	}
52991	attrs := map[string]interface{}{"k": k}
52992	for _, a := range optional {
52993		a(attrs)
52994	}
52995	opspec := tf.OpSpec{
52996		Type: "TopK",
52997		Input: []tf.Input{
52998			input,
52999		},
53000		Attrs: attrs,
53001	}
53002	op := scope.AddOperation(opspec)
53003	return op.Output(0), op.Output(1)
53004}
53005
53006// Returns the TopK unique values in the array in sorted order.
53007//
53008// The running time is proportional to the product of K and the input
53009// size. Sorting the whole array is more efficient for sufficiently large
53010// values of K. The median-of-medians algorithm is probably faster, but
53011// difficult to implement efficiently in XLA. If there are fewer than K
53012// unique numbers (not NaNs), the results are padded with negative
53013// infinity. NaNs are never returned. Subnormal numbers are flushed to
53014// zero. If an element appears at multiple indices, the highest index is
53015// returned. If a TopK element never appears in the input due to padding
53016// values, the indices are padded with negative one. If a padding value
53017// appears in the input and padding is needed, the highest index of the
53018// padding value will be returned. The semantics are not the same as
53019// kth_order_statistic.
53020func TopKUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output) {
53021	if scope.Err() != nil {
53022		return
53023	}
53024	attrs := map[string]interface{}{"k": k}
53025	opspec := tf.OpSpec{
53026		Type: "TopKUnique",
53027		Input: []tf.Input{
53028			input,
53029		},
53030		Attrs: attrs,
53031	}
53032	op := scope.AddOperation(opspec)
53033	return op.Output(0), op.Output(1)
53034}
53035
53036// TopKV2Attr is an optional argument to TopKV2.
53037type TopKV2Attr func(optionalAttr)
53038
53039// TopKV2Sorted sets the optional sorted attribute to value.
53040//
53041// value: If true the resulting `k` elements will be sorted by the values in
53042// descending order.
53043// If not specified, defaults to true
53044func TopKV2Sorted(value bool) TopKV2Attr {
53045	return func(m optionalAttr) {
53046		m["sorted"] = value
53047	}
53048}
53049
53050// Finds values and indices of the `k` largest elements for the last dimension.
53051//
53052// If the input is a vector (rank-1), finds the `k` largest entries in the vector
53053// and outputs their values and indices as vectors.  Thus `values[j]` is the
53054// `j`-th largest entry in `input`, and its index is `indices[j]`.
53055//
53056// For matrices (resp. higher rank input), computes the top `k` entries in each
53057// row (resp. vector along the last dimension).  Thus,
53058//
53059//	values.shape = indices.shape = input.shape[:-1] + [k]
53060//
53061// If two elements are equal, the lower-index element appears first.
53062//
53063// Arguments:
53064//
53065//	input: 1-D or higher with last dimension at least `k`.
53066//	k: 0-D.  Number of top elements to look for along the last dimension (along each
53067//
53068// row for matrices).
53069//
53070// Returns:
53071//
53072//	values: The `k` largest elements along each last dimensional slice.
53073//	indices: The indices of `values` within the last dimension of `input`.
53074func TopKV2(scope *Scope, input tf.Output, k tf.Output, optional ...TopKV2Attr) (values tf.Output, indices tf.Output) {
53075	if scope.Err() != nil {
53076		return
53077	}
53078	attrs := map[string]interface{}{}
53079	for _, a := range optional {
53080		a(attrs)
53081	}
53082	opspec := tf.OpSpec{
53083		Type: "TopKV2",
53084		Input: []tf.Input{
53085			input, k,
53086		},
53087		Attrs: attrs,
53088	}
53089	op := scope.AddOperation(opspec)
53090	return op.Output(0), op.Output(1)
53091}
53092
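// The sketch below is illustrative and not part of the generated API: unlike
// TopK, the k of TopKV2 is itself a tensor, so it can be computed inside the
// graph rather than fixed at graph-construction time.
func exampleTopKV2(s *Scope) (values, indices tf.Output) {
	input := Const(s.SubScope("input"), []float32{3, 1, 4, 1, 5})
	k := Const(s.SubScope("k"), int32(2))
	// With the default sorted=true: values = [5, 4], indices = [4, 2].
	return TopKV2(s, input, k, TopKV2Sorted(true))
}
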
53093// Returns the TopK values in the array in sorted order.
53094//
53095// This is a combination of MakeUnique and TopKUnique. The returned top-K will
53096// have its lower bits replaced by iota, thus it will be close to the original
53097// value but not exactly the same. The running time is proportional to the product
53098// of K and the input size. NaNs are never returned. Subnormal numbers are flushed
53099// to zero.
53100func TopKWithUnique(scope *Scope, input tf.Output, k int64) (topk tf.Output, topk_indices tf.Output) {
53101	if scope.Err() != nil {
53102		return
53103	}
53104	attrs := map[string]interface{}{"k": k}
53105	opspec := tf.OpSpec{
53106		Type: "TopKWithUnique",
53107		Input: []tf.Input{
53108			input,
53109		},
53110		Attrs: attrs,
53111	}
53112	op := scope.AddOperation(opspec)
53113	return op.Output(0), op.Output(1)
53114}
53115
53116// Converts XRT's uid handles to TensorFlow-friendly input format.
53117//
53118// Converts a uid handle for a compiled program into a vector of proto keys.
53119//
53120// XRT compile ops return uids, and the TensorFlow execute op takes a proto
53121// key. This op enables a client to compile on TPU using XRT and execute using the
53122// standard TensorFlow execute op.
53123//
53124// 'uid' is the input handle.
53125// 'proto_keys' is a vector of proto keys, one for each core program.
53126func TpuHandleToProtoKey(scope *Scope, uid tf.Output) (proto_keys tf.Output) {
53127	if scope.Err() != nil {
53128		return
53129	}
53130	opspec := tf.OpSpec{
53131		Type: "TpuHandleToProtoKey",
53132		Input: []tf.Input{
53133			uid,
53134		},
53135	}
53136	op := scope.AddOperation(opspec)
53137	return op.Output(0)
53138}
53139
53140// Shuffle dimensions of x according to a permutation.
53141//
53142// The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
53143//
53144//	`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
53145func Transpose(scope *Scope, x tf.Output, perm tf.Output) (y tf.Output) {
53146	if scope.Err() != nil {
53147		return
53148	}
53149	opspec := tf.OpSpec{
53150		Type: "Transpose",
53151		Input: []tf.Input{
53152			x, perm,
53153		},
53154	}
53155	op := scope.AddOperation(opspec)
53156	return op.Output(0)
53157}
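
// Editor's note: an illustrative sketch of the perm semantics above (not part
// of the generated API), assuming the op and tf packages from this repository.
//
//	s := op.NewScope()
//	x := op.Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}}) // shape [2 3]
//	perm := op.Const(s, []int32{1, 0})
//	y := op.Transpose(s, x, perm)
//	// y.shape[i] == x.shape[perm[i]], so y has shape [3 2] and value
//	// [[1 4] [2 5] [3 6]].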
53158
53159// Calculate product with tridiagonal matrix.
53160//
53161// Calculates product of two matrices, where left matrix is a tridiagonal matrix.
53162//
53163// Arguments:
53164//
53165//	superdiag: Tensor of shape `[..., 1, M]`, representing superdiagonals of
53166//
53167// tri-diagonal matrices to the left of multiplication. Last element is ignored.
53168//
53169//	maindiag: Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal
53170//
53171// matrices to the left of multiplication.
53172//
53173//	subdiag: Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal
53174//
53175// matrices to the left of multiplication. First element is ignored.
53176//
53177//	rhs: Tensor of shape `[..., M, N]`, representing MxN matrices to the right of
53178//
53179// multiplication.
53180//
53181// Returns Tensor of shape `[..., M, N]` containing the product.
53182func TridiagonalMatMul(scope *Scope, superdiag tf.Output, maindiag tf.Output, subdiag tf.Output, rhs tf.Output) (output tf.Output) {
53183	if scope.Err() != nil {
53184		return
53185	}
53186	opspec := tf.OpSpec{
53187		Type: "TridiagonalMatMul",
53188		Input: []tf.Input{
53189			superdiag, maindiag, subdiag, rhs,
53190		},
53191	}
53192	op := scope.AddOperation(opspec)
53193	return op.Output(0)
53194}
53195
53196// TridiagonalSolveAttr is an optional argument to TridiagonalSolve.
53197type TridiagonalSolveAttr func(optionalAttr)
53198
53199// TridiagonalSolvePartialPivoting sets the optional partial_pivoting attribute to value.
53200//
53201// value: Whether to apply partial pivoting. Partial pivoting makes the procedure more
53202// stable, but slower.
53203// If not specified, defaults to true
53204func TridiagonalSolvePartialPivoting(value bool) TridiagonalSolveAttr {
53205	return func(m optionalAttr) {
53206		m["partial_pivoting"] = value
53207	}
53208}
53209
53210// TridiagonalSolvePerturbSingular sets the optional perturb_singular attribute to value.
53211// If not specified, defaults to false
53212func TridiagonalSolvePerturbSingular(value bool) TridiagonalSolveAttr {
53213	return func(m optionalAttr) {
53214		m["perturb_singular"] = value
53215	}
53216}
53217
53218// Solves tridiagonal systems of equations.
53219//
53220//	Supports batch dimensions and multiple right-hand sides per left-hand
53221//	side.
53222//	On CPU, the solution is computed via Gaussian elimination, with or without
53223//	partial pivoting depending on the `partial_pivoting` attribute. On GPU,
53224//	Nvidia's cuSPARSE library is used:
53225//	https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
53226//	Partial pivoting is not yet supported by XLA backends.
53227//
53228// Arguments:
53229//
53230//	diagonals: Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
53231//
53232// tridiagonal matrices with three rows being the superdiagonal, diagonals, and
53233// subdiagonals, in order. The last element of the superdiagonal and the first
53234// element of the subdiagonal are ignored.
53235//
53236//	rhs: Tensor of shape `[..., M, K]`, representing K right-hand sides for each
53237//
53238// left-hand side.
53239//
53240// Returns Tensor of shape `[..., M, K]` containing the solutions.
53241func TridiagonalSolve(scope *Scope, diagonals tf.Output, rhs tf.Output, optional ...TridiagonalSolveAttr) (output tf.Output) {
53242	if scope.Err() != nil {
53243		return
53244	}
53245	attrs := map[string]interface{}{}
53246	for _, a := range optional {
53247		a(attrs)
53248	}
53249	opspec := tf.OpSpec{
53250		Type: "TridiagonalSolve",
53251		Input: []tf.Input{
53252			diagonals, rhs,
53253		},
53254		Attrs: attrs,
53255	}
53256	op := scope.AddOperation(opspec)
53257	return op.Output(0)
53258}
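
// Editor's note: an illustrative sketch of TridiagonalSolve and its
// functional-option attributes (not part of the generated API), assuming the
// op and tf packages from this repository. The rows of `diagonals` are the
// superdiagonal, main diagonal, and subdiagonal; ignored entries are written
// as 0.
//
//	s := op.NewScope()
//	diagonals := op.Const(s, [][]float32{
//		{1, 1, 0}, // superdiagonal (last element ignored)
//		{2, 2, 2}, // main diagonal
//		{0, 1, 1}, // subdiagonal (first element ignored)
//	})
//	rhs := op.Const(s, [][]float32{{1}, {1}, {1}})
//	x := op.TridiagonalSolve(s, diagonals, rhs,
//		op.TridiagonalSolvePartialPivoting(false))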
53259
53260// Returns x / y element-wise for integer types.
53261//
53262// Truncation designates that negative numbers will round fractional quantities
53263// toward zero. I.e. -7 / 5 = -1. This matches C semantics, but it is different
53264// from Python semantics. See `FloorDiv` for a division function that matches
53265// Python semantics.
53266//
53267// *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
53268// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
53269func TruncateDiv(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
53270	if scope.Err() != nil {
53271		return
53272	}
53273	opspec := tf.OpSpec{
53274		Type: "TruncateDiv",
53275		Input: []tf.Input{
53276			x, y,
53277		},
53278	}
53279	op := scope.AddOperation(opspec)
53280	return op.Output(0)
53281}
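
// Editor's note: an illustrative sketch of the truncating semantics above
// (not part of the generated API), assuming the op and tf packages from this
// repository.
//
//	s := op.NewScope()
//	x := op.Const(s, []int32{-7, 7})
//	y := op.Const(s, []int32{5, 5})
//	z := op.TruncateDiv(s, x, y)
//	// Running the graph would yield z == [-1 1]; Python's floor division
//	// would give -2 for the first element instead.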
53282
53283// Returns element-wise remainder of division. This emulates C semantics in that
53284//
53285// the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
53286// y + truncate_mod(x, y) = x`.
53287//
53288// *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
53289// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
53290func TruncateMod(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
53291	if scope.Err() != nil {
53292		return
53293	}
53294	opspec := tf.OpSpec{
53295		Type: "TruncateMod",
53296		Input: []tf.Input{
53297			x, y,
53298		},
53299	}
53300	op := scope.AddOperation(opspec)
53301	return op.Output(0)
53302}
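
// Editor's note: a worked instance of the identity above (not part of the
// generated API). For x = -7 and y = 5, C-style truncation gives
//
//	truncate(x / y) = -1, truncate_mod(x, y) = -2, and
//	truncate(x / y) * y + truncate_mod(x, y) = (-1)*5 + (-2) = -7 = x.
//
// The remainder takes the sign of x, unlike Python's %, which yields 3 here.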
53303
53304// TruncatedNormalAttr is an optional argument to TruncatedNormal.
53305type TruncatedNormalAttr func(optionalAttr)
53306
53307// TruncatedNormalSeed sets the optional seed attribute to value.
53308//
53309// value: If either `seed` or `seed2` are set to be non-zero, the random number
53310// generator is seeded by the given seed.  Otherwise, it is seeded by a
53311// random seed.
53312// If not specified, defaults to 0
53313func TruncatedNormalSeed(value int64) TruncatedNormalAttr {
53314	return func(m optionalAttr) {
53315		m["seed"] = value
53316	}
53317}
53318
53319// TruncatedNormalSeed2 sets the optional seed2 attribute to value.
53320//
53321// value: A second seed to avoid seed collision.
53322// If not specified, defaults to 0
53323func TruncatedNormalSeed2(value int64) TruncatedNormalAttr {
53324	return func(m optionalAttr) {
53325		m["seed2"] = value
53326	}
53327}
53328
53329// Outputs random values from a truncated normal distribution.
53330//
53331// The generated values follow a normal distribution with mean 0 and standard
53332// deviation 1, except that values whose magnitude is more than 2 standard
53333// deviations from the mean are dropped and re-picked.
53334//
53335// Arguments:
53336//
53337//	shape: The shape of the output tensor.
53338//	dtype: The type of the output.
53339//
53340// Returns A tensor of the specified shape filled with random truncated normal
53341// values.
53342func TruncatedNormal(scope *Scope, shape tf.Output, dtype tf.DataType, optional ...TruncatedNormalAttr) (output tf.Output) {
53343	if scope.Err() != nil {
53344		return
53345	}
53346	attrs := map[string]interface{}{"dtype": dtype}
53347	for _, a := range optional {
53348		a(attrs)
53349	}
53350	opspec := tf.OpSpec{
53351		Type: "TruncatedNormal",
53352		Input: []tf.Input{
53353			shape,
53354		},
53355		Attrs: attrs,
53356	}
53357	op := scope.AddOperation(opspec)
53358	return op.Output(0)
53359}
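
// Editor's note: an illustrative sketch of TruncatedNormal with fixed seeds
// (not part of the generated API), assuming the op and tf packages from this
// repository. Setting either seed to a non-zero value makes the sequence
// deterministic across runs.
//
//	s := op.NewScope()
//	shape := op.Const(s, []int32{2, 3})
//	samples := op.TruncatedNormal(s, shape, tf.Float,
//		op.TruncatedNormalSeed(42), op.TruncatedNormalSeed2(7))
//	// samples holds float32 draws from N(0, 1), re-drawn whenever a value
//	// falls more than 2 standard deviations from the mean.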
53360
53361// UnbatchAttr is an optional argument to Unbatch.
53362type UnbatchAttr func(optionalAttr)
53363
53364// UnbatchContainer sets the optional container attribute to value.
53365// If not specified, defaults to ""
53366func UnbatchContainer(value string) UnbatchAttr {
53367	return func(m optionalAttr) {
53368		m["container"] = value
53369	}
53370}
53371
53372// UnbatchSharedName sets the optional shared_name attribute to value.
53373// If not specified, defaults to ""
53374func UnbatchSharedName(value string) UnbatchAttr {
53375	return func(m optionalAttr) {
53376		m["shared_name"] = value
53377	}
53378}
53379
53380// Reverses the operation of Batch for a single output Tensor.
53381//
53382// An instance of Unbatch either receives an empty batched_tensor, in which case it
53383// asynchronously waits until the values become available from a concurrently
53384// running instance of Unbatch with the same container and shared_name, or receives
53385// a non-empty batched_tensor in which case it finalizes all other concurrently
53386// running instances and outputs its own element from the batch.
53387//
53388// batched_tensor: The possibly transformed output of Batch. The size of the first
53389//
53390//	dimension should remain unchanged by the transformations for the operation to
53391//	work.
53392//
53393// batch_index: The matching batch_index obtained from Batch.
53394// id: The id scalar emitted by Batch.
53395// unbatched_tensor: The Tensor corresponding to this execution.
53396// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
53397//
53398//	batched input tensor associated with a given invocation of the op.
53399//
53400// container: Container to control resource sharing.
53401// shared_name: Instances of Unbatch with the same container and shared_name are
53402//
53403//	assumed to possibly belong to the same batch. If left empty, the op name will
53404//	be used as the shared name.
53405func Unbatch(scope *Scope, batched_tensor tf.Output, batch_index tf.Output, id tf.Output, timeout_micros int64, optional ...UnbatchAttr) (unbatched_tensor tf.Output) {
53406	if scope.Err() != nil {
53407		return
53408	}
53409	attrs := map[string]interface{}{"timeout_micros": timeout_micros}
53410	for _, a := range optional {
53411		a(attrs)
53412	}
53413	opspec := tf.OpSpec{
53414		Type: "Unbatch",
53415		Input: []tf.Input{
53416			batched_tensor, batch_index, id,
53417		},
53418		Attrs: attrs,
53419	}
53420	op := scope.AddOperation(opspec)
53421	return op.Output(0)
53422}
53423
53424// UnbatchDatasetAttr is an optional argument to UnbatchDataset.
53425type UnbatchDatasetAttr func(optionalAttr)
53426
53427// UnbatchDatasetMetadata sets the optional metadata attribute to value.
53428// If not specified, defaults to ""
53429func UnbatchDatasetMetadata(value string) UnbatchDatasetAttr {
53430	return func(m optionalAttr) {
53431		m["metadata"] = value
53432	}
53433}
53434
53435// A dataset that splits the elements of its input into multiple elements.
53436func UnbatchDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...UnbatchDatasetAttr) (handle tf.Output) {
53437	if scope.Err() != nil {
53438		return
53439	}
53440	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
53441	for _, a := range optional {
53442		a(attrs)
53443	}
53444	opspec := tf.OpSpec{
53445		Type: "UnbatchDataset",
53446		Input: []tf.Input{
53447			input_dataset,
53448		},
53449		Attrs: attrs,
53450	}
53451	op := scope.AddOperation(opspec)
53452	return op.Output(0)
53453}
53454
53455// UnbatchGradAttr is an optional argument to UnbatchGrad.
53456type UnbatchGradAttr func(optionalAttr)
53457
53458// UnbatchGradContainer sets the optional container attribute to value.
53459// If not specified, defaults to ""
53460func UnbatchGradContainer(value string) UnbatchGradAttr {
53461	return func(m optionalAttr) {
53462		m["container"] = value
53463	}
53464}
53465
53466// UnbatchGradSharedName sets the optional shared_name attribute to value.
53467// If not specified, defaults to ""
53468func UnbatchGradSharedName(value string) UnbatchGradAttr {
53469	return func(m optionalAttr) {
53470		m["shared_name"] = value
53471	}
53472}
53473
53474// Gradient of Unbatch.
53475//
53476// Acts like Batch, but uses the given batch_index to place elements in the batch
53477// as they become available. This ensures that the gradients are propagated back
53478// in the same session that performed the forward pass.
53479//
53480// original_input: The input to the Unbatch operation this is the gradient of.
53481// batch_index: The batch_index given to the Unbatch operation this is the gradient
53482// of.
53483// grad: The downstream gradient.
53484// id: The id scalar emitted by Batch.
53485// batched_grad: The return value, either an empty tensor or the batched gradient.
53486// container: Container to control resource sharing.
53487// shared_name: Instances of UnbatchGrad with the same container and shared_name
53488//
53489//	are assumed to possibly belong to the same batch. If left empty, the op name
53490//	will be used as the shared name.
53491func UnbatchGrad(scope *Scope, original_input tf.Output, batch_index tf.Output, grad tf.Output, id tf.Output, optional ...UnbatchGradAttr) (batched_grad tf.Output) {
53492	if scope.Err() != nil {
53493		return
53494	}
53495	attrs := map[string]interface{}{}
53496	for _, a := range optional {
53497		a(attrs)
53498	}
53499	opspec := tf.OpSpec{
53500		Type: "UnbatchGrad",
53501		Input: []tf.Input{
53502			original_input, batch_index, grad, id,
53503		},
53504		Attrs: attrs,
53505	}
53506	op := scope.AddOperation(opspec)
53507	return op.Output(0)
53508}
53509
53510// Uncompresses a compressed dataset element.
53511func UncompressElement(scope *Scope, compressed tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (components []tf.Output) {
53512	if scope.Err() != nil {
53513		return
53514	}
53515	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
53516	opspec := tf.OpSpec{
53517		Type: "UncompressElement",
53518		Input: []tf.Input{
53519			compressed,
53520		},
53521		Attrs: attrs,
53522	}
53523	op := scope.AddOperation(opspec)
53524	if scope.Err() != nil {
53525		return
53526	}
53527	var idx int
53528	var err error
53529	if components, idx, err = makeOutputList(op, idx, "components"); err != nil {
53530		scope.UpdateErr("UncompressElement", err)
53531		return
53532	}
53533	return components
53534}
53535
53536// UnicodeDecodeAttr is an optional argument to UnicodeDecode.
53537type UnicodeDecodeAttr func(optionalAttr)
53538
53539// UnicodeDecodeErrors sets the optional errors attribute to value.
53540//
53541// value: Error handling policy when there is invalid formatting found in the input.
53542// The value of 'strict' will cause the operation to produce an InvalidArgument
53543// error on any invalid input formatting. A value of 'replace' (the default) will
53544// cause the operation to replace any invalid formatting in the input with the
53545// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
53546// skip any invalid formatting in the input and produce no corresponding output
53547// character.
53548// If not specified, defaults to "replace"
53549func UnicodeDecodeErrors(value string) UnicodeDecodeAttr {
53550	return func(m optionalAttr) {
53551		m["errors"] = value
53552	}
53553}
53554
53555// UnicodeDecodeReplacementChar sets the optional replacement_char attribute to value.
53556//
53557// value: The replacement character codepoint to be used in place of any invalid
53558// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
53559// be used. The default value is the default Unicode replacement character,
53560// 0xFFFD (decimal 65533).
53561// If not specified, defaults to 65533
53562func UnicodeDecodeReplacementChar(value int64) UnicodeDecodeAttr {
53563	return func(m optionalAttr) {
53564		m["replacement_char"] = value
53565	}
53566}
53567
53568// UnicodeDecodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
53569//
53570// value: Whether to replace the C0 control characters (00-1F) with the
53571// `replacement_char`. Default is false.
53572// If not specified, defaults to false
53573func UnicodeDecodeReplaceControlCharacters(value bool) UnicodeDecodeAttr {
53574	return func(m optionalAttr) {
53575		m["replace_control_characters"] = value
53576	}
53577}
53578
53579// UnicodeDecodeTsplits sets the optional Tsplits attribute to value.
53580// If not specified, defaults to DT_INT64
53581func UnicodeDecodeTsplits(value tf.DataType) UnicodeDecodeAttr {
53582	return func(m optionalAttr) {
53583		m["Tsplits"] = value
53584	}
53585}
53586
53587// Decodes each string in `input` into a sequence of Unicode code points.
53588//
53589// The character codepoints for all strings are returned using a single vector
53590// `char_values`, with strings expanded to characters in row-major order.
53591//
53592// The `row_splits` tensor indicates where the codepoints for
53593// each input string begin and end within the `char_values` tensor.
53594// In particular, the values for the `i`th
53595// string (in row-major order) are stored in the slice
53596// `[row_splits[i]:row_splits[i+1]]`. Thus:
53597//
53598//   - `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
53599//     character in the `i`th string (in row-major order).
53600//   - `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
53601//     string (in row-major order).
53602//
53603// Arguments:
53604//
53605//	input: The text to be decoded. Can have any shape. Note that the output is flattened
53606//
53607// to a vector of char values.
53608//
53609//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
53610//
53611// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
53612//
53613// Returns:
53614//
53615//	row_splits: A 1D int32 tensor containing the row splits.
53616//	char_values: A 1D int32 Tensor containing the decoded codepoints.
53617func UnicodeDecode(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeAttr) (row_splits tf.Output, char_values tf.Output) {
53618	if scope.Err() != nil {
53619		return
53620	}
53621	attrs := map[string]interface{}{"input_encoding": input_encoding}
53622	for _, a := range optional {
53623		a(attrs)
53624	}
53625	opspec := tf.OpSpec{
53626		Type: "UnicodeDecode",
53627		Input: []tf.Input{
53628			input,
53629		},
53630		Attrs: attrs,
53631	}
53632	op := scope.AddOperation(opspec)
53633	return op.Output(0), op.Output(1)
53634}
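
// Editor's note: an illustrative sketch of the row_splits layout described
// above (not part of the generated API), assuming the op and tf packages from
// this repository.
//
//	s := op.NewScope()
//	input := op.Const(s, []string{"Hi", "¢"})
//	rowSplits, charValues := op.UnicodeDecode(s, input, "UTF-8")
//	// Running the graph would yield row_splits == [0 2 3] and
//	// char_values == [72 105 162] ('H', 'i', '¢'); the slice
//	// char_values[row_splits[1]:row_splits[2]] holds the second string.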
53635
53636// UnicodeDecodeWithOffsetsAttr is an optional argument to UnicodeDecodeWithOffsets.
53637type UnicodeDecodeWithOffsetsAttr func(optionalAttr)
53638
53639// UnicodeDecodeWithOffsetsErrors sets the optional errors attribute to value.
53640//
53641// value: Error handling policy when there is invalid formatting found in the input.
53642// The value of 'strict' will cause the operation to produce an InvalidArgument
53643// error on any invalid input formatting. A value of 'replace' (the default) will
53644// cause the operation to replace any invalid formatting in the input with the
53645// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
53646// skip any invalid formatting in the input and produce no corresponding output
53647// character.
53648// If not specified, defaults to "replace"
53649func UnicodeDecodeWithOffsetsErrors(value string) UnicodeDecodeWithOffsetsAttr {
53650	return func(m optionalAttr) {
53651		m["errors"] = value
53652	}
53653}
53654
53655// UnicodeDecodeWithOffsetsReplacementChar sets the optional replacement_char attribute to value.
53656//
53657// value: The replacement character codepoint to be used in place of any invalid
53658// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
53659// be used. The default value is the default Unicode replacement character,
53660// 0xFFFD (decimal 65533).
53661// If not specified, defaults to 65533
53662func UnicodeDecodeWithOffsetsReplacementChar(value int64) UnicodeDecodeWithOffsetsAttr {
53663	return func(m optionalAttr) {
53664		m["replacement_char"] = value
53665	}
53666}
53667
53668// UnicodeDecodeWithOffsetsReplaceControlCharacters sets the optional replace_control_characters attribute to value.
53669//
53670// value: Whether to replace the C0 control characters (00-1F) with the
53671// `replacement_char`. Default is false.
53672// If not specified, defaults to false
53673func UnicodeDecodeWithOffsetsReplaceControlCharacters(value bool) UnicodeDecodeWithOffsetsAttr {
53674	return func(m optionalAttr) {
53675		m["replace_control_characters"] = value
53676	}
53677}
53678
53679// UnicodeDecodeWithOffsetsTsplits sets the optional Tsplits attribute to value.
53680// If not specified, defaults to DT_INT64
53681func UnicodeDecodeWithOffsetsTsplits(value tf.DataType) UnicodeDecodeWithOffsetsAttr {
53682	return func(m optionalAttr) {
53683		m["Tsplits"] = value
53684	}
53685}
53686
53687// Decodes each string in `input` into a sequence of Unicode code points.
53688//
53689// The character codepoints for all strings are returned using a single vector
53690// `char_values`, with strings expanded to characters in row-major order.
53691// Similarly, the character start byte offsets are returned using a single vector
53692// `char_to_byte_starts`, with strings expanded in row-major order.
53693//
53694// The `row_splits` tensor indicates where the codepoints and start offsets for
53695// each input string begin and end within the `char_values` and
53696// `char_to_byte_starts` tensors.  In particular, the values for the `i`th
53697// string (in row-major order) are stored in the slice
53698// `[row_splits[i]:row_splits[i+1]]`. Thus:
53699//
53700//   - `char_values[row_splits[i]+j]` is the Unicode codepoint for the `j`th
53701//     character in the `i`th string (in row-major order).
53702//   - `char_to_bytes_starts[row_splits[i]+j]` is the start byte offset for the `j`th
53703//     character in the `i`th string (in row-major order).
53704//   - `row_splits[i+1] - row_splits[i]` is the number of characters in the `i`th
53705//     string (in row-major order).
53706//
53707// Arguments:
53708//
53709//	input: The text to be decoded. Can have any shape. Note that the output is flattened
53710//
53711// to a vector of char values.
53712//
53713//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
53714//
53715// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
53716//
53717// Returns:
53718//
53719//	row_splits: A 1D int32 tensor containing the row splits.
53720//	char_values: A 1D int32 Tensor containing the decoded codepoints.
53721//	char_to_byte_starts: A 1D int32 Tensor containing the byte index in the input string where each
53722//
53723// character in `char_values` starts.
53724func UnicodeDecodeWithOffsets(scope *Scope, input tf.Output, input_encoding string, optional ...UnicodeDecodeWithOffsetsAttr) (row_splits tf.Output, char_values tf.Output, char_to_byte_starts tf.Output) {
53725	if scope.Err() != nil {
53726		return
53727	}
53728	attrs := map[string]interface{}{"input_encoding": input_encoding}
53729	for _, a := range optional {
53730		a(attrs)
53731	}
53732	opspec := tf.OpSpec{
53733		Type: "UnicodeDecodeWithOffsets",
53734		Input: []tf.Input{
53735			input,
53736		},
53737		Attrs: attrs,
53738	}
53739	op := scope.AddOperation(opspec)
53740	return op.Output(0), op.Output(1), op.Output(2)
53741}
53742
53743// UnicodeEncodeAttr is an optional argument to UnicodeEncode.
53744type UnicodeEncodeAttr func(optionalAttr)
53745
53746// UnicodeEncodeErrors sets the optional errors attribute to value.
53747//
53748// value: Error handling policy when there is invalid formatting found in the input.
53749// The value of 'strict' will cause the operation to produce an InvalidArgument
53750// error on any invalid input formatting. A value of 'replace' (the default) will
53751// cause the operation to replace any invalid formatting in the input with the
53752// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
53753// skip any invalid formatting in the input and produce no corresponding output
53754// character.
53755// If not specified, defaults to "replace"
53756func UnicodeEncodeErrors(value string) UnicodeEncodeAttr {
53757	return func(m optionalAttr) {
53758		m["errors"] = value
53759	}
53760}
53761
53762// UnicodeEncodeReplacementChar sets the optional replacement_char attribute to value.
53763//
53764// value: The replacement character codepoint to be used in place of any invalid
53765// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
53766// be used. The default value is the default Unicode replacement character,
53767// 0xFFFD (decimal 65533).
53768// If not specified, defaults to 65533
53769func UnicodeEncodeReplacementChar(value int64) UnicodeEncodeAttr {
53770	return func(m optionalAttr) {
53771		m["replacement_char"] = value
53772	}
53773}
53774
53775// Encode a tensor of ints into unicode strings.
53776//
53777// Returns a vector of strings, where `output[i]` is constructed by encoding the
53778// Unicode codepoints in `input_values[input_splits[i]:input_splits[i+1]]`
53779// using `output_encoding`.
53780//
53781// ---
53782//
53783// Example:
53784//
53785// ```
53786// input_values = [72, 101, 108, 108, 111, 87, 111, 114, 108, 100]
53787// input_splits = [0, 5, 10]
53788// output_encoding = 'UTF-8'
53789//
53790// output = ['Hello', 'World']
53791// ```
53792//
53793// Arguments:
53794//
53795//	input_values: A 1D tensor containing the unicode codepoints that should be encoded.
53796//	input_splits: A 1D tensor specifying how the unicode codepoints should be split into strings.
53797//
53798// In particular, `output[i]` is constructed by encoding the codepoints in the
53799// slice `input_values[input_splits[i]:input_splits[i+1]]`.
53800//
53801//	output_encoding: Unicode encoding of the output strings. Valid encodings are: `"UTF-8",
53802//
53803// "UTF-16-BE", and "UTF-32-BE"`.
53804//
53805// Returns The 1-D Tensor of strings encoded from the provided unicode codepoints.
53806func UnicodeEncode(scope *Scope, input_values tf.Output, input_splits tf.Output, output_encoding string, optional ...UnicodeEncodeAttr) (output tf.Output) {
53807	if scope.Err() != nil {
53808		return
53809	}
53810	attrs := map[string]interface{}{"output_encoding": output_encoding}
53811	for _, a := range optional {
53812		a(attrs)
53813	}
53814	opspec := tf.OpSpec{
53815		Type: "UnicodeEncode",
53816		Input: []tf.Input{
53817			input_values, input_splits,
53818		},
53819		Attrs: attrs,
53820	}
53821	op := scope.AddOperation(opspec)
53822	return op.Output(0)
53823}
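
// Editor's note: an illustrative sketch mirroring the example above (not part
// of the generated API), assuming the op and tf packages from this repository.
//
//	s := op.NewScope()
//	values := op.Const(s, []int32{72, 101, 108, 108, 111, 87, 111, 114, 108, 100})
//	splits := op.Const(s, []int64{0, 5, 10})
//	out := op.UnicodeEncode(s, values, splits, "UTF-8")
//	// Running the graph would yield ["Hello" "World"].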
53824
53825// Determine the script codes of a given tensor of Unicode integer code points.
53826//
53827// This operation converts Unicode code points to script codes corresponding to
53828// each code point. Script codes correspond to International Components for
53829// Unicode (ICU) UScriptCode values.
53830//
53831// See
53832// [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html)
53833// for more details on script codes.
53834//
53835// For an example, see the unicode strings guide on [unicode scripts]
53836// (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode).
53837//
53838// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will
53839// match input shape.
53840//
53841// Examples:
53842//
53843// >>> tf.strings.unicode_script([1, 31, 38])
53844// <tf.Tensor: shape=(3,), dtype=int32, numpy=array([0, 0, 0], dtype=int32)>
53845//
53846// Arguments:
53847//
53848//	input: A Tensor of int32 Unicode code points.
53849//
53850// Returns A Tensor of int32 script codes corresponding to each input code point.
53851func UnicodeScript(scope *Scope, input tf.Output) (output tf.Output) {
53852	if scope.Err() != nil {
53853		return
53854	}
53855	opspec := tf.OpSpec{
53856		Type: "UnicodeScript",
53857		Input: []tf.Input{
53858			input,
53859		},
53860	}
53861	op := scope.AddOperation(opspec)
53862	return op.Output(0)
53863}
53864
53865// UnicodeTranscodeAttr is an optional argument to UnicodeTranscode.
53866type UnicodeTranscodeAttr func(optionalAttr)
53867
53868// UnicodeTranscodeErrors sets the optional errors attribute to value.
53869//
53870// value: Error handling policy when there is invalid formatting found in the input.
53871// The value of 'strict' will cause the operation to produce an InvalidArgument
53872// error on any invalid input formatting. A value of 'replace' (the default) will
53873// cause the operation to replace any invalid formatting in the input with the
53874// `replacement_char` codepoint. A value of 'ignore' will cause the operation to
53875// skip any invalid formatting in the input and produce no corresponding output
53876// character.
53877// If not specified, defaults to "replace"
53878func UnicodeTranscodeErrors(value string) UnicodeTranscodeAttr {
53879	return func(m optionalAttr) {
53880		m["errors"] = value
53881	}
53882}
53883
53884// UnicodeTranscodeReplacementChar sets the optional replacement_char attribute to value.
53885//
53886// value: The replacement character codepoint to be used in place of any invalid
53887// formatting in the input when `errors='replace'`. Any valid unicode codepoint may
53888// be used. The default value is the default Unicode replacement character,
53889// 0xFFFD (decimal 65533).
53890//
53891// Note that for UTF-8, passing a replacement character expressible in 1 byte, such
53892// as ' ', will preserve string alignment to the source since invalid bytes will be
53893// replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte
53894// replacement character will preserve byte alignment to the source.
53895// If not specified, defaults to 65533
53896func UnicodeTranscodeReplacementChar(value int64) UnicodeTranscodeAttr {
53897	return func(m optionalAttr) {
53898		m["replacement_char"] = value
53899	}
53900}
53901
53902// UnicodeTranscodeReplaceControlCharacters sets the optional replace_control_characters attribute to value.
53903//
53904// value: Whether to replace the C0 control characters (00-1F) with the
53905// `replacement_char`. Default is false.
53906// If not specified, defaults to false
53907func UnicodeTranscodeReplaceControlCharacters(value bool) UnicodeTranscodeAttr {
53908	return func(m optionalAttr) {
53909		m["replace_control_characters"] = value
53910	}
53911}
53912
53913// Transcode the input text from a source encoding to a destination encoding.
53914//
53915// The input is a string tensor of any shape. The output is a string tensor of
53916// the same shape containing the transcoded strings. Output strings are always
53917// valid unicode. If the input contains invalid encoding positions, the
53918// `errors` attribute sets the policy for how to deal with them. If the default
53919// error-handling policy is used, invalid formatting will be substituted in the
53920// output by the `replacement_char`. If the errors policy is to `ignore`, any
53921// invalid encoding positions in the input are skipped and not included in the
53922// output. If it is set to `strict`, then any invalid formatting will result in an
53923// InvalidArgument error.
53924//
53925// This operation can be used with `output_encoding = input_encoding` to enforce
53926// correct formatting for inputs even if they are already in the desired encoding.
53927//
53928// If the input is prefixed by a Byte Order Mark needed to determine encoding
53929// (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that
53930// BOM will be consumed and not emitted into the output. If the input encoding
53931// is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is
53932// interpreted as a non-breaking-space and is preserved in the output (including
53933// always for UTF-8).
53934//
53935// The end result is that if the input is marked as an explicit endianness the
53936// transcoding is faithful to all codepoints in the source. If it is not marked
53937// with an explicit endianness, the BOM is not considered part of the string itself
53938// but as metadata, and so is not preserved in the output.
53939//
53940// Examples:
53941//
53942// >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE")
53943// <tf.Tensor: shape=(3,), dtype=string, numpy=
53944// array([b'\x00H\x00e\x00l\x00l\x00o',
53945//
53946//	b'\x00T\x00e\x00n\x00s\x00o\x00r\x00F\x00l\x00o\x00w',
53947//	b'\x002\x00.\x00x'], dtype=object)>
53948//
53949// >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy()
53950// array([b'A', b'B', b'C'], dtype=object)
53951//
53952// Arguments:
53953//
53954//	input: The text to be processed. Can have any shape.
53955//	input_encoding: Text encoding of the input strings. This is any of the encodings supported
53956//
53957// by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`.
53958//
53959//	output_encoding: The unicode encoding to use in the output. Must be one of
53960//
53961// `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian.
53962//
53963// Returns A string tensor containing unicode text encoded using `output_encoding`.
53964func UnicodeTranscode(scope *Scope, input tf.Output, input_encoding string, output_encoding string, optional ...UnicodeTranscodeAttr) (output tf.Output) {
53965	if scope.Err() != nil {
53966		return
53967	}
53968	attrs := map[string]interface{}{"input_encoding": input_encoding, "output_encoding": output_encoding}
53969	for _, a := range optional {
53970		a(attrs)
53971	}
53972	opspec := tf.OpSpec{
53973		Type: "UnicodeTranscode",
53974		Input: []tf.Input{
53975			input,
53976		},
53977		Attrs: attrs,
53978	}
53979	op := scope.AddOperation(opspec)
53980	return op.Output(0)
53981}
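
// Editor's note: an illustrative sketch matching the first example above (not
// part of the generated API), assuming the op and tf packages from this
// repository.
//
//	s := op.NewScope()
//	input := op.Const(s, []string{"Hello", "TensorFlow", "2.x"})
//	out := op.UnicodeTranscode(s, input, "UTF-8", "UTF-16-BE",
//		op.UnicodeTranscodeErrors("strict"))
//	// Each output element holds the UTF-16-BE bytes of its input, e.g.
//	// "Hello" -> "\x00H\x00e\x00l\x00l\x00o".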
53982
53983// UniformCandidateSamplerAttr is an optional argument to UniformCandidateSampler.
53984type UniformCandidateSamplerAttr func(optionalAttr)
53985
53986// UniformCandidateSamplerSeed sets the optional seed attribute to value.
53987//
53988// value: If either seed or seed2 are set to be non-zero, the random number
53989// generator is seeded by the given seed.  Otherwise, it is seeded by a
53990// random seed.
53991// If not specified, defaults to 0
53992func UniformCandidateSamplerSeed(value int64) UniformCandidateSamplerAttr {
53993	return func(m optionalAttr) {
53994		m["seed"] = value
53995	}
53996}
53997
53998// UniformCandidateSamplerSeed2 sets the optional seed2 attribute to value.
53999//
54000// value: A second seed to avoid seed collision.
54001// If not specified, defaults to 0
54002func UniformCandidateSamplerSeed2(value int64) UniformCandidateSamplerAttr {
54003	return func(m optionalAttr) {
54004		m["seed2"] = value
54005	}
54006}
54007
54008// Generates labels for candidate sampling with a uniform distribution.
54009//
54010// See explanations of candidate sampling and the data formats at
54011// go/candidate-sampling.
54012//
54013// For each batch, this op picks a single set of sampled candidate labels.
54014//
54015// The advantages of sampling candidates per-batch are simplicity and the
54016// possibility of efficient dense matrix multiplication. The disadvantage is that
54017// the sampled candidates must be chosen independently of the context and of the
54018// true labels.
54019//
54020// Arguments:
54021//
54022//	true_classes: A batch_size * num_true matrix, in which each row contains the
54023//
54024// IDs of the num_true target_classes in the corresponding original label.
54025//
54026//	num_true: Number of true labels per context.
54027//	num_sampled: Number of candidates to randomly sample.
54028//	unique: If unique is true, we sample with rejection, so that all sampled
54029//
54030// candidates in a batch are unique. This requires some approximation to
54031// estimate the post-rejection sampling probabilities.
54032//
54033//	range_max: The sampler will sample integers from the interval [0, range_max).
54034//
54035// Returns:
54036//
54037//	sampled_candidates: A vector of length num_sampled, in which each element is
54038//
54039// the ID of a sampled candidate.
54040//
54041//	true_expected_count: A batch_size * num_true matrix, representing
54042//
54043// the number of times each candidate is expected to occur in a batch
54044// of sampled candidates. If unique=true, then this is a probability.
54045//
54046//	sampled_expected_count: A vector of length num_sampled, for each sampled
54047//
54048// candidate representing the number of times the candidate is expected
54049// to occur in a batch of sampled candidates.  If unique=true, then this is a
54050// probability.
54051func UniformCandidateSampler(scope *Scope, true_classes tf.Output, num_true int64, num_sampled int64, unique bool, range_max int64, optional ...UniformCandidateSamplerAttr) (sampled_candidates tf.Output, true_expected_count tf.Output, sampled_expected_count tf.Output) {
54052	if scope.Err() != nil {
54053		return
54054	}
54055	attrs := map[string]interface{}{"num_true": num_true, "num_sampled": num_sampled, "unique": unique, "range_max": range_max}
54056	for _, a := range optional {
54057		a(attrs)
54058	}
54059	opspec := tf.OpSpec{
54060		Type: "UniformCandidateSampler",
54061		Input: []tf.Input{
54062			true_classes,
54063		},
54064		Attrs: attrs,
54065	}
54066	op := scope.AddOperation(opspec)
54067	return op.Output(0), op.Output(1), op.Output(2)
54068}
54069
54070// UniformDequantizeAttr is an optional argument to UniformDequantize.
54071type UniformDequantizeAttr func(optionalAttr)
54072
54073// UniformDequantizeQuantizationAxis sets the optional quantization_axis attribute to value.
54074//
54075// value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.
54076// If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()).
54077// If not specified, defaults to -1
54078func UniformDequantizeQuantizationAxis(value int64) UniformDequantizeAttr {
54079	return func(m optionalAttr) {
54080		m["quantization_axis"] = value
54081	}
54082}
54083
54084// Perform dequantization on the quantized Tensor `input`.
54085//
54086// Given quantized `input` which was quantized using `scales` and `zero_points`, performs dequantization using the formula:
54087// dequantized_data = (quantized_data - zero_point) * scale.
54088//
54089// Arguments:
54090//
54091//	input: Must be a Tensor of Tin.
54092//	scales: The float value(s) used as scale(s) when quantizing original data that input represents.
54093//
54094// Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
54095//
54096//	zero_points: The int32 value(s) used as zero_point(s) when quantizing original data that input represents.
54097//
54098// Same shape condition as scales.
54099//
54100//	Tout: The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32
54101//	quantization_min_val: The quantization min value that was used when input was quantized.
54102//
54103// The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:
54104// `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise.
54105// For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not.
54106//
54107//	quantization_max_val: The quantization max value that was used when input was quantized.
54108//
54109// The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:
54110// `(Tin max)` for both narrow range and not narrow range.
54111// For example, if Tin is qint8, this is set to 127.
54112//
54113// Returns The output dequantized Tensor of Tout, whose shape is same as input.
54114func UniformDequantize(scope *Scope, input tf.Output, scales tf.Output, zero_points tf.Output, Tout tf.DataType, quantization_min_val int64, quantization_max_val int64, optional ...UniformDequantizeAttr) (output tf.Output) {
54115	if scope.Err() != nil {
54116		return
54117	}
54118	attrs := map[string]interface{}{"Tout": Tout, "quantization_min_val": quantization_min_val, "quantization_max_val": quantization_max_val}
54119	for _, a := range optional {
54120		a(attrs)
54121	}
54122	opspec := tf.OpSpec{
54123		Type: "UniformDequantize",
54124		Input: []tf.Input{
54125			input, scales, zero_points,
54126		},
54127		Attrs: attrs,
54128	}
54129	op := scope.AddOperation(opspec)
54130	return op.Output(0)
54131}
54132
54133// UniformQuantizeAttr is an optional argument to UniformQuantize.
54134type UniformQuantizeAttr func(optionalAttr)
54135
54136// UniformQuantizeQuantizationAxis sets the optional quantization_axis attribute to value.
54137//
54138// value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.
54139// If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()).
54140// If not specified, defaults to -1
54141func UniformQuantizeQuantizationAxis(value int64) UniformQuantizeAttr {
54142	return func(m optionalAttr) {
54143		m["quantization_axis"] = value
54144	}
54145}
54146
54147// Perform quantization on Tensor `input`.
54148//
54149// Given `input`, `scales` and `zero_points`, performs quantization using the formula:
54150// quantized_data = floor(input_data * (1.0f / scale) + 0.5f) + zero_point
54151//
54152// Arguments:
54153//
54154//	input: Must be a Tensor of Tin.
54155//	scales: The float value(s) to use as scale(s) to quantize `input`.
54156//
54157// Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
54158//
54159//	zero_points: The int32 value(s) to use as zero_point(s) to quantize `input`.
54160//
54161// Same shape condition as scales.
54162//
54163//	Tout: The type of output Tensor. A tf.DType from: tf.float32
54164//	quantization_min_val: The quantization min value to quantize `input`.
54165//
54166// The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:
54167// `(Tout lowest) + 1` if narrow range, and `(Tout lowest)` otherwise.
54168// For example, if Tout is qint8, this is set to -127 if narrow range quantized or -128 if not.
54169//
54170//	quantization_max_val: The quantization max value to quantize `input`.
54171//
54172// The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:
54173// `(Tout max)` for both narrow range and not narrow range.
54174// For example, if Tout is qint8, this is set to 127.
54175//
54176// Returns The output quantized Tensor of Tout, whose shape is same as input.
54177func UniformQuantize(scope *Scope, input tf.Output, scales tf.Output, zero_points tf.Output, Tout tf.DataType, quantization_min_val int64, quantization_max_val int64, optional ...UniformQuantizeAttr) (output tf.Output) {
54178	if scope.Err() != nil {
54179		return
54180	}
54181	attrs := map[string]interface{}{"Tout": Tout, "quantization_min_val": quantization_min_val, "quantization_max_val": quantization_max_val}
54182	for _, a := range optional {
54183		a(attrs)
54184	}
54185	opspec := tf.OpSpec{
54186		Type: "UniformQuantize",
54187		Input: []tf.Input{
54188			input, scales, zero_points,
54189		},
54190		Attrs: attrs,
54191	}
54192	op := scope.AddOperation(opspec)
54193	return op.Output(0)
54194}
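
// Editor's note: a worked instance of the quantization formula above (not
// part of the generated API). With scale = 0.1 and zero_point = 3, the input
// value 0.53 quantizes to
//
//	floor(0.53 * (1.0 / 0.1) + 0.5) + 3 = floor(5.8) + 3 = 8,
//
// which must lie within [quantization_min_val, quantization_max_val] to be
// representable in Tout.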
54195
54196// UniformQuantizedDotHybridAttr is an optional argument to UniformQuantizedDotHybrid.
54197type UniformQuantizedDotHybridAttr func(optionalAttr)
54198
54199// UniformQuantizedDotHybridRhsQuantizationAxis sets the optional rhs_quantization_axis attribute to value.
54200//
54201// value: Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.
54202// If set to -1 (default), this indicates per-tensor quantization.
54203// For dot op rhs, only per-tensor quantization or per-channel quantization along dimension 1 is supported.
54204// Thus, this attribute must be set to -1 or 1. Other values are rejected.
54205// If not specified, defaults to -1
54206func UniformQuantizedDotHybridRhsQuantizationAxis(value int64) UniformQuantizedDotHybridAttr {
54207	return func(m optionalAttr) {
54208		m["rhs_quantization_axis"] = value
54209	}
54210}
54211
54212// Perform hybrid quantized dot of float Tensor `lhs` and quantized Tensor `rhs`.
54213//
54214// Given float `lhs` and quantized `rhs`, internally performs quantization on `lhs`, and then performs quantized dot on quantized lhs and `rhs`.
54215// The internal quantization on `lhs` is a quantization to qint8, dynamic range, per-batch (per-axis along axis 0), asymmetric, and not narrow range (the range is [-128, 127]).
54216// `lhs` and `rhs` must be 2D Tensors and the lhs.dim_size(1) must match rhs.dim_size(0).
54217// `rhs` must be a quantized Tensor, whose data values are quantized using the formula:
54218// quantized_data = clip(original_data / scale + zero_point, quantization_min_val, quantization_max_val).
54219//
54220// Arguments:
54221//
54222//	lhs: Must be a 2D Tensor of Tlhs.
54223//	rhs: Must be a 2D Tensor of Trhs.
54224//	rhs_scales: The float value(s) used as scale when quantizing original data that rhs represents.
54225//
54226// Must be a scalar Tensor (per-tensor quantization) or 1D Tensor of size (rhs.dim_size(1),) (per-channel quantization).
54227//
54228//	rhs_zero_points: The int32 value(s) used as zero_point when quantizing original data that rhs represents.
54229//
54230// Same shape condition as rhs_scales.
54231//
54232//	Tout: The type of output Tensor.
54233//	rhs_quantization_min_val: The min value of the quantized data stored in rhs.
54234//
54235// For example, if Trhs is qint8, this must be set to -127 if narrow range quantized or -128 if not.
54236//
54237//	rhs_quantization_max_val: The max value of the quantized data stored in rhs.
54238//
54239// For example, if Trhs is qint8, this must be set to 127.
54240//
54241// Returns The output 2D Tensor of Tout, whose shape is (lhs.dim_size(0), rhs.dim_size(1)).
54242// The output data is the original output data itself (Not quantized).
54243func UniformQuantizedDotHybrid(scope *Scope, lhs tf.Output, rhs tf.Output, rhs_scales tf.Output, rhs_zero_points tf.Output, Tout tf.DataType, rhs_quantization_min_val int64, rhs_quantization_max_val int64, optional ...UniformQuantizedDotHybridAttr) (output tf.Output) {
54244	if scope.Err() != nil {
54245		return
54246	}
54247	attrs := map[string]interface{}{"Tout": Tout, "rhs_quantization_min_val": rhs_quantization_min_val, "rhs_quantization_max_val": rhs_quantization_max_val}
54248	for _, a := range optional {
54249		a(attrs)
54250	}
54251	opspec := tf.OpSpec{
54252		Type: "UniformQuantizedDotHybrid",
54253		Input: []tf.Input{
54254			lhs, rhs, rhs_scales, rhs_zero_points,
54255		},
54256		Attrs: attrs,
54257	}
54258	op := scope.AddOperation(opspec)
54259	return op.Output(0)
54260}
54261
54262// UniformRequantizeAttr is an optional argument to UniformRequantize.
54263type UniformRequantizeAttr func(optionalAttr)
54264
54265// UniformRequantizeInputQuantizationAxis sets the optional input_quantization_axis attribute to value.
54266//
54267// value: The quantization axis that was used when quantizing original data that `input` represents.
54268// Indicates the dimension index of the tensor where per-axis quantization is applied for the slices along that dimension.
54269// If set to -1 (default), this indicates per-tensor quantization. Otherwise, it must be set within range [0, input.dims()).
54270// If not specified, defaults to -1
54271func UniformRequantizeInputQuantizationAxis(value int64) UniformRequantizeAttr {
54272	return func(m optionalAttr) {
54273		m["input_quantization_axis"] = value
54274	}
54275}
54276
54277// UniformRequantizeOutputQuantizationAxis sets the optional output_quantization_axis attribute to value.
54278//
54279// value: The new quantization axis to use to quantize original data that `input` represents.
54280// If not specified, defaults to -1
54281func UniformRequantizeOutputQuantizationAxis(value int64) UniformRequantizeAttr {
54282	return func(m optionalAttr) {
54283		m["output_quantization_axis"] = value
54284	}
54285}
54286
54287// Given quantized tensor `input`, requantize it with new quantization parameters.
54288//
54289// Given quantized tensor `input`, which was quantized using {input_scales, input_zero_points, input_quantization_axis, input_quantization_min_val, input_quantization_max_val},
54290// requantize it to a tensor, which is quantized using {output_scales, output_zero_points, output_quantization_axis, output_quantization_min_val, output_quantization_max_val}.
54291// The requantization is done by using the formula:
54292// output_quantized_data = clip(
54293//
54294//	(input_quantized_data - input_zero_point) * (input_scale / output_scale) + output_zero_point,
54295//	output_quantization_min_val,
54296//	output_quantization_max_val)
54297//
54298// The supported per-tensor and per-axis quantization cases are the following:
54299// * per-tensor -> per-tensor
54300// * per-tensor -> per-axis
54301// * per-axis -> per-axis, where input_quantization_axis equals output_quantization_axis.
54302// I.e. at least one of input_quantization_axis and output_quantization_axis must be -1, or the two must be equal.
54303//
54304// Arguments:
54305//
54306//	input: Must be a Tensor of Tin.
54307//	input_scales: The float value(s) used as scale(s) when quantizing original data that `input` represents.
54308//
54309// Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
54310//
54311//	input_zero_points: The int32 value(s) used as zero_point(s) when quantizing original data that `input` represents.
54312//
54313// Same shape condition as scales.
54314//
54315//	output_scales: The float value(s) to use as new scale(s) to quantize original data that `input` represents.
54316//
54317// Must be a scalar Tensor if quantization_axis is -1 (per-tensor quantization), otherwise 1D Tensor of size (input.dim_size(quantization_axis),) (per-axis quantization).
54318//
54319//	output_zero_points: The int32 value(s) to use as new zero_point(s) to quantize original data that `input` represents.
54320//
54321// Same shape condition as scales.
54322//
54323//	Tout: The type of output Tensor. A tf.DType from: tf.qint8, tf.qint32
54324//	input_quantization_min_val: The quantization min value that was used when quantizing original data that `input` represents.
54325//
54326// The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:
54327// `(Tin lowest) + 1` if narrow range, and `(Tin lowest)` otherwise.
54328// For example, if Tin is qint8, this is set to -127 if narrow range quantized or -128 if not.
54329//
54330//	input_quantization_max_val: The quantization max value that was used when quantizing original data that `input` represents.
54331//
54332// The purpose of this attribute is typically (but not limited to) to indicate narrow range, where this is set to:
54333// `(Tin max)` for both narrow range and not narrow range.
54334// For example, if Tin is qint8, this is set to 127.
54335//
54336//	output_quantization_min_val: The new quantization min value to quantize original data that `input` represents.
54337//	output_quantization_max_val: The new quantization max value to quantize original data that `input` represents.
54338//
54339// Returns The output quantized Tensor of Tout, whose shape is same as input.
54340func UniformRequantize(scope *Scope, input tf.Output, input_scales tf.Output, input_zero_points tf.Output, output_scales tf.Output, output_zero_points tf.Output, Tout tf.DataType, input_quantization_min_val int64, input_quantization_max_val int64, output_quantization_min_val int64, output_quantization_max_val int64, optional ...UniformRequantizeAttr) (output tf.Output) {
54341	if scope.Err() != nil {
54342		return
54343	}
54344	attrs := map[string]interface{}{"Tout": Tout, "input_quantization_min_val": input_quantization_min_val, "input_quantization_max_val": input_quantization_max_val, "output_quantization_min_val": output_quantization_min_val, "output_quantization_max_val": output_quantization_max_val}
54345	for _, a := range optional {
54346		a(attrs)
54347	}
54348	opspec := tf.OpSpec{
54349		Type: "UniformRequantize",
54350		Input: []tf.Input{
54351			input, input_scales, input_zero_points, output_scales, output_zero_points,
54352		},
54353		Attrs: attrs,
54354	}
54355	op := scope.AddOperation(opspec)
54356	return op.Output(0)
54357}
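
// Editor's note: a worked instance of the requantization formula above (not
// part of the generated API). Requantizing the stored value 8 from
// {input_scale = 0.1, input_zero_point = 3} to {output_scale = 0.5,
// output_zero_point = -1}:
//
//	(8 - 3) * (0.1 / 0.5) + (-1) = 5*0.2 - 1 = 0
//
// Both encodings represent the same real value: (8 - 3)*0.1 = (0 - (-1))*0.5
// = 0.5.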
54358
54359// UniqueAttr is an optional argument to Unique.
54360type UniqueAttr func(optionalAttr)
54361
54362// UniqueOutIdx sets the optional out_idx attribute to value.
54363// If not specified, defaults to DT_INT32
54364func UniqueOutIdx(value tf.DataType) UniqueAttr {
54365	return func(m optionalAttr) {
54366		m["out_idx"] = value
54367	}
54368}
54369
54370// Finds unique elements in a 1-D tensor.
54371//
54372// This operation returns a tensor `y` containing all of the unique elements of `x`
54373// sorted in the same order that they occur in `x`; `x` does not need to be sorted.
54374// This operation also returns a tensor `idx` the same size as `x` that contains
54375// the index of each value of `x` in the unique output `y`. In other words:
54376//
54377// `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
54378//
54379// Examples:
54380//
54381// ```
54382// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
54383// y, idx = unique(x)
54384// y ==> [1, 2, 4, 7, 8]
54385// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
54386// ```
54387//
54388// ```
54389// # tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
54390// y, idx = unique(x)
54391// y ==> [4, 5, 1, 2, 3]
54392// idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
54393// ```
54394//
54395// Arguments:
54396//
54397//	x: 1-D.
54398//
54399// Returns:
54400//
54401//	y: 1-D.
54402//	idx: 1-D.
54403func Unique(scope *Scope, x tf.Output, optional ...UniqueAttr) (y tf.Output, idx tf.Output) {
54404	if scope.Err() != nil {
54405		return
54406	}
54407	attrs := map[string]interface{}{}
54408	for _, a := range optional {
54409		a(attrs)
54410	}
54411	opspec := tf.OpSpec{
54412		Type: "Unique",
54413		Input: []tf.Input{
54414			x,
54415		},
54416		Attrs: attrs,
54417	}
54418	op := scope.AddOperation(opspec)
54419	return op.Output(0), op.Output(1)
54420}
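
// Editor's note: an end-to-end sketch running the first example above through
// a session (not part of the generated API), assuming the op and tf packages
// from this repository; error handling elided for brevity.
//
//	s := op.NewScope()
//	x := op.Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
//	y, idx := op.Unique(s, x)
//	graph, _ := s.Finalize()
//	sess, _ := tf.NewSession(graph, nil)
//	out, _ := sess.Run(nil, []tf.Output{y, idx}, nil)
//	// out[0].Value() == [1 2 4 7 8]
//	// out[1].Value() == [0 0 1 2 2 2 3 4 4]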
54421
54422// UniqueDatasetAttr is an optional argument to UniqueDataset.
54423type UniqueDatasetAttr func(optionalAttr)
54424
54425// UniqueDatasetMetadata sets the optional metadata attribute to value.
54426// If not specified, defaults to ""
54427func UniqueDatasetMetadata(value string) UniqueDatasetAttr {
54428	return func(m optionalAttr) {
54429		m["metadata"] = value
54430	}
54431}
54432
54433// Creates a dataset that contains the unique elements of `input_dataset`.
54434func UniqueDataset(scope *Scope, input_dataset tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...UniqueDatasetAttr) (handle tf.Output) {
54435	if scope.Err() != nil {
54436		return
54437	}
54438	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
54439	for _, a := range optional {
54440		a(attrs)
54441	}
54442	opspec := tf.OpSpec{
54443		Type: "UniqueDataset",
54444		Input: []tf.Input{
54445			input_dataset,
54446		},
54447		Attrs: attrs,
54448	}
54449	op := scope.AddOperation(opspec)
54450	return op.Output(0)
54451}
54452
54453// UniqueV2Attr is an optional argument to UniqueV2.
54454type UniqueV2Attr func(optionalAttr)
54455
54456// UniqueV2OutIdx sets the optional out_idx attribute to value.
54457// If not specified, defaults to DT_INT32
54458func UniqueV2OutIdx(value tf.DataType) UniqueV2Attr {
54459	return func(m optionalAttr) {
54460		m["out_idx"] = value
54461	}
54462}
54463
54464// Finds unique elements along an axis of a tensor.
54465//
54466// This operation returns a tensor `y` containing the unique elements
54467// along the `axis` of a tensor. The returned unique elements are sorted
54468// in the same order as they occur along `axis` in `x`.
54469// This operation also returns a tensor `idx` whose size matches the number
54470// of elements in `x` along the `axis` dimension, and which contains the
54471// index of each value of `x` in the unique output `y`.
54472// In other words, for a `1-D` tensor `x` with `axis = None`:
54473//
54474// `y[idx[i]] = x[i] for i in [0, 1,...,len(x) - 1]`
54475//
54476// For example:
54477//
54478// ```
54479// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
54480// y, idx = unique(x)
54481// y ==> [1, 2, 4, 7, 8]
54482// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
54483// ```
54484//
54485// For a `2-D` tensor `x` with `axis = 0`:
54486//
54487// ```
54488// # tensor 'x' is [[1, 0, 0],
54489// #                [1, 0, 0],
54490// #                [2, 0, 0]]
54491// y, idx = unique(x, axis=0)
54492// y ==> [[1, 0, 0],
54493//
54494//	[2, 0, 0]]
54495//
54496// idx ==> [0, 0, 1]
54497// ```
54498//
54499// For a `2-D` tensor `x` with `axis = 1`:
54500//
54501// ```
54502// # tensor 'x' is [[1, 0, 0],
54503// #                [1, 0, 0],
54504// #                [2, 0, 0]]
54505// y, idx = unique(x, axis=1)
54506// y ==> [[1, 0],
54507//
54508//	[1, 0],
54509//	[2, 0]]
54510//
54511// idx ==> [0, 1, 1]
54512// ```
54513//
54514// Arguments:
54515//
54516//	x: A `Tensor`.
54517//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor
54518//
54519// along which to find the unique elements.
54520//
54521// Returns:
54522//
54523//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
54524//	idx: A 1-D Tensor (of type `out_idx`) that contains the index of each
54525//
54526// value of x in the output y.
54527func UniqueV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueV2Attr) (y tf.Output, idx tf.Output) {
54528	if scope.Err() != nil {
54529		return
54530	}
54531	attrs := map[string]interface{}{}
54532	for _, a := range optional {
54533		a(attrs)
54534	}
54535	opspec := tf.OpSpec{
54536		Type: "UniqueV2",
54537		Input: []tf.Input{
54538			x, axis,
54539		},
54540		Attrs: attrs,
54541	}
54542	op := scope.AddOperation(opspec)
54543	return op.Output(0), op.Output(1)
54544}
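
// exampleUniqueV2 is a hand-written usage sketch, not generated code. Unlike
// Unique, the axis is supplied as a 1-D tensor input; axis = [0] reproduces
// the row-wise documentation example above.
func exampleUniqueV2() (y, idx tf.Output) {
	s := NewScope()
	x := Const(s, [][]int32{{1, 0, 0}, {1, 0, 0}, {2, 0, 0}})
	// y ==> [[1 0 0] [2 0 0]], idx ==> [0 0 1]
	return UniqueV2(s, x, Const(s, []int64{0}))
}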
54545
54546// UniqueWithCountsAttr is an optional argument to UniqueWithCounts.
54547type UniqueWithCountsAttr func(optionalAttr)
54548
54549// UniqueWithCountsOutIdx sets the optional out_idx attribute to value.
54550// If not specified, defaults to DT_INT32
54551func UniqueWithCountsOutIdx(value tf.DataType) UniqueWithCountsAttr {
54552	return func(m optionalAttr) {
54553		m["out_idx"] = value
54554	}
54555}
54556
54557// Finds unique elements in a 1-D tensor.
54558//
54559// This operation returns a tensor `y` containing all of the unique elements of `x`
54560// sorted in the same order that they occur in `x`. This operation also returns a
54561// tensor `idx` the same size as `x` that contains the index of each value of `x`
54562// in the unique output `y`. Finally, it returns a third tensor `count` that
54563// contains the count of each element of `y` in `x`. In other words:
54564//
54565// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
54566//
54567// For example:
54568//
54569// ```
54570// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
54571// y, idx, count = unique_with_counts(x)
54572// y ==> [1, 2, 4, 7, 8]
54573// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
54574// count ==> [2, 1, 3, 1, 2]
54575// ```
54576//
54577// Arguments:
54578//
54579//	x: 1-D.
54580//
54581// Returns:
54582//
54583//	y: 1-D.
54584//	idx: 1-D.
54585//	count: 1-D.
54586func UniqueWithCounts(scope *Scope, x tf.Output, optional ...UniqueWithCountsAttr) (y tf.Output, idx tf.Output, count tf.Output) {
54587	if scope.Err() != nil {
54588		return
54589	}
54590	attrs := map[string]interface{}{}
54591	for _, a := range optional {
54592		a(attrs)
54593	}
54594	opspec := tf.OpSpec{
54595		Type: "UniqueWithCounts",
54596		Input: []tf.Input{
54597			x,
54598		},
54599		Attrs: attrs,
54600	}
54601	op := scope.AddOperation(opspec)
54602	return op.Output(0), op.Output(1), op.Output(2)
54603}
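
// exampleUniqueWithCounts is a hand-written usage sketch, not generated code,
// mirroring the documentation example above and requesting int64 indices via
// the optional out_idx attribute.
func exampleUniqueWithCounts() (y, idx, count tf.Output) {
	s := NewScope()
	x := Const(s, []int32{1, 1, 2, 4, 4, 4, 7, 8, 8})
	// y ==> [1 2 4 7 8], idx ==> [0 0 1 2 2 2 3 4 4], count ==> [2 1 3 1 2]
	return UniqueWithCounts(s, x, UniqueWithCountsOutIdx(tf.Int64))
}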
54604
54605// UniqueWithCountsV2Attr is an optional argument to UniqueWithCountsV2.
54606type UniqueWithCountsV2Attr func(optionalAttr)
54607
54608// UniqueWithCountsV2OutIdx sets the optional out_idx attribute to value.
54609// If not specified, defaults to DT_INT32
54610func UniqueWithCountsV2OutIdx(value tf.DataType) UniqueWithCountsV2Attr {
54611	return func(m optionalAttr) {
54612		m["out_idx"] = value
54613	}
54614}
54615
54616// Finds unique elements along an axis of a tensor.
54617//
54618// This operation returns a tensor `y` containing the unique elements
54619// along the `axis` of a tensor. The returned unique elements are sorted
54620// in the same order as they occur along `axis` in `x`.
54621// This operation also returns a tensor `idx` and a tensor `count`
54622// that are the same size as the number of elements in `x` along the
54623// `axis` dimension. The `idx` contains the index in the unique output `y`
54624// and the `count` contains the count of each unique element of `y` in `x`.
54625// In other words, for a `1-D` tensor `x` with `axis = None`:
54626//
54627// `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`
54628//
54629// For example:
54630//
54631// ```
54632// x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
54633// y, idx, count = UniqueWithCountsV2(x, axis = [0])
54634// y ==> [1, 2, 4, 7, 8]
54635// idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
54636// count ==> [2, 1, 3, 1, 2]
54637// ```
54638//
54639// For a `2-D` tensor `x` with `axis = 0`:
54640//
54641// ```
54642// x = tf.constant([[1, 0, 0],
54643//
54644//	[1, 0, 0],
54645//	[2, 0, 0]])
54646//
54647// y, idx, count = UniqueWithCountsV2(x, axis=[0])
54648// y ==> [[1, 0, 0],
54649//
54650//	[2, 0, 0]]
54651//
54652// idx ==> [0, 0, 1]
54653// count ==> [2, 1]
54654// ```
54655//
54656// For a `2-D` tensor `x` with `axis = 1`:
54657//
54658// ```
54659// x = tf.constant([[1, 0, 0],
54660//
54661//	[1, 0, 0],
54662//	[2, 0, 0]])
54663//
54664// y, idx, count = UniqueWithCountsV2(x, axis=[1])
54665// y ==> [[1, 0],
54666//
54667//	[1, 0],
54668//	[2, 0]]
54669//
54670// idx ==> [0, 1, 1]
54671// count ==> [1, 2]
54672// ```
54673//
54674// Arguments:
54675//
54676//	x: A `Tensor`.
54677//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor to
54678//	axis: A `Tensor` of type `int32` (default: None). The axis of the Tensor
54679//
54680// along which to find the unique elements.
54681// Returns:
54682//
54683//	y: A `Tensor`. Unique elements along the `axis` of `Tensor` x.
54684//	idx: A 1-D Tensor (of type `out_idx`) that contains the index of each
54685//
54686// value of x in the output y.
54687//
54688//	count: A 1-D Tensor. The count of each value of x in the output y.
54689func UniqueWithCountsV2(scope *Scope, x tf.Output, axis tf.Output, optional ...UniqueWithCountsV2Attr) (y tf.Output, idx tf.Output, count tf.Output) {
54690	if scope.Err() != nil {
54691		return
54692	}
54693	attrs := map[string]interface{}{}
54694	for _, a := range optional {
54695		a(attrs)
54696	}
54697	opspec := tf.OpSpec{
54698		Type: "UniqueWithCountsV2",
54699		Input: []tf.Input{
54700			x, axis,
54701		},
54702		Attrs: attrs,
54703	}
54704	op := scope.AddOperation(opspec)
54705	return op.Output(0), op.Output(1), op.Output(2)
54706}
54707
54708// UnpackAttr is an optional argument to Unpack.
54709type UnpackAttr func(optionalAttr)
54710
54711// UnpackAxis sets the optional axis attribute to value.
54712//
54713// value: Dimension along which to unpack.  Negative values wrap around, so the
54714// valid range is `[-R, R)`.
54715// If not specified, defaults to 0
54716func UnpackAxis(value int64) UnpackAttr {
54717	return func(m optionalAttr) {
54718		m["axis"] = value
54719	}
54720}
54721
54722// Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
54723//
54724// Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
54725// For example, given a tensor of shape `(A, B, C, D)`;
54726//
54727// If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
54728//
54729//	and each tensor in `output` will have shape `(B, C, D)`. (Note that the
54730//	dimension unpacked along is gone, unlike `split`).
54731//
54732// If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
54733//
54734//	and each tensor in `output` will have shape `(A, C, D)`.
54735//
54736// Etc.
54737//
54738// This is the opposite of `pack`.
54739//
54740// Arguments:
54741//
54742//	value: 1-D or higher, with `axis` dimension size equal to `num`.
54743//
54744// Returns The list of tensors unpacked from `value`.
54745func Unpack(scope *Scope, value tf.Output, num int64, optional ...UnpackAttr) (output []tf.Output) {
54746	if scope.Err() != nil {
54747		return
54748	}
54749	attrs := map[string]interface{}{"num": num}
54750	for _, a := range optional {
54751		a(attrs)
54752	}
54753	opspec := tf.OpSpec{
54754		Type: "Unpack",
54755		Input: []tf.Input{
54756			value,
54757		},
54758		Attrs: attrs,
54759	}
54760	op := scope.AddOperation(opspec)
54761	if scope.Err() != nil {
54762		return
54763	}
54764	var idx int
54765	var err error
54766	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
54767		scope.UpdateErr("Unpack", err)
54768		return
54769	}
54770	return output
54771}
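
// exampleUnpack is a hand-written usage sketch, not generated code. Unpacking
// a 2x3 tensor along axis 0 yields two rank-1 tensors of shape (3); note that
// num must match the size of the unpacked dimension.
func exampleUnpack() []tf.Output {
	s := NewScope()
	value := Const(s, [][]int32{{1, 2, 3}, {4, 5, 6}})
	// output ==> [1 2 3] and [4 5 6]
	return Unpack(s, value, 2, UnpackAxis(0))
}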
54772
54773// Converts an array of flat indices into a tuple of coordinate arrays.
54774//
54775// Example:
54776//
54777// ```
54778// y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3])
54779// # 'dims' represent a hypothetical (3, 3) tensor of indices:
54780// # [[0, 1, *2*],
54781// #  [3, 4, *5*],
54782// #  [6, *7*, 8]]
54783// # For each entry from 'indices', this operation returns
54784// # its coordinates (marked with '*'), such as
54785// # 2 ==> (0, 2)
54786// # 5 ==> (1, 2)
54787// # 7 ==> (2, 1)
54788// y ==> [[0, 1, 2], [2, 2, 1]]
54789// ```
54790//
54791// @compatibility(numpy)
54792// Equivalent to np.unravel_index
54793// @end_compatibility
54794//
54795// Arguments:
54796//
54797//	indices: A 0-D or 1-D `int` Tensor whose elements are indices into the
54798//
54799// flattened version of an array of dimensions dims.
54800//
54801//	dims: A 1-D `int` Tensor. The shape of the array to use for unraveling
54802//
54803// indices.
54804//
54805// Returns A 2-D (or 1-D if `indices` is 0-D) tensor where each row has the
54806// same shape as the indices array.
54807func UnravelIndex(scope *Scope, indices tf.Output, dims tf.Output) (output tf.Output) {
54808	if scope.Err() != nil {
54809		return
54810	}
54811	opspec := tf.OpSpec{
54812		Type: "UnravelIndex",
54813		Input: []tf.Input{
54814			indices, dims,
54815		},
54816	}
54817	op := scope.AddOperation(opspec)
54818	return op.Output(0)
54819}
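
// exampleUnravelIndex is a hand-written usage sketch, not generated code,
// mirroring the documentation example above.
func exampleUnravelIndex() tf.Output {
	s := NewScope()
	indices := Const(s, []int32{2, 5, 7})
	dims := Const(s, []int32{3, 3})
	// output ==> [[0 1 2] [2 2 1]]
	return UnravelIndex(s, indices, dims)
}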
54820
54821// Computes the maximum along segments of a tensor.
54822//
54823// Read
54824// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
54825// for an explanation of segments.
54826//
54827// This operator is similar to `tf.math.unsorted_segment_sum`, but instead of
54828// computing the sum over segments, it computes the maximum such that:
54829//
54830// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
54831// that `segment_ids[j...] == i`.
54832//
54833// If the maximum is empty for a given segment ID `i`, it outputs the smallest
54834// possible value for the specific numeric type,
54835// `output[i] = numeric_limits<T>::lowest()`.
54836//
54837// If the given segment ID `i` is negative, then the corresponding value is
54838// dropped, and will not be included in the result.
54839//
54840// Caution: On CPU, values in `segment_ids` are always validated to be less than
54841// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
54842// op does not throw an error for out-of-bound indices; instead, such indices
54843// result in safe but unspecified behavior, which may include ignoring
54844// out-of-bound indices or outputting a tensor with a 0 stored in the first
54845// dimension of its shape if `num_segments` is 0.
54846//
54847// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
54848// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
54849// </div>
54850//
54851// For example:
54852//
54853// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
54854// >>> tf.math.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
54855// array([[4, 3, 3, 4],
54856//
54857//	[5,  6, 7, 8]], dtype=int32)
54858//
54859// Arguments:
54860//
54861//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
54862//
54863// The values must be less than `num_segments`.
54864//
54865// Caution: The values are always validated to be in range on CPU, never validated
54866// on GPU.
54867//
54868// Returns Has same shape as data, except for the first `segment_ids.rank`
54869// dimensions, which are replaced with a single dimension which has size
54870// `num_segments`.
54871func UnsortedSegmentMax(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
54872	if scope.Err() != nil {
54873		return
54874	}
54875	opspec := tf.OpSpec{
54876		Type: "UnsortedSegmentMax",
54877		Input: []tf.Input{
54878			data, segment_ids, num_segments,
54879		},
54880	}
54881	op := scope.AddOperation(opspec)
54882	return op.Output(0)
54883}
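
// exampleUnsortedSegmentMax is a hand-written usage sketch, not generated
// code, mirroring the documentation example above. Note that num_segments is
// a graph input (a scalar tensor), not an attribute.
func exampleUnsortedSegmentMax() tf.Output {
	s := NewScope()
	data := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
	ids := Const(s, []int32{0, 1, 0})
	// output ==> [[4 3 3 4] [5 6 7 8]]
	return UnsortedSegmentMax(s, data, ids, Const(s, int32(2)))
}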
54884
54885// Computes the minimum along segments of a tensor.
54886//
54887// Read
54888// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
54889// for an explanation of segments.
54890//
54891// This operator is similar to `tf.math.unsorted_segment_sum`, but instead of
54892// computing the sum over segments, it computes the minimum such that:
54893//
54894// \\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
54895// that `segment_ids[j...] == i`.
54896//
54897// If the minimum is empty for a given segment ID `i`, it outputs the largest
54898// possible value for the specific numeric type,
54899// `output[i] = numeric_limits<T>::max()`.
54900//
54901// For example:
54902//
54903// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
54904// >>> tf.math.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
54905// array([[1, 2, 2, 1],
54906//
54907//	[5, 6, 7, 8]], dtype=int32)
54908//
54909// If the given segment ID `i` is negative, then the corresponding value is
54910// dropped, and will not be included in the result.
54911//
54912// Caution: On CPU, values in `segment_ids` are always validated to be less than
54913// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
54914// op does not throw an error for out-of-bound indices; instead, such indices
54915// result in safe but unspecified behavior, which may include ignoring
54916// out-of-bound indices or outputting a tensor with a 0 stored in the first
54917// dimension of its shape if `num_segments` is 0.
54918//
54919// Arguments:
54920//
54921//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
54922//
54923// The values must be less than `num_segments`.
54924//
54925// Caution: The values are always validated to be in range on CPU, never validated
54926// on GPU.
54927//
54928// Returns Has same shape as data, except for the first `segment_ids.rank`
54929// dimensions, which are replaced with a single dimension which has size
54930// `num_segments`.
54931func UnsortedSegmentMin(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
54932	if scope.Err() != nil {
54933		return
54934	}
54935	opspec := tf.OpSpec{
54936		Type: "UnsortedSegmentMin",
54937		Input: []tf.Input{
54938			data, segment_ids, num_segments,
54939		},
54940	}
54941	op := scope.AddOperation(opspec)
54942	return op.Output(0)
54943}
54944
54945// Computes the product along segments of a tensor.
54946//
54947// Read
54948// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
54949// for an explanation of segments.
54950//
54951// This operator is similar to `tf.math.unsorted_segment_sum`, but instead of
54952// computing the sum over segments, it computes the product of all
54953// entries belonging to a segment such that:
54954//
54955// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
54956// `j...` such that `segment_ids[j...] == i`.
54957//
54958// For example:
54959//
54960// >>> c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
54961// >>> tf.math.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2).numpy()
54962// array([[4, 6, 6, 4],
54963//
54964//	[5, 6, 7, 8]], dtype=int32)
54965//
54966// If there is no entry for a given segment ID `i`, it outputs 1.
54967//
54968// If the given segment ID `i` is negative, then the corresponding value is
54969// dropped, and will not be included in the result.
54970// Caution: On CPU, values in `segment_ids` are always validated to be less than
54971// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
54972// op does not throw an error for out-of-bound indices; instead, such indices
54973// result in safe but unspecified behavior, which may include ignoring
54974// out-of-bound indices or outputting a tensor with a 0 stored in the first
54975// dimension of its shape if `num_segments` is 0.
54976//
54977// Arguments:
54978//
54979//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
54980//
54981// The values must be less than `num_segments`.
54982//
54983// Caution: The values are always validated to be in range on CPU, never validated
54984// on GPU.
54985//
54986// Returns Has same shape as data, except for the first `segment_ids.rank`
54987// dimensions, which are replaced with a single dimension which has size
54988// `num_segments`.
54989func UnsortedSegmentProd(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
54990	if scope.Err() != nil {
54991		return
54992	}
54993	opspec := tf.OpSpec{
54994		Type: "UnsortedSegmentProd",
54995		Input: []tf.Input{
54996			data, segment_ids, num_segments,
54997		},
54998	}
54999	op := scope.AddOperation(opspec)
55000	return op.Output(0)
55001}
55002
55003// Computes the sum along segments of a tensor.
55004//
55005// Read
55006// [the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
55007// for an explanation of segments.
55008//
55009// Computes a tensor such that
55010// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
55011// that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
55012// need not be sorted and need not cover all values in the full
55013// range of valid values.
55014//
55015// If the sum is empty for a given segment ID `i`, `output[i] = 0`.
55016// If the given segment ID `i` is negative, the value is dropped and will not be
55017// added to the sum of the segment.
55018//
55019// `num_segments` should equal the number of distinct segment IDs.
55020//
55021// Caution: On CPU, values in `segment_ids` are always validated to be less than
55022// `num_segments`, and an error is thrown for out-of-bound indices. On GPU, this
55023// op does not throw an error for out-of-bound indices; instead, such indices
55024// result in safe but unspecified behavior, which may include ignoring
55025// out-of-bound indices or outputting a tensor with a 0 stored in the first
55026// dimension of its shape if `num_segments` is 0.
55027//
55028// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
55029// <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
55030// </div>
55031//
55032// >>> c = [[1,2,3,4], [5,6,7,8], [4,3,2,1]]
55033// >>> tf.math.unsorted_segment_sum(c, [0, 1, 0], num_segments=2).numpy()
55034// array([[5, 5, 5, 5],
55035//
55036//	[5, 6, 7, 8]], dtype=int32)
55037//
55038// Arguments:
55039//
55040//	segment_ids: A tensor whose shape is a prefix of `data.shape`.
55041//
55042// The values must be less than `num_segments`.
55043//
55044// Caution: The values are always validated to be in range on CPU, never validated
55045// on GPU.
55046//
55047// Returns Has same shape as data, except for the first `segment_ids.rank`
55048// dimensions, which are replaced with a single dimension which has size
55049// `num_segments`.
55050func UnsortedSegmentSum(scope *Scope, data tf.Output, segment_ids tf.Output, num_segments tf.Output) (output tf.Output) {
55051	if scope.Err() != nil {
55052		return
55053	}
55054	opspec := tf.OpSpec{
55055		Type: "UnsortedSegmentSum",
55056		Input: []tf.Input{
55057			data, segment_ids, num_segments,
55058		},
55059	}
55060	op := scope.AddOperation(opspec)
55061	return op.Output(0)
55062}
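
// exampleUnsortedSegmentSum is a hand-written usage sketch, not generated
// code. It builds a small graph around the wrapper above and runs it in a
// session; NewScope and Const come from this package, tf.NewSession from the
// core TensorFlow Go package.
func exampleUnsortedSegmentSum() ([][]int32, error) {
	s := NewScope()
	data := Const(s, [][]int32{{1, 2, 3, 4}, {5, 6, 7, 8}, {4, 3, 2, 1}})
	ids := Const(s, []int32{0, 1, 0})
	sum := UnsortedSegmentSum(s, data, ids, Const(s, int32(2)))
	graph, err := s.Finalize()
	if err != nil {
		return nil, err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return nil, err
	}
	defer sess.Close()
	fetched, err := sess.Run(nil, []tf.Output{sum}, nil)
	if err != nil {
		return nil, err
	}
	// Expected value: [[5 5 5 5] [5 6 7 8]].
	return fetched[0].Value().([][]int32), nil
}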
55063
55064// UnstageAttr is an optional argument to Unstage.
55065type UnstageAttr func(optionalAttr)
55066
55067// UnstageCapacity sets the optional capacity attribute to value.
55068// If not specified, defaults to 0
55069//
55070// REQUIRES: value >= 0
55071func UnstageCapacity(value int64) UnstageAttr {
55072	return func(m optionalAttr) {
55073		m["capacity"] = value
55074	}
55075}
55076
55077// UnstageMemoryLimit sets the optional memory_limit attribute to value.
55078// If not specified, defaults to 0
55079//
55080// REQUIRES: value >= 0
55081func UnstageMemoryLimit(value int64) UnstageAttr {
55082	return func(m optionalAttr) {
55083		m["memory_limit"] = value
55084	}
55085}
55086
55087// UnstageContainer sets the optional container attribute to value.
55088// If not specified, defaults to ""
55089func UnstageContainer(value string) UnstageAttr {
55090	return func(m optionalAttr) {
55091		m["container"] = value
55092	}
55093}
55094
55095// UnstageSharedName sets the optional shared_name attribute to value.
55096// If not specified, defaults to ""
55097func UnstageSharedName(value string) UnstageAttr {
55098	return func(m optionalAttr) {
55099		m["shared_name"] = value
55100	}
55101}
55102
55103// Op is similar to a lightweight Dequeue.
55104//
55105// The basic functionality is similar to dequeue with many fewer
55106// capabilities and options.  This Op is optimized for performance.
55107func Unstage(scope *Scope, dtypes []tf.DataType, optional ...UnstageAttr) (values []tf.Output) {
55108	if scope.Err() != nil {
55109		return
55110	}
55111	attrs := map[string]interface{}{"dtypes": dtypes}
55112	for _, a := range optional {
55113		a(attrs)
55114	}
55115	opspec := tf.OpSpec{
55116		Type: "Unstage",
55117
55118		Attrs: attrs,
55119	}
55120	op := scope.AddOperation(opspec)
55121	if scope.Err() != nil {
55122		return
55123	}
55124	var idx int
55125	var err error
55126	if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
55127		scope.UpdateErr("Unstage", err)
55128		return
55129	}
55130	return values
55131}
55132
55133// UpperBoundAttr is an optional argument to UpperBound.
55134type UpperBoundAttr func(optionalAttr)
55135
55136// UpperBoundOutType sets the optional out_type attribute to value.
55137// If not specified, defaults to DT_INT32
55138func UpperBoundOutType(value tf.DataType) UpperBoundAttr {
55139	return func(m optionalAttr) {
55140		m["out_type"] = value
55141	}
55142}
55143
55144// Applies upper_bound(sorted_search_values, values) along each row.
55145//
55146// Each set of rows with the same index in (sorted_inputs, values) is treated
55147// independently.  The resulting row is the equivalent of calling
55148// `np.searchsorted(sorted_inputs, values, side='right')`.
55149//
55150// The result is not a global index to the entire
55151// `Tensor`, but rather just the index in the last dimension.
55152//
55153// A 2-D example:
55154//
55155//	sorted_sequence = [[0, 3, 9, 9, 10],
55156//	                   [1, 2, 3, 4, 5]]
55157//	values = [[2, 4, 9],
55158//	          [0, 2, 6]]
55159//
55160//	result = UpperBound(sorted_sequence, values)
55161//
55162//	result == [[1, 2, 4],
55163//	           [0, 2, 5]]
55164//
55165// Arguments:
55166//
55167//	sorted_inputs: 2-D Tensor where each row is ordered.
55168//	values: 2-D Tensor with the same number of rows as `sorted_search_values`. Contains
55169//
55170// the values that will be searched for in `sorted_search_values`.
55171//
55172// Returns A `Tensor` with the same shape as `values`.  It contains the last scalar index
55173// into the last dimension where values can be inserted without changing the
55174// ordered property.
55175func UpperBound(scope *Scope, sorted_inputs tf.Output, values tf.Output, optional ...UpperBoundAttr) (output tf.Output) {
55176	if scope.Err() != nil {
55177		return
55178	}
55179	attrs := map[string]interface{}{}
55180	for _, a := range optional {
55181		a(attrs)
55182	}
55183	opspec := tf.OpSpec{
55184		Type: "UpperBound",
55185		Input: []tf.Input{
55186			sorted_inputs, values,
55187		},
55188		Attrs: attrs,
55189	}
55190	op := scope.AddOperation(opspec)
55191	return op.Output(0)
55192}
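
// exampleUpperBound is a hand-written usage sketch, not generated code,
// mirroring the 2-D documentation example above.
func exampleUpperBound() tf.Output {
	s := NewScope()
	sorted := Const(s, [][]int32{{0, 3, 9, 9, 10}, {1, 2, 3, 4, 5}})
	values := Const(s, [][]int32{{2, 4, 9}, {0, 2, 6}})
	// output ==> [[1 2 4] [0 2 5]]
	return UpperBound(s, sorted, values)
}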
55193
55194// VarHandleOpAttr is an optional argument to VarHandleOp.
55195type VarHandleOpAttr func(optionalAttr)
55196
55197// VarHandleOpContainer sets the optional container attribute to value.
55198//
55199// value: the container this variable is placed in.
55200// If not specified, defaults to ""
55201func VarHandleOpContainer(value string) VarHandleOpAttr {
55202	return func(m optionalAttr) {
55203		m["container"] = value
55204	}
55205}
55206
55207// VarHandleOpSharedName sets the optional shared_name attribute to value.
55208//
55209// value: the name by which this variable is referred to.
55210// If not specified, defaults to ""
55211func VarHandleOpSharedName(value string) VarHandleOpAttr {
55212	return func(m optionalAttr) {
55213		m["shared_name"] = value
55214	}
55215}
55216
55217// VarHandleOpAllowedDevices sets the optional allowed_devices attribute to value.
55218//
55219// value: DEPRECATED. The allowed devices containing the resource variable. Set when the
55220// output ResourceHandle represents a per-replica/partitioned resource variable.
55221// If not specified, defaults to {}
55222func VarHandleOpAllowedDevices(value []string) VarHandleOpAttr {
55223	return func(m optionalAttr) {
55224		m["allowed_devices"] = value
55225	}
55226}
55227
55228// Creates a handle to a Variable resource.
55229//
55230// Arguments:
55231//
55232//	dtype: the type of this variable. Must agree with the dtypes
55233//
55234// of all ops using this variable.
55235//
55236//	shape: The (possibly partially specified) shape of this variable.
55237func VarHandleOp(scope *Scope, dtype tf.DataType, shape tf.Shape, optional ...VarHandleOpAttr) (resource tf.Output) {
55238	if scope.Err() != nil {
55239		return
55240	}
55241	attrs := map[string]interface{}{"dtype": dtype, "shape": shape}
55242	for _, a := range optional {
55243		a(attrs)
55244	}
55245	opspec := tf.OpSpec{
55246		Type: "VarHandleOp",
55247
55248		Attrs: attrs,
55249	}
55250	op := scope.AddOperation(opspec)
55251	return op.Output(0)
55252}
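
// exampleVarHandleOp is a hand-written usage sketch, not generated code. It
// creates a handle for a scalar float32 variable and pairs it with the
// AssignVariableOp and ReadVariableOp wrappers defined elsewhere in this
// file; the returned init operation must be run (as a session target) before
// the read output is fetched.
func exampleVarHandleOp() (*tf.Operation, tf.Output) {
	s := NewScope()
	handle := VarHandleOp(s, tf.Float, tf.ScalarShape(), VarHandleOpSharedName("v"))
	init := AssignVariableOp(s, handle, Const(s, float32(1.5)))
	value := ReadVariableOp(s, handle, tf.Float)
	return init, value
}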
55253
55254// Checks whether a resource handle-based variable has been initialized.
55255//
55256// Arguments:
55257//
55258//	resource: the input resource handle.
55259//
55260// Returns a scalar boolean which is true if the variable has been
55261// initialized.
55262func VarIsInitializedOp(scope *Scope, resource tf.Output) (is_initialized tf.Output) {
55263	if scope.Err() != nil {
55264		return
55265	}
55266	opspec := tf.OpSpec{
55267		Type: "VarIsInitializedOp",
55268		Input: []tf.Input{
55269			resource,
55270		},
55271	}
55272	op := scope.AddOperation(opspec)
55273	return op.Output(0)
55274}
55275
55276// VariableShapeAttr is an optional argument to VariableShape.
55277type VariableShapeAttr func(optionalAttr)
55278
55279// VariableShapeOutType sets the optional out_type attribute to value.
55280// If not specified, defaults to DT_INT32
55281func VariableShapeOutType(value tf.DataType) VariableShapeAttr {
55282	return func(m optionalAttr) {
55283		m["out_type"] = value
55284	}
55285}
55286
55287// Returns the shape of the variable pointed to by `resource`.
55288//
55289// This operation returns a 1-D integer tensor representing the shape of `input`.
55290//
55291// For example:
55292//
55293// ```
55294// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
55295// shape(t) ==> [2, 2, 3]
55296// ```
55297func VariableShape(scope *Scope, input tf.Output, optional ...VariableShapeAttr) (output tf.Output) {
55298	if scope.Err() != nil {
55299		return
55300	}
55301	attrs := map[string]interface{}{}
55302	for _, a := range optional {
55303		a(attrs)
55304	}
55305	opspec := tf.OpSpec{
55306		Type: "VariableShape",
55307		Input: []tf.Input{
55308			input,
55309		},
55310		Attrs: attrs,
55311	}
55312	op := scope.AddOperation(opspec)
55313	return op.Output(0)
55314}
55315
55316// Returns locations of nonzero / true values in a tensor.
55317//
55318// This operation returns the coordinates of true elements in `condition`. The
55319// coordinates are returned in a 2-D tensor where the first dimension (rows)
55320// represents the number of true elements, and the second dimension (columns)
55321// represents the coordinates of the true elements. Keep in mind, the shape of
55322// the output tensor can vary depending on how many true values there are in
55323// `condition`. Indices are output in row-major order.
55324//
55325// For example:
55326//
55327// ```
55328// # 'input' tensor is [[True, False]
55329// #                    [True, False]]
55330// # 'input' has two true values, so output has two coordinates.
55331// # 'input' has rank of 2, so coordinates have two indices.
55332// where(input) ==> [[0, 0],
55333//
55334//	[1, 0]]
55335//
55336// # `condition` tensor is [[[True, False]
55337// #                     [True, False]]
55338// #                    [[False, True]
55339// #                     [False, True]]
55340// #                    [[False, False]
55341// #                     [False, True]]]
55342// # 'input' has 5 true values, so output has 5 coordinates.
55343// # 'input' has rank of 3, so coordinates have three indices.
55344// where(input) ==> [[0, 0, 0],
55345//
55346//	[0, 1, 0],
55347//	[1, 0, 1],
55348//	[1, 1, 1],
55349//	[2, 1, 1]]
55350//
55351// # `condition` tensor is [[[1.5,  0.0]
55352// #                     [-0.5, 0.0]]
55353// #                    [[0.0,  0.25]
55354// #                     [0.0,  0.75]]
55355// #                    [[0.0,  0.0]
55356// #                     [0.0,  0.01]]]
55357// # 'input' has 5 nonzero values, so output has 5 coordinates.
55358// # 'input' has rank of 3, so coordinates have three indices.
55359// where(input) ==> [[0, 0, 0],
55360//
55361//	[0, 1, 0],
55362//	[1, 0, 1],
55363//	[1, 1, 1],
55364//	[2, 1, 1]]
55365//
55366// # `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
55367// #                     [0.0 + 0.5j, 0.0  + 0.0j]]
55368// #                    [[0.0 + 0.0j, 0.25 + 1.5j]
55369// #                     [0.0 + 0.0j, 0.75 + 0.0j]]
55370// #                    [[0.0 + 0.0j, 0.0  + 0.0j]
55371// #                     [0.0 + 0.0j, 0.01 + 0.0j]]]
55372// # 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
55373// # 'input' has rank of 3, so coordinates have three indices.
55374// where(input) ==> [[0, 0, 0],
55375//
55376//	[0, 1, 0],
55377//	[1, 0, 1],
55378//	[1, 1, 1],
55379//	[2, 1, 1]]
55380//
55381// ```
55382func Where(scope *Scope, condition tf.Output) (index tf.Output) {
55383	if scope.Err() != nil {
55384		return
55385	}
55386	opspec := tf.OpSpec{
55387		Type: "Where",
55388		Input: []tf.Input{
55389			condition,
55390		},
55391	}
55392	op := scope.AddOperation(opspec)
55393	return op.Output(0)
55394}
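
// exampleWhere is a hand-written usage sketch, not generated code, mirroring
// the first documentation example above: a 2x2 boolean condition with two
// true values yields a 2x2 coordinate tensor.
func exampleWhere() tf.Output {
	s := NewScope()
	cond := Const(s, [][]bool{{true, false}, {true, false}})
	// index ==> [[0 0] [1 0]]
	return Where(s, cond)
}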
55395
55396// WholeFileReaderV2Attr is an optional argument to WholeFileReaderV2.
55397type WholeFileReaderV2Attr func(optionalAttr)
55398
55399// WholeFileReaderV2Container sets the optional container attribute to value.
55400//
55401// value: If non-empty, this reader is placed in the given container.
55402// Otherwise, a default container is used.
55403// If not specified, defaults to ""
55404func WholeFileReaderV2Container(value string) WholeFileReaderV2Attr {
55405	return func(m optionalAttr) {
55406		m["container"] = value
55407	}
55408}
55409
55410// WholeFileReaderV2SharedName sets the optional shared_name attribute to value.
55411//
55412// value: If non-empty, this reader is named in the given bucket
55413// with this shared_name. Otherwise, the node name is used instead.
55414// If not specified, defaults to ""
55415func WholeFileReaderV2SharedName(value string) WholeFileReaderV2Attr {
55416	return func(m optionalAttr) {
55417		m["shared_name"] = value
55418	}
55419}
55420
55421// A Reader that outputs the entire contents of a file as a value.
55422//
55423// To use, enqueue filenames in a Queue.  The output of ReaderRead will
55424// be a filename (key) and the contents of that file (value).
55425//
55426// Returns The handle to reference the Reader.
55427func WholeFileReaderV2(scope *Scope, optional ...WholeFileReaderV2Attr) (reader_handle tf.Output) {
55428	if scope.Err() != nil {
55429		return
55430	}
55431	attrs := map[string]interface{}{}
55432	for _, a := range optional {
55433		a(attrs)
55434	}
55435	opspec := tf.OpSpec{
55436		Type: "WholeFileReaderV2",
55437
55438		Attrs: attrs,
55439	}
55440	op := scope.AddOperation(opspec)
55441	return op.Output(0)
55442}
55443
55444// WindowDatasetAttr is an optional argument to WindowDataset.
55445type WindowDatasetAttr func(optionalAttr)
55446
55447// WindowDatasetMetadata sets the optional metadata attribute to value.
55448// If not specified, defaults to ""
55449func WindowDatasetMetadata(value string) WindowDatasetAttr {
55450	return func(m optionalAttr) {
55451		m["metadata"] = value
55452	}
55453}
55454
55455//	Combines (nests of) input elements into a dataset of (nests of) windows.
55456//
55457//	A "window" is a finite dataset of flat elements of size `size` (or possibly
55458//	fewer if there are not enough input elements to fill the window and
55459//	`drop_remainder` evaluates to false).
55460//
55461//	The `shift` argument determines the number of input elements by which
55462//	the window moves on each iteration.  The first element in the `k`th window
55463//	will be element
55464//
55465//	```
55466//	1 + (k-1) * shift
55467//	```
55468//
55469//	of the input dataset. In particular, the first element of the first window
55470//	will always be the first element of the input dataset.
55471//
55472//	If the `stride` parameter is greater than 1, then each window will skip
55473//	`(stride - 1)` input elements between each element that appears in the
55474//	window. Output windows will still contain `size` elements regardless of
55475//	the value of `stride`.
55476//
55477//	The `stride` argument determines the stride of the input elements, and the
55478//	`shift` argument determines the shift of the window.
55479//
55480//	For example, letting `{...}` represent a Dataset:
55481//
55482//	- `tf.data.Dataset.range(7).window(2)` produces
55483//	  `{{0, 1}, {2, 3}, {4, 5}, {6}}`
55484//	- `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
55485//	  `{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
55486//	- `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
55487//	  `{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
55488//
55489//	Note that when the `window` transformation is applied to a dataset of
55490//	nested elements, it produces a dataset of nested windows.
55491//
55492//	For example:
55493//
55494//	- `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
55495//	  produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
55496//	- `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
55497//	  produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
55498//
55499// Arguments:
55500//
55501//	size: An integer scalar, representing the number of elements
55502//
55503// of the input dataset to combine into a window. Must be positive.
55504//
55505//	shift: An integer scalar, representing the number of input elements
55506//
55507// by which the window moves in each iteration.  Defaults to `size`.
55508// Must be positive.
55509//
55510//	stride: An integer scalar, representing the stride of the input elements
55511//
55512// in the sliding window. Must be positive. The default value of 1 means
55513// "retain every input element".
55514//
55515//	drop_remainder: A Boolean scalar, representing whether the last window should be
55516//
55517// dropped if its size is smaller than `window_size`.
55518func WindowDataset(scope *Scope, input_dataset tf.Output, size tf.Output, shift tf.Output, stride tf.Output, drop_remainder tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...WindowDatasetAttr) (handle tf.Output) {
55519	if scope.Err() != nil {
55520		return
55521	}
55522	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
55523	for _, a := range optional {
55524		a(attrs)
55525	}
55526	opspec := tf.OpSpec{
55527		Type: "WindowDataset",
55528		Input: []tf.Input{
55529			input_dataset, size, shift, stride, drop_remainder,
55530		},
55531		Attrs: attrs,
55532	}
55533	op := scope.AddOperation(opspec)
55534	return op.Output(0)
55535}
55536
55537// Worker heartbeat op.
55538//
55539// Heartbeats may be sent periodically to indicate the coordinator is still active,
55540// to retrieve the current worker status and to expedite shutdown when necessary.
55541//
55542// Arguments:
55543//
55544//	request: A string tensor containing a serialized WorkerHeartbeatRequest
55545//
55546// Returns A string tensor containing a serialized WorkerHeartbeatResponse
55547func WorkerHeartbeat(scope *Scope, request tf.Output) (response tf.Output) {
55548	if scope.Err() != nil {
55549		return
55550	}
55551	opspec := tf.OpSpec{
55552		Type: "WorkerHeartbeat",
55553		Input: []tf.Input{
55554			request,
55555		},
55556	}
55557	op := scope.AddOperation(opspec)
55558	return op.Output(0)
55559}
55560
55561// WriteAudioSummaryAttr is an optional argument to WriteAudioSummary.
55562type WriteAudioSummaryAttr func(optionalAttr)
55563
55564// WriteAudioSummaryMaxOutputs sets the optional max_outputs attribute to value.
55565// If not specified, defaults to 3
55566//
55567// REQUIRES: value >= 1
55568func WriteAudioSummaryMaxOutputs(value int64) WriteAudioSummaryAttr {
55569	return func(m optionalAttr) {
55570		m["max_outputs"] = value
55571	}
55572}
55573
55574// Writes an audio summary.
55575//
55576// Writes encoded audio summary `tensor` at `step` with `tag` using summary `writer`.
55577// `sample_rate` is the audio sample rate, in Hz.
55578//
55579// Returns the created operation.
55580func WriteAudioSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, sample_rate tf.Output, optional ...WriteAudioSummaryAttr) (o *tf.Operation) {
55581	if scope.Err() != nil {
55582		return
55583	}
55584	attrs := map[string]interface{}{}
55585	for _, a := range optional {
55586		a(attrs)
55587	}
55588	opspec := tf.OpSpec{
55589		Type: "WriteAudioSummary",
55590		Input: []tf.Input{
55591			writer, step, tag, tensor, sample_rate,
55592		},
55593		Attrs: attrs,
55594	}
55595	return scope.AddOperation(opspec)
55596}
55597
55598// Writes `contents` to the file at input `filename`.
55599//
55600// Creates the file and recursively creates the directory if it does not exist.
55601//
55602// Arguments:
55603//
55604//	filename: scalar. The name of the file to which we write the contents.
55605//	contents: scalar. The content to be written to the output file.
55606//
55607// Returns the created operation.
55608func WriteFile(scope *Scope, filename tf.Output, contents tf.Output) (o *tf.Operation) {
55609	if scope.Err() != nil {
55610		return
55611	}
55612	opspec := tf.OpSpec{
55613		Type: "WriteFile",
55614		Input: []tf.Input{
55615			filename, contents,
55616		},
55617	}
55618	return scope.AddOperation(opspec)
55619}
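
// exampleWriteFile is a hand-written usage sketch, not generated code. The op
// only writes the file when the returned operation is executed in a session;
// the path used here is purely illustrative.
func exampleWriteFile() *tf.Operation {
	s := NewScope()
	filename := Const(s, "/tmp/example.txt") // hypothetical path
	contents := Const(s, "hello, world")
	return WriteFile(s, filename, contents)
}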
55620
55621// Writes a graph summary.
55622//
55623// Writes TensorFlow graph `tensor` at `step` using summary `writer`.
55624//
55625// Returns the created operation.
55626func WriteGraphSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
55627	if scope.Err() != nil {
55628		return
55629	}
55630	opspec := tf.OpSpec{
55631		Type: "WriteGraphSummary",
55632		Input: []tf.Input{
55633			writer, step, tensor,
55634		},
55635	}
55636	return scope.AddOperation(opspec)
55637}
55638
55639// Writes a histogram summary.
55640//
55641// Writes histogram `values` at `step` with `tag` using summary `writer`.
55642//
55643// Returns the created operation.
55644func WriteHistogramSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, values tf.Output) (o *tf.Operation) {
55645	if scope.Err() != nil {
55646		return
55647	}
55648	opspec := tf.OpSpec{
55649		Type: "WriteHistogramSummary",
55650		Input: []tf.Input{
55651			writer, step, tag, values,
55652		},
55653	}
55654	return scope.AddOperation(opspec)
55655}
55656
55657// WriteImageSummaryAttr is an optional argument to WriteImageSummary.
55658type WriteImageSummaryAttr func(optionalAttr)
55659
55660// WriteImageSummaryMaxImages sets the optional max_images attribute to value.
55661// If not specified, defaults to 3
55662//
55663// REQUIRES: value >= 1
55664func WriteImageSummaryMaxImages(value int64) WriteImageSummaryAttr {
55665	return func(m optionalAttr) {
55666		m["max_images"] = value
55667	}
55668}
55669
55670// Writes an image summary.
55671//
55672// Writes image `tensor` at `step` with `tag` using summary `writer`.
55673// `tensor` is image with shape [height, width, channels].
55674//
55675// Returns the created operation.
55676func WriteImageSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, tensor tf.Output, bad_color tf.Output, optional ...WriteImageSummaryAttr) (o *tf.Operation) {
55677	if scope.Err() != nil {
55678		return
55679	}
55680	attrs := map[string]interface{}{}
55681	for _, a := range optional {
55682		a(attrs)
55683	}
55684	opspec := tf.OpSpec{
55685		Type: "WriteImageSummary",
55686		Input: []tf.Input{
55687			writer, step, tag, tensor, bad_color,
55688		},
55689		Attrs: attrs,
55690	}
55691	return scope.AddOperation(opspec)
55692}
55693
55694// Writes a serialized proto summary.
55695//
55696// Writes `tensor`, a serialized proto at `step` using summary `writer`.
55697//
55698// Returns the created operation.
55699func WriteRawProtoSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output) (o *tf.Operation) {
55700	if scope.Err() != nil {
55701		return
55702	}
55703	opspec := tf.OpSpec{
55704		Type: "WriteRawProtoSummary",
55705		Input: []tf.Input{
55706			writer, step, tensor,
55707		},
55708	}
55709	return scope.AddOperation(opspec)
55710}
55711
55712// Writes a scalar summary.
55713//
55714// Writes scalar `value` at `step` with `tag` using summary `writer`.
55715//
55716// Returns the created operation.
55717func WriteScalarSummary(scope *Scope, writer tf.Output, step tf.Output, tag tf.Output, value tf.Output) (o *tf.Operation) {
55718	if scope.Err() != nil {
55719		return
55720	}
55721	opspec := tf.OpSpec{
55722		Type: "WriteScalarSummary",
55723		Input: []tf.Input{
55724			writer, step, tag, value,
55725		},
55726	}
55727	return scope.AddOperation(opspec)
55728}
55729
55730// Writes a tensor summary.
55731//
55732// Writes `tensor` at `step` with `tag` using summary `writer`.
55733//
55734// Returns the created operation.
55735func WriteSummary(scope *Scope, writer tf.Output, step tf.Output, tensor tf.Output, tag tf.Output, summary_metadata tf.Output) (o *tf.Operation) {
55736	if scope.Err() != nil {
55737		return
55738	}
55739	opspec := tf.OpSpec{
55740		Type: "WriteSummary",
55741		Input: []tf.Input{
55742			writer, step, tensor, tag, summary_metadata,
55743		},
55744	}
55745	return scope.AddOperation(opspec)
55746}
55747
55748// Returns 0 if x == 0, and x / y otherwise, elementwise.
55749func Xdivy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
55750	if scope.Err() != nil {
55751		return
55752	}
55753	opspec := tf.OpSpec{
55754		Type: "Xdivy",
55755		Input: []tf.Input{
55756			x, y,
55757		},
55758	}
55759	op := scope.AddOperation(opspec)
55760	return op.Output(0)
55761}
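
// exampleXdivy is a hand-written usage sketch, not generated code. Xdivy
// returns 0 where x == 0, avoiding the NaN/Inf a plain division would
// produce.
func exampleXdivy() tf.Output {
	s := NewScope()
	x := Const(s, []float32{0, 4})
	y := Const(s, []float32{2, 2})
	// z ==> [0 2]
	return Xdivy(s, x, y)
}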
55762
55763// Wraps the XLA AllReduce operator
55764//
55765//	documented at https://www.tensorflow.org/xla/operation_semantics#allreduce.
55766//
55767// Arguments:
55768//
55769//	input: Array or a non-empty tuple of arrays to reduce across replicas.
55770//	group_assignment: Groups between which the reductions are performed.
55771//	reduce_op: Reduction computation.
55772//	mode: group mode.
55773//
55774// CrossReplica: group_assignment contains replica_id. Each group contains the
55775//
55776//	replicas for the current partition.
55777//
55778// CrossReplicaAndPartition: group_assignment contains replica_id. Each group
55779//
55780//	contains the replicas for all partitions.
55781func XlaAllReduce(scope *Scope, input tf.Output, group_assignment tf.Output, reduce_op string, mode string) (output tf.Output) {
55782	if scope.Err() != nil {
55783		return
55784	}
55785	attrs := map[string]interface{}{"reduce_op": reduce_op, "mode": mode}
55786	opspec := tf.OpSpec{
55787		Type: "XlaAllReduce",
55788		Input: []tf.Input{
55789			input, group_assignment,
55790		},
55791		Attrs: attrs,
55792	}
55793	op := scope.AddOperation(opspec)
55794	return op.Output(0)
55795}
55796
55797// Helper operator for performing XLA-style broadcasts
55798//
55799// Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
55800// whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
55801// for binary operators.
55802//
55803// Arguments:
55804//
55805//	lhs: the LHS input tensor
55806//	rhs: the RHS input tensor
55807//	broadcast_dims: an XLA-style broadcast dimension specification
55808//
55809// Returns:
55810//
55811//	lhs_output: the broadcasted LHS tensor
55812//	rhs_output: the broadcasted RHS tensor
55813func XlaBroadcastHelper(scope *Scope, lhs tf.Output, rhs tf.Output, broadcast_dims tf.Output) (lhs_output tf.Output, rhs_output tf.Output) {
55814	if scope.Err() != nil {
55815		return
55816	}
55817	opspec := tf.OpSpec{
55818		Type: "XlaBroadcastHelper",
55819		Input: []tf.Input{
55820			lhs, rhs, broadcast_dims,
55821		},
55822	}
55823	op := scope.AddOperation(opspec)
55824	return op.Output(0), op.Output(1)
55825}
55826
55827// Temporary op for experimenting with jax2tf.
55828//
55829// DO NOT USE THIS OP. It has no backwards compatibility guarantees. It is also
55830// very likely to change. This op will be used only in jax2tf under an
55831// experimental flag.
55832//
55833// This is an experimental op to allow a smooth evolution of jax2tf towards
55834// emitting and serializing MHLO directly from JAX. At the moment this op
55835// carries a serialized MHLO module, therefore there are no backward-compatibility
55836// guarantees, and should not be used for serialization.
55837// Eventually, the op will carry a MHLO object, which will have
55838// backwards-compatibility guarantees.
55839//
55840// The serialized module must return a tuple if and only if the Sout is an empty
55841// list or a list with more than 1 elements. The length of Tout and Sout must
55842// match. This op always returns a tuple of results, even if the module returns
55843// a single result.
55844//
55845// The handling of dynamic shapes is work-in-progress. At the moment, the
55846// JAX lowering for dynamic shapes will prepend one dimension parameter to the
55847// serialized module for each dimension whose value must be passed in.
55848// The "args" correspond to the non-dimension arguments. During compilation
55849// we compute the values of the dimension arguments based on the static shapes of
55850// the "args". In order to do this, we encode for each dimension argument a
55851// specification of how to compute its value, as a string, in the form
55852// "<arg_idx>.<axis_idx>".
55853// E.g., the specification "2.1" denotes the value args[2].shape[1].
55854//
55855// Arguments:
55856//
55857//	args: A list of `Tensor` with possibly different types to be passed as arguments
55858//
55859// to the HLO module.
55860//
55861//	module: A serialized computation, a text representation of mlir.Module.
55862//	Sout: List of output tensor shapes.
55863//	Tout: List of output tensor data types.
55864//	dim_args_spec: the specification for the dimension arguments, one for each
55865//
55866// dimension argument. In absence of dynamic shapes this list is empty.
55867func XlaCallModule(scope *Scope, args []tf.Output, module string, Sout []tf.Shape, Tout []tf.DataType, dim_args_spec []string) (output []tf.Output) {
55868	if scope.Err() != nil {
55869		return
55870	}
55871	attrs := map[string]interface{}{"module": module, "Sout": Sout, "Tout": Tout, "dim_args_spec": dim_args_spec}
55872	opspec := tf.OpSpec{
55873		Type: "XlaCallModule",
55874		Input: []tf.Input{
55875			tf.OutputList(args),
55876		},
55877		Attrs: attrs,
55878	}
55879	op := scope.AddOperation(opspec)
55880	if scope.Err() != nil {
55881		return
55882	}
55883	var idx int
55884	var err error
55885	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
55886		scope.UpdateErr("XlaCallModule", err)
55887		return
55888	}
55889	return output
55890}
55891
55892// XlaConcatNDAttr is an optional argument to XlaConcatND.
55893type XlaConcatNDAttr func(optionalAttr)
55894
55895// XlaConcatNDPaddings sets the optional paddings attribute to value.
55896//
55897// value: Optional list of right paddings per dimension to strip from the final merged
55898// tensor. These paddings must not exceed the dimension size of the merged result
55899// prior to stripping paddings.
55900// If not specified, defaults to {}
55901func XlaConcatNDPaddings(value []int64) XlaConcatNDAttr {
55902	return func(m optionalAttr) {
55903		m["paddings"] = value
55904	}
55905}
55906
55907// Concatenates input tensor slices across all dimensions.
55908//
55909// An op which merges slices of the input tensor based on the given num_concats
55910// attribute, optionally strips paddings, and returns the merged tensor without
55911// paddings.
55912//
55913// This op may be generated via the TPU bridge.
55914//
55915// For example, with `input` tensor:
55916// ```
55917// [[0, 1],
55918//
55919//	[4, 5]]
55920//
55921// [[2, 3],
55922//
55923//	[6, 7]]
55924//
55925// [[8, 9],
55926//
55927//	[12, 13]]
55928//
55929// [[10, 11],
55930//
55931//	[14, 15]]
55932//
55933// ```
55934// `num_splits`:
55935// ```
55936// [2, 2]
55937// ```
55938// and `paddings`:
55939// ```
55940// [1, 1]
55941// ```
55942// the expected `outputs` is:
55943// ```
55944// [[0, 1, 2],
55945//
55946//	[4, 5, 6],
55947//	[8, 9, 10]]
55948//
55949// ```
55950//
55951// Arguments:
55952//
55953//	inputs: Input tensor slices in row-major order to merge across all dimensions. All
55954//
55955// inputs must have the same shape.
55956//
55957//	num_concats: Number of ways to merge per dimension.
55958//
55959// Returns Output tensor formed from merging input slices based on num_concats.
55965func XlaConcatND(scope *Scope, inputs []tf.Output, num_concats []int64, optional ...XlaConcatNDAttr) (output tf.Output) {
55966	if scope.Err() != nil {
55967		return
55968	}
55969	attrs := map[string]interface{}{"num_concats": num_concats}
55970	for _, a := range optional {
55971		a(attrs)
55972	}
55973	opspec := tf.OpSpec{
55974		Type: "XlaConcatND",
55975		Input: []tf.Input{
55976			tf.OutputList(inputs),
55977		},
55978		Attrs: attrs,
55979	}
55980	op := scope.AddOperation(opspec)
55981	return op.Output(0)
55982}
55983
55984// Wraps the XLA ConvGeneralDilated operator, documented at
55985//
55986//	https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
55987//
55988// .
55989//
55990// Arguments:
55991//
55992//	lhs: the input tensor
55993//	rhs: the kernel tensor
55994//	window_strides: the inter-window strides
55995//	padding: the padding to apply at the start and end of each input dimensions
55996//	lhs_dilation: dilation to apply between input elements
55997//	rhs_dilation: dilation to apply between kernel elements
55998//	feature_group_count: number of feature groups for grouped convolution.
55999//	dimension_numbers: a serialized xla::ConvolutionDimensionNumbers proto.
56000//	precision_config: a serialized xla::PrecisionConfig proto.
56001func XlaConv(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
56002	if scope.Err() != nil {
56003		return
56004	}
56005	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config}
56006	opspec := tf.OpSpec{
56007		Type: "XlaConv",
56008		Input: []tf.Input{
56009			lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count,
56010		},
56011		Attrs: attrs,
56012	}
56013	op := scope.AddOperation(opspec)
56014	return op.Output(0)
56015}
56016
56017// XlaConvV2Attr is an optional argument to XlaConvV2.
56018type XlaConvV2Attr func(optionalAttr)
56019
56020// XlaConvV2BatchGroupCount sets the optional batch_group_count attribute to value.
56021//
56022// value: number of batch groups or grouped filters.
56023// If not specified, defaults to 1
56024func XlaConvV2BatchGroupCount(value int64) XlaConvV2Attr {
56025	return func(m optionalAttr) {
56026		m["batch_group_count"] = value
56027	}
56028}
56029
56030// Wraps the XLA ConvGeneralDilated operator, documented at
56031//
56032//	https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
56033//
56034// .
56035//
56036// Arguments:
56037//
56038//	lhs: input tensor
56039//	rhs: kernel tensor
56040//	window_strides: inter-window strides
56041//	padding: padding to apply at the start and end of each input dimensions
56042//	lhs_dilation: dilation to apply between input elements
56043//	rhs_dilation: dilation to apply between kernel elements
56044//	feature_group_count: number of feature groups for grouped convolution.
56045//	dimension_numbers: serialized xla::ConvolutionDimensionNumbers proto.
56046//	precision_config: serialized xla::PrecisionConfig proto.
56047//	preferred_element_type: type of the tensor.
56048func XlaConvV2(scope *Scope, lhs tf.Output, rhs tf.Output, window_strides tf.Output, padding tf.Output, lhs_dilation tf.Output, rhs_dilation tf.Output, feature_group_count tf.Output, dimension_numbers string, precision_config string, preferred_element_type tf.DataType, optional ...XlaConvV2Attr) (output tf.Output) {
56049	if scope.Err() != nil {
56050		return
56051	}
56052	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config, "preferred_element_type": preferred_element_type}
56053	for _, a := range optional {
56054		a(attrs)
56055	}
56056	opspec := tf.OpSpec{
56057		Type: "XlaConvV2",
56058		Input: []tf.Input{
56059			lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count,
56060		},
56061		Attrs: attrs,
56062	}
56063	op := scope.AddOperation(opspec)
56064	return op.Output(0)
56065}
56066
56067// Wraps the XLA CustomCall operator
56068//
56069//	documented at https://www.tensorflow.org/xla/operation_semantics#customcall.
56070//
56071// Arguments:
56072//
56073//	args: A list of `Tensor` with possibly different types.
56074//	target_name: Name of the function. A call instruction will be emitted which
56075//
56076// targets this symbol name.
56077//
56078//	backend_config: String, used to encode serialized metadata to the backend.
56079//	dtype: Output tensor data type.
56080//	shape: Output tensor shape.
56081func XlaCustomCall(scope *Scope, args []tf.Output, target_name string, backend_config string, dtype tf.DataType, shape tf.Shape) (output tf.Output) {
56082	if scope.Err() != nil {
56083		return
56084	}
56085	attrs := map[string]interface{}{"target_name": target_name, "backend_config": backend_config, "dtype": dtype, "shape": shape}
56086	opspec := tf.OpSpec{
56087		Type: "XlaCustomCall",
56088		Input: []tf.Input{
56089			tf.OutputList(args),
56090		},
56091		Attrs: attrs,
56092	}
56093	op := scope.AddOperation(opspec)
56094	return op.Output(0)
56095}
56096
56097// Takes the packed uint32 input and unpacks the input to uint8 to do
56098//
56099// Dequantization on device.
56100//
56101// Arguments:
56102//
56103//	input: Input tensor whose type is uint32, with shape [d0, ..., dn].
56104//	min_range: The minimum scalar value possibly produced for the input.
56105//	max_range: The maximum scalar value possibly produced for the input.
56106//	mode: String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", "SCALED"}.
56107//	transpose_output: Boolean to determine if output is transposed. transpose_output
56108//
56109// is faster when input is large and rank of input is higher than 1.
56110//
56111// Returns Output tensor whose type is bfloat16. If transpose_output is true,
56112// output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output
56113// is false, output shape is [d0,..., dn * 4].
56114func XlaDequantize(scope *Scope, input tf.Output, min_range float32, max_range float32, mode string, transpose_output bool) (output tf.Output) {
56115	if scope.Err() != nil {
56116		return
56117	}
56118	attrs := map[string]interface{}{"min_range": min_range, "max_range": max_range, "mode": mode, "transpose_output": transpose_output}
56119	opspec := tf.OpSpec{
56120		Type: "XlaDequantize",
56121		Input: []tf.Input{
56122			input,
56123		},
56124		Attrs: attrs,
56125	}
56126	op := scope.AddOperation(opspec)
56127	return op.Output(0)
56128}
56129
56130// Wraps the XLA DotGeneral operator, documented at
56131//
56132//	https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
56135//
56136// Arguments:
56137//
56138//	lhs: the LHS tensor
56139//	rhs: the RHS tensor
56140//	dimension_numbers: a serialized xla::DotDimensionNumbers proto.
56141//	precision_config: a serialized xla::PrecisionConfig proto.
56142func XlaDot(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string) (output tf.Output) {
56143	if scope.Err() != nil {
56144		return
56145	}
56146	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config}
56147	opspec := tf.OpSpec{
56148		Type: "XlaDot",
56149		Input: []tf.Input{
56150			lhs, rhs,
56151		},
56152		Attrs: attrs,
56153	}
56154	op := scope.AddOperation(opspec)
56155	return op.Output(0)
56156}
56157
56158// Wraps the XLA DotGeneral operator, documented at
56159//
56160//	https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
56163//
56164// Arguments:
56165//
56166//	lhs: the LHS tensor
56167//	rhs: the RHS tensor
56168//	dimension_numbers: a serialized xla::DotDimensionNumbers proto.
56169//	precision_config: a serialized xla::PrecisionConfig proto.
56170//	preferred_element_type: The type of the output tensor.
56171func XlaDotV2(scope *Scope, lhs tf.Output, rhs tf.Output, dimension_numbers string, precision_config string, preferred_element_type tf.DataType) (output tf.Output) {
56172	if scope.Err() != nil {
56173		return
56174	}
56175	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "precision_config": precision_config, "preferred_element_type": preferred_element_type}
56176	opspec := tf.OpSpec{
56177		Type: "XlaDotV2",
56178		Input: []tf.Input{
56179			lhs, rhs,
56180		},
56181		Attrs: attrs,
56182	}
56183	op := scope.AddOperation(opspec)
56184	return op.Output(0)
56185}
56186
56187// Wraps the XLA DynamicSlice operator, documented at
56188//
56189//	https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
56192//
56193// DynamicSlice extracts a sub-array from the input array at dynamic
56194// start_indices. The size of the slice in each dimension is passed in
56195// size_indices, which specifies the exclusive slice interval in each
56196// dimension -- [start, start + size). start_indices must have rank 1,
56197// with dimension size equal to the rank of operand.
56198//
56199// Arguments:
56200//
56201//	input: A `Tensor` of type T.
56202//	start_indices: Starting indices of the slice in each dimension (a rank 1 tensor of length N).
56203//	size_indices: List of N integers containing the slice size for each
56204// dimension. Each value must be strictly greater than zero, and start + size
56205// must be less than or equal to the size of the dimension to avoid
56206// implementation-defined behavior.
56207func XlaDynamicSlice(scope *Scope, input tf.Output, start_indices tf.Output, size_indices tf.Output) (output tf.Output) {
56208	if scope.Err() != nil {
56209		return
56210	}
56211	opspec := tf.OpSpec{
56212		Type: "XlaDynamicSlice",
56213		Input: []tf.Input{
56214			input, start_indices, size_indices,
56215		},
56216	}
56217	op := scope.AddOperation(opspec)
56218	return op.Output(0)
56219}
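
// Example use of XlaDynamicSlice (editor's illustrative sketch, not part of the
// generated API): assuming a consumer imports this package as `op`, slicing a
// 2x2 window out of a 3x3 matrix could look like:
//
//	root := op.NewScope()
//	x := op.Const(root, [][]float32{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}})
//	start := op.Const(root, []int32{1, 0}) // begin at row 1, column 0
//	sizes := op.Const(root, []int32{2, 2}) // take a 2x2 window
//	slice := op.XlaDynamicSlice(root, x, start, sizes)
//
// Executing the resulting graph requires an XLA-enabled TensorFlow runtime.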
56220
56221// Wraps the XLA DynamicUpdateSlice operator, documented at
56222//
56223//	https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
56226//
56227// XlaDynamicUpdateSlice generates a result which is the value of the `input`
56228// operand, with a slice update overwritten at `indices`. The shape of `update`
56229// determines the shape of the sub-array of the result which is updated. `indices`
56230// must have rank 1, with dimension size equal to the rank of `input`.
56231//
56232// Handling of out-of-bounds slice indices is implementation-defined.
56233//
56234// Arguments:
56235//
56236//	input: A `Tensor` of type T.
56237//	update: A `Tensor` of type T. Same rank as `input`.
56238//	indices: A vector of indices into `input`. Must have length equal to the rank of
56239//
56240// `input`.
56241//
56242// Returns A `Tensor` of type T.
56243func XlaDynamicUpdateSlice(scope *Scope, input tf.Output, update tf.Output, indices tf.Output) (output tf.Output) {
56244	if scope.Err() != nil {
56245		return
56246	}
56247	opspec := tf.OpSpec{
56248		Type: "XlaDynamicUpdateSlice",
56249		Input: []tf.Input{
56250			input, update, indices,
56251		},
56252	}
56253	op := scope.AddOperation(opspec)
56254	return op.Output(0)
56255}
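
// Illustrative sketch (editor's addition, assuming a Scope `root` and a 3x3
// matrix `x` built with op.Const as in the XlaDynamicSlice example above):
//
//	update := op.Const(root, [][]float32{{0, 0}}) // 1x2 patch
//	indices := op.Const(root, []int32{2, 1})      // write at row 2, column 1
//	result := op.XlaDynamicUpdateSlice(root, x, update, indices)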
56256
56257// An op which supports a basic einsum with 2 inputs and 1 output.
56258//
56259// This op has better TPU performance since it doesn't have the explicit reshape
56260// and transpose operations that tf.einsum does.
56261func XlaEinsum(scope *Scope, a tf.Output, b tf.Output, equation string) (product tf.Output) {
56262	if scope.Err() != nil {
56263		return
56264	}
56265	attrs := map[string]interface{}{"equation": equation}
56266	opspec := tf.OpSpec{
56267		Type: "XlaEinsum",
56268		Input: []tf.Input{
56269			a, b,
56270		},
56271		Attrs: attrs,
56272	}
56273	op := scope.AddOperation(opspec)
56274	return op.Output(0)
56275}
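
// Illustrative sketch (editor's addition): a plain matrix multiply expressed
// through XlaEinsum, assuming a Scope `root` created with op.NewScope():
//
//	a := op.Const(root, [][]float32{{1, 2}, {3, 4}})
//	b := op.Const(root, [][]float32{{5, 6}, {7, 8}})
//	product := op.XlaEinsum(root, a, b, "ab,bc->ac")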
56276
56277// Wraps the XLA Gather operator documented at
56278//
56279//	https://www.tensorflow.org/xla/operation_semantics#gather
56280//
56281// Arguments:
56282//
56283//	operand: The array we're gathering from.
56284//	start_indices: Array containing the starting indices of the slices we gather.
56285//	slice_sizes: slice_sizes[i] is the bounds for the slice on dimension i.
56286//	dimension_numbers: A serialized xla::GatherDimensionNumbers proto.
56287//	indices_are_sorted: Boolean indicating if the indices are sorted.
56288func XlaGather(scope *Scope, operand tf.Output, start_indices tf.Output, slice_sizes tf.Output, dimension_numbers string, indices_are_sorted bool) (output tf.Output) {
56289	if scope.Err() != nil {
56290		return
56291	}
56292	attrs := map[string]interface{}{"dimension_numbers": dimension_numbers, "indices_are_sorted": indices_are_sorted}
56293	opspec := tf.OpSpec{
56294		Type: "XlaGather",
56295		Input: []tf.Input{
56296			operand, start_indices, slice_sizes,
56297		},
56298		Attrs: attrs,
56299	}
56300	op := scope.AddOperation(opspec)
56301	return op.Output(0)
56302}
56303
56304// Wraps the XLA Sort operator, documented at
56305//
56306//	https://www.tensorflow.org/performance/xla/operation_semantics#sort
56309//
56310// Sorts a tensor. Currently, only sorting in ascending order is supported.
56311//
56312// Arguments:
56313//
56314//	keys: A `Tensor` of type K.
56315//	values: A `Tensor` of type V.
56316//
56317// Returns:
56318//
56319//	sorted_keys: A `Tensor` of type K.
56320//	sorted_values: A `Tensor` of type V.
56321func XlaKeyValueSort(scope *Scope, keys tf.Output, values tf.Output) (sorted_keys tf.Output, sorted_values tf.Output) {
56322	if scope.Err() != nil {
56323		return
56324	}
56325	opspec := tf.OpSpec{
56326		Type: "XlaKeyValueSort",
56327		Input: []tf.Input{
56328			keys, values,
56329		},
56330	}
56331	op := scope.AddOperation(opspec)
56332	return op.Output(0), op.Output(1)
56333}
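
// Illustrative sketch (editor's addition, assuming a Scope `root`): sorting
// values by their keys,
//
//	keys := op.Const(root, []float32{3, 1, 2})
//	vals := op.Const(root, []int32{10, 20, 30})
//	sk, sv := op.XlaKeyValueSort(root, keys, vals)
//	// sk evaluates to {1, 2, 3} and sv to {20, 30, 10}.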
56334
56335// Wraps the XLA OptimizationBarrier operator.
56336//
56337// Documented at https://www.tensorflow.org/xla/operation_semantics#optimizationbarrier.
56338//
56339// Arguments:
56340//
56341//	input: A Tuple of Arrays of any type.
56342func XlaOptimizationBarrier(scope *Scope, input []tf.Output) (output []tf.Output) {
56343	if scope.Err() != nil {
56344		return
56345	}
56346	opspec := tf.OpSpec{
56347		Type: "XlaOptimizationBarrier",
56348		Input: []tf.Input{
56349			tf.OutputList(input),
56350		},
56351	}
56352	op := scope.AddOperation(opspec)
56353	if scope.Err() != nil {
56354		return
56355	}
56356	var idx int
56357	var err error
56358	if output, idx, err = makeOutputList(op, idx, "output"); err != nil {
56359		scope.UpdateErr("XlaOptimizationBarrier", err)
56360		return
56361	}
56362	return output
56363}
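
// Illustrative sketch (editor's addition, assuming a Scope `root`, outputs `a`
// and `b` built elsewhere, and the usual `tf` alias for the core bindings):
// the barrier passes values through unchanged while preventing the compiler
// from moving computations across it,
//
//	held := op.XlaOptimizationBarrier(root, []tf.Output{a, b})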
56364
56365// Wraps the XLA Pad operator, documented at
56366//
56367//	https://www.tensorflow.org/performance/xla/operation_semantics#pad
56370//
56371// Arguments:
56372//
56373//	input: A `Tensor` of type T.
56374//	padding_value: A scalar `Tensor` of type T.
56375//	padding_low: the padding to apply at the start of each input dimension. Must
56376//
56377// be a compile-time constant 1D tensor of length equal to rank of input.
56378//
56379//	padding_high: the padding to apply at the end of each input dimension. Must
56380//
56381// be a compile-time constant 1D tensor of length equal to rank of input.
56382//
56383//	padding_interior: the padding to apply between each input element. Must
56384//
56385// be a compile-time constant 1D tensor of length equal to rank of input,
56386// containing only non-negative values.
56387//
56388// Returns A `Tensor` of type T.
56389func XlaPad(scope *Scope, input tf.Output, padding_value tf.Output, padding_low tf.Output, padding_high tf.Output, padding_interior tf.Output) (output tf.Output) {
56390	if scope.Err() != nil {
56391		return
56392	}
56393	opspec := tf.OpSpec{
56394		Type: "XlaPad",
56395		Input: []tf.Input{
56396			input, padding_value, padding_low, padding_high, padding_interior,
56397		},
56398	}
56399	op := scope.AddOperation(opspec)
56400	return op.Output(0)
56401}
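
// Illustrative sketch (editor's addition, assuming a Scope `root`; the integer
// type chosen for the padding vectors is an assumption, the op expecting
// compile-time constant integer tensors):
//
//	x := op.Const(root, []float32{1, 2, 3})
//	value := op.Const(root, float32(0))
//	low := op.Const(root, []int64{1})
//	high := op.Const(root, []int64{2})
//	interior := op.Const(root, []int64{0})
//	padded := op.XlaPad(root, x, value, low, high, interior)
//	// padded evaluates to {0, 1, 2, 3, 0, 0}.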
56402
56403// Receives the named tensor from another XLA computation. Wraps the XLA Recv
56404//
56405// operator documented at
56406//
56407//	https://www.tensorflow.org/performance/xla/operation_semantics#recv
56408//
56409// Arguments:
56410//
56411//	dtype: The type of the tensor.
56412//	tensor_name: A string key that identifies the channel.
56413//	shape: The shape of the tensor.
56414//
56415// Returns The tensor to receive.
56416func XlaRecv(scope *Scope, dtype tf.DataType, tensor_name string, shape tf.Shape) (tensor tf.Output) {
56417	if scope.Err() != nil {
56418		return
56419	}
56420	attrs := map[string]interface{}{"dtype": dtype, "tensor_name": tensor_name, "shape": shape}
56421	opspec := tf.OpSpec{
56422		Type: "XlaRecv",
56423
56424		Attrs: attrs,
56425	}
56426	op := scope.AddOperation(opspec)
56427	return op.Output(0)
56428}
56429
56430// An op to receive a tensor from the host.
56431//
56432// output: the tensor that will be received from the host.
56433// Toutput: element type for output.
56434// shape: shape for output.
56435// key: A unique identifier for this region used to match up host transfers.
56436func XlaRecvFromHost(scope *Scope, Toutput tf.DataType, shape tf.Shape, key string) (output tf.Output) {
56437	if scope.Err() != nil {
56438		return
56439	}
56440	attrs := map[string]interface{}{"Toutput": Toutput, "shape": shape, "key": key}
56441	opspec := tf.OpSpec{
56442		Type: "XlaRecvFromHost",
56443
56444		Attrs: attrs,
56445	}
56446	op := scope.AddOperation(opspec)
56447	return op.Output(0)
56448}
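
// Illustrative sketch (editor's addition, assuming a Scope `root` and the
// usual `tf` alias; the key is a placeholder and must match the key used by
// the corresponding host-side transfer):
//
//	recv := op.XlaRecvFromHost(root, tf.Float, tf.MakeShape(2, 2), "host_transfer_key_0")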
56449
56450// An op that receives embedding activations on the TPU.
56451//
56452// The TPU system performs the embedding lookups and aggregations. The results of
56453// these aggregations are visible to the TensorFlow graph as the outputs of an
56454// XlaRecvTPUEmbeddingActivations op. This op returns a list containing one
56455// Tensor of activations per table specified in the model.
56456//
56457// Arguments:
56458//
56459//	deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
56460//
56461// data. The tensor is an XLA nested tuple containing N elements (where N is
56462// the ratio of the number of embedding cores to tensor cores per TPU chip). Each
56463// element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
56464// contains indices (DT_UINT32) for embedding lookup on the TensorCore or
56465// weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
56466//
56467//	num_tables: The number of output activation tensors. If a feature descriptor is
56468//
56469// present in the TPU embedding config, it is equal to the number of features;
56470// otherwise it is equal to the number of embedding tables in the model.
56471//
56472//	config: Serialized TPUEmbeddingConfiguration proto.
56473//
56474// Returns A TensorList of embedding activations containing one Tensor per
56475// embedding table in the model.
56476func XlaRecvTPUEmbeddingActivations(scope *Scope, deduplication_data tf.Output, num_tables int64, config string) (outputs []tf.Output) {
56477	if scope.Err() != nil {
56478		return
56479	}
56480	attrs := map[string]interface{}{"num_tables": num_tables, "config": config}
56481	opspec := tf.OpSpec{
56482		Type: "XlaRecvTPUEmbeddingActivations",
56483		Input: []tf.Input{
56484			deduplication_data,
56485		},
56486		Attrs: attrs,
56487	}
56488	op := scope.AddOperation(opspec)
56489	if scope.Err() != nil {
56490		return
56491	}
56492	var idx int
56493	var err error
56494	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
56495		scope.UpdateErr("XlaRecvTPUEmbeddingActivations", err)
56496		return
56497	}
56498	return outputs
56499}
56500
56501// Receives deduplication data (indices and weights) from the embedding core.
56502//
56503// The deduplication data is a Tensor with type=DT_VARIANT. The tensor itself is an
56504// XLA nested tuple containing N elements (where N is the ratio of the number of
56505// embedding cores to tensor cores per TPU chip). Each element of the nested tuple is a
56506// tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for
56507// embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output
56508// of the embedding lookup operation.
56509//
56510// Arguments:
56511//
56512//	config: Serialized TPUEmbeddingConfiguration proto.
56513func XlaRecvTPUEmbeddingDeduplicationData(scope *Scope, config string) (output tf.Output) {
56514	if scope.Err() != nil {
56515		return
56516	}
56517	attrs := map[string]interface{}{"config": config}
56518	opspec := tf.OpSpec{
56519		Type: "XlaRecvTPUEmbeddingDeduplicationData",
56520
56521		Attrs: attrs,
56522	}
56523	op := scope.AddOperation(opspec)
56524	return op.Output(0)
56525}
56526
56527// Wraps the XLA ReducePrecision operator
56528//
56529//	documented at https://www.tensorflow.org/xla/operation_semantics#reduceprecision.
56530//
56531// Arguments:
56532//
56533//	operand: array of floating-point type.
56534//	exponent_bits: number of exponent bits in lower-precision format
56535//	mantissa_bits: number of mantissa bits in lower-precision format
56536func XlaReducePrecision(scope *Scope, operand tf.Output, exponent_bits int64, mantissa_bits int64) (output tf.Output) {
56537	if scope.Err() != nil {
56538		return
56539	}
56540	attrs := map[string]interface{}{"exponent_bits": exponent_bits, "mantissa_bits": mantissa_bits}
56541	opspec := tf.OpSpec{
56542		Type: "XlaReducePrecision",
56543		Input: []tf.Input{
56544			operand,
56545		},
56546		Attrs: attrs,
56547	}
56548	op := scope.AddOperation(opspec)
56549	return op.Output(0)
56550}
56551
56552// Wraps the XLA ReduceScatter operator
56553//
56554//	documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter.
56555//
56556// Arguments:
56557//
56558//	input: Array or a non-empty tuple of arrays to reduce across replicas.
56559//	group_assignment: Groups between which the reductions are performed.
56560//	scatter_dimension: Dimension to scatter.
56561//	reduce_op: Reduction computation.
56562func XlaReduceScatter(scope *Scope, input tf.Output, group_assignment tf.Output, scatter_dimension tf.Output, reduce_op string) (output tf.Output) {
56563	if scope.Err() != nil {
56564		return
56565	}
56566	attrs := map[string]interface{}{"reduce_op": reduce_op}
56567	opspec := tf.OpSpec{
56568		Type: "XlaReduceScatter",
56569		Input: []tf.Input{
56570			input, group_assignment, scatter_dimension,
56571		},
56572		Attrs: attrs,
56573	}
56574	op := scope.AddOperation(opspec)
56575	return op.Output(0)
56576}
56577
56578// Inverse of XlaSetDynamicDimensionSize.
56579//
56580// Make an XLA bounded dynamic dimension into a static dimension. The bound of the
56581// size of dimension `dim_index` becomes the static dimension size.
56582func XlaRemoveDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output) (output tf.Output) {
56583	if scope.Err() != nil {
56584		return
56585	}
56586	opspec := tf.OpSpec{
56587		Type: "XlaRemoveDynamicDimensionSize",
56588		Input: []tf.Input{
56589			input, dim_index,
56590		},
56591	}
56592	op := scope.AddOperation(opspec)
56593	return op.Output(0)
56594}
56595
56596// Replica ID.
56597func XlaReplicaId(scope *Scope) (id tf.Output) {
56598	if scope.Err() != nil {
56599		return
56600	}
56601	opspec := tf.OpSpec{
56602		Type: "XlaReplicaId",
56603	}
56604	op := scope.AddOperation(opspec)
56605	return op.Output(0)
56606}
56607
56608// XlaRngBitGeneratorAttr is an optional argument to XlaRngBitGenerator.
56609type XlaRngBitGeneratorAttr func(optionalAttr)
56610
56611// XlaRngBitGeneratorDtype sets the optional dtype attribute to value.
56612//
56613// value: The type of the tensor.
56614// If not specified, defaults to DT_UINT64
56615func XlaRngBitGeneratorDtype(value tf.DataType) XlaRngBitGeneratorAttr {
56616	return func(m optionalAttr) {
56617		m["dtype"] = value
56618	}
56619}
56620
56621// Stateless PRNG bit generator.
56622//
56623// Wraps the XLA RngBitGenerator operator, documented at
56624//
56625//	https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator.
56626//
56627// Arguments:
56628//
56629//	algorithm: The PRNG algorithm to use, one of
56630//
56631// tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}.
56632//
56633//	initial_state: Initial state for the PRNG algorithm. For THREEFRY, it should be
56634//
56635// a u64[2] and for PHILOX a u64[3].
56636//
56637//	shape: The output shape of the generated data.
56638func XlaRngBitGenerator(scope *Scope, algorithm tf.Output, initial_state tf.Output, shape tf.Output, optional ...XlaRngBitGeneratorAttr) (output_key tf.Output, output tf.Output) {
56639	if scope.Err() != nil {
56640		return
56641	}
56642	attrs := map[string]interface{}{}
56643	for _, a := range optional {
56644		a(attrs)
56645	}
56646	opspec := tf.OpSpec{
56647		Type: "XlaRngBitGenerator",
56648		Input: []tf.Input{
56649			algorithm, initial_state, shape,
56650		},
56651		Attrs: attrs,
56652	}
56653	op := scope.AddOperation(opspec)
56654	return op.Output(0), op.Output(1)
56655}
56656
56657// Computes the eigen decomposition of a batch of self-adjoint matrices
56658//
56659// (Note: Only real inputs are supported).
56660//
56661// Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
56662// tensor such that tensor[...,:,:] * v[..., :,i] = w[..., i] * v[...,:,i], for
56663// i=0...N-1.
56664//
56665// Arguments:
56666//
56667//	a: the input tensor.
56668//	lower: a boolean specifying whether the calculation is done with the lower
56669//
56670// triangular part or the upper triangular part.
56671//
56672//	max_iter: maximum number of sweep updates, i.e., passes over the whole lower
56673//
56674// or upper triangular part, depending on the parameter lower. Heuristically, it
56675// has been argued that approximately log N sweeps are needed in practice (Ref:
56676// Golub & van Loan, "Matrix Computations").
56677//
56678//	epsilon: the tolerance ratio.
56679//
56680// Returns:
56681//
56682//	w: The eigenvalues in ascending order, each repeated according to its
56683//
56684// multiplicity.
56685//
56686//	v: The column v[..., :, i] is the normalized eigenvector corresponding to the
56687//
56688// eigenvalue w[..., i].
56689func XlaSelfAdjointEig(scope *Scope, a tf.Output, lower bool, max_iter int64, epsilon float32) (w tf.Output, v tf.Output) {
56690	if scope.Err() != nil {
56691		return
56692	}
56693	attrs := map[string]interface{}{"lower": lower, "max_iter": max_iter, "epsilon": epsilon}
56694	opspec := tf.OpSpec{
56695		Type: "XlaSelfAdjointEig",
56696		Input: []tf.Input{
56697			a,
56698		},
56699		Attrs: attrs,
56700	}
56701	op := scope.AddOperation(opspec)
56702	return op.Output(0), op.Output(1)
56703}
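
// Illustrative sketch (editor's addition, assuming a Scope `root`; the max_iter
// and epsilon values are arbitrary choices):
//
//	a := op.Const(root, [][]float32{{2, 1}, {1, 2}})
//	w, v := op.XlaSelfAdjointEig(root, a, true, 10, 1e-6)
//	// w holds the eigenvalues in ascending order, v the eigenvectors.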
56704
56705// Sends the named tensor to another XLA computation. Wraps the XLA Send operator
56706//
56707// documented at
56708//
56709//	https://www.tensorflow.org/performance/xla/operation_semantics#send
56710//
56711// Arguments:
56712//
56713//	tensor: The tensor to send.
56714//	tensor_name: A string key that identifies the channel.
56715//
56716// Returns the created operation.
56717func XlaSend(scope *Scope, tensor tf.Output, tensor_name string) (o *tf.Operation) {
56718	if scope.Err() != nil {
56719		return
56720	}
56721	attrs := map[string]interface{}{"tensor_name": tensor_name}
56722	opspec := tf.OpSpec{
56723		Type: "XlaSend",
56724		Input: []tf.Input{
56725			tensor,
56726		},
56727		Attrs: attrs,
56728	}
56729	return scope.AddOperation(opspec)
56730}
56731
56732// An op that performs gradient updates of embedding tables.
56733//
56734// The gradients argument is a TensorList having the same length and shapes as the
56735// return value of XlaRecvTPUEmbeddingActivations, but contains gradients of the
56736// model's loss with respect to the embedding activations. The embedding tables are
56737// updated from these gradients via the optimizer specified in the
56738// TPUEmbeddingConfiguration proto given to tpu.initialize_system.
56739//
56740// Arguments:
56741//
56742//	gradients: A TensorList of gradients with which to update embedding tables.
56743//	learning_rates: A TensorList of learning rates used for updating the embedding
56744//
56745// tables via the optimizer. The length of the TensorList must be equal to the
56746// number of dynamic learning rate tags specified in the
56747// TPUEmbeddingConfiguration proto.
56748//
56749//	deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
56750//
56751// data. The tensor is an XLA nested tuple containing N elements (where N is
56752// the ratio of the number of embedding cores to tensor cores per TPU chip). Each
56753// element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
56754// contains indices (DT_UINT32) for embedding lookup on the TensorCore or
56755// weights (DT_FLOAT) to apply to the output of the embedding lookup operation.
56756//
56757//	config: Serialized TPUEmbeddingConfiguration proto.
56758//
56759// Returns the created operation.
56760func XlaSendTPUEmbeddingGradients(scope *Scope, gradients []tf.Output, learning_rates []tf.Output, deduplication_data tf.Output, config string) (o *tf.Operation) {
56761	if scope.Err() != nil {
56762		return
56763	}
56764	attrs := map[string]interface{}{"config": config}
56765	opspec := tf.OpSpec{
56766		Type: "XlaSendTPUEmbeddingGradients",
56767		Input: []tf.Input{
56768			tf.OutputList(gradients), tf.OutputList(learning_rates), deduplication_data,
56769		},
56770		Attrs: attrs,
56771	}
56772	return scope.AddOperation(opspec)
56773}
56774
56775// An op to send a tensor to the host.
56776//
56777// input: the tensor that will be sent to the host.
56778// Tinput: element type for input.
56779// key: A unique identifier for this region used to match up host transfers.
56780//
56781// Returns the created operation.
56782func XlaSendToHost(scope *Scope, input tf.Output, key string) (o *tf.Operation) {
56783	if scope.Err() != nil {
56784		return
56785	}
56786	attrs := map[string]interface{}{"key": key}
56787	opspec := tf.OpSpec{
56788		Type: "XlaSendToHost",
56789		Input: []tf.Input{
56790			input,
56791		},
56792		Attrs: attrs,
56793	}
56794	return scope.AddOperation(opspec)
56795}
56796
56797// Set a bound for the given input value as a hint to the XLA compiler,
56798//
56799//	returns the same value.
56800func XlaSetBound(scope *Scope, input tf.Output, bound tf.Output) (output tf.Output) {
56801	if scope.Err() != nil {
56802		return
56803	}
56804	opspec := tf.OpSpec{
56805		Type: "XlaSetBound",
56806		Input: []tf.Input{
56807			input, bound,
56808		},
56809	}
56810	op := scope.AddOperation(opspec)
56811	return op.Output(0)
56812}
56813
56814// Make a static dimension into an XLA bounded dynamic dimension.
56815//
56816//	The current static dimension size will become the bound and the second
56817//	operand becomes the dynamic size of the dimension.
56818func XlaSetDynamicDimensionSize(scope *Scope, input tf.Output, dim_index tf.Output, size tf.Output) (output tf.Output) {
56819	if scope.Err() != nil {
56820		return
56821	}
56822	opspec := tf.OpSpec{
56823		Type: "XlaSetDynamicDimensionSize",
56824		Input: []tf.Input{
56825			input, dim_index, size,
56826		},
56827	}
56828	op := scope.AddOperation(opspec)
56829	return op.Output(0)
56830}
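
// Illustrative sketch (editor's addition, assuming a Scope `root` and an input
// `x`; dim_index must be a compile-time constant):
//
//	dim := op.Const(root, int32(0))
//	size := op.Const(root, int32(2))
//	dyn := op.XlaSetDynamicDimensionSize(root, x, dim, size)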
56831
56832// XlaShardingAttr is an optional argument to XlaSharding.
56833type XlaShardingAttr func(optionalAttr)
56834
56835// XlaShardingSharding sets the optional sharding attribute to value.
56836// If not specified, defaults to ""
56837func XlaShardingSharding(value string) XlaShardingAttr {
56838	return func(m optionalAttr) {
56839		m["sharding"] = value
56840	}
56841}
56842
56843// XlaShardingUnspecifiedDims sets the optional unspecified_dims attribute to value.
56844// If not specified, defaults to {}
56845func XlaShardingUnspecifiedDims(value []int64) XlaShardingAttr {
56846	return func(m optionalAttr) {
56847		m["unspecified_dims"] = value
56848	}
56849}
56850
56851// An op which shards the input based on the given sharding attribute. It can
56852//
56853// selectively annotate a subset of tensor dimensions by skipping unspecified_dims,
56854// and the sharding annotation should be replicated in those dims.
56855func XlaSharding(scope *Scope, input tf.Output, optional ...XlaShardingAttr) (output tf.Output) {
56856	if scope.Err() != nil {
56857		return
56858	}
56859	attrs := map[string]interface{}{}
56860	for _, a := range optional {
56861		a(attrs)
56862	}
56863	opspec := tf.OpSpec{
56864		Type: "XlaSharding",
56865		Input: []tf.Input{
56866			input,
56867		},
56868		Attrs: attrs,
56869	}
56870	op := scope.AddOperation(opspec)
56871	return op.Output(0)
56872}
56873
56874// Wraps the XLA Sort operator, documented at
56875//
56876//	https://www.tensorflow.org/performance/xla/operation_semantics#sort
56879//
56880// Sorts a tensor. Currently, only sorting in ascending order is supported.
56881//
56882// Arguments:
56883//
56884//	input: A `Tensor` of type T.
56885//
56886// Returns A `Tensor` of type T.
56887func XlaSort(scope *Scope, input tf.Output) (output tf.Output) {
56888	if scope.Err() != nil {
56889		return
56890	}
56891	opspec := tf.OpSpec{
56892		Type: "XlaSort",
56893		Input: []tf.Input{
56894			input,
56895		},
56896	}
56897	op := scope.AddOperation(opspec)
56898	return op.Output(0)
56899}
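
// End-to-end sketch for XlaSort (editor's illustration; whether the kernel is
// available outside a compiled XLA computation depends on how the runtime was
// built). `tf` is the usual alias for the core Go bindings:
//
//	package main
//
//	import (
//		"fmt"
//
//		tf "github.com/tensorflow/tensorflow/tensorflow/go"
//		"github.com/tensorflow/tensorflow/tensorflow/go/op"
//	)
//
//	func main() {
//		root := op.NewScope()
//		sorted := op.XlaSort(root, op.Const(root, []float32{3, 1, 2}))
//		graph, err := root.Finalize()
//		if err != nil {
//			panic(err)
//		}
//		sess, err := tf.NewSession(graph, nil)
//		if err != nil {
//			panic(err)
//		}
//		defer sess.Close()
//		out, err := sess.Run(nil, []tf.Output{sorted}, nil)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(out[0].Value()) // expected: [1 2 3]
//	}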
56900
56901// XlaSplitNDAttr is an optional argument to XlaSplitND.
56902type XlaSplitNDAttr func(optionalAttr)
56903
56904// XlaSplitNDPaddings sets the optional paddings attribute to value.
56905//
56906// value: Optional list of right paddings per dimension of input tensor to apply before
56907// splitting. This can be used to make a dimension evenly divisible.
56908// If not specified, defaults to {}
56909func XlaSplitNDPaddings(value []int64) XlaSplitNDAttr {
56910	return func(m optionalAttr) {
56911		m["paddings"] = value
56912	}
56913}
56914
56915// Splits input tensor across all dimensions.
56916//
56917// An op which slices the input tensor based on the given num_splits attribute,
56918// optionally pads the slices, and returns the slices. Slices are returned in
56919// row-major order.
56920//
56921// This op may be generated via the TPU bridge.
56922//
56923// For example, with `input` tensor:
56924// ```
56925// [[0, 1, 2],
56926//
56927//	[3, 4, 5],
56928//	[6, 7, 8]]
56929//
56930// ```
56931// `num_splits`:
56932// ```
56933// [2, 2]
56934// ```
56935// and `paddings`:
56936// ```
56937// [1, 1]
56938// ```
56939// the expected `outputs` is:
56940// ```
56941// [[0, 1],
56942//
56943//	[3, 4]]
56944//
56945// [[2, 0],
56946//
56947//	[5, 0]]
56948//
56949// [[6, 7],
56950//
56951//	[0, 0]]
56952//
56953// [[8, 0],
56954//
56955//	[0, 0]]
56956//
56957// ```
56958//
56959// Arguments:
56960//
56961//	input: Input tensor to split across all dimensions.
56962//	num_splits: Number of ways to split per dimension. Shape dimensions must be
56963//
56964// evenly divisible.
56965//
56966// Returns Output slices based on input and num_splits defined, in row-major order.
56972func XlaSplitND(scope *Scope, input tf.Output, N int64, num_splits []int64, optional ...XlaSplitNDAttr) (outputs []tf.Output) {
56973	if scope.Err() != nil {
56974		return
56975	}
56976	attrs := map[string]interface{}{"N": N, "num_splits": num_splits}
56977	for _, a := range optional {
56978		a(attrs)
56979	}
56980	opspec := tf.OpSpec{
56981		Type: "XlaSplitND",
56982		Input: []tf.Input{
56983			input,
56984		},
56985		Attrs: attrs,
56986	}
56987	op := scope.AddOperation(opspec)
56988	if scope.Err() != nil {
56989		return
56990	}
56991	var idx int
56992	var err error
56993	if outputs, idx, err = makeOutputList(op, idx, "outputs"); err != nil {
56994		scope.UpdateErr("XlaSplitND", err)
56995		return
56996	}
56997	return outputs
56998}
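
// Illustrative sketch (editor's addition) reproducing the documented example
// above, assuming a Scope `root`; N is the total number of output slices (2 * 2):
//
//	x := op.Const(root, [][]int32{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})
//	parts := op.XlaSplitND(root, x, 4, []int64{2, 2},
//		op.XlaSplitNDPaddings([]int64{1, 1}))
//	// parts is a list of four 2x2 slices in row-major order.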
56999
57000// XlaSpmdFullToShardShapeAttr is an optional argument to XlaSpmdFullToShardShape.
57001type XlaSpmdFullToShardShapeAttr func(optionalAttr)
57002
57003// XlaSpmdFullToShardShapeDim sets the optional dim attribute to value.
57004// If not specified, defaults to -1
57005func XlaSpmdFullToShardShapeDim(value int64) XlaSpmdFullToShardShapeAttr {
57006	return func(m optionalAttr) {
57007		m["dim"] = value
57008	}
57009}
57010
57011// XlaSpmdFullToShardShapeUnspecifiedDims sets the optional unspecified_dims attribute to value.
57012// If not specified, defaults to {}
57013func XlaSpmdFullToShardShapeUnspecifiedDims(value []int64) XlaSpmdFullToShardShapeAttr {
57014	return func(m optionalAttr) {
57015		m["unspecified_dims"] = value
57016	}
57017}
57018
57019// An op used by XLA SPMD partitioner to switch from automatic partitioning to
57020//
57021// manual partitioning. It annotates the input (full-shape, to be automatically
57022// partitioned) with the same sharding used by manual partitioning, and outputs a
57023// shard-shaped tensor to be consumed by later manually-partitioned ops. If the
57024// shape is not evenly partitionable, the padding region will be masked with 0s.
57025// The conversion can happen partially in subgroups, by specifying the dim
57026// attribute, where only that dim will be converted.
57027func XlaSpmdFullToShardShape(scope *Scope, input tf.Output, manual_sharding string, optional ...XlaSpmdFullToShardShapeAttr) (output tf.Output) {
57028	if scope.Err() != nil {
57029		return
57030	}
57031	attrs := map[string]interface{}{"manual_sharding": manual_sharding}
57032	for _, a := range optional {
57033		a(attrs)
57034	}
57035	opspec := tf.OpSpec{
57036		Type: "XlaSpmdFullToShardShape",
57037		Input: []tf.Input{
57038			input,
57039		},
57040		Attrs: attrs,
57041	}
57042	op := scope.AddOperation(opspec)
57043	return op.Output(0)
57044}
57045
57046// XlaSpmdShardToFullShapeAttr is an optional argument to XlaSpmdShardToFullShape.
57047type XlaSpmdShardToFullShapeAttr func(optionalAttr)
57048
57049// XlaSpmdShardToFullShapeDim sets the optional dim attribute to value.
57050// If not specified, defaults to -1
57051func XlaSpmdShardToFullShapeDim(value int64) XlaSpmdShardToFullShapeAttr {
57052	return func(m optionalAttr) {
57053		m["dim"] = value
57054	}
57055}
57056
57057// XlaSpmdShardToFullShapeUnspecifiedDims sets the optional unspecified_dims attribute to value.
57058// If not specified, defaults to {}
57059func XlaSpmdShardToFullShapeUnspecifiedDims(value []int64) XlaSpmdShardToFullShapeAttr {
57060	return func(m optionalAttr) {
57061		m["unspecified_dims"] = value
57062	}
57063}
57064
57065// An op used by XLA SPMD partitioner to switch from manual partitioning to
57066//
57067// automatic partitioning. It converts the shard-shaped, manually partitioned input
57068// into a full-shaped tensor to be partitioned automatically with the same sharding
57069// used by manual partitioning. The conversion can happen partially in subgroups,
57070// by specifying the dim attribute, where only that dim will be converted.
57071func XlaSpmdShardToFullShape(scope *Scope, input tf.Output, manual_sharding string, full_shape tf.Shape, optional ...XlaSpmdShardToFullShapeAttr) (output tf.Output) {
57072	if scope.Err() != nil {
57073		return
57074	}
57075	attrs := map[string]interface{}{"manual_sharding": manual_sharding, "full_shape": full_shape}
57076	for _, a := range optional {
57077		a(attrs)
57078	}
57079	opspec := tf.OpSpec{
57080		Type: "XlaSpmdShardToFullShape",
57081		Input: []tf.Input{
57082			input,
57083		},
57084		Attrs: attrs,
57085	}
57086	op := scope.AddOperation(opspec)
57087	return op.Output(0)
57088}
57089
57090// Computes the singular value decomposition of a batch of matrices
57091//
57092// (Note: Only real inputs are supported).
57093//
57094// Computes the singular values and singular vectors of the innermost M-by-N
57095// matrices in tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).
57096//
57097// Arguments:
57098//
57099//	a: the input tensor.
57100//	max_iter: maximum number of sweep updates. Heuristically, it has been argued
57101//
57102// that approximately log(min(M, N)) sweeps are needed in practice (Ref: Golub &
57103// van Loan, "Matrix Computations").
57105//
57106//	epsilon: the tolerance ratio.
57107//	precision_config: a serialized xla::PrecisionConfig proto.
57108//
57109// Returns:
57110//
57111//	s: Singular values. The values are sorted in decreasing order of magnitude, so
57112//
57113// s[..., 0] is the largest value, s[..., 1] is the second largest, etc.
57114//
57115//	u: Left singular vectors.
57116//	v: Right singular vectors.
57117func XlaSvd(scope *Scope, a tf.Output, max_iter int64, epsilon float32, precision_config string) (s tf.Output, u tf.Output, v tf.Output) {
57118	if scope.Err() != nil {
57119		return
57120	}
57121	attrs := map[string]interface{}{"max_iter": max_iter, "epsilon": epsilon, "precision_config": precision_config}
57122	opspec := tf.OpSpec{
57123		Type: "XlaSvd",
57124		Input: []tf.Input{
57125			a,
57126		},
57127		Attrs: attrs,
57128	}
57129	op := scope.AddOperation(opspec)
57130	return op.Output(0), op.Output(1), op.Output(2)
57131}
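
// Illustrative sketch (editor's addition, assuming a Scope `root`; the empty
// precision_config string is an assumption standing in for a default-valued
// serialized xla::PrecisionConfig proto):
//
//	a := op.Const(root, [][]float32{{1, 2}, {3, 4}, {5, 6}})
//	s, u, v := op.XlaSvd(root, a, 20, 1e-6, "")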
57132
57133// Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise.
57134func Xlog1py(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
57135	if scope.Err() != nil {
57136		return
57137	}
57138	opspec := tf.OpSpec{
57139		Type: "Xlog1py",
57140		Input: []tf.Input{
57141			x, y,
57142		},
57143	}
57144	op := scope.AddOperation(opspec)
57145	return op.Output(0)
57146}
57147
57148// Returns 0 if x == 0, and x * log(y) otherwise, elementwise.
57149func Xlogy(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
57150	if scope.Err() != nil {
57151		return
57152	}
57153	opspec := tf.OpSpec{
57154		Type: "Xlogy",
57155		Input: []tf.Input{
57156			x, y,
57157		},
57158	}
57159	op := scope.AddOperation(opspec)
57160	return op.Output(0)
57161}
57162
57163// Returns a tensor of zeros with the same shape and type as x.
57164//
57165// Arguments:
57166//
57167//	x: a tensor of type T.
57168//
57169// Returns a tensor of the same shape and type as x but filled with zeros.
57170func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
57171	if scope.Err() != nil {
57172		return
57173	}
57174	opspec := tf.OpSpec{
57175		Type: "ZerosLike",
57176		Input: []tf.Input{
57177			x,
57178		},
57179	}
57180	op := scope.AddOperation(opspec)
57181	return op.Output(0)
57182}
57183
57184// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
57185//
57186// The Hurwitz zeta function is defined as:
57187//
57188// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
57189func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) {
57190	if scope.Err() != nil {
57191		return
57192	}
57193	opspec := tf.OpSpec{
57194		Type: "Zeta",
57195		Input: []tf.Input{
57196			x, q,
57197		},
57198	}
57199	op := scope.AddOperation(opspec)
57200	return op.Output(0)
57201}
57202
57203// ZipDatasetAttr is an optional argument to ZipDataset.
57204type ZipDatasetAttr func(optionalAttr)
57205
57206// ZipDatasetMetadata sets the optional metadata attribute to value.
57207// If not specified, defaults to ""
57208func ZipDatasetMetadata(value string) ZipDatasetAttr {
57209	return func(m optionalAttr) {
57210		m["metadata"] = value
57211	}
57212}
57213
57214// Creates a dataset that zips together `input_datasets`.
57215//
57216// The elements of the resulting dataset are created by zipping corresponding
57217// elements from each of the input datasets.
57218//
57219// The size of the resulting dataset will match the size of the smallest input
57220// dataset, and no error will be raised if input datasets have different sizes.
57221//
57222// Arguments:
57223//
57224//	input_datasets: List of `N` variant Tensors representing datasets to be zipped together.
57225func ZipDataset(scope *Scope, input_datasets []tf.Output, output_types []tf.DataType, output_shapes []tf.Shape, optional ...ZipDatasetAttr) (handle tf.Output) {
57226	if scope.Err() != nil {
57227		return
57228	}
57229	attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes}
57230	for _, a := range optional {
57231		a(attrs)
57232	}
57233	opspec := tf.OpSpec{
57234		Type: "ZipDataset",
57235		Input: []tf.Input{
57236			tf.OutputList(input_datasets),
57237		},
57238		Attrs: attrs,
57239	}
57240	op := scope.AddOperation(opspec)
57241	return op.Output(0)
57242}
57243
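// Illustrative sketch (editor's addition): zipping two scalar datasets,
// assuming a Scope `root`, that `ds1` and `ds2` are variant dataset tensors
// produced elsewhere in the graph, and the usual `tf` alias for the core
// bindings:
//
//	zipped := op.ZipDataset(root, []tf.Output{ds1, ds2},
//		[]tf.DataType{tf.Int64, tf.Float},
//		[]tf.Shape{tf.ScalarShape(), tf.ScalarShape()})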