xref: /aosp_15_r20/external/googleapis/google/cloud/aiplatform/v1beta1/explanation.proto (revision d5c09012810ac0c9f33fe448fb6da8260d444cc9)
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1beta1;

import "google/api/field_behavior.proto";
import "google/cloud/aiplatform/v1beta1/explanation_metadata.proto";
import "google/cloud/aiplatform/v1beta1/io.proto";
import "google/protobuf/struct.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
option go_package = "cloud.google.com/go/aiplatform/apiv1beta1/aiplatformpb;aiplatformpb";
option java_multiple_files = true;
option java_outer_classname = "ExplanationProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";

// Explanation of a prediction (provided in
// [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
// produced by the Model on a given
// [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
message Explanation {
  // Output only. Feature attributions grouped by predicted outputs.
  //
  // For Models that predict only one output, such as regression Models that
  // predict only one score, there is only one attribution that explains the
  // predicted output. For Models that predict multiple outputs, such as
  // multiclass Models that predict multiple classes, each element explains one
  // specific item.
  // [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
  // can be used to identify which output this attribution is explaining.
  //
  // By default, we provide Shapley values for the predicted class. However,
  // you can configure the explanation request to generate Shapley values for
  // any other classes too. For example, if a model predicts a probability of
  // `0.4` for approving a loan application, the model's decision is to reject
  // the application since `p(reject) = 0.6 > p(approve) = 0.4`, and the default
  // Shapley values would be computed for rejection decision and not approval,
  // even though the latter might be the positive class.
  //
  // If users set
  // [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k],
  // the attributions are sorted by
  // [instance_output_value][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value]
  // in descending order. If
  // [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices]
  // is specified, the attributions are stored by
  // [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
  // in the same order as they appear in the output_indices.
  repeated Attribution attributions = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. List of the nearest neighbors for example-based explanations.
  //
  // For models deployed with the examples explanations feature enabled, the
  // attributions field is empty and instead the neighbors field is populated.
  repeated Neighbor neighbors = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Aggregated explanation metrics for a Model over a set of instances.
message ModelExplanation {
  // Output only. Aggregated attributions explaining the Model's prediction
  // outputs over the set of instances. The attributions are grouped by outputs.
  //
  // For Models that predict only one output, such as regression Models that
  // predict only one score, there is only one attribution that explains the
  // predicted output. For Models that predict multiple outputs, such as
  // multiclass Models that predict multiple classes, each element explains one
  // specific item.
  // [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
  // can be used to identify which output this attribution is explaining.
  //
  // The
  // [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
  // [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value]
  // and
  // [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
  // fields are averaged over the test data.
  //
  // NOTE: Currently AutoML tabular classification Models produce only one
  // attribution, which averages attributions over all the classes it predicts.
  // [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error]
  // is not populated.
  repeated Attribution mean_attributions = 1
      [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Attribution that explains a particular prediction output.
message Attribution {
  // Output only. Model predicted output if the input instance is constructed
  // from the baselines of all the features defined in
  // [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
  // The field name of the output is determined by the key in
  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
  //
  // If the Model's predicted output has multiple dimensions (rank > 1), this is
  // the value in the output located by
  // [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
  //
  // If there are multiple baselines, their output values are averaged.
  double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Model predicted output on the corresponding [explanation
  // instance][ExplainRequest.instances]. The field name of the output is
  // determined by the key in
  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
  //
  // If the Model predicted output has multiple dimensions, this is the value in
  // the output located by
  // [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
  double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Attributions of each explained feature. Features are extracted
  // from the [prediction
  // instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
  // according to [explanation metadata for
  // inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
  //
  // The value is a struct, whose keys are the name of the feature. The values
  // are how much the feature in the
  // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
  // contributed to the predicted result.
  //
  // The format of the value is determined by the feature's input format:
  //
  //   * If the feature is a scalar value, the attribution value is a
  //     [floating number][google.protobuf.Value.number_value].
  //
  //   * If the feature is an array of scalar values, the attribution value is
  //     an [array][google.protobuf.Value.list_value].
  //
  //   * If the feature is a struct, the attribution value is a
  //     [struct][google.protobuf.Value.struct_value]. The keys in the
  //     attribution value struct are the same as the keys in the feature
  //     struct. The formats of the values in the attribution struct are
  //     determined by the formats of the values in the feature struct.
  //
  // The
  // [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri]
  // field, pointed to by the
  // [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] field of
  // the
  // [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models]
  // object, points to the schema file that describes the features and their
  // attribution values (if it is populated).
  google.protobuf.Value feature_attributions = 3
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The index that locates the explained prediction output.
  //
  // If the prediction output is a scalar value, output_index is not populated.
  // If the prediction output has multiple dimensions, the length of the
  // output_index list is the same as the number of dimensions of the output.
  // The i-th element in output_index is the element index of the i-th dimension
  // of the output vector. Indices start from 0.
  repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The display name of the output identified by
  // [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
  // For example, the predicted class name by a multi-classification Model.
  //
  // This field is populated if and only if the Model predicts display names as
  // a separate field along with the explained output. The predicted display
  // name must have the same shape as the explained output, and can be located
  // using output_index.
  string output_display_name = 5 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Error of
  // [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions]
  // caused by approximation used in the explanation method. Lower value means
  // more precise attributions.
  //
  // * For Sampled Shapley
  // [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
  // increasing
  // [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count]
  // might reduce the error.
  // * For Integrated Gradients
  // [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
  // increasing
  // [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count]
  // might reduce the error.
  // * For [XRAI
  // attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
  // increasing
  // [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count]
  // might reduce the error.
  //
  // See [this introduction](/vertex-ai/docs/explainable-ai/overview)
  // for more information.
  double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Name of the explain output. Specified as the key in
  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
  string output_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Neighbors for example-based explanations.
message Neighbor {
  // Output only. The neighbor id.
  string neighbor_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The neighbor distance.
  double neighbor_distance = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Specification of Model explanation.
message ExplanationSpec {
  // Required. Parameters that configure explaining of the Model's predictions.
  ExplanationParameters parameters = 1 [(google.api.field_behavior) = REQUIRED];

  // Optional. Metadata describing the Model's input and output for explanation.
  ExplanationMetadata metadata = 2 [(google.api.field_behavior) = OPTIONAL];
}

// Parameters to configure explaining for Model's predictions.
message ExplanationParameters {
  oneof method {
    // An attribution method that approximates Shapley values for features that
    // contribute to the label being predicted. A sampling strategy is used to
    // approximate the value rather than considering all subsets of features.
    // Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
    SampledShapleyAttribution sampled_shapley_attribution = 1;

    // An attribution method that computes Aumann-Shapley values taking
    // advantage of the model's fully differentiable structure. Refer to this
    // paper for more details: https://arxiv.org/abs/1703.01365
    IntegratedGradientsAttribution integrated_gradients_attribution = 2;

    // An attribution method that redistributes Integrated Gradients
    // attribution to segmented regions, taking advantage of the model's fully
    // differentiable structure. Refer to this paper for
    // more details: https://arxiv.org/abs/1906.02825
    //
    // XRAI currently performs better on natural images, like a picture of a
    // house or an animal. If the images are taken in artificial environments,
    // like a lab or manufacturing line, or from diagnostic equipment, like
    // x-rays or quality-control cameras, use Integrated Gradients instead.
    XraiAttribution xrai_attribution = 3;

    // Example-based explanations that return the nearest neighbors from the
    // provided dataset.
    Examples examples = 7;
  }

  // If populated, returns attributions for top K indices of outputs
  // (defaults to 1). Only applies to Models that predict more than one output
  // (e.g., multi-class Models). When set to -1, returns explanations for all
  // outputs.
  int32 top_k = 4;

  // If populated, only returns attributions that have
  // [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]
  // contained in output_indices. It must be an ndarray of integers, with the
  // same shape of the output it's explaining.
  //
  // If not populated, returns attributions for
  // [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k]
  // indices of outputs. If neither top_k nor output_indices is populated,
  // returns the argmax index of the outputs.
  //
  // Only applicable to Models that predict multiple outputs (e.g., multi-class
  // Models that predict multiple classes).
  google.protobuf.ListValue output_indices = 5;
}

// An attribution method that approximates Shapley values for features that
// contribute to the label being predicted. A sampling strategy is used to
// approximate the value rather than considering all subsets of features.
message SampledShapleyAttribution {
  // Required. The number of feature permutations to consider when approximating
  // the Shapley values.
  //
  // Valid range of its value is [1, 50], inclusively.
  int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
}

// An attribution method that computes the Aumann-Shapley value taking advantage
// of the model's fully differentiable structure. Refer to this paper for
// more details: https://arxiv.org/abs/1703.01365
message IntegratedGradientsAttribution {
  // Required. The number of steps for approximating the path integral.
  // A good value to start is 50 and gradually increase until the
  // sum to diff property is within the desired error range.
  //
  // Valid range of its value is [1, 100], inclusively.
  int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];

  // Config for SmoothGrad approximation of gradients.
  //
  // When enabled, the gradients are approximated by averaging the gradients
  // from noisy samples in the vicinity of the inputs. Adding
  // noise can help improve the computed gradients. Refer to this paper for more
  // details: https://arxiv.org/pdf/1706.03825.pdf
  SmoothGradConfig smooth_grad_config = 2;

  // Config for IG with blur baseline.
  //
  // When enabled, a linear path from the maximally blurred image to the input
  // image is created. Using a blurred baseline instead of zero (black image) is
  // motivated by the BlurIG approach explained here:
  // https://arxiv.org/abs/2004.03383
  BlurBaselineConfig blur_baseline_config = 3;
}

// An explanation method that redistributes Integrated Gradients
// attributions to segmented regions, taking advantage of the model's fully
// differentiable structure. Refer to this paper for more details:
// https://arxiv.org/abs/1906.02825
//
// Supported only by image Models.
message XraiAttribution {
  // Required. The number of steps for approximating the path integral.
  // A good value to start is 50 and gradually increase until the
  // sum to diff property is met within the desired error range.
  //
  // Valid range of its value is [1, 100], inclusively.
  int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];

  // Config for SmoothGrad approximation of gradients.
  //
  // When enabled, the gradients are approximated by averaging the gradients
  // from noisy samples in the vicinity of the inputs. Adding
  // noise can help improve the computed gradients. Refer to this paper for more
  // details: https://arxiv.org/pdf/1706.03825.pdf
  SmoothGradConfig smooth_grad_config = 2;

  // Config for XRAI with blur baseline.
  //
  // When enabled, a linear path from the maximally blurred image to the input
  // image is created. Using a blurred baseline instead of zero (black image) is
  // motivated by the BlurIG approach explained here:
  // https://arxiv.org/abs/2004.03383
  BlurBaselineConfig blur_baseline_config = 3;
}

// Config for SmoothGrad approximation of gradients.
//
// When enabled, the gradients are approximated by averaging the gradients from
// noisy samples in the vicinity of the inputs. Adding noise can help improve
// the computed gradients. Refer to this paper for more details:
// https://arxiv.org/pdf/1706.03825.pdf
message SmoothGradConfig {
  // Represents the standard deviation of the gaussian kernel
  // that will be used to add noise to the interpolated inputs
  // prior to computing gradients.
  oneof GradientNoiseSigma {
    // This is a single float value and will be used to add noise to all the
    // features. Use this field when all features are normalized to have the
    // same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where
    // features are normalized to have 0-mean and 1-variance. Learn more about
    // [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization).
    //
    // For best results the recommended value is about 10% - 20% of the standard
    // deviation of the input feature. Refer to section 3.2 of the SmoothGrad
    // paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
    //
    // If the distribution is different per feature, set
    // [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma]
    // instead for each feature.
    float noise_sigma = 1;

    // This is similar to
    // [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma],
    // but provides additional flexibility. A separate noise sigma can be
    // provided for each feature, which is useful if their distributions are
    // different. No noise is added to features that are not set. If this field
    // is unset,
    // [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
    // will be used for all features.
    FeatureNoiseSigma feature_noise_sigma = 2;
  }

  // The number of gradient samples to use for
  // approximation. The higher this number, the more accurate the gradient
  // is, but the runtime complexity increases by this factor as well.
  // Valid range of its value is [1, 50]. Defaults to 3.
  int32 noisy_sample_count = 3;
}

// Noise sigma by features. Noise sigma represents the standard deviation of the
// gaussian kernel that will be used to add noise to interpolated inputs prior
// to computing gradients.
message FeatureNoiseSigma {
  // Noise sigma for a single feature.
  message NoiseSigmaForFeature {
    // The name of the input feature for which noise sigma is provided. The
    // features are defined in
    // [explanation metadata
    // inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
    string name = 1;

    // This represents the standard deviation of the Gaussian kernel that will
    // be used to add noise to the feature prior to computing gradients. Similar
    // to
    // [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma]
    // but represents the noise added to the current feature. Defaults to 0.1.
    float sigma = 2;
  }

  // Noise sigma per feature. No noise is added to features that are not set.
  repeated NoiseSigmaForFeature noise_sigma = 1;
}

// Config for blur baseline.
//
// When enabled, a linear path from the maximally blurred image to the input
// image is created. Using a blurred baseline instead of zero (black image) is
// motivated by the BlurIG approach explained here:
// https://arxiv.org/abs/2004.03383
message BlurBaselineConfig {
  // The standard deviation of the blur kernel for the blurred baseline. The
  // same blurring parameter is used for both the height and the width
  // dimension. If not set, the method defaults to the zero (i.e. black for
  // images) baseline.
  float max_blur_sigma = 1;
}

// Example-based explainability that returns the nearest neighbors from the
// provided dataset.
message Examples {
  // The Cloud Storage input instances.
  message ExampleGcsSource {
    // The format of the input example instances.
    enum DataFormat {
      // Format unspecified, used when unset.
      DATA_FORMAT_UNSPECIFIED = 0;

      // Examples are stored in JSONL files.
      JSONL = 1;
    }

    // The format in which instances are given; if not specified, the format is
    // assumed to be JSONL. Currently only JSONL format is supported.
    DataFormat data_format = 1;

    // The Cloud Storage location for the input instances.
    GcsSource gcs_source = 2;
  }

  oneof source {
    // The Cloud Storage input instances.
    ExampleGcsSource example_gcs_source = 5;
  }

  oneof config {
    // The full configuration for the generated index, the semantics are the
    // same as [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] and
    // should match
    // [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config).
    google.protobuf.Value nearest_neighbor_search_config = 2;

    // Simplified preset configuration, which automatically sets configuration
    // values based on the desired query speed-precision trade-off and modality.
    Presets presets = 4;
  }

  // The Cloud Storage locations that contain the instances to be
  // indexed for approximate nearest neighbor search.
  GcsSource gcs_source = 1;

  // The number of neighbors to return when querying for examples.
  int32 neighbor_count = 3;
}

// Preset configuration for example-based explanations.
message Presets {
  // Preset option controlling parameters for query speed-precision trade-off.
  enum Query {
    // More precise neighbors as a trade-off against slower response.
    PRECISE = 0;

    // Faster response as a trade-off against less precise neighbors.
    FAST = 1;
  }

  // Preset option controlling parameters for different modalities.
  enum Modality {
    // Should not be set. Added as a recommended best practice for enums.
    MODALITY_UNSPECIFIED = 0;

    // IMAGE modality
    IMAGE = 1;

    // TEXT modality
    TEXT = 2;

    // TABULAR modality
    TABULAR = 3;
  }

  // Preset option controlling parameters for speed-precision trade-off when
  // querying for examples. If omitted, defaults to `PRECISE`.
  optional Query query = 1;

  // The modality of the uploaded model, which automatically configures the
  // distance measurement and feature normalization for the underlying example
  // index and queries. If your model does not precisely fit one of these types,
  // it is okay to choose the closest type.
  Modality modality = 2;
}

// The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec]
// entries that can be overridden at [online
// explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
message ExplanationSpecOverride {
  // The parameters to be overridden. Note that the
  // attribution method cannot be changed. If not specified,
  // no parameter is overridden.
  ExplanationParameters parameters = 1;

  // The metadata to be overridden. If not specified, no metadata is overridden.
  ExplanationMetadataOverride metadata = 2;

  // The example-based explanations parameter overrides.
  ExamplesOverride examples_override = 3;
}

// The
// [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata]
// entries that can be overridden at [online
// explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
message ExplanationMetadataOverride {
  // The [input
  // metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
  // entries to be overridden.
  message InputMetadataOverride {
    // Baseline inputs for this feature.
    //
    // This overrides the `input_baseline` field of the
    // [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
    // object of the corresponding feature's input metadata. If it's not
    // specified, the original baselines are not overridden.
    repeated google.protobuf.Value input_baselines = 1;
  }

  // Required. Overrides the [input
  // metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] of
  // the features. The key is the name of the feature to be overridden. The keys
  // specified here must exist in the input metadata to be overridden. If a
  // feature is not specified here, the corresponding feature's input metadata
  // is not overridden.
  map<string, InputMetadataOverride> inputs = 1
      [(google.api.field_behavior) = REQUIRED];
}

// Overrides for example-based explanations.
message ExamplesOverride {
  // Data format enum.
  enum DataFormat {
    // Unspecified format. Must not be used.
    DATA_FORMAT_UNSPECIFIED = 0;

    // Provided data is a set of model inputs.
    INSTANCES = 1;

    // Provided data is a set of embeddings.
    EMBEDDINGS = 2;
  }

  // The number of neighbors to return.
  int32 neighbor_count = 1;

  // The number of neighbors to return that have the same crowding tag.
  int32 crowding_count = 2;

  // Restrict the resulting nearest neighbors to respect these constraints.
  repeated ExamplesRestrictionsNamespace restrictions = 3;

  // If true, return the embeddings instead of neighbors.
  bool return_embeddings = 4;

  // The format of the data being provided with each call.
  DataFormat data_format = 5;
}

// Restrictions namespace for example-based explanations overrides.
message ExamplesRestrictionsNamespace {
  // The namespace name.
  string namespace_name = 1;

  // The list of allowed tags.
  repeated string allow = 2;

  // The list of deny tags.
  repeated string deny = 3;
}
602