// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.automl.v1;

import "google/api/resource.proto";
import "google/cloud/automl/v1/classification.proto";
import "google/cloud/automl/v1/detection.proto";
import "google/cloud/automl/v1/text_extraction.proto";
import "google/cloud/automl/v1/text_sentiment.proto";
import "google/cloud/automl/v1/translation.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "cloud.google.com/go/automl/apiv1/automlpb;automlpb";
option java_multiple_files = true;
option java_package = "com.google.cloud.automl.v1";
option php_namespace = "Google\\Cloud\\AutoMl\\V1";
option ruby_package = "Google::Cloud::AutoML::V1";

// Evaluation results of a model.
message ModelEvaluation {
  option (google.api.resource) = {
    type: "automl.googleapis.com/ModelEvaluation"
    pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}"
  };

  // Output only. Problem type specific evaluation metrics.
  oneof metrics {
    // Model evaluation metrics for image, text, video and tables
    // classification.
    // Tables problem is considered a classification when the target column
    // is CATEGORY DataType.
    ClassificationEvaluationMetrics classification_evaluation_metrics = 8;

    // Model evaluation metrics for translation.
    TranslationEvaluationMetrics translation_evaluation_metrics = 9;

    // Model evaluation metrics for image object detection.
    ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12;

    // Evaluation metrics for text sentiment models.
    TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11;

    // Evaluation metrics for text extraction models.
    TextExtractionEvaluationMetrics text_extraction_evaluation_metrics = 13;
  }

  // Output only. Resource name of the model evaluation.
  // Format:
  // `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}`
  string name = 1;

  // Output only. The ID of the annotation spec that the model evaluation
  // applies to. The ID is empty for the overall model evaluation.
  // For Tables annotation specs in the dataset do not exist and this ID is
  // always not set, but for CLASSIFICATION
  // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
  // the
  // [display_name][google.cloud.automl.v1.ModelEvaluation.display_name]
  // field is used.
  string annotation_spec_id = 2;

  // Output only. The value of
  // [display_name][google.cloud.automl.v1.AnnotationSpec.display_name]
  // at the moment when the model was trained. Because this field returns a
  // value at model training time, for different models trained from the same
  // dataset, the values may differ, since display names could have been
  // changed between the two model's trainings. For Tables CLASSIFICATION
  // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type]
  // distinct values of the target column at the moment of the model evaluation
  // are populated here.
  // The display_name is empty for the overall model evaluation.
  string display_name = 15;

  // Output only. Timestamp when this model evaluation was created.
  google.protobuf.Timestamp create_time = 5;

  // Output only. The number of examples used for model evaluation, i.e. for
  // which ground truth from time of model creation is compared against the
  // predicted annotations created by the model.
  // For overall ModelEvaluation (i.e. with annotation_spec_id not set) this is
  // the total number of all examples used for evaluation.
  // Otherwise, this is the count of examples that according to the ground
  // truth were annotated by the
  // [annotation_spec_id][google.cloud.automl.v1.ModelEvaluation.annotation_spec_id].
  int32 evaluated_example_count = 6;
}