// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/aiplatform/v1/encryption_spec.proto";
import "google/cloud/aiplatform/v1/io.proto";
import "google/cloud/aiplatform/v1/saved_query.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1";
option go_package = "cloud.google.com/go/aiplatform/apiv1/aiplatformpb;aiplatformpb";
option java_multiple_files = true;
option java_outer_classname = "DatasetProto";
option java_package = "com.google.cloud.aiplatform.v1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1";
option ruby_package = "Google::Cloud::AIPlatform::V1";

// A collection of DataItems and Annotations on them.
message Dataset {
  option (google.api.resource) = {
    type: "aiplatform.googleapis.com/Dataset"
    pattern: "projects/{project}/locations/{location}/datasets/{dataset}"
  };

  // Output only. The resource name of the Dataset.
  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Required. The user-defined name of the Dataset.
  // The name can be up to 128 characters long and can consist of any UTF-8
  // characters.
  string display_name = 2 [(google.api.field_behavior) = REQUIRED];

  // The description of the Dataset.
  string description = 16;

  // Required. Points to a YAML file stored on Google Cloud Storage describing
  // additional information about the Dataset. The schema is defined as an
  // OpenAPI 3.0.2 Schema Object. The schema files that can be used here are
  // found in gs://google-cloud-aiplatform/schema/dataset/metadata/.
  string metadata_schema_uri = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. Additional information about the Dataset.
  google.protobuf.Value metadata = 8 [(google.api.field_behavior) = REQUIRED];

  // Output only. Timestamp when this Dataset was created.
  google.protobuf.Timestamp create_time = 4
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. Timestamp when this Dataset was last updated.
  google.protobuf.Timestamp update_time = 5
      [(google.api.field_behavior) = OUTPUT_ONLY];

  // Used to perform consistent read-modify-write updates. If not set, a blind
  // "overwrite" update happens.
  string etag = 6;

  // The labels with user-defined metadata to organize your Datasets.
  //
  // Label keys and values can be no longer than 64 characters
  // (Unicode codepoints), can only contain lowercase letters, numeric
  // characters, underscores and dashes. International characters are allowed.
  // No more than 64 user labels can be associated with one Dataset (System
  // labels are excluded).
  //
  // See https://goo.gl/xmQnxf for more information and examples of labels.
  // System reserved label keys are prefixed with "aiplatform.googleapis.com/"
  // and are immutable. The following system labels exist for each Dataset:
  //
  // * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its
  //   value is the
  //   [metadata_schema's][google.cloud.aiplatform.v1.Dataset.metadata_schema_uri]
  //   title.
  map<string, string> labels = 7;

  // All SavedQueries belonging to the Dataset will be returned in the List/Get
  // Dataset response. The annotation_specs field will not be populated except
  // for UI cases, which will only use
  // [annotation_spec_count][google.cloud.aiplatform.v1.SavedQuery.annotation_spec_count].
  // In a CreateDataset request, a SavedQuery is created together with the
  // Dataset if this field is set; at most one SavedQuery can be set in
  // CreateDatasetRequest. The SavedQuery should not contain any AnnotationSpec.
  repeated SavedQuery saved_queries = 9;

  // Customer-managed encryption key spec for a Dataset. If set, this Dataset
  // and all sub-resources of this Dataset will be secured by this key.
  EncryptionSpec encryption_spec = 11;

  // Output only. The resource name of the Artifact that was created in
  // MetadataStore when creating the Dataset. The Artifact resource name
  // pattern is
  // `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`.
  string metadata_artifact = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Describes the location from which we import data into a Dataset, together
// with the labels that will be applied to the DataItems and the Annotations.
message ImportDataConfig {
  // The source of the input.
  oneof source {
    // The Google Cloud Storage location for the input content.
    GcsSource gcs_source = 1;
  }

  // Labels that will be applied to newly imported DataItems. If a DataItem
  // identical to one being imported already exists in the Dataset, then these
  // labels will be appended to those of the already existing one, and if a
  // label with an identical key was imported before, the old label value will
  // be overwritten. If two DataItems are identical in the same import data
  // operation, the labels will be combined, and if a key collision happens in
  // this case, one of the values will be picked randomly. Two DataItems are
  // considered identical if their content bytes are identical (e.g. image
  // bytes or pdf bytes).
  // These labels will be overridden by Annotation labels specified inside the
  // index file referenced by
  // [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri],
  // e.g. a JSONL file.
  map<string, string> data_item_labels = 2;

  // Labels that will be applied to newly imported Annotations. If two
  // Annotations are identical, one of them will be deduped. Two Annotations
  // are considered identical if their
  // [payload][google.cloud.aiplatform.v1.Annotation.payload],
  // [payload_schema_uri][google.cloud.aiplatform.v1.Annotation.payload_schema_uri]
  // and all of their [labels][google.cloud.aiplatform.v1.Annotation.labels]
  // are the same. These labels will be overridden by Annotation labels
  // specified inside the index file referenced by
  // [import_schema_uri][google.cloud.aiplatform.v1.ImportDataConfig.import_schema_uri],
  // e.g. a JSONL file.
  map<string, string> annotation_labels = 3;

  // Required. Points to a YAML file stored on Google Cloud Storage describing
  // the import format. Validation will be done against the schema. The schema
  // is defined as an [OpenAPI 3.0.2 Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  string import_schema_uri = 4 [(google.api.field_behavior) = REQUIRED];
}

// Describes what part of the Dataset is to be exported, the destination of
// the export and how to export.
message ExportDataConfig {
  // The destination of the output.
  oneof destination {
    // The Google Cloud Storage location where the output is to be written.
    // In the given directory a new directory will be created with the name
    // `export-data-<dataset-display-name>-<timestamp-of-export-call>`, where
    // the timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
    // output will be written into that directory. Inside that directory,
    // annotations with the same schema will be grouped into subdirectories
    // named with the corresponding annotations' schema title. Inside these
    // subdirectories, a schema.yaml will be created to describe the output
    // format.
    GcsDestination gcs_destination = 1;
  }

  // The instructions for how the export data should be split between the
  // training, validation and test sets.
  oneof split {
    // Split based on fractions defining the size of each set.
    ExportFractionSplit fraction_split = 5;
  }

  // An expression for filtering what part of the Dataset is to be exported.
  // Only Annotations that match this filter will be exported. The filter
  // syntax is the same as in
  // [ListAnnotations][google.cloud.aiplatform.v1.DatasetService.ListAnnotations].
  string annotations_filter = 2;
}

// Assigns the input data to training, validation, and test sets as per the
// given fractions. Any of `training_fraction`, `validation_fraction` and
// `test_fraction` may optionally be provided; together they must sum to at
// most 1. If the provided fractions sum to less than 1, the remainder is
// assigned to sets as decided by Vertex AI. If none of the fractions are set,
// by default roughly 80% of data is used for training, 10% for validation,
// and 10% for test.
message ExportFractionSplit {
  // The fraction of the input data that is to be used to train the Model.
  double training_fraction = 1;

  // The fraction of the input data that is to be used to validate the Model.
  double validation_fraction = 2;

  // The fraction of the input data that is to be used to evaluate the Model.
  double test_fraction = 3;
}
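
// Illustrative sketch (comments only, not part of the generated API surface):
// one plausible way an ExportDataConfig could be populated, shown in the
// proto3 JSON mapping. The bucket path is a placeholder, the GcsDestination
// field name is assumed from io.proto's `output_uri_prefix`, and the fractions
// simply mirror the default 80/10/10 split described above (summing to 1,
// within the "at most 1" constraint on ExportFractionSplit).
//
//   {
//     "gcsDestination": { "outputUriPrefix": "gs://my-bucket/exports/" },
//     "fractionSplit": {
//       "trainingFraction": 0.8,
//       "validationFraction": 0.1,
//       "testFraction": 0.1
//     }
//   }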