// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.ai.generativelanguage.v1;

import "google/api/field_behavior.proto";
import "google/api/resource.proto";

option go_package = "cloud.google.com/go/ai/generativelanguage/apiv1/generativelanguagepb;generativelanguagepb";
option java_multiple_files = true;
option java_outer_classname = "ModelProto";
option java_package = "com.google.ai.generativelanguage.v1";

// Information about a Generative Language Model.
message Model {
  option (google.api.resource) = {
    type: "generativelanguage.googleapis.com/Model"
    pattern: "models/{model}"
  };

  // Required. The resource name of the `Model`.
  //
  // Format: `models/{model}` with a `{model}` naming convention of:
  //
  // * "{base_model_id}-{version}"
  //
  // Examples:
  //
  // * `models/chat-bison-001`
  string name = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The name of the base model; pass this to the generation request.
  //
  // Examples:
  //
  // * `chat-bison`
  string base_model_id = 2 [(google.api.field_behavior) = REQUIRED];

  // Required. The version number of the model.
  //
  // This represents the major version.
  string version = 3 [(google.api.field_behavior) = REQUIRED];

  // The human-readable name of the model. E.g. "Chat Bison".
  //
  // The name can be up to 128 characters long and can consist of any UTF-8
  // characters.
  string display_name = 4;

  // A short description of the model.
  string description = 5;

  // Maximum number of input tokens allowed for this model.
  int32 input_token_limit = 6;

  // Maximum number of output tokens available for this model.
  int32 output_token_limit = 7;

  // The model's supported generation methods.
  //
  // The method names are defined as camel-case strings, such as
  // `generateMessage`, which correspond to API methods.
  repeated string supported_generation_methods = 8;

  // Controls the randomness of the output.
  //
  // Values can range over `[0.0,1.0]`, inclusive. A value closer to `1.0` will
  // produce responses that are more varied, while a value closer to `0.0` will
  // typically result in less surprising responses from the model.
  // This value specifies the default to be used by the backend while making
  // the call to the model.
  optional float temperature = 9;

  // For Nucleus sampling.
  //
  // Nucleus sampling considers the smallest set of tokens whose probability
  // sum is at least `top_p`.
  // This value specifies the default to be used by the backend while making
  // the call to the model.
  optional float top_p = 10;

  // For Top-k sampling.
  //
  // Top-k sampling considers the set of `top_k` most probable tokens.
  // This value specifies the default to be used by the backend while making
  // the call to the model.
  // If empty, indicates the model doesn't use top-k sampling, and `top_k`
  // isn't allowed as a generation parameter.
  optional int32 top_k = 11;
}