- // Copyright 2022 Google LLC
- //
- // Licensed under the Apache License, Version 2.0 (the "License");
- // you may not use this file except in compliance with the License.
- // You may obtain a copy of the License at
- //
- // http://www.apache.org/licenses/LICENSE-2.0
- //
- // Unless required by applicable law or agreed to in writing, software
- // distributed under the License is distributed on an "AS IS" BASIS,
- // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- // See the License for the specific language governing permissions and
- // limitations under the License.
- syntax = "proto3";
- package google.cloud.aiplatform.v1;
- import "google/api/field_behavior.proto";
- import "google/api/resource.proto";
- import "google/cloud/aiplatform/v1/deployed_model_ref.proto";
- import "google/cloud/aiplatform/v1/encryption_spec.proto";
- import "google/cloud/aiplatform/v1/env_var.proto";
- import "google/cloud/aiplatform/v1/explanation.proto";
- import "google/protobuf/struct.proto";
- import "google/protobuf/timestamp.proto";
- option csharp_namespace = "Google.Cloud.AIPlatform.V1";
- option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1;aiplatform";
- option java_multiple_files = true;
- option java_outer_classname = "ModelProto";
- option java_package = "com.google.cloud.aiplatform.v1";
- option php_namespace = "Google\\Cloud\\AIPlatform\\V1";
- option ruby_package = "Google::Cloud::AIPlatform::V1";
- // A trained machine learning Model.
- message Model {
- option (google.api.resource) = {
- type: "aiplatform.googleapis.com/Model"
- pattern: "projects/{project}/locations/{location}/models/{model}"
- };
- // Represents export format supported by the Model.
- // All formats export to Google Cloud Storage.
- message ExportFormat {
- // The Model content that can be exported.
- enum ExportableContent {
- // Should not be used.
- EXPORTABLE_CONTENT_UNSPECIFIED = 0;
- // Model artifact and any of its supporting files. Will be exported to the
- // location specified by the `artifactDestination` field of the
- // [ExportModelRequest.output_config][google.cloud.aiplatform.v1.ExportModelRequest.output_config] object.
- ARTIFACT = 1;
- // The container image that is to be used when deploying this Model. Will
- // be exported to the location specified by the `imageDestination` field
- // of the [ExportModelRequest.output_config][google.cloud.aiplatform.v1.ExportModelRequest.output_config] object.
- IMAGE = 2;
- }
- // Output only. The ID of the export format.
- // The possible format IDs are:
- //
- // * `tflite`
- // Used for Android mobile devices.
- //
- // * `edgetpu-tflite`
- // Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices.
- //
- // * `tf-saved-model`
- // A tensorflow model in SavedModel format.
- //
- // * `tf-js`
- // A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used
- // in the browser and in Node.js using JavaScript.
- //
- // * `core-ml`
- // Used for iOS mobile devices.
- //
- // * `custom-trained`
- // A Model that was uploaded or trained by custom code.
- string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. The content of this Model that may be exported.
- repeated ExportableContent exportable_contents = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
- }
- // Identifies a type of Model's prediction resources.
- enum DeploymentResourcesType {
- // Should not be used.
- DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0;
- // Resources that are dedicated to the [DeployedModel][google.cloud.aiplatform.v1.DeployedModel], and that need a
- // higher degree of manual configuration.
- DEDICATED_RESOURCES = 1;
- // Resources that are, to a large degree, decided by Vertex AI and that
- // require only modest additional configuration.
- AUTOMATIC_RESOURCES = 2;
- // Resources that can be shared by multiple [DeployedModels][google.cloud.aiplatform.v1.DeployedModel].
- // A pre-configured [DeploymentResourcePool][] is required.
- SHARED_RESOURCES = 3;
- }
- // The resource name of the Model.
- string name = 1;
- // Output only. Immutable. The version ID of the model.
- // A new version is committed when a new model version is uploaded or
- // trained under an existing model ID. It is an auto-incrementing decimal
- // number in string representation.
- string version_id = 28 [
- (google.api.field_behavior) = IMMUTABLE,
- (google.api.field_behavior) = OUTPUT_ONLY
- ];
- // User-provided version aliases so that a model version can be referenced via
- // an alias (for example,
- // `projects/{project}/locations/{location}/models/{model_id}@{version_alias}`)
- // instead of the auto-generated version ID (for example,
- // `projects/{project}/locations/{location}/models/{model_id}@{version_id}`).
- // The format is `[a-z][a-zA-Z0-9-]{0,126}[a-z0-9]` to distinguish aliases from
- // version_id. A default version alias will be created for the first version
- // of the model, and there must be exactly one default version alias for a
- // model.
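- //
- // For illustration only (the project, model ID, and alias below are
- // hypothetical), the same version could then be referenced either way:
- //
- //   `projects/my-project/locations/us-central1/models/1234@golden` (by alias)
- //   `projects/my-project/locations/us-central1/models/1234@2` (by version ID)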
- repeated string version_aliases = 29;
- // Output only. Timestamp when this version was created.
- google.protobuf.Timestamp version_create_time = 31 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. Timestamp when this version was most recently updated.
- google.protobuf.Timestamp version_update_time = 32 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Required. The display name of the Model.
- // The name can be up to 128 characters long and can consist of any UTF-8
- // characters.
- string display_name = 2 [(google.api.field_behavior) = REQUIRED];
- // The description of the Model.
- string description = 3;
- // The description of this version.
- string version_description = 30;
- // The schemata that describe formats of the Model's predictions and
- // explanations as given and returned via
- // [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] and [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain].
- PredictSchemata predict_schemata = 4;
- // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
- // information about the Model that is specific to it. Unset if the Model
- // does not have any additional information.
- // The schema is defined as an OpenAPI 3.0.2 [Schema
- // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
- // AutoML Models always have this field populated by Vertex AI. If no
- // additional metadata is needed, this field is set to an empty string.
- // Note: The URI given on output will be immutable and probably different from
- // the one given on input, including the URI scheme. The output URI will
- // point to a location where the user only has read access.
- string metadata_schema_uri = 5 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. Additional information about the Model; the schema of the metadata can
- // be found in [metadata_schema][google.cloud.aiplatform.v1.Model.metadata_schema_uri].
- // Unset if the Model does not have any additional information.
- google.protobuf.Value metadata = 6 [(google.api.field_behavior) = IMMUTABLE];
- // Output only. The formats in which this Model may be exported. If empty, this Model is
- // not available for export.
- repeated ExportFormat supported_export_formats = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. The resource name of the TrainingPipeline that uploaded this Model, if
- // any.
- string training_pipeline = 7 [
- (google.api.field_behavior) = OUTPUT_ONLY,
- (google.api.resource_reference) = {
- type: "aiplatform.googleapis.com/TrainingPipeline"
- }
- ];
- // Input only. The specification of the container that is to be used when deploying
- // this Model. The specification is ingested upon
- // [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], and all binaries it contains are copied
- // and stored internally by Vertex AI.
- // Not present for AutoML Models.
- ModelContainerSpec container_spec = 9 [(google.api.field_behavior) = INPUT_ONLY];
- // Immutable. The path to the directory containing the Model artifact and any of its
- // supporting files.
- // Not present for AutoML Models.
- string artifact_uri = 26 [(google.api.field_behavior) = IMMUTABLE];
- // Output only. When this Model is deployed, its prediction resources are described by the
- // `prediction_resources` field of the [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object.
- // Because not all Models support all resource configuration types, the
- // configuration types this Model supports are listed here. If no
- // configuration types are listed, the Model cannot be deployed to an
- // [Endpoint][google.cloud.aiplatform.v1.Endpoint] and does not support
- // online predictions ([PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] or
- // [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain]). Such a Model can serve predictions by
- // using a [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob], if it has at least one entry each in
- // [supported_input_storage_formats][google.cloud.aiplatform.v1.Model.supported_input_storage_formats] and
- // [supported_output_storage_formats][google.cloud.aiplatform.v1.Model.supported_output_storage_formats].
- repeated DeploymentResourcesType supported_deployment_resources_types = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. The formats this Model supports in
- // [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config]. If
- // [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] exists, the instances
- // should be given as per that schema.
- //
- // The possible formats are:
- //
- // * `jsonl`
- // The JSON Lines format, where each instance is a single line. Uses
- // [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source].
- //
- // * `csv`
- // The CSV format, where each instance is a single comma-separated line.
- // The first line in the file is the header, containing comma-separated field
- // names. Uses [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source].
- //
- // * `tf-record`
- // The TFRecord format, where each instance is a single record in tfrecord
- // syntax. Uses [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source].
- //
- // * `tf-record-gzip`
- // Similar to `tf-record`, but the file is gzipped. Uses
- // [GcsSource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.gcs_source].
- //
- // * `bigquery`
- // Each instance is a single row in BigQuery. Uses
- // [BigQuerySource][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig.bigquery_source].
- //
- // * `file-list`
- // Each line of the file is the location of an instance to process. Uses the
- // `gcs_source` field of the
- // [InputConfig][google.cloud.aiplatform.v1.BatchPredictionJob.InputConfig] object.
- //
- //
- // If this Model doesn't support any of these formats, it means it cannot be
- // used with a [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. However, if it has
- // [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], it could serve online
- // predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] or
- // [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain].
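- //
- // As a sketch of the `jsonl` case, each line of the input file holds one
- // instance encoded as a JSON object; the field names and values below are
- // hypothetical and depend on this Model's instance schema:
- //
- // ```json
- // {"feature_1": 3.14, "feature_2": "some text"}
- // ```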
- repeated string supported_input_storage_formats = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. The formats this Model supports in
- // [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config]. If both
- // [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri] and
- // [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.prediction_schema_uri] exist, the predictions
- // are returned together with their instances. In other words, the
- // prediction has the original instance data first, followed
- // by the actual prediction content (as per the schema).
- //
- // The possible formats are:
- //
- // * `jsonl`
- // The JSON Lines format, where each prediction is a single line. Uses
- // [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination].
- //
- // * `csv`
- // The CSV format, where each prediction is a single comma-separated line.
- // The first line in the file is the header, containing comma-separated field
- // names. Uses
- // [GcsDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.gcs_destination].
- //
- // * `bigquery`
- // Each prediction is a single row in a BigQuery table. Uses
- // [BigQueryDestination][google.cloud.aiplatform.v1.BatchPredictionJob.OutputConfig.bigquery_destination].
- //
- //
- // If this Model doesn't support any of these formats, it means it cannot be
- // used with a [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob]. However, if it has
- // [supported_deployment_resources_types][google.cloud.aiplatform.v1.Model.supported_deployment_resources_types], it could serve online
- // predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict] or
- // [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain].
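- //
- // As a sketch of the `jsonl` case, each output line carries the original
- // instance followed by the prediction content; the field names and values
- // below are hypothetical and depend on this Model's schemata:
- //
- // ```json
- // {"instance": {"feature_1": 3.14}, "prediction": {"score": 0.87}}
- // ```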
- repeated string supported_output_storage_formats = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. Timestamp when this Model was uploaded into Vertex AI.
- google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. Timestamp when this Model was most recently updated.
- google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. The pointers to DeployedModels created from this Model. Note that
- // the Model could have been deployed to Endpoints in different Locations.
- repeated DeployedModelRef deployed_models = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
- // The default explanation specification for this Model.
- //
- // The Model can be used for [requesting
- // explanation][PredictionService.Explain] after being
- // [deployed][google.cloud.aiplatform.v1.EndpointService.DeployModel] if it is populated.
- // The Model can be used for [batch
- // explanation][BatchPredictionJob.generate_explanation] if it is populated.
- //
- // All fields of the explanation_spec can be overridden by
- // [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] of
- // [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1.DeployModelRequest.deployed_model], or
- // [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] of
- // [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob].
- //
- // If the default explanation specification is not set for this Model, this
- // Model can still be used for [requesting
- // explanation][PredictionService.Explain] by setting
- // [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] of
- // [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1.DeployModelRequest.deployed_model] and for [batch
- // explanation][BatchPredictionJob.generate_explanation] by setting
- // [explanation_spec][google.cloud.aiplatform.v1.BatchPredictionJob.explanation_spec] of
- // [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob].
- ExplanationSpec explanation_spec = 23;
- // Used to perform consistent read-modify-write updates. If not set, a blind
- // "overwrite" update happens.
- string etag = 16;
- // The labels with user-defined metadata to organize your Models.
- //
- // Label keys and values can be no longer than 64 characters
- // (Unicode codepoints), can only contain lowercase letters, numeric
- // characters, underscores and dashes. International characters are allowed.
- //
- // See https://goo.gl/xmQnxf for more information and examples of labels.
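- //
- // A minimal sketch of such a map in JSON form (the keys and values are
- // made-up examples):
- //
- // ```json
- // {
- //   "env": "prod",
- //   "team": "vision-models"
- // }
- // ```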
- map<string, string> labels = 17;
- // Customer-managed encryption key spec for a Model. If set, this
- // Model and all sub-resources of this Model will be secured by this key.
- EncryptionSpec encryption_spec = 24;
- // Output only. Source of a model. It can be an AutoML training pipeline, a custom
- // training pipeline, BigQuery ML, or an existing Vertex AI Model.
- ModelSourceInfo model_source_info = 38 [(google.api.field_behavior) = OUTPUT_ONLY];
- // Output only. The resource name of the Artifact that was created in MetadataStore when
- // creating the Model. The Artifact resource name pattern is
- // `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`.
- string metadata_artifact = 44 [(google.api.field_behavior) = OUTPUT_ONLY];
- }
- // Contains the schemata used in Model's predictions and explanations via
- // [PredictionService.Predict][google.cloud.aiplatform.v1.PredictionService.Predict], [PredictionService.Explain][google.cloud.aiplatform.v1.PredictionService.Explain] and
- // [BatchPredictionJob][google.cloud.aiplatform.v1.BatchPredictionJob].
- message PredictSchemata {
- // Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
- // of a single instance, which is used in [PredictRequest.instances][google.cloud.aiplatform.v1.PredictRequest.instances],
- // [ExplainRequest.instances][google.cloud.aiplatform.v1.ExplainRequest.instances] and
- // [BatchPredictionJob.input_config][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
- // The schema is defined as an OpenAPI 3.0.2 [Schema
- // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
- // AutoML Models always have this field populated by Vertex AI.
- // Note: The URI given on output will be immutable and probably different from
- // the one given on input, including the URI scheme. The output URI will
- // point to a location where the user only has read access.
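- //
- // A minimal sketch of such a schema, shown here as JSON rather than YAML and
- // with hypothetical field names:
- //
- // ```json
- // {
- //   "type": "object",
- //   "properties": {
- //     "feature_1": {"type": "number"},
- //     "feature_2": {"type": "string"}
- //   },
- //   "required": ["feature_1"]
- // }
- // ```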
- string instance_schema_uri = 1 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. Points to a YAML file stored on Google Cloud Storage describing the
- // parameters of prediction and explanation via
- // [PredictRequest.parameters][google.cloud.aiplatform.v1.PredictRequest.parameters], [ExplainRequest.parameters][google.cloud.aiplatform.v1.ExplainRequest.parameters] and
- // [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1.BatchPredictionJob.model_parameters].
- // The schema is defined as an OpenAPI 3.0.2 [Schema
- // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
- // AutoML Models always have this field populated by Vertex AI. If no
- // parameters are supported, it is set to an empty string.
- // Note: The URI given on output will be immutable and probably different from
- // the one given on input, including the URI scheme. The output URI will
- // point to a location where the user only has read access.
- string parameters_schema_uri = 2 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
- // of a single prediction produced by this Model, which is returned via
- // [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions], [ExplainResponse.explanations][google.cloud.aiplatform.v1.ExplainResponse.explanations], and
- // [BatchPredictionJob.output_config][google.cloud.aiplatform.v1.BatchPredictionJob.output_config].
- // The schema is defined as an OpenAPI 3.0.2 [Schema
- // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
- // AutoML Models always have this field populated by Vertex AI.
- // Note: The URI given on output will be immutable and probably different from
- // the one given on input, including the URI scheme. The output URI will
- // point to a location where the user only has read access.
- string prediction_schema_uri = 3 [(google.api.field_behavior) = IMMUTABLE];
- }
- // Specification of a container for serving predictions. Some fields in this
- // message correspond to fields in the [Kubernetes Container v1 core
- // specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
- message ModelContainerSpec {
- // Required. Immutable. URI of the Docker image to be used as the custom container for serving
- // predictions. This URI must identify an image in Artifact Registry or
- // Container Registry. Learn more about the [container publishing
- // requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing),
- // including permissions requirements for the Vertex AI Service Agent.
- //
- // The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1.ModelService.UploadModel], stored
- // internally, and this original path is afterwards not used.
- //
- // To learn about the requirements for the Docker image itself, see
- // [Custom container
- // requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#).
- //
- // You can use the URI of one of Vertex AI's [pre-built container images for
- // prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers)
- // in this field.
- string image_uri = 1 [
- (google.api.field_behavior) = REQUIRED,
- (google.api.field_behavior) = IMMUTABLE
- ];
- // Immutable. Specifies the command that runs when the container starts. This overrides
- // the container's
- // [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint).
- // Specify this field as an array of executable and arguments, similar to a
- // Docker `ENTRYPOINT`'s "exec" form, not its "shell" form.
- //
- // If you do not specify this field, then the container's `ENTRYPOINT` runs,
- // in conjunction with the [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] field or the
- // container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd),
- // if either exists. If this field is not specified and the container does not
- // have an `ENTRYPOINT`, then refer to the Docker documentation about [how
- // `CMD` and `ENTRYPOINT`
- // interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
- //
- // If you specify this field, then you can also specify the `args` field to
- // provide additional arguments for this command. However, if you specify this
- // field, then the container's `CMD` is ignored. See the
- // [Kubernetes documentation about how the
- // `command` and `args` fields interact with a container's `ENTRYPOINT` and
- // `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
- //
- // In this field, you can reference [environment variables set by Vertex
- // AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
- // and environment variables set in the [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] field.
- // You cannot reference environment variables set in the Docker image. In
- // order for environment variables to be expanded, reference them by using the
- // following syntax:
- // <code>$(<var>VARIABLE_NAME</var>)</code>
- // Note that this differs from Bash variable expansion, which does not use
- // parentheses. If a variable cannot be resolved, the reference in the input
- // string is used unchanged. To avoid variable expansion, you can escape this
- // syntax with `$$`; for example:
- // <code>$$(<var>VARIABLE_NAME</var>)</code>
- // This field corresponds to the `command` field of the Kubernetes Containers
- // [v1 core
- // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
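- //
- // A minimal sketch (the executable, its arguments, and the assumption that
- // `AIP_STORAGE_URI` is one of the Vertex-AI-set variables are illustrative):
- //
- // ```json
- // {
- //   "command": ["python3", "server.py", "--model-dir", "$(AIP_STORAGE_URI)"]
- // }
- // ```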
- repeated string command = 2 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. Specifies arguments for the command that runs when the container starts.
- // This overrides the container's
- // [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify
- // this field as an array of executable and arguments, similar to a Docker
- // `CMD`'s "default parameters" form.
- //
- // If you don't specify this field but do specify the
- // [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] field, then the command from the
- // `command` field runs without any additional arguments. See the
- // [Kubernetes documentation about how the
- // `command` and `args` fields interact with a container's `ENTRYPOINT` and
- // `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes).
- //
- // If you don't specify this field and don't specify the `command` field,
- // then the container's
- // [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
- // `CMD` determine what runs based on their default behavior. See the Docker
- // documentation about [how `CMD` and `ENTRYPOINT`
- // interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact).
- //
- // In this field, you can reference [environment variables
- // set by Vertex
- // AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables)
- // and environment variables set in the [env][google.cloud.aiplatform.v1.ModelContainerSpec.env] field.
- // You cannot reference environment variables set in the Docker image. In
- // order for environment variables to be expanded, reference them by using the
- // following syntax:
- // <code>$(<var>VARIABLE_NAME</var>)</code>
- // Note that this differs from Bash variable expansion, which does not use
- // parentheses. If a variable cannot be resolved, the reference in the input
- // string is used unchanged. To avoid variable expansion, you can escape this
- // syntax with `$$`; for example:
- // <code>$$(<var>VARIABLE_NAME</var>)</code>
- // This field corresponds to the `args` field of the Kubernetes Containers
- // [v1 core
- // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
- repeated string args = 3 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. List of environment variables to set in the container. After the container
- // starts running, code running in the container can read these environment
- // variables.
- //
- // Additionally, the [command][google.cloud.aiplatform.v1.ModelContainerSpec.command] and
- // [args][google.cloud.aiplatform.v1.ModelContainerSpec.args] fields can reference these variables. Later
- // entries in this list can also reference earlier entries. For example, the
- // following sets the variable `VAR_2` to the value `foo bar`:
- //
- // ```json
- // [
- // {
- // "name": "VAR_1",
- // "value": "foo"
- // },
- // {
- // "name": "VAR_2",
- // "value": "$(VAR_1) bar"
- // }
- // ]
- // ```
- //
- // If you switch the order of the variables in the example, then the expansion
- // does not occur.
- //
- // This field corresponds to the `env` field of the Kubernetes Containers
- // [v1 core
- // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
- repeated EnvVar env = 4 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. List of ports to expose from the container. Vertex AI sends any
- // prediction requests that it receives to the first port on this list. Vertex
- // AI also sends
- // [liveness and health
- // checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness)
- // to this port.
- //
- // If you do not specify this field, it defaults to the following value:
- //
- // ```json
- // [
- // {
- // "containerPort": 8080
- // }
- // ]
- // ```
- //
- // Vertex AI does not use ports other than the first one listed. This field
- // corresponds to the `ports` field of the Kubernetes Containers
- // [v1 core
- // API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core).
- repeated Port ports = 5 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. HTTP path on the container to send prediction requests to. Vertex AI
- // forwards requests sent using
- // [projects.locations.endpoints.predict][google.cloud.aiplatform.v1.PredictionService.Predict] to this
- // path on the container's IP address and port. Vertex AI then returns the
- // container's response in the API response.
- //
- // For example, if you set this field to `/foo`, then when Vertex AI
- // receives a prediction request, it forwards the request body in a POST
- // request to the `/foo` path on the port of your container specified by the
- // first value of this `ModelContainerSpec`'s
- // [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] field.
- //
- // If you don't specify this field, it defaults to the following value when
- // you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]:
- // <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
- // The placeholders in this value are replaced as follows:
- //
- // * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
- // [Endpoint.name][] field of the Endpoint where this Model has been
- // deployed. (Vertex AI makes this value available to your container code
- // as the [`AIP_ENDPOINT_ID` environment
- // variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
- //
- // * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] of the `DeployedModel`.
- // (Vertex AI makes this value available to your container code
- // as the [`AIP_DEPLOYED_MODEL_ID` environment
- // variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
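- //
- // For example, with hypothetical IDs substituted, the default route could
- // look like `/v1/endpoints/1234567890/deployedModels/987654321:predict`.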
- string predict_route = 6 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. HTTP path on the container to send health checks to. Vertex AI
- // intermittently sends GET requests to this path on the container's IP
- // address and port to check that the container is healthy. Read more about
- // [health
- // checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health).
- //
- // For example, if you set this field to `/bar`, then Vertex AI
- // intermittently sends a GET request to the `/bar` path on the port of your
- // container specified by the first value of this `ModelContainerSpec`'s
- // [ports][google.cloud.aiplatform.v1.ModelContainerSpec.ports] field.
- //
- // If you don't specify this field, it defaults to the following value when
- // you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1.EndpointService.DeployModel]:
- // <code>/v1/endpoints/<var>ENDPOINT</var>/deployedModels/<var>DEPLOYED_MODEL</var>:predict</code>
- // The placeholders in this value are replaced as follows:
- //
- // * <var>ENDPOINT</var>: The last segment (following `endpoints/`) of the
- // [Endpoint.name][] field of the Endpoint where this Model has been
- // deployed. (Vertex AI makes this value available to your container code
- // as the [`AIP_ENDPOINT_ID` environment
- // variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
- //
- // * <var>DEPLOYED_MODEL</var>: [DeployedModel.id][google.cloud.aiplatform.v1.DeployedModel.id] of the `DeployedModel`.
- // (Vertex AI makes this value available to your container code as the
- // [`AIP_DEPLOYED_MODEL_ID` environment
- // variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).)
- string health_route = 7 [(google.api.field_behavior) = IMMUTABLE];
- }
- // Represents a network port in a container.
- message Port {
- // The number of the port to expose on the pod's IP address.
- // Must be a valid port number, between 1 and 65535 inclusive.
- int32 container_port = 3;
- }
- // Detailed description of the source information of the model.
- message ModelSourceInfo {
- // Source of the model.
- enum ModelSourceType {
- // Should not be used.
- MODEL_SOURCE_TYPE_UNSPECIFIED = 0;
- // The Model is uploaded by an AutoML training pipeline.
- AUTOML = 1;
- // The Model is uploaded by the user or by a custom training pipeline.
- CUSTOM = 2;
- // The Model is registered and synced from BigQuery ML.
- BQML = 3;
- }
- // Type of the model source.
- ModelSourceType source_type = 1;
- }
|