  1. // Copyright 2021 Google LLC
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. syntax = "proto3";
  15. package google.cloud.automl.v1;
  16. import "google/api/field_behavior.proto";
  17. import "google/cloud/automl/v1/classification.proto";
  18. option csharp_namespace = "Google.Cloud.AutoML.V1";
  19. option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
  20. option java_multiple_files = true;
  21. option java_outer_classname = "ImageProto";
  22. option java_package = "com.google.cloud.automl.v1";
  23. option php_namespace = "Google\\Cloud\\AutoMl\\V1";
  24. option ruby_package = "Google::Cloud::AutoML::V1";
  25. // Dataset metadata that is specific to image classification.
  26. message ImageClassificationDatasetMetadata {
  27. // Required. Type of the classification problem.
  28. ClassificationType classification_type = 1 [(google.api.field_behavior) = REQUIRED];
  29. }
  30. // Dataset metadata specific to image object detection.
  31. message ImageObjectDetectionDatasetMetadata {
  32. }
  33. // Model metadata for image classification.
  34. message ImageClassificationModelMetadata {
  35. // Optional. The ID of the `base` model. If it is specified, the new model
  36. // will be created based on the `base` model. Otherwise, the new model will be
  37. // created from scratch. The `base` model must be in the same
  38. // `project` and `location` as the new model to create, and have the same
  39. // `model_type`.
  40. string base_model_id = 1 [(google.api.field_behavior) = OPTIONAL];
  41. // Optional. The train budget of creating this model, expressed in milli node
  42. // hours i.e. 1,000 value in this field means 1 node hour. The actual
  43. // `train_cost` will be equal or less than this value. If further model
  44. // training ceases to provide any improvements, it will stop without using
  45. // full budget and the stop_reason will be `MODEL_CONVERGED`.
  46. // Note, node_hour = actual_hour * number_of_nodes_invovled.
  47. // For model type `cloud`(default), the train budget must be between 8,000
  48. // and 800,000 milli node hours, inclusive. The default value is 192, 000
  49. // which represents one day in wall time. For model type
  50. // `mobile-low-latency-1`, `mobile-versatile-1`, `mobile-high-accuracy-1`,
  51. // `mobile-core-ml-low-latency-1`, `mobile-core-ml-versatile-1`,
  52. // `mobile-core-ml-high-accuracy-1`, the train budget must be between 1,000
  53. // and 100,000 milli node hours, inclusive. The default value is 24, 000 which
  54. // represents one day in wall time.
  55. int64 train_budget_milli_node_hours = 16 [(google.api.field_behavior) = OPTIONAL];
  56. // Output only. The actual train cost of creating this model, expressed in
  57. // milli node hours, i.e. 1,000 value in this field means 1 node hour.
  58. // Guaranteed to not exceed the train budget.
  59. int64 train_cost_milli_node_hours = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
  60. // Output only. The reason that this create model operation stopped,
  61. // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
  62. string stop_reason = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
  63. // Optional. Type of the model. The available values are:
  64. // * `cloud` - Model to be used via prediction calls to AutoML API.
  65. // This is the default value.
  66. // * `mobile-low-latency-1` - A model that, in addition to providing
  67. // prediction via AutoML API, can also be exported (see
  68. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
  69. // with TensorFlow afterwards. Expected to have low latency, but
  70. // may have lower prediction quality than other models.
  71. // * `mobile-versatile-1` - A model that, in addition to providing
  72. // prediction via AutoML API, can also be exported (see
  73. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
  74. // with TensorFlow afterwards.
  75. // * `mobile-high-accuracy-1` - A model that, in addition to providing
  76. // prediction via AutoML API, can also be exported (see
  77. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
  78. // with TensorFlow afterwards. Expected to have a higher
  79. // latency, but should also have a higher prediction quality
  80. // than other models.
  81. // * `mobile-core-ml-low-latency-1` - A model that, in addition to providing
  82. // prediction via AutoML API, can also be exported (see
  83. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
  84. // ML afterwards. Expected to have low latency, but may have
  85. // lower prediction quality than other models.
  86. // * `mobile-core-ml-versatile-1` - A model that, in addition to providing
  87. // prediction via AutoML API, can also be exported (see
  88. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with Core
  89. // ML afterwards.
  90. // * `mobile-core-ml-high-accuracy-1` - A model that, in addition to
  91. // providing prediction via AutoML API, can also be exported
  92. // (see [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile device with
  93. // Core ML afterwards. Expected to have a higher latency, but
  94. // should also have a higher prediction quality than other
  95. // models.
  96. string model_type = 7 [(google.api.field_behavior) = OPTIONAL];
  97. // Output only. An approximate number of online prediction QPS that can
  98. // be supported by this model per each node on which it is deployed.
  99. double node_qps = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
  100. // Output only. The number of nodes this model is deployed on. A node is an
  101. // abstraction of a machine resource, which can handle online prediction QPS
  102. // as given in the node_qps field.
  103. int64 node_count = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
  104. }
  105. // Model metadata specific to image object detection.
  106. message ImageObjectDetectionModelMetadata {
  107. // Optional. Type of the model. The available values are:
  108. // * `cloud-high-accuracy-1` - (default) A model to be used via prediction
  109. // calls to AutoML API. Expected to have a higher latency, but
  110. // should also have a higher prediction quality than other
  111. // models.
  112. // * `cloud-low-latency-1` - A model to be used via prediction
  113. // calls to AutoML API. Expected to have low latency, but may
  114. // have lower prediction quality than other models.
  115. // * `mobile-low-latency-1` - A model that, in addition to providing
  116. // prediction via AutoML API, can also be exported (see
  117. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
  118. // with TensorFlow afterwards. Expected to have low latency, but
  119. // may have lower prediction quality than other models.
  120. // * `mobile-versatile-1` - A model that, in addition to providing
  121. // prediction via AutoML API, can also be exported (see
  122. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
  123. // with TensorFlow afterwards.
  124. // * `mobile-high-accuracy-1` - A model that, in addition to providing
  125. // prediction via AutoML API, can also be exported (see
  126. // [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]) and used on a mobile or edge device
  127. // with TensorFlow afterwards. Expected to have a higher
  128. // latency, but should also have a higher prediction quality
  129. // than other models.
  130. string model_type = 1 [(google.api.field_behavior) = OPTIONAL];
  131. // Output only. The number of nodes this model is deployed on. A node is an
  132. // abstraction of a machine resource, which can handle online prediction QPS
  133. // as given in the qps_per_node field.
  134. int64 node_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
  135. // Output only. An approximate number of online prediction QPS that can
  136. // be supported by this model per each node on which it is deployed.
  137. double node_qps = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
  138. // Output only. The reason that this create model operation stopped,
  139. // e.g. `BUDGET_REACHED`, `MODEL_CONVERGED`.
  140. string stop_reason = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
  141. // Optional. The train budget of creating this model, expressed in milli node
  142. // hours i.e. 1,000 value in this field means 1 node hour. The actual
  143. // `train_cost` will be equal or less than this value. If further model
  144. // training ceases to provide any improvements, it will stop without using
  145. // full budget and the stop_reason will be `MODEL_CONVERGED`.
  146. // Note, node_hour = actual_hour * number_of_nodes_invovled.
  147. // For model type `cloud-high-accuracy-1`(default) and `cloud-low-latency-1`,
  148. // the train budget must be between 20,000 and 900,000 milli node hours,
  149. // inclusive. The default value is 216, 000 which represents one day in
  150. // wall time.
  151. // For model type `mobile-low-latency-1`, `mobile-versatile-1`,
  152. // `mobile-high-accuracy-1`, `mobile-core-ml-low-latency-1`,
  153. // `mobile-core-ml-versatile-1`, `mobile-core-ml-high-accuracy-1`, the train
  154. // budget must be between 1,000 and 100,000 milli node hours, inclusive.
  155. // The default value is 24, 000 which represents one day in wall time.
  156. int64 train_budget_milli_node_hours = 6 [(google.api.field_behavior) = OPTIONAL];
  157. // Output only. The actual train cost of creating this model, expressed in
  158. // milli node hours, i.e. 1,000 value in this field means 1 node hour.
  159. // Guaranteed to not exceed the train budget.
  160. int64 train_cost_milli_node_hours = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
  161. }
  162. // Model deployment metadata specific to Image Classification.
  163. message ImageClassificationModelDeploymentMetadata {
  164. // Input only. The number of nodes to deploy the model on. A node is an
  165. // abstraction of a machine resource, which can handle online prediction QPS
  166. // as given in the model's
  167. // [node_qps][google.cloud.automl.v1.ImageClassificationModelMetadata.node_qps].
  168. // Must be between 1 and 100, inclusive on both ends.
  169. int64 node_count = 1 [(google.api.field_behavior) = INPUT_ONLY];
  170. }
  171. // Model deployment metadata specific to Image Object Detection.
  172. message ImageObjectDetectionModelDeploymentMetadata {
  173. // Input only. The number of nodes to deploy the model on. A node is an
  174. // abstraction of a machine resource, which can handle online prediction QPS
  175. // as given in the model's
  176. // [qps_per_node][google.cloud.automl.v1.ImageObjectDetectionModelMetadata.qps_per_node].
  177. // Must be between 1 and 100, inclusive on both ends.
  178. int64 node_count = 1 [(google.api.field_behavior) = INPUT_ONLY];
  179. }