// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.automl.v1beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1beta1/annotation_payload.proto";
import "google/cloud/automl/v1beta1/data_items.proto";
import "google/cloud/automl/v1beta1/io.proto";
import "google/cloud/automl/v1beta1/operations.proto";
import "google/longrunning/operations.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl";
option java_multiple_files = true;
option java_outer_classname = "PredictionServiceProto";
option java_package = "com.google.cloud.automl.v1beta1";
option php_namespace = "Google\\Cloud\\AutoMl\\V1beta1";
option ruby_package = "Google::Cloud::AutoML::V1beta1";
  31. // AutoML Prediction API.
  32. //
  33. // On any input that is documented to expect a string parameter in
  34. // snake_case or kebab-case, either of those cases is accepted.
  35. service PredictionService {
  36. option (google.api.default_host) = "automl.googleapis.com";
  37. option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
  38. // Perform an online prediction. The prediction result will be directly
  39. // returned in the response.
  40. // Available for following ML problems, and their expected request payloads:
  41. // * Image Classification - Image in .JPEG, .GIF or .PNG format, image_bytes
  42. // up to 30MB.
  43. // * Image Object Detection - Image in .JPEG, .GIF or .PNG format, image_bytes
  44. // up to 30MB.
  45. // * Text Classification - TextSnippet, content up to 60,000 characters,
  46. // UTF-8 encoded.
  47. // * Text Extraction - TextSnippet, content up to 30,000 characters,
  48. // UTF-8 NFC encoded.
  49. // * Translation - TextSnippet, content up to 25,000 characters, UTF-8
  50. // encoded.
  51. // * Tables - Row, with column values matching the columns of the model,
  52. // up to 5MB. Not available for FORECASTING
  53. //
  54. // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type].
  55. // * Text Sentiment - TextSnippet, content up 500 characters, UTF-8
  56. // encoded.
  57. rpc Predict(PredictRequest) returns (PredictResponse) {
  58. option (google.api.http) = {
  59. post: "/v1beta1/{name=projects/*/locations/*/models/*}:predict"
  60. body: "*"
  61. };
  62. option (google.api.method_signature) = "name,payload,params";
  63. }
  64. // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch
  65. // prediction result won't be immediately available in the response. Instead,
  66. // a long running operation object is returned. User can poll the operation
  67. // result via [GetOperation][google.longrunning.Operations.GetOperation]
  68. // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is returned in
  69. // the [response][google.longrunning.Operation.response] field.
  70. // Available for following ML problems:
  71. // * Image Classification
  72. // * Image Object Detection
  73. // * Video Classification
  74. // * Video Object Tracking * Text Extraction
  75. // * Tables
  76. rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) {
  77. option (google.api.http) = {
  78. post: "/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict"
  79. body: "*"
  80. };
  81. option (google.api.method_signature) = "name,input_config,output_config,params";
  82. option (google.longrunning.operation_info) = {
  83. response_type: "BatchPredictResult"
  84. metadata_type: "OperationMetadata"
  85. };
  86. }
  87. }
  88. // Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
  89. message PredictRequest {
  90. // Required. Name of the model requested to serve the prediction.
  91. string name = 1 [
  92. (google.api.field_behavior) = REQUIRED,
  93. (google.api.resource_reference) = {
  94. type: "automl.googleapis.com/Model"
  95. }
  96. ];
  97. // Required. Payload to perform a prediction on. The payload must match the
  98. // problem type that the model was trained to solve.
  99. ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED];
  100. // Additional domain-specific parameters, any string must be up to 25000
  101. // characters long.
  102. //
  103. // * For Image Classification:
  104. //
  105. // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
  106. // makes predictions for an image, it will only produce results that have
  107. // at least this confidence score. The default is 0.5.
  108. //
  109. // * For Image Object Detection:
  110. // `score_threshold` - (float) When Model detects objects on the image,
  111. // it will only produce bounding boxes which have at least this
  112. // confidence score. Value in 0 to 1 range, default is 0.5.
  113. // `max_bounding_box_count` - (int64) No more than this number of bounding
  114. // boxes will be returned in the response. Default is 100, the
  115. // requested value may be limited by server.
  116. // * For Tables:
  117. // feature_imp<span>ortan</span>ce - (boolean) Whether feature importance
  118. // should be populated in the returned TablesAnnotation.
  119. // The default is false.
  120. map<string, string> params = 3;
  121. }
  122. // Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
  123. message PredictResponse {
  124. // Prediction result.
  125. // Translation and Text Sentiment will return precisely one payload.
  126. repeated AnnotationPayload payload = 1;
  127. // The preprocessed example that AutoML actually makes prediction on.
  128. // Empty if AutoML does not preprocess the input example.
  129. // * For Text Extraction:
  130. // If the input is a .pdf file, the OCR'ed text will be provided in
  131. // [document_text][google.cloud.automl.v1beta1.Document.document_text].
  132. ExamplePayload preprocessed_input = 3;
  133. // Additional domain-specific prediction response metadata.
  134. //
  135. // * For Image Object Detection:
  136. // `max_bounding_box_count` - (int64) At most that many bounding boxes per
  137. // image could have been returned.
  138. //
  139. // * For Text Sentiment:
  140. // `sentiment_score` - (float, deprecated) A value between -1 and 1,
  141. // -1 maps to least positive sentiment, while 1 maps to the most positive
  142. // one and the higher the score, the more positive the sentiment in the
  143. // document is. Yet these values are relative to the training data, so
  144. // e.g. if all data was positive then -1 will be also positive (though
  145. // the least).
  146. // The sentiment_score shouldn't be confused with "score" or "magnitude"
  147. // from the previous Natural Language Sentiment Analysis API.
  148. map<string, string> metadata = 2;
  149. }
  150. // Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
  151. message BatchPredictRequest {
  152. // Required. Name of the model requested to serve the batch prediction.
  153. string name = 1 [
  154. (google.api.field_behavior) = REQUIRED,
  155. (google.api.resource_reference) = {
  156. type: "automl.googleapis.com/Model"
  157. }
  158. ];
  159. // Required. The input configuration for batch prediction.
  160. BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];
  161. // Required. The Configuration specifying where output predictions should
  162. // be written.
  163. BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED];
  164. // Required. Additional domain-specific parameters for the predictions, any string must
  165. // be up to 25000 characters long.
  166. //
  167. // * For Text Classification:
  168. //
  169. // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
  170. // makes predictions for a text snippet, it will only produce results
  171. // that have at least this confidence score. The default is 0.5.
  172. //
  173. // * For Image Classification:
  174. //
  175. // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
  176. // makes predictions for an image, it will only produce results that
  177. // have at least this confidence score. The default is 0.5.
  178. //
  179. // * For Image Object Detection:
  180. //
  181. // `score_threshold` - (float) When Model detects objects on the image,
  182. // it will only produce bounding boxes which have at least this
  183. // confidence score. Value in 0 to 1 range, default is 0.5.
  184. // `max_bounding_box_count` - (int64) No more than this number of bounding
  185. // boxes will be produced per image. Default is 100, the
  186. // requested value may be limited by server.
  187. //
  188. // * For Video Classification :
  189. //
  190. // `score_threshold` - (float) A value from 0.0 to 1.0. When the model
  191. // makes predictions for a video, it will only produce results that
  192. // have at least this confidence score. The default is 0.5.
  193. // `segment_classification` - (boolean) Set to true to request
  194. // segment-level classification. AutoML Video Intelligence returns
  195. // labels and their confidence scores for the entire segment of the
  196. // video that user specified in the request configuration.
  197. // The default is "true".
  198. // `shot_classification` - (boolean) Set to true to request shot-level
  199. // classification. AutoML Video Intelligence determines the boundaries
  200. // for each camera shot in the entire segment of the video that user
  201. // specified in the request configuration. AutoML Video Intelligence
  202. // then returns labels and their confidence scores for each detected
  203. // shot, along with the start and end time of the shot.
  204. // WARNING: Model evaluation is not done for this classification type,
  205. // the quality of it depends on training data, but there are no metrics
  206. // provided to describe that quality. The default is "false".
  207. // `1s_interval_classification` - (boolean) Set to true to request
  208. // classification for a video at one-second intervals. AutoML Video
  209. // Intelligence returns labels and their confidence scores for each
  210. // second of the entire segment of the video that user specified in the
  211. // request configuration.
  212. // WARNING: Model evaluation is not done for this classification
  213. // type, the quality of it depends on training data, but there are no
  214. // metrics provided to describe that quality. The default is
  215. // "false".
  216. //
  217. // * For Tables:
  218. //
  219. // feature_imp<span>ortan</span>ce - (boolean) Whether feature importance
  220. // should be populated in the returned TablesAnnotations. The
  221. // default is false.
  222. //
  223. // * For Video Object Tracking:
  224. //
  225. // `score_threshold` - (float) When Model detects objects on video frames,
  226. // it will only produce bounding boxes which have at least this
  227. // confidence score. Value in 0 to 1 range, default is 0.5.
  228. // `max_bounding_box_count` - (int64) No more than this number of bounding
  229. // boxes will be returned per frame. Default is 100, the requested
  230. // value may be limited by server.
  231. // `min_bounding_box_size` - (float) Only bounding boxes with shortest edge
  232. // at least that long as a relative value of video frame size will be
  233. // returned. Value in 0 to 1 range. Default is 0.
  234. map<string, string> params = 5 [(google.api.field_behavior) = REQUIRED];
  235. }
  236. // Result of the Batch Predict. This message is returned in
  237. // [response][google.longrunning.Operation.response] of the operation returned
  238. // by the [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
  239. message BatchPredictResult {
  240. // Additional domain-specific prediction response metadata.
  241. //
  242. // * For Image Object Detection:
  243. // `max_bounding_box_count` - (int64) At most that many bounding boxes per
  244. // image could have been returned.
  245. //
  246. // * For Video Object Tracking:
  247. // `max_bounding_box_count` - (int64) At most that many bounding boxes per
  248. // frame could have been returned.
  249. map<string, string> metadata = 1;
  250. }