prediction_service.proto

// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.automl.v1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/automl/v1/annotation_payload.proto";
import "google/cloud/automl/v1/data_items.proto";
import "google/cloud/automl/v1/io.proto";
import "google/longrunning/operations.proto";

option csharp_namespace = "Google.Cloud.AutoML.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl";
option java_multiple_files = true;
option java_outer_classname = "PredictionServiceProto";
option java_package = "com.google.cloud.automl.v1";
option php_namespace = "Google\\Cloud\\AutoMl\\V1";
option ruby_package = "Google::Cloud::AutoML::V1";

// AutoML Prediction API.
//
// On any input that is documented to expect a string parameter in
// snake_case or dash-case, either of those cases is accepted.
service PredictionService {
  option (google.api.default_host) = "automl.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Perform an online prediction. The prediction result is directly
  // returned in the response.
  // Available for the following ML scenarios, and their expected request
  // payloads:
  //
  // AutoML Vision Classification
  //
  // * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
  //
  // AutoML Vision Object Detection
  //
  // * An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB.
  //
  // AutoML Natural Language Classification
  //
  // * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
  //   .PDF, .TIF or .TIFF format with size up to 2MB.
  //
  // AutoML Natural Language Entity Extraction
  //
  // * A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a document
  //   in .PDF, .TIF or .TIFF format with size up to 20MB.
  //
  // AutoML Natural Language Sentiment Analysis
  //
  // * A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in
  //   .PDF, .TIF or .TIFF format with size up to 2MB.
  //
  // AutoML Translation
  //
  // * A TextSnippet up to 25,000 characters, UTF-8 encoded.
  //
  // AutoML Tables
  //
  // * A row with column values matching the columns of the model,
  //   up to 5MB. Not available for FORECASTING `prediction_type`.
  rpc Predict(PredictRequest) returns (PredictResponse) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/models/*}:predict"
      body: "*"
    };
    option (google.api.method_signature) = "name,payload,params";
  }
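
  // A minimal request sketch for the HTTP binding above, assuming a Vision
  // Classification model and the `image.image_bytes` field of ExamplePayload
  // defined in data_items.proto; JSON names follow the standard proto3 JSON
  // mapping, and PROJECT_ID / MODEL_ID / the image bytes are placeholders:
  //
  //   POST https://automl.googleapis.com/v1/projects/PROJECT_ID/locations/us-central1/models/MODEL_ID:predict
  //   {
  //     "payload": {
  //       "image": { "imageBytes": "BASE64_ENCODED_IMAGE" }
  //     },
  //     "params": { "score_threshold": "0.6" }
  //   }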

  // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1.PredictionService.Predict], the batch
  // prediction result won't be immediately available in the response. Instead,
  // a long-running operation object is returned. The user can poll the
  // operation result via the [GetOperation][google.longrunning.Operations.GetOperation]
  // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1.BatchPredictResult] is returned in
  // the [response][google.longrunning.Operation.response] field.
  // Available for the following ML scenarios:
  //
  // * AutoML Vision Classification
  // * AutoML Vision Object Detection
  // * AutoML Video Intelligence Classification
  // * AutoML Video Intelligence Object Tracking
  // * AutoML Natural Language Classification
  // * AutoML Natural Language Entity Extraction
  // * AutoML Natural Language Sentiment Analysis
  // * AutoML Tables
  rpc BatchPredict(BatchPredictRequest) returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1/{name=projects/*/locations/*/models/*}:batchPredict"
      body: "*"
    };
    option (google.api.method_signature) = "name,input_config,output_config,params";
    option (google.longrunning.operation_info) = {
      response_type: "BatchPredictResult"
      metadata_type: "OperationMetadata"
    };
  }
}
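
// A rough sketch of the long-running flow described above, assuming the
// standard [GetOperation][google.longrunning.Operations.GetOperation] HTTP
// mapping served by automl.googleapis.com; PROJECT_ID, MODEL_ID and
// OPERATION_ID are placeholders:
//
//   POST /v1/projects/PROJECT_ID/locations/us-central1/models/MODEL_ID:batchPredict
//     -> returns { "name": ".../operations/OPERATION_ID", "done": false, ... }
//
//   GET /v1/projects/PROJECT_ID/locations/us-central1/operations/OPERATION_ID
//     -> once "done" is true, the "response" field carries a BatchPredictResult.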

// Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
message PredictRequest {
  // Required. Name of the model requested to serve the prediction.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "automl.googleapis.com/Model"
    }
  ];

  // Required. Payload to perform a prediction on. The payload must match the
  // problem type that the model was trained to solve.
  ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED];

  // Additional domain-specific parameters; any string must be up to 25,000
  // characters long (see the sketch following this message).
  //
  // AutoML Vision Classification
  //
  // `score_threshold`
  // : (float) A value from 0.0 to 1.0. When the model
  // makes predictions for an image, it will only produce results that have
  // at least this confidence score. The default is 0.5.
  //
  // AutoML Vision Object Detection
  //
  // `score_threshold`
  // : (float) When Model detects objects on the image,
  // it will only produce bounding boxes which have at least this
  // confidence score. Value in 0 to 1 range, default is 0.5.
  //
  // `max_bounding_box_count`
  // : (int64) The maximum number of bounding
  // boxes returned. The default is 100. The
  // number of returned bounding boxes might be limited by the server.
  //
  // AutoML Tables
  //
  // `feature_importance`
  // : (boolean) Whether
  // [feature_importance][google.cloud.automl.v1.TablesModelColumnInfo.feature_importance]
  // is populated in the returned list of
  // [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation]
  // objects. The default is false.
  map<string, string> params = 3;
}
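
// For example, a minimal sketch of the `params` map above in proto text
// format (note that every value is passed as a string, even for numeric or
// boolean parameters, because the field is a map<string, string>):
//
//   params { key: "score_threshold"        value: "0.75" }
//   params { key: "max_bounding_box_count" value: "50" }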

// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict].
message PredictResponse {
  // Prediction result.
  // AutoML Translation and AutoML Natural Language Sentiment Analysis
  // return precisely one payload.
  repeated AnnotationPayload payload = 1;

  // The preprocessed example that AutoML actually makes prediction on.
  // Empty if AutoML does not preprocess the input example.
  //
  // For AutoML Natural Language (Classification, Entity Extraction, and
  // Sentiment Analysis), if the input is a document, the recognized text is
  // returned in the
  // [document_text][google.cloud.automl.v1.Document.document_text]
  // property.
  ExamplePayload preprocessed_input = 3;

  // Additional domain-specific prediction response metadata.
  //
  // AutoML Vision Object Detection
  //
  // `max_bounding_box_count`
  // : (int64) The maximum number of bounding boxes to return per image.
  //
  // AutoML Natural Language Sentiment Analysis
  //
  // `sentiment_score`
  // : (float, deprecated) A value between -1 and 1,
  // -1 maps to least positive sentiment, while 1 maps to the most positive
  // one and the higher the score, the more positive the sentiment in the
  // document is. Yet these values are relative to the training data, so
  // e.g. if all data was positive then -1 is also positive (though
  // the least).
  // `sentiment_score` is not the same as "score" and "magnitude"
  // from Sentiment Analysis in the Natural Language API.
  map<string, string> metadata = 2;
}
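
// A hypothetical response sketch for a classification model, assuming the
// AnnotationPayload fields defined in annotation_payload.proto
// (annotation_spec_id, display_name and a classification detail with a
// score); all values are illustrative only:
//
//   {
//     "payload": [
//       {
//         "annotationSpecId": "1234567890",
//         "displayName": "cat",
//         "classification": { "score": 0.97 }
//       }
//     ],
//     "metadata": {}
//   }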

// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
message BatchPredictRequest {
  // Required. Name of the model requested to serve the batch prediction.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "automl.googleapis.com/Model"
    }
  ];

  // Required. The input configuration for batch prediction.
  BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED];

  // Required. The configuration specifying where output predictions should
  // be written.
  BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED];

  // Additional domain-specific parameters for the predictions; any string must
  // be up to 25,000 characters long.
  //
  // AutoML Natural Language Classification
  //
  // `score_threshold`
  // : (float) A value from 0.0 to 1.0. When the model
  // makes predictions for a text snippet, it will only produce results
  // that have at least this confidence score. The default is 0.5.
  //
  // AutoML Vision Classification
  //
  // `score_threshold`
  // : (float) A value from 0.0 to 1.0. When the model
  // makes predictions for an image, it will only produce results that
  // have at least this confidence score. The default is 0.5.
  //
  // AutoML Vision Object Detection
  //
  // `score_threshold`
  // : (float) When Model detects objects on the image,
  // it will only produce bounding boxes which have at least this
  // confidence score. Value in 0 to 1 range, default is 0.5.
  //
  // `max_bounding_box_count`
  // : (int64) The maximum number of bounding
  // boxes returned per image. The default is 100. The
  // number of bounding boxes returned might be limited by the server.
  //
  // AutoML Video Intelligence Classification
  //
  // `score_threshold`
  // : (float) A value from 0.0 to 1.0. When the model
  // makes predictions for a video, it will only produce results that
  // have at least this confidence score. The default is 0.5.
  //
  // `segment_classification`
  // : (boolean) Set to true to request
  // segment-level classification. AutoML Video Intelligence returns
  // labels and their confidence scores for the entire segment of the
  // video that the user specified in the request configuration.
  // The default is true.
  //
  // `shot_classification`
  // : (boolean) Set to true to request shot-level
  // classification. AutoML Video Intelligence determines the boundaries
  // for each camera shot in the entire segment of the video that the user
  // specified in the request configuration. AutoML Video Intelligence
  // then returns labels and their confidence scores for each detected
  // shot, along with the start and end time of the shot.
  // The default is false.
  //
  // WARNING: Model evaluation is not done for this classification type;
  // the quality of it depends on training data, but there are no metrics
  // provided to describe that quality.
  //
  // `1s_interval_classification`
  // : (boolean) Set to true to request
  // classification for a video at one-second intervals. AutoML Video
  // Intelligence returns labels and their confidence scores for each
  // second of the entire segment of the video that the user specified in
  // the request configuration. The default is false.
  //
  // WARNING: Model evaluation is not done for this classification
  // type; the quality of it depends on training data, but there are no
  // metrics provided to describe that quality.
  //
  // AutoML Video Intelligence Object Tracking
  //
  // `score_threshold`
  // : (float) When Model detects objects on video frames,
  // it will only produce bounding boxes which have at least this
  // confidence score. Value in 0 to 1 range, default is 0.5.
  //
  // `max_bounding_box_count`
  // : (int64) The maximum number of bounding
  // boxes returned per image. The default is 100. The
  // number of bounding boxes returned might be limited by the server.
  //
  // `min_bounding_box_size`
  // : (float) Only bounding boxes whose shortest edge is at least this
  // long, as a relative value of the video frame size, are returned.
  // Value in 0 to 1 range. Default is 0.
  map<string, string> params = 5;
}
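
// A minimal request sketch, assuming the Cloud Storage based
// BatchPredictInputConfig / BatchPredictOutputConfig shapes defined in
// io.proto (a gcs_source with input URIs and a gcs_destination with an
// output prefix); bucket names and paths are placeholders:
//
//   {
//     "inputConfig": { "gcsSource": { "inputUris": ["gs://BUCKET/input.csv"] } },
//     "outputConfig": { "gcsDestination": { "outputUriPrefix": "gs://BUCKET/results/" } },
//     "params": { "score_threshold": "0.5" }
//   }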

// Result of the Batch Predict. This message is returned in
// [response][google.longrunning.Operation.response] of the operation returned
// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict].
message BatchPredictResult {
  // Additional domain-specific prediction response metadata.
  //
  // AutoML Vision Object Detection
  //
  // `max_bounding_box_count`
  // : (int64) The maximum number of bounding boxes returned per image.
  //
  // AutoML Video Intelligence Object Tracking
  //
  // `max_bounding_box_count`
  // : (int64) The maximum number of bounding boxes returned per frame.
  map<string, string> metadata = 1;
}
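
// When the batch operation completes, the
// [response][google.longrunning.Operation.response] field of the operation is
// an Any holding this message. A hypothetical completed-operation sketch in
// JSON form (names and values illustrative only):
//
//   {
//     "name": "projects/PROJECT_ID/locations/us-central1/operations/OPERATION_ID",
//     "done": true,
//     "response": {
//       "@type": "type.googleapis.com/google.cloud.automl.v1.BatchPredictResult",
//       "metadata": { "max_bounding_box_count": "100" }
//     }
//   }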