video_intelligence.proto

// Copyright 2019 Google LLC.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

syntax = "proto3";

package google.cloud.videointelligence.v1p2beta1;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";

option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P2Beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence";
option java_multiple_files = true;
option java_outer_classname = "VideoIntelligenceServiceProto";
option java_package = "com.google.cloud.videointelligence.v1p2beta1";
option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p2beta1";
option ruby_package = "Google::Cloud::VideoIntelligence::V1p2beta1";

// Service that implements Google Cloud Video Intelligence API.
service VideoIntelligenceService {
  option (google.api.default_host) = "videointelligence.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform";

  // Performs asynchronous video annotation. Progress and results can be
  // retrieved through the `google.longrunning.Operations` interface.
  // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
  // `Operation.response` contains `AnnotateVideoResponse` (results).
  rpc AnnotateVideo(AnnotateVideoRequest)
      returns (google.longrunning.Operation) {
    option (google.api.http) = {
      post: "/v1p2beta1/videos:annotate"
      body: "*"
    };
    option (google.api.method_signature) = "input_uri,features";
    option (google.longrunning.operation_info) = {
      response_type: "AnnotateVideoResponse"
      metadata_type: "AnnotateVideoProgress"
    };
  }
}
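
// The request/response shapes below are illustrative only and are not part of
// the .proto definition. Given the HTTP binding above, a REST client would
// POST a JSON-encoded AnnotateVideoRequest and get back a long-running
// Operation (the operation name and the URI in the body are hypothetical
// placeholders):
//
//   POST https://videointelligence.googleapis.com/v1p2beta1/videos:annotate
//   { "inputUri": "gs://example-bucket/example-video.mp4",
//     "features": ["LABEL_DETECTION"] }
//
//   => { "name": "<operation-name>", "done": false }
//
// Polling the returned operation via `google.longrunning.Operations` yields
// `AnnotateVideoProgress` in `metadata` while running and
// `AnnotateVideoResponse` in `response` once `done` is true.
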
// Video annotation request.
message AnnotateVideoRequest {
  // Input video location. Currently, only
  // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
  // supported, which must be specified in the following format:
  // `gs://bucket-id/object-id` (other URI formats return
  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
  // For more information, see
  // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
  // A video URI may include wildcards in `object-id`, and thus identify
  // multiple videos. Supported wildcards: '*' to match 0 or more characters;
  // '?' to match 1 character. If unset, the input video should be embedded
  // in the request as `input_content`. If set, `input_content` should be
  // unset.
  string input_uri = 1;

  // The video data bytes.
  // If unset, the input video(s) should be specified via `input_uri`.
  // If set, `input_uri` should be unset.
  bytes input_content = 6;

  // Required. Requested video annotation features.
  repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED];

  // Additional video context and/or feature-specific parameters.
  VideoContext video_context = 3;

  // Optional. Location where the output (in JSON format) should be stored.
  // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
  // URIs are supported, which must be specified in the following format:
  // `gs://bucket-id/object-id` (other URI formats return
  // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]).
  // For more information, see
  // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
  string output_uri = 4 [(google.api.field_behavior) = OPTIONAL];

  // Optional. Cloud region where annotation should take place. Supported
  // cloud regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If
  // no region is specified, a region will be determined based on video file
  // location.
  string location_id = 5 [(google.api.field_behavior) = OPTIONAL];
}
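
// A minimal JSON sketch of this request (illustrative only; the bucket and
// object names are hypothetical). Exactly one of `input_uri` and
// `input_content` should be set, and `features` is required:
//
//   {
//     "inputUri": "gs://example-bucket/videos/*.mp4",
//     "features": ["LABEL_DETECTION", "SHOT_CHANGE_DETECTION"],
//     "locationId": "us-east1"
//   }
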
// Video context and/or feature-specific parameters.
message VideoContext {
  // Video segments to annotate. The segments may overlap and are not required
  // to be contiguous or span the whole video. If unspecified, each video is
  // treated as a single segment.
  repeated VideoSegment segments = 1;

  // Config for LABEL_DETECTION.
  LabelDetectionConfig label_detection_config = 2;

  // Config for SHOT_CHANGE_DETECTION.
  ShotChangeDetectionConfig shot_change_detection_config = 3;

  // Config for EXPLICIT_CONTENT_DETECTION.
  ExplicitContentDetectionConfig explicit_content_detection_config = 4;

  // Config for TEXT_DETECTION.
  TextDetectionConfig text_detection_config = 8;
}

// Config for LABEL_DETECTION.
message LabelDetectionConfig {
  // What labels should be detected with LABEL_DETECTION, in addition to
  // video-level labels or segment-level labels.
  // If unspecified, defaults to `SHOT_MODE`.
  LabelDetectionMode label_detection_mode = 1;

  // Whether the video has been shot from a stationary (i.e. non-moving)
  // camera. When set to true, might improve detection accuracy for moving
  // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
  bool stationary_camera = 2;

  // Model to use for label detection.
  // Supported values: "builtin/stable" (the default if unset) and
  // "builtin/latest".
  string model = 3;
}

// Config for SHOT_CHANGE_DETECTION.
message ShotChangeDetectionConfig {
  // Model to use for shot change detection.
  // Supported values: "builtin/stable" (the default if unset) and
  // "builtin/latest".
  string model = 1;
}

// Config for EXPLICIT_CONTENT_DETECTION.
message ExplicitContentDetectionConfig {
  // Model to use for explicit content detection.
  // Supported values: "builtin/stable" (the default if unset) and
  // "builtin/latest".
  string model = 1;
}
// Config for TEXT_DETECTION.
message TextDetectionConfig {
  // Language hints can be specified if the language to be detected is known a
  // priori. They can increase the accuracy of the detection. Each hint must
  // be a language code in BCP-47 format.
  //
  // Automatic language detection is performed if no hint is provided.
  repeated string language_hints = 1;
}
// Video segment.
message VideoSegment {
  // Time-offset, relative to the beginning of the video,
  // corresponding to the start of the segment (inclusive).
  google.protobuf.Duration start_time_offset = 1;

  // Time-offset, relative to the beginning of the video,
  // corresponding to the end of the segment (inclusive).
  google.protobuf.Duration end_time_offset = 2;
}
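
// For example (illustrative values), a segment covering 1.5s to 4s of the
// video is encoded in the proto3 JSON mapping as Duration strings:
//
//   { "startTimeOffset": "1.5s", "endTimeOffset": "4s" }
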
// Video segment level annotation results for label detection.
message LabelSegment {
  // Video segment where a label was detected.
  VideoSegment segment = 1;

  // Confidence that the label is accurate. Range: [0, 1].
  float confidence = 2;
}

// Video frame level annotation results for label detection.
message LabelFrame {
  // Time-offset, relative to the beginning of the video, corresponding to the
  // video frame for this location.
  google.protobuf.Duration time_offset = 1;

  // Confidence that the label is accurate. Range: [0, 1].
  float confidence = 2;
}

// Detected entity from video analysis.
message Entity {
  // Opaque entity ID. Some IDs may be available in
  // [Google Knowledge Graph Search
  // API](https://developers.google.com/knowledge-graph/).
  string entity_id = 1;

  // Textual description, e.g. `Fixed-gear bicycle`.
  string description = 2;

  // Language code for `description` in BCP-47 format.
  string language_code = 3;
}
// Label annotation.
message LabelAnnotation {
  // Detected entity.
  Entity entity = 1;

  // Common categories for the detected entity.
  // E.g., when the label is `Terrier`, the category is likely `dog`. In some
  // cases there might be more than one category, e.g. `Terrier` could also be
  // a `pet`.
  repeated Entity category_entities = 2;

  // All video segments where a label was detected.
  repeated LabelSegment segments = 3;

  // All video frames where a label was detected.
  repeated LabelFrame frames = 4;
}
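
// An illustrative JSON sketch of one label annotation (all values are
// hypothetical):
//
//   {
//     "entity": { "description": "terrier", "languageCode": "en-US" },
//     "categoryEntities": [ { "description": "dog", "languageCode": "en-US" } ],
//     "segments": [ {
//       "segment": { "startTimeOffset": "0s", "endTimeOffset": "14.8s" },
//       "confidence": 0.97
//     } ]
//   }
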
// Video frame level annotation results for explicit content.
message ExplicitContentFrame {
  // Time-offset, relative to the beginning of the video, corresponding to the
  // video frame for this location.
  google.protobuf.Duration time_offset = 1;

  // Likelihood of the pornography content.
  Likelihood pornography_likelihood = 2;
}
// Explicit content annotation (based on per-frame visual signals only).
// If no explicit content has been detected in a frame, no annotations are
// present for that frame.
message ExplicitContentAnnotation {
  // All video frames where explicit content was detected.
  repeated ExplicitContentFrame frames = 1;
}

// Normalized bounding box.
// The normalized vertex coordinates are relative to the original image.
// Range: [0, 1].
message NormalizedBoundingBox {
  // Left X coordinate.
  float left = 1;

  // Top Y coordinate.
  float top = 2;

  // Right X coordinate.
  float right = 3;

  // Bottom Y coordinate.
  float bottom = 4;
}
// Annotation results for a single video.
message VideoAnnotationResults {
  // Video file location in
  // [Google Cloud Storage](https://cloud.google.com/storage/).
  string input_uri = 1;

  // Label annotations on video level or user specified segment level.
  // There is exactly one element for each unique label.
  repeated LabelAnnotation segment_label_annotations = 2;

  // Label annotations on shot level.
  // There is exactly one element for each unique label.
  repeated LabelAnnotation shot_label_annotations = 3;

  // Label annotations on frame level.
  // There is exactly one element for each unique label.
  repeated LabelAnnotation frame_label_annotations = 4;

  // Shot annotations. Each shot is represented as a video segment.
  repeated VideoSegment shot_annotations = 6;

  // Explicit content annotation.
  ExplicitContentAnnotation explicit_annotation = 7;

  // OCR text detection and tracking.
  // Annotations for the list of detected text snippets. Each snippet has a
  // list of frame-level information associated with it.
  repeated TextAnnotation text_annotations = 12;

  // Annotations for the list of objects detected and tracked in the video.
  repeated ObjectTrackingAnnotation object_annotations = 14;

  // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
  // some videos may succeed and some may fail.
  google.rpc.Status error = 9;
}
// Video annotation response. Included in the `response`
// field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
message AnnotateVideoResponse {
  // Annotation results for all videos specified in `AnnotateVideoRequest`.
  repeated VideoAnnotationResults annotation_results = 1;
}

// Annotation progress for a single video.
message VideoAnnotationProgress {
  // Video file location in
  // [Google Cloud Storage](https://cloud.google.com/storage/).
  string input_uri = 1;

  // Approximate percentage processed thus far. Guaranteed to be
  // 100 when fully processed.
  int32 progress_percent = 2;

  // Time when the request was received.
  google.protobuf.Timestamp start_time = 3;

  // Time of the most recent update.
  google.protobuf.Timestamp update_time = 4;
}

// Video annotation progress. Included in the `metadata`
// field of the `Operation` returned by the `GetOperation`
// call of the `google::longrunning::Operations` service.
message AnnotateVideoProgress {
  // Progress metadata for all videos specified in `AnnotateVideoRequest`.
  repeated VideoAnnotationProgress annotation_progress = 1;
}
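
// Illustrative sketch (hypothetical values) of the progress that surfaces
// while polling the operation: `Operation.metadata` decodes to
// AnnotateVideoProgress, e.g.
//
//   {
//     "annotationProgress": [ {
//       "inputUri": "gs://example-bucket/example-video.mp4",
//       "progressPercent": 42,
//       "startTime": "2019-01-01T00:00:00Z",
//       "updateTime": "2019-01-01T00:01:30Z"
//     } ]
//   }
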
// A vertex represents a 2D point in the image.
// NOTE: the normalized vertex coordinates are relative to the original image
// and range from 0 to 1.
message NormalizedVertex {
  // X coordinate.
  float x = 1;

  // Y coordinate.
  float y = 2;
}
// Normalized bounding polygon for text (that might not be aligned with axis).
// Contains the list of the corner points in clockwise order starting from
// the top-left corner. For example, for a rectangular bounding box:
// When the text is horizontal it might look like:
//         0----1
//         |    |
//         3----2
//
// When it's clockwise rotated 180 degrees around the top-left corner it
// becomes:
//         2----3
//         |    |
//         1----0
//
// and the vertex order will still be (0, 1, 2, 3). Note that values can be
// less than 0, or greater than 1 due to trigonometric calculations for the
// location of the box.
message NormalizedBoundingPoly {
  // Normalized vertices of the bounding polygon.
  repeated NormalizedVertex vertices = 1;
}
// Video segment level annotation results for text detection.
message TextSegment {
  // Video segment where a text snippet was detected.
  VideoSegment segment = 1;

  // Confidence for the track of detected text. It is calculated as the
  // highest over all frames where OCR detected text appears.
  float confidence = 2;

  // Information related to the frames where OCR detected text appears.
  repeated TextFrame frames = 3;
}

// Video frame level annotation results for text annotation (OCR).
// Contains information regarding timestamp and bounding box locations for the
// frames containing detected OCR text snippets.
message TextFrame {
  // Bounding polygon of the detected text for this frame.
  NormalizedBoundingPoly rotated_bounding_box = 1;

  // Timestamp of this frame.
  google.protobuf.Duration time_offset = 2;
}

// Annotations related to one detected OCR text snippet. This will contain the
// corresponding text, confidence value, and frame level information for each
// detection.
message TextAnnotation {
  // The detected text.
  string text = 1;

  // All video segments where OCR detected text appears.
  repeated TextSegment segments = 2;
}
// Video frame level annotations for object detection and tracking. This field
// stores per frame location, time offset, and confidence.
message ObjectTrackingFrame {
  // The normalized bounding box location of this object track for the frame.
  NormalizedBoundingBox normalized_bounding_box = 1;

  // The timestamp of the frame in microseconds.
  google.protobuf.Duration time_offset = 2;
}
// Annotations corresponding to one tracked object.
message ObjectTrackingAnnotation {
  // Different representation of tracking info in non-streaming batch
  // and streaming modes.
  oneof track_info {
    // Non-streaming batch mode ONLY.
    // Each object track corresponds to one video segment where it appears.
    VideoSegment segment = 3;

    // Streaming mode ONLY.
    // In streaming mode, we do not know the end time of a tracked object
    // before it is completed. Hence, there is no VideoSegment info returned.
    // Instead, we provide a unique, identifiable integer track_id so that
    // customers can correlate the results of the ongoing
    // ObjectTrackingAnnotation of the same track_id over time.
    int64 track_id = 5;
  }

  // Entity to specify the object category that this track is labeled as.
  Entity entity = 1;

  // Object category's labeling confidence of this track.
  float confidence = 4;

  // Information corresponding to all frames where this object track appears.
  repeated ObjectTrackingFrame frames = 2;
}
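
// Illustrative JSON sketch (hypothetical values) of one tracked object in
// non-streaming batch mode, where the `segment` branch of `track_info` is
// populated instead of `track_id`:
//
//   {
//     "entity": { "description": "car", "languageCode": "en-US" },
//     "confidence": 0.88,
//     "segment": { "startTimeOffset": "3s", "endTimeOffset": "7.2s" },
//     "frames": [ {
//       "timeOffset": "3s",
//       "normalizedBoundingBox": { "left": 0.12, "top": 0.34, "right": 0.45, "bottom": 0.78 }
//     } ]
//   }
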
// Video annotation feature.
enum Feature {
  // Unspecified.
  FEATURE_UNSPECIFIED = 0;

  // Label detection. Detect objects, such as dog or flower.
  LABEL_DETECTION = 1;

  // Shot change detection.
  SHOT_CHANGE_DETECTION = 2;

  // Explicit content detection.
  EXPLICIT_CONTENT_DETECTION = 3;

  // OCR text detection and tracking.
  TEXT_DETECTION = 7;

  // Object detection and tracking.
  OBJECT_TRACKING = 9;
}

// Label detection mode.
enum LabelDetectionMode {
  // Unspecified.
  LABEL_DETECTION_MODE_UNSPECIFIED = 0;

  // Detect shot-level labels.
  SHOT_MODE = 1;

  // Detect frame-level labels.
  FRAME_MODE = 2;

  // Detect both shot-level and frame-level labels.
  SHOT_AND_FRAME_MODE = 3;
}

// Bucketized representation of likelihood.
enum Likelihood {
  // Unspecified likelihood.
  LIKELIHOOD_UNSPECIFIED = 0;

  // Very unlikely.
  VERY_UNLIKELY = 1;

  // Unlikely.
  UNLIKELY = 2;

  // Possible.
  POSSIBLE = 3;

  // Likely.
  LIKELY = 4;

  // Very likely.
  VERY_LIKELY = 5;
}