explanation_metadata.proto

// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.aiplatform.v1;

import "google/api/field_behavior.proto";
import "google/protobuf/struct.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "ExplanationMetadataProto";
option java_package = "com.google.cloud.aiplatform.v1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1";
option ruby_package = "Google::Cloud::AIPlatform::V1";

// Metadata describing the Model's input and output for explanation.
message ExplanationMetadata {
  // Metadata of the input of a feature.
  //
  // Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only
  // for Models that are using Vertex AI-provided images for Tensorflow.
  message InputMetadata {
    // Domain details of the input feature value. Provides numeric information
    // about the feature, such as its range (min, max). If the feature has been
    // pre-processed, for example with z-scoring, then it provides information
    // about how to recover the original feature. For example, if the input
    // feature is an image and it has been pre-processed to obtain 0-mean and
    // stddev = 1 values, then original_mean and original_stddev refer to the
    // mean and stddev of the original feature (e.g. image tensor) from which
    // the input feature (with mean = 0 and stddev = 1) was obtained.
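    //
    // As an illustrative sketch (not defined by this API): a z-scored value x
    // can be mapped back to the original scale as
    // ```
    // original_value = x * original_stddev + original_mean
    // ```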
    message FeatureValueDomain {
      // The minimum permissible value for this feature.
      float min_value = 1;

      // The maximum permissible value for this feature.
      float max_value = 2;

      // If this input feature has been normalized to a mean value of 0,
      // the original_mean specifies the mean value of the domain prior to
      // normalization.
      float original_mean = 3;

      // If this input feature has been normalized to a standard deviation of
      // 1.0, the original_stddev specifies the standard deviation of the domain
      // prior to normalization.
      float original_stddev = 4;
    }

    // Visualization configurations for image explanation.
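    //
    // As a rough illustration only (the values shown are hypothetical, not
    // defaults defined by this API), a populated Visualization might look
    // like:
    // ```
    // {
    //   "type": "OUTLINES",
    //   "polarity": "POSITIVE",
    //   "color_map": "PINK_GREEN",
    //   "clip_percent_upperbound": 99.9,
    //   "clip_percent_lowerbound": 70,
    //   "overlay_type": "GRAYSCALE"
    // }
    // ```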
    message Visualization {
      // Type of the image visualization. Only applicable to
      // [Integrated Gradients attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution].
      enum Type {
        // Should not be used.
        TYPE_UNSPECIFIED = 0;

        // Shows which pixels contributed to the image prediction.
        PIXELS = 1;

        // Shows which region contributed to the image prediction by outlining
        // the region.
        OUTLINES = 2;
      }

      // Whether to only highlight pixels with positive contributions, negative
      // or both. Defaults to POSITIVE.
      enum Polarity {
        // Default value. This is the same as POSITIVE.
        POLARITY_UNSPECIFIED = 0;

        // Highlights the pixels/outlines that were most influential to the
        // model's prediction.
        POSITIVE = 1;

        // Setting polarity to negative highlights areas that do not lead to
        // the model's current prediction.
        NEGATIVE = 2;

        // Shows both positive and negative attributions.
        BOTH = 3;
      }

      // The color scheme used for highlighting areas.
      enum ColorMap {
        // Should not be used.
        COLOR_MAP_UNSPECIFIED = 0;

        // Positive: green. Negative: pink.
        PINK_GREEN = 1;

        // Viridis color map: A perceptually uniform color mapping which is
        // easier to see by those with colorblindness and progresses from yellow
        // to green to blue. Positive: yellow. Negative: blue.
        VIRIDIS = 2;

        // Positive: red. Negative: red.
        RED = 3;

        // Positive: green. Negative: green.
        GREEN = 4;

        // Positive: green. Negative: red.
        RED_GREEN = 6;

        // PiYG palette.
        PINK_WHITE_GREEN = 5;
      }

      // How the original image is displayed in the visualization.
      enum OverlayType {
        // Default value. This is the same as NONE.
        OVERLAY_TYPE_UNSPECIFIED = 0;

        // No overlay.
        NONE = 1;

        // The attributions are shown on top of the original image.
        ORIGINAL = 2;

        // The attributions are shown on top of a grayscale version of the
        // original image.
        GRAYSCALE = 3;

        // The attributions are used as a mask to reveal predictive parts of
        // the image and hide the un-predictive parts.
        MASK_BLACK = 4;
      }

      // Type of the image visualization. Only applicable to
      // [Integrated Gradients attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution].
      // OUTLINES shows regions of attribution, while PIXELS shows per-pixel
      // attribution. Defaults to OUTLINES.
      Type type = 1;

      // Whether to only highlight pixels with positive contributions, negative
      // or both. Defaults to POSITIVE.
      Polarity polarity = 2;

      // The color scheme used for the highlighted areas.
      //
      // Defaults to PINK_GREEN for
      // [Integrated Gradients attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
      // which shows positive attributions in green and negative in pink.
      //
      // Defaults to VIRIDIS for
      // [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution], which
      // highlights the most influential regions in yellow and the least
      // influential in blue.
      ColorMap color_map = 3;

      // Excludes attributions above the specified percentile from the
      // highlighted areas. Using clip_percent_upperbound and
      // clip_percent_lowerbound together can be useful for filtering out noise
      // and making it easier to see areas of strong attribution. Defaults to
      // 99.9.
      float clip_percent_upperbound = 4;

      // Excludes attributions below the specified percentile from the
      // highlighted areas. Defaults to 62.
      float clip_percent_lowerbound = 5;

      // How the original image is displayed in the visualization.
      // Adjusting the overlay can help increase visual clarity if the original
      // image makes it difficult to view the visualization. Defaults to NONE.
      OverlayType overlay_type = 6;
    }

    // Defines how a feature is encoded. Defaults to IDENTITY.
    enum Encoding {
      // Default value. This is the same as IDENTITY.
      ENCODING_UNSPECIFIED = 0;

      // The tensor represents one feature.
      IDENTITY = 1;

      // The tensor represents a bag of features where each index maps to
      // a feature. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided for
      // this encoding. For example:
      // ```
      // input = [27, 6.0, 150]
      // index_feature_mapping = ["age", "height", "weight"]
      // ```
      BAG_OF_FEATURES = 2;

      // The tensor represents a bag of features where each index maps to a
      // feature. Zero values in the tensor indicate that the feature is
      // non-existent. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided
      // for this encoding. For example:
      // ```
      // input = [2, 0, 5, 0, 1]
      // index_feature_mapping = ["a", "b", "c", "d", "e"]
      // ```
      BAG_OF_FEATURES_SPARSE = 3;

      // The tensor is a list of binaries representing whether a feature exists
      // or not (1 indicates existence). [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.index_feature_mapping]
      // must be provided for this encoding. For example:
      // ```
      // input = [1, 0, 1, 0, 1]
      // index_feature_mapping = ["a", "b", "c", "d", "e"]
      // ```
      INDICATOR = 4;

      // The tensor is encoded into a 1-dimensional array represented by an
      // encoded tensor. [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided
      // for this encoding. For example:
      // ```
      // input = ["This", "is", "a", "test", "."]
      // encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
      // ```
      COMBINED_EMBEDDING = 5;

      // Select this encoding when the input tensor is encoded into a
      // 2-dimensional array represented by an encoded tensor.
      // [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided for this
      // encoding. The first dimension of the encoded tensor's shape is the same
      // as the input tensor's shape. For example:
      // ```
      // input = ["This", "is", "a", "test", "."]
      // encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
      //            [0.2, 0.1, 0.4, 0.3, 0.5],
      //            [0.5, 0.1, 0.3, 0.5, 0.4],
      //            [0.5, 0.3, 0.1, 0.2, 0.4],
      //            [0.4, 0.3, 0.2, 0.5, 0.1]]
      // ```
      CONCAT_EMBEDDING = 6;
    }

    // Baseline inputs for this feature.
    //
    // If no baseline is specified, Vertex AI chooses the baseline for this
    // feature. If multiple baselines are specified, Vertex AI returns the
    // average attributions across them in [Attribution.feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions].
    //
    // For Vertex AI-provided Tensorflow images (both 1.x and 2.x), the shape
    // of each baseline must match the shape of the input tensor. If a scalar is
    // provided, we broadcast to the same shape as the input tensor.
    //
    // For custom images, each element of the baselines must be in the same
    // format as the feature's input in the
    // [instance][google.cloud.aiplatform.v1.ExplainRequest.instances][]. The schema of any single instance
    // may be specified via Endpoint's DeployedModels'
    // [Model's][google.cloud.aiplatform.v1.DeployedModel.model]
    // [PredictSchemata's][google.cloud.aiplatform.v1.Model.predict_schemata]
    // [instance_schema_uri][google.cloud.aiplatform.v1.PredictSchemata.instance_schema_uri].
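    //
    // As a rough illustration (the value is hypothetical), a tabular feature
    // might use an all-zero baseline:
    // ```
    // input_baselines = [0.0]
    // ```
    // A single scalar like this is broadcast to the shape of the input tensor.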
    repeated google.protobuf.Value input_baselines = 1;

    // Name of the input tensor for this feature. Required and is only
    // applicable to Vertex AI-provided images for Tensorflow.
    string input_tensor_name = 2;

    // Defines how the feature is encoded into the input tensor. Defaults to
    // IDENTITY.
    Encoding encoding = 3;

    // Modality of the feature. Valid values are: numeric, image. Defaults to
    // numeric.
    string modality = 4;

    // The domain details of the input feature value. Like min/max, original
    // mean or standard deviation if normalized.
    FeatureValueDomain feature_value_domain = 5;

    // Specifies the index of the values of the input tensor.
    // Required when the input tensor is a sparse representation. Refer to
    // Tensorflow documentation for more details:
    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
    string indices_tensor_name = 6;

    // Specifies the shape of the values of the input if the input is a sparse
    // representation. Refer to Tensorflow documentation for more details:
    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
    string dense_shape_tensor_name = 7;

    // A list of feature names for each index in the input tensor.
    // Required when the input [InputMetadata.encoding][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata.encoding] is BAG_OF_FEATURES,
    // BAG_OF_FEATURES_SPARSE, or INDICATOR.
    repeated string index_feature_mapping = 8;

    // Encoded tensor is a transformation of the input tensor. Must be provided
    // if choosing
    // [Integrated Gradients attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution]
    // or [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution] and the
    // input tensor is not differentiable.
    //
    // An encoded tensor is generated if the input tensor is encoded by a lookup
    // table.
    string encoded_tensor_name = 9;

    // A list of baselines for the encoded tensor.
    //
    // The shape of each baseline should match the shape of the encoded tensor.
    // If a scalar is provided, Vertex AI broadcasts to the same shape as the
    // encoded tensor.
    repeated google.protobuf.Value encoded_baselines = 10;

    // Visualization configurations for image explanation.
    Visualization visualization = 11;

    // Name of the group that the input belongs to. Features with the same group
    // name will be treated as one feature when computing attributions. Features
    // grouped together can have different shapes in value. If provided, there
    // will be one single attribution generated in
    // [Attribution.feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions], keyed by the group name.
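    //
    // As a rough illustration (the feature and tensor names are hypothetical),
    // two inputs that both set group_name = "location" produce a single
    // attribution keyed "location":
    // ```
    // "latitude":  { "input_tensor_name": "lat:0", "group_name": "location" }
    // "longitude": { "input_tensor_name": "lng:0", "group_name": "location" }
    // ```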
    string group_name = 12;
  }

  // Metadata of the prediction output to be explained.
  message OutputMetadata {
    // Defines how to map [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] to
    // [Attribution.output_display_name][google.cloud.aiplatform.v1.Attribution.output_display_name].
    //
    // If neither of the fields is specified,
    // [Attribution.output_display_name][google.cloud.aiplatform.v1.Attribution.output_display_name] will not be populated.
    oneof display_name_mapping {
      // Static mapping between the index and display name.
      //
      // Use this if the outputs are a deterministic n-dimensional array, e.g. a
      // list of scores of all the classes in a pre-defined order for a
      // multi-classification Model. It's not feasible if the outputs are
      // non-deterministic, e.g. the Model produces top-k classes or sorts the
      // outputs by their values.
      //
      // The shape of the value must be an n-dimensional array of strings. The
      // number of dimensions must match that of the outputs to be explained.
      // The [Attribution.output_display_name][google.cloud.aiplatform.v1.Attribution.output_display_name] is populated by indexing into this
      // mapping with [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index].
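      //
      // For example, for a 3-class classifier whose scores are emitted in a
      // fixed order (the class names here are hypothetical):
      // ```
      // index_display_name_mapping = ["cat", "dog", "bird"]
      // ```
      // An output_index of [1] would then map to the display name "dog".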
      google.protobuf.Value index_display_name_mapping = 1;

      // Specify a field name in the prediction to look for the display name.
      //
      // Use this if the prediction contains the display names for the outputs.
      //
      // The display names in the prediction must have the same shape as the
      // outputs, so that they can be located by [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] for
      // a specific output.
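      //
      // As a rough illustration (the field and label names are hypothetical),
      // if a prediction looks like
      // ```
      // prediction = {"scores": [0.1, 0.7, 0.2], "labels": ["cat", "dog", "bird"]}
      // ```
      // then setting display_name_mapping_key to "labels" lets the display
      // names be read from the prediction itself.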
      string display_name_mapping_key = 2;
    }

    // Name of the output tensor. Required and is only applicable to Vertex
    // AI-provided images for Tensorflow.
    string output_tensor_name = 3;
  }

  // Required. Map from feature names to feature input metadata. Keys are the
  // names of the features. Values are the specifications of the features.
  //
  // An empty InputMetadata is valid. It describes a text feature which has the
  // name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs]. The baseline
  // of the empty feature is chosen by Vertex AI.
  //
  // For Vertex AI-provided Tensorflow images, the key can be any friendly
  // name of the feature. Once specified,
  // [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] are keyed by
  // this key (if not grouped with another feature).
  //
  // For custom images, the key must match the key in
  // [instance][google.cloud.aiplatform.v1.ExplainRequest.instances].
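  //
  // As a rough illustration only (the feature and tensor names are
  // hypothetical), an inputs entry for a Tensorflow image might look like:
  // ```
  // inputs = {
  //   "my_feature": {
  //     "input_tensor_name": "serving_default_input:0"
  //   }
  // }
  // ```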
  map<string, InputMetadata> inputs = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. Map from output names to output metadata.
  //
  // For Vertex AI-provided Tensorflow images, keys can be any user-defined
  // string that consists of any UTF-8 characters.
  //
  // For custom images, keys are the name of the output field in the prediction
  // to be explained.
  //
  // Currently only one key is allowed.
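  //
  // As a rough illustration only (the output name and tensor name are
  // hypothetical), an outputs entry might look like:
  // ```
  // outputs = {
  //   "probabilities": {
  //     "output_tensor_name": "dense_1/Softmax:0"
  //   }
  // }
  // ```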
  map<string, OutputMetadata> outputs = 2 [(google.api.field_behavior) = REQUIRED];

  // Points to a YAML file stored on Google Cloud Storage describing the format
  // of the [feature attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions].
  // The schema is defined as an OpenAPI 3.0.2 [Schema
  // Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject).
  // AutoML tabular Models always have this field populated by Vertex AI.
  // Note: The URI given on output may be different, including the URI scheme,
  // than the one given on input. The output URI will point to a location where
  // the user only has read access.
  string feature_attributions_schema_uri = 3;

  // Name of the source to generate embeddings for example-based explanations.
  string latent_space_source = 5;
}