// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. syntax = "proto3";
  15. package google.cloud.aiplatform.v1;
  16. import "google/api/field_behavior.proto";
  17. import "google/cloud/aiplatform/v1/explanation_metadata.proto";
  18. import "google/protobuf/struct.proto";
  19. option csharp_namespace = "Google.Cloud.AIPlatform.V1";
  20. option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1;aiplatform";
  21. option java_multiple_files = true;
  22. option java_outer_classname = "ExplanationProto";
  23. option java_package = "com.google.cloud.aiplatform.v1";
  24. option php_namespace = "Google\\Cloud\\AIPlatform\\V1";
  25. option ruby_package = "Google::Cloud::AIPlatform::V1";
  26. // Explanation of a prediction (provided in [PredictResponse.predictions][google.cloud.aiplatform.v1.PredictResponse.predictions])
  27. // produced by the Model on a given [instance][google.cloud.aiplatform.v1.ExplainRequest.instances].
  28. message Explanation {
  29. // Output only. Feature attributions grouped by predicted outputs.
  30. //
  31. // For Models that predict only one output, such as regression Models that
  32. // predict only one score, there is only one attibution that explains the
  33. // predicted output. For Models that predict multiple outputs, such as
  34. // multiclass Models that predict multiple classes, each element explains one
  35. // specific item. [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] can be used to identify which
  36. // output this attribution is explaining.
  37. //
  38. // If users set [ExplanationParameters.top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k], the attributions are sorted
  39. // by [instance_output_value][Attributions.instance_output_value] in
  40. // descending order. If [ExplanationParameters.output_indices][google.cloud.aiplatform.v1.ExplanationParameters.output_indices] is specified,
  41. // the attributions are stored by [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] in the same
  42. // order as they appear in the output_indices.
  43. repeated Attribution attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  44. // Output only. List of the nearest neighbors for example-based explanations.
  45. //
  46. // For models deployed with the examples explanations feature enabled, the
  47. // attributions field is empty and instead the neighbors field is populated.
  48. repeated Neighbor neighbors = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  49. }
  50. // Aggregated explanation metrics for a Model over a set of instances.
  51. message ModelExplanation {
  52. // Output only. Aggregated attributions explaining the Model's prediction outputs over the
  53. // set of instances. The attributions are grouped by outputs.
  54. //
  55. // For Models that predict only one output, such as regression Models that
  56. // predict only one score, there is only one attibution that explains the
  57. // predicted output. For Models that predict multiple outputs, such as
  58. // multiclass Models that predict multiple classes, each element explains one
  59. // specific item. [Attribution.output_index][google.cloud.aiplatform.v1.Attribution.output_index] can be used to identify which
  60. // output this attribution is explaining.
  61. //
  62. // The [baselineOutputValue][google.cloud.aiplatform.v1.Attribution.baseline_output_value],
  63. // [instanceOutputValue][google.cloud.aiplatform.v1.Attribution.instance_output_value] and
  64. // [featureAttributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] fields are
  65. // averaged over the test data.
  66. //
  67. // NOTE: Currently AutoML tabular classification Models produce only one
  68. // attribution, which averages attributions over all the classes it predicts.
  69. // [Attribution.approximation_error][google.cloud.aiplatform.v1.Attribution.approximation_error] is not populated.
  70. repeated Attribution mean_attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  71. }
  72. // Attribution that explains a particular prediction output.
  73. message Attribution {
  74. // Output only. Model predicted output if the input instance is constructed from the
  75. // baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
  76. // The field name of the output is determined by the key in
  77. // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
  78. //
  79. // If the Model's predicted output has multiple dimensions (rank > 1), this is
  80. // the value in the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
  81. //
  82. // If there are multiple baselines, their output values are averaged.
  83. double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  84. // Output only. Model predicted output on the corresponding [explanation
  85. // instance][ExplainRequest.instances]. The field name of the output is
  86. // determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
  87. //
  88. // If the Model predicted output has multiple dimensions, this is the value in
  89. // the output located by [output_index][google.cloud.aiplatform.v1.Attribution.output_index].
  90. double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  91. // Output only. Attributions of each explained feature. Features are extracted from
  92. // the [prediction instances][google.cloud.aiplatform.v1.ExplainRequest.instances] according to
  93. // [explanation metadata for inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
  94. //
  95. // The value is a struct, whose keys are the name of the feature. The values
  96. // are how much the feature in the [instance][google.cloud.aiplatform.v1.ExplainRequest.instances]
  97. // contributed to the predicted result.
  98. //
  99. // The format of the value is determined by the feature's input format:
  100. //
  101. // * If the feature is a scalar value, the attribution value is a
  102. // [floating number][google.protobuf.Value.number_value].
  103. //
  104. // * If the feature is an array of scalar values, the attribution value is
  105. // an [array][google.protobuf.Value.list_value].
  106. //
  107. // * If the feature is a struct, the attribution value is a
  108. // [struct][google.protobuf.Value.struct_value]. The keys in the
  109. // attribution value struct are the same as the keys in the feature
  110. // struct. The formats of the values in the attribution struct are
  111. // determined by the formats of the values in the feature struct.
  112. //
  113. // The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1.ExplanationMetadata.feature_attributions_schema_uri] field,
  114. // pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] field of the
  115. // [Endpoint.deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] object, points to the schema file that
  116. // describes the features and their attribution values (if it is populated).
  117. google.protobuf.Value feature_attributions = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
  118. // Output only. The index that locates the explained prediction output.
  119. //
  120. // If the prediction output is a scalar value, output_index is not populated.
  121. // If the prediction output has multiple dimensions, the length of the
  122. // output_index list is the same as the number of dimensions of the output.
  123. // The i-th element in output_index is the element index of the i-th dimension
  124. // of the output vector. Indices start from 0.
  125. repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
  126. // Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1.Attribution.output_index]. For example,
  127. // the predicted class name by a multi-classification Model.
  128. //
  129. // This field is only populated iff the Model predicts display names as a
  130. // separate field along with the explained output. The predicted display name
  131. // must has the same shape of the explained output, and can be located using
  132. // output_index.
  133. string output_display_name = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
  134. // Output only. Error of [feature_attributions][google.cloud.aiplatform.v1.Attribution.feature_attributions] caused by approximation used in the
  135. // explanation method. Lower value means more precise attributions.
  136. //
  137. // * For Sampled Shapley
  138. // [attribution][google.cloud.aiplatform.v1.ExplanationParameters.sampled_shapley_attribution],
  139. // increasing [path_count][google.cloud.aiplatform.v1.SampledShapleyAttribution.path_count] might reduce
  140. // the error.
  141. // * For Integrated Gradients
  142. // [attribution][google.cloud.aiplatform.v1.ExplanationParameters.integrated_gradients_attribution],
  143. // increasing [step_count][google.cloud.aiplatform.v1.IntegratedGradientsAttribution.step_count] might
  144. // reduce the error.
  145. // * For [XRAI attribution][google.cloud.aiplatform.v1.ExplanationParameters.xrai_attribution],
  146. // increasing
  147. // [step_count][google.cloud.aiplatform.v1.XraiAttribution.step_count] might reduce the error.
  148. //
  149. // See [this introduction](/vertex-ai/docs/explainable-ai/overview)
  150. // for more information.
  151. double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
  152. // Output only. Name of the explain output. Specified as the key in
  153. // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1.ExplanationMetadata.outputs].
  154. string output_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
  155. }
  156. // Neighbors for example-based explanations.
  157. message Neighbor {
  158. // Output only. The neighbor id.
  159. string neighbor_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  160. // Output only. The neighbor distance.
  161. double neighbor_distance = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  162. }
  163. // Specification of Model explanation.
  164. message ExplanationSpec {
  165. // Required. Parameters that configure explaining of the Model's predictions.
  166. ExplanationParameters parameters = 1 [(google.api.field_behavior) = REQUIRED];
  167. // Optional. Metadata describing the Model's input and output for explanation.
  168. ExplanationMetadata metadata = 2 [(google.api.field_behavior) = OPTIONAL];
  169. }
  170. // Parameters to configure explaining for Model's predictions.
  171. message ExplanationParameters {
  172. oneof method {
  173. // An attribution method that approximates Shapley values for features that
  174. // contribute to the label being predicted. A sampling strategy is used to
  175. // approximate the value rather than considering all subsets of features.
  176. // Refer to this paper for model details: https://arxiv.org/abs/1306.4265.
  177. SampledShapleyAttribution sampled_shapley_attribution = 1;
  178. // An attribution method that computes Aumann-Shapley values taking
  179. // advantage of the model's fully differentiable structure. Refer to this
  180. // paper for more details: https://arxiv.org/abs/1703.01365
  181. IntegratedGradientsAttribution integrated_gradients_attribution = 2;
  182. // An attribution method that redistributes Integrated Gradients
  183. // attribution to segmented regions, taking advantage of the model's fully
  184. // differentiable structure. Refer to this paper for
  185. // more details: https://arxiv.org/abs/1906.02825
  186. //
  187. // XRAI currently performs better on natural images, like a picture of a
  188. // house or an animal. If the images are taken in artificial environments,
  189. // like a lab or manufacturing line, or from diagnostic equipment, like
  190. // x-rays or quality-control cameras, use Integrated Gradients instead.
  191. XraiAttribution xrai_attribution = 3;
  192. }
  193. // If populated, returns attributions for top K indices of outputs
  194. // (defaults to 1). Only applies to Models that predicts more than one outputs
  195. // (e,g, multi-class Models). When set to -1, returns explanations for all
  196. // outputs.
  197. int32 top_k = 4;
  198. // If populated, only returns attributions that have
  199. // [output_index][google.cloud.aiplatform.v1.Attribution.output_index] contained in output_indices. It
  200. // must be an ndarray of integers, with the same shape of the output it's
  201. // explaining.
  202. //
  203. // If not populated, returns attributions for [top_k][google.cloud.aiplatform.v1.ExplanationParameters.top_k] indices of outputs.
  204. // If neither top_k nor output_indices is populated, returns the argmax
  205. // index of the outputs.
  206. //
  207. // Only applicable to Models that predict multiple outputs (e,g, multi-class
  208. // Models that predict multiple classes).
  209. google.protobuf.ListValue output_indices = 5;
  210. }
  211. // An attribution method that approximates Shapley values for features that
  212. // contribute to the label being predicted. A sampling strategy is used to
  213. // approximate the value rather than considering all subsets of features.
  214. message SampledShapleyAttribution {
  215. // Required. The number of feature permutations to consider when approximating the
  216. // Shapley values.
  217. //
  218. // Valid range of its value is [1, 50], inclusively.
  219. int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
  220. }
  221. // An attribution method that computes the Aumann-Shapley value taking advantage
  222. // of the model's fully differentiable structure. Refer to this paper for
  223. // more details: https://arxiv.org/abs/1703.01365
  224. message IntegratedGradientsAttribution {
  225. // Required. The number of steps for approximating the path integral.
  226. // A good value to start is 50 and gradually increase until the
  227. // sum to diff property is within the desired error range.
  228. //
  229. // Valid range of its value is [1, 100], inclusively.
  230. int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
  231. // Config for SmoothGrad approximation of gradients.
  232. //
  233. // When enabled, the gradients are approximated by averaging the gradients
  234. // from noisy samples in the vicinity of the inputs. Adding
  235. // noise can help improve the computed gradients. Refer to this paper for more
  236. // details: https://arxiv.org/pdf/1706.03825.pdf
  237. SmoothGradConfig smooth_grad_config = 2;
  238. // Config for IG with blur baseline.
  239. //
  240. // When enabled, a linear path from the maximally blurred image to the input
  241. // image is created. Using a blurred baseline instead of zero (black image) is
  242. // motivated by the BlurIG approach explained here:
  243. // https://arxiv.org/abs/2004.03383
  244. BlurBaselineConfig blur_baseline_config = 3;
  245. }
  246. // An explanation method that redistributes Integrated Gradients
  247. // attributions to segmented regions, taking advantage of the model's fully
  248. // differentiable structure. Refer to this paper for more details:
  249. // https://arxiv.org/abs/1906.02825
  250. //
  251. // Supported only by image Models.
  252. message XraiAttribution {
  253. // Required. The number of steps for approximating the path integral.
  254. // A good value to start is 50 and gradually increase until the
  255. // sum to diff property is met within the desired error range.
  256. //
  257. // Valid range of its value is [1, 100], inclusively.
  258. int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
  259. // Config for SmoothGrad approximation of gradients.
  260. //
  261. // When enabled, the gradients are approximated by averaging the gradients
  262. // from noisy samples in the vicinity of the inputs. Adding
  263. // noise can help improve the computed gradients. Refer to this paper for more
  264. // details: https://arxiv.org/pdf/1706.03825.pdf
  265. SmoothGradConfig smooth_grad_config = 2;
  266. // Config for XRAI with blur baseline.
  267. //
  268. // When enabled, a linear path from the maximally blurred image to the input
  269. // image is created. Using a blurred baseline instead of zero (black image) is
  270. // motivated by the BlurIG approach explained here:
  271. // https://arxiv.org/abs/2004.03383
  272. BlurBaselineConfig blur_baseline_config = 3;
  273. }
  274. // Config for SmoothGrad approximation of gradients.
  275. //
  276. // When enabled, the gradients are approximated by averaging the gradients from
  277. // noisy samples in the vicinity of the inputs. Adding noise can help improve
  278. // the computed gradients. Refer to this paper for more details:
  279. // https://arxiv.org/pdf/1706.03825.pdf
  280. message SmoothGradConfig {
  281. // Represents the standard deviation of the gaussian kernel
  282. // that will be used to add noise to the interpolated inputs
  283. // prior to computing gradients.
  284. oneof GradientNoiseSigma {
  285. // This is a single float value and will be used to add noise to all the
  286. // features. Use this field when all features are normalized to have the
  287. // same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where
  288. // features are normalized to have 0-mean and 1-variance. Learn more about
  289. // [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization).
  290. //
  291. // For best results the recommended value is about 10% - 20% of the standard
  292. // deviation of the input feature. Refer to section 3.2 of the SmoothGrad
  293. // paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
  294. //
  295. // If the distribution is different per feature, set
  296. // [feature_noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.feature_noise_sigma] instead
  297. // for each feature.
  298. float noise_sigma = 1;
  299. // This is similar to [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma], but
  300. // provides additional flexibility. A separate noise sigma can be provided
  301. // for each feature, which is useful if their distributions are different.
  302. // No noise is added to features that are not set. If this field is unset,
  303. // [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma] will be used for all
  304. // features.
  305. FeatureNoiseSigma feature_noise_sigma = 2;
  306. }
  307. // The number of gradient samples to use for
  308. // approximation. The higher this number, the more accurate the gradient
  309. // is, but the runtime complexity increases by this factor as well.
  310. // Valid range of its value is [1, 50]. Defaults to 3.
  311. int32 noisy_sample_count = 3;
  312. }
  313. // Noise sigma by features. Noise sigma represents the standard deviation of the
  314. // gaussian kernel that will be used to add noise to interpolated inputs prior
  315. // to computing gradients.
  316. message FeatureNoiseSigma {
  317. // Noise sigma for a single feature.
  318. message NoiseSigmaForFeature {
  319. // The name of the input feature for which noise sigma is provided. The
  320. // features are defined in
  321. // [explanation metadata inputs][google.cloud.aiplatform.v1.ExplanationMetadata.inputs].
  322. string name = 1;
  323. // This represents the standard deviation of the Gaussian kernel that will
  324. // be used to add noise to the feature prior to computing gradients. Similar
  325. // to [noise_sigma][google.cloud.aiplatform.v1.SmoothGradConfig.noise_sigma] but represents the
  326. // noise added to the current feature. Defaults to 0.1.
  327. float sigma = 2;
  328. }
  329. // Noise sigma per feature. No noise is added to features that are not set.
  330. repeated NoiseSigmaForFeature noise_sigma = 1;
  331. }
  332. // Config for blur baseline.
  333. //
  334. // When enabled, a linear path from the maximally blurred image to the input
  335. // image is created. Using a blurred baseline instead of zero (black image) is
  336. // motivated by the BlurIG approach explained here:
  337. // https://arxiv.org/abs/2004.03383
  338. message BlurBaselineConfig {
  339. // The standard deviation of the blur kernel for the blurred baseline. The
  340. // same blurring parameter is used for both the height and the width
  341. // dimension. If not set, the method defaults to the zero (i.e. black for
  342. // images) baseline.
  343. float max_blur_sigma = 1;
  344. }
  345. // The [ExplanationSpec][google.cloud.aiplatform.v1.ExplanationSpec] entries that can be overridden at
  346. // [online explanation][google.cloud.aiplatform.v1.PredictionService.Explain] time.
  347. message ExplanationSpecOverride {
  348. // The parameters to be overridden. Note that the
  349. // [method][google.cloud.aiplatform.v1.ExplanationParameters.method] cannot be changed. If not specified,
  350. // no parameter is overridden.
  351. ExplanationParameters parameters = 1;
  352. // The metadata to be overridden. If not specified, no metadata is overridden.
  353. ExplanationMetadataOverride metadata = 2;
  354. // The example-based explanations parameter overrides.
  355. ExamplesOverride examples_override = 3;
  356. }
  357. // The [ExplanationMetadata][google.cloud.aiplatform.v1.ExplanationMetadata] entries that can be overridden at
  358. // [online explanation][google.cloud.aiplatform.v1.PredictionService.Explain] time.
  359. message ExplanationMetadataOverride {
  360. // The [input metadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata] entries to be
  361. // overridden.
  362. message InputMetadataOverride {
  363. // Baseline inputs for this feature.
  364. //
  365. // This overrides the `input_baseline` field of the
  366. // [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1.ExplanationMetadata.InputMetadata]
  367. // object of the corresponding feature's input metadata. If it's not
  368. // specified, the original baselines are not overridden.
  369. repeated google.protobuf.Value input_baselines = 1;
  370. }
  371. // Required. Overrides the [input metadata][google.cloud.aiplatform.v1.ExplanationMetadata.inputs] of the features.
  372. // The key is the name of the feature to be overridden. The keys specified
  373. // here must exist in the input metadata to be overridden. If a feature is
  374. // not specified here, the corresponding feature's input metadata is not
  375. // overridden.
  376. map<string, InputMetadataOverride> inputs = 1 [(google.api.field_behavior) = REQUIRED];
  377. }
  378. // Overrides for example-based explanations.
  379. message ExamplesOverride {
  380. // Data format enum.
  381. enum DataFormat {
  382. // Unspecified format. Must not be used.
  383. DATA_FORMAT_UNSPECIFIED = 0;
  384. // Provided data is a set of model inputs.
  385. INSTANCES = 1;
  386. // Provided data is a set of embeddings.
  387. EMBEDDINGS = 2;
  388. }
  389. // The number of neighbors to return.
  390. int32 neighbor_count = 1;
  391. // The number of neighbors to return that have the same crowding tag.
  392. int32 crowding_count = 2;
  393. // Restrict the resulting nearest neighbors to respect these constraints.
  394. repeated ExamplesRestrictionsNamespace restrictions = 3;
  395. // If true, return the embeddings instead of neighbors.
  396. bool return_embeddings = 4;
  397. // The format of the data being provided with each call.
  398. DataFormat data_format = 5;
  399. }
  400. // Restrictions namespace for example-based explanations overrides.
  401. message ExamplesRestrictionsNamespace {
  402. // The namespace name.
  403. string namespace_name = 1;
  404. // The list of allowed tags.
  405. repeated string allow = 2;
  406. // The list of deny tags.
  407. repeated string deny = 3;
  408. }