// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";

package google.cloud.aiplatform.v1beta1;

import "google/api/field_behavior.proto";
import "google/cloud/aiplatform/v1beta1/explanation_metadata.proto";
import "google/cloud/aiplatform/v1beta1/io.proto";
import "google/protobuf/struct.proto";

option csharp_namespace = "Google.Cloud.AIPlatform.V1Beta1";
option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
option java_multiple_files = true;
option java_outer_classname = "ExplanationProto";
option java_package = "com.google.cloud.aiplatform.v1beta1";
option php_namespace = "Google\\Cloud\\AIPlatform\\V1beta1";
option ruby_package = "Google::Cloud::AIPlatform::V1beta1";
  27. // Explanation of a prediction (provided in [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
  28. // produced by the Model on a given [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
  29. message Explanation {
  30. // Output only. Feature attributions grouped by predicted outputs.
  31. //
  32. // For Models that predict only one output, such as regression Models that
  33. // predict only one score, there is only one attibution that explains the
  34. // predicted output. For Models that predict multiple outputs, such as
  35. // multiclass Models that predict multiple classes, each element explains one
  36. // specific item. [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which
  37. // output this attribution is explaining.
  38. //
  39. // If users set [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], the attributions are sorted
  40. // by [instance_output_value][Attributions.instance_output_value] in
  41. // descending order. If [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] is specified,
  42. // the attributions are stored by [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] in the same
  43. // order as they appear in the output_indices.
  44. repeated Attribution attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  45. // Output only. List of the nearest neighbors for example-based explanations.
  46. //
  47. // For models deployed with the examples explanations feature enabled, the
  48. // attributions field is empty and instead the neighbors field is populated.
  49. repeated Neighbor neighbors = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  50. }
  51. // Aggregated explanation metrics for a Model over a set of instances.
  52. message ModelExplanation {
  53. // Output only. Aggregated attributions explaining the Model's prediction outputs over the
  54. // set of instances. The attributions are grouped by outputs.
  55. //
  56. // For Models that predict only one output, such as regression Models that
  57. // predict only one score, there is only one attibution that explains the
  58. // predicted output. For Models that predict multiple outputs, such as
  59. // multiclass Models that predict multiple classes, each element explains one
  60. // specific item. [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which
  61. // output this attribution is explaining.
  62. //
  63. // The [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
  64. // [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] and
  65. // [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] fields are
  66. // averaged over the test data.
  67. //
  68. // NOTE: Currently AutoML tabular classification Models produce only one
  69. // attribution, which averages attributions over all the classes it predicts.
  70. // [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] is not populated.
  71. repeated Attribution mean_attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  72. }
  73. // Attribution that explains a particular prediction output.
  74. message Attribution {
  75. // Output only. Model predicted output if the input instance is constructed from the
  76. // baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
  77. // The field name of the output is determined by the key in
  78. // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
  79. //
  80. // If the Model's predicted output has multiple dimensions (rank > 1), this is
  81. // the value in the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
  82. //
  83. // If there are multiple baselines, their output values are averaged.
  84. double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  85. // Output only. Model predicted output on the corresponding [explanation
  86. // instance][ExplainRequest.instances]. The field name of the output is
  87. // determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
  88. //
  89. // If the Model predicted output has multiple dimensions, this is the value in
  90. // the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
  91. double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  92. // Output only. Attributions of each explained feature. Features are extracted from
  93. // the [prediction instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] according to
  94. // [explanation metadata for inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
  95. //
  96. // The value is a struct, whose keys are the name of the feature. The values
  97. // are how much the feature in the [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
  98. // contributed to the predicted result.
  99. //
  100. // The format of the value is determined by the feature's input format:
  101. //
  102. // * If the feature is a scalar value, the attribution value is a
  103. // [floating number][google.protobuf.Value.number_value].
  104. //
  105. // * If the feature is an array of scalar values, the attribution value is
  106. // an [array][google.protobuf.Value.list_value].
  107. //
  108. // * If the feature is a struct, the attribution value is a
  109. // [struct][google.protobuf.Value.struct_value]. The keys in the
  110. // attribution value struct are the same as the keys in the feature
  111. // struct. The formats of the values in the attribution struct are
  112. // determined by the formats of the values in the feature struct.
  113. //
  114. // The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] field,
  115. // pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] field of the
  116. // [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object, points to the schema file that
  117. // describes the features and their attribution values (if it is populated).
  118. google.protobuf.Value feature_attributions = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
  119. // Output only. The index that locates the explained prediction output.
  120. //
  121. // If the prediction output is a scalar value, output_index is not populated.
  122. // If the prediction output has multiple dimensions, the length of the
  123. // output_index list is the same as the number of dimensions of the output.
  124. // The i-th element in output_index is the element index of the i-th dimension
  125. // of the output vector. Indices start from 0.
  126. repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
  127. // Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index]. For example,
  128. // the predicted class name by a multi-classification Model.
  129. //
  130. // This field is only populated iff the Model predicts display names as a
  131. // separate field along with the explained output. The predicted display name
  132. // must has the same shape of the explained output, and can be located using
  133. // output_index.
  134. string output_display_name = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
  135. // Output only. Error of [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] caused by approximation used in the
  136. // explanation method. Lower value means more precise attributions.
  137. //
  138. // * For Sampled Shapley
  139. // [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.sampled_shapley_attribution],
  140. // increasing [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] might reduce
  141. // the error.
  142. // * For Integrated Gradients
  143. // [attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.integrated_gradients_attribution],
  144. // increasing [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] might
  145. // reduce the error.
  146. // * For [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution],
  147. // increasing
  148. // [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] might reduce the error.
  149. //
  150. // See [this introduction](/vertex-ai/docs/explainable-ai/overview)
  151. // for more information.
  152. double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
  153. // Output only. Name of the explain output. Specified as the key in
  154. // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
  155. string output_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
  156. }
  157. // Neighbors for example-based explanations.
  158. message Neighbor {
  159. // Output only. The neighbor id.
  160. string neighbor_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
  161. // Output only. The neighbor distance.
  162. double neighbor_distance = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
  163. }
  164. // Specification of Model explanation.
  165. message ExplanationSpec {
  166. // Required. Parameters that configure explaining of the Model's predictions.
  167. ExplanationParameters parameters = 1 [(google.api.field_behavior) = REQUIRED];
  168. // Optional. Metadata describing the Model's input and output for explanation.
  169. ExplanationMetadata metadata = 2 [(google.api.field_behavior) = OPTIONAL];
  170. }
  171. // Parameters to configure explaining for Model's predictions.
  172. message ExplanationParameters {
  173. oneof method {
  174. // An attribution method that approximates Shapley values for features that
  175. // contribute to the label being predicted. A sampling strategy is used to
  176. // approximate the value rather than considering all subsets of features.
  177. // Refer to this paper for model details: https://arxiv.org/abs/1306.4265.
  178. SampledShapleyAttribution sampled_shapley_attribution = 1;
  179. // An attribution method that computes Aumann-Shapley values taking
  180. // advantage of the model's fully differentiable structure. Refer to this
  181. // paper for more details: https://arxiv.org/abs/1703.01365
  182. IntegratedGradientsAttribution integrated_gradients_attribution = 2;
  183. // An attribution method that redistributes Integrated Gradients
  184. // attribution to segmented regions, taking advantage of the model's fully
  185. // differentiable structure. Refer to this paper for
  186. // more details: https://arxiv.org/abs/1906.02825
  187. //
  188. // XRAI currently performs better on natural images, like a picture of a
  189. // house or an animal. If the images are taken in artificial environments,
  190. // like a lab or manufacturing line, or from diagnostic equipment, like
  191. // x-rays or quality-control cameras, use Integrated Gradients instead.
  192. XraiAttribution xrai_attribution = 3;
  193. // Example-based explanations that returns the nearest neighbors from the
  194. // provided dataset.
  195. Examples examples = 7;
  196. }
  197. // If populated, returns attributions for top K indices of outputs
  198. // (defaults to 1). Only applies to Models that predicts more than one outputs
  199. // (e,g, multi-class Models). When set to -1, returns explanations for all
  200. // outputs.
  201. int32 top_k = 4;
  202. // If populated, only returns attributions that have
  203. // [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] contained in output_indices. It
  204. // must be an ndarray of integers, with the same shape of the output it's
  205. // explaining.
  206. //
  207. // If not populated, returns attributions for [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] indices of outputs.
  208. // If neither top_k nor output_indices is populated, returns the argmax
  209. // index of the outputs.
  210. //
  211. // Only applicable to Models that predict multiple outputs (e,g, multi-class
  212. // Models that predict multiple classes).
  213. google.protobuf.ListValue output_indices = 5;
  214. }
  215. // An attribution method that approximates Shapley values for features that
  216. // contribute to the label being predicted. A sampling strategy is used to
  217. // approximate the value rather than considering all subsets of features.
  218. message SampledShapleyAttribution {
  219. // Required. The number of feature permutations to consider when approximating the
  220. // Shapley values.
  221. //
  222. // Valid range of its value is [1, 50], inclusively.
  223. int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
  224. }
  225. // An attribution method that computes the Aumann-Shapley value taking advantage
  226. // of the model's fully differentiable structure. Refer to this paper for
  227. // more details: https://arxiv.org/abs/1703.01365
  228. message IntegratedGradientsAttribution {
  229. // Required. The number of steps for approximating the path integral.
  230. // A good value to start is 50 and gradually increase until the
  231. // sum to diff property is within the desired error range.
  232. //
  233. // Valid range of its value is [1, 100], inclusively.
  234. int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
  235. // Config for SmoothGrad approximation of gradients.
  236. //
  237. // When enabled, the gradients are approximated by averaging the gradients
  238. // from noisy samples in the vicinity of the inputs. Adding
  239. // noise can help improve the computed gradients. Refer to this paper for more
  240. // details: https://arxiv.org/pdf/1706.03825.pdf
  241. SmoothGradConfig smooth_grad_config = 2;
  242. // Config for IG with blur baseline.
  243. //
  244. // When enabled, a linear path from the maximally blurred image to the input
  245. // image is created. Using a blurred baseline instead of zero (black image) is
  246. // motivated by the BlurIG approach explained here:
  247. // https://arxiv.org/abs/2004.03383
  248. BlurBaselineConfig blur_baseline_config = 3;
  249. }
  250. // An explanation method that redistributes Integrated Gradients
  251. // attributions to segmented regions, taking advantage of the model's fully
  252. // differentiable structure. Refer to this paper for more details:
  253. // https://arxiv.org/abs/1906.02825
  254. //
  255. // Supported only by image Models.
  256. message XraiAttribution {
  257. // Required. The number of steps for approximating the path integral.
  258. // A good value to start is 50 and gradually increase until the
  259. // sum to diff property is met within the desired error range.
  260. //
  261. // Valid range of its value is [1, 100], inclusively.
  262. int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
  263. // Config for SmoothGrad approximation of gradients.
  264. //
  265. // When enabled, the gradients are approximated by averaging the gradients
  266. // from noisy samples in the vicinity of the inputs. Adding
  267. // noise can help improve the computed gradients. Refer to this paper for more
  268. // details: https://arxiv.org/pdf/1706.03825.pdf
  269. SmoothGradConfig smooth_grad_config = 2;
  270. // Config for XRAI with blur baseline.
  271. //
  272. // When enabled, a linear path from the maximally blurred image to the input
  273. // image is created. Using a blurred baseline instead of zero (black image) is
  274. // motivated by the BlurIG approach explained here:
  275. // https://arxiv.org/abs/2004.03383
  276. BlurBaselineConfig blur_baseline_config = 3;
  277. }
  278. // Config for SmoothGrad approximation of gradients.
  279. //
  280. // When enabled, the gradients are approximated by averaging the gradients from
  281. // noisy samples in the vicinity of the inputs. Adding noise can help improve
  282. // the computed gradients. Refer to this paper for more details:
  283. // https://arxiv.org/pdf/1706.03825.pdf
  284. message SmoothGradConfig {
  285. // Represents the standard deviation of the gaussian kernel
  286. // that will be used to add noise to the interpolated inputs
  287. // prior to computing gradients.
  288. oneof GradientNoiseSigma {
  289. // This is a single float value and will be used to add noise to all the
  290. // features. Use this field when all features are normalized to have the
  291. // same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where
  292. // features are normalized to have 0-mean and 1-variance. Learn more about
  293. // [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization).
  294. //
  295. // For best results the recommended value is about 10% - 20% of the standard
  296. // deviation of the input feature. Refer to section 3.2 of the SmoothGrad
  297. // paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
  298. //
  299. // If the distribution is different per feature, set
  300. // [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] instead
  301. // for each feature.
  302. float noise_sigma = 1;
  303. // This is similar to [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], but
  304. // provides additional flexibility. A separate noise sigma can be provided
  305. // for each feature, which is useful if their distributions are different.
  306. // No noise is added to features that are not set. If this field is unset,
  307. // [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] will be used for all
  308. // features.
  309. FeatureNoiseSigma feature_noise_sigma = 2;
  310. }
  311. // The number of gradient samples to use for
  312. // approximation. The higher this number, the more accurate the gradient
  313. // is, but the runtime complexity increases by this factor as well.
  314. // Valid range of its value is [1, 50]. Defaults to 3.
  315. int32 noisy_sample_count = 3;
  316. }
  317. // Noise sigma by features. Noise sigma represents the standard deviation of the
  318. // gaussian kernel that will be used to add noise to interpolated inputs prior
  319. // to computing gradients.
  320. message FeatureNoiseSigma {
  321. // Noise sigma for a single feature.
  322. message NoiseSigmaForFeature {
  323. // The name of the input feature for which noise sigma is provided. The
  324. // features are defined in
  325. // [explanation metadata inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
  326. string name = 1;
  327. // This represents the standard deviation of the Gaussian kernel that will
  328. // be used to add noise to the feature prior to computing gradients. Similar
  329. // to [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] but represents the
  330. // noise added to the current feature. Defaults to 0.1.
  331. float sigma = 2;
  332. }
  333. // Noise sigma per feature. No noise is added to features that are not set.
  334. repeated NoiseSigmaForFeature noise_sigma = 1;
  335. }
  336. // Config for blur baseline.
  337. //
  338. // When enabled, a linear path from the maximally blurred image to the input
  339. // image is created. Using a blurred baseline instead of zero (black image) is
  340. // motivated by the BlurIG approach explained here:
  341. // https://arxiv.org/abs/2004.03383
  342. message BlurBaselineConfig {
  343. // The standard deviation of the blur kernel for the blurred baseline. The
  344. // same blurring parameter is used for both the height and the width
  345. // dimension. If not set, the method defaults to the zero (i.e. black for
  346. // images) baseline.
  347. float max_blur_sigma = 1;
  348. }
  349. // Example-based explainability that returns the nearest neighbors from the
  350. // provided dataset.
  351. message Examples {
  352. oneof config {
  353. // The configuration for the generated index, the semantics are the same as
  354. // [metadata][google.cloud.aiplatform.v1beta1.Index.metadata] and should match NearestNeighborSearchConfig.
  355. google.protobuf.Value nearest_neighbor_search_config = 2;
  356. // Preset config based on the desired query speed-precision trade-off
  357. // and modality
  358. Presets presets = 4;
  359. }
  360. // The Cloud Storage location for the input instances.
  361. GcsSource gcs_source = 1;
  362. // The number of neighbors to return.
  363. int32 neighbor_count = 3;
  364. }
  365. // Preset configuration for example-based explanations
  366. message Presets {
  367. // Preset option controlling parameters for query speed-precision trade-off
  368. enum Query {
  369. // More precise neighbors as a trade-off against slower response.
  370. // This is also the default value (field-number 0).
  371. PRECISE = 0;
  372. // Faster response as a trade-off against less precise neighbors.
  373. FAST = 1;
  374. }
  375. // Preset option controlling parameters for different modalities
  376. enum Modality {
  377. // Should not be set. Added as a recommended best practice for enums
  378. MODALITY_UNSPECIFIED = 0;
  379. // IMAGE modality
  380. IMAGE = 1;
  381. // TEXT modality
  382. TEXT = 2;
  383. // TABULAR modality
  384. TABULAR = 3;
  385. }
  386. // Preset option controlling parameters for query speed-precision trade-off
  387. optional Query query = 1;
  388. // Preset option controlling parameters for different modalities
  389. Modality modality = 2;
  390. }
  391. // The [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] entries that can be overridden at
  392. // [online explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
  393. message ExplanationSpecOverride {
  394. // The parameters to be overridden. Note that the
  395. // [method][google.cloud.aiplatform.v1beta1.ExplanationParameters.method] cannot be changed. If not specified,
  396. // no parameter is overridden.
  397. ExplanationParameters parameters = 1;
  398. // The metadata to be overridden. If not specified, no metadata is overridden.
  399. ExplanationMetadataOverride metadata = 2;
  400. // The example-based explanations parameter overrides.
  401. ExamplesOverride examples_override = 3;
  402. }
  403. // The [ExplanationMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata] entries that can be overridden at
  404. // [online explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] time.
  405. message ExplanationMetadataOverride {
  406. // The [input metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata] entries to be
  407. // overridden.
  408. message InputMetadataOverride {
  409. // Baseline inputs for this feature.
  410. //
  411. // This overrides the `input_baseline` field of the
  412. // [ExplanationMetadata.InputMetadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata]
  413. // object of the corresponding feature's input metadata. If it's not
  414. // specified, the original baselines are not overridden.
  415. repeated google.protobuf.Value input_baselines = 1;
  416. }
  417. // Required. Overrides the [input metadata][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs] of the features.
  418. // The key is the name of the feature to be overridden. The keys specified
  419. // here must exist in the input metadata to be overridden. If a feature is
  420. // not specified here, the corresponding feature's input metadata is not
  421. // overridden.
  422. map<string, InputMetadataOverride> inputs = 1 [(google.api.field_behavior) = REQUIRED];
  423. }
  424. // Overrides for example-based explanations.
  425. message ExamplesOverride {
  426. // Data format enum.
  427. enum DataFormat {
  428. // Unspecified format. Must not be used.
  429. DATA_FORMAT_UNSPECIFIED = 0;
  430. // Provided data is a set of model inputs.
  431. INSTANCES = 1;
  432. // Provided data is a set of embeddings.
  433. EMBEDDINGS = 2;
  434. }
  435. // The number of neighbors to return.
  436. int32 neighbor_count = 1;
  437. // The number of neighbors to return that have the same crowding tag.
  438. int32 crowding_count = 2;
  439. // Restrict the resulting nearest neighbors to respect these constraints.
  440. repeated ExamplesRestrictionsNamespace restrictions = 3;
  441. // If true, return the embeddings instead of neighbors.
  442. bool return_embeddings = 4;
  443. // The format of the data being provided with each call.
  444. DataFormat data_format = 5;
  445. }
  446. // Restrictions namespace for example-based explanations overrides.
  447. message ExamplesRestrictionsNamespace {
  448. // The namespace name.
  449. string namespace_name = 1;
  450. // The list of allowed tags.
  451. repeated string allow = 2;
  452. // The list of deny tags.
  453. repeated string deny = 3;
  454. }