speech_transcribe_multilanguage_beta.yaml

type: com.google.api.codegen.samplegen.v1p2.SampleConfigProto
schema_version: 1.2.0
samples:
- region_tag: speech_transcribe_multilanguage_beta
  title: Detecting language spoken automatically (Local File) (Beta)
  description: |
    Transcribe a short audio file with language detected from a list of possible languages
  rpc: Recognize
  service: google.cloud.speech.v1p1beta1.Speech
  request:
  - field: audio.content
    value: resources/brooklyn_bridge.flac
    input_parameter: local_file_path
    comment: Path to local audio file, e.g. /path/audio.wav
    value_is_file: true
  - field: config.language_code
    value: "fr"
    comment: |
      The language of the supplied audio. Even though additional languages are
      provided by alternative_language_codes, a primary language is still required.
  - field: config.alternative_language_codes[0]
    value: "es"
    comment: |
      Specify up to 3 additional languages as possible alternative languages
      of the supplied audio.
  - field: config.alternative_language_codes[1]
    value: "en"
  response:
  - loop:
      variable: result
      collection: $resp.results
      body:
      - comment:
        - The %s which was detected as the most likely being spoken in the audio
        - language_code
      - print:
        - "Detected language: %s"
        - result.language_code
      - comment:
        - First alternative is the most probable result
      - define: alternative = result.alternatives[0]
      - print:
        - "Transcript: %s"
        - alternative.transcript
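
This config drives Google's sample generator, which emits an equivalent snippet in each supported client language. As a minimal sketch of what the generated call looks like with the Python client (google-cloud-speech, v1p1beta1 surface); the function name `transcribe_multilanguage` and the default file path here are illustrative, not part of the config:

```python
# Sketch only: approximates what the generated sample does with the Python client.
# Assumes google-cloud-speech is installed and application credentials are configured.
from google.cloud import speech_v1p1beta1 as speech


def transcribe_multilanguage(local_file_path="resources/brooklyn_bridge.flac"):
    """Transcribe a local audio file, letting the API pick among candidate languages."""
    client = speech.SpeechClient()

    # Path to local audio file, e.g. /path/audio.wav
    with open(local_file_path, "rb") as f:
        content = f.read()

    # A primary language is still required even when alternatives are supplied.
    config = speech.RecognitionConfig(
        language_code="fr",
        alternative_language_codes=["es", "en"],
    )
    audio = speech.RecognitionAudio(content=content)

    response = client.recognize(config=config, audio=audio)

    for result in response.results:
        # The language detected as the most likely being spoken in the audio.
        print("Detected language: {}".format(result.language_code))
        # First alternative is the most probable result.
        alternative = result.alternatives[0]
        print("Transcript: {}".format(alternative.transcript))
```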