Copyright | (c) 2015-2016 Brendan Hay |
---|---|
License | Mozilla Public License, v. 2.0. |
Maintainer | Brendan Hay <brendan.g.hay@gmail.com> |
Stability | auto-generated |
Portability | non-portable (GHC extensions) |
Safe Haskell | None |
Language | Haskell2010 |
- Service Configuration
- OAuth Scopes
- LongRunningRecognizeMetadata
- Status
- SpeechContext
- RecognitionMetadataOriginalMediaType
- ListOperationsResponse
- RecognitionMetadata
- RecognizeRequest
- Operation
- SpeechRecognitionAlternative
- WordInfo
- RecognitionMetadataInteractionType
- StatusDetailsItem
- SpeechRecognitionResult
- RecognitionAudio
- RecognizeResponse
- RecognitionMetadataRecordingDeviceType
- RecognitionMetadataMicrophoneDistance
- Xgafv
- LongRunningRecognizeResponse
- RecognitionConfig
- LongRunningRecognizeRequest
- OperationMetadata
- RecognitionConfigEncoding
- OperationResponse
- SpeakerDiarizationConfig
Synopsis
- speechService :: ServiceConfig
- cloudPlatformScope :: Proxy '["https://www.googleapis.com/auth/cloud-platform"]
- data LongRunningRecognizeMetadata
- longRunningRecognizeMetadata :: LongRunningRecognizeMetadata
- lrrmStartTime :: Lens' LongRunningRecognizeMetadata (Maybe UTCTime)
- lrrmProgressPercent :: Lens' LongRunningRecognizeMetadata (Maybe Int32)
- lrrmLastUpdateTime :: Lens' LongRunningRecognizeMetadata (Maybe UTCTime)
- data Status
- status :: Status
- sDetails :: Lens' Status [StatusDetailsItem]
- sCode :: Lens' Status (Maybe Int32)
- sMessage :: Lens' Status (Maybe Text)
- data SpeechContext
- speechContext :: SpeechContext
- scPhrases :: Lens' SpeechContext [Text]
- data RecognitionMetadataOriginalMediaType
- data ListOperationsResponse
- listOperationsResponse :: ListOperationsResponse
- lorNextPageToken :: Lens' ListOperationsResponse (Maybe Text)
- lorOperations :: Lens' ListOperationsResponse [Operation]
- data RecognitionMetadata
- recognitionMetadata :: RecognitionMetadata
- rmAudioTopic :: Lens' RecognitionMetadata (Maybe Text)
- rmInteractionType :: Lens' RecognitionMetadata (Maybe RecognitionMetadataInteractionType)
- rmOriginalMediaType :: Lens' RecognitionMetadata (Maybe RecognitionMetadataOriginalMediaType)
- rmOriginalMimeType :: Lens' RecognitionMetadata (Maybe Text)
- rmIndustryNaicsCodeOfAudio :: Lens' RecognitionMetadata (Maybe Word32)
- rmObfuscatedId :: Lens' RecognitionMetadata (Maybe Int64)
- rmRecordingDeviceName :: Lens' RecognitionMetadata (Maybe Text)
- rmRecordingDeviceType :: Lens' RecognitionMetadata (Maybe RecognitionMetadataRecordingDeviceType)
- rmMicrophoneDistance :: Lens' RecognitionMetadata (Maybe RecognitionMetadataMicrophoneDistance)
- data RecognizeRequest
- recognizeRequest :: RecognizeRequest
- rrConfig :: Lens' RecognizeRequest (Maybe RecognitionConfig)
- rrAudio :: Lens' RecognizeRequest (Maybe RecognitionAudio)
- data Operation
- operation :: Operation
- oDone :: Lens' Operation (Maybe Bool)
- oError :: Lens' Operation (Maybe Status)
- oResponse :: Lens' Operation (Maybe OperationResponse)
- oName :: Lens' Operation (Maybe Text)
- oMetadata :: Lens' Operation (Maybe OperationMetadata)
- data SpeechRecognitionAlternative
- speechRecognitionAlternative :: SpeechRecognitionAlternative
- sraConfidence :: Lens' SpeechRecognitionAlternative (Maybe Double)
- sraWords :: Lens' SpeechRecognitionAlternative [WordInfo]
- sraTranscript :: Lens' SpeechRecognitionAlternative (Maybe Text)
- data WordInfo
- wordInfo :: WordInfo
- wiStartTime :: Lens' WordInfo (Maybe Scientific)
- wiConfidence :: Lens' WordInfo (Maybe Double)
- wiEndTime :: Lens' WordInfo (Maybe Scientific)
- wiWord :: Lens' WordInfo (Maybe Text)
- wiSpeakerTag :: Lens' WordInfo (Maybe Int32)
- data RecognitionMetadataInteractionType
- data StatusDetailsItem
- statusDetailsItem :: HashMap Text JSONValue -> StatusDetailsItem
- sdiAddtional :: Lens' StatusDetailsItem (HashMap Text JSONValue)
- data SpeechRecognitionResult
- speechRecognitionResult :: SpeechRecognitionResult
- srrAlternatives :: Lens' SpeechRecognitionResult [SpeechRecognitionAlternative]
- srrLanguageCode :: Lens' SpeechRecognitionResult (Maybe Text)
- srrChannelTag :: Lens' SpeechRecognitionResult (Maybe Int32)
- data RecognitionAudio
- recognitionAudio :: RecognitionAudio
- raURI :: Lens' RecognitionAudio (Maybe Text)
- raContent :: Lens' RecognitionAudio (Maybe ByteString)
- data RecognizeResponse
- recognizeResponse :: RecognizeResponse
- rrResults :: Lens' RecognizeResponse [SpeechRecognitionResult]
- data RecognitionMetadataRecordingDeviceType
- data RecognitionMetadataMicrophoneDistance
- data Xgafv
- data LongRunningRecognizeResponse
- longRunningRecognizeResponse :: LongRunningRecognizeResponse
- lrrrResults :: Lens' LongRunningRecognizeResponse [SpeechRecognitionResult]
- data RecognitionConfig
- recognitionConfig :: RecognitionConfig
- rcEnableWordTimeOffSets :: Lens' RecognitionConfig (Maybe Bool)
- rcSpeechContexts :: Lens' RecognitionConfig [SpeechContext]
- rcLanguageCode :: Lens' RecognitionConfig (Maybe Text)
- rcDiarizationConfig :: Lens' RecognitionConfig (Maybe SpeakerDiarizationConfig)
- rcSampleRateHertz :: Lens' RecognitionConfig (Maybe Int32)
- rcEnableAutomaticPunctuation :: Lens' RecognitionConfig (Maybe Bool)
- rcMaxAlternatives :: Lens' RecognitionConfig (Maybe Int32)
- rcAudioChannelCount :: Lens' RecognitionConfig (Maybe Int32)
- rcEnableSeparateRecognitionPerChannel :: Lens' RecognitionConfig (Maybe Bool)
- rcModel :: Lens' RecognitionConfig (Maybe Text)
- rcEnableSpeakerDiarization :: Lens' RecognitionConfig (Maybe Bool)
- rcMetadata :: Lens' RecognitionConfig (Maybe RecognitionMetadata)
- rcUseEnhanced :: Lens' RecognitionConfig (Maybe Bool)
- rcProfanityFilter :: Lens' RecognitionConfig (Maybe Bool)
- rcDiarizationSpeakerCount :: Lens' RecognitionConfig (Maybe Int32)
- rcEncoding :: Lens' RecognitionConfig (Maybe RecognitionConfigEncoding)
- rcAlternativeLanguageCodes :: Lens' RecognitionConfig [Text]
- rcEnableWordConfidence :: Lens' RecognitionConfig (Maybe Bool)
- data LongRunningRecognizeRequest
- longRunningRecognizeRequest :: LongRunningRecognizeRequest
- lrrrConfig :: Lens' LongRunningRecognizeRequest (Maybe RecognitionConfig)
- lrrrAudio :: Lens' LongRunningRecognizeRequest (Maybe RecognitionAudio)
- data OperationMetadata
- operationMetadata :: HashMap Text JSONValue -> OperationMetadata
- omAddtional :: Lens' OperationMetadata (HashMap Text JSONValue)
- data RecognitionConfigEncoding
- data OperationResponse
- operationResponse :: HashMap Text JSONValue -> OperationResponse
- orAddtional :: Lens' OperationResponse (HashMap Text JSONValue)
- data SpeakerDiarizationConfig
- speakerDiarizationConfig :: SpeakerDiarizationConfig
- sdcMinSpeakerCount :: Lens' SpeakerDiarizationConfig (Maybe Int32)
- sdcMaxSpeakerCount :: Lens' SpeakerDiarizationConfig (Maybe Int32)
- sdcEnableSpeakerDiarization :: Lens' SpeakerDiarizationConfig (Maybe Bool)
Service Configuration
speechService :: ServiceConfig Source #
Default request referring to version v1p1beta1 of the Cloud Speech API. This contains the host and root path used as a starting point for constructing service requests.
OAuth Scopes
cloudPlatformScope :: Proxy '["https://www.googleapis.com/auth/cloud-platform"] Source #
View and manage your data across Google Cloud Platform services
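For orientation, here is a minimal sketch of wiring this scope into a request environment. It assumes the `newEnv`, `envScopes`, `envLogger`, and `newLogger` helpers from the core gogol package (Network.Google); credentials are discovered from the surrounding environment, so nothing here is specific to this module beyond `cloudPlatformScope` and `speechService`.

```haskell
import Control.Lens ((.~), (<&>))
import Network.Google
import Network.Google.Speech
import System.IO (stdout)

main :: IO ()
main = do
  lgr <- newLogger Debug stdout
  -- Attach a logger and restrict the environment to the cloud-platform scope.
  -- Credentials are discovered automatically (e.g. GOOGLE_APPLICATION_CREDENTIALS).
  env <- newEnv <&> (envLogger .~ lgr) . (envScopes .~ cloudPlatformScope)
  -- 'env' can now authorize requests built against speechService,
  -- dispatched with runResourceT . runGoogle env . send.
  pure ()
```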
LongRunningRecognizeMetadata
data LongRunningRecognizeMetadata Source #
Describes the progress of a long-running `LongRunningRecognize` call. It is included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
See: longRunningRecognizeMetadata
smart constructor.
Instances
longRunningRecognizeMetadata :: LongRunningRecognizeMetadata Source #
Creates a value of LongRunningRecognizeMetadata
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
lrrmStartTime :: Lens' LongRunningRecognizeMetadata (Maybe UTCTime) Source #
Time when the request was received.
lrrmProgressPercent :: Lens' LongRunningRecognizeMetadata (Maybe Int32) Source #
Approximate percentage of audio processed thus far. Guaranteed to be 100 when the audio is fully processed and the results are available.
lrrmLastUpdateTime :: Lens' LongRunningRecognizeMetadata (Maybe UTCTime) Source #
Time of the most recent processing update.
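As a small illustration (assuming Control.Lens for the `^.` operator), the metadata can be inspected like any other gogol record:

```haskell
import Control.Lens ((^.))
import Data.Maybe (fromMaybe)
import Network.Google.Speech

-- Summarise how far a long-running recognition job has progressed.
-- A missing progress percentage is reported as 0.
describeProgress :: LongRunningRecognizeMetadata -> String
describeProgress m =
  "audio processed: " <> show (fromMaybe 0 (m ^. lrrmProgressPercent)) <> "%"
```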
Status
data Status Source #
The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC. The error model is designed to be: simple to use and understand for most users, and flexible enough to meet unexpected needs.
Overview: the `Status` message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers *understand* and *resolve* the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package `google.rpc` that can be used for common error conditions.
Language mapping: the `Status` message is the logical representation of the error model, but it is not necessarily the actual wire format. When the `Status` message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.
Other uses: the error model and the `Status` message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments. Example uses of this error model include:
- Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors.
- Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting.
- Batch operations. If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response.
- Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message.
- Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.
See: status
smart constructor.
Instances
Eq Status Source #
Data Status Source #
Show Status Source #
Generic Status Source #
ToJSON Status Source #
FromJSON Status Source #
status :: Status Source #
Creates a value of Status
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
sDetails :: Lens' Status [StatusDetailsItem] Source #
A list of messages that carry the error details. There is a common set of message types for APIs to use.
sCode :: Lens' Status (Maybe Int32) Source #
The status code, which should be an enum value of google.rpc.Code.
sMessage :: Lens' Status (Maybe Text) Source #
A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
SpeechContext
data SpeechContext Source #
Provides "hints" to the speech recognizer to favor specific words and phrases in the results.
See: speechContext
smart constructor.
Instances
speechContext :: SpeechContext Source #
Creates a value of SpeechContext
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
scPhrases :: Lens' SpeechContext [Text] Source #
*Optional* A list of strings containing words and phrases "hints" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See usage limits.
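A minimal sketch of building a context with the smart constructor and lens above; the phrase values are illustrative only:

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (.~))
import Network.Google.Speech

-- Bias recognition toward a handful of domain phrases (hypothetical values).
weatherContext :: SpeechContext
weatherContext =
  speechContext & scPhrases .~ ["weather", "temperature", "forecast"]
```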
RecognitionMetadataOriginalMediaType
data RecognitionMetadataOriginalMediaType Source #
The original media the speech was recorded on.
Constructors:
- OriginalMediaTypeUnspecified
- Audio
- Video
Instances
ListOperationsResponse
data ListOperationsResponse Source #
The response message for Operations.ListOperations.
See: listOperationsResponse
smart constructor.
Instances
listOperationsResponse :: ListOperationsResponse Source #
Creates a value of ListOperationsResponse
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
lorNextPageToken :: Lens' ListOperationsResponse (Maybe Text) Source #
The standard List next-page token.
lorOperations :: Lens' ListOperationsResponse [Operation] Source #
A list of operations that matches the specified filter in the request.
RecognitionMetadata
data RecognitionMetadata Source #
Description of audio data to be recognized.
See: recognitionMetadata
smart constructor.
Instances
recognitionMetadata :: RecognitionMetadata Source #
Creates a value of RecognitionMetadata
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
rmAudioTopic :: Lens' RecognitionMetadata (Maybe Text) Source #
Description of the content. E.g. "Recordings of federal supreme court hearings from 2012".
rmInteractionType :: Lens' RecognitionMetadata (Maybe RecognitionMetadataInteractionType) Source #
The use case most closely describing the audio content to be recognized.
rmOriginalMediaType :: Lens' RecognitionMetadata (Maybe RecognitionMetadataOriginalMediaType) Source #
The original media the speech was recorded on.
rmOriginalMimeType :: Lens' RecognitionMetadata (Maybe Text) Source #
Mime type of the original audio file. For example `audio/m4a`, `audio/x-alaw-basic`, `audio/mp3`, `audio/3gpp`. A list of possible audio mime types is maintained at http://www.iana.org/assignments/media-types/media-types.xhtml#audio
rmIndustryNaicsCodeOfAudio :: Lens' RecognitionMetadata (Maybe Word32) Source #
The industry vertical to which this speech recognition request most closely applies. This is most indicative of the topics contained in the audio. Use the 6-digit NAICS code to identify the industry vertical - see https://www.naics.com/search/.
rmObfuscatedId :: Lens' RecognitionMetadata (Maybe Int64) Source #
Obfuscated (privacy-protected) ID of the user, to identify number of unique users using the service.
rmRecordingDeviceName :: Lens' RecognitionMetadata (Maybe Text) Source #
The device used to make the recording. Examples: 'Nexus 5X', 'Polycom SoundStation IP 6000', 'POTS', 'VoIP', or 'Cardioid Microphone'.
rmRecordingDeviceType :: Lens' RecognitionMetadata (Maybe RecognitionMetadataRecordingDeviceType) Source #
The type of device the speech was recorded with.
rmMicrophoneDistance :: Lens' RecognitionMetadata (Maybe RecognitionMetadataMicrophoneDistance) Source #
The audio type that most closely describes the audio being recognized.
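Putting the metadata lenses together, a hedged sketch of describing a phone-call recording (all values are illustrative; `?~` from Control.Lens sets a `Maybe` field):

```haskell
import Control.Lens ((&), (?~))
import Network.Google.Speech

-- Metadata for a narrow-band phone-call recording (illustrative values).
callMetadata :: RecognitionMetadata
callMetadata =
  recognitionMetadata
    & rmInteractionType     ?~ PhoneCall
    & rmOriginalMediaType   ?~ Audio
    & rmRecordingDeviceType ?~ PhoneLine
    & rmMicrophoneDistance  ?~ Nearfield
```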
RecognizeRequest
data RecognizeRequest Source #
The top-level message sent by the client for the `Recognize` method.
See: recognizeRequest
smart constructor.
Instances
recognizeRequest :: RecognizeRequest Source #
Creates a value of RecognizeRequest
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
rrConfig :: Lens' RecognizeRequest (Maybe RecognitionConfig) Source #
*Required* Provides information to the recognizer that specifies how to process the request.
rrAudio :: Lens' RecognizeRequest (Maybe RecognitionAudio) Source #
*Required* The audio data to be recognized.
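Both required fields are then supplied through the lenses; a minimal sketch:

```haskell
import Control.Lens ((&), (?~))
import Network.Google.Speech

-- Pair a RecognitionConfig with RecognitionAudio into a Recognize request.
mkRecognizeRequest :: RecognitionConfig -> RecognitionAudio -> RecognizeRequest
mkRecognizeRequest cfg aud =
  recognizeRequest
    & rrConfig ?~ cfg
    & rrAudio  ?~ aud
```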
Operation
data Operation Source #
This resource represents a long-running operation that is the result of a network API call.
See: operation
smart constructor.
Instances
operation :: Operation Source #
Creates a value of Operation
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
oDone :: Lens' Operation (Maybe Bool) Source #
If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
oError :: Lens' Operation (Maybe Status) Source #
The error result of the operation in case of failure or cancellation.
oResponse :: Lens' Operation (Maybe OperationResponse) Source #
The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
oName :: Lens' Operation (Maybe Text) Source #
The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should have the format of `operations/some/unique/name`.
oMetadata :: Lens' Operation (Maybe OperationMetadata) Source #
Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
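Given the `oDone`/`oError`/`oResponse` contract above, here is a sketch of interpreting a polled Operation; the `Outcome` type is invented here purely for illustration:

```haskell
import Control.Lens ((^.))
import Network.Google.Speech

-- A hypothetical summary of an Operation's state.
data Outcome = Running | Failed Status | Finished OperationResponse

outcome :: Operation -> Outcome
outcome op
  | op ^. oDone /= Just True  = Running
  | Just e <- op ^. oError    = Failed e
  | Just r <- op ^. oResponse = Finished r
  | otherwise                 = Running -- done, but neither field populated yet
```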
SpeechRecognitionAlternative
data SpeechRecognitionAlternative Source #
Alternative hypotheses (a.k.a. n-best list).
See: speechRecognitionAlternative
smart constructor.
Instances
speechRecognitionAlternative :: SpeechRecognitionAlternative Source #
Creates a value of SpeechRecognitionAlternative
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
sraConfidence :: Lens' SpeechRecognitionAlternative (Maybe Double) Source #
Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative of a non-streaming result, or of a streaming result where `is_final=true`. This field is not guaranteed to be accurate and users should not rely on it to be always provided. The default of 0.0 is a sentinel value indicating `confidence` was not set.
sraWords :: Lens' SpeechRecognitionAlternative [WordInfo] Source #
Output only. A list of word-specific information for each recognized word. Note: When `enable_speaker_diarization` is true, you will see all the words from the beginning of the audio.
sraTranscript :: Lens' SpeechRecognitionAlternative (Maybe Text) Source #
Output only. Transcript text representing the words that the user spoke.
WordInfo
data WordInfo Source #
Word-specific information for recognized words.
See: wordInfo
smart constructor.
Instances
wordInfo :: WordInfo Source #
Creates a value of WordInfo
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
wiStartTime :: Lens' WordInfo (Maybe Scientific) Source #
Output only. Time offset relative to the beginning of the audio, and corresponding to the start of the spoken word. This field is only set if `enable_word_time_offsets=true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary.
wiConfidence :: Lens' WordInfo (Maybe Double) Source #
Output only. The confidence estimate between 0.0 and 1.0. A higher number indicates an estimated greater likelihood that the recognized words are correct. This field is set only for the top alternative of a non-streaming result, or of a streaming result where `is_final=true`. This field is not guaranteed to be accurate and users should not rely on it to be always provided. The default of 0.0 is a sentinel value indicating `confidence` was not set.
wiEndTime :: Lens' WordInfo (Maybe Scientific) Source #
Output only. Time offset relative to the beginning of the audio, and corresponding to the end of the spoken word. This field is only set if `enable_word_time_offsets=true` and only in the top hypothesis. This is an experimental feature and the accuracy of the time offset can vary.
wiWord :: Lens' WordInfo (Maybe Text) Source #
Output only. The word corresponding to this set of information.
wiSpeakerTag :: Lens' WordInfo (Maybe Int32) Source #
Output only. A distinct integer value is assigned for every speaker within the audio. This field specifies which one of those speakers was detected to have spoken this word. Value ranges from '1' to diarization_speaker_count. speaker_tag is set if enable_speaker_diarization = 'true' and only in the top alternative.
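For example (assuming Data.Text and Control.Lens), a word and its experimental time offsets can be rendered like this:

```haskell
import Control.Lens ((^.))
import Data.Maybe (fromMaybe)
import qualified Data.Text as T
import Network.Google.Speech

-- Render a recognized word with its optional start/end offsets (in seconds).
showWord :: WordInfo -> String
showWord wi = concat
  [ T.unpack (fromMaybe "?" (wi ^. wiWord))
  , maybe "" (\t -> " start=" <> show t <> "s") (wi ^. wiStartTime)
  , maybe "" (\t -> " end="   <> show t <> "s") (wi ^. wiEndTime)
  ]
```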
RecognitionMetadataInteractionType
data RecognitionMetadataInteractionType Source #
The use case most closely describing the audio content to be recognized.
Constructors:
- InteractionTypeUnspecified
- Discussion
- Presentation
- PhoneCall
- Voicemail
- ProfessionallyProduced
- VoiceSearch
- VoiceCommand
- Dictation
Instances
StatusDetailsItem
data StatusDetailsItem Source #
Instances
statusDetailsItem :: HashMap Text JSONValue -> StatusDetailsItem Source #
Creates a value of StatusDetailsItem
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
sdiAddtional :: Lens' StatusDetailsItem (HashMap Text JSONValue) Source #
Properties of the object. Contains field `@type` with type URL.
SpeechRecognitionResult
data SpeechRecognitionResult Source #
A speech recognition result corresponding to a portion of the audio.
See: speechRecognitionResult
smart constructor.
Instances
speechRecognitionResult :: SpeechRecognitionResult Source #
Creates a value of SpeechRecognitionResult
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
srrAlternatives :: Lens' SpeechRecognitionResult [SpeechRecognitionAlternative] Source #
Output only. May contain one or more recognition hypotheses (up to the maximum specified in `max_alternatives`). These alternatives are ordered in terms of accuracy, with the top (first) alternative being the most probable, as ranked by the recognizer.
srrLanguageCode :: Lens' SpeechRecognitionResult (Maybe Text) Source #
Output only. The BCP-47 language tag of the language in this result. This language code was detected to have the most likelihood of being spoken in the audio.
srrChannelTag :: Lens' SpeechRecognitionResult (Maybe Int32) Source #
For multi-channel audio, this is the channel number corresponding to the recognized result for the audio from that channel. For audio_channel_count = N, its output values can range from '1' to 'N'.
RecognitionAudio
data RecognitionAudio Source #
Contains audio data in the encoding specified in the `RecognitionConfig`. Either `content` or `uri` must be supplied. Supplying both or neither returns google.rpc.Code.INVALID_ARGUMENT. See content limits.
See: recognitionAudio
smart constructor.
Instances
recognitionAudio :: RecognitionAudio Source #
Creates a value of RecognitionAudio
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
raURI :: Lens' RecognitionAudio (Maybe Text) Source #
URI that points to a file that contains audio data bytes as specified in `RecognitionConfig`. The file must not be compressed (for example, gzip). Currently, only Google Cloud Storage URIs are supported, which must be specified in the following format: `gs://bucket_name/object_name` (other URI formats return google.rpc.Code.INVALID_ARGUMENT). For more information, see Request URIs.
raContent :: Lens' RecognitionAudio (Maybe ByteString) Source #
The audio data bytes encoded as specified in `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a pure binary representation, whereas JSON representations use base64.
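Exactly one of the two fields should be set, per the note above; a minimal sketch of both shapes (the bucket and object names are placeholders):

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (?~))
import Data.ByteString (ByteString)
import Network.Google.Speech

-- Reference audio already uploaded to Cloud Storage (placeholder URI).
gcsAudio :: RecognitionAudio
gcsAudio = recognitionAudio & raURI ?~ "gs://my-bucket/recording.flac"

-- Or inline the raw bytes; the JSON wire form is base64, per the field docs.
inlineAudio :: ByteString -> RecognitionAudio
inlineAudio bs = recognitionAudio & raContent ?~ bs
```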
RecognizeResponse
data RecognizeResponse Source #
The only message returned to the client by the `Recognize` method. It contains the result as zero or more sequential `SpeechRecognitionResult` messages.
See: recognizeResponse
smart constructor.
Instances
recognizeResponse :: RecognizeResponse Source #
Creates a value of RecognizeResponse
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
rrResults :: Lens' RecognizeResponse [SpeechRecognitionResult] Source #
Output only. Sequential list of transcription results corresponding to sequential portions of audio.
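Since alternatives are ordered by probability, the top transcript of each sequential result can be collected with a short traversal; a sketch:

```haskell
import Control.Lens ((^.))
import Data.Maybe (listToMaybe, mapMaybe)
import Data.Text (Text)
import Network.Google.Speech

-- The most probable transcript of each sequential result, in order.
topTranscripts :: RecognizeResponse -> [Text]
topTranscripts rsp =
  mapMaybe (\r -> listToMaybe (r ^. srrAlternatives) >>= (^. sraTranscript))
           (rsp ^. rrResults)
```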
RecognitionMetadataRecordingDeviceType
data RecognitionMetadataRecordingDeviceType Source #
The type of device the speech was recorded with.
Constructors:
- RecordingDeviceTypeUnspecified
- Smartphone
- PC
- PhoneLine
- Vehicle
- OtherOutdoorDevice
- OtherIndoorDevice
Instances
RecognitionMetadataMicrophoneDistance
data RecognitionMetadataMicrophoneDistance Source #
The audio type that most closely describes the audio being recognized.
Constructors:
- MicrophoneDistanceUnspecified
- Nearfield
- Midfield
- Farfield
Instances
Xgafv
data Xgafv Source #
V1 error format.
Instances
LongRunningRecognizeResponse
data LongRunningRecognizeResponse Source #
The only message returned to the client by the `LongRunningRecognize` method. It contains the result as zero or more sequential `SpeechRecognitionResult` messages. It is included in the `result.response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.
See: longRunningRecognizeResponse
smart constructor.
Instances
longRunningRecognizeResponse :: LongRunningRecognizeResponse Source #
Creates a value of LongRunningRecognizeResponse
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
lrrrResults :: Lens' LongRunningRecognizeResponse [SpeechRecognitionResult] Source #
Output only. Sequential list of transcription results corresponding to sequential portions of audio.
RecognitionConfig
data RecognitionConfig Source #
Provides information to the recognizer that specifies how to process the request.
See: recognitionConfig
smart constructor.
Instances
recognitionConfig :: RecognitionConfig Source #
Creates a value of RecognitionConfig
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
- rcEnableWordTimeOffSets
- rcSpeechContexts
- rcLanguageCode
- rcDiarizationConfig
- rcSampleRateHertz
- rcEnableAutomaticPunctuation
- rcMaxAlternatives
- rcAudioChannelCount
- rcEnableSeparateRecognitionPerChannel
- rcModel
- rcEnableSpeakerDiarization
- rcMetadata
- rcUseEnhanced
- rcProfanityFilter
- rcDiarizationSpeakerCount
- rcEncoding
- rcAlternativeLanguageCodes
- rcEnableWordConfidence
rcEnableWordTimeOffSets :: Lens' RecognitionConfig (Maybe Bool) Source #
*Optional* If `true`, the top result includes a list of words and the start and end time offsets (timestamps) for those words. If `false`, no word-level time offset information is returned. The default is `false`.
rcSpeechContexts :: Lens' RecognitionConfig [SpeechContext] Source #
*Optional* array of SpeechContext. A means to provide context to assist the speech recognition. For more information, see Phrase Hints.
rcLanguageCode :: Lens' RecognitionConfig (Maybe Text) Source #
*Required* The language of the supplied audio as a BCP-47 language tag. Example: "en-US". See Language Support for a list of the currently supported language codes.
rcDiarizationConfig :: Lens' RecognitionConfig (Maybe SpeakerDiarizationConfig) Source #
*Optional* Config to enable speaker diarization and set additional parameters to make diarization better suited for your application. Note: When this is enabled, we send all the words from the beginning of the audio for the top alternative in every consecutive STREAMING responses. This is done in order to improve our speaker tags as our models learn to identify the speakers in the conversation over time. For non-streaming requests, the diarization results will be provided only in the top alternative of the FINAL SpeechRecognitionResult.
rcSampleRateHertz :: Lens' RecognitionConfig (Maybe Int32) Source #
Sample rate in Hertz of the audio data sent in all `RecognitionAudio` messages. Valid values are: 8000-48000. 16000 is optimal. For best results, set the sampling rate of the audio source to 16000 Hz. If that's not possible, use the native sample rate of the audio source (instead of re-sampling). This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding.
rcEnableAutomaticPunctuation :: Lens' RecognitionConfig (Maybe Bool) Source #
*Optional* If 'true', adds punctuation to recognition result hypotheses. This feature is only available in select languages. Setting this for requests in other languages has no effect at all. The default 'false' value does not add punctuation to result hypotheses. Note: This is currently offered as an experimental service, complimentary to all users. In the future this may be exclusively available as a premium feature.
rcMaxAlternatives :: Lens' RecognitionConfig (Maybe Int32) Source #
*Optional* Maximum number of recognition hypotheses to be returned. Specifically, the maximum number of `SpeechRecognitionAlternative` messages within each `SpeechRecognitionResult`. The server may return fewer than `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of one. If omitted, will return a maximum of one.
rcAudioChannelCount :: Lens' RecognitionConfig (Maybe Int32) Source #
*Optional* The number of channels in the input audio data. ONLY set this for MULTI-CHANNEL recognition. Valid values for LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to one channel (mono). Note: We only recognize the first channel by default. To perform independent recognition on each channel set `enable_separate_recognition_per_channel` to 'true'.
rcEnableSeparateRecognitionPerChannel :: Lens' RecognitionConfig (Maybe Bool) Source #
This needs to be set to `true` explicitly and `audio_channel_count` > 1 to get each channel recognized separately. The recognition result will contain a `channel_tag` field to state which channel that result belongs to. If this is not true, we will only recognize the first channel. The request is billed cumulatively for all channels recognized: `audio_channel_count` multiplied by the length of the audio.
rcModel :: Lens' RecognitionConfig (Maybe Text) Source #
*Optional* Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig.

Model | Description |
---|---|
command_and_search | Best for short queries such as voice commands or voice search. |
phone_call | Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate). |
video | Best for audio that originated from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate. |
default | Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate. |
rcEnableSpeakerDiarization :: Lens' RecognitionConfig (Maybe Bool) Source #
*Optional* If 'true', enables speaker detection for each recognized word in the top alternative of the recognition result using a speaker_tag provided in the WordInfo. Note: Use diarization_config instead. This field will be DEPRECATED soon.
rcMetadata :: Lens' RecognitionConfig (Maybe RecognitionMetadata) Source #
*Optional* Metadata regarding this request.
rcUseEnhanced :: Lens' RecognitionConfig (Maybe Bool) Source #
*Optional* Set to true to use an enhanced model for speech recognition. If `use_enhanced` is set to true and the `model` field is not set, then an appropriate enhanced model is chosen if: 1. project is eligible for requesting enhanced models 2. an enhanced model exists for the audio If `use_enhanced` is true and an enhanced version of the specified model does not exist, then the speech is recognized using the standard version of the specified model. Enhanced speech models require that you opt-in to data logging using instructions in the documentation. If you set `use_enhanced` to true and you have not enabled audio logging, then you will receive an error.
rcProfanityFilter :: Lens' RecognitionConfig (Maybe Bool) Source #
*Optional* If set to `true`, the server will attempt to filter out profanities, replacing all but the initial character in each filtered word with asterisks, e.g. "f***". If set to `false` or omitted, profanities won't be filtered out.
rcDiarizationSpeakerCount :: Lens' RecognitionConfig (Maybe Int32) Source #
*Optional* If set, specifies the estimated number of speakers in the conversation. If not set, defaults to '2'. Ignored unless enable_speaker_diarization is set to true. Note: Use diarization_config instead. This field will be DEPRECATED soon.
rcEncoding :: Lens' RecognitionConfig (Maybe RecognitionConfigEncoding) Source #
Encoding of audio data sent in all `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding.
rcAlternativeLanguageCodes :: Lens' RecognitionConfig [Text] Source #
*Optional* A list of up to 3 additional BCP-47 language tags, listing possible alternative languages of the supplied audio. See Language Support for a list of the currently supported language codes. If alternative languages are listed, recognition result will contain recognition in the most likely language detected including the main language_code. The recognition result will include the language tag of the language detected in the audio. Note: This feature is only supported for Voice Command and Voice Search use cases and performance may vary for other use cases (e.g., phone call transcription).
rcEnableWordConfidence :: Lens' RecognitionConfig (Maybe Bool) Source #
*Optional* If `true`, the top result includes a list of words and the confidence for those words. If `false`, no word-level confidence information is returned. The default is `false`.
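Tying the lenses together, a hedged sketch of a typical configuration (values are illustrative; `Flac` is one of the RecognitionConfigEncoding constructors listed below):

```haskell
{-# LANGUAGE OverloadedStrings #-}
import Control.Lens ((&), (?~))
import Network.Google.Speech

-- 16 kHz FLAC, US English, word timings and a single best alternative.
exampleConfig :: RecognitionConfig
exampleConfig =
  recognitionConfig
    & rcEncoding              ?~ Flac
    & rcSampleRateHertz       ?~ 16000
    & rcLanguageCode          ?~ "en-US"
    & rcEnableWordTimeOffSets ?~ True
    & rcMaxAlternatives       ?~ 1
```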
LongRunningRecognizeRequest
data LongRunningRecognizeRequest Source #
The top-level message sent by the client for the `LongRunningRecognize` method.
See: longRunningRecognizeRequest
smart constructor.
Instances
longRunningRecognizeRequest :: LongRunningRecognizeRequest Source #
Creates a value of LongRunningRecognizeRequest
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
lrrrConfig :: Lens' LongRunningRecognizeRequest (Maybe RecognitionConfig) Source #
*Required* Provides information to the recognizer that specifies how to process the request.
lrrrAudio :: Lens' LongRunningRecognizeRequest (Maybe RecognitionAudio) Source #
*Required* The audio data to be recognized.
OperationMetadata
data OperationMetadata Source #
Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
See: operationMetadata
smart constructor.
Instances
operationMetadata :: HashMap Text JSONValue -> OperationMetadata Source #
Creates a value of OperationMetadata
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
omAddtional :: Lens' OperationMetadata (HashMap Text JSONValue) Source #
Properties of the object. Contains field `@type` with type URL.
RecognitionConfigEncoding
data RecognitionConfigEncoding Source #
Encoding of audio data sent in all `RecognitionAudio` messages. This field is optional for `FLAC` and `WAV` audio files and required for all other audio formats. For details, see AudioEncoding.
Constructors:
- EncodingUnspecified
- LINEAR16
- Flac
- Mulaw
- Amr
- AmrWb
- OggOpus
- SpeexWithHeaderByte
Instances
OperationResponse
data OperationResponse Source #
The normal response of the operation in case of success. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
See: operationResponse
smart constructor.
Instances
operationResponse :: HashMap Text JSONValue -> OperationResponse Source #
Creates a value of OperationResponse
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
orAddtional :: Lens' OperationResponse (HashMap Text JSONValue) Source #
Properties of the object. Contains field `@type` with type URL.
SpeakerDiarizationConfig
data SpeakerDiarizationConfig Source #
See: speakerDiarizationConfig
smart constructor.
Instances
speakerDiarizationConfig :: SpeakerDiarizationConfig Source #
Creates a value of SpeakerDiarizationConfig
with the minimum fields required to make a request.
Use one of the following lenses to modify other fields as desired:
sdcMinSpeakerCount :: Lens' SpeakerDiarizationConfig (Maybe Int32) Source #
*Optional* Only used if diarization_speaker_count is not set. Minimum number of speakers in the conversation. This range gives you more flexibility by allowing the system to automatically determine the correct number of speakers. If not set, the default value is 2.
sdcMaxSpeakerCount :: Lens' SpeakerDiarizationConfig (Maybe Int32) Source #
*Optional* Only used if diarization_speaker_count is not set. Maximum number of speakers in the conversation. This range gives you more flexibility by allowing the system to automatically determine the correct number of speakers. If not set, the default value is 6.
sdcEnableSpeakerDiarization :: Lens' SpeakerDiarizationConfig (Maybe Bool) Source #
*Optional* If 'true', enables speaker detection for each recognized word in the top alternative of the recognition result using a speaker_tag provided in the WordInfo.
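A minimal sketch combining the three lenses, using the documented defaults of 2 and 6 as explicit bounds:

```haskell
import Control.Lens ((&), (?~))
import Network.Google.Speech

-- Enable diarization and let the service pick between 2 and 6 speakers.
diarization :: SpeakerDiarizationConfig
diarization =
  speakerDiarizationConfig
    & sdcEnableSpeakerDiarization ?~ True
    & sdcMinSpeakerCount          ?~ 2
    & sdcMaxSpeakerCount          ?~ 6
```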