| Field        | Value                           |
|--------------|---------------------------------|
| Copyright    | (c) 2013-2023 Brendan Hay       |
| License      | Mozilla Public License, v. 2.0. |
| Maintainer   | Brendan Hay                     |
| Stability    | auto-generated                  |
| Portability  | non-portable (GHC extensions)   |
| Safe Haskell | Safe-Inferred                   |
| Language     | Haskell2010                     |
- Service Configuration
- Errors
- BaseModelName
- CLMLanguageCode
- CallAnalyticsJobStatus
- InputType
- LanguageCode
- MediaFormat
- MedicalContentIdentificationType
- ModelStatus
- OutputLocationType
- ParticipantRole
- PiiEntityType
- RedactionOutput
- RedactionType
- SentimentValue
- Specialty
- SubtitleFormat
- TranscriptFilterType
- TranscriptionJobStatus
- Type
- VocabularyFilterMethod
- VocabularyState
- AbsoluteTimeRange
- CallAnalyticsJob
- CallAnalyticsJobSettings
- CallAnalyticsJobSummary
- CategoryProperties
- ChannelDefinition
- ContentRedaction
- InputDataConfig
- InterruptionFilter
- JobExecutionSettings
- LanguageCodeItem
- LanguageIdSettings
- LanguageModel
- Media
- MedicalTranscript
- MedicalTranscriptionJob
- MedicalTranscriptionJobSummary
- MedicalTranscriptionSetting
- ModelSettings
- NonTalkTimeFilter
- RelativeTimeRange
- Rule
- SentimentFilter
- Settings
- Subtitles
- SubtitlesOutput
- Tag
- Transcript
- TranscriptFilter
- TranscriptionJob
- TranscriptionJobSummary
- VocabularyFilterInfo
- VocabularyInfo
Synopsis
- defaultService :: Service
- _BadRequestException :: AsError a => Fold a ServiceError
- _ConflictException :: AsError a => Fold a ServiceError
- _InternalFailureException :: AsError a => Fold a ServiceError
- _LimitExceededException :: AsError a => Fold a ServiceError
- _NotFoundException :: AsError a => Fold a ServiceError
- newtype BaseModelName where
- BaseModelName' { }
- pattern BaseModelName_NarrowBand :: BaseModelName
- pattern BaseModelName_WideBand :: BaseModelName
- newtype CLMLanguageCode where
- CLMLanguageCode' { }
- pattern CLMLanguageCode_De_DE :: CLMLanguageCode
- pattern CLMLanguageCode_En_AU :: CLMLanguageCode
- pattern CLMLanguageCode_En_GB :: CLMLanguageCode
- pattern CLMLanguageCode_En_US :: CLMLanguageCode
- pattern CLMLanguageCode_Es_US :: CLMLanguageCode
- pattern CLMLanguageCode_Hi_IN :: CLMLanguageCode
- pattern CLMLanguageCode_Ja_JP :: CLMLanguageCode
- newtype CallAnalyticsJobStatus where
- newtype InputType where
- InputType' { }
- pattern InputType_POST_CALL :: InputType
- pattern InputType_REAL_TIME :: InputType
- newtype LanguageCode where
- LanguageCode' { }
- pattern LanguageCode_Af_ZA :: LanguageCode
- pattern LanguageCode_Ar_AE :: LanguageCode
- pattern LanguageCode_Ar_SA :: LanguageCode
- pattern LanguageCode_Da_DK :: LanguageCode
- pattern LanguageCode_De_CH :: LanguageCode
- pattern LanguageCode_De_DE :: LanguageCode
- pattern LanguageCode_En_AB :: LanguageCode
- pattern LanguageCode_En_AU :: LanguageCode
- pattern LanguageCode_En_GB :: LanguageCode
- pattern LanguageCode_En_IE :: LanguageCode
- pattern LanguageCode_En_IN :: LanguageCode
- pattern LanguageCode_En_NZ :: LanguageCode
- pattern LanguageCode_En_US :: LanguageCode
- pattern LanguageCode_En_WL :: LanguageCode
- pattern LanguageCode_En_ZA :: LanguageCode
- pattern LanguageCode_Es_ES :: LanguageCode
- pattern LanguageCode_Es_US :: LanguageCode
- pattern LanguageCode_Fa_IR :: LanguageCode
- pattern LanguageCode_Fr_CA :: LanguageCode
- pattern LanguageCode_Fr_FR :: LanguageCode
- pattern LanguageCode_He_IL :: LanguageCode
- pattern LanguageCode_Hi_IN :: LanguageCode
- pattern LanguageCode_Id_ID :: LanguageCode
- pattern LanguageCode_It_IT :: LanguageCode
- pattern LanguageCode_Ja_JP :: LanguageCode
- pattern LanguageCode_Ko_KR :: LanguageCode
- pattern LanguageCode_Ms_MY :: LanguageCode
- pattern LanguageCode_Nl_NL :: LanguageCode
- pattern LanguageCode_Pt_BR :: LanguageCode
- pattern LanguageCode_Pt_PT :: LanguageCode
- pattern LanguageCode_Ru_RU :: LanguageCode
- pattern LanguageCode_Sv_SE :: LanguageCode
- pattern LanguageCode_Ta_IN :: LanguageCode
- pattern LanguageCode_Te_IN :: LanguageCode
- pattern LanguageCode_Th_TH :: LanguageCode
- pattern LanguageCode_Tr_TR :: LanguageCode
- pattern LanguageCode_Vi_VN :: LanguageCode
- pattern LanguageCode_Zh_CN :: LanguageCode
- pattern LanguageCode_Zh_TW :: LanguageCode
- newtype MediaFormat where
- MediaFormat' { }
- pattern MediaFormat_Amr :: MediaFormat
- pattern MediaFormat_Flac :: MediaFormat
- pattern MediaFormat_Mp3 :: MediaFormat
- pattern MediaFormat_Mp4 :: MediaFormat
- pattern MediaFormat_Ogg :: MediaFormat
- pattern MediaFormat_Wav :: MediaFormat
- pattern MediaFormat_Webm :: MediaFormat
- newtype MedicalContentIdentificationType where
- newtype ModelStatus where
- ModelStatus' { }
- pattern ModelStatus_COMPLETED :: ModelStatus
- pattern ModelStatus_FAILED :: ModelStatus
- pattern ModelStatus_IN_PROGRESS :: ModelStatus
- newtype OutputLocationType where
- newtype ParticipantRole where
- ParticipantRole' { }
- pattern ParticipantRole_AGENT :: ParticipantRole
- pattern ParticipantRole_CUSTOMER :: ParticipantRole
- newtype PiiEntityType where
- PiiEntityType' { }
- pattern PiiEntityType_ADDRESS :: PiiEntityType
- pattern PiiEntityType_ALL :: PiiEntityType
- pattern PiiEntityType_BANK_ACCOUNT_NUMBER :: PiiEntityType
- pattern PiiEntityType_BANK_ROUTING :: PiiEntityType
- pattern PiiEntityType_CREDIT_DEBIT_CVV :: PiiEntityType
- pattern PiiEntityType_CREDIT_DEBIT_EXPIRY :: PiiEntityType
- pattern PiiEntityType_CREDIT_DEBIT_NUMBER :: PiiEntityType
- pattern PiiEntityType_EMAIL :: PiiEntityType
- pattern PiiEntityType_NAME :: PiiEntityType
- pattern PiiEntityType_PHONE :: PiiEntityType
- pattern PiiEntityType_PIN :: PiiEntityType
- pattern PiiEntityType_SSN :: PiiEntityType
- newtype RedactionOutput where
- RedactionOutput' { }
- pattern RedactionOutput_Redacted :: RedactionOutput
- pattern RedactionOutput_Redacted_and_unredacted :: RedactionOutput
- newtype RedactionType where
- RedactionType' { }
- pattern RedactionType_PII :: RedactionType
- newtype SentimentValue where
- SentimentValue' { }
- pattern SentimentValue_MIXED :: SentimentValue
- pattern SentimentValue_NEGATIVE :: SentimentValue
- pattern SentimentValue_NEUTRAL :: SentimentValue
- pattern SentimentValue_POSITIVE :: SentimentValue
- newtype Specialty where
- Specialty' { }
- pattern Specialty_PRIMARYCARE :: Specialty
- newtype SubtitleFormat where
- SubtitleFormat' { }
- pattern SubtitleFormat_Srt :: SubtitleFormat
- pattern SubtitleFormat_Vtt :: SubtitleFormat
- newtype TranscriptFilterType where
- newtype TranscriptionJobStatus where
- newtype Type where
- Type' { }
- pattern Type_CONVERSATION :: Type
- pattern Type_DICTATION :: Type
- newtype VocabularyFilterMethod where
- newtype VocabularyState where
- VocabularyState' { }
- pattern VocabularyState_FAILED :: VocabularyState
- pattern VocabularyState_PENDING :: VocabularyState
- pattern VocabularyState_READY :: VocabularyState
- data AbsoluteTimeRange = AbsoluteTimeRange' {}
- newAbsoluteTimeRange :: AbsoluteTimeRange
- absoluteTimeRange_endTime :: Lens' AbsoluteTimeRange (Maybe Natural)
- absoluteTimeRange_first :: Lens' AbsoluteTimeRange (Maybe Natural)
- absoluteTimeRange_last :: Lens' AbsoluteTimeRange (Maybe Natural)
- absoluteTimeRange_startTime :: Lens' AbsoluteTimeRange (Maybe Natural)
- data CallAnalyticsJob = CallAnalyticsJob' {
- callAnalyticsJobName :: Maybe Text
- callAnalyticsJobStatus :: Maybe CallAnalyticsJobStatus
- channelDefinitions :: Maybe (NonEmpty ChannelDefinition)
- completionTime :: Maybe POSIX
- creationTime :: Maybe POSIX
- dataAccessRoleArn :: Maybe Text
- failureReason :: Maybe Text
- identifiedLanguageScore :: Maybe Double
- languageCode :: Maybe LanguageCode
- media :: Maybe Media
- mediaFormat :: Maybe MediaFormat
- mediaSampleRateHertz :: Maybe Natural
- settings :: Maybe CallAnalyticsJobSettings
- startTime :: Maybe POSIX
- transcript :: Maybe Transcript
- newCallAnalyticsJob :: CallAnalyticsJob
- callAnalyticsJob_callAnalyticsJobName :: Lens' CallAnalyticsJob (Maybe Text)
- callAnalyticsJob_callAnalyticsJobStatus :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobStatus)
- callAnalyticsJob_channelDefinitions :: Lens' CallAnalyticsJob (Maybe (NonEmpty ChannelDefinition))
- callAnalyticsJob_completionTime :: Lens' CallAnalyticsJob (Maybe UTCTime)
- callAnalyticsJob_creationTime :: Lens' CallAnalyticsJob (Maybe UTCTime)
- callAnalyticsJob_dataAccessRoleArn :: Lens' CallAnalyticsJob (Maybe Text)
- callAnalyticsJob_failureReason :: Lens' CallAnalyticsJob (Maybe Text)
- callAnalyticsJob_identifiedLanguageScore :: Lens' CallAnalyticsJob (Maybe Double)
- callAnalyticsJob_languageCode :: Lens' CallAnalyticsJob (Maybe LanguageCode)
- callAnalyticsJob_media :: Lens' CallAnalyticsJob (Maybe Media)
- callAnalyticsJob_mediaFormat :: Lens' CallAnalyticsJob (Maybe MediaFormat)
- callAnalyticsJob_mediaSampleRateHertz :: Lens' CallAnalyticsJob (Maybe Natural)
- callAnalyticsJob_settings :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobSettings)
- callAnalyticsJob_startTime :: Lens' CallAnalyticsJob (Maybe UTCTime)
- callAnalyticsJob_transcript :: Lens' CallAnalyticsJob (Maybe Transcript)
- data CallAnalyticsJobSettings = CallAnalyticsJobSettings' {
- contentRedaction :: Maybe ContentRedaction
- languageIdSettings :: Maybe (HashMap LanguageCode LanguageIdSettings)
- languageModelName :: Maybe Text
- languageOptions :: Maybe (NonEmpty LanguageCode)
- vocabularyFilterMethod :: Maybe VocabularyFilterMethod
- vocabularyFilterName :: Maybe Text
- vocabularyName :: Maybe Text
- newCallAnalyticsJobSettings :: CallAnalyticsJobSettings
- callAnalyticsJobSettings_contentRedaction :: Lens' CallAnalyticsJobSettings (Maybe ContentRedaction)
- callAnalyticsJobSettings_languageIdSettings :: Lens' CallAnalyticsJobSettings (Maybe (HashMap LanguageCode LanguageIdSettings))
- callAnalyticsJobSettings_languageModelName :: Lens' CallAnalyticsJobSettings (Maybe Text)
- callAnalyticsJobSettings_languageOptions :: Lens' CallAnalyticsJobSettings (Maybe (NonEmpty LanguageCode))
- callAnalyticsJobSettings_vocabularyFilterMethod :: Lens' CallAnalyticsJobSettings (Maybe VocabularyFilterMethod)
- callAnalyticsJobSettings_vocabularyFilterName :: Lens' CallAnalyticsJobSettings (Maybe Text)
- callAnalyticsJobSettings_vocabularyName :: Lens' CallAnalyticsJobSettings (Maybe Text)
- data CallAnalyticsJobSummary = CallAnalyticsJobSummary' {}
- newCallAnalyticsJobSummary :: CallAnalyticsJobSummary
- callAnalyticsJobSummary_callAnalyticsJobName :: Lens' CallAnalyticsJobSummary (Maybe Text)
- callAnalyticsJobSummary_callAnalyticsJobStatus :: Lens' CallAnalyticsJobSummary (Maybe CallAnalyticsJobStatus)
- callAnalyticsJobSummary_completionTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime)
- callAnalyticsJobSummary_creationTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime)
- callAnalyticsJobSummary_failureReason :: Lens' CallAnalyticsJobSummary (Maybe Text)
- callAnalyticsJobSummary_languageCode :: Lens' CallAnalyticsJobSummary (Maybe LanguageCode)
- callAnalyticsJobSummary_startTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime)
- data CategoryProperties = CategoryProperties' {}
- newCategoryProperties :: CategoryProperties
- categoryProperties_categoryName :: Lens' CategoryProperties (Maybe Text)
- categoryProperties_createTime :: Lens' CategoryProperties (Maybe UTCTime)
- categoryProperties_inputType :: Lens' CategoryProperties (Maybe InputType)
- categoryProperties_lastUpdateTime :: Lens' CategoryProperties (Maybe UTCTime)
- categoryProperties_rules :: Lens' CategoryProperties (Maybe (NonEmpty Rule))
- data ChannelDefinition = ChannelDefinition' {}
- newChannelDefinition :: ChannelDefinition
- channelDefinition_channelId :: Lens' ChannelDefinition (Maybe Natural)
- channelDefinition_participantRole :: Lens' ChannelDefinition (Maybe ParticipantRole)
- data ContentRedaction = ContentRedaction' {}
- newContentRedaction :: RedactionType -> RedactionOutput -> ContentRedaction
- contentRedaction_piiEntityTypes :: Lens' ContentRedaction (Maybe [PiiEntityType])
- contentRedaction_redactionType :: Lens' ContentRedaction RedactionType
- contentRedaction_redactionOutput :: Lens' ContentRedaction RedactionOutput
- data InputDataConfig = InputDataConfig' {}
- newInputDataConfig :: Text -> Text -> InputDataConfig
- inputDataConfig_tuningDataS3Uri :: Lens' InputDataConfig (Maybe Text)
- inputDataConfig_s3Uri :: Lens' InputDataConfig Text
- inputDataConfig_dataAccessRoleArn :: Lens' InputDataConfig Text
- data InterruptionFilter = InterruptionFilter' {}
- newInterruptionFilter :: InterruptionFilter
- interruptionFilter_absoluteTimeRange :: Lens' InterruptionFilter (Maybe AbsoluteTimeRange)
- interruptionFilter_negate :: Lens' InterruptionFilter (Maybe Bool)
- interruptionFilter_participantRole :: Lens' InterruptionFilter (Maybe ParticipantRole)
- interruptionFilter_relativeTimeRange :: Lens' InterruptionFilter (Maybe RelativeTimeRange)
- interruptionFilter_threshold :: Lens' InterruptionFilter (Maybe Natural)
- data JobExecutionSettings = JobExecutionSettings' {}
- newJobExecutionSettings :: JobExecutionSettings
- jobExecutionSettings_allowDeferredExecution :: Lens' JobExecutionSettings (Maybe Bool)
- jobExecutionSettings_dataAccessRoleArn :: Lens' JobExecutionSettings (Maybe Text)
- data LanguageCodeItem = LanguageCodeItem' {}
- newLanguageCodeItem :: LanguageCodeItem
- languageCodeItem_durationInSeconds :: Lens' LanguageCodeItem (Maybe Double)
- languageCodeItem_languageCode :: Lens' LanguageCodeItem (Maybe LanguageCode)
- data LanguageIdSettings = LanguageIdSettings' {}
- newLanguageIdSettings :: LanguageIdSettings
- languageIdSettings_languageModelName :: Lens' LanguageIdSettings (Maybe Text)
- languageIdSettings_vocabularyFilterName :: Lens' LanguageIdSettings (Maybe Text)
- languageIdSettings_vocabularyName :: Lens' LanguageIdSettings (Maybe Text)
- data LanguageModel = LanguageModel' {}
- newLanguageModel :: LanguageModel
- languageModel_baseModelName :: Lens' LanguageModel (Maybe BaseModelName)
- languageModel_createTime :: Lens' LanguageModel (Maybe UTCTime)
- languageModel_failureReason :: Lens' LanguageModel (Maybe Text)
- languageModel_inputDataConfig :: Lens' LanguageModel (Maybe InputDataConfig)
- languageModel_languageCode :: Lens' LanguageModel (Maybe CLMLanguageCode)
- languageModel_lastModifiedTime :: Lens' LanguageModel (Maybe UTCTime)
- languageModel_modelName :: Lens' LanguageModel (Maybe Text)
- languageModel_modelStatus :: Lens' LanguageModel (Maybe ModelStatus)
- languageModel_upgradeAvailability :: Lens' LanguageModel (Maybe Bool)
- data Media = Media' {}
- newMedia :: Media
- media_mediaFileUri :: Lens' Media (Maybe Text)
- media_redactedMediaFileUri :: Lens' Media (Maybe Text)
- data MedicalTranscript = MedicalTranscript' {}
- newMedicalTranscript :: MedicalTranscript
- medicalTranscript_transcriptFileUri :: Lens' MedicalTranscript (Maybe Text)
- data MedicalTranscriptionJob = MedicalTranscriptionJob' {
- completionTime :: Maybe POSIX
- contentIdentificationType :: Maybe MedicalContentIdentificationType
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- languageCode :: Maybe LanguageCode
- media :: Maybe Media
- mediaFormat :: Maybe MediaFormat
- mediaSampleRateHertz :: Maybe Natural
- medicalTranscriptionJobName :: Maybe Text
- settings :: Maybe MedicalTranscriptionSetting
- specialty :: Maybe Specialty
- startTime :: Maybe POSIX
- tags :: Maybe (NonEmpty Tag)
- transcript :: Maybe MedicalTranscript
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- type' :: Maybe Type
- newMedicalTranscriptionJob :: MedicalTranscriptionJob
- medicalTranscriptionJob_completionTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime)
- medicalTranscriptionJob_contentIdentificationType :: Lens' MedicalTranscriptionJob (Maybe MedicalContentIdentificationType)
- medicalTranscriptionJob_creationTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime)
- medicalTranscriptionJob_failureReason :: Lens' MedicalTranscriptionJob (Maybe Text)
- medicalTranscriptionJob_languageCode :: Lens' MedicalTranscriptionJob (Maybe LanguageCode)
- medicalTranscriptionJob_media :: Lens' MedicalTranscriptionJob (Maybe Media)
- medicalTranscriptionJob_mediaFormat :: Lens' MedicalTranscriptionJob (Maybe MediaFormat)
- medicalTranscriptionJob_mediaSampleRateHertz :: Lens' MedicalTranscriptionJob (Maybe Natural)
- medicalTranscriptionJob_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJob (Maybe Text)
- medicalTranscriptionJob_settings :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscriptionSetting)
- medicalTranscriptionJob_specialty :: Lens' MedicalTranscriptionJob (Maybe Specialty)
- medicalTranscriptionJob_startTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime)
- medicalTranscriptionJob_tags :: Lens' MedicalTranscriptionJob (Maybe (NonEmpty Tag))
- medicalTranscriptionJob_transcript :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscript)
- medicalTranscriptionJob_transcriptionJobStatus :: Lens' MedicalTranscriptionJob (Maybe TranscriptionJobStatus)
- medicalTranscriptionJob_type :: Lens' MedicalTranscriptionJob (Maybe Type)
- data MedicalTranscriptionJobSummary = MedicalTranscriptionJobSummary' {
- completionTime :: Maybe POSIX
- contentIdentificationType :: Maybe MedicalContentIdentificationType
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- languageCode :: Maybe LanguageCode
- medicalTranscriptionJobName :: Maybe Text
- outputLocationType :: Maybe OutputLocationType
- specialty :: Maybe Specialty
- startTime :: Maybe POSIX
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- type' :: Maybe Type
- newMedicalTranscriptionJobSummary :: MedicalTranscriptionJobSummary
- medicalTranscriptionJobSummary_completionTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime)
- medicalTranscriptionJobSummary_contentIdentificationType :: Lens' MedicalTranscriptionJobSummary (Maybe MedicalContentIdentificationType)
- medicalTranscriptionJobSummary_creationTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime)
- medicalTranscriptionJobSummary_failureReason :: Lens' MedicalTranscriptionJobSummary (Maybe Text)
- medicalTranscriptionJobSummary_languageCode :: Lens' MedicalTranscriptionJobSummary (Maybe LanguageCode)
- medicalTranscriptionJobSummary_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJobSummary (Maybe Text)
- medicalTranscriptionJobSummary_outputLocationType :: Lens' MedicalTranscriptionJobSummary (Maybe OutputLocationType)
- medicalTranscriptionJobSummary_specialty :: Lens' MedicalTranscriptionJobSummary (Maybe Specialty)
- medicalTranscriptionJobSummary_startTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime)
- medicalTranscriptionJobSummary_transcriptionJobStatus :: Lens' MedicalTranscriptionJobSummary (Maybe TranscriptionJobStatus)
- medicalTranscriptionJobSummary_type :: Lens' MedicalTranscriptionJobSummary (Maybe Type)
- data MedicalTranscriptionSetting = MedicalTranscriptionSetting' {}
- newMedicalTranscriptionSetting :: MedicalTranscriptionSetting
- medicalTranscriptionSetting_channelIdentification :: Lens' MedicalTranscriptionSetting (Maybe Bool)
- medicalTranscriptionSetting_maxAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Natural)
- medicalTranscriptionSetting_maxSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Natural)
- medicalTranscriptionSetting_showAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Bool)
- medicalTranscriptionSetting_showSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Bool)
- medicalTranscriptionSetting_vocabularyName :: Lens' MedicalTranscriptionSetting (Maybe Text)
- data ModelSettings = ModelSettings' {}
- newModelSettings :: ModelSettings
- modelSettings_languageModelName :: Lens' ModelSettings (Maybe Text)
- data NonTalkTimeFilter = NonTalkTimeFilter' {}
- newNonTalkTimeFilter :: NonTalkTimeFilter
- nonTalkTimeFilter_absoluteTimeRange :: Lens' NonTalkTimeFilter (Maybe AbsoluteTimeRange)
- nonTalkTimeFilter_negate :: Lens' NonTalkTimeFilter (Maybe Bool)
- nonTalkTimeFilter_relativeTimeRange :: Lens' NonTalkTimeFilter (Maybe RelativeTimeRange)
- nonTalkTimeFilter_threshold :: Lens' NonTalkTimeFilter (Maybe Natural)
- data RelativeTimeRange = RelativeTimeRange' {}
- newRelativeTimeRange :: RelativeTimeRange
- relativeTimeRange_endPercentage :: Lens' RelativeTimeRange (Maybe Natural)
- relativeTimeRange_first :: Lens' RelativeTimeRange (Maybe Natural)
- relativeTimeRange_last :: Lens' RelativeTimeRange (Maybe Natural)
- relativeTimeRange_startPercentage :: Lens' RelativeTimeRange (Maybe Natural)
- data Rule = Rule' {}
- newRule :: Rule
- rule_interruptionFilter :: Lens' Rule (Maybe InterruptionFilter)
- rule_nonTalkTimeFilter :: Lens' Rule (Maybe NonTalkTimeFilter)
- rule_sentimentFilter :: Lens' Rule (Maybe SentimentFilter)
- rule_transcriptFilter :: Lens' Rule (Maybe TranscriptFilter)
- data SentimentFilter = SentimentFilter' {}
- newSentimentFilter :: NonEmpty SentimentValue -> SentimentFilter
- sentimentFilter_absoluteTimeRange :: Lens' SentimentFilter (Maybe AbsoluteTimeRange)
- sentimentFilter_negate :: Lens' SentimentFilter (Maybe Bool)
- sentimentFilter_participantRole :: Lens' SentimentFilter (Maybe ParticipantRole)
- sentimentFilter_relativeTimeRange :: Lens' SentimentFilter (Maybe RelativeTimeRange)
- sentimentFilter_sentiments :: Lens' SentimentFilter (NonEmpty SentimentValue)
- data Settings = Settings' {}
- newSettings :: Settings
- settings_channelIdentification :: Lens' Settings (Maybe Bool)
- settings_maxAlternatives :: Lens' Settings (Maybe Natural)
- settings_maxSpeakerLabels :: Lens' Settings (Maybe Natural)
- settings_showAlternatives :: Lens' Settings (Maybe Bool)
- settings_showSpeakerLabels :: Lens' Settings (Maybe Bool)
- settings_vocabularyFilterMethod :: Lens' Settings (Maybe VocabularyFilterMethod)
- settings_vocabularyFilterName :: Lens' Settings (Maybe Text)
- settings_vocabularyName :: Lens' Settings (Maybe Text)
- data Subtitles = Subtitles' {}
- newSubtitles :: Subtitles
- subtitles_formats :: Lens' Subtitles (Maybe [SubtitleFormat])
- subtitles_outputStartIndex :: Lens' Subtitles (Maybe Natural)
- data SubtitlesOutput = SubtitlesOutput' {}
- newSubtitlesOutput :: SubtitlesOutput
- subtitlesOutput_formats :: Lens' SubtitlesOutput (Maybe [SubtitleFormat])
- subtitlesOutput_outputStartIndex :: Lens' SubtitlesOutput (Maybe Natural)
- subtitlesOutput_subtitleFileUris :: Lens' SubtitlesOutput (Maybe [Text])
- data Tag = Tag' {}
- newTag :: Text -> Text -> Tag
- tag_key :: Lens' Tag Text
- tag_value :: Lens' Tag Text
- data Transcript = Transcript' {}
- newTranscript :: Transcript
- transcript_redactedTranscriptFileUri :: Lens' Transcript (Maybe Text)
- transcript_transcriptFileUri :: Lens' Transcript (Maybe Text)
- data TranscriptFilter = TranscriptFilter' {}
- newTranscriptFilter :: TranscriptFilterType -> NonEmpty Text -> TranscriptFilter
- transcriptFilter_absoluteTimeRange :: Lens' TranscriptFilter (Maybe AbsoluteTimeRange)
- transcriptFilter_negate :: Lens' TranscriptFilter (Maybe Bool)
- transcriptFilter_participantRole :: Lens' TranscriptFilter (Maybe ParticipantRole)
- transcriptFilter_relativeTimeRange :: Lens' TranscriptFilter (Maybe RelativeTimeRange)
- transcriptFilter_transcriptFilterType :: Lens' TranscriptFilter TranscriptFilterType
- transcriptFilter_targets :: Lens' TranscriptFilter (NonEmpty Text)
- data TranscriptionJob = TranscriptionJob' {
- completionTime :: Maybe POSIX
- contentRedaction :: Maybe ContentRedaction
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- identifiedLanguageScore :: Maybe Double
- identifyLanguage :: Maybe Bool
- identifyMultipleLanguages :: Maybe Bool
- jobExecutionSettings :: Maybe JobExecutionSettings
- languageCode :: Maybe LanguageCode
- languageCodes :: Maybe [LanguageCodeItem]
- languageIdSettings :: Maybe (HashMap LanguageCode LanguageIdSettings)
- languageOptions :: Maybe (NonEmpty LanguageCode)
- media :: Maybe Media
- mediaFormat :: Maybe MediaFormat
- mediaSampleRateHertz :: Maybe Natural
- modelSettings :: Maybe ModelSettings
- settings :: Maybe Settings
- startTime :: Maybe POSIX
- subtitles :: Maybe SubtitlesOutput
- tags :: Maybe (NonEmpty Tag)
- transcript :: Maybe Transcript
- transcriptionJobName :: Maybe Text
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- newTranscriptionJob :: TranscriptionJob
- transcriptionJob_completionTime :: Lens' TranscriptionJob (Maybe UTCTime)
- transcriptionJob_contentRedaction :: Lens' TranscriptionJob (Maybe ContentRedaction)
- transcriptionJob_creationTime :: Lens' TranscriptionJob (Maybe UTCTime)
- transcriptionJob_failureReason :: Lens' TranscriptionJob (Maybe Text)
- transcriptionJob_identifiedLanguageScore :: Lens' TranscriptionJob (Maybe Double)
- transcriptionJob_identifyLanguage :: Lens' TranscriptionJob (Maybe Bool)
- transcriptionJob_identifyMultipleLanguages :: Lens' TranscriptionJob (Maybe Bool)
- transcriptionJob_jobExecutionSettings :: Lens' TranscriptionJob (Maybe JobExecutionSettings)
- transcriptionJob_languageCode :: Lens' TranscriptionJob (Maybe LanguageCode)
- transcriptionJob_languageCodes :: Lens' TranscriptionJob (Maybe [LanguageCodeItem])
- transcriptionJob_languageIdSettings :: Lens' TranscriptionJob (Maybe (HashMap LanguageCode LanguageIdSettings))
- transcriptionJob_languageOptions :: Lens' TranscriptionJob (Maybe (NonEmpty LanguageCode))
- transcriptionJob_media :: Lens' TranscriptionJob (Maybe Media)
- transcriptionJob_mediaFormat :: Lens' TranscriptionJob (Maybe MediaFormat)
- transcriptionJob_mediaSampleRateHertz :: Lens' TranscriptionJob (Maybe Natural)
- transcriptionJob_modelSettings :: Lens' TranscriptionJob (Maybe ModelSettings)
- transcriptionJob_settings :: Lens' TranscriptionJob (Maybe Settings)
- transcriptionJob_startTime :: Lens' TranscriptionJob (Maybe UTCTime)
- transcriptionJob_subtitles :: Lens' TranscriptionJob (Maybe SubtitlesOutput)
- transcriptionJob_tags :: Lens' TranscriptionJob (Maybe (NonEmpty Tag))
- transcriptionJob_transcript :: Lens' TranscriptionJob (Maybe Transcript)
- transcriptionJob_transcriptionJobName :: Lens' TranscriptionJob (Maybe Text)
- transcriptionJob_transcriptionJobStatus :: Lens' TranscriptionJob (Maybe TranscriptionJobStatus)
- data TranscriptionJobSummary = TranscriptionJobSummary' {
- completionTime :: Maybe POSIX
- contentRedaction :: Maybe ContentRedaction
- creationTime :: Maybe POSIX
- failureReason :: Maybe Text
- identifiedLanguageScore :: Maybe Double
- identifyLanguage :: Maybe Bool
- identifyMultipleLanguages :: Maybe Bool
- languageCode :: Maybe LanguageCode
- languageCodes :: Maybe [LanguageCodeItem]
- modelSettings :: Maybe ModelSettings
- outputLocationType :: Maybe OutputLocationType
- startTime :: Maybe POSIX
- transcriptionJobName :: Maybe Text
- transcriptionJobStatus :: Maybe TranscriptionJobStatus
- newTranscriptionJobSummary :: TranscriptionJobSummary
- transcriptionJobSummary_completionTime :: Lens' TranscriptionJobSummary (Maybe UTCTime)
- transcriptionJobSummary_contentRedaction :: Lens' TranscriptionJobSummary (Maybe ContentRedaction)
- transcriptionJobSummary_creationTime :: Lens' TranscriptionJobSummary (Maybe UTCTime)
- transcriptionJobSummary_failureReason :: Lens' TranscriptionJobSummary (Maybe Text)
- transcriptionJobSummary_identifiedLanguageScore :: Lens' TranscriptionJobSummary (Maybe Double)
- transcriptionJobSummary_identifyLanguage :: Lens' TranscriptionJobSummary (Maybe Bool)
- transcriptionJobSummary_identifyMultipleLanguages :: Lens' TranscriptionJobSummary (Maybe Bool)
- transcriptionJobSummary_languageCode :: Lens' TranscriptionJobSummary (Maybe LanguageCode)
- transcriptionJobSummary_languageCodes :: Lens' TranscriptionJobSummary (Maybe [LanguageCodeItem])
- transcriptionJobSummary_modelSettings :: Lens' TranscriptionJobSummary (Maybe ModelSettings)
- transcriptionJobSummary_outputLocationType :: Lens' TranscriptionJobSummary (Maybe OutputLocationType)
- transcriptionJobSummary_startTime :: Lens' TranscriptionJobSummary (Maybe UTCTime)
- transcriptionJobSummary_transcriptionJobName :: Lens' TranscriptionJobSummary (Maybe Text)
- transcriptionJobSummary_transcriptionJobStatus :: Lens' TranscriptionJobSummary (Maybe TranscriptionJobStatus)
- data VocabularyFilterInfo = VocabularyFilterInfo' {}
- newVocabularyFilterInfo :: VocabularyFilterInfo
- vocabularyFilterInfo_languageCode :: Lens' VocabularyFilterInfo (Maybe LanguageCode)
- vocabularyFilterInfo_lastModifiedTime :: Lens' VocabularyFilterInfo (Maybe UTCTime)
- vocabularyFilterInfo_vocabularyFilterName :: Lens' VocabularyFilterInfo (Maybe Text)
- data VocabularyInfo = VocabularyInfo' {}
- newVocabularyInfo :: VocabularyInfo
- vocabularyInfo_languageCode :: Lens' VocabularyInfo (Maybe LanguageCode)
- vocabularyInfo_lastModifiedTime :: Lens' VocabularyInfo (Maybe UTCTime)
- vocabularyInfo_vocabularyName :: Lens' VocabularyInfo (Maybe Text)
- vocabularyInfo_vocabularyState :: Lens' VocabularyInfo (Maybe VocabularyState)
Service Configuration
defaultService :: Service Source #
API version 2017-10-26 of the Amazon Transcribe Service SDK configuration.
Errors
_BadRequestException :: AsError a => Fold a ServiceError Source #
Your request didn't pass one or more validation tests. This can occur
when the entity you're trying to delete doesn't exist or if it's in a
non-terminal state (such as `IN PROGRESS`). See the exception message
field for more information.
_ConflictException :: AsError a => Fold a ServiceError Source #
A resource already exists with this name. Resource names must be unique within an Amazon Web Services account.
_InternalFailureException :: AsError a => Fold a ServiceError Source #
There was an internal error. Check the error message, correct the issue, and try your request again.
_LimitExceededException :: AsError a => Fold a ServiceError Source #
You've either sent too many requests or your input file is too long. Wait before retrying your request, or use a smaller file and try your request again.
_NotFoundException :: AsError a => Fold a ServiceError Source #
We can't find the requested resource. Check that the specified name is correct and try your request again.
BaseModelName
newtype BaseModelName Source #
pattern BaseModelName_NarrowBand :: BaseModelName | |
pattern BaseModelName_WideBand :: BaseModelName |
Instances
CLMLanguageCode
newtype CLMLanguageCode Source #
pattern CLMLanguageCode_De_DE :: CLMLanguageCode | |
pattern CLMLanguageCode_En_AU :: CLMLanguageCode | |
pattern CLMLanguageCode_En_GB :: CLMLanguageCode | |
pattern CLMLanguageCode_En_US :: CLMLanguageCode | |
pattern CLMLanguageCode_Es_US :: CLMLanguageCode | |
pattern CLMLanguageCode_Hi_IN :: CLMLanguageCode | |
pattern CLMLanguageCode_Ja_JP :: CLMLanguageCode |
Instances
CallAnalyticsJobStatus
newtype CallAnalyticsJobStatus Source #
Instances
InputType
pattern InputType_POST_CALL :: InputType | |
pattern InputType_REAL_TIME :: InputType |
Instances
LanguageCode
newtype LanguageCode Source #
Instances
MediaFormat
newtype MediaFormat Source #
pattern MediaFormat_Amr :: MediaFormat | |
pattern MediaFormat_Flac :: MediaFormat | |
pattern MediaFormat_Mp3 :: MediaFormat | |
pattern MediaFormat_Mp4 :: MediaFormat | |
pattern MediaFormat_Ogg :: MediaFormat | |
pattern MediaFormat_Wav :: MediaFormat | |
pattern MediaFormat_Webm :: MediaFormat |
Instances
MedicalContentIdentificationType
newtype MedicalContentIdentificationType Source #
Instances
ModelStatus
newtype ModelStatus Source #
pattern ModelStatus_COMPLETED :: ModelStatus | |
pattern ModelStatus_FAILED :: ModelStatus | |
pattern ModelStatus_IN_PROGRESS :: ModelStatus |
Instances
OutputLocationType
newtype OutputLocationType Source #
pattern OutputLocationType_CUSTOMER_BUCKET :: OutputLocationType | |
pattern OutputLocationType_SERVICE_BUCKET :: OutputLocationType |
Instances
ParticipantRole
newtype ParticipantRole Source #
pattern ParticipantRole_AGENT :: ParticipantRole | |
pattern ParticipantRole_CUSTOMER :: ParticipantRole |
Instances
PiiEntityType
newtype PiiEntityType Source #
pattern PiiEntityType_ADDRESS :: PiiEntityType | |
pattern PiiEntityType_ALL :: PiiEntityType | |
pattern PiiEntityType_BANK_ACCOUNT_NUMBER :: PiiEntityType | |
pattern PiiEntityType_BANK_ROUTING :: PiiEntityType | |
pattern PiiEntityType_CREDIT_DEBIT_CVV :: PiiEntityType | |
pattern PiiEntityType_CREDIT_DEBIT_EXPIRY :: PiiEntityType | |
pattern PiiEntityType_CREDIT_DEBIT_NUMBER :: PiiEntityType | |
pattern PiiEntityType_EMAIL :: PiiEntityType | |
pattern PiiEntityType_NAME :: PiiEntityType | |
pattern PiiEntityType_PHONE :: PiiEntityType | |
pattern PiiEntityType_PIN :: PiiEntityType | |
pattern PiiEntityType_SSN :: PiiEntityType |
Instances
RedactionOutput
newtype RedactionOutput Source #
pattern RedactionOutput_Redacted :: RedactionOutput | |
pattern RedactionOutput_Redacted_and_unredacted :: RedactionOutput |
Instances
RedactionType
newtype RedactionType Source #
pattern RedactionType_PII :: RedactionType |
Instances
SentimentValue
newtype SentimentValue Source #
pattern SentimentValue_MIXED :: SentimentValue | |
pattern SentimentValue_NEGATIVE :: SentimentValue | |
pattern SentimentValue_NEUTRAL :: SentimentValue | |
pattern SentimentValue_POSITIVE :: SentimentValue |
Instances
Specialty
newtype Specialty Source #
pattern Specialty_PRIMARYCARE :: Specialty |
Instances
SubtitleFormat
newtype SubtitleFormat Source #
pattern SubtitleFormat_Srt :: SubtitleFormat | |
pattern SubtitleFormat_Vtt :: SubtitleFormat |
Instances
TranscriptFilterType
newtype TranscriptFilterType Source #
pattern TranscriptFilterType_EXACT :: TranscriptFilterType |
Instances
TranscriptionJobStatus
newtype TranscriptionJobStatus Source #
Instances
Type
newtype Type Source #
pattern Type_CONVERSATION :: Type | |
pattern Type_DICTATION :: Type |
Instances
VocabularyFilterMethod
newtype VocabularyFilterMethod Source #
pattern VocabularyFilterMethod_Mask :: VocabularyFilterMethod | |
pattern VocabularyFilterMethod_Remove :: VocabularyFilterMethod | |
pattern VocabularyFilterMethod_Tag :: VocabularyFilterMethod |
Instances
VocabularyState
newtype VocabularyState Source #
pattern VocabularyState_FAILED :: VocabularyState | |
pattern VocabularyState_PENDING :: VocabularyState | |
pattern VocabularyState_READY :: VocabularyState |
Instances
AbsoluteTimeRange
data AbsoluteTimeRange Source #
A time range, in milliseconds, between two points in your media file.
You can use StartTime
and EndTime
to search a custom segment. For
example, setting StartTime
to 10000 and EndTime
to 50000 only
searches for your specified criteria in the audio contained between the
10,000 millisecond mark and the 50,000 millisecond mark of your media
file. You must use StartTime
and EndTime
as a set; that is, if you
include one, you must include both.
You can also use First
to search from the start of the audio until the
time that you specify, or Last
to search from the time that you
specify until the end of the audio. For example, setting First
to
50000 only searches for your specified criteria in the audio contained
between the start of the media file to the 50,000 millisecond mark. You
can use First
and Last
independently of each other.
If you prefer to use percentages instead of milliseconds, see RelativeTimeRange.
See: newAbsoluteTimeRange
smart constructor.
AbsoluteTimeRange' | |
|
Instances
newAbsoluteTimeRange :: AbsoluteTimeRange Source #
Create a value of AbsoluteTimeRange
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:endTime:AbsoluteTimeRange'
, absoluteTimeRange_endTime
- The time, in milliseconds, when Amazon Transcribe stops searching for
the specified criteria in your audio. If you include EndTime
in your
request, you must also include StartTime
.
$sel:first:AbsoluteTimeRange'
, absoluteTimeRange_first
- The time, in milliseconds, from the start of your media file until the
specified value. Amazon Transcribe searches for your specified criteria
in this time segment.
$sel:last:AbsoluteTimeRange'
, absoluteTimeRange_last
- The time, in milliseconds, from the specified value until the end of
your media file. Amazon Transcribe searches for your specified criteria
in this time segment.
$sel:startTime:AbsoluteTimeRange'
, absoluteTimeRange_startTime
- The time, in milliseconds, when Amazon Transcribe starts searching for
the specified criteria in your audio. If you include StartTime
in your
request, you must also include EndTime
.
absoluteTimeRange_endTime :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
The time, in milliseconds, when Amazon Transcribe stops searching for
the specified criteria in your audio. If you include EndTime
in your
request, you must also include StartTime
.
absoluteTimeRange_first :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
The time, in milliseconds, from the start of your media file until the specified value. Amazon Transcribe searches for your specified criteria in this time segment.
absoluteTimeRange_last :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
The time, in milliseconds, from the specified value until the end of your media file. Amazon Transcribe searches for your specified criteria in this time segment.
absoluteTimeRange_startTime :: Lens' AbsoluteTimeRange (Maybe Natural) Source #
The time, in milliseconds, when Amazon Transcribe starts searching for
the specified criteria in your audio. If you include StartTime
in your
request, you must also include EndTime
.
CallAnalyticsJob
data CallAnalyticsJob Source #
Provides detailed information about a Call Analytics job.
To view the job's status, refer to CallAnalyticsJobStatus
. If the
status is COMPLETED
, the job is finished. You can find your completed
transcript at the URI specified in TranscriptFileUri
. If the status is
FAILED
, FailureReason
provides details on why your transcription job
failed.
If you enabled personally identifiable information (PII) redaction, the
redacted transcript appears at the location specified in
RedactedTranscriptFileUri
.
If you chose to redact the audio in your media file, you can find your
redacted media file at the location specified in the
RedactedMediaFileUri
field of your response.
See: newCallAnalyticsJob
smart constructor.
CallAnalyticsJob' | |
|
Instances
newCallAnalyticsJob :: CallAnalyticsJob Source #
Create a value of CallAnalyticsJob
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:callAnalyticsJobName:CallAnalyticsJob'
, callAnalyticsJob_callAnalyticsJobName
- The name of the Call Analytics job. Job names are case sensitive and
must be unique within an Amazon Web Services account.
$sel:callAnalyticsJobStatus:CallAnalyticsJob'
, callAnalyticsJob_callAnalyticsJobStatus
- Provides the status of the specified Call Analytics job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
$sel:channelDefinitions:CallAnalyticsJob'
, callAnalyticsJob_channelDefinitions
- Indicates which speaker is on which channel.
$sel:completionTime:CallAnalyticsJob'
, callAnalyticsJob_completionTime
- The date and time the specified Call Analytics job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that started processing at 12:33 PM UTC-7 on May 4, 2022.
$sel:creationTime:CallAnalyticsJob'
, callAnalyticsJob_creationTime
- The date and time the specified Call Analytics job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:dataAccessRoleArn:CallAnalyticsJob'
, callAnalyticsJob_dataAccessRoleArn
- The Amazon Resource Name (ARN) you included in your request.
$sel:failureReason:CallAnalyticsJob'
, callAnalyticsJob_failureReason
- If CallAnalyticsJobStatus
is FAILED
, FailureReason
contains
information about why the Call Analytics job request failed.
The FailureReason
field contains one of the following values:
- Unsupported media format: the media format specified in MediaFormat isn't valid. Refer to MediaFormat for a list of supported formats.
- The media format provided does not match the detected media format: the media format specified in MediaFormat doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
- Invalid sample rate for audio file: the sample rate specified in MediaSampleRateHertz isn't valid. The sample rate must be between 8,000 and 48,000 hertz.
- The sample rate provided does not match the detected sample rate: the sample rate specified in MediaSampleRateHertz doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
- Invalid file size: file size too large. The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
- Invalid number of channels: number of channels too large. Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
$sel:identifiedLanguageScore:CallAnalyticsJob'
, callAnalyticsJob_identifiedLanguageScore
- The confidence score associated with the language identified in your
media file.
Confidence scores are values between 0 and 1; a larger value indicates a higher probability that the identified language correctly matches the language spoken in your media.
$sel:languageCode:CallAnalyticsJob'
, callAnalyticsJob_languageCode
- The language code used to create your Call Analytics job. For a list of
supported languages and their associated language codes, refer to the
Supported languages
table.
If you don't know the language spoken in your media file, you can omit this field and let Amazon Transcribe automatically identify the language of your media. To improve the accuracy of language identification, you can include several language codes and Amazon Transcribe chooses the closest match for your transcription.
$sel:media:CallAnalyticsJob'
, callAnalyticsJob_media
- Provides the Amazon S3 location of the media file you used in your Call
Analytics request.
$sel:mediaFormat:CallAnalyticsJob'
, callAnalyticsJob_mediaFormat
- The format of the input media file.
$sel:mediaSampleRateHertz:CallAnalyticsJob'
, callAnalyticsJob_mediaSampleRateHertz
- The sample rate, in hertz, of the audio track in your input media file.
$sel:settings:CallAnalyticsJob'
, callAnalyticsJob_settings
- Provides information on any additional settings that were included in
your request. Additional settings include content redaction and language
identification settings.
$sel:startTime:CallAnalyticsJob'
, callAnalyticsJob_startTime
- The date and time the specified Call Analytics job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:transcript:CallAnalyticsJob'
, callAnalyticsJob_transcript
- Undocumented member.
callAnalyticsJob_callAnalyticsJobName :: Lens' CallAnalyticsJob (Maybe Text) Source #
The name of the Call Analytics job. Job names are case sensitive and must be unique within an Amazon Web Services account.
callAnalyticsJob_callAnalyticsJobStatus :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobStatus) Source #
Provides the status of the specified Call Analytics job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
callAnalyticsJob_channelDefinitions :: Lens' CallAnalyticsJob (Maybe (NonEmpty ChannelDefinition)) Source #
Indicates which speaker is on which channel.
callAnalyticsJob_completionTime :: Lens' CallAnalyticsJob (Maybe UTCTime) Source #
The date and time the specified Call Analytics job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that started processing at 12:33 PM UTC-7 on May 4, 2022.
callAnalyticsJob_creationTime :: Lens' CallAnalyticsJob (Maybe UTCTime) Source #
The date and time the specified Call Analytics job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
callAnalyticsJob_dataAccessRoleArn :: Lens' CallAnalyticsJob (Maybe Text) Source #
The Amazon Resource Name (ARN) you included in your request.
callAnalyticsJob_failureReason :: Lens' CallAnalyticsJob (Maybe Text) Source #
If CallAnalyticsJobStatus
is FAILED
, FailureReason
contains
information about why the Call Analytics job request failed.
The FailureReason
field contains one of the following values:
- Unsupported media format: the media format specified in MediaFormat isn't valid. Refer to MediaFormat for a list of supported formats.
- The media format provided does not match the detected media format: the media format specified in MediaFormat doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
- Invalid sample rate for audio file: the sample rate specified in MediaSampleRateHertz isn't valid. The sample rate must be between 8,000 and 48,000 hertz.
- The sample rate provided does not match the detected sample rate: the sample rate specified in MediaSampleRateHertz doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
- Invalid file size: file size too large. The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
- Invalid number of channels: number of channels too large. Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
callAnalyticsJob_identifiedLanguageScore :: Lens' CallAnalyticsJob (Maybe Double) Source #
The confidence score associated with the language identified in your media file.
Confidence scores are values between 0 and 1; a larger value indicates a higher probability that the identified language correctly matches the language spoken in your media.
callAnalyticsJob_languageCode :: Lens' CallAnalyticsJob (Maybe LanguageCode) Source #
The language code used to create your Call Analytics job. For a list of supported languages and their associated language codes, refer to the Supported languages table.
If you don't know the language spoken in your media file, you can omit this field and let Amazon Transcribe automatically identify the language of your media. To improve the accuracy of language identification, you can include several language codes and Amazon Transcribe chooses the closest match for your transcription.
callAnalyticsJob_media :: Lens' CallAnalyticsJob (Maybe Media) Source #
Provides the Amazon S3 location of the media file you used in your Call Analytics request.
callAnalyticsJob_mediaFormat :: Lens' CallAnalyticsJob (Maybe MediaFormat) Source #
The format of the input media file.
callAnalyticsJob_mediaSampleRateHertz :: Lens' CallAnalyticsJob (Maybe Natural) Source #
The sample rate, in hertz, of the audio track in your input media file.
callAnalyticsJob_settings :: Lens' CallAnalyticsJob (Maybe CallAnalyticsJobSettings) Source #
Provides information on any additional settings that were included in your request. Additional settings include content redaction and language identification settings.
callAnalyticsJob_startTime :: Lens' CallAnalyticsJob (Maybe UTCTime) Source #
The date and time the specified Call Analytics job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
callAnalyticsJob_transcript :: Lens' CallAnalyticsJob (Maybe Transcript) Source #
Undocumented member.
CallAnalyticsJobSettings
data CallAnalyticsJobSettings Source #
Provides additional optional settings for your request, including content redaction and automatic language identification; it also allows you to apply custom language models, custom vocabulary filters, and custom vocabularies.
See: newCallAnalyticsJobSettings
smart constructor.
CallAnalyticsJobSettings' | |
|
Instances
newCallAnalyticsJobSettings :: CallAnalyticsJobSettings Source #
Create a value of CallAnalyticsJobSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:contentRedaction:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_contentRedaction
- Undocumented member.
$sel:languageIdSettings:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_languageIdSettings
- If using automatic language identification in your request and you want
to apply a custom language model, a custom vocabulary, or a custom
vocabulary filter, include LanguageIdSettings
with the relevant
sub-parameters (VocabularyName
, LanguageModelName
, and
VocabularyFilterName
).
LanguageIdSettings
supports two to five language codes. Each language
code you include can have an associated custom language model, custom
vocabulary, and custom vocabulary filter. The language codes that you
specify must match the languages of the associated custom language
models, custom vocabularies, and custom vocabulary filters.
It's recommended that you include LanguageOptions
when using
LanguageIdSettings
to ensure that the correct language dialect is
identified. For example, if you specify a custom vocabulary that is in
en-US
but Amazon Transcribe determines that the language spoken in
your media is en-AU
, your custom vocabulary is not applied to your
transcription. If you include LanguageOptions
and include en-US
as
the only English language dialect, your custom vocabulary is applied
to your transcription.
If you want to include a custom language model, custom vocabulary, or
custom vocabulary filter with your request but do not want to use
automatic language identification, instead use the parameter with the
LanguageModelName
, VocabularyName
, or VocabularyFilterName
sub-parameters.
For a list of languages supported with Call Analytics, refer to Supported languages and language-specific features.
CallAnalyticsJobSettings
, callAnalyticsJobSettings_languageModelName
- The name of the custom language model you want to use when processing
your Call Analytics job. Note that custom language model names are case
sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
$sel:languageOptions:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_languageOptions
- You can specify two or more language codes that represent the languages
you think may be present in your media. Including more than five is not
recommended. If you're unsure what languages are present, do not
include this parameter.
Including language options can improve the accuracy of language identification.
For a list of languages supported with Call Analytics, refer to the Supported languages table.
To transcribe speech in Modern Standard Arabic (ar-SA
), your media
file must be encoded at a sample rate of 16,000 Hz or higher.
$sel:vocabularyFilterMethod:CallAnalyticsJobSettings'
, callAnalyticsJobSettings_vocabularyFilterMethod
- Specify how you want your custom vocabulary filter applied to your
transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
CallAnalyticsJobSettings
, callAnalyticsJobSettings_vocabularyFilterName
- The name of the custom vocabulary filter you want to include in your
Call Analytics transcription request. Custom vocabulary filter names are
case sensitive.
Note that if you include VocabularyFilterName
in your request, you
must also include VocabularyFilterMethod
.
CallAnalyticsJobSettings
, callAnalyticsJobSettings_vocabularyName
- The name of the custom vocabulary you want to include in your Call
Analytics transcription request. Custom vocabulary names are case
sensitive.
callAnalyticsJobSettings_contentRedaction :: Lens' CallAnalyticsJobSettings (Maybe ContentRedaction) Source #
Undocumented member.
callAnalyticsJobSettings_languageIdSettings :: Lens' CallAnalyticsJobSettings (Maybe (HashMap LanguageCode LanguageIdSettings)) Source #
If using automatic language identification in your request and you want
to apply a custom language model, a custom vocabulary, or a custom
vocabulary filter, include LanguageIdSettings
with the relevant
sub-parameters (VocabularyName
, LanguageModelName
, and
VocabularyFilterName
).
LanguageIdSettings
supports two to five language codes. Each language
code you include can have an associated custom language model, custom
vocabulary, and custom vocabulary filter. The language codes that you
specify must match the languages of the associated custom language
models, custom vocabularies, and custom vocabulary filters.
It's recommended that you include LanguageOptions
when using
LanguageIdSettings
to ensure that the correct language dialect is
identified. For example, if you specify a custom vocabulary that is in
en-US
but Amazon Transcribe determines that the language spoken in
your media is en-AU
, your custom vocabulary is not applied to your
transcription. If you include LanguageOptions
and include en-US
as
the only English language dialect, your custom vocabulary is applied
to your transcription.
If you want to include a custom language model, custom vocabulary, or
custom vocabulary filter with your request but do not want to use
automatic language identification, instead use the parameter with the
LanguageModelName
, VocabularyName
, or VocabularyFilterName
sub-parameters.
For a list of languages supported with Call Analytics, refer to Supported languages and language-specific features.
callAnalyticsJobSettings_languageModelName :: Lens' CallAnalyticsJobSettings (Maybe Text) Source #
The name of the custom language model you want to use when processing your Call Analytics job. Note that custom language model names are case sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
callAnalyticsJobSettings_languageOptions :: Lens' CallAnalyticsJobSettings (Maybe (NonEmpty LanguageCode)) Source #
You can specify two or more language codes that represent the languages you think may be present in your media. Including more than five is not recommended. If you're unsure what languages are present, do not include this parameter.
Including language options can improve the accuracy of language identification.
For a list of languages supported with Call Analytics, refer to the Supported languages table.
To transcribe speech in Modern Standard Arabic (ar-SA
), your media
file must be encoded at a sample rate of 16,000 Hz or higher.
callAnalyticsJobSettings_vocabularyFilterMethod :: Lens' CallAnalyticsJobSettings (Maybe VocabularyFilterMethod) Source #
Specify how you want your custom vocabulary filter applied to your transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
callAnalyticsJobSettings_vocabularyFilterName :: Lens' CallAnalyticsJobSettings (Maybe Text) Source #
The name of the custom vocabulary filter you want to include in your Call Analytics transcription request. Custom vocabulary filter names are case sensitive.
Note that if you include VocabularyFilterName
in your request, you
must also include VocabularyFilterMethod
.
callAnalyticsJobSettings_vocabularyName :: Lens' CallAnalyticsJobSettings (Maybe Text) Source #
The name of the custom vocabulary you want to include in your Call Analytics transcription request. Custom vocabulary names are case sensitive.
CallAnalyticsJobSummary
data CallAnalyticsJobSummary Source #
Provides detailed information about a specific Call Analytics job.
See: newCallAnalyticsJobSummary
smart constructor.
CallAnalyticsJobSummary' | |
|
Instances
newCallAnalyticsJobSummary :: CallAnalyticsJobSummary Source #
Create a value of CallAnalyticsJobSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:callAnalyticsJobName:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_callAnalyticsJobName
- The name of the Call Analytics job. Job names are case sensitive and
must be unique within an Amazon Web Services account.
$sel:callAnalyticsJobStatus:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_callAnalyticsJobStatus
- Provides the status of your Call Analytics job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
$sel:completionTime:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_completionTime
- The date and time the specified Call Analytics job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that started processing at 12:33 PM UTC-7 on May 4, 2022.
$sel:creationTime:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_creationTime
- The date and time the specified Call Analytics job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:failureReason:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_failureReason
- If CallAnalyticsJobStatus
is FAILED
, FailureReason
contains
information about why the Call Analytics job failed. See also:
Common Errors.
$sel:languageCode:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_languageCode
- The language code used to create your Call Analytics transcription.
$sel:startTime:CallAnalyticsJobSummary'
, callAnalyticsJobSummary_startTime
- The date and time your Call Analytics job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
callAnalyticsJobSummary_callAnalyticsJobName :: Lens' CallAnalyticsJobSummary (Maybe Text) Source #
The name of the Call Analytics job. Job names are case sensitive and must be unique within an Amazon Web Services account.
callAnalyticsJobSummary_callAnalyticsJobStatus :: Lens' CallAnalyticsJobSummary (Maybe CallAnalyticsJobStatus) Source #
Provides the status of your Call Analytics job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
callAnalyticsJobSummary_completionTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime) Source #
The date and time the specified Call Analytics job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that started processing at 12:33 PM UTC-7 on May 4, 2022.
callAnalyticsJobSummary_creationTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime) Source #
The date and time the specified Call Analytics job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
callAnalyticsJobSummary_failureReason :: Lens' CallAnalyticsJobSummary (Maybe Text) Source #
If CallAnalyticsJobStatus
is FAILED
, FailureReason
contains
information about why the Call Analytics job failed. See also:
Common Errors.
callAnalyticsJobSummary_languageCode :: Lens' CallAnalyticsJobSummary (Maybe LanguageCode) Source #
The language code used to create your Call Analytics transcription.
callAnalyticsJobSummary_startTime :: Lens' CallAnalyticsJobSummary (Maybe UTCTime) Source #
The date and time your Call Analytics job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
CategoryProperties
data CategoryProperties Source #
Provides you with the properties of the Call Analytics category you specified in your request. This includes the list of rules that define the specified category.
See: newCategoryProperties
smart constructor.
CategoryProperties' | |
|
Instances
newCategoryProperties :: CategoryProperties Source #
Create a value of CategoryProperties
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:categoryName:CategoryProperties'
, categoryProperties_categoryName
- The name of the Call Analytics category. Category names are case
sensitive and must be unique within an Amazon Web Services account.
$sel:createTime:CategoryProperties'
, categoryProperties_createTime
- The date and time the specified Call Analytics category was created.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
$sel:inputType:CategoryProperties'
, categoryProperties_inputType
- The input type associated with the specified category. POST_CALL
refers to a category that is applied to batch transcriptions;
REAL_TIME
refers to a category that is applied to streaming
transcriptions.
$sel:lastUpdateTime:CategoryProperties'
, categoryProperties_lastUpdateTime
- The date and time the specified Call Analytics category was last
updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-05T12:45:32.691000-07:00
represents 12:45 PM UTC-7 on
May 5, 2022.
$sel:rules:CategoryProperties'
, categoryProperties_rules
- The rules used to define a Call Analytics category. Each category can
have between 1 and 20 rules.
categoryProperties_categoryName :: Lens' CategoryProperties (Maybe Text) Source #
The name of the Call Analytics category. Category names are case sensitive and must be unique within an Amazon Web Services account.
categoryProperties_createTime :: Lens' CategoryProperties (Maybe UTCTime) Source #
The date and time the specified Call Analytics category was created.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
categoryProperties_inputType :: Lens' CategoryProperties (Maybe InputType) Source #
The input type associated with the specified category. POST_CALL
refers to a category that is applied to batch transcriptions;
REAL_TIME
refers to a category that is applied to streaming
transcriptions.
categoryProperties_lastUpdateTime :: Lens' CategoryProperties (Maybe UTCTime) Source #
The date and time the specified Call Analytics category was last updated.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-05T12:45:32.691000-07:00
represents 12:45 PM UTC-7 on
May 5, 2022.
categoryProperties_rules :: Lens' CategoryProperties (Maybe (NonEmpty Rule)) Source #
The rules used to define a Call Analytics category. Each category can have between 1 and 20 rules.
ChannelDefinition
data ChannelDefinition Source #
Makes it possible to specify which speaker is on which channel. For
example, if your agent is the first participant to speak, you would set
ChannelId
to 0
(to indicate the first channel) and ParticipantRole
to AGENT
(to indicate that it's the agent speaking).
See: newChannelDefinition
smart constructor.
ChannelDefinition' | |
|
Instances
newChannelDefinition :: ChannelDefinition Source #
Create a value of ChannelDefinition
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:channelId:ChannelDefinition'
, channelDefinition_channelId
- Specify the audio channel you want to define.
$sel:participantRole:ChannelDefinition'
, channelDefinition_participantRole
- Specify the speaker you want to define. Omitting this parameter is
equivalent to specifying both participants.
channelDefinition_channelId :: Lens' ChannelDefinition (Maybe Natural) Source #
Specify the audio channel you want to define.
channelDefinition_participantRole :: Lens' ChannelDefinition (Maybe ParticipantRole) Source #
Specify the speaker you want to define. Omitting this parameter is equivalent to specifying both participants.
ContentRedaction
data ContentRedaction Source #
Makes it possible to redact or flag specified personally identifiable
information (PII) in your transcript. If you use ContentRedaction
, you
must also include the sub-parameters: PiiEntityTypes
,
RedactionOutput
, and RedactionType
.
See: newContentRedaction
smart constructor.
ContentRedaction' | |
|
Instances
Create a value of ContentRedaction
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:piiEntityTypes:ContentRedaction'
, contentRedaction_piiEntityTypes
- Specify which types of personally identifiable information (PII) you
want to redact in your transcript. You can include as many types as
you'd like, or you can select ALL
.
$sel:redactionType:ContentRedaction'
, contentRedaction_redactionType
- Specify the category of information you want to redact; PII
(personally identifiable information) is the only valid value. You can
use PiiEntityTypes
to choose which types of PII you want to redact.
$sel:redactionOutput:ContentRedaction'
, contentRedaction_redactionOutput
- Specify if you want only a redacted transcript, or if you want a
redacted and an unredacted transcript.
When you choose redacted
, Amazon Transcribe creates only a redacted
transcript.
When you choose redacted_and_unredacted
, Amazon Transcribe creates a
redacted and an unredacted transcript (as two separate files).
contentRedaction_piiEntityTypes :: Lens' ContentRedaction (Maybe [PiiEntityType]) Source #
Specify which types of personally identifiable information (PII) you
want to redact in your transcript. You can include as many types as
you'd like, or you can select ALL
.
contentRedaction_redactionType :: Lens' ContentRedaction RedactionType Source #
Specify the category of information you want to redact; PII
(personally identifiable information) is the only valid value. You can
use PiiEntityTypes
to choose which types of PII you want to redact.
contentRedaction_redactionOutput :: Lens' ContentRedaction RedactionOutput Source #
Specify if you want only a redacted transcript, or if you want a redacted and an unredacted transcript.
When you choose redacted
, Amazon Transcribe creates only a redacted
transcript.
When you choose redacted_and_unredacted
, Amazon Transcribe creates a
redacted and an unredacted transcript (as two separate files).
InputDataConfig
data InputDataConfig Source #
Contains the Amazon S3 location of the training data you want to use to create a new custom language model, and permissions to access this location.
When using InputDataConfig
, you must include these sub-parameters:
S3Uri
and DataAccessRoleArn
. You can optionally include
TuningDataS3Uri
.
See: newInputDataConfig
smart constructor.
InputDataConfig' | |
|
Instances
newInputDataConfig :: InputDataConfig Source #
Create a value of InputDataConfig
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:tuningDataS3Uri:InputDataConfig'
, inputDataConfig_tuningDataS3Uri
- The Amazon S3 location (URI) of the text files you want to use to tune
your custom language model.
Here's an example URI path:
s3://DOC-EXAMPLE-BUCKET/my-model-tuning-data/
$sel:s3Uri:InputDataConfig'
, inputDataConfig_s3Uri
- The Amazon S3 location (URI) of the text files you want to use to train
your custom language model.
Here's an example URI path:
s3://DOC-EXAMPLE-BUCKET/my-model-training-data/
$sel:dataAccessRoleArn:InputDataConfig'
, inputDataConfig_dataAccessRoleArn
- The Amazon Resource Name (ARN) of an IAM role that has permissions to
access the Amazon S3 bucket that contains your input files. If the role
that you specify doesn’t have the appropriate permissions to access the
specified Amazon S3 location, your request fails.
IAM role ARNs have the format
arn:partition:iam::account:role/role-name-with-path
. For example:
arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
inputDataConfig_tuningDataS3Uri :: Lens' InputDataConfig (Maybe Text) Source #
The Amazon S3 location (URI) of the text files you want to use to tune your custom language model.
Here's an example URI path:
s3://DOC-EXAMPLE-BUCKET/my-model-tuning-data/
inputDataConfig_s3Uri :: Lens' InputDataConfig Text Source #
The Amazon S3 location (URI) of the text files you want to use to train your custom language model.
Here's an example URI path:
s3://DOC-EXAMPLE-BUCKET/my-model-training-data/
inputDataConfig_dataAccessRoleArn :: Lens' InputDataConfig Text Source #
The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format
arn:partition:iam::account:role/role-name-with-path
. For example:
arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM ARNs.
InterruptionFilter
data InterruptionFilter Source #
Flag the presence or absence of interruptions in your Call Analytics transcription output.
Rules using InterruptionFilter
are designed to match:
- Instances where an agent interrupts a customer
- Instances where a customer interrupts an agent
- Either participant interrupting the other
- A lack of interruptions
See Rule criteria for batch categories for usage examples.
See: newInterruptionFilter
smart constructor.
InterruptionFilter' | |
|
Instances
newInterruptionFilter :: InterruptionFilter Source #
Create a value of InterruptionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:absoluteTimeRange:InterruptionFilter'
, interruptionFilter_absoluteTimeRange
- Makes it possible to specify a time range (in milliseconds) in your
audio, during which you want to search for an interruption. See
AbsoluteTimeRange for more detail.
$sel:negate:InterruptionFilter'
, interruptionFilter_negate
- Set to TRUE
to flag speech that does not contain interruptions. Set to
FALSE
to flag speech that contains interruptions.
$sel:participantRole:InterruptionFilter'
, interruptionFilter_participantRole
- Specify the interrupter that you want to flag. Omitting this parameter
is equivalent to specifying both participants.
$sel:relativeTimeRange:InterruptionFilter'
, interruptionFilter_relativeTimeRange
- Makes it possible to specify a time range (in percentage) in your media
file, during which you want to search for an interruption. See
RelativeTimeRange for more detail.
$sel:threshold:InterruptionFilter'
, interruptionFilter_threshold
- Specify the duration of the interruptions in milliseconds. For example,
you can flag speech that contains more than 10,000 milliseconds of
interruptions.
interruptionFilter_absoluteTimeRange :: Lens' InterruptionFilter (Maybe AbsoluteTimeRange) Source #
Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for an interruption. See AbsoluteTimeRange for more detail.
interruptionFilter_negate :: Lens' InterruptionFilter (Maybe Bool) Source #
Set to TRUE
to flag speech that does not contain interruptions. Set to
FALSE
to flag speech that contains interruptions.
interruptionFilter_participantRole :: Lens' InterruptionFilter (Maybe ParticipantRole) Source #
Specify the interrupter that you want to flag. Omitting this parameter is equivalent to specifying both participants.
interruptionFilter_relativeTimeRange :: Lens' InterruptionFilter (Maybe RelativeTimeRange) Source #
Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for an interruption. See RelativeTimeRange for more detail.
interruptionFilter_threshold :: Lens' InterruptionFilter (Maybe Natural) Source #
Specify the duration of the interruptions in milliseconds. For example, you can flag speech that contains more than 10,000 milliseconds of interruptions.
JobExecutionSettings
data JobExecutionSettings Source #
Makes it possible to control how your transcription job is processed.
Currently, the only JobExecutionSettings
modification you can choose
is enabling job queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also
include the sub-parameters: AllowDeferredExecution
and
DataAccessRoleArn
.
See: newJobExecutionSettings
smart constructor.
JobExecutionSettings' | |
|
Instances
newJobExecutionSettings :: JobExecutionSettings Source #
Create a value of JobExecutionSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:allowDeferredExecution:JobExecutionSettings'
, jobExecutionSettings_allowDeferredExecution
- Makes it possible to enable job queuing when your concurrent request
limit is exceeded. When AllowDeferredExecution
is set to true
,
transcription job requests are placed in a queue until the number of
jobs falls below the concurrent request limit. If
AllowDeferredExecution
is set to false
and the number of
transcription job requests exceeds the concurrent request limit, you get
a LimitExceededException
error.
Note that job queuing is enabled by default for Call Analytics jobs.
If you include AllowDeferredExecution
in your request, you must also
include DataAccessRoleArn
.
$sel:dataAccessRoleArn:JobExecutionSettings'
, jobExecutionSettings_dataAccessRoleArn
- The Amazon Resource Name (ARN) of an IAM role that has permissions to
access the Amazon S3 bucket that contains your input files. If the role
that you specify doesn’t have the appropriate permissions to access the
specified Amazon S3 location, your request fails.
IAM role ARNs have the format
arn:partition:iam::account:role/role-name-with-path
. For example:
arn:aws:iam::111122223333:role/Admin
. For more information, see
IAM ARNs.
Note that if you include DataAccessRoleArn
in your request, you must
also include AllowDeferredExecution
.
jobExecutionSettings_allowDeferredExecution :: Lens' JobExecutionSettings (Maybe Bool) Source #
Makes it possible to enable job queuing when your concurrent request
limit is exceeded. When AllowDeferredExecution
is set to true
,
transcription job requests are placed in a queue until the number of
jobs falls below the concurrent request limit. If
AllowDeferredExecution
is set to false
and the number of
transcription job requests exceeds the concurrent request limit, you get
a LimitExceededException
error.
Note that job queuing is enabled by default for Call Analytics jobs.
If you include AllowDeferredExecution
in your request, you must also
include DataAccessRoleArn
.
jobExecutionSettings_dataAccessRoleArn :: Lens' JobExecutionSettings (Maybe Text) Source #
The Amazon Resource Name (ARN) of an IAM role that has permissions to access the Amazon S3 bucket that contains your input files. If the role that you specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
IAM role ARNs have the format
arn:partition:iam::account:role/role-name-with-path
. For example:
arn:aws:iam::111122223333:role/Admin
. For more information, see
IAM ARNs.
Note that if you include DataAccessRoleArn
in your request, you must
also include AllowDeferredExecution
.
LanguageCodeItem
data LanguageCodeItem Source #
Provides information on the speech contained in a discrete utterance when multi-language identification is enabled in your request. This utterance represents a block of speech consisting of one language, preceded or followed by a block of speech in a different language.
See: newLanguageCodeItem
smart constructor.
LanguageCodeItem' | |
|
Instances
newLanguageCodeItem :: LanguageCodeItem Source #
Create a value of LanguageCodeItem
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:durationInSeconds:LanguageCodeItem'
, languageCodeItem_durationInSeconds
- Provides the total time, in seconds, each identified language is spoken
in your media.
$sel:languageCode:LanguageCodeItem'
, languageCodeItem_languageCode
- Provides the language code for each language identified in your media.
languageCodeItem_durationInSeconds :: Lens' LanguageCodeItem (Maybe Double) Source #
Provides the total time, in seconds, each identified language is spoken in your media.
languageCodeItem_languageCode :: Lens' LanguageCodeItem (Maybe LanguageCode) Source #
Provides the language code for each language identified in your media.
LanguageIdSettings
data LanguageIdSettings Source #
If using automatic language identification in your request and you want
to apply a custom language model, a custom vocabulary, or a custom
vocabulary filter, include LanguageIdSettings
with the relevant
sub-parameters (VocabularyName
, LanguageModelName
, and
VocabularyFilterName
). Note that multi-language identification
(IdentifyMultipleLanguages
) doesn't support custom language models.
LanguageIdSettings
supports two to five language codes. Each language
code you include can have an associated custom language model, custom
vocabulary, and custom vocabulary filter. The language codes that you
specify must match the languages of the associated custom language
models, custom vocabularies, and custom vocabulary filters.
It's recommended that you include LanguageOptions
when using
LanguageIdSettings
to ensure that the correct language dialect is
identified. For example, if you specify a custom vocabulary that is in
en-US
but Amazon Transcribe determines that the language spoken in
your media is en-AU
, your custom vocabulary is not applied to your
transcription. If you include LanguageOptions
and include en-US
as
the only English language dialect, your custom vocabulary is applied
to your transcription.
If you want to include a custom language model with your request but
do not want to use automatic language identification, use instead
the ModelSettings parameter with the LanguageModelName
sub-parameter. If you want to
include a custom vocabulary or a custom vocabulary filter (or both) with
your request but do not want to use automatic language
identification, use instead the Settings parameter with the VocabularyName
or
VocabularyFilterName
(or both) sub-parameter.
See: newLanguageIdSettings
smart constructor.
LanguageIdSettings' | |
|
Instances
newLanguageIdSettings :: LanguageIdSettings Source #
Create a value of LanguageIdSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageModelName:LanguageIdSettings'
, languageIdSettings_languageModelName
- The name of the custom language model you want to use when processing
your transcription job. Note that custom language model names are case
sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
$sel:vocabularyFilterName:LanguageIdSettings'
, languageIdSettings_vocabularyFilterName
- The name of the custom vocabulary filter you want to use when processing
your transcription job. Custom vocabulary filter names are case
sensitive.
The language of the specified custom vocabulary filter must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary filter isn't applied. There are no errors or warnings associated with a language mismatch.
Note that if you include VocabularyFilterName
in your request, you
must also include VocabularyFilterMethod
.
$sel:vocabularyName:LanguageIdSettings'
, languageIdSettings_vocabularyName
- The name of the custom vocabulary you want to use when processing your
transcription job. Custom vocabulary names are case sensitive.
The language of the specified custom vocabulary must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary isn't applied. There are no errors or warnings associated with a language mismatch.
languageIdSettings_languageModelName :: Lens' LanguageIdSettings (Maybe Text) Source #
The name of the custom language model you want to use when processing your transcription job. Note that custom language model names are case sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
languageIdSettings_vocabularyFilterName :: Lens' LanguageIdSettings (Maybe Text) Source #
The name of the custom vocabulary filter you want to use when processing your transcription job. Custom vocabulary filter names are case sensitive.
The language of the specified custom vocabulary filter must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary filter isn't applied. There are no errors or warnings associated with a language mismatch.
Note that if you include VocabularyFilterName
in your request, you
must also include VocabularyFilterMethod
.
languageIdSettings_vocabularyName :: Lens' LanguageIdSettings (Maybe Text) Source #
The name of the custom vocabulary you want to use when processing your transcription job. Custom vocabulary names are case sensitive.
The language of the specified custom vocabulary must match the language code that you specify in your transcription request. If the languages don't match, the custom vocabulary isn't applied. There are no errors or warnings associated with a language mismatch.
LanguageModel
data LanguageModel Source #
Provides information about a custom language model, including the base model name, when the model was created, the location of the files used to train the model, when the model was last modified, the name you chose for the model, its language, its processing state, and if there is an upgrade available for the base model.
See: newLanguageModel
smart constructor.
LanguageModel' | |
|
Instances
newLanguageModel :: LanguageModel Source #
Create a value of LanguageModel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:baseModelName:LanguageModel'
, languageModel_baseModelName
- The Amazon Transcribe standard language model, or base model, used to
create your custom language model.
$sel:createTime:LanguageModel'
, languageModel_createTime
- The date and time the specified custom language model was created.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
$sel:failureReason:LanguageModel'
, languageModel_failureReason
- If ModelStatus
is FAILED
, FailureReason
contains information about
why the custom language model request failed. See also:
Common Errors.
$sel:inputDataConfig:LanguageModel'
, languageModel_inputDataConfig
- The Amazon S3 location of the input files used to train and tune your
custom language model, in addition to the data access role ARN (Amazon
Resource Name) that has permissions to access these data.
$sel:languageCode:LanguageModel'
, languageModel_languageCode
- The language code used to create your custom language model. Each custom
language model must contain terms in only one language, and the language
you select for your custom language model must match the language of
your training and tuning data.
For a list of supported languages and their associated language codes,
refer to the
Supported languages
table. Note that U.S. English (en-US
) is the only language supported
with Amazon Transcribe Medical.
$sel:lastModifiedTime:LanguageModel'
, languageModel_lastModifiedTime
- The date and time the specified custom language model was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
$sel:modelName:LanguageModel'
, languageModel_modelName
- A unique name, chosen by you, for your custom language model.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account.
$sel:modelStatus:LanguageModel'
, languageModel_modelStatus
- The status of the specified custom language model. When the status
displays as COMPLETED
the model is ready for use.
$sel:upgradeAvailability:LanguageModel'
, languageModel_upgradeAvailability
- Shows if a more current base model is available for use with the
specified custom language model.
If false
, your custom language model is using the most up-to-date base
model.
If true
, there is a newer base model available than the one your
language model is using.
Note that to update a base model, you must recreate the custom language model using the new base model. Base model upgrades for existing custom language models are not supported.
languageModel_baseModelName :: Lens' LanguageModel (Maybe BaseModelName) Source #
The Amazon Transcribe standard language model, or base model, used to create your custom language model.
languageModel_createTime :: Lens' LanguageModel (Maybe UTCTime) Source #
The date and time the specified custom language model was created.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
languageModel_failureReason :: Lens' LanguageModel (Maybe Text) Source #
If ModelStatus
is FAILED
, FailureReason
contains information about
why the custom language model request failed. See also:
Common Errors.
languageModel_inputDataConfig :: Lens' LanguageModel (Maybe InputDataConfig) Source #
The Amazon S3 location of the input files used to train and tune your custom language model, in addition to the data access role ARN (Amazon Resource Name) that has permissions to access these data.
languageModel_languageCode :: Lens' LanguageModel (Maybe CLMLanguageCode) Source #
The language code used to create your custom language model. Each custom language model must contain terms in only one language, and the language you select for your custom language model must match the language of your training and tuning data.
For a list of supported languages and their associated language codes,
refer to the
Supported languages
table. Note that U.S. English (en-US
) is the only language supported
with Amazon Transcribe Medical.
languageModel_lastModifiedTime :: Lens' LanguageModel (Maybe UTCTime) Source #
The date and time the specified custom language model was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
languageModel_modelName :: Lens' LanguageModel (Maybe Text) Source #
A unique name, chosen by you, for your custom language model.
This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account.
languageModel_modelStatus :: Lens' LanguageModel (Maybe ModelStatus) Source #
The status of the specified custom language model. When the status
displays as COMPLETED
the model is ready for use.
languageModel_upgradeAvailability :: Lens' LanguageModel (Maybe Bool) Source #
Shows if a more current base model is available for use with the specified custom language model.
If false
, your custom language model is using the most up-to-date base
model.
If true
, there is a newer base model available than the one your
language model is using.
Note that to update a base model, you must recreate the custom language model using the new base model. Base model upgrades for existing custom language models are not supported.
Media
data Media Source #
Describes the Amazon S3 location of the media file you want to use in your request.
For information on supported media formats, refer to the MediaFormat parameter or the Media formats section in the Amazon S3 Developer Guide.
See: newMedia
smart constructor.
Media' | |
|
Instances
FromJSON Media Source # | |
ToJSON Media Source # | |
Defined in Amazonka.Transcribe.Types.Media | |
Generic Media Source # | |
Read Media Source # | |
Show Media Source # | |
NFData Media Source # | |
Defined in Amazonka.Transcribe.Types.Media | |
Eq Media Source # | |
Hashable Media Source # | |
Defined in Amazonka.Transcribe.Types.Media | |
type Rep Media Source # | |
Defined in Amazonka.Transcribe.Types.Media type Rep Media = D1 ('MetaData "Media" "Amazonka.Transcribe.Types.Media" "amazonka-transcribe-2.0-4BjJqzwmm0K94bsEMe1tnF" 'False) (C1 ('MetaCons "Media'" 'PrefixI 'True) (S1 ('MetaSel ('Just "mediaFileUri") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "redactedMediaFileUri") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)))) |
newMedia :: Media Source #
Create a value of Media
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:mediaFileUri:Media'
, media_mediaFileUri
- The Amazon S3 location of the media file you want to transcribe. For
example:
s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
Note that the Amazon S3 bucket that contains your input media must be located in the same Amazon Web Services Region where you're making your transcription request.
$sel:redactedMediaFileUri:Media'
, media_redactedMediaFileUri
- The Amazon S3 location of the media file you want to redact. For
example:
s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
Note that the Amazon S3 bucket that contains your input media must be located in the same Amazon Web Services Region where you're making your transcription request.
RedactedMediaFileUri
produces a redacted audio file in addition to a
redacted transcript. It is only supported for Call Analytics
(StartCallAnalyticsJob
) transcription requests.
media_mediaFileUri :: Lens' Media (Maybe Text) Source #
The Amazon S3 location of the media file you want to transcribe. For example:
s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
Note that the Amazon S3 bucket that contains your input media must be located in the same Amazon Web Services Region where you're making your transcription request.
media_redactedMediaFileUri :: Lens' Media (Maybe Text) Source #
The Amazon S3 location of the media file you want to redact. For example:
s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
Note that the Amazon S3 bucket that contains your input media must be located in the same Amazon Web Services Region where you're making your transcription request.
RedactedMediaFileUri
produces a redacted audio file in addition to a
redacted transcript. It is only supported for Call Analytics
(StartCallAnalyticsJob
) transcription requests.
MedicalTranscript
data MedicalTranscript Source #
Provides you with the Amazon S3 URI you can use to access your transcript.
See: newMedicalTranscript
smart constructor.
MedicalTranscript' | |
|
Instances
newMedicalTranscript :: MedicalTranscript Source #
Create a value of MedicalTranscript
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:transcriptFileUri:MedicalTranscript'
, medicalTranscript_transcriptFileUri
- The Amazon S3 location of your transcript. You can use this URI to
access or download your transcript.
Note that this is the Amazon S3 location you specified in your request
using the OutputBucketName
parameter.
medicalTranscript_transcriptFileUri :: Lens' MedicalTranscript (Maybe Text) Source #
The Amazon S3 location of your transcript. You can use this URI to access or download your transcript.
Note that this is the Amazon S3 location you specified in your request
using the OutputBucketName
parameter.
MedicalTranscriptionJob
data MedicalTranscriptionJob Source #
Provides detailed information about a medical transcription job.
To view the status of the specified medical transcription job, check the
TranscriptionJobStatus
field. If the status is COMPLETED
, the job is
finished and you can find the results at the location specified in
TranscriptFileUri
. If the status is FAILED
, FailureReason
provides
details on why your transcription job failed.
See: newMedicalTranscriptionJob
smart constructor.
MedicalTranscriptionJob' | |
|
Instances
newMedicalTranscriptionJob :: MedicalTranscriptionJob Source #
Create a value of MedicalTranscriptionJob
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:completionTime:MedicalTranscriptionJob'
, medicalTranscriptionJob_completionTime
- The date and time the specified medical transcription job finished
processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
$sel:contentIdentificationType:MedicalTranscriptionJob'
, medicalTranscriptionJob_contentIdentificationType
- Indicates whether content identification was enabled for your
transcription request.
$sel:creationTime:MedicalTranscriptionJob'
, medicalTranscriptionJob_creationTime
- The date and time the specified medical transcription job request was
made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:failureReason:MedicalTranscriptionJob'
, medicalTranscriptionJob_failureReason
- If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job request failed.
The FailureReason
field contains one of the following values:
Unsupported media format
.The media format specified in
MediaFormat
isn't valid. Refer to MediaFormat for a list of supported formats.The media format provided does not match the detected media format
.The media format specified in
MediaFormat
doesn't match the format of the input file. Check the media format of your media file and correct the specified value.Invalid sample rate for audio file
.The sample rate specified in
MediaSampleRateHertz
isn't valid. The sample rate must be between 16,000 and 48,000 hertz.The sample rate provided does not match the detected sample rate
.The sample rate specified in
MediaSampleRateHertz
doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.Invalid file size: file size too large
.The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large
.Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
$sel:languageCode:MedicalTranscriptionJob'
, medicalTranscriptionJob_languageCode
- The language code used to create your medical transcription job. US
English (en-US
) is the only supported language for medical
transcriptions.
$sel:media:MedicalTranscriptionJob'
, medicalTranscriptionJob_media
- Undocumented member.
$sel:mediaFormat:MedicalTranscriptionJob'
, medicalTranscriptionJob_mediaFormat
- The format of the input media file.
$sel:mediaSampleRateHertz:MedicalTranscriptionJob'
, medicalTranscriptionJob_mediaSampleRateHertz
- The sample rate, in hertz, of the audio track in your input media file.
$sel:medicalTranscriptionJobName:MedicalTranscriptionJob'
, medicalTranscriptionJob_medicalTranscriptionJobName
- The name of the medical transcription job. Job names are case sensitive
and must be unique within an Amazon Web Services account.
$sel:settings:MedicalTranscriptionJob'
, medicalTranscriptionJob_settings
- Provides information on any additional settings that were included in
your request. Additional settings include channel identification,
alternative transcriptions, speaker partitioning, custom vocabularies,
and custom vocabulary filters.
$sel:specialty:MedicalTranscriptionJob'
, medicalTranscriptionJob_specialty
- Describes the medical specialty represented in your media.
$sel:startTime:MedicalTranscriptionJob'
, medicalTranscriptionJob_startTime
- The date and time the specified medical transcription job began
processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:tags:MedicalTranscriptionJob'
, medicalTranscriptionJob_tags
- The tags, each in the form of a key:value pair, assigned to the
specified medical transcription job.
$sel:transcript:MedicalTranscriptionJob'
, medicalTranscriptionJob_transcript
- Provides you with the Amazon S3 URI you can use to access your
transcript.
$sel:transcriptionJobStatus:MedicalTranscriptionJob'
, medicalTranscriptionJob_transcriptionJobStatus
- Provides the status of the specified medical transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
. If the status
is FAILED
, FailureReason
provides details on why your transcription
job failed.
$sel:type':MedicalTranscriptionJob'
, medicalTranscriptionJob_type
- Indicates whether the input media is a dictation or a conversation, as
specified in the StartMedicalTranscriptionJob
request.
medicalTranscriptionJob_completionTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime) Source #
The date and time the specified medical transcription job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
medicalTranscriptionJob_contentIdentificationType :: Lens' MedicalTranscriptionJob (Maybe MedicalContentIdentificationType) Source #
Indicates whether content identification was enabled for your transcription request.
medicalTranscriptionJob_creationTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime) Source #
The date and time the specified medical transcription job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
medicalTranscriptionJob_failureReason :: Lens' MedicalTranscriptionJob (Maybe Text) Source #
If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job request failed.
The FailureReason
field contains one of the following values:
Unsupported media format
.The media format specified in
MediaFormat
isn't valid. Refer to MediaFormat for a list of supported formats.The media format provided does not match the detected media format
.The media format specified in
MediaFormat
doesn't match the format of the input file. Check the media format of your media file and correct the specified value.Invalid sample rate for audio file
.The sample rate specified in
MediaSampleRateHertz
isn't valid. The sample rate must be between 16,000 and 48,000 hertz.The sample rate provided does not match the detected sample rate
.The sample rate specified in
MediaSampleRateHertz
doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.Invalid file size: file size too large
.The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
Invalid number of channels: number of channels too large
.Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
medicalTranscriptionJob_languageCode :: Lens' MedicalTranscriptionJob (Maybe LanguageCode) Source #
The language code used to create your medical transcription job. US
English (en-US
) is the only supported language for medical
transcriptions.
medicalTranscriptionJob_media :: Lens' MedicalTranscriptionJob (Maybe Media) Source #
Undocumented member.
medicalTranscriptionJob_mediaFormat :: Lens' MedicalTranscriptionJob (Maybe MediaFormat) Source #
The format of the input media file.
medicalTranscriptionJob_mediaSampleRateHertz :: Lens' MedicalTranscriptionJob (Maybe Natural) Source #
The sample rate, in hertz, of the audio track in your input media file.
medicalTranscriptionJob_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJob (Maybe Text) Source #
The name of the medical transcription job. Job names are case sensitive and must be unique within an Amazon Web Services account.
medicalTranscriptionJob_settings :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscriptionSetting) Source #
Provides information on any additional settings that were included in your request. Additional settings include channel identification, alternative transcriptions, speaker partitioning, custom vocabularies, and custom vocabulary filters.
medicalTranscriptionJob_specialty :: Lens' MedicalTranscriptionJob (Maybe Specialty) Source #
Describes the medical specialty represented in your media.
medicalTranscriptionJob_startTime :: Lens' MedicalTranscriptionJob (Maybe UTCTime) Source #
The date and time the specified medical transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
medicalTranscriptionJob_tags :: Lens' MedicalTranscriptionJob (Maybe (NonEmpty Tag)) Source #
The tags, each in the form of a key:value pair, assigned to the specified medical transcription job.
medicalTranscriptionJob_transcript :: Lens' MedicalTranscriptionJob (Maybe MedicalTranscript) Source #
Provides you with the Amazon S3 URI you can use to access your transcript.
medicalTranscriptionJob_transcriptionJobStatus :: Lens' MedicalTranscriptionJob (Maybe TranscriptionJobStatus) Source #
Provides the status of the specified medical transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
. If the status
is FAILED
, FailureReason
provides details on why your transcription
job failed.
medicalTranscriptionJob_type :: Lens' MedicalTranscriptionJob (Maybe Type) Source #
Indicates whether the input media is a dictation or a conversation, as
specified in the StartMedicalTranscriptionJob
request.
MedicalTranscriptionJobSummary
data MedicalTranscriptionJobSummary Source #
Provides detailed information about a specific medical transcription job.
See: newMedicalTranscriptionJobSummary
smart constructor.
MedicalTranscriptionJobSummary' | |
|
Instances
newMedicalTranscriptionJobSummary :: MedicalTranscriptionJobSummary Source #
Create a value of MedicalTranscriptionJobSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:completionTime:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_completionTime
- The date and time the specified medical transcription job finished
processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
$sel:contentIdentificationType:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_contentIdentificationType
- Labels all personal health information (PHI) identified in your
transcript. For more information, see
Identifying personal health information (PHI) in a transcription.
$sel:creationTime:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_creationTime
- The date and time the specified medical transcription job request was
made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:failureReason:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_failureReason
- If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job failed. See also:
Common Errors.
$sel:languageCode:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_languageCode
- The language code used to create your medical transcription. US English
(en-US
) is the only supported language for medical transcriptions.
$sel:medicalTranscriptionJobName:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_medicalTranscriptionJobName
- The name of the medical transcription job. Job names are case sensitive
and must be unique within an Amazon Web Services account.
$sel:outputLocationType:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_outputLocationType
- Indicates where the specified medical transcription output is stored.
If the value is CUSTOMER_BUCKET
, the location is the Amazon S3 bucket
you specified using the OutputBucketName
parameter in your request. If
you also included OutputKey
in your request, your output is located in
the path you specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed
Amazon S3 bucket. To access a transcript stored in a service-managed
bucket, use the URI shown in the TranscriptFileUri
field.
$sel:specialty:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_specialty
- Provides the medical specialty represented in your media.
$sel:startTime:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_startTime
- The date and time your medical transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:transcriptionJobStatus:MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_transcriptionJobStatus
- Provides the status of your medical transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
. If the status
is FAILED
, FailureReason
provides details on why your transcription
job failed.
$sel:type':MedicalTranscriptionJobSummary'
, medicalTranscriptionJobSummary_type
- Indicates whether the input media is a dictation or a conversation, as
specified in the StartMedicalTranscriptionJob
request.
medicalTranscriptionJobSummary_completionTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime) Source #
The date and time the specified medical transcription job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
medicalTranscriptionJobSummary_contentIdentificationType :: Lens' MedicalTranscriptionJobSummary (Maybe MedicalContentIdentificationType) Source #
Labels all personal health information (PHI) identified in your transcript. For more information, see Identifying personal health information (PHI) in a transcription.
medicalTranscriptionJobSummary_creationTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime) Source #
The date and time the specified medical transcription job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
medicalTranscriptionJobSummary_failureReason :: Lens' MedicalTranscriptionJobSummary (Maybe Text) Source #
If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job failed. See also:
Common Errors.
medicalTranscriptionJobSummary_languageCode :: Lens' MedicalTranscriptionJobSummary (Maybe LanguageCode) Source #
The language code used to create your medical transcription. US English
(en-US
) is the only supported language for medical transcriptions.
medicalTranscriptionJobSummary_medicalTranscriptionJobName :: Lens' MedicalTranscriptionJobSummary (Maybe Text) Source #
The name of the medical transcription job. Job names are case sensitive and must be unique within an Amazon Web Services account.
medicalTranscriptionJobSummary_outputLocationType :: Lens' MedicalTranscriptionJobSummary (Maybe OutputLocationType) Source #
Indicates where the specified medical transcription output is stored.
If the value is CUSTOMER_BUCKET
, the location is the Amazon S3 bucket
you specified using the OutputBucketName
parameter in your request. If
you also included OutputKey
in your request, your output is located in
the path you specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed
Amazon S3 bucket. To access a transcript stored in a service-managed
bucket, use the URI shown in the TranscriptFileUri
field.
medicalTranscriptionJobSummary_specialty :: Lens' MedicalTranscriptionJobSummary (Maybe Specialty) Source #
Provides the medical specialty represented in your media.
medicalTranscriptionJobSummary_startTime :: Lens' MedicalTranscriptionJobSummary (Maybe UTCTime) Source #
The date and time your medical transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
medicalTranscriptionJobSummary_transcriptionJobStatus :: Lens' MedicalTranscriptionJobSummary (Maybe TranscriptionJobStatus) Source #
Provides the status of your medical transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
. If the status
is FAILED
, FailureReason
provides details on why your transcription
job failed.
medicalTranscriptionJobSummary_type :: Lens' MedicalTranscriptionJobSummary (Maybe Type) Source #
Indicates whether the input media is a dictation or a conversation, as
specified in the StartMedicalTranscriptionJob
request.
MedicalTranscriptionSetting
data MedicalTranscriptionSetting Source #
Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can use that to apply custom vocabularies to your medical transcription job.
See: newMedicalTranscriptionSetting
smart constructor.
MedicalTranscriptionSetting' | |
|
Instances
newMedicalTranscriptionSetting :: MedicalTranscriptionSetting Source #
Create a value of MedicalTranscriptionSetting
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:channelIdentification:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_channelIdentification
- Enables channel identification in multi-channel audio.
Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.
If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript does not separate the speech by channel.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a
BadRequestException
.
For more information, see Transcribing multi-channel audio.
$sel:maxAlternatives:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_maxAlternatives
- Indicate the maximum number of alternative transcriptions you want
Amazon Transcribe Medical to include in your transcript.
If you select a number greater than the number of alternative transcriptions generated by Amazon Transcribe Medical, only the actual number of alternative transcriptions are included.
If you include MaxAlternatives
in your request, you must also include
ShowAlternatives
with a value of true
.
For more information, see Alternative transcriptions.
$sel:maxSpeakerLabels:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_maxSpeakerLabels
- Specify the maximum number of speakers you want to partition in your
media.
Note that if your media contains more speakers than the specified number, multiple speakers are treated as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the
ShowSpeakerLabels
field to true.
$sel:showAlternatives:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_showAlternatives
- To include alternative transcriptions within your transcription output,
include ShowAlternatives
in your transcription request.
If you include ShowAlternatives
, you must also include
MaxAlternatives
, which is the maximum number of alternative
transcriptions you want Amazon Transcribe Medical to generate.
For more information, see Alternative transcriptions.
$sel:showSpeakerLabels:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_showSpeakerLabels
- Enables speaker partitioning (diarization) in your transcription output.
Speaker partitioning labels the speech from individual speakers in your
media file.
If you enable ShowSpeakerLabels
in your request, you must also include
MaxSpeakerLabels
.
You can't include ShowSpeakerLabels
and ChannelIdentification
in
the same request. Including both parameters returns a
BadRequestException
.
For more information, see Partitioning speakers (diarization).
$sel:vocabularyName:MedicalTranscriptionSetting'
, medicalTranscriptionSetting_vocabularyName
- The name of the custom vocabulary you want to use when processing your
medical transcription job. Custom vocabulary names are case sensitive.
The language of the specified custom vocabulary must match the language
code that you specify in your transcription request. If the languages
don't match, the custom vocabulary isn't applied. There are no errors
or warnings associated with a language mismatch. US English (en-US
) is
the only valid language for Amazon Transcribe Medical.
medicalTranscriptionSetting_channelIdentification :: Lens' MedicalTranscriptionSetting (Maybe Bool) Source #
Enables channel identification in multi-channel audio.
Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.
If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript does not separate the speech by channel.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a
BadRequestException
.
For more information, see Transcribing multi-channel audio.
medicalTranscriptionSetting_maxAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Natural) Source #
Indicate the maximum number of alternative transcriptions you want Amazon Transcribe Medical to include in your transcript.
If you select a number greater than the number of alternative transcriptions generated by Amazon Transcribe Medical, only the actual number of alternative transcriptions are included.
If you include MaxAlternatives
in your request, you must also include
ShowAlternatives
with a value of true
.
For more information, see Alternative transcriptions.
medicalTranscriptionSetting_maxSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Natural) Source #
Specify the maximum number of speakers you want to partition in your media.
Note that if your media contains more speakers than the specified number, multiple speakers are treated as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the
ShowSpeakerLabels
field to true.
medicalTranscriptionSetting_showAlternatives :: Lens' MedicalTranscriptionSetting (Maybe Bool) Source #
To include alternative transcriptions within your transcription output,
include ShowAlternatives
in your transcription request.
If you include ShowAlternatives
, you must also include
MaxAlternatives
, which is the maximum number of alternative
transcriptions you want Amazon Transcribe Medical to generate.
For more information, see Alternative transcriptions.
medicalTranscriptionSetting_showSpeakerLabels :: Lens' MedicalTranscriptionSetting (Maybe Bool) Source #
Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
If you enable ShowSpeakerLabels
in your request, you must also include
MaxSpeakerLabels
.
You can't include ShowSpeakerLabels
and ChannelIdentification
in
the same request. Including both parameters returns a
BadRequestException
.
For more information, see Partitioning speakers (diarization).
medicalTranscriptionSetting_vocabularyName :: Lens' MedicalTranscriptionSetting (Maybe Text) Source #
The name of the custom vocabulary you want to use when processing your medical transcription job. Custom vocabulary names are case sensitive.
The language of the specified custom vocabulary must match the language
code that you specify in your transcription request. If the languages
don't match, the custom vocabulary isn't applied. There are no errors
or warnings associated with a language mismatch. US English (en-US
) is
the only valid language for Amazon Transcribe Medical.
ModelSettings
data ModelSettings Source #
Provides the name of the custom language model that was included in the specified transcription job.
Only use ModelSettings
with the LanguageModelName
sub-parameter if
you're not using automatic language identification (IdentifyLanguage). If using
LanguageIdSettings
in your request, this parameter contains a
LanguageModelName
sub-parameter.
See: newModelSettings
smart constructor.
ModelSettings' | |
|
Instances
newModelSettings :: ModelSettings Source #
Create a value of ModelSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageModelName:ModelSettings'
, modelSettings_languageModelName
- The name of the custom language model you want to use when processing
your transcription job. Note that custom language model names are case
sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
modelSettings_languageModelName :: Lens' ModelSettings (Maybe Text) Source #
The name of the custom language model you want to use when processing your transcription job. Note that custom language model names are case sensitive.
The language of the specified custom language model must match the language code that you specify in your transcription request. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch.
NonTalkTimeFilter
data NonTalkTimeFilter Source #
Flag the presence or absence of periods of silence in your Call Analytics transcription output.
Rules using NonTalkTimeFilter
are designed to match:
- The presence of silence at specified periods throughout the call
- The presence of speech at specified periods throughout the call
See Rule criteria for batch categories for usage examples.
See: newNonTalkTimeFilter
smart constructor.
NonTalkTimeFilter' | |
|
Instances
newNonTalkTimeFilter :: NonTalkTimeFilter Source #
Create a value of NonTalkTimeFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:absoluteTimeRange:NonTalkTimeFilter'
, nonTalkTimeFilter_absoluteTimeRange
- Makes it possible to specify a time range (in milliseconds) in your
audio, during which you want to search for a period of silence. See
AbsoluteTimeRange for more detail.
$sel:negate:NonTalkTimeFilter'
, nonTalkTimeFilter_negate
- Set to TRUE
to flag periods of speech. Set to FALSE
to flag periods
of silence.
$sel:relativeTimeRange:NonTalkTimeFilter'
, nonTalkTimeFilter_relativeTimeRange
- Makes it possible to specify a time range (in percentage) in your media
file, during which you want to search for a period of silence. See
RelativeTimeRange for more detail.
$sel:threshold:NonTalkTimeFilter'
, nonTalkTimeFilter_threshold
- Specify the duration, in milliseconds, of the period of silence that you
want to flag. For example, you can flag a silent period that lasts
30,000 milliseconds.
nonTalkTimeFilter_absoluteTimeRange :: Lens' NonTalkTimeFilter (Maybe AbsoluteTimeRange) Source #
Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for a period of silence. See AbsoluteTimeRange for more detail.
nonTalkTimeFilter_negate :: Lens' NonTalkTimeFilter (Maybe Bool) Source #
Set to TRUE
to flag periods of speech. Set to FALSE
to flag periods
of silence.
nonTalkTimeFilter_relativeTimeRange :: Lens' NonTalkTimeFilter (Maybe RelativeTimeRange) Source #
Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for a period of silence. See RelativeTimeRange for more detail.
nonTalkTimeFilter_threshold :: Lens' NonTalkTimeFilter (Maybe Natural) Source #
Specify the duration, in milliseconds, of the period of silence that you want to flag. For example, you can flag a silent period that lasts 30,000 milliseconds.
RelativeTimeRange
data RelativeTimeRange Source #
A time range, in percentage, between two points in your media file.
You can use StartPercentage
and EndPercentage
to search a custom
segment. For example, setting StartPercentage
to 10 and
EndPercentage
to 50 only searches for your specified criteria in the
audio contained between the 10 percent mark and the 50 percent mark of
your media file.
You can use also First
to search from the start of the media file
until the time that you specify. Or use Last
to search from the time
that you specify until the end of the media file. For example, setting
First
to 10 only searches for your specified criteria in the audio
contained in the first 10 percent of the media file.
If you prefer to use milliseconds instead of percentage, see AbsoluteTimeRange.
See: newRelativeTimeRange
smart constructor.
RelativeTimeRange' | |
|
Instances
newRelativeTimeRange :: RelativeTimeRange Source #
Create a value of RelativeTimeRange
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:endPercentage:RelativeTimeRange'
, relativeTimeRange_endPercentage
- The time, in percentage, when Amazon Transcribe stops searching for the
specified criteria in your media file. If you include EndPercentage
in
your request, you must also include StartPercentage
.
$sel:first:RelativeTimeRange'
, relativeTimeRange_first
- The time, in percentage, from the start of your media file until the
specified value. Amazon Transcribe searches for your specified criteria
in this time segment.
$sel:last:RelativeTimeRange'
, relativeTimeRange_last
- The time, in percentage, from the specified value until the end of your
media file. Amazon Transcribe searches for your specified criteria in
this time segment.
$sel:startPercentage:RelativeTimeRange'
, relativeTimeRange_startPercentage
- The time, in percentage, when Amazon Transcribe starts searching for the
specified criteria in your media file. If you include StartPercentage
in your request, you must also include EndPercentage
.
relativeTimeRange_endPercentage :: Lens' RelativeTimeRange (Maybe Natural) Source #
The time, in percentage, when Amazon Transcribe stops searching for the
specified criteria in your media file. If you include EndPercentage
in
your request, you must also include StartPercentage
.
relativeTimeRange_first :: Lens' RelativeTimeRange (Maybe Natural) Source #
The time, in percentage, from the start of your media file until the specified value. Amazon Transcribe searches for your specified criteria in this time segment.
relativeTimeRange_last :: Lens' RelativeTimeRange (Maybe Natural) Source #
The time, in percentage, from the specified value until the end of your media file. Amazon Transcribe searches for your specified criteria in this time segment.
relativeTimeRange_startPercentage :: Lens' RelativeTimeRange (Maybe Natural) Source #
The time, in percentage, when Amazon Transcribe starts searching for the
specified criteria in your media file. If you include StartPercentage
in your request, you must also include EndPercentage
.
Rule
A rule is a set of criteria that you can specify to flag an attribute in your Call Analytics output. Rules define a Call Analytics category.
Rules can include these parameters: InterruptionFilter, NonTalkTimeFilter, SentimentFilter, and TranscriptFilter.
To learn more about Call Analytics rules and categories, see Creating categories for batch transcriptions and Creating categories for streaming transcriptions.
To learn more about Call Analytics, see Analyzing call center audio with Call Analytics.
See: newRule
smart constructor.
Rule' | |
|
Instances
Create a value of Rule
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:interruptionFilter:Rule'
, rule_interruptionFilter
- Flag the presence or absence of interruptions in your Call Analytics
transcription output. Refer to InterruptionFilter for more detail.
$sel:nonTalkTimeFilter:Rule'
, rule_nonTalkTimeFilter
- Flag the presence or absence of periods of silence in your Call
Analytics transcription output. Refer to NonTalkTimeFilter for more detail.
$sel:sentimentFilter:Rule'
, rule_sentimentFilter
- Flag the presence or absence of specific sentiments in your Call
Analytics transcription output. Refer to SentimentFilter for more detail.
$sel:transcriptFilter:Rule'
, rule_transcriptFilter
- Flag the presence or absence of specific words or phrases in your Call
Analytics transcription output. Refer to TranscriptFilter for more detail.
rule_interruptionFilter :: Lens' Rule (Maybe InterruptionFilter) Source #
Flag the presence or absence of interruptions in your Call Analytics transcription output. Refer to InterruptionFilter for more detail.
rule_nonTalkTimeFilter :: Lens' Rule (Maybe NonTalkTimeFilter) Source #
Flag the presence or absence of periods of silence in your Call Analytics transcription output. Refer to NonTalkTimeFilter for more detail.
rule_sentimentFilter :: Lens' Rule (Maybe SentimentFilter) Source #
Flag the presence or absence of specific sentiments in your Call Analytics transcription output. Refer to SentimentFilter for more detail.
rule_transcriptFilter :: Lens' Rule (Maybe TranscriptFilter) Source #
Flag the presence or absence of specific words or phrases in your Call Analytics transcription output. Refer to TranscriptFilter for more detail.
SentimentFilter
data SentimentFilter Source #
Flag the presence or absence of specific sentiments detected in your Call Analytics transcription output.
Rules using SentimentFilter
are designed to match:
- The presence or absence of a positive sentiment felt by the customer, agent, or both at specified points in the call
- The presence or absence of a negative sentiment felt by the customer, agent, or both at specified points in the call
- The presence or absence of a neutral sentiment felt by the customer, agent, or both at specified points in the call
- The presence or absence of a mixed sentiment felt by the customer, the agent, or both at specified points in the call
See Rule criteria for batch categories for usage examples.
See: newSentimentFilter
smart constructor.
SentimentFilter' | |
|
Instances
Create a value of SentimentFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:absoluteTimeRange:SentimentFilter'
, sentimentFilter_absoluteTimeRange
- Makes it possible to specify a time range (in milliseconds) in your
audio, during which you want to search for the specified sentiments. See
AbsoluteTimeRange for more detail.
$sel:negate:SentimentFilter'
, sentimentFilter_negate
- Set to TRUE
to flag the sentiments that you didn't include in your
request. Set to FALSE
to flag the sentiments that you specified in
your request.
$sel:participantRole:SentimentFilter'
, sentimentFilter_participantRole
- Specify the participant that you want to flag. Omitting this parameter
is equivalent to specifying both participants.
$sel:relativeTimeRange:SentimentFilter'
, sentimentFilter_relativeTimeRange
- Makes it possible to specify a time range (in percentage) in your media
file, during which you want to search for the specified sentiments. See
RelativeTimeRange for more detail.
$sel:sentiments:SentimentFilter'
, sentimentFilter_sentiments
- Specify the sentiments that you want to flag.
sentimentFilter_absoluteTimeRange :: Lens' SentimentFilter (Maybe AbsoluteTimeRange) Source #
Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for the specified sentiments. See AbsoluteTimeRange for more detail.
sentimentFilter_negate :: Lens' SentimentFilter (Maybe Bool) Source #
Set to TRUE
to flag the sentiments that you didn't include in your
request. Set to FALSE
to flag the sentiments that you specified in
your request.
sentimentFilter_participantRole :: Lens' SentimentFilter (Maybe ParticipantRole) Source #
Specify the participant that you want to flag. Omitting this parameter is equivalent to specifying both participants.
sentimentFilter_relativeTimeRange :: Lens' SentimentFilter (Maybe RelativeTimeRange) Source #
Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for the specified sentiments. See RelativeTimeRange for more detail.
sentimentFilter_sentiments :: Lens' SentimentFilter (NonEmpty SentimentValue) Source #
Specify the sentiments that you want to flag.
Settings
Allows additional optional settings in your request, including channel identification, alternative transcriptions, and speaker partitioning. You can also use these settings to apply custom vocabularies to your transcription job.
See: newSettings
smart constructor.
Settings' | |
|
Instances
newSettings :: Settings Source #
Create a value of Settings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:channelIdentification:Settings'
, settings_channelIdentification
- Enables channel identification in multi-channel audio.
Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a
BadRequestException
.
For more information, see Transcribing multi-channel audio.
$sel:maxAlternatives:Settings'
, settings_maxAlternatives
- Indicate the maximum number of alternative transcriptions you want
Amazon Transcribe to include in your transcript.
If you select a number greater than the number of alternative transcriptions generated by Amazon Transcribe, only the actual number of alternative transcriptions are included.
If you include MaxAlternatives
in your request, you must also include
ShowAlternatives
with a value of true
.
For more information, see Alternative transcriptions.
$sel:maxSpeakerLabels:Settings'
, settings_maxSpeakerLabels
- Specify the maximum number of speakers you want to partition in your
media.
Note that if your media contains more speakers than the specified number, multiple speakers are treated as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the
ShowSpeakerLabels
field to true.
$sel:showAlternatives:Settings'
, settings_showAlternatives
- To include alternative transcriptions within your transcription output,
include ShowAlternatives
in your transcription request.
If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript does not separate the speech by channel.
If you include ShowAlternatives
, you must also include
MaxAlternatives
, which is the maximum number of alternative
transcriptions you want Amazon Transcribe to generate.
For more information, see Alternative transcriptions.
$sel:showSpeakerLabels:Settings'
, settings_showSpeakerLabels
- Enables speaker partitioning (diarization) in your transcription output.
Speaker partitioning labels the speech from individual speakers in your
media file.
If you enable ShowSpeakerLabels
in your request, you must also include
MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a
BadRequestException
.
For more information, see Partitioning speakers (diarization).
$sel:vocabularyFilterMethod:Settings'
, settings_vocabularyFilterMethod
- Specify how you want your custom vocabulary filter applied to your
transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
$sel:vocabularyFilterName:Settings'
, settings_vocabularyFilterName
- The name of the custom vocabulary filter you want to use in your
transcription job request. This name is case sensitive, cannot contain
spaces, and must be unique within an Amazon Web Services account.
Note that if you include VocabularyFilterName
in your request, you
must also include VocabularyFilterMethod
.
$sel:vocabularyName:Settings'
, settings_vocabularyName
- The name of the custom vocabulary you want to use in your transcription
job request. This name is case sensitive, cannot contain spaces, and
must be unique within an Amazon Web Services account.
settings_channelIdentification :: Lens' Settings (Maybe Bool) Source #
Enables channel identification in multi-channel audio.
Channel identification transcribes the audio on each channel independently, then appends the output for each channel into one transcript.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a
BadRequestException
.
For more information, see Transcribing multi-channel audio.
settings_maxAlternatives :: Lens' Settings (Maybe Natural) Source #
Indicate the maximum number of alternative transcriptions you want Amazon Transcribe to include in your transcript.
If you select a number greater than the number of alternative transcriptions generated by Amazon Transcribe, only the actual number of alternative transcriptions are included.
If you include MaxAlternatives
in your request, you must also include
ShowAlternatives
with a value of true
.
For more information, see Alternative transcriptions.
settings_maxSpeakerLabels :: Lens' Settings (Maybe Natural) Source #
Specify the maximum number of speakers you want to partition in your media.
Note that if your media contains more speakers than the specified number, multiple speakers are treated as a single speaker.
If you specify the MaxSpeakerLabels
field, you must set the
ShowSpeakerLabels
field to true.
settings_showAlternatives :: Lens' Settings (Maybe Bool) Source #
To include alternative transcriptions within your transcription output,
include ShowAlternatives
in your transcription request.
If you have multi-channel audio and do not enable channel identification, your audio is transcribed in a continuous manner and your transcript does not separate the speech by channel.
If you include ShowAlternatives
, you must also include
MaxAlternatives
, which is the maximum number of alternative
transcriptions you want Amazon Transcribe to generate.
For more information, see Alternative transcriptions.
settings_showSpeakerLabels :: Lens' Settings (Maybe Bool) Source #
Enables speaker partitioning (diarization) in your transcription output. Speaker partitioning labels the speech from individual speakers in your media file.
If you enable ShowSpeakerLabels
in your request, you must also include
MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and ChannelIdentification
in the same request. Including both parameters returns a
BadRequestException
.
For more information, see Partitioning speakers (diarization).
settings_vocabularyFilterMethod :: Lens' Settings (Maybe VocabularyFilterMethod) Source #
Specify how you want your custom vocabulary filter applied to your transcript.
To replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
settings_vocabularyFilterName :: Lens' Settings (Maybe Text) Source #
The name of the custom vocabulary filter you want to use in your transcription job request. This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account.
Note that if you include VocabularyFilterName
in your request, you
must also include VocabularyFilterMethod
.
settings_vocabularyName :: Lens' Settings (Maybe Text) Source #
The name of the custom vocabulary you want to use in your transcription job request. This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account.
Subtitles
Generate subtitles for your media file with your transcription request.
You can choose a start index of 0 or 1, and you can specify either WebVTT or SubRip (or both) as your output format.
Note that your subtitle files are placed in the same location as your transcription output.
See: newSubtitles
smart constructor.
Subtitles' | |
|
Instances
ToJSON Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles | |
Generic Subtitles Source # | |
Read Subtitles Source # | |
Show Subtitles Source # | |
NFData Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles | |
Eq Subtitles Source # | |
Hashable Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles | |
type Rep Subtitles Source # | |
Defined in Amazonka.Transcribe.Types.Subtitles type Rep Subtitles = D1 ('MetaData "Subtitles" "Amazonka.Transcribe.Types.Subtitles" "amazonka-transcribe-2.0-4BjJqzwmm0K94bsEMe1tnF" 'False) (C1 ('MetaCons "Subtitles'" 'PrefixI 'True) (S1 ('MetaSel ('Just "formats") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [SubtitleFormat])) :*: S1 ('MetaSel ('Just "outputStartIndex") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)))) |
newSubtitles :: Subtitles Source #
Create a value of Subtitles
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:formats:Subtitles'
, subtitles_formats
- Specify the output format for your subtitle file; if you select both
WebVTT (vtt
) and SubRip (srt
) formats, two output files are
generated.
$sel:outputStartIndex:Subtitles'
, subtitles_outputStartIndex
- Specify the starting value that is assigned to the first subtitle
segment.
The default start index for Amazon Transcribe is 0
, which differs from
the more widely used standard of 1
. If you're uncertain which value
to use, we recommend choosing 1
, as this may improve compatibility
with other services.
subtitles_formats :: Lens' Subtitles (Maybe [SubtitleFormat]) Source #
Specify the output format for your subtitle file; if you select both
WebVTT (vtt
) and SubRip (srt
) formats, two output files are
generated.
subtitles_outputStartIndex :: Lens' Subtitles (Maybe Natural) Source #
Specify the starting value that is assigned to the first subtitle segment.
The default start index for Amazon Transcribe is 0
, which differs from
the more widely used standard of 1
. If you're uncertain which value
to use, we recommend choosing 1
, as this may improve compatibility
with other services.
SubtitlesOutput
data SubtitlesOutput Source #
Provides information about your subtitle file, including format, start index, and Amazon S3 location.
See: newSubtitlesOutput
smart constructor.
SubtitlesOutput' | |
|
Instances
newSubtitlesOutput :: SubtitlesOutput Source #
Create a value of SubtitlesOutput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:formats:SubtitlesOutput'
, subtitlesOutput_formats
- Provides the format of your subtitle files. If your request included
both WebVTT (vtt
) and SubRip (srt
) formats, both formats are shown.
$sel:outputStartIndex:SubtitlesOutput'
, subtitlesOutput_outputStartIndex
- Provides the start index value for your subtitle files. If you did not
specify a value in your request, the default value of 0
is used.
$sel:subtitleFileUris:SubtitlesOutput'
, subtitlesOutput_subtitleFileUris
- The Amazon S3 location of your transcript. You can use this URI to
access or download your subtitle file. Your subtitle file is stored in
the same location as your transcript. If you specified both WebVTT and
SubRip subtitle formats, two URIs are provided.
If you included OutputBucketName
in your transcription job request,
this is the URI of that bucket. If you also included OutputKey
in your
request, your output is located in the path you specified in your
request.
If you didn't include OutputBucketName
in your transcription job
request, your subtitle file is stored in a service-managed bucket, and
TranscriptFileUri
provides you with a temporary URI you can use for
secure access to your subtitle file.
Temporary URIs for service-managed Amazon S3 buckets are only valid for
15 minutes. If you get an AccessDenied
error, you can get a new
temporary URI by running a GetTranscriptionJob
or
ListTranscriptionJobs
request.
subtitlesOutput_formats :: Lens' SubtitlesOutput (Maybe [SubtitleFormat]) Source #
Provides the format of your subtitle files. If your request included
both WebVTT (vtt
) and SubRip (srt
) formats, both formats are shown.
subtitlesOutput_outputStartIndex :: Lens' SubtitlesOutput (Maybe Natural) Source #
Provides the start index value for your subtitle files. If you did not
specify a value in your request, the default value of 0
is used.
subtitlesOutput_subtitleFileUris :: Lens' SubtitlesOutput (Maybe [Text]) Source #
The Amazon S3 location of your transcript. You can use this URI to access or download your subtitle file. Your subtitle file is stored in the same location as your transcript. If you specified both WebVTT and SubRip subtitle formats, two URIs are provided.
If you included OutputBucketName
in your transcription job request,
this is the URI of that bucket. If you also included OutputKey
in your
request, your output is located in the path you specified in your
request.
If you didn't include OutputBucketName
in your transcription job
request, your subtitle file is stored in a service-managed bucket, and
TranscriptFileUri
provides you with a temporary URI you can use for
secure access to your subtitle file.
Temporary URIs for service-managed Amazon S3 buckets are only valid for
15 minutes. If you get an AccessDenied
error, you can get a new
temporary URI by running a GetTranscriptionJob
or
ListTranscriptionJobs
request.
Tag
Adds metadata, in the form of a key:value pair, to the specified resource.
For example, you could add the tag Department:Sales
to a resource to
indicate that it pertains to your organization's sales department. You
can also use tags for tag-based access control.
To learn more about tagging, see Tagging resources.
See: newTag
smart constructor.
Tag' | |
|
Instances
FromJSON Tag Source # | |
ToJSON Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag | |
Generic Tag Source # | |
Read Tag Source # | |
Show Tag Source # | |
NFData Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag | |
Eq Tag Source # | |
Hashable Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag | |
type Rep Tag Source # | |
Defined in Amazonka.Transcribe.Types.Tag type Rep Tag = D1 ('MetaData "Tag" "Amazonka.Transcribe.Types.Tag" "amazonka-transcribe-2.0-4BjJqzwmm0K94bsEMe1tnF" 'False) (C1 ('MetaCons "Tag'" 'PrefixI 'True) (S1 ('MetaSel ('Just "key") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Text) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Text))) |
Create a value of Tag
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:key:Tag'
, tag_key
- The first part of a key:value pair that forms a tag associated with a
given resource. For example, in the tag Department:Sales
, the key is
'Department'.
$sel:value:Tag'
, tag_value
- The second part of a key:value pair that forms a tag associated with a
given resource. For example, in the tag Department:Sales
, the value is
'Sales'.
Note that you can set the value of a tag to an empty string, but you can't set the value of a tag to null. Omitting the tag value is the same as using an empty string.
tag_key :: Lens' Tag Text Source #
The first part of a key:value pair that forms a tag associated with a
given resource. For example, in the tag Department:Sales
, the key is
'Department'.
tag_value :: Lens' Tag Text Source #
The second part of a key:value pair that forms a tag associated with a
given resource. For example, in the tag Department:Sales
, the value is
'Sales'.
Note that you can set the value of a tag to an empty string, but you can't set the value of a tag to null. Omitting the tag value is the same as using an empty string.
Transcript
data Transcript Source #
Provides you with the Amazon S3 URI you can use to access your transcript.
See: newTranscript
smart constructor.
Transcript' | |
|
Instances
newTranscript :: Transcript Source #
Create a value of Transcript
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:redactedTranscriptFileUri:Transcript'
, transcript_redactedTranscriptFileUri
- The Amazon S3 location of your redacted transcript. You can use this URI
to access or download your transcript.
If you included OutputBucketName
in your transcription job request,
this is the URI of that bucket. If you also included OutputKey
in your
request, your output is located in the path you specified in your
request.
If you didn't include OutputBucketName
in your transcription job
request, your transcript is stored in a service-managed bucket, and
RedactedTranscriptFileUri
provides you with a temporary URI you can
use for secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for
15 minutes. If you get an AccessDenied
error, you can get a new
temporary URI by running a GetTranscriptionJob
or
ListTranscriptionJobs
request.
$sel:transcriptFileUri:Transcript'
, transcript_transcriptFileUri
- The Amazon S3 location of your transcript. You can use this URI to
access or download your transcript.
If you included OutputBucketName
in your transcription job request,
this is the URI of that bucket. If you also included OutputKey
in your
request, your output is located in the path you specified in your
request.
If you didn't include OutputBucketName
in your transcription job
request, your transcript is stored in a service-managed bucket, and
TranscriptFileUri
provides you with a temporary URI you can use for
secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for
15 minutes. If you get an AccessDenied
error, you can get a new
temporary URI by running a GetTranscriptionJob
or
ListTranscriptionJobs
request.
transcript_redactedTranscriptFileUri :: Lens' Transcript (Maybe Text) Source #
The Amazon S3 location of your redacted transcript. You can use this URI to access or download your transcript.
If you included OutputBucketName
in your transcription job request,
this is the URI of that bucket. If you also included OutputKey
in your
request, your output is located in the path you specified in your
request.
If you didn't include OutputBucketName
in your transcription job
request, your transcript is stored in a service-managed bucket, and
RedactedTranscriptFileUri
provides you with a temporary URI you can
use for secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for
15 minutes. If you get an AccessDenied
error, you can get a new
temporary URI by running a GetTranscriptionJob
or
ListTranscriptionJobs
request.
transcript_transcriptFileUri :: Lens' Transcript (Maybe Text) Source #
The Amazon S3 location of your transcript. You can use this URI to access or download your transcript.
If you included OutputBucketName
in your transcription job request,
this is the URI of that bucket. If you also included OutputKey
in your
request, your output is located in the path you specified in your
request.
If you didn't include OutputBucketName
in your transcription job
request, your transcript is stored in a service-managed bucket, and
TranscriptFileUri
provides you with a temporary URI you can use for
secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for
15 minutes. If you get an AccessDenied
error, you can get a new
temporary URI by running a GetTranscriptionJob
or
ListTranscriptionJobs
request.
TranscriptFilter
data TranscriptFilter Source #
Flag the presence or absence of specific words or phrases detected in your Call Analytics transcription output.
Rules using TranscriptFilter
are designed to match:
- Custom words or phrases spoken by the agent, the customer, or both
- Custom words or phrases not spoken by the agent, the customer, or either
- Custom words or phrases that occur at a specific time frame
See Rule criteria for batch categories and Rule criteria for streaming categories for usage examples.
See: newTranscriptFilter
smart constructor.
TranscriptFilter' | |
|
Instances
Create a value of TranscriptFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:absoluteTimeRange:TranscriptFilter'
, transcriptFilter_absoluteTimeRange
- Makes it possible to specify a time range (in milliseconds) in your
audio, during which you want to search for the specified key words or
phrases. See AbsoluteTimeRange for more detail.
$sel:negate:TranscriptFilter'
, transcriptFilter_negate
- Set to TRUE
to flag the absence of the phrase that you specified in
your request. Set to FALSE
to flag the presence of the phrase that you
specified in your request.
$sel:participantRole:TranscriptFilter'
, transcriptFilter_participantRole
- Specify the participant that you want to flag. Omitting this parameter
is equivalent to specifying both participants.
$sel:relativeTimeRange:TranscriptFilter'
, transcriptFilter_relativeTimeRange
- Makes it possible to specify a time range (in percentage) in your media
file, during which you want to search for the specified key words or
phrases. See RelativeTimeRange for more detail.
$sel:transcriptFilterType:TranscriptFilter'
, transcriptFilter_transcriptFilterType
- Flag the presence or absence of an exact match to the phrases that you
specify. For example, if you specify the phrase "speak to a manager"
as your Targets
value, only that exact phrase is flagged.
Note that semantic matching is not supported. For example, if your customer says "speak to the manager", instead of "speak to a manager", your content is not flagged.
$sel:targets:TranscriptFilter'
, transcriptFilter_targets
- Specify the phrases that you want to flag.
transcriptFilter_absoluteTimeRange :: Lens' TranscriptFilter (Maybe AbsoluteTimeRange) Source #
Makes it possible to specify a time range (in milliseconds) in your audio, during which you want to search for the specified key words or phrases. See AbsoluteTimeRange for more detail.
transcriptFilter_negate :: Lens' TranscriptFilter (Maybe Bool) Source #
Set to TRUE
to flag the absence of the phrase that you specified in
your request. Set to FALSE
to flag the presence of the phrase that you
specified in your request.
transcriptFilter_participantRole :: Lens' TranscriptFilter (Maybe ParticipantRole) Source #
Specify the participant that you want to flag. Omitting this parameter is equivalent to specifying both participants.
transcriptFilter_relativeTimeRange :: Lens' TranscriptFilter (Maybe RelativeTimeRange) Source #
Makes it possible to specify a time range (in percentage) in your media file, during which you want to search for the specified key words or phrases. See RelativeTimeRange for more detail.
transcriptFilter_transcriptFilterType :: Lens' TranscriptFilter TranscriptFilterType Source #
Flag the presence or absence of an exact match to the phrases that you
specify. For example, if you specify the phrase "speak to a manager"
as your Targets
value, only that exact phrase is flagged.
Note that semantic matching is not supported. For example, if your customer says "speak to the manager", instead of "speak to a manager", your content is not flagged.
transcriptFilter_targets :: Lens' TranscriptFilter (NonEmpty Text) Source #
Specify the phrases that you want to flag.
TranscriptionJob
data TranscriptionJob Source #
Provides detailed information about a transcription job.
To view the status of the specified transcription job, check the
TranscriptionJobStatus
field. If the status is COMPLETED
, the job is
finished and you can find the results at the location specified in
TranscriptFileUri
. If the status is FAILED
, FailureReason
provides
details on why your transcription job failed.
If you enabled content redaction, the redacted transcript can be found
at the location specified in RedactedTranscriptFileUri
.
See: newTranscriptionJob
smart constructor.
TranscriptionJob' | |
|
Instances
newTranscriptionJob :: TranscriptionJob Source #
Create a value of TranscriptionJob
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:completionTime:TranscriptionJob'
, transcriptionJob_completionTime
- The date and time the specified transcription job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that started processing at 12:33 PM UTC-7 on May 4, 2022.
$sel:contentRedaction:TranscriptionJob'
, transcriptionJob_contentRedaction
- Indicates whether redaction was enabled in your transcript.
$sel:creationTime:TranscriptionJob'
, transcriptionJob_creationTime
- The date and time the specified transcription job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:failureReason:TranscriptionJob'
, transcriptionJob_failureReason
- If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job request failed.
The FailureReason
field contains one of the following values:
- Unsupported media format. The media format specified in MediaFormat isn't valid. Refer to MediaFormat for a list of supported formats.
- The media format provided does not match the detected media format. The media format specified in MediaFormat doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
- Invalid sample rate for audio file. The sample rate specified in MediaSampleRateHertz isn't valid. The sample rate must be between 8,000 and 48,000 hertz.
- The sample rate provided does not match the detected sample rate. The sample rate specified in MediaSampleRateHertz doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
- Invalid file size: file size too large. The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
- Invalid number of channels: number of channels too large. Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
$sel:identifiedLanguageScore:TranscriptionJob'
, transcriptionJob_identifiedLanguageScore
- The confidence score associated with the language identified in your
media file.
Confidence scores are values between 0 and 1; a larger value indicates a higher probability that the identified language correctly matches the language spoken in your media.
$sel:identifyLanguage:TranscriptionJob'
, transcriptionJob_identifyLanguage
- Indicates whether automatic language identification was enabled (TRUE
)
for the specified transcription job.
$sel:identifyMultipleLanguages:TranscriptionJob'
, transcriptionJob_identifyMultipleLanguages
- Indicates whether automatic multi-language identification was enabled
(TRUE
) for the specified transcription job.
$sel:jobExecutionSettings:TranscriptionJob'
, transcriptionJob_jobExecutionSettings
- Provides information about how your transcription job was processed.
This parameter shows if your request was queued and what data access
role was used.
TranscriptionJob
, transcriptionJob_languageCode
- The language code used to create your transcription job. This parameter
is used with single-language identification. For multi-language
identification requests, refer to the plural version of this parameter,
LanguageCodes
.
$sel:languageCodes:TranscriptionJob'
, transcriptionJob_languageCodes
- The language codes used to create your transcription job. This parameter
is used with multi-language identification. For single-language
identification requests, refer to the singular version of this
parameter, LanguageCode
.
$sel:languageIdSettings:TranscriptionJob'
, transcriptionJob_languageIdSettings
- Provides the name and language of all custom language models, custom
vocabularies, and custom vocabulary filters that you included in your
request.
$sel:languageOptions:TranscriptionJob'
, transcriptionJob_languageOptions
- Provides the language codes you specified in your request.
$sel:media:TranscriptionJob'
, transcriptionJob_media
- Provides the Amazon S3 location of the media file you used in your
request.
$sel:mediaFormat:TranscriptionJob'
, transcriptionJob_mediaFormat
- The format of the input media file.
$sel:mediaSampleRateHertz:TranscriptionJob'
, transcriptionJob_mediaSampleRateHertz
- The sample rate, in hertz, of the audio track in your input media file.
$sel:modelSettings:TranscriptionJob'
, transcriptionJob_modelSettings
- Provides information on the custom language model you included in your
request.
$sel:settings:TranscriptionJob'
, transcriptionJob_settings
- Provides information on any additional settings that were included in
your request. Additional settings include channel identification,
alternative transcriptions, speaker partitioning, custom vocabularies,
and custom vocabulary filters.
$sel:startTime:TranscriptionJob'
, transcriptionJob_startTime
- The date and time the specified transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:subtitles:TranscriptionJob'
, transcriptionJob_subtitles
- Indicates whether subtitles were generated with your transcription.
$sel:tags:TranscriptionJob'
, transcriptionJob_tags
- The tags, each in the form of a key:value pair, assigned to the
specified transcription job.
$sel:transcript:TranscriptionJob'
, transcriptionJob_transcript
- Provides you with the Amazon S3 URI you can use to access your
transcript.
$sel:transcriptionJobName:TranscriptionJob'
, transcriptionJob_transcriptionJobName
- The name of the transcription job. Job names are case sensitive and must
be unique within an Amazon Web Services account.
$sel:transcriptionJobStatus:TranscriptionJob'
, transcriptionJob_transcriptionJobStatus
- Provides the status of the specified transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
transcriptionJob_completionTime :: Lens' TranscriptionJob (Maybe UTCTime) Source #
The date and time the specified transcription job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
transcriptionJob_contentRedaction :: Lens' TranscriptionJob (Maybe ContentRedaction) Source #
Indicates whether redaction was enabled in your transcript.
transcriptionJob_creationTime :: Lens' TranscriptionJob (Maybe UTCTime) Source #
The date and time the specified transcription job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
transcriptionJob_failureReason :: Lens' TranscriptionJob (Maybe Text) Source #
If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job request failed.
The FailureReason
field contains one of the following values:
- Unsupported media format. The media format specified in MediaFormat isn't valid. Refer to MediaFormat for a list of supported formats.
- The media format provided does not match the detected media format. The media format specified in MediaFormat doesn't match the format of the input file. Check the media format of your media file and correct the specified value.
- Invalid sample rate for audio file. The sample rate specified in MediaSampleRateHertz isn't valid. The sample rate must be between 8,000 and 48,000 hertz.
- The sample rate provided does not match the detected sample rate. The sample rate specified in MediaSampleRateHertz doesn't match the sample rate detected in your input media file. Check the sample rate of your media file and correct the specified value.
- Invalid file size: file size too large. The size of your media file is larger than what Amazon Transcribe can process. For more information, refer to Guidelines and quotas.
- Invalid number of channels: number of channels too large. Your audio contains more channels than Amazon Transcribe is able to process. For more information, refer to Guidelines and quotas.
transcriptionJob_identifiedLanguageScore :: Lens' TranscriptionJob (Maybe Double) Source #
The confidence score associated with the language identified in your media file.
Confidence scores are values between 0 and 1; a larger value indicates a higher probability that the identified language correctly matches the language spoken in your media.
transcriptionJob_identifyLanguage :: Lens' TranscriptionJob (Maybe Bool) Source #
Indicates whether automatic language identification was enabled (TRUE
)
for the specified transcription job.
transcriptionJob_identifyMultipleLanguages :: Lens' TranscriptionJob (Maybe Bool) Source #
Indicates whether automatic multi-language identification was enabled
(TRUE
) for the specified transcription job.
transcriptionJob_jobExecutionSettings :: Lens' TranscriptionJob (Maybe JobExecutionSettings) Source #
Provides information about how your transcription job was processed. This parameter shows if your request was queued and what data access role was used.
transcriptionJob_languageCode :: Lens' TranscriptionJob (Maybe LanguageCode) Source #
The language code used to create your transcription job. This parameter
is used with single-language identification. For multi-language
identification requests, refer to the plural version of this parameter,
LanguageCodes
.
transcriptionJob_languageCodes :: Lens' TranscriptionJob (Maybe [LanguageCodeItem]) Source #
The language codes used to create your transcription job. This parameter
is used with multi-language identification. For single-language
identification requests, refer to the singular version of this
parameter, LanguageCode
.
transcriptionJob_languageIdSettings :: Lens' TranscriptionJob (Maybe (HashMap LanguageCode LanguageIdSettings)) Source #
Provides the name and language of all custom language models, custom vocabularies, and custom vocabulary filters that you included in your request.
transcriptionJob_languageOptions :: Lens' TranscriptionJob (Maybe (NonEmpty LanguageCode)) Source #
Provides the language codes you specified in your request.
transcriptionJob_media :: Lens' TranscriptionJob (Maybe Media) Source #
Provides the Amazon S3 location of the media file you used in your request.
transcriptionJob_mediaFormat :: Lens' TranscriptionJob (Maybe MediaFormat) Source #
The format of the input media file.
transcriptionJob_mediaSampleRateHertz :: Lens' TranscriptionJob (Maybe Natural) Source #
The sample rate, in hertz, of the audio track in your input media file.
transcriptionJob_modelSettings :: Lens' TranscriptionJob (Maybe ModelSettings) Source #
Provides information on the custom language model you included in your request.
transcriptionJob_settings :: Lens' TranscriptionJob (Maybe Settings) Source #
Provides information on any additional settings that were included in your request. Additional settings include channel identification, alternative transcriptions, speaker partitioning, custom vocabularies, and custom vocabulary filters.
transcriptionJob_startTime :: Lens' TranscriptionJob (Maybe UTCTime) Source #
The date and time the specified transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
transcriptionJob_subtitles :: Lens' TranscriptionJob (Maybe SubtitlesOutput) Source #
Indicates whether subtitles were generated with your transcription.
transcriptionJob_tags :: Lens' TranscriptionJob (Maybe (NonEmpty Tag)) Source #
The tags, each in the form of a key:value pair, assigned to the specified transcription job.
transcriptionJob_transcript :: Lens' TranscriptionJob (Maybe Transcript) Source #
Provides you with the Amazon S3 URI you can use to access your transcript.
transcriptionJob_transcriptionJobName :: Lens' TranscriptionJob (Maybe Text) Source #
The name of the transcription job. Job names are case sensitive and must be unique within an Amazon Web Services account.
transcriptionJob_transcriptionJobStatus :: Lens' TranscriptionJob (Maybe TranscriptionJobStatus) Source #
Provides the status of the specified transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
TranscriptionJobSummary
data TranscriptionJobSummary Source #
Provides detailed information about a specific transcription job.
See: newTranscriptionJobSummary
smart constructor.
TranscriptionJobSummary' | |
|
Instances
newTranscriptionJobSummary :: TranscriptionJobSummary Source #
Create a value of TranscriptionJobSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:completionTime:TranscriptionJobSummary'
, transcriptionJobSummary_completionTime
- The date and time the specified transcription job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
$sel:contentRedaction:TranscriptionJobSummary'
, transcriptionJobSummary_contentRedaction
- The content redaction settings of the transcription job.
$sel:creationTime:TranscriptionJobSummary'
, transcriptionJobSummary_creationTime
- The date and time the specified transcription job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:failureReason:TranscriptionJobSummary'
, transcriptionJobSummary_failureReason
- If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job failed. See also:
Common Errors.
$sel:identifiedLanguageScore:TranscriptionJobSummary'
, transcriptionJobSummary_identifiedLanguageScore
- The confidence score associated with the language identified in your
media file.
Confidence scores are values between 0 and 1; a larger value indicates a higher probability that the identified language correctly matches the language spoken in your media.
$sel:identifyLanguage:TranscriptionJobSummary'
, transcriptionJobSummary_identifyLanguage
- Indicates whether automatic language identification was enabled (TRUE
)
for the specified transcription job.
$sel:identifyMultipleLanguages:TranscriptionJobSummary'
, transcriptionJobSummary_identifyMultipleLanguages
- Indicates whether automatic multi-language identification was enabled
(TRUE
) for the specified transcription job.
TranscriptionJobSummary
, transcriptionJobSummary_languageCode
- The language code used to create your transcription.
$sel:languageCodes:TranscriptionJobSummary'
, transcriptionJobSummary_languageCodes
- The language codes used to create your transcription job. This parameter
is used with multi-language identification. For single-language
identification, the singular version of this parameter, LanguageCode
,
is present.
$sel:modelSettings:TranscriptionJobSummary'
, transcriptionJobSummary_modelSettings
- Undocumented member.
$sel:outputLocationType:TranscriptionJobSummary'
, transcriptionJobSummary_outputLocationType
- Indicates where the specified transcription output is stored.
If the value is CUSTOMER_BUCKET
, the location is the Amazon S3 bucket
you specified using the OutputBucketName
parameter in your request. If
you also included OutputKey
in your request, your output is located in
the path you specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed
Amazon S3 bucket. To access a transcript stored in a service-managed
bucket, use the URI shown in the TranscriptFileUri
or
RedactedTranscriptFileUri
field.
$sel:startTime:TranscriptionJobSummary'
, transcriptionJobSummary_startTime
- The date and time your transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
$sel:transcriptionJobName:TranscriptionJobSummary'
, transcriptionJobSummary_transcriptionJobName
- The name of the transcription job. Job names are case sensitive and must
be unique within an Amazon Web Services account.
$sel:transcriptionJobStatus:TranscriptionJobSummary'
, transcriptionJobSummary_transcriptionJobStatus
- Provides the status of your transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
transcriptionJobSummary_completionTime :: Lens' TranscriptionJobSummary (Maybe UTCTime) Source #
The date and time the specified transcription job finished processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:33:13.922000-07:00
represents a transcription
job that finished processing at 12:33 PM UTC-7 on May 4, 2022.
transcriptionJobSummary_contentRedaction :: Lens' TranscriptionJobSummary (Maybe ContentRedaction) Source #
The content redaction settings of the transcription job.
transcriptionJobSummary_creationTime :: Lens' TranscriptionJobSummary (Maybe UTCTime) Source #
The date and time the specified transcription job request was made.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
transcriptionJobSummary_failureReason :: Lens' TranscriptionJobSummary (Maybe Text) Source #
If TranscriptionJobStatus
is FAILED
, FailureReason
contains
information about why the transcription job failed. See also:
Common Errors.
transcriptionJobSummary_identifiedLanguageScore :: Lens' TranscriptionJobSummary (Maybe Double) Source #
The confidence score associated with the language identified in your media file.
Confidence scores are values between 0 and 1; a larger value indicates a higher probability that the identified language correctly matches the language spoken in your media.
transcriptionJobSummary_identifyLanguage :: Lens' TranscriptionJobSummary (Maybe Bool) Source #
Indicates whether automatic language identification was enabled (TRUE
)
for the specified transcription job.
transcriptionJobSummary_identifyMultipleLanguages :: Lens' TranscriptionJobSummary (Maybe Bool) Source #
Indicates whether automatic multi-language identification was enabled
(TRUE
) for the specified transcription job.
transcriptionJobSummary_languageCode :: Lens' TranscriptionJobSummary (Maybe LanguageCode) Source #
The language code used to create your transcription.
transcriptionJobSummary_languageCodes :: Lens' TranscriptionJobSummary (Maybe [LanguageCodeItem]) Source #
The language codes used to create your transcription job. This parameter
is used with multi-language identification. For single-language
identification, the singular version of this parameter, LanguageCode
,
is present.
transcriptionJobSummary_modelSettings :: Lens' TranscriptionJobSummary (Maybe ModelSettings) Source #
Undocumented member.
transcriptionJobSummary_outputLocationType :: Lens' TranscriptionJobSummary (Maybe OutputLocationType) Source #
Indicates where the specified transcription output is stored.
If the value is CUSTOMER_BUCKET
, the location is the Amazon S3 bucket
you specified using the OutputBucketName
parameter in your request. If
you also included OutputKey
in your request, your output is located in
the path you specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed
Amazon S3 bucket. To access a transcript stored in a service-managed
bucket, use the URI shown in the TranscriptFileUri
or
RedactedTranscriptFileUri
field.
transcriptionJobSummary_startTime :: Lens' TranscriptionJobSummary (Maybe UTCTime) Source #
The date and time your transcription job began processing.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.789000-07:00
represents a transcription
job that started processing at 12:32 PM UTC-7 on May 4, 2022.
transcriptionJobSummary_transcriptionJobName :: Lens' TranscriptionJobSummary (Maybe Text) Source #
The name of the transcription job. Job names are case sensitive and must be unique within an Amazon Web Services account.
transcriptionJobSummary_transcriptionJobStatus :: Lens' TranscriptionJobSummary (Maybe TranscriptionJobStatus) Source #
Provides the status of your transcription job.
If the status is COMPLETED
, the job is finished and you can find the
results at the location specified in TranscriptFileUri
(or
RedactedTranscriptFileUri
, if you requested transcript redaction). If
the status is FAILED
, FailureReason
provides details on why your
transcription job failed.
VocabularyFilterInfo
data VocabularyFilterInfo Source #
Provides information about a custom vocabulary filter, including the language of the filter, when it was last modified, and its name.
See: newVocabularyFilterInfo
smart constructor.
VocabularyFilterInfo' | |
|
Instances
newVocabularyFilterInfo :: VocabularyFilterInfo Source #
Create a value of VocabularyFilterInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageCode:VocabularyFilterInfo'
, vocabularyFilterInfo_languageCode
- The language code that represents the language of the entries in your
vocabulary filter. Each custom vocabulary filter must contain terms in
only one language.
A custom vocabulary filter can only be used to transcribe files in the
same language as the filter. For example, if you create a custom
vocabulary filter using US English (en-US
), you can only apply this
filter to files that contain English audio.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
$sel:lastModifiedTime:VocabularyFilterInfo'
, vocabularyFilterInfo_lastModifiedTime
- The date and time the specified custom vocabulary filter was last
modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
$sel:vocabularyFilterName:VocabularyFilterInfo'
, vocabularyFilterInfo_vocabularyFilterName
- A unique name, chosen by you, for your custom vocabulary filter. This
name is case sensitive, cannot contain spaces, and must be unique within
an Amazon Web Services account.
vocabularyFilterInfo_languageCode :: Lens' VocabularyFilterInfo (Maybe LanguageCode) Source #
The language code that represents the language of the entries in your vocabulary filter. Each custom vocabulary filter must contain terms in only one language.
A custom vocabulary filter can only be used to transcribe files in the
same language as the filter. For example, if you create a custom
vocabulary filter using US English (en-US
), you can only apply this
filter to files that contain English audio.
For a list of supported languages and their associated language codes, refer to the Supported languages table.
vocabularyFilterInfo_lastModifiedTime :: Lens' VocabularyFilterInfo (Maybe UTCTime) Source #
The date and time the specified custom vocabulary filter was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
vocabularyFilterInfo_vocabularyFilterName :: Lens' VocabularyFilterInfo (Maybe Text) Source #
A unique name, chosen by you, for your custom vocabulary filter. This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account.
VocabularyInfo
data VocabularyInfo Source #
Provides information about a custom vocabulary, including the language of the custom vocabulary, when it was last modified, its name, and the processing state.
See: newVocabularyInfo
smart constructor.
VocabularyInfo' | |
|
Instances
newVocabularyInfo :: VocabularyInfo Source #
Create a value of VocabularyInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:languageCode:VocabularyInfo'
, vocabularyInfo_languageCode
- The language code used to create your custom vocabulary. Each custom
vocabulary must contain terms in only one language.
A custom vocabulary can only be used to transcribe files in the same
language as the custom vocabulary. For example, if you create a custom
vocabulary using US English (en-US
), you can only apply this custom
vocabulary to files that contain English audio.
$sel:lastModifiedTime:VocabularyInfo'
, vocabularyInfo_lastModifiedTime
- The date and time the specified custom vocabulary was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
$sel:vocabularyName:VocabularyInfo'
, vocabularyInfo_vocabularyName
- A unique name, chosen by you, for your custom vocabulary. This name is
case sensitive, cannot contain spaces, and must be unique within an
Amazon Web Services account.
$sel:vocabularyState:VocabularyInfo'
, vocabularyInfo_vocabularyState
- The processing state of your custom vocabulary. If the state is READY
,
you can use the custom vocabulary in a StartTranscriptionJob
request.
vocabularyInfo_languageCode :: Lens' VocabularyInfo (Maybe LanguageCode) Source #
The language code used to create your custom vocabulary. Each custom vocabulary must contain terms in only one language.
A custom vocabulary can only be used to transcribe files in the same
language as the custom vocabulary. For example, if you create a custom
vocabulary using US English (en-US
), you can only apply this custom
vocabulary to files that contain English audio.
vocabularyInfo_lastModifiedTime :: Lens' VocabularyInfo (Maybe UTCTime) Source #
The date and time the specified custom vocabulary was last modified.
Timestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For
example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on
May 4, 2022.
vocabularyInfo_vocabularyName :: Lens' VocabularyInfo (Maybe Text) Source #
A unique name, chosen by you, for your custom vocabulary. This name is case sensitive, cannot contain spaces, and must be unique within an Amazon Web Services account.
vocabularyInfo_vocabularyState :: Lens' VocabularyInfo (Maybe VocabularyState) Source #
The processing state of your custom vocabulary. If the state is READY
,
you can use the custom vocabulary in a StartTranscriptionJob
request.