Copyright    : (c) 2013-2023 Brendan Hay
License      : Mozilla Public License, v. 2.0.
Maintainer   : Brendan Hay
Stability    : auto-generated
Portability  : non-portable (GHC extensions)
Safe Haskell : Safe-Inferred
Language     : Haskell2010
- Service Configuration
- Errors
- Attribute
- BodyPart
- CelebrityRecognitionSortBy
- ContentClassifier
- ContentModerationSortBy
- DatasetStatus
- DatasetStatusMessageCode
- DatasetType
- DetectLabelsFeatureName
- EmotionName
- FaceAttributes
- FaceSearchSortBy
- GenderType
- KnownGenderType
- LabelDetectionAggregateBy
- LabelDetectionFeatureName
- LabelDetectionSortBy
- LandmarkType
- OrientationCorrection
- PersonTrackingSortBy
- ProjectStatus
- ProjectVersionStatus
- ProtectiveEquipmentType
- QualityFilter
- Reason
- SegmentType
- StreamProcessorParameterToDelete
- StreamProcessorStatus
- TechnicalCueType
- TextTypes
- VideoColorRange
- VideoJobStatus
- AgeRange
- Asset
- AudioMetadata
- Beard
- BlackFrame
- BoundingBox
- Celebrity
- CelebrityDetail
- CelebrityRecognition
- CompareFacesMatch
- ComparedFace
- ComparedSourceImageFace
- ConnectedHomeSettings
- ConnectedHomeSettingsForUpdate
- ContentModerationDetection
- CoversBodyPart
- CustomLabel
- DatasetChanges
- DatasetDescription
- DatasetLabelDescription
- DatasetLabelStats
- DatasetMetadata
- DatasetSource
- DatasetStats
- DetectLabelsImageBackground
- DetectLabelsImageForeground
- DetectLabelsImageProperties
- DetectLabelsImagePropertiesSettings
- DetectLabelsImageQuality
- DetectLabelsSettings
- DetectTextFilters
- DetectionFilter
- DistributeDataset
- DominantColor
- Emotion
- EquipmentDetection
- EvaluationResult
- EyeOpen
- Eyeglasses
- Face
- FaceDetail
- FaceDetection
- FaceMatch
- FaceRecord
- FaceSearchSettings
- Gender
- GeneralLabelsSettings
- Geometry
- GroundTruthManifest
- HumanLoopActivationOutput
- HumanLoopConfig
- HumanLoopDataAttributes
- Image
- ImageQuality
- Instance
- KinesisDataStream
- KinesisVideoStream
- KinesisVideoStreamStartSelector
- KnownGender
- Label
- LabelAlias
- LabelCategory
- LabelDetection
- LabelDetectionSettings
- Landmark
- ModerationLabel
- MouthOpen
- Mustache
- NotificationChannel
- OutputConfig
- Parent
- PersonDetail
- PersonDetection
- PersonMatch
- Point
- Pose
- ProjectDescription
- ProjectPolicy
- ProjectVersionDescription
- ProtectiveEquipmentBodyPart
- ProtectiveEquipmentPerson
- ProtectiveEquipmentSummarizationAttributes
- ProtectiveEquipmentSummary
- RegionOfInterest
- S3Destination
- S3Object
- SegmentDetection
- SegmentTypeInfo
- ShotSegment
- Smile
- StartSegmentDetectionFilters
- StartShotDetectionFilter
- StartTechnicalCueDetectionFilter
- StartTextDetectionFilters
- StreamProcessingStartSelector
- StreamProcessingStopSelector
- StreamProcessor
- StreamProcessorDataSharingPreference
- StreamProcessorInput
- StreamProcessorNotificationChannel
- StreamProcessorOutput
- StreamProcessorSettings
- StreamProcessorSettingsForUpdate
- Summary
- Sunglasses
- TechnicalCueSegment
- TestingData
- TestingDataResult
- TextDetection
- TextDetectionResult
- TrainingData
- TrainingDataResult
- UnindexedFace
- ValidationData
- Video
- VideoMetadata
Synopsis
- defaultService :: Service
- _AccessDeniedException :: AsError a => Fold a ServiceError
- _HumanLoopQuotaExceededException :: AsError a => Fold a ServiceError
- _IdempotentParameterMismatchException :: AsError a => Fold a ServiceError
- _ImageTooLargeException :: AsError a => Fold a ServiceError
- _InternalServerError :: AsError a => Fold a ServiceError
- _InvalidImageFormatException :: AsError a => Fold a ServiceError
- _InvalidPaginationTokenException :: AsError a => Fold a ServiceError
- _InvalidParameterException :: AsError a => Fold a ServiceError
- _InvalidPolicyRevisionIdException :: AsError a => Fold a ServiceError
- _InvalidS3ObjectException :: AsError a => Fold a ServiceError
- _LimitExceededException :: AsError a => Fold a ServiceError
- _MalformedPolicyDocumentException :: AsError a => Fold a ServiceError
- _ProvisionedThroughputExceededException :: AsError a => Fold a ServiceError
- _ResourceAlreadyExistsException :: AsError a => Fold a ServiceError
- _ResourceInUseException :: AsError a => Fold a ServiceError
- _ResourceNotFoundException :: AsError a => Fold a ServiceError
- _ResourceNotReadyException :: AsError a => Fold a ServiceError
- _ServiceQuotaExceededException :: AsError a => Fold a ServiceError
- _ThrottlingException :: AsError a => Fold a ServiceError
- _VideoTooLargeException :: AsError a => Fold a ServiceError
- newtype Attribute where
- Attribute' { }
- pattern Attribute_ALL :: Attribute
- pattern Attribute_DEFAULT :: Attribute
- newtype BodyPart where
- BodyPart' { fromBodyPart :: Text }
- pattern BodyPart_FACE :: BodyPart
- pattern BodyPart_HEAD :: BodyPart
- pattern BodyPart_LEFT_HAND :: BodyPart
- pattern BodyPart_RIGHT_HAND :: BodyPart
- newtype CelebrityRecognitionSortBy where
- newtype ContentClassifier where
- newtype ContentModerationSortBy where
- newtype DatasetStatus where
- DatasetStatus' { }
- pattern DatasetStatus_CREATE_COMPLETE :: DatasetStatus
- pattern DatasetStatus_CREATE_FAILED :: DatasetStatus
- pattern DatasetStatus_CREATE_IN_PROGRESS :: DatasetStatus
- pattern DatasetStatus_DELETE_IN_PROGRESS :: DatasetStatus
- pattern DatasetStatus_UPDATE_COMPLETE :: DatasetStatus
- pattern DatasetStatus_UPDATE_FAILED :: DatasetStatus
- pattern DatasetStatus_UPDATE_IN_PROGRESS :: DatasetStatus
- newtype DatasetStatusMessageCode where
- newtype DatasetType where
- DatasetType' { }
- pattern DatasetType_TEST :: DatasetType
- pattern DatasetType_TRAIN :: DatasetType
- newtype DetectLabelsFeatureName where
- newtype EmotionName where
- EmotionName' { }
- pattern EmotionName_ANGRY :: EmotionName
- pattern EmotionName_CALM :: EmotionName
- pattern EmotionName_CONFUSED :: EmotionName
- pattern EmotionName_DISGUSTED :: EmotionName
- pattern EmotionName_FEAR :: EmotionName
- pattern EmotionName_HAPPY :: EmotionName
- pattern EmotionName_SAD :: EmotionName
- pattern EmotionName_SURPRISED :: EmotionName
- pattern EmotionName_UNKNOWN :: EmotionName
- newtype FaceAttributes where
- FaceAttributes' { }
- pattern FaceAttributes_ALL :: FaceAttributes
- pattern FaceAttributes_DEFAULT :: FaceAttributes
- newtype FaceSearchSortBy where
- FaceSearchSortBy' { }
- pattern FaceSearchSortBy_INDEX :: FaceSearchSortBy
- pattern FaceSearchSortBy_TIMESTAMP :: FaceSearchSortBy
- newtype GenderType where
- GenderType' { }
- pattern GenderType_Female :: GenderType
- pattern GenderType_Male :: GenderType
- newtype KnownGenderType where
- KnownGenderType' { }
- pattern KnownGenderType_Female :: KnownGenderType
- pattern KnownGenderType_Male :: KnownGenderType
- pattern KnownGenderType_Nonbinary :: KnownGenderType
- pattern KnownGenderType_Unlisted :: KnownGenderType
- newtype LabelDetectionAggregateBy where
- newtype LabelDetectionFeatureName where
- newtype LabelDetectionSortBy where
- newtype LandmarkType where
- LandmarkType' { }
- pattern LandmarkType_ChinBottom :: LandmarkType
- pattern LandmarkType_EyeLeft :: LandmarkType
- pattern LandmarkType_EyeRight :: LandmarkType
- pattern LandmarkType_LeftEyeBrowLeft :: LandmarkType
- pattern LandmarkType_LeftEyeBrowRight :: LandmarkType
- pattern LandmarkType_LeftEyeBrowUp :: LandmarkType
- pattern LandmarkType_LeftEyeDown :: LandmarkType
- pattern LandmarkType_LeftEyeLeft :: LandmarkType
- pattern LandmarkType_LeftEyeRight :: LandmarkType
- pattern LandmarkType_LeftEyeUp :: LandmarkType
- pattern LandmarkType_LeftPupil :: LandmarkType
- pattern LandmarkType_MidJawlineLeft :: LandmarkType
- pattern LandmarkType_MidJawlineRight :: LandmarkType
- pattern LandmarkType_MouthDown :: LandmarkType
- pattern LandmarkType_MouthLeft :: LandmarkType
- pattern LandmarkType_MouthRight :: LandmarkType
- pattern LandmarkType_MouthUp :: LandmarkType
- pattern LandmarkType_Nose :: LandmarkType
- pattern LandmarkType_NoseLeft :: LandmarkType
- pattern LandmarkType_NoseRight :: LandmarkType
- pattern LandmarkType_RightEyeBrowLeft :: LandmarkType
- pattern LandmarkType_RightEyeBrowRight :: LandmarkType
- pattern LandmarkType_RightEyeBrowUp :: LandmarkType
- pattern LandmarkType_RightEyeDown :: LandmarkType
- pattern LandmarkType_RightEyeLeft :: LandmarkType
- pattern LandmarkType_RightEyeRight :: LandmarkType
- pattern LandmarkType_RightEyeUp :: LandmarkType
- pattern LandmarkType_RightPupil :: LandmarkType
- pattern LandmarkType_UpperJawlineLeft :: LandmarkType
- pattern LandmarkType_UpperJawlineRight :: LandmarkType
- newtype OrientationCorrection where
- newtype PersonTrackingSortBy where
- newtype ProjectStatus where
- ProjectStatus' { }
- pattern ProjectStatus_CREATED :: ProjectStatus
- pattern ProjectStatus_CREATING :: ProjectStatus
- pattern ProjectStatus_DELETING :: ProjectStatus
- newtype ProjectVersionStatus where
- ProjectVersionStatus' { }
- pattern ProjectVersionStatus_COPYING_COMPLETED :: ProjectVersionStatus
- pattern ProjectVersionStatus_COPYING_FAILED :: ProjectVersionStatus
- pattern ProjectVersionStatus_COPYING_IN_PROGRESS :: ProjectVersionStatus
- pattern ProjectVersionStatus_DELETING :: ProjectVersionStatus
- pattern ProjectVersionStatus_FAILED :: ProjectVersionStatus
- pattern ProjectVersionStatus_RUNNING :: ProjectVersionStatus
- pattern ProjectVersionStatus_STARTING :: ProjectVersionStatus
- pattern ProjectVersionStatus_STOPPED :: ProjectVersionStatus
- pattern ProjectVersionStatus_STOPPING :: ProjectVersionStatus
- pattern ProjectVersionStatus_TRAINING_COMPLETED :: ProjectVersionStatus
- pattern ProjectVersionStatus_TRAINING_FAILED :: ProjectVersionStatus
- pattern ProjectVersionStatus_TRAINING_IN_PROGRESS :: ProjectVersionStatus
- newtype ProtectiveEquipmentType where
- newtype QualityFilter where
- QualityFilter' { }
- pattern QualityFilter_AUTO :: QualityFilter
- pattern QualityFilter_HIGH :: QualityFilter
- pattern QualityFilter_LOW :: QualityFilter
- pattern QualityFilter_MEDIUM :: QualityFilter
- pattern QualityFilter_NONE :: QualityFilter
- newtype Reason where
- Reason' { fromReason :: Text }
- pattern Reason_EXCEEDS_MAX_FACES :: Reason
- pattern Reason_EXTREME_POSE :: Reason
- pattern Reason_LOW_BRIGHTNESS :: Reason
- pattern Reason_LOW_CONFIDENCE :: Reason
- pattern Reason_LOW_FACE_QUALITY :: Reason
- pattern Reason_LOW_SHARPNESS :: Reason
- pattern Reason_SMALL_BOUNDING_BOX :: Reason
- newtype SegmentType where
- SegmentType' { }
- pattern SegmentType_SHOT :: SegmentType
- pattern SegmentType_TECHNICAL_CUE :: SegmentType
- newtype StreamProcessorParameterToDelete where
- newtype StreamProcessorStatus where
- StreamProcessorStatus' { }
- pattern StreamProcessorStatus_FAILED :: StreamProcessorStatus
- pattern StreamProcessorStatus_RUNNING :: StreamProcessorStatus
- pattern StreamProcessorStatus_STARTING :: StreamProcessorStatus
- pattern StreamProcessorStatus_STOPPED :: StreamProcessorStatus
- pattern StreamProcessorStatus_STOPPING :: StreamProcessorStatus
- pattern StreamProcessorStatus_UPDATING :: StreamProcessorStatus
- newtype TechnicalCueType where
- TechnicalCueType' { }
- pattern TechnicalCueType_BlackFrames :: TechnicalCueType
- pattern TechnicalCueType_ColorBars :: TechnicalCueType
- pattern TechnicalCueType_Content :: TechnicalCueType
- pattern TechnicalCueType_EndCredits :: TechnicalCueType
- pattern TechnicalCueType_OpeningCredits :: TechnicalCueType
- pattern TechnicalCueType_Slate :: TechnicalCueType
- pattern TechnicalCueType_StudioLogo :: TechnicalCueType
- newtype TextTypes where
- TextTypes' { }
- pattern TextTypes_LINE :: TextTypes
- pattern TextTypes_WORD :: TextTypes
- newtype VideoColorRange where
- VideoColorRange' { }
- pattern VideoColorRange_FULL :: VideoColorRange
- pattern VideoColorRange_LIMITED :: VideoColorRange
- newtype VideoJobStatus where
- VideoJobStatus' { }
- pattern VideoJobStatus_FAILED :: VideoJobStatus
- pattern VideoJobStatus_IN_PROGRESS :: VideoJobStatus
- pattern VideoJobStatus_SUCCEEDED :: VideoJobStatus
- data AgeRange = AgeRange' {}
- newAgeRange :: AgeRange
- ageRange_high :: Lens' AgeRange (Maybe Natural)
- ageRange_low :: Lens' AgeRange (Maybe Natural)
- data Asset = Asset' {}
- newAsset :: Asset
- asset_groundTruthManifest :: Lens' Asset (Maybe GroundTruthManifest)
- data AudioMetadata = AudioMetadata' {}
- newAudioMetadata :: AudioMetadata
- audioMetadata_codec :: Lens' AudioMetadata (Maybe Text)
- audioMetadata_durationMillis :: Lens' AudioMetadata (Maybe Natural)
- audioMetadata_numberOfChannels :: Lens' AudioMetadata (Maybe Natural)
- audioMetadata_sampleRate :: Lens' AudioMetadata (Maybe Natural)
- data Beard = Beard' {}
- newBeard :: Beard
- beard_confidence :: Lens' Beard (Maybe Double)
- beard_value :: Lens' Beard (Maybe Bool)
- data BlackFrame = BlackFrame' {}
- newBlackFrame :: BlackFrame
- blackFrame_maxPixelThreshold :: Lens' BlackFrame (Maybe Double)
- blackFrame_minCoveragePercentage :: Lens' BlackFrame (Maybe Double)
- data BoundingBox = BoundingBox' {}
- newBoundingBox :: BoundingBox
- boundingBox_height :: Lens' BoundingBox (Maybe Double)
- boundingBox_left :: Lens' BoundingBox (Maybe Double)
- boundingBox_top :: Lens' BoundingBox (Maybe Double)
- boundingBox_width :: Lens' BoundingBox (Maybe Double)
- data Celebrity = Celebrity' {
- face :: Maybe ComparedFace
- id :: Maybe Text
- knownGender :: Maybe KnownGender
- matchConfidence :: Maybe Double
- name :: Maybe Text
- urls :: Maybe [Text]
- newCelebrity :: Celebrity
- celebrity_face :: Lens' Celebrity (Maybe ComparedFace)
- celebrity_id :: Lens' Celebrity (Maybe Text)
- celebrity_knownGender :: Lens' Celebrity (Maybe KnownGender)
- celebrity_matchConfidence :: Lens' Celebrity (Maybe Double)
- celebrity_name :: Lens' Celebrity (Maybe Text)
- celebrity_urls :: Lens' Celebrity (Maybe [Text])
- data CelebrityDetail = CelebrityDetail' {
- boundingBox :: Maybe BoundingBox
- confidence :: Maybe Double
- face :: Maybe FaceDetail
- id :: Maybe Text
- knownGender :: Maybe KnownGender
- name :: Maybe Text
- urls :: Maybe [Text]
- newCelebrityDetail :: CelebrityDetail
- celebrityDetail_boundingBox :: Lens' CelebrityDetail (Maybe BoundingBox)
- celebrityDetail_confidence :: Lens' CelebrityDetail (Maybe Double)
- celebrityDetail_face :: Lens' CelebrityDetail (Maybe FaceDetail)
- celebrityDetail_id :: Lens' CelebrityDetail (Maybe Text)
- celebrityDetail_knownGender :: Lens' CelebrityDetail (Maybe KnownGender)
- celebrityDetail_name :: Lens' CelebrityDetail (Maybe Text)
- celebrityDetail_urls :: Lens' CelebrityDetail (Maybe [Text])
- data CelebrityRecognition = CelebrityRecognition' {}
- newCelebrityRecognition :: CelebrityRecognition
- celebrityRecognition_celebrity :: Lens' CelebrityRecognition (Maybe CelebrityDetail)
- celebrityRecognition_timestamp :: Lens' CelebrityRecognition (Maybe Integer)
- data CompareFacesMatch = CompareFacesMatch' {}
- newCompareFacesMatch :: CompareFacesMatch
- compareFacesMatch_face :: Lens' CompareFacesMatch (Maybe ComparedFace)
- compareFacesMatch_similarity :: Lens' CompareFacesMatch (Maybe Double)
- data ComparedFace = ComparedFace' {}
- newComparedFace :: ComparedFace
- comparedFace_boundingBox :: Lens' ComparedFace (Maybe BoundingBox)
- comparedFace_confidence :: Lens' ComparedFace (Maybe Double)
- comparedFace_emotions :: Lens' ComparedFace (Maybe [Emotion])
- comparedFace_landmarks :: Lens' ComparedFace (Maybe [Landmark])
- comparedFace_pose :: Lens' ComparedFace (Maybe Pose)
- comparedFace_quality :: Lens' ComparedFace (Maybe ImageQuality)
- comparedFace_smile :: Lens' ComparedFace (Maybe Smile)
- data ComparedSourceImageFace = ComparedSourceImageFace' {}
- newComparedSourceImageFace :: ComparedSourceImageFace
- comparedSourceImageFace_boundingBox :: Lens' ComparedSourceImageFace (Maybe BoundingBox)
- comparedSourceImageFace_confidence :: Lens' ComparedSourceImageFace (Maybe Double)
- data ConnectedHomeSettings = ConnectedHomeSettings' {}
- newConnectedHomeSettings :: NonEmpty Text -> ConnectedHomeSettings
- connectedHomeSettings_minConfidence :: Lens' ConnectedHomeSettings (Maybe Double)
- connectedHomeSettings_labels :: Lens' ConnectedHomeSettings (NonEmpty Text)
- data ConnectedHomeSettingsForUpdate = ConnectedHomeSettingsForUpdate' {}
- newConnectedHomeSettingsForUpdate :: ConnectedHomeSettingsForUpdate
- connectedHomeSettingsForUpdate_labels :: Lens' ConnectedHomeSettingsForUpdate (Maybe (NonEmpty Text))
- connectedHomeSettingsForUpdate_minConfidence :: Lens' ConnectedHomeSettingsForUpdate (Maybe Double)
- data ContentModerationDetection = ContentModerationDetection' {}
- newContentModerationDetection :: ContentModerationDetection
- contentModerationDetection_moderationLabel :: Lens' ContentModerationDetection (Maybe ModerationLabel)
- contentModerationDetection_timestamp :: Lens' ContentModerationDetection (Maybe Integer)
- data CoversBodyPart = CoversBodyPart' {}
- newCoversBodyPart :: CoversBodyPart
- coversBodyPart_confidence :: Lens' CoversBodyPart (Maybe Double)
- coversBodyPart_value :: Lens' CoversBodyPart (Maybe Bool)
- data CustomLabel = CustomLabel' {}
- newCustomLabel :: CustomLabel
- customLabel_confidence :: Lens' CustomLabel (Maybe Double)
- customLabel_geometry :: Lens' CustomLabel (Maybe Geometry)
- customLabel_name :: Lens' CustomLabel (Maybe Text)
- data DatasetChanges = DatasetChanges' {}
- newDatasetChanges :: ByteString -> DatasetChanges
- datasetChanges_groundTruth :: Lens' DatasetChanges ByteString
- data DatasetDescription = DatasetDescription' {}
- newDatasetDescription :: DatasetDescription
- datasetDescription_creationTimestamp :: Lens' DatasetDescription (Maybe UTCTime)
- datasetDescription_datasetStats :: Lens' DatasetDescription (Maybe DatasetStats)
- datasetDescription_lastUpdatedTimestamp :: Lens' DatasetDescription (Maybe UTCTime)
- datasetDescription_status :: Lens' DatasetDescription (Maybe DatasetStatus)
- datasetDescription_statusMessage :: Lens' DatasetDescription (Maybe Text)
- datasetDescription_statusMessageCode :: Lens' DatasetDescription (Maybe DatasetStatusMessageCode)
- data DatasetLabelDescription = DatasetLabelDescription' {}
- newDatasetLabelDescription :: DatasetLabelDescription
- datasetLabelDescription_labelName :: Lens' DatasetLabelDescription (Maybe Text)
- datasetLabelDescription_labelStats :: Lens' DatasetLabelDescription (Maybe DatasetLabelStats)
- data DatasetLabelStats = DatasetLabelStats' {}
- newDatasetLabelStats :: DatasetLabelStats
- datasetLabelStats_boundingBoxCount :: Lens' DatasetLabelStats (Maybe Natural)
- datasetLabelStats_entryCount :: Lens' DatasetLabelStats (Maybe Natural)
- data DatasetMetadata = DatasetMetadata' {}
- newDatasetMetadata :: DatasetMetadata
- datasetMetadata_creationTimestamp :: Lens' DatasetMetadata (Maybe UTCTime)
- datasetMetadata_datasetArn :: Lens' DatasetMetadata (Maybe Text)
- datasetMetadata_datasetType :: Lens' DatasetMetadata (Maybe DatasetType)
- datasetMetadata_status :: Lens' DatasetMetadata (Maybe DatasetStatus)
- datasetMetadata_statusMessage :: Lens' DatasetMetadata (Maybe Text)
- datasetMetadata_statusMessageCode :: Lens' DatasetMetadata (Maybe DatasetStatusMessageCode)
- data DatasetSource = DatasetSource' {}
- newDatasetSource :: DatasetSource
- datasetSource_datasetArn :: Lens' DatasetSource (Maybe Text)
- datasetSource_groundTruthManifest :: Lens' DatasetSource (Maybe GroundTruthManifest)
- data DatasetStats = DatasetStats' {}
- newDatasetStats :: DatasetStats
- datasetStats_errorEntries :: Lens' DatasetStats (Maybe Natural)
- datasetStats_labeledEntries :: Lens' DatasetStats (Maybe Natural)
- datasetStats_totalEntries :: Lens' DatasetStats (Maybe Natural)
- datasetStats_totalLabels :: Lens' DatasetStats (Maybe Natural)
- data DetectLabelsImageBackground = DetectLabelsImageBackground' {}
- newDetectLabelsImageBackground :: DetectLabelsImageBackground
- detectLabelsImageBackground_dominantColors :: Lens' DetectLabelsImageBackground (Maybe [DominantColor])
- detectLabelsImageBackground_quality :: Lens' DetectLabelsImageBackground (Maybe DetectLabelsImageQuality)
- data DetectLabelsImageForeground = DetectLabelsImageForeground' {}
- newDetectLabelsImageForeground :: DetectLabelsImageForeground
- detectLabelsImageForeground_dominantColors :: Lens' DetectLabelsImageForeground (Maybe [DominantColor])
- detectLabelsImageForeground_quality :: Lens' DetectLabelsImageForeground (Maybe DetectLabelsImageQuality)
- data DetectLabelsImageProperties = DetectLabelsImageProperties' {}
- newDetectLabelsImageProperties :: DetectLabelsImageProperties
- detectLabelsImageProperties_background :: Lens' DetectLabelsImageProperties (Maybe DetectLabelsImageBackground)
- detectLabelsImageProperties_dominantColors :: Lens' DetectLabelsImageProperties (Maybe [DominantColor])
- detectLabelsImageProperties_foreground :: Lens' DetectLabelsImageProperties (Maybe DetectLabelsImageForeground)
- detectLabelsImageProperties_quality :: Lens' DetectLabelsImageProperties (Maybe DetectLabelsImageQuality)
- data DetectLabelsImagePropertiesSettings = DetectLabelsImagePropertiesSettings' {}
- newDetectLabelsImagePropertiesSettings :: DetectLabelsImagePropertiesSettings
- detectLabelsImagePropertiesSettings_maxDominantColors :: Lens' DetectLabelsImagePropertiesSettings (Maybe Natural)
- data DetectLabelsImageQuality = DetectLabelsImageQuality' {}
- newDetectLabelsImageQuality :: DetectLabelsImageQuality
- detectLabelsImageQuality_brightness :: Lens' DetectLabelsImageQuality (Maybe Double)
- detectLabelsImageQuality_contrast :: Lens' DetectLabelsImageQuality (Maybe Double)
- detectLabelsImageQuality_sharpness :: Lens' DetectLabelsImageQuality (Maybe Double)
- data DetectLabelsSettings = DetectLabelsSettings' {}
- newDetectLabelsSettings :: DetectLabelsSettings
- detectLabelsSettings_generalLabels :: Lens' DetectLabelsSettings (Maybe GeneralLabelsSettings)
- detectLabelsSettings_imageProperties :: Lens' DetectLabelsSettings (Maybe DetectLabelsImagePropertiesSettings)
- data DetectTextFilters = DetectTextFilters' {}
- newDetectTextFilters :: DetectTextFilters
- detectTextFilters_regionsOfInterest :: Lens' DetectTextFilters (Maybe [RegionOfInterest])
- detectTextFilters_wordFilter :: Lens' DetectTextFilters (Maybe DetectionFilter)
- data DetectionFilter = DetectionFilter' {}
- newDetectionFilter :: DetectionFilter
- detectionFilter_minBoundingBoxHeight :: Lens' DetectionFilter (Maybe Double)
- detectionFilter_minBoundingBoxWidth :: Lens' DetectionFilter (Maybe Double)
- detectionFilter_minConfidence :: Lens' DetectionFilter (Maybe Double)
- data DistributeDataset = DistributeDataset' {}
- newDistributeDataset :: Text -> DistributeDataset
- distributeDataset_arn :: Lens' DistributeDataset Text
- data DominantColor = DominantColor' {}
- newDominantColor :: DominantColor
- dominantColor_blue :: Lens' DominantColor (Maybe Natural)
- dominantColor_cSSColor :: Lens' DominantColor (Maybe Text)
- dominantColor_green :: Lens' DominantColor (Maybe Natural)
- dominantColor_hexCode :: Lens' DominantColor (Maybe Text)
- dominantColor_pixelPercent :: Lens' DominantColor (Maybe Double)
- dominantColor_red :: Lens' DominantColor (Maybe Natural)
- dominantColor_simplifiedColor :: Lens' DominantColor (Maybe Text)
- data Emotion = Emotion' {}
- newEmotion :: Emotion
- emotion_confidence :: Lens' Emotion (Maybe Double)
- emotion_type :: Lens' Emotion (Maybe EmotionName)
- data EquipmentDetection = EquipmentDetection' {}
- newEquipmentDetection :: EquipmentDetection
- equipmentDetection_boundingBox :: Lens' EquipmentDetection (Maybe BoundingBox)
- equipmentDetection_confidence :: Lens' EquipmentDetection (Maybe Double)
- equipmentDetection_coversBodyPart :: Lens' EquipmentDetection (Maybe CoversBodyPart)
- equipmentDetection_type :: Lens' EquipmentDetection (Maybe ProtectiveEquipmentType)
- data EvaluationResult = EvaluationResult' {}
- newEvaluationResult :: EvaluationResult
- evaluationResult_f1Score :: Lens' EvaluationResult (Maybe Double)
- evaluationResult_summary :: Lens' EvaluationResult (Maybe Summary)
- data EyeOpen = EyeOpen' {}
- newEyeOpen :: EyeOpen
- eyeOpen_confidence :: Lens' EyeOpen (Maybe Double)
- eyeOpen_value :: Lens' EyeOpen (Maybe Bool)
- data Eyeglasses = Eyeglasses' {}
- newEyeglasses :: Eyeglasses
- eyeglasses_confidence :: Lens' Eyeglasses (Maybe Double)
- eyeglasses_value :: Lens' Eyeglasses (Maybe Bool)
- data Face = Face' {}
- newFace :: Face
- face_boundingBox :: Lens' Face (Maybe BoundingBox)
- face_confidence :: Lens' Face (Maybe Double)
- face_externalImageId :: Lens' Face (Maybe Text)
- face_faceId :: Lens' Face (Maybe Text)
- face_imageId :: Lens' Face (Maybe Text)
- face_indexFacesModelVersion :: Lens' Face (Maybe Text)
- data FaceDetail = FaceDetail' {
- ageRange :: Maybe AgeRange
- beard :: Maybe Beard
- boundingBox :: Maybe BoundingBox
- confidence :: Maybe Double
- emotions :: Maybe [Emotion]
- eyeglasses :: Maybe Eyeglasses
- eyesOpen :: Maybe EyeOpen
- gender :: Maybe Gender
- landmarks :: Maybe [Landmark]
- mouthOpen :: Maybe MouthOpen
- mustache :: Maybe Mustache
- pose :: Maybe Pose
- quality :: Maybe ImageQuality
- smile :: Maybe Smile
- sunglasses :: Maybe Sunglasses
- newFaceDetail :: FaceDetail
- faceDetail_ageRange :: Lens' FaceDetail (Maybe AgeRange)
- faceDetail_beard :: Lens' FaceDetail (Maybe Beard)
- faceDetail_boundingBox :: Lens' FaceDetail (Maybe BoundingBox)
- faceDetail_confidence :: Lens' FaceDetail (Maybe Double)
- faceDetail_emotions :: Lens' FaceDetail (Maybe [Emotion])
- faceDetail_eyeglasses :: Lens' FaceDetail (Maybe Eyeglasses)
- faceDetail_eyesOpen :: Lens' FaceDetail (Maybe EyeOpen)
- faceDetail_gender :: Lens' FaceDetail (Maybe Gender)
- faceDetail_landmarks :: Lens' FaceDetail (Maybe [Landmark])
- faceDetail_mouthOpen :: Lens' FaceDetail (Maybe MouthOpen)
- faceDetail_mustache :: Lens' FaceDetail (Maybe Mustache)
- faceDetail_pose :: Lens' FaceDetail (Maybe Pose)
- faceDetail_quality :: Lens' FaceDetail (Maybe ImageQuality)
- faceDetail_smile :: Lens' FaceDetail (Maybe Smile)
- faceDetail_sunglasses :: Lens' FaceDetail (Maybe Sunglasses)
- data FaceDetection = FaceDetection' {}
- newFaceDetection :: FaceDetection
- faceDetection_face :: Lens' FaceDetection (Maybe FaceDetail)
- faceDetection_timestamp :: Lens' FaceDetection (Maybe Integer)
- data FaceMatch = FaceMatch' {}
- newFaceMatch :: FaceMatch
- faceMatch_face :: Lens' FaceMatch (Maybe Face)
- faceMatch_similarity :: Lens' FaceMatch (Maybe Double)
- data FaceRecord = FaceRecord' {
- face :: Maybe Face
- faceDetail :: Maybe FaceDetail
- newFaceRecord :: FaceRecord
- faceRecord_face :: Lens' FaceRecord (Maybe Face)
- faceRecord_faceDetail :: Lens' FaceRecord (Maybe FaceDetail)
- data FaceSearchSettings = FaceSearchSettings' {}
- newFaceSearchSettings :: FaceSearchSettings
- faceSearchSettings_collectionId :: Lens' FaceSearchSettings (Maybe Text)
- faceSearchSettings_faceMatchThreshold :: Lens' FaceSearchSettings (Maybe Double)
- data Gender = Gender' {}
- newGender :: Gender
- gender_confidence :: Lens' Gender (Maybe Double)
- gender_value :: Lens' Gender (Maybe GenderType)
- data GeneralLabelsSettings = GeneralLabelsSettings' {}
- newGeneralLabelsSettings :: GeneralLabelsSettings
- generalLabelsSettings_labelCategoryExclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text])
- generalLabelsSettings_labelCategoryInclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text])
- generalLabelsSettings_labelExclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text])
- generalLabelsSettings_labelInclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text])
- data Geometry = Geometry' {
- boundingBox :: Maybe BoundingBox
- polygon :: Maybe [Point]
- newGeometry :: Geometry
- geometry_boundingBox :: Lens' Geometry (Maybe BoundingBox)
- geometry_polygon :: Lens' Geometry (Maybe [Point])
- data GroundTruthManifest = GroundTruthManifest' {}
- newGroundTruthManifest :: GroundTruthManifest
- groundTruthManifest_s3Object :: Lens' GroundTruthManifest (Maybe S3Object)
- data HumanLoopActivationOutput = HumanLoopActivationOutput' {}
- newHumanLoopActivationOutput :: HumanLoopActivationOutput
- humanLoopActivationOutput_humanLoopActivationConditionsEvaluationResults :: Lens' HumanLoopActivationOutput (Maybe Text)
- humanLoopActivationOutput_humanLoopActivationReasons :: Lens' HumanLoopActivationOutput (Maybe (NonEmpty Text))
- humanLoopActivationOutput_humanLoopArn :: Lens' HumanLoopActivationOutput (Maybe Text)
- data HumanLoopConfig = HumanLoopConfig' {}
- newHumanLoopConfig :: Text -> Text -> HumanLoopConfig
- humanLoopConfig_dataAttributes :: Lens' HumanLoopConfig (Maybe HumanLoopDataAttributes)
- humanLoopConfig_humanLoopName :: Lens' HumanLoopConfig Text
- humanLoopConfig_flowDefinitionArn :: Lens' HumanLoopConfig Text
- data HumanLoopDataAttributes = HumanLoopDataAttributes' {}
- newHumanLoopDataAttributes :: HumanLoopDataAttributes
- humanLoopDataAttributes_contentClassifiers :: Lens' HumanLoopDataAttributes (Maybe [ContentClassifier])
- data Image = Image' {}
- newImage :: Image
- image_bytes :: Lens' Image (Maybe ByteString)
- image_s3Object :: Lens' Image (Maybe S3Object)
- data ImageQuality = ImageQuality' {}
- newImageQuality :: ImageQuality
- imageQuality_brightness :: Lens' ImageQuality (Maybe Double)
- imageQuality_sharpness :: Lens' ImageQuality (Maybe Double)
- data Instance = Instance' {}
- newInstance :: Instance
- instance_boundingBox :: Lens' Instance (Maybe BoundingBox)
- instance_confidence :: Lens' Instance (Maybe Double)
- instance_dominantColors :: Lens' Instance (Maybe [DominantColor])
- data KinesisDataStream = KinesisDataStream' {}
- newKinesisDataStream :: KinesisDataStream
- kinesisDataStream_arn :: Lens' KinesisDataStream (Maybe Text)
- data KinesisVideoStream = KinesisVideoStream' {}
- newKinesisVideoStream :: KinesisVideoStream
- kinesisVideoStream_arn :: Lens' KinesisVideoStream (Maybe Text)
- data KinesisVideoStreamStartSelector = KinesisVideoStreamStartSelector' {}
- newKinesisVideoStreamStartSelector :: KinesisVideoStreamStartSelector
- kinesisVideoStreamStartSelector_fragmentNumber :: Lens' KinesisVideoStreamStartSelector (Maybe Text)
- kinesisVideoStreamStartSelector_producerTimestamp :: Lens' KinesisVideoStreamStartSelector (Maybe Natural)
- data KnownGender = KnownGender' {}
- newKnownGender :: KnownGender
- knownGender_type :: Lens' KnownGender (Maybe KnownGenderType)
- data Label = Label' {
- aliases :: Maybe [LabelAlias]
- categories :: Maybe [LabelCategory]
- confidence :: Maybe Double
- instances :: Maybe [Instance]
- name :: Maybe Text
- parents :: Maybe [Parent]
- newLabel :: Label
- label_aliases :: Lens' Label (Maybe [LabelAlias])
- label_categories :: Lens' Label (Maybe [LabelCategory])
- label_confidence :: Lens' Label (Maybe Double)
- label_instances :: Lens' Label (Maybe [Instance])
- label_name :: Lens' Label (Maybe Text)
- label_parents :: Lens' Label (Maybe [Parent])
- data LabelAlias = LabelAlias' {}
- newLabelAlias :: LabelAlias
- labelAlias_name :: Lens' LabelAlias (Maybe Text)
- data LabelCategory = LabelCategory' {}
- newLabelCategory :: LabelCategory
- labelCategory_name :: Lens' LabelCategory (Maybe Text)
- data LabelDetection = LabelDetection' {}
- newLabelDetection :: LabelDetection
- labelDetection_durationMillis :: Lens' LabelDetection (Maybe Natural)
- labelDetection_endTimestampMillis :: Lens' LabelDetection (Maybe Natural)
- labelDetection_label :: Lens' LabelDetection (Maybe Label)
- labelDetection_startTimestampMillis :: Lens' LabelDetection (Maybe Natural)
- labelDetection_timestamp :: Lens' LabelDetection (Maybe Integer)
- data LabelDetectionSettings = LabelDetectionSettings' {}
- newLabelDetectionSettings :: LabelDetectionSettings
- labelDetectionSettings_generalLabels :: Lens' LabelDetectionSettings (Maybe GeneralLabelsSettings)
- data Landmark = Landmark' {}
- newLandmark :: Landmark
- landmark_type :: Lens' Landmark (Maybe LandmarkType)
- landmark_x :: Lens' Landmark (Maybe Double)
- landmark_y :: Lens' Landmark (Maybe Double)
- data ModerationLabel = ModerationLabel' {
- confidence :: Maybe Double
- name :: Maybe Text
- parentName :: Maybe Text
- newModerationLabel :: ModerationLabel
- moderationLabel_confidence :: Lens' ModerationLabel (Maybe Double)
- moderationLabel_name :: Lens' ModerationLabel (Maybe Text)
- moderationLabel_parentName :: Lens' ModerationLabel (Maybe Text)
- data MouthOpen = MouthOpen' {}
- newMouthOpen :: MouthOpen
- mouthOpen_confidence :: Lens' MouthOpen (Maybe Double)
- mouthOpen_value :: Lens' MouthOpen (Maybe Bool)
- data Mustache = Mustache' {}
- newMustache :: Mustache
- mustache_confidence :: Lens' Mustache (Maybe Double)
- mustache_value :: Lens' Mustache (Maybe Bool)
- data NotificationChannel = NotificationChannel' {
- sNSTopicArn :: Text
- roleArn :: Text
- newNotificationChannel :: Text -> Text -> NotificationChannel
- notificationChannel_sNSTopicArn :: Lens' NotificationChannel Text
- notificationChannel_roleArn :: Lens' NotificationChannel Text
- data OutputConfig = OutputConfig' {}
- newOutputConfig :: OutputConfig
- outputConfig_s3Bucket :: Lens' OutputConfig (Maybe Text)
- outputConfig_s3KeyPrefix :: Lens' OutputConfig (Maybe Text)
- data Parent = Parent' {}
- newParent :: Parent
- parent_name :: Lens' Parent (Maybe Text)
- data PersonDetail = PersonDetail' {}
- newPersonDetail :: PersonDetail
- personDetail_boundingBox :: Lens' PersonDetail (Maybe BoundingBox)
- personDetail_face :: Lens' PersonDetail (Maybe FaceDetail)
- personDetail_index :: Lens' PersonDetail (Maybe Integer)
- data PersonDetection = PersonDetection' {}
- newPersonDetection :: PersonDetection
- personDetection_person :: Lens' PersonDetection (Maybe PersonDetail)
- personDetection_timestamp :: Lens' PersonDetection (Maybe Integer)
- data PersonMatch = PersonMatch' {}
- newPersonMatch :: PersonMatch
- personMatch_faceMatches :: Lens' PersonMatch (Maybe [FaceMatch])
- personMatch_person :: Lens' PersonMatch (Maybe PersonDetail)
- personMatch_timestamp :: Lens' PersonMatch (Maybe Integer)
- data Point = Point' {}
- newPoint :: Point
- point_x :: Lens' Point (Maybe Double)
- point_y :: Lens' Point (Maybe Double)
- data Pose = Pose' {}
- newPose :: Pose
- pose_pitch :: Lens' Pose (Maybe Double)
- pose_roll :: Lens' Pose (Maybe Double)
- pose_yaw :: Lens' Pose (Maybe Double)
- data ProjectDescription = ProjectDescription' {}
- newProjectDescription :: ProjectDescription
- projectDescription_creationTimestamp :: Lens' ProjectDescription (Maybe UTCTime)
- projectDescription_datasets :: Lens' ProjectDescription (Maybe [DatasetMetadata])
- projectDescription_projectArn :: Lens' ProjectDescription (Maybe Text)
- projectDescription_status :: Lens' ProjectDescription (Maybe ProjectStatus)
- data ProjectPolicy = ProjectPolicy' {}
- newProjectPolicy :: ProjectPolicy
- projectPolicy_creationTimestamp :: Lens' ProjectPolicy (Maybe UTCTime)
- projectPolicy_lastUpdatedTimestamp :: Lens' ProjectPolicy (Maybe UTCTime)
- projectPolicy_policyDocument :: Lens' ProjectPolicy (Maybe Text)
- projectPolicy_policyName :: Lens' ProjectPolicy (Maybe Text)
- projectPolicy_policyRevisionId :: Lens' ProjectPolicy (Maybe Text)
- projectPolicy_projectArn :: Lens' ProjectPolicy (Maybe Text)
- data ProjectVersionDescription = ProjectVersionDescription' {
- billableTrainingTimeInSeconds :: Maybe Natural
- creationTimestamp :: Maybe POSIX
- evaluationResult :: Maybe EvaluationResult
- kmsKeyId :: Maybe Text
- manifestSummary :: Maybe GroundTruthManifest
- maxInferenceUnits :: Maybe Natural
- minInferenceUnits :: Maybe Natural
- outputConfig :: Maybe OutputConfig
- projectVersionArn :: Maybe Text
- sourceProjectVersionArn :: Maybe Text
- status :: Maybe ProjectVersionStatus
- statusMessage :: Maybe Text
- testingDataResult :: Maybe TestingDataResult
- trainingDataResult :: Maybe TrainingDataResult
- trainingEndTimestamp :: Maybe POSIX
- newProjectVersionDescription :: ProjectVersionDescription
- projectVersionDescription_billableTrainingTimeInSeconds :: Lens' ProjectVersionDescription (Maybe Natural)
- projectVersionDescription_creationTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime)
- projectVersionDescription_evaluationResult :: Lens' ProjectVersionDescription (Maybe EvaluationResult)
- projectVersionDescription_kmsKeyId :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_manifestSummary :: Lens' ProjectVersionDescription (Maybe GroundTruthManifest)
- projectVersionDescription_maxInferenceUnits :: Lens' ProjectVersionDescription (Maybe Natural)
- projectVersionDescription_minInferenceUnits :: Lens' ProjectVersionDescription (Maybe Natural)
- projectVersionDescription_outputConfig :: Lens' ProjectVersionDescription (Maybe OutputConfig)
- projectVersionDescription_projectVersionArn :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_sourceProjectVersionArn :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_status :: Lens' ProjectVersionDescription (Maybe ProjectVersionStatus)
- projectVersionDescription_statusMessage :: Lens' ProjectVersionDescription (Maybe Text)
- projectVersionDescription_testingDataResult :: Lens' ProjectVersionDescription (Maybe TestingDataResult)
- projectVersionDescription_trainingDataResult :: Lens' ProjectVersionDescription (Maybe TrainingDataResult)
- projectVersionDescription_trainingEndTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime)
- data ProtectiveEquipmentBodyPart = ProtectiveEquipmentBodyPart' {}
- newProtectiveEquipmentBodyPart :: ProtectiveEquipmentBodyPart
- protectiveEquipmentBodyPart_confidence :: Lens' ProtectiveEquipmentBodyPart (Maybe Double)
- protectiveEquipmentBodyPart_equipmentDetections :: Lens' ProtectiveEquipmentBodyPart (Maybe [EquipmentDetection])
- protectiveEquipmentBodyPart_name :: Lens' ProtectiveEquipmentBodyPart (Maybe BodyPart)
- data ProtectiveEquipmentPerson = ProtectiveEquipmentPerson' {}
- newProtectiveEquipmentPerson :: ProtectiveEquipmentPerson
- protectiveEquipmentPerson_bodyParts :: Lens' ProtectiveEquipmentPerson (Maybe [ProtectiveEquipmentBodyPart])
- protectiveEquipmentPerson_boundingBox :: Lens' ProtectiveEquipmentPerson (Maybe BoundingBox)
- protectiveEquipmentPerson_confidence :: Lens' ProtectiveEquipmentPerson (Maybe Double)
- protectiveEquipmentPerson_id :: Lens' ProtectiveEquipmentPerson (Maybe Natural)
- data ProtectiveEquipmentSummarizationAttributes = ProtectiveEquipmentSummarizationAttributes' {}
- newProtectiveEquipmentSummarizationAttributes :: Double -> ProtectiveEquipmentSummarizationAttributes
- protectiveEquipmentSummarizationAttributes_minConfidence :: Lens' ProtectiveEquipmentSummarizationAttributes Double
- protectiveEquipmentSummarizationAttributes_requiredEquipmentTypes :: Lens' ProtectiveEquipmentSummarizationAttributes [ProtectiveEquipmentType]
- data ProtectiveEquipmentSummary = ProtectiveEquipmentSummary' {}
- newProtectiveEquipmentSummary :: ProtectiveEquipmentSummary
- protectiveEquipmentSummary_personsIndeterminate :: Lens' ProtectiveEquipmentSummary (Maybe [Natural])
- protectiveEquipmentSummary_personsWithRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural])
- protectiveEquipmentSummary_personsWithoutRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural])
- data RegionOfInterest = RegionOfInterest' {
- boundingBox :: Maybe BoundingBox
- polygon :: Maybe [Point]
- newRegionOfInterest :: RegionOfInterest
- regionOfInterest_boundingBox :: Lens' RegionOfInterest (Maybe BoundingBox)
- regionOfInterest_polygon :: Lens' RegionOfInterest (Maybe [Point])
- data S3Destination = S3Destination' {}
- newS3Destination :: S3Destination
- s3Destination_bucket :: Lens' S3Destination (Maybe Text)
- s3Destination_keyPrefix :: Lens' S3Destination (Maybe Text)
- data S3Object = S3Object' {}
- newS3Object :: S3Object
- s3Object_bucket :: Lens' S3Object (Maybe Text)
- s3Object_name :: Lens' S3Object (Maybe Text)
- s3Object_version :: Lens' S3Object (Maybe Text)
- data SegmentDetection = SegmentDetection' {
- durationFrames :: Maybe Natural
- durationMillis :: Maybe Natural
- durationSMPTE :: Maybe Text
- endFrameNumber :: Maybe Natural
- endTimecodeSMPTE :: Maybe Text
- endTimestampMillis :: Maybe Integer
- shotSegment :: Maybe ShotSegment
- startFrameNumber :: Maybe Natural
- startTimecodeSMPTE :: Maybe Text
- startTimestampMillis :: Maybe Integer
- technicalCueSegment :: Maybe TechnicalCueSegment
- type' :: Maybe SegmentType
- newSegmentDetection :: SegmentDetection
- segmentDetection_durationFrames :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_durationMillis :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_durationSMPTE :: Lens' SegmentDetection (Maybe Text)
- segmentDetection_endFrameNumber :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_endTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text)
- segmentDetection_endTimestampMillis :: Lens' SegmentDetection (Maybe Integer)
- segmentDetection_shotSegment :: Lens' SegmentDetection (Maybe ShotSegment)
- segmentDetection_startFrameNumber :: Lens' SegmentDetection (Maybe Natural)
- segmentDetection_startTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text)
- segmentDetection_startTimestampMillis :: Lens' SegmentDetection (Maybe Integer)
- segmentDetection_technicalCueSegment :: Lens' SegmentDetection (Maybe TechnicalCueSegment)
- segmentDetection_type :: Lens' SegmentDetection (Maybe SegmentType)
- data SegmentTypeInfo = SegmentTypeInfo' {}
- newSegmentTypeInfo :: SegmentTypeInfo
- segmentTypeInfo_modelVersion :: Lens' SegmentTypeInfo (Maybe Text)
- segmentTypeInfo_type :: Lens' SegmentTypeInfo (Maybe SegmentType)
- data ShotSegment = ShotSegment' {}
- newShotSegment :: ShotSegment
- shotSegment_confidence :: Lens' ShotSegment (Maybe Double)
- shotSegment_index :: Lens' ShotSegment (Maybe Natural)
- data Smile = Smile' {}
- newSmile :: Smile
- smile_confidence :: Lens' Smile (Maybe Double)
- smile_value :: Lens' Smile (Maybe Bool)
- data StartSegmentDetectionFilters = StartSegmentDetectionFilters' {}
- newStartSegmentDetectionFilters :: StartSegmentDetectionFilters
- startSegmentDetectionFilters_shotFilter :: Lens' StartSegmentDetectionFilters (Maybe StartShotDetectionFilter)
- startSegmentDetectionFilters_technicalCueFilter :: Lens' StartSegmentDetectionFilters (Maybe StartTechnicalCueDetectionFilter)
- data StartShotDetectionFilter = StartShotDetectionFilter' {}
- newStartShotDetectionFilter :: StartShotDetectionFilter
- startShotDetectionFilter_minSegmentConfidence :: Lens' StartShotDetectionFilter (Maybe Double)
- data StartTechnicalCueDetectionFilter = StartTechnicalCueDetectionFilter' {}
- newStartTechnicalCueDetectionFilter :: StartTechnicalCueDetectionFilter
- startTechnicalCueDetectionFilter_blackFrame :: Lens' StartTechnicalCueDetectionFilter (Maybe BlackFrame)
- startTechnicalCueDetectionFilter_minSegmentConfidence :: Lens' StartTechnicalCueDetectionFilter (Maybe Double)
- data StartTextDetectionFilters = StartTextDetectionFilters' {}
- newStartTextDetectionFilters :: StartTextDetectionFilters
- startTextDetectionFilters_regionsOfInterest :: Lens' StartTextDetectionFilters (Maybe [RegionOfInterest])
- startTextDetectionFilters_wordFilter :: Lens' StartTextDetectionFilters (Maybe DetectionFilter)
- data StreamProcessingStartSelector = StreamProcessingStartSelector' {}
- newStreamProcessingStartSelector :: StreamProcessingStartSelector
- streamProcessingStartSelector_kVSStreamStartSelector :: Lens' StreamProcessingStartSelector (Maybe KinesisVideoStreamStartSelector)
- data StreamProcessingStopSelector = StreamProcessingStopSelector' {}
- newStreamProcessingStopSelector :: StreamProcessingStopSelector
- streamProcessingStopSelector_maxDurationInSeconds :: Lens' StreamProcessingStopSelector (Maybe Natural)
- data StreamProcessor = StreamProcessor' {}
- newStreamProcessor :: StreamProcessor
- streamProcessor_name :: Lens' StreamProcessor (Maybe Text)
- streamProcessor_status :: Lens' StreamProcessor (Maybe StreamProcessorStatus)
- data StreamProcessorDataSharingPreference = StreamProcessorDataSharingPreference' {}
- newStreamProcessorDataSharingPreference :: Bool -> StreamProcessorDataSharingPreference
- streamProcessorDataSharingPreference_optIn :: Lens' StreamProcessorDataSharingPreference Bool
- data StreamProcessorInput = StreamProcessorInput' {}
- newStreamProcessorInput :: StreamProcessorInput
- streamProcessorInput_kinesisVideoStream :: Lens' StreamProcessorInput (Maybe KinesisVideoStream)
- data StreamProcessorNotificationChannel = StreamProcessorNotificationChannel' {
- sNSTopicArn :: Text
- newStreamProcessorNotificationChannel :: Text -> StreamProcessorNotificationChannel
- streamProcessorNotificationChannel_sNSTopicArn :: Lens' StreamProcessorNotificationChannel Text
- data StreamProcessorOutput = StreamProcessorOutput' {}
- newStreamProcessorOutput :: StreamProcessorOutput
- streamProcessorOutput_kinesisDataStream :: Lens' StreamProcessorOutput (Maybe KinesisDataStream)
- streamProcessorOutput_s3Destination :: Lens' StreamProcessorOutput (Maybe S3Destination)
- data StreamProcessorSettings = StreamProcessorSettings' {}
- newStreamProcessorSettings :: StreamProcessorSettings
- streamProcessorSettings_connectedHome :: Lens' StreamProcessorSettings (Maybe ConnectedHomeSettings)
- streamProcessorSettings_faceSearch :: Lens' StreamProcessorSettings (Maybe FaceSearchSettings)
- data StreamProcessorSettingsForUpdate = StreamProcessorSettingsForUpdate' {}
- newStreamProcessorSettingsForUpdate :: StreamProcessorSettingsForUpdate
- streamProcessorSettingsForUpdate_connectedHomeForUpdate :: Lens' StreamProcessorSettingsForUpdate (Maybe ConnectedHomeSettingsForUpdate)
- data Summary = Summary' {}
- newSummary :: Summary
- summary_s3Object :: Lens' Summary (Maybe S3Object)
- data Sunglasses = Sunglasses' {}
- newSunglasses :: Sunglasses
- sunglasses_confidence :: Lens' Sunglasses (Maybe Double)
- sunglasses_value :: Lens' Sunglasses (Maybe Bool)
- data TechnicalCueSegment = TechnicalCueSegment' {}
- newTechnicalCueSegment :: TechnicalCueSegment
- technicalCueSegment_confidence :: Lens' TechnicalCueSegment (Maybe Double)
- technicalCueSegment_type :: Lens' TechnicalCueSegment (Maybe TechnicalCueType)
- data TestingData = TestingData' {}
- newTestingData :: TestingData
- testingData_assets :: Lens' TestingData (Maybe [Asset])
- testingData_autoCreate :: Lens' TestingData (Maybe Bool)
- data TestingDataResult = TestingDataResult' {}
- newTestingDataResult :: TestingDataResult
- testingDataResult_input :: Lens' TestingDataResult (Maybe TestingData)
- testingDataResult_output :: Lens' TestingDataResult (Maybe TestingData)
- testingDataResult_validation :: Lens' TestingDataResult (Maybe ValidationData)
- data TextDetection = TextDetection' {}
- newTextDetection :: TextDetection
- textDetection_confidence :: Lens' TextDetection (Maybe Double)
- textDetection_detectedText :: Lens' TextDetection (Maybe Text)
- textDetection_geometry :: Lens' TextDetection (Maybe Geometry)
- textDetection_id :: Lens' TextDetection (Maybe Natural)
- textDetection_parentId :: Lens' TextDetection (Maybe Natural)
- textDetection_type :: Lens' TextDetection (Maybe TextTypes)
- data TextDetectionResult = TextDetectionResult' {}
- newTextDetectionResult :: TextDetectionResult
- textDetectionResult_textDetection :: Lens' TextDetectionResult (Maybe TextDetection)
- textDetectionResult_timestamp :: Lens' TextDetectionResult (Maybe Integer)
- data TrainingData = TrainingData' {}
- newTrainingData :: TrainingData
- trainingData_assets :: Lens' TrainingData (Maybe [Asset])
- data TrainingDataResult = TrainingDataResult' {}
- newTrainingDataResult :: TrainingDataResult
- trainingDataResult_input :: Lens' TrainingDataResult (Maybe TrainingData)
- trainingDataResult_output :: Lens' TrainingDataResult (Maybe TrainingData)
- trainingDataResult_validation :: Lens' TrainingDataResult (Maybe ValidationData)
- data UnindexedFace = UnindexedFace' {
- faceDetail :: Maybe FaceDetail
- reasons :: Maybe [Reason]
- newUnindexedFace :: UnindexedFace
- unindexedFace_faceDetail :: Lens' UnindexedFace (Maybe FaceDetail)
- unindexedFace_reasons :: Lens' UnindexedFace (Maybe [Reason])
- data ValidationData = ValidationData' {}
- newValidationData :: ValidationData
- validationData_assets :: Lens' ValidationData (Maybe [Asset])
- data Video = Video' {}
- newVideo :: Video
- video_s3Object :: Lens' Video (Maybe S3Object)
- data VideoMetadata = VideoMetadata' {}
- newVideoMetadata :: VideoMetadata
- videoMetadata_codec :: Lens' VideoMetadata (Maybe Text)
- videoMetadata_colorRange :: Lens' VideoMetadata (Maybe VideoColorRange)
- videoMetadata_durationMillis :: Lens' VideoMetadata (Maybe Natural)
- videoMetadata_format :: Lens' VideoMetadata (Maybe Text)
- videoMetadata_frameHeight :: Lens' VideoMetadata (Maybe Natural)
- videoMetadata_frameRate :: Lens' VideoMetadata (Maybe Double)
- videoMetadata_frameWidth :: Lens' VideoMetadata (Maybe Natural)
Service Configuration
defaultService :: Service Source #
API version 2016-06-27
of the Amazon Rekognition SDK configuration.
Errors
_AccessDeniedException :: AsError a => Fold a ServiceError Source #
You are not authorized to perform the action.
_HumanLoopQuotaExceededException :: AsError a => Fold a ServiceError Source #
The number of in-progress human reviews you have has exceeded the number allowed.
_IdempotentParameterMismatchException :: AsError a => Fold a ServiceError Source #
A ClientRequestToken
input parameter was reused with an operation, but
at least one of the other input parameters is different from the
previous call to the operation.
_ImageTooLargeException :: AsError a => Fold a ServiceError Source #
The input image size exceeds the allowed limit. If you are calling DetectProtectiveEquipment, the image size or resolution exceeds the allowed limit. For more information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.
_InternalServerError :: AsError a => Fold a ServiceError Source #
Amazon Rekognition experienced a service issue. Try your call again.
_InvalidImageFormatException :: AsError a => Fold a ServiceError Source #
The provided image format is not supported.
_InvalidPaginationTokenException :: AsError a => Fold a ServiceError Source #
Pagination token in the request is not valid.
_InvalidParameterException :: AsError a => Fold a ServiceError Source #
Input parameter violated a constraint. Validate your parameter before calling the API operation again.
_InvalidPolicyRevisionIdException :: AsError a => Fold a ServiceError Source #
The supplied revision id for the project policy is invalid.
_InvalidS3ObjectException :: AsError a => Fold a ServiceError Source #
Amazon Rekognition is unable to access the S3 object specified in the request.
_LimitExceededException :: AsError a => Fold a ServiceError Source #
An Amazon Rekognition service limit was exceeded. For example, if you
start too many Amazon Rekognition Video jobs concurrently, calls to
start operations (StartLabelDetection
, for example) will raise a
LimitExceededException
exception (HTTP status code: 400) until the
number of concurrently running jobs is below the Amazon Rekognition
service limit.
_MalformedPolicyDocumentException :: AsError a => Fold a ServiceError Source #
The format of the project policy document that you supplied to
PutProjectPolicy
is incorrect.
_ProvisionedThroughputExceededException :: AsError a => Fold a ServiceError Source #
The number of requests exceeded your throughput limit. If you want to increase this limit, contact Amazon Rekognition.
_ResourceAlreadyExistsException :: AsError a => Fold a ServiceError Source #
A resource with the specified ID already exists.
_ResourceInUseException :: AsError a => Fold a ServiceError Source #
The specified resource is already being used.
_ResourceNotFoundException :: AsError a => Fold a ServiceError Source #
The resource specified in the request cannot be found.
_ResourceNotReadyException :: AsError a => Fold a ServiceError Source #
The requested resource isn't ready. For example, this exception occurs
when you call DetectCustomLabels
with a model version that isn't
deployed.
_ServiceQuotaExceededException :: AsError a => Fold a ServiceError Source #
The size of the collection exceeds the allowed limit. For more information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.
_ThrottlingException :: AsError a => Fold a ServiceError Source #
Amazon Rekognition is temporarily unable to process the request. Try your call again.
_VideoTooLargeException :: AsError a => Fold a ServiceError Source #
The file size or duration of the supplied media is too large. The maximum file size is 10GB. The maximum duration is 6 hours.
Attribute
pattern Attribute_ALL :: Attribute | |
pattern Attribute_DEFAULT :: Attribute |
Instances
BodyPart
pattern BodyPart_FACE :: BodyPart | |
pattern BodyPart_HEAD :: BodyPart | |
pattern BodyPart_LEFT_HAND :: BodyPart | |
pattern BodyPart_RIGHT_HAND :: BodyPart |
Instances
CelebrityRecognitionSortBy
newtype CelebrityRecognitionSortBy Source #
pattern CelebrityRecognitionSortBy_ID :: CelebrityRecognitionSortBy | |
pattern CelebrityRecognitionSortBy_TIMESTAMP :: CelebrityRecognitionSortBy |
Instances
ContentClassifier
newtype ContentClassifier Source #
pattern ContentClassifier_FreeOfAdultContent :: ContentClassifier | |
pattern ContentClassifier_FreeOfPersonallyIdentifiableInformation :: ContentClassifier |
Instances
ContentModerationSortBy
newtype ContentModerationSortBy Source #
pattern ContentModerationSortBy_NAME :: ContentModerationSortBy | |
pattern ContentModerationSortBy_TIMESTAMP :: ContentModerationSortBy |
Instances
DatasetStatus
newtype DatasetStatus Source #
pattern DatasetStatus_CREATE_COMPLETE :: DatasetStatus | |
pattern DatasetStatus_CREATE_FAILED :: DatasetStatus | |
pattern DatasetStatus_CREATE_IN_PROGRESS :: DatasetStatus | |
pattern DatasetStatus_DELETE_IN_PROGRESS :: DatasetStatus | |
pattern DatasetStatus_UPDATE_COMPLETE :: DatasetStatus | |
pattern DatasetStatus_UPDATE_FAILED :: DatasetStatus | |
pattern DatasetStatus_UPDATE_IN_PROGRESS :: DatasetStatus |
Instances
DatasetStatusMessageCode
newtype DatasetStatusMessageCode Source #
Instances
DatasetType
newtype DatasetType Source #
pattern DatasetType_TEST :: DatasetType | |
pattern DatasetType_TRAIN :: DatasetType |
Instances
DetectLabelsFeatureName
newtype DetectLabelsFeatureName Source #
pattern DetectLabelsFeatureName_GENERAL_LABELS :: DetectLabelsFeatureName | |
pattern DetectLabelsFeatureName_IMAGE_PROPERTIES :: DetectLabelsFeatureName |
Instances
EmotionName
newtype EmotionName Source #
pattern EmotionName_ANGRY :: EmotionName | |
pattern EmotionName_CALM :: EmotionName | |
pattern EmotionName_CONFUSED :: EmotionName | |
pattern EmotionName_DISGUSTED :: EmotionName | |
pattern EmotionName_FEAR :: EmotionName | |
pattern EmotionName_HAPPY :: EmotionName | |
pattern EmotionName_SAD :: EmotionName | |
pattern EmotionName_SURPRISED :: EmotionName | |
pattern EmotionName_UNKNOWN :: EmotionName |
Instances
FaceAttributes
newtype FaceAttributes Source #
pattern FaceAttributes_ALL :: FaceAttributes | |
pattern FaceAttributes_DEFAULT :: FaceAttributes |
Instances
FaceSearchSortBy
newtype FaceSearchSortBy Source #
pattern FaceSearchSortBy_INDEX :: FaceSearchSortBy | |
pattern FaceSearchSortBy_TIMESTAMP :: FaceSearchSortBy |
Instances
GenderType
newtype GenderType Source #
pattern GenderType_Female :: GenderType | |
pattern GenderType_Male :: GenderType |
Instances
KnownGenderType
newtype KnownGenderType Source #
A list of enum strings of possible gender values that Celebrity returns.
pattern KnownGenderType_Female :: KnownGenderType | |
pattern KnownGenderType_Male :: KnownGenderType | |
pattern KnownGenderType_Nonbinary :: KnownGenderType | |
pattern KnownGenderType_Unlisted :: KnownGenderType |
Instances
LabelDetectionAggregateBy
newtype LabelDetectionAggregateBy Source #
pattern LabelDetectionAggregateBy_SEGMENTS :: LabelDetectionAggregateBy | |
pattern LabelDetectionAggregateBy_TIMESTAMPS :: LabelDetectionAggregateBy |
Instances
LabelDetectionFeatureName
newtype LabelDetectionFeatureName Source #
Instances
LabelDetectionSortBy
newtype LabelDetectionSortBy Source #
pattern LabelDetectionSortBy_NAME :: LabelDetectionSortBy | |
pattern LabelDetectionSortBy_TIMESTAMP :: LabelDetectionSortBy |
Instances
LandmarkType
newtype LandmarkType Source #
Instances
OrientationCorrection
newtype OrientationCorrection Source #
Instances
PersonTrackingSortBy
newtype PersonTrackingSortBy Source #
pattern PersonTrackingSortBy_INDEX :: PersonTrackingSortBy | |
pattern PersonTrackingSortBy_TIMESTAMP :: PersonTrackingSortBy |
Instances
ProjectStatus
newtype ProjectStatus Source #
pattern ProjectStatus_CREATED :: ProjectStatus | |
pattern ProjectStatus_CREATING :: ProjectStatus | |
pattern ProjectStatus_DELETING :: ProjectStatus |
Instances
ProjectVersionStatus
newtype ProjectVersionStatus Source #
Instances
ProtectiveEquipmentType
newtype ProtectiveEquipmentType Source #
Instances
QualityFilter
newtype QualityFilter Source #
pattern QualityFilter_AUTO :: QualityFilter | |
pattern QualityFilter_HIGH :: QualityFilter | |
pattern QualityFilter_LOW :: QualityFilter | |
pattern QualityFilter_MEDIUM :: QualityFilter | |
pattern QualityFilter_NONE :: QualityFilter |
Instances
Reason
pattern Reason_EXCEEDS_MAX_FACES :: Reason | |
pattern Reason_EXTREME_POSE :: Reason | |
pattern Reason_LOW_BRIGHTNESS :: Reason | |
pattern Reason_LOW_CONFIDENCE :: Reason | |
pattern Reason_LOW_FACE_QUALITY :: Reason | |
pattern Reason_LOW_SHARPNESS :: Reason | |
pattern Reason_SMALL_BOUNDING_BOX :: Reason |
Instances
SegmentType
newtype SegmentType Source #
pattern SegmentType_SHOT :: SegmentType | |
pattern SegmentType_TECHNICAL_CUE :: SegmentType |
Instances
StreamProcessorParameterToDelete
newtype StreamProcessorParameterToDelete Source #
pattern StreamProcessorParameterToDelete_ConnectedHomeMinConfidence :: StreamProcessorParameterToDelete | |
pattern StreamProcessorParameterToDelete_RegionsOfInterest :: StreamProcessorParameterToDelete |
Instances
StreamProcessorStatus
newtype StreamProcessorStatus Source #
pattern StreamProcessorStatus_FAILED :: StreamProcessorStatus | |
pattern StreamProcessorStatus_RUNNING :: StreamProcessorStatus | |
pattern StreamProcessorStatus_STARTING :: StreamProcessorStatus | |
pattern StreamProcessorStatus_STOPPED :: StreamProcessorStatus | |
pattern StreamProcessorStatus_STOPPING :: StreamProcessorStatus | |
pattern StreamProcessorStatus_UPDATING :: StreamProcessorStatus |
Instances
TechnicalCueType
newtype TechnicalCueType Source #
pattern TechnicalCueType_BlackFrames :: TechnicalCueType | |
pattern TechnicalCueType_ColorBars :: TechnicalCueType | |
pattern TechnicalCueType_Content :: TechnicalCueType | |
pattern TechnicalCueType_EndCredits :: TechnicalCueType | |
pattern TechnicalCueType_OpeningCredits :: TechnicalCueType | |
pattern TechnicalCueType_Slate :: TechnicalCueType | |
pattern TechnicalCueType_StudioLogo :: TechnicalCueType |
Instances
TextTypes
pattern TextTypes_LINE :: TextTypes | |
pattern TextTypes_WORD :: TextTypes |
Instances
VideoColorRange
newtype VideoColorRange Source #
pattern VideoColorRange_FULL :: VideoColorRange | |
pattern VideoColorRange_LIMITED :: VideoColorRange |
Instances
VideoJobStatus
newtype VideoJobStatus Source #
pattern VideoJobStatus_FAILED :: VideoJobStatus | |
pattern VideoJobStatus_IN_PROGRESS :: VideoJobStatus | |
pattern VideoJobStatus_SUCCEEDED :: VideoJobStatus |
Instances
AgeRange
Structure containing the estimated age range, in years, for a face.
Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age ranges can overlap. A face of a 5-year-old might have an estimated range of 4-6, while the face of a 6-year-old might have an estimated range of 4-8.
See: newAgeRange
smart constructor.
Instances
FromJSON AgeRange Source # | |
Generic AgeRange Source # | |
Read AgeRange Source # | |
Show AgeRange Source # | |
NFData AgeRange Source # | |
Defined in Amazonka.Rekognition.Types.AgeRange | |
Eq AgeRange Source # | |
Hashable AgeRange Source # | |
Defined in Amazonka.Rekognition.Types.AgeRange | |
type Rep AgeRange Source # | |
Defined in Amazonka.Rekognition.Types.AgeRange type Rep AgeRange = D1 ('MetaData "AgeRange" "Amazonka.Rekognition.Types.AgeRange" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "AgeRange'" 'PrefixI 'True) (S1 ('MetaSel ('Just "high") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)) :*: S1 ('MetaSel ('Just "low") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)))) |
newAgeRange :: AgeRange Source #
Create a value of AgeRange
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:high:AgeRange'
, ageRange_high
- The highest estimated age.
$sel:low:AgeRange'
, ageRange_low
- The lowest estimated age.
Asset
Assets are the images that you use to train and evaluate a model version. Assets can also contain validation information that you use to debug a failed model training.
See: newAsset
smart constructor.
Instances
FromJSON Asset Source # | |
ToJSON Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset | |
Generic Asset Source # | |
Read Asset Source # | |
Show Asset Source # | |
NFData Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset | |
Eq Asset Source # | |
Hashable Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset | |
type Rep Asset Source # | |
Defined in Amazonka.Rekognition.Types.Asset type Rep Asset = D1 ('MetaData "Asset" "Amazonka.Rekognition.Types.Asset" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Asset'" 'PrefixI 'True) (S1 ('MetaSel ('Just "groundTruthManifest") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe GroundTruthManifest)))) |
Create a value of Asset
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:groundTruthManifest:Asset'
, asset_groundTruthManifest
- Undocumented member.
asset_groundTruthManifest :: Lens' Asset (Maybe GroundTruthManifest) Source #
Undocumented member.
AudioMetadata
data AudioMetadata Source #
Metadata information about an audio stream. An array of AudioMetadata
objects for the audio streams found in a stored video is returned by
GetSegmentDetection.
See: newAudioMetadata
smart constructor.
AudioMetadata' | |
|
Instances
newAudioMetadata :: AudioMetadata Source #
Create a value of AudioMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:codec:AudioMetadata'
, audioMetadata_codec
- The audio codec used to encode or decode the audio stream.
$sel:durationMillis:AudioMetadata'
, audioMetadata_durationMillis
- The duration of the audio stream in milliseconds.
$sel:numberOfChannels:AudioMetadata'
, audioMetadata_numberOfChannels
- The number of audio channels in the segment.
$sel:sampleRate:AudioMetadata'
, audioMetadata_sampleRate
- The sample rate for the audio stream.
audioMetadata_codec :: Lens' AudioMetadata (Maybe Text) Source #
The audio codec used to encode or decode the audio stream.
audioMetadata_durationMillis :: Lens' AudioMetadata (Maybe Natural) Source #
The duration of the audio stream in milliseconds.
audioMetadata_numberOfChannels :: Lens' AudioMetadata (Maybe Natural) Source #
The number of audio channels in the segment.
audioMetadata_sampleRate :: Lens' AudioMetadata (Maybe Natural) Source #
The sample rate for the audio stream.
Beard
Indicates whether or not the face has a beard, and the confidence level in the determination.
See: newBeard
smart constructor.
Instances
FromJSON Beard Source # | |
Generic Beard Source # | |
Read Beard Source # | |
Show Beard Source # | |
NFData Beard Source # | |
Defined in Amazonka.Rekognition.Types.Beard | |
Eq Beard Source # | |
Hashable Beard Source # | |
Defined in Amazonka.Rekognition.Types.Beard | |
type Rep Beard Source # | |
Defined in Amazonka.Rekognition.Types.Beard type Rep Beard = D1 ('MetaData "Beard" "Amazonka.Rekognition.Types.Beard" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Beard'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)))) |
Create a value of Beard
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Beard'
, beard_confidence
- Level of confidence in the determination.
$sel:value:Beard'
, beard_value
- Boolean value that indicates whether the face has a beard or not.
beard_value :: Lens' Beard (Maybe Bool) Source #
Boolean value that indicates whether the face has a beard or not.
BlackFrame
data BlackFrame Source #
A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame. As videos can come from multiple sources, formats, and time periods, they may contain different standards and varying noise levels for black frames that need to be accounted for. For more information, see StartSegmentDetection.
See: newBlackFrame
smart constructor.
BlackFrame' | |
|
Instances
newBlackFrame :: BlackFrame Source #
Create a value of BlackFrame
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:maxPixelThreshold:BlackFrame'
, blackFrame_maxPixelThreshold
- A threshold used to determine the maximum luminance value for a pixel to
be considered black. In a full color range video, luminance values range
from 0-255. A pixel value of 0 is pure black, and the most strict
filter. The maximum black pixel value is computed as follows:
max_black_pixel_value = minimum_luminance + MaxPixelThreshold
*luminance_range.
For example, for a full range video with BlackPixelThreshold = 0.1, max_black_pixel_value is 0 + 0.1 * (255-0) = 25.5.
The default value of MaxPixelThreshold is 0.2, which maps to a max_black_pixel_value of 51 for a full range video. You can lower this threshold to be more strict on black levels.
$sel:minCoveragePercentage:BlackFrame'
, blackFrame_minCoveragePercentage
- The minimum percentage of pixels in a frame that need to have a
luminance below the max_black_pixel_value for a frame to be considered a
black frame. Luminance is calculated using the BT.709 matrix.
The default value is 99, which means at least 99% of all pixels in the
frame are black pixels as per the MaxPixelThreshold
set. You can
reduce this value to allow more noise on the black frame.
blackFrame_maxPixelThreshold :: Lens' BlackFrame (Maybe Double) Source #
A threshold used to determine the maximum luminance value for a pixel to be considered black. In a full color range video, luminance values range from 0-255. A pixel value of 0 is pure black, and the most strict filter. The maximum black pixel value is computed as follows: max_black_pixel_value = minimum_luminance + MaxPixelThreshold *luminance_range.
For example, for a full range video with BlackPixelThreshold = 0.1, max_black_pixel_value is 0 + 0.1 * (255-0) = 25.5.
The default value of MaxPixelThreshold is 0.2, which maps to a max_black_pixel_value of 51 for a full range video. You can lower this threshold to be more strict on black levels.
blackFrame_minCoveragePercentage :: Lens' BlackFrame (Maybe Double) Source #
The minimum percentage of pixels in a frame that need to have a luminance below the max_black_pixel_value for a frame to be considered a black frame. Luminance is calculated using the BT.709 matrix.
The default value is 99, which means at least 99% of all pixels in the
frame are black pixels as per the MaxPixelThreshold
set. You can
reduce this value to allow more noise on the black frame.
BoundingBox
data BoundingBox Source #
Identifies the bounding box around the label, face, text, object of
interest, or personal protective equipment. The left
(x-coordinate)
and top
(y-coordinate) are coordinates representing the top and left
sides of the bounding box. Note that the upper-left corner of the image
is the origin (0,0).
The top
and left
values returned are ratios of the overall image
size. For example, if the input image is 700x200 pixels, and the
top-left coordinate of the bounding box is 350x50 pixels, the API
returns a left
value of 0.5 (350/700) and a top
value of 0.25
(50/200).
The width
and height
values represent the dimensions of the bounding
box as a ratio of the overall image dimension. For example, if the input
image is 700x200 pixels, and the bounding box width is 70 pixels, the
width returned is 0.1.
The bounding box coordinates can have negative values. For example, if
Amazon Rekognition is able to detect a face that is at the image edge
and is only partially visible, the service can return coordinates that
are outside the image bounds and, depending on the image edge, you might
get negative values or values greater than 1 for the left
or top
values.
See: newBoundingBox
smart constructor.
BoundingBox' | |
|
Instances
newBoundingBox :: BoundingBox Source #
Create a value of BoundingBox
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:height:BoundingBox'
, boundingBox_height
- Height of the bounding box as a ratio of the overall image height.
$sel:left:BoundingBox'
, boundingBox_left
- Left coordinate of the bounding box as a ratio of overall image width.
$sel:top:BoundingBox'
, boundingBox_top
- Top coordinate of the bounding box as a ratio of overall image height.
$sel:width:BoundingBox'
, boundingBox_width
- Width of the bounding box as a ratio of the overall image width.
boundingBox_height :: Lens' BoundingBox (Maybe Double) Source #
Height of the bounding box as a ratio of the overall image height.
boundingBox_left :: Lens' BoundingBox (Maybe Double) Source #
Left coordinate of the bounding box as a ratio of overall image width.
boundingBox_top :: Lens' BoundingBox (Maybe Double) Source #
Top coordinate of the bounding box as a ratio of overall image height.
boundingBox_width :: Lens' BoundingBox (Maybe Double) Source #
Width of the bounding box as a ratio of the overall image width.
Celebrity
Provides information about a celebrity recognized by the RecognizeCelebrities operation.
See: newCelebrity
smart constructor.
Celebrity' | |
|
Instances
newCelebrity :: Celebrity Source #
Create a value of Celebrity
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:face:Celebrity'
, celebrity_face
- Provides information about the celebrity's face, such as its location
on the image.
$sel:id:Celebrity'
, celebrity_id
- A unique identifier for the celebrity.
$sel:knownGender:Celebrity'
, celebrity_knownGender
- Undocumented member.
$sel:matchConfidence:Celebrity'
, celebrity_matchConfidence
- The confidence, in percentage, that Amazon Rekognition has that the
recognized face is the celebrity.
$sel:name:Celebrity'
, celebrity_name
- The name of the celebrity.
$sel:urls:Celebrity'
, celebrity_urls
- An array of URLs pointing to additional information about the celebrity.
If there is no additional information about the celebrity, this list is
empty.
celebrity_face :: Lens' Celebrity (Maybe ComparedFace) Source #
Provides information about the celebrity's face, such as its location on the image.
celebrity_knownGender :: Lens' Celebrity (Maybe KnownGender) Source #
Undocumented member.
celebrity_matchConfidence :: Lens' Celebrity (Maybe Double) Source #
The confidence, in percentage, that Amazon Rekognition has that the recognized face is the celebrity.
celebrity_urls :: Lens' Celebrity (Maybe [Text]) Source #
An array of URLs pointing to additional information about the celebrity. If there is no additional information about the celebrity, this list is empty.
CelebrityDetail
data CelebrityDetail Source #
Information about a recognized celebrity.
See: newCelebrityDetail
smart constructor.
CelebrityDetail' | |
|
Instances
newCelebrityDetail :: CelebrityDetail Source #
Create a value of CelebrityDetail
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
CelebrityDetail
, celebrityDetail_boundingBox
- Bounding box around the body of a celebrity.
CelebrityDetail
, celebrityDetail_confidence
- The confidence, in percentage, that Amazon Rekognition has that the
recognized face is the celebrity.
$sel:face:CelebrityDetail'
, celebrityDetail_face
- Face details for the recognized celebrity.
$sel:id:CelebrityDetail'
, celebrityDetail_id
- The unique identifier for the celebrity.
$sel:knownGender:CelebrityDetail'
, celebrityDetail_knownGender
- Retrieves the known gender for the celebrity.
$sel:name:CelebrityDetail'
, celebrityDetail_name
- The name of the celebrity.
$sel:urls:CelebrityDetail'
, celebrityDetail_urls
- An array of URLs pointing to additional celebrity information.
celebrityDetail_boundingBox :: Lens' CelebrityDetail (Maybe BoundingBox) Source #
Bounding box around the body of a celebrity.
celebrityDetail_confidence :: Lens' CelebrityDetail (Maybe Double) Source #
The confidence, in percentage, that Amazon Rekognition has that the recognized face is the celebrity.
celebrityDetail_face :: Lens' CelebrityDetail (Maybe FaceDetail) Source #
Face details for the recognized celebrity.
celebrityDetail_id :: Lens' CelebrityDetail (Maybe Text) Source #
The unique identifier for the celebrity.
celebrityDetail_knownGender :: Lens' CelebrityDetail (Maybe KnownGender) Source #
Retrieves the known gender for the celebrity.
celebrityDetail_name :: Lens' CelebrityDetail (Maybe Text) Source #
The name of the celebrity.
celebrityDetail_urls :: Lens' CelebrityDetail (Maybe [Text]) Source #
An array of URLs pointing to additional celebrity information.
CelebrityRecognition
data CelebrityRecognition Source #
Information about a detected celebrity and the time the celebrity was detected in a stored video. For more information, see GetCelebrityRecognition in the Amazon Rekognition Developer Guide.
See: newCelebrityRecognition
smart constructor.
CelebrityRecognition' | |
|
Instances
newCelebrityRecognition :: CelebrityRecognition Source #
Create a value of CelebrityRecognition
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:celebrity:CelebrityRecognition'
, celebrityRecognition_celebrity
- Information about a recognized celebrity.
$sel:timestamp:CelebrityRecognition'
, celebrityRecognition_timestamp
- The time, in milliseconds from the start of the video, that the
celebrity was recognized. Note that Timestamp
is not guaranteed to be
accurate to the individual frame where the celebrity first appears.
celebrityRecognition_celebrity :: Lens' CelebrityRecognition (Maybe CelebrityDetail) Source #
Information about a recognized celebrity.
celebrityRecognition_timestamp :: Lens' CelebrityRecognition (Maybe Integer) Source #
The time, in milliseconds from the start of the video, that the
celebrity was recognized. Note that Timestamp
is not guaranteed to be
accurate to the individual frame where the celebrity first appears.
CompareFacesMatch
data CompareFacesMatch Source #
Provides information about a face in a target image that matches the
source image face analyzed by CompareFaces
. The Face
property
contains the bounding box of the face in the target image. The
Similarity
property is the confidence that the source image face
matches the face in the bounding box.
See: newCompareFacesMatch
smart constructor.
CompareFacesMatch' | |
|
Instances
newCompareFacesMatch :: CompareFacesMatch Source #
Create a value of CompareFacesMatch
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:face:CompareFacesMatch'
, compareFacesMatch_face
- Provides face metadata (bounding box and confidence that the bounding
box actually contains a face).
$sel:similarity:CompareFacesMatch'
, compareFacesMatch_similarity
- Level of confidence that the faces match.
compareFacesMatch_face :: Lens' CompareFacesMatch (Maybe ComparedFace) Source #
Provides face metadata (bounding box and confidence that the bounding box actually contains a face).
compareFacesMatch_similarity :: Lens' CompareFacesMatch (Maybe Double) Source #
Level of confidence that the faces match.
ComparedFace
data ComparedFace Source #
Provides face metadata for target image faces that are analyzed by
CompareFaces
and RecognizeCelebrities
.
See: newComparedFace
smart constructor.
ComparedFace' | |
|
Instances
newComparedFace :: ComparedFace Source #
Create a value of ComparedFace
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:ComparedFace'
, comparedFace_boundingBox
- Bounding box of the face.
ComparedFace
, comparedFace_confidence
- Level of confidence that what the bounding box contains is a face.
$sel:emotions:ComparedFace'
, comparedFace_emotions
- The emotions that appear to be expressed on the face, and the confidence
level in the determination. Valid values include "Happy", "Sad",
"Angry", "Confused", "Disgusted", "Surprised", "Calm",
"Unknown", and "Fear".
$sel:landmarks:ComparedFace'
, comparedFace_landmarks
- An array of facial landmarks.
$sel:pose:ComparedFace'
, comparedFace_pose
- Indicates the pose of the face as determined by its pitch, roll, and
yaw.
$sel:quality:ComparedFace'
, comparedFace_quality
- Identifies face image brightness and sharpness.
$sel:smile:ComparedFace'
, comparedFace_smile
- Indicates whether or not the face is smiling, and the confidence level
in the determination.
comparedFace_boundingBox :: Lens' ComparedFace (Maybe BoundingBox) Source #
Bounding box of the face.
comparedFace_confidence :: Lens' ComparedFace (Maybe Double) Source #
Level of confidence that what the bounding box contains is a face.
comparedFace_emotions :: Lens' ComparedFace (Maybe [Emotion]) Source #
The emotions that appear to be expressed on the face, and the confidence level in the determination. Valid values include "Happy", "Sad", "Angry", "Confused", "Disgusted", "Surprised", "Calm", "Unknown", and "Fear".
comparedFace_landmarks :: Lens' ComparedFace (Maybe [Landmark]) Source #
An array of facial landmarks.
comparedFace_pose :: Lens' ComparedFace (Maybe Pose) Source #
Indicates the pose of the face as determined by its pitch, roll, and yaw.
comparedFace_quality :: Lens' ComparedFace (Maybe ImageQuality) Source #
Identifies face image brightness and sharpness.
comparedFace_smile :: Lens' ComparedFace (Maybe Smile) Source #
Indicates whether or not the face is smiling, and the confidence level in the determination.
ComparedSourceImageFace
data ComparedSourceImageFace Source #
Type that describes the face Amazon Rekognition chose to compare with the faces in the target. This contains a bounding box for the selected face and confidence level that the bounding box contains a face. Note that Amazon Rekognition selects the largest face in the source image for this comparison.
See: newComparedSourceImageFace
smart constructor.
ComparedSourceImageFace' | |
|
Instances
newComparedSourceImageFace :: ComparedSourceImageFace Source #
Create a value of ComparedSourceImageFace
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:ComparedSourceImageFace'
, comparedSourceImageFace_boundingBox
- Bounding box of the face.
$sel:confidence:ComparedSourceImageFace'
, comparedSourceImageFace_confidence
- Confidence level that the selected bounding box contains a face.
comparedSourceImageFace_boundingBox :: Lens' ComparedSourceImageFace (Maybe BoundingBox) Source #
Bounding box of the face.
comparedSourceImageFace_confidence :: Lens' ComparedSourceImageFace (Maybe Double) Source #
Confidence level that the selected bounding box contains a face.
ConnectedHomeSettings
data ConnectedHomeSettings Source #
Label detection settings to use on a streaming video. Defining the
settings is required in the request parameter for CreateStreamProcessor.
Including this setting in the CreateStreamProcessor
request enables
you to use the stream processor for label detection. You can then select
what you want the stream processor to detect, such as people or pets.
When the stream processor has started, one notification is sent for each
object class specified. For example, if packages and pets are selected,
one SNS notification is published the first time a package is detected
and one SNS notification is published the first time a pet is detected,
as well as an end-of-session summary.
See: newConnectedHomeSettings
smart constructor.
Instances
newConnectedHomeSettings Source #
Create a value of ConnectedHomeSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minConfidence:ConnectedHomeSettings'
, connectedHomeSettings_minConfidence
- The minimum confidence required to label an object in the video.
$sel:labels:ConnectedHomeSettings'
, connectedHomeSettings_labels
- Specifies what you want to detect in the video, such as people,
packages, or pets. The current valid labels you can include in this list
are: "PERSON", "PET", "PACKAGE", and "ALL".
connectedHomeSettings_minConfidence :: Lens' ConnectedHomeSettings (Maybe Double) Source #
The minimum confidence required to label an object in the video.
connectedHomeSettings_labels :: Lens' ConnectedHomeSettings (NonEmpty Text) Source #
Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: "PERSON", "PET", "PACKAGE", and "ALL".
ConnectedHomeSettingsForUpdate
data ConnectedHomeSettingsForUpdate Source #
The label detection settings you want to use in your stream processor. This includes the labels you want the stream processor to detect and the minimum confidence level allowed to label objects.
See: newConnectedHomeSettingsForUpdate
smart constructor.
ConnectedHomeSettingsForUpdate' | |
|
Instances
newConnectedHomeSettingsForUpdate :: ConnectedHomeSettingsForUpdate Source #
Create a value of ConnectedHomeSettingsForUpdate
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:labels:ConnectedHomeSettingsForUpdate'
, connectedHomeSettingsForUpdate_labels
- Specifies what you want to detect in the video, such as people,
packages, or pets. The current valid labels you can include in this list
are: "PERSON", "PET", "PACKAGE", and "ALL".
$sel:minConfidence:ConnectedHomeSettingsForUpdate'
, connectedHomeSettingsForUpdate_minConfidence
- The minimum confidence required to label an object in the video.
connectedHomeSettingsForUpdate_labels :: Lens' ConnectedHomeSettingsForUpdate (Maybe (NonEmpty Text)) Source #
Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: "PERSON", "PET", "PACKAGE", and "ALL".
connectedHomeSettingsForUpdate_minConfidence :: Lens' ConnectedHomeSettingsForUpdate (Maybe Double) Source #
The minimum confidence required to label an object in the video.
ContentModerationDetection
data ContentModerationDetection Source #
Information about an inappropriate, unwanted, or offensive content label detection in a stored video.
See: newContentModerationDetection
smart constructor.
ContentModerationDetection' | |
|
Instances
newContentModerationDetection :: ContentModerationDetection Source #
Create a value of ContentModerationDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:moderationLabel:ContentModerationDetection'
, contentModerationDetection_moderationLabel
- The content moderation label detected in the stored video.
$sel:timestamp:ContentModerationDetection'
, contentModerationDetection_timestamp
- Time, in milliseconds from the beginning of the video, that the content
moderation label was detected. Note that Timestamp
is not guaranteed
to be accurate to the individual frame where the moderated content first
appears.
contentModerationDetection_moderationLabel :: Lens' ContentModerationDetection (Maybe ModerationLabel) Source #
The content moderation label detected in the stored video.
contentModerationDetection_timestamp :: Lens' ContentModerationDetection (Maybe Integer) Source #
Time, in milliseconds from the beginning of the video, that the content
moderation label was detected. Note that Timestamp
is not guaranteed
to be accurate to the individual frame where the moderated content first
appears.
CoversBodyPart
data CoversBodyPart Source #
Information about an item of Personal Protective Equipment covering a corresponding body part. For more information, see DetectProtectiveEquipment.
See: newCoversBodyPart
smart constructor.
Instances
newCoversBodyPart :: CoversBodyPart Source #
Create a value of CoversBodyPart
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:CoversBodyPart'
, coversBodyPart_confidence
- The confidence that Amazon Rekognition has in the value of Value
.
$sel:value:CoversBodyPart'
, coversBodyPart_value
- True if the PPE covers the corresponding body part, otherwise false.
coversBodyPart_confidence :: Lens' CoversBodyPart (Maybe Double) Source #
The confidence that Amazon Rekognition has in the value of Value
.
coversBodyPart_value :: Lens' CoversBodyPart (Maybe Bool) Source #
True if the PPE covers the corresponding body part, otherwise false.
CustomLabel
data CustomLabel Source #
A custom label detected in an image by a call to DetectCustomLabels.
See: newCustomLabel
smart constructor.
CustomLabel' | |
|
Instances
newCustomLabel :: CustomLabel Source #
Create a value of CustomLabel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:CustomLabel'
, customLabel_confidence
- The confidence that the model has in the detection of the custom label.
The range is 0-100. A higher value indicates a higher confidence.
$sel:geometry:CustomLabel'
, customLabel_geometry
- The location of the detected object on the image that corresponds to the
custom label. Includes an axis aligned coarse bounding box surrounding
the object and a finer grain polygon for more accurate spatial
information.
$sel:name:CustomLabel'
, customLabel_name
- The name of the custom label.
customLabel_confidence :: Lens' CustomLabel (Maybe Double) Source #
The confidence that the model has in the detection of the custom label. The range is 0-100. A higher value indicates a higher confidence.
customLabel_geometry :: Lens' CustomLabel (Maybe Geometry) Source #
The location of the detected object on the image that corresponds to the custom label. Includes an axis aligned coarse bounding box surrounding the object and a finer grain polygon for more accurate spatial information.
customLabel_name :: Lens' CustomLabel (Maybe Text) Source #
The name of the custom label.
DatasetChanges
data DatasetChanges Source #
Describes updates or additions to a dataset. A Single update or addition
is an entry (JSON Line) that provides information about a single image.
To update an existing entry, you match the source-ref
field of the
update entry with the source-ref
field of the entry that you want to
update. If the source-ref
field doesn't match an existing entry, the
entry is added to the dataset as a new entry.
See: newDatasetChanges
smart constructor.
DatasetChanges' | |
|
Instances
Create a value of DatasetChanges
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:groundTruth:DatasetChanges'
, datasetChanges_groundTruth
- A Base64-encoded binary data object containing one or more JSON lines that
either update the dataset or are additions to the dataset. You change a
dataset by calling UpdateDatasetEntries. If you are using an AWS SDK to
call UpdateDatasetEntries
, you don't need to encode Changes
as the
SDK encodes the data for you.
For example JSON lines, see Image-Level labels in manifest files and
Object localization in manifest files in the /Amazon Rekognition Custom
Labels Developer Guide/.
Note: This Lens
automatically encodes and decodes Base64 data.
The underlying isomorphism will encode to Base64 representation during
serialisation, and decode from Base64 representation during deserialisation.
This Lens
accepts and returns only raw unencoded data.
datasetChanges_groundTruth :: Lens' DatasetChanges ByteString Source #
A Base64-encoded binary data object containing one or more JSON lines that
either update the dataset or are additions to the dataset. You change a
dataset by calling UpdateDatasetEntries. If you are using an AWS SDK to
call UpdateDatasetEntries
, you don't need to encode Changes
as the
SDK encodes the data for you.
For example JSON lines, see Image-Level labels in manifest files and
Object localization in manifest files in the /Amazon Rekognition Custom
Labels Developer Guide/.
Note: This Lens
automatically encodes and decodes Base64 data.
The underlying isomorphism will encode to Base64 representation during
serialisation, and decode from Base64 representation during deserialisation.
This Lens
accepts and returns only raw unencoded data.
DatasetDescription
data DatasetDescription Source #
A description for a dataset. For more information, see DescribeDataset.
The status fields Status
, StatusMessage
, and StatusMessageCode
reflect the last operation on the dataset.
See: newDatasetDescription
smart constructor.
DatasetDescription' | |
|
Instances
newDatasetDescription :: DatasetDescription Source #
Create a value of DatasetDescription
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTimestamp:DatasetDescription'
, datasetDescription_creationTimestamp
- The Unix timestamp for the time and date that the dataset was created.
$sel:datasetStats:DatasetDescription'
, datasetDescription_datasetStats
- Statistics about the dataset.
$sel:lastUpdatedTimestamp:DatasetDescription'
, datasetDescription_lastUpdatedTimestamp
- The Unix timestamp for the date and time that the dataset was last
updated.
$sel:status:DatasetDescription'
, datasetDescription_status
- The status of the dataset.
$sel:statusMessage:DatasetDescription'
, datasetDescription_statusMessage
- The status message for the dataset.
$sel:statusMessageCode:DatasetDescription'
, datasetDescription_statusMessageCode
- The status message code for the dataset operation. If a service error
occurs, try the API call again later. If a client error occurs, check
the input parameters to the dataset API call that failed.
datasetDescription_creationTimestamp :: Lens' DatasetDescription (Maybe UTCTime) Source #
The Unix timestamp for the time and date that the dataset was created.
datasetDescription_datasetStats :: Lens' DatasetDescription (Maybe DatasetStats) Source #
Statistics about the dataset.
datasetDescription_lastUpdatedTimestamp :: Lens' DatasetDescription (Maybe UTCTime) Source #
The Unix timestamp for the date and time that the dataset was last updated.
datasetDescription_status :: Lens' DatasetDescription (Maybe DatasetStatus) Source #
The status of the dataset.
datasetDescription_statusMessage :: Lens' DatasetDescription (Maybe Text) Source #
The status message for the dataset.
datasetDescription_statusMessageCode :: Lens' DatasetDescription (Maybe DatasetStatusMessageCode) Source #
The status message code for the dataset operation. If a service error occurs, try the API call again later. If a client error occurs, check the input parameters to the dataset API call that failed.
DatasetLabelDescription
data DatasetLabelDescription Source #
Describes a dataset label. For more information, see ListDatasetLabels.
See: newDatasetLabelDescription
smart constructor.
DatasetLabelDescription' | |
|
Instances
newDatasetLabelDescription :: DatasetLabelDescription Source #
Create a value of DatasetLabelDescription
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:labelName:DatasetLabelDescription'
, datasetLabelDescription_labelName
- The name of the label.
$sel:labelStats:DatasetLabelDescription'
, datasetLabelDescription_labelStats
- Statistics about the label.
datasetLabelDescription_labelName :: Lens' DatasetLabelDescription (Maybe Text) Source #
The name of the label.
datasetLabelDescription_labelStats :: Lens' DatasetLabelDescription (Maybe DatasetLabelStats) Source #
Statistics about the label.
DatasetLabelStats
data DatasetLabelStats Source #
Statistics about a label used in a dataset. For more information, see DatasetLabelDescription.
See: newDatasetLabelStats
smart constructor.
DatasetLabelStats' | |
|
Instances
newDatasetLabelStats :: DatasetLabelStats Source #
Create a value of DatasetLabelStats
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBoxCount:DatasetLabelStats'
, datasetLabelStats_boundingBoxCount
- The total number of images that have the label assigned to a bounding
box.
$sel:entryCount:DatasetLabelStats'
, datasetLabelStats_entryCount
- The total number of images that use the label.
datasetLabelStats_boundingBoxCount :: Lens' DatasetLabelStats (Maybe Natural) Source #
The total number of images that have the label assigned to a bounding box.
datasetLabelStats_entryCount :: Lens' DatasetLabelStats (Maybe Natural) Source #
The total number of images that use the label.
DatasetMetadata
data DatasetMetadata Source #
Summary information for an Amazon Rekognition Custom Labels dataset. For more information, see ProjectDescription.
See: newDatasetMetadata
smart constructor.
DatasetMetadata' | |
|
Instances
newDatasetMetadata :: DatasetMetadata Source #
Create a value of DatasetMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTimestamp:DatasetMetadata'
, datasetMetadata_creationTimestamp
- The Unix timestamp for the date and time that the dataset was created.
$sel:datasetArn:DatasetMetadata'
, datasetMetadata_datasetArn
- The Amazon Resource Name (ARN) for the dataset.
$sel:datasetType:DatasetMetadata'
, datasetMetadata_datasetType
- The type of the dataset.
$sel:status:DatasetMetadata'
, datasetMetadata_status
- The status for the dataset.
$sel:statusMessage:DatasetMetadata'
, datasetMetadata_statusMessage
- The status message for the dataset.
$sel:statusMessageCode:DatasetMetadata'
, datasetMetadata_statusMessageCode
- The status message code for the dataset operation. If a service error
occurs, try the API call again later. If a client error occurs, check
the input parameters to the dataset API call that failed.
datasetMetadata_creationTimestamp :: Lens' DatasetMetadata (Maybe UTCTime) Source #
The Unix timestamp for the date and time that the dataset was created.
datasetMetadata_datasetArn :: Lens' DatasetMetadata (Maybe Text) Source #
The Amazon Resource Name (ARN) for the dataset.
datasetMetadata_datasetType :: Lens' DatasetMetadata (Maybe DatasetType) Source #
The type of the dataset.
datasetMetadata_status :: Lens' DatasetMetadata (Maybe DatasetStatus) Source #
The status for the dataset.
datasetMetadata_statusMessage :: Lens' DatasetMetadata (Maybe Text) Source #
The status message for the dataset.
datasetMetadata_statusMessageCode :: Lens' DatasetMetadata (Maybe DatasetStatusMessageCode) Source #
The status message code for the dataset operation. If a service error occurs, try the API call again later. If a client error occurs, check the input parameters to the dataset API call that failed.
DatasetSource
data DatasetSource Source #
The source that Amazon Rekognition Custom Labels uses to create a
dataset. To use an Amazon SageMaker format manifest file, specify the S3
bucket location in the GroundTruthManifest
field. The S3 bucket must
be in your AWS account. To create a copy of an existing dataset, specify
the Amazon Resource Name (ARN) of an existing dataset in DatasetArn
.
You need to specify a value for DatasetArn
or GroundTruthManifest
,
but not both. If you supply both values, or if you don't specify any
values, an InvalidParameterException exception occurs.
For more information, see CreateDataset.
See: newDatasetSource
smart constructor.
DatasetSource' | |
|
Instances
newDatasetSource :: DatasetSource Source #
Create a value of DatasetSource
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:datasetArn:DatasetSource'
, datasetSource_datasetArn
- The ARN of an Amazon Rekognition Custom Labels dataset that you want to
copy.
$sel:groundTruthManifest:DatasetSource'
, datasetSource_groundTruthManifest
- Undocumented member.
datasetSource_datasetArn :: Lens' DatasetSource (Maybe Text) Source #
The ARN of an Amazon Rekognition Custom Labels dataset that you want to copy.
datasetSource_groundTruthManifest :: Lens' DatasetSource (Maybe GroundTruthManifest) Source #
Undocumented member.
DatasetStats
data DatasetStats Source #
Provides statistics about a dataset. For more information, see DescribeDataset.
See: newDatasetStats
smart constructor.
DatasetStats' | |
|
Instances
newDatasetStats :: DatasetStats Source #
Create a value of DatasetStats
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:errorEntries:DatasetStats'
, datasetStats_errorEntries
- The total number of entries that contain at least one error.
$sel:labeledEntries:DatasetStats'
, datasetStats_labeledEntries
- The total number of images in the dataset that have labels.
$sel:totalEntries:DatasetStats'
, datasetStats_totalEntries
- The total number of images in the dataset.
$sel:totalLabels:DatasetStats'
, datasetStats_totalLabels
- The total number of labels declared in the dataset.
datasetStats_errorEntries :: Lens' DatasetStats (Maybe Natural) Source #
The total number of entries that contain at least one error.
datasetStats_labeledEntries :: Lens' DatasetStats (Maybe Natural) Source #
The total number of images in the dataset that have labels.
datasetStats_totalEntries :: Lens' DatasetStats (Maybe Natural) Source #
The total number of images in the dataset.
datasetStats_totalLabels :: Lens' DatasetStats (Maybe Natural) Source #
The total number of labels declared in the dataset.
DetectLabelsImageBackground
data DetectLabelsImageBackground Source #
The background of the image with regard to image quality and dominant colors.
See: newDetectLabelsImageBackground
smart constructor.
DetectLabelsImageBackground' | |
|
Instances
newDetectLabelsImageBackground :: DetectLabelsImageBackground Source #
Create a value of DetectLabelsImageBackground
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dominantColors:DetectLabelsImageBackground'
, detectLabelsImageBackground_dominantColors
- The dominant colors found in the background of an image, defined with
RGB values, CSS color name, simplified color name, and PixelPercentage
(the percentage of image pixels that have a particular color).
$sel:quality:DetectLabelsImageBackground'
, detectLabelsImageBackground_quality
- The quality of the image background as defined by brightness and
sharpness.
detectLabelsImageBackground_dominantColors :: Lens' DetectLabelsImageBackground (Maybe [DominantColor]) Source #
The dominant colors found in the background of an image, defined with RGB values, CSS color name, simplified color name, and PixelPercentage (the percentage of image pixels that have a particular color).
detectLabelsImageBackground_quality :: Lens' DetectLabelsImageBackground (Maybe DetectLabelsImageQuality) Source #
The quality of the image background as defined by brightness and sharpness.
DetectLabelsImageForeground
data DetectLabelsImageForeground Source #
The foreground of the image with regard to image quality and dominant colors.
See: newDetectLabelsImageForeground
smart constructor.
DetectLabelsImageForeground' | |
|
Instances
newDetectLabelsImageForeground :: DetectLabelsImageForeground Source #
Create a value of DetectLabelsImageForeground
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dominantColors:DetectLabelsImageForeground'
, detectLabelsImageForeground_dominantColors
- The dominant colors found in the foreground of an image, defined with
RGB values, CSS color name, simplified color name, and PixelPercentage
(the percentage of image pixels that have a particular color).
$sel:quality:DetectLabelsImageForeground'
, detectLabelsImageForeground_quality
- The quality of the image foreground as defined by brightness and
sharpness.
detectLabelsImageForeground_dominantColors :: Lens' DetectLabelsImageForeground (Maybe [DominantColor]) Source #
The dominant colors found in the foreground of an image, defined with RGB values, CSS color name, simplified color name, and PixelPercentage (the percentage of image pixels that have a particular color).
detectLabelsImageForeground_quality :: Lens' DetectLabelsImageForeground (Maybe DetectLabelsImageQuality) Source #
The quality of the image foreground as defined by brightness and sharpness.
DetectLabelsImageProperties
data DetectLabelsImageProperties Source #
Information about the quality and dominant colors of an input image. Quality and color information is returned for the entire image, foreground, and background.
See: newDetectLabelsImageProperties
smart constructor.
DetectLabelsImageProperties' | |
|
Instances
newDetectLabelsImageProperties :: DetectLabelsImageProperties Source #
Create a value of DetectLabelsImageProperties
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:background:DetectLabelsImageProperties'
, detectLabelsImageProperties_background
- Information about the properties of an image’s background, including the
background’s quality and dominant colors.
DetectLabelsImageProperties
, detectLabelsImageProperties_dominantColors
- Information about the dominant colors found in an image, described with
RGB values, CSS color name, simplified color name, and PixelPercentage
(the percentage of image pixels that have a particular color).
$sel:foreground:DetectLabelsImageProperties'
, detectLabelsImageProperties_foreground
- Information about the properties of an image’s foreground, including the
foreground’s quality and dominant colors.
DetectLabelsImageProperties
, detectLabelsImageProperties_quality
- Information about the quality of the image foreground as defined by
brightness, sharpness, and contrast. The higher the value the greater
the brightness, sharpness, and contrast respectively.
detectLabelsImageProperties_background :: Lens' DetectLabelsImageProperties (Maybe DetectLabelsImageBackground) Source #
Information about the properties of an image’s background, including the background’s quality and dominant colors.
detectLabelsImageProperties_dominantColors :: Lens' DetectLabelsImageProperties (Maybe [DominantColor]) Source #
Information about the dominant colors found in an image, described with RGB values, CSS color name, simplified color name, and PixelPercentage (the percentage of image pixels that have a particular color).
detectLabelsImageProperties_foreground :: Lens' DetectLabelsImageProperties (Maybe DetectLabelsImageForeground) Source #
Information about the properties of an image’s foreground, including the foreground’s quality and dominant colors.
detectLabelsImageProperties_quality :: Lens' DetectLabelsImageProperties (Maybe DetectLabelsImageQuality) Source #
Information about the quality of the image foreground as defined by brightness, sharpness, and contrast. The higher the value the greater the brightness, sharpness, and contrast respectively.
DetectLabelsImagePropertiesSettings
data DetectLabelsImagePropertiesSettings Source #
Settings for the IMAGE_PROPERTIES feature type.
See: newDetectLabelsImagePropertiesSettings
smart constructor.
DetectLabelsImagePropertiesSettings' | |
|
Instances
ToJSON DetectLabelsImagePropertiesSettings Source # | |
Generic DetectLabelsImagePropertiesSettings Source # | |
Read DetectLabelsImagePropertiesSettings Source # | |
Show DetectLabelsImagePropertiesSettings Source # | |
NFData DetectLabelsImagePropertiesSettings Source # | |
Eq DetectLabelsImagePropertiesSettings Source # | |
Hashable DetectLabelsImagePropertiesSettings Source # | |
type Rep DetectLabelsImagePropertiesSettings Source # | |
Defined in Amazonka.Rekognition.Types.DetectLabelsImagePropertiesSettings type Rep DetectLabelsImagePropertiesSettings = D1 ('MetaData "DetectLabelsImagePropertiesSettings" "Amazonka.Rekognition.Types.DetectLabelsImagePropertiesSettings" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "DetectLabelsImagePropertiesSettings'" 'PrefixI 'True) (S1 ('MetaSel ('Just "maxDominantColors") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)))) |
newDetectLabelsImagePropertiesSettings :: DetectLabelsImagePropertiesSettings Source #
Create a value of DetectLabelsImagePropertiesSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:maxDominantColors:DetectLabelsImagePropertiesSettings'
, detectLabelsImagePropertiesSettings_maxDominantColors
- The maximum number of dominant colors to return when detecting labels in
an image. The default value is 10.
detectLabelsImagePropertiesSettings_maxDominantColors :: Lens' DetectLabelsImagePropertiesSettings (Maybe Natural) Source #
The maximum number of dominant colors to return when detecting labels in an image. The default value is 10.
DetectLabelsImageQuality
data DetectLabelsImageQuality Source #
The quality of an image provided for label detection, with regard to brightness, sharpness, and contrast.
See: newDetectLabelsImageQuality
smart constructor.
Instances
newDetectLabelsImageQuality :: DetectLabelsImageQuality Source #
Create a value of DetectLabelsImageQuality
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:brightness:DetectLabelsImageQuality'
, detectLabelsImageQuality_brightness
- The brightness of an image provided for label detection.
$sel:contrast:DetectLabelsImageQuality'
, detectLabelsImageQuality_contrast
- The contrast of an image provided for label detection.
$sel:sharpness:DetectLabelsImageQuality'
, detectLabelsImageQuality_sharpness
- The sharpness of an image provided for label detection.
detectLabelsImageQuality_brightness :: Lens' DetectLabelsImageQuality (Maybe Double) Source #
The brightness of an image provided for label detection.
detectLabelsImageQuality_contrast :: Lens' DetectLabelsImageQuality (Maybe Double) Source #
The contrast of an image provided for label detection.
detectLabelsImageQuality_sharpness :: Lens' DetectLabelsImageQuality (Maybe Double) Source #
The sharpness of an image provided for label detection.
DetectLabelsSettings
data DetectLabelsSettings Source #
Settings for the DetectLabels request. Settings can include filters for both GENERAL_LABELS and IMAGE_PROPERTIES. GENERAL_LABELS filters can be inclusive or exclusive and applied to individual labels or label categories. IMAGE_PROPERTIES filters allow specification of a maximum number of dominant colors.
See: newDetectLabelsSettings
smart constructor.
DetectLabelsSettings' | |
|
Instances
newDetectLabelsSettings :: DetectLabelsSettings Source #
Create a value of DetectLabelsSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:generalLabels:DetectLabelsSettings'
, detectLabelsSettings_generalLabels
- Contains the specified filters for GENERAL_LABELS.
$sel:imageProperties:DetectLabelsSettings'
, detectLabelsSettings_imageProperties
- Contains the chosen number of maximum dominant colors in an image.
detectLabelsSettings_generalLabels :: Lens' DetectLabelsSettings (Maybe GeneralLabelsSettings) Source #
Contains the specified filters for GENERAL_LABELS.
detectLabelsSettings_imageProperties :: Lens' DetectLabelsSettings (Maybe DetectLabelsImagePropertiesSettings) Source #
Contains the chosen number of maximum dominant colors in an image.
DetectTextFilters
data DetectTextFilters Source #
A set of optional parameters that you can use to set the criteria that
the text must meet to be included in your response. WordFilter
looks
at a word’s height, width, and minimum confidence. RegionOfInterest
lets you set a specific region of the image to look for text in.
See: newDetectTextFilters
smart constructor.
DetectTextFilters' | |
|
Instances
newDetectTextFilters :: DetectTextFilters Source #
Create a value of DetectTextFilters
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:regionsOfInterest:DetectTextFilters'
, detectTextFilters_regionsOfInterest
- A Filter focusing on a certain area of the image. Uses a BoundingBox
object to set the region of the image.
$sel:wordFilter:DetectTextFilters'
, detectTextFilters_wordFilter
- Undocumented member.
detectTextFilters_regionsOfInterest :: Lens' DetectTextFilters (Maybe [RegionOfInterest]) Source #
A Filter focusing on a certain area of the image. Uses a BoundingBox
object to set the region of the image.
detectTextFilters_wordFilter :: Lens' DetectTextFilters (Maybe DetectionFilter) Source #
Undocumented member.
DetectionFilter
data DetectionFilter Source #
A set of parameters that allow you to filter out certain results from your returned results.
See: newDetectionFilter
smart constructor.
DetectionFilter' | |
|
Instances
newDetectionFilter :: DetectionFilter Source #
Create a value of DetectionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minBoundingBoxHeight:DetectionFilter'
, detectionFilter_minBoundingBoxHeight
- Sets the minimum height of the word bounding box. Words with bounding
box heights lesser than this value will be excluded from the result.
Value is relative to the video frame height.
$sel:minBoundingBoxWidth:DetectionFilter'
, detectionFilter_minBoundingBoxWidth
- Sets the minimum width of the word bounding box. Words with bounding
boxes widths lesser than this value will be excluded from the result.
Value is relative to the video frame width.
$sel:minConfidence:DetectionFilter'
, detectionFilter_minConfidence
- Sets the confidence of word detection. Words with detection confidence
below this will be excluded from the result. Values should be between 0
and 100. The default MinConfidence is 80.
detectionFilter_minBoundingBoxHeight :: Lens' DetectionFilter (Maybe Double) Source #
Sets the minimum height of the word bounding box. Words with bounding box heights lesser than this value will be excluded from the result. Value is relative to the video frame height.
detectionFilter_minBoundingBoxWidth :: Lens' DetectionFilter (Maybe Double) Source #
Sets the minimum width of the word bounding box. Words with bounding boxes widths lesser than this value will be excluded from the result. Value is relative to the video frame width.
detectionFilter_minConfidence :: Lens' DetectionFilter (Maybe Double) Source #
Sets the confidence of word detection. Words with detection confidence below this will be excluded from the result. Values should be between 0 and 100. The default MinConfidence is 80.
DistributeDataset
data DistributeDataset Source #
A training dataset or a test dataset used in a dataset distribution operation. For more information, see DistributeDatasetEntries.
See: newDistributeDataset
smart constructor.
Instances
Create a value of DistributeDataset
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:arn:DistributeDataset'
, distributeDataset_arn
- The Amazon Resource Name (ARN) of the dataset that you want to use.
distributeDataset_arn :: Lens' DistributeDataset Text Source #
The Amazon Resource Name (ARN) of the dataset that you want to use.
DominantColor
data DominantColor Source #
A description of the dominant colors in an image.
See: newDominantColor
smart constructor.
DominantColor' | |
|
Instances
newDominantColor :: DominantColor Source #
Create a value of DominantColor
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:blue:DominantColor'
, dominantColor_blue
- The Blue RGB value for a dominant color.
$sel:cSSColor:DominantColor'
, dominantColor_cSSColor
- The CSS color name of a dominant color.
$sel:green:DominantColor'
, dominantColor_green
- The Green RGB value for a dominant color.
$sel:hexCode:DominantColor'
, dominantColor_hexCode
- The Hex code equivalent of the RGB values for a dominant color.
$sel:pixelPercent:DominantColor'
, dominantColor_pixelPercent
- The percentage of image pixels that have a given dominant color.
$sel:red:DominantColor'
, dominantColor_red
- The Red RGB value for a dominant color.
$sel:simplifiedColor:DominantColor'
, dominantColor_simplifiedColor
- One of 12 simplified color names applied to a dominant color.
dominantColor_blue :: Lens' DominantColor (Maybe Natural) Source #
The Blue RGB value for a dominant color.
dominantColor_cSSColor :: Lens' DominantColor (Maybe Text) Source #
The CSS color name of a dominant color.
dominantColor_green :: Lens' DominantColor (Maybe Natural) Source #
The Green RGB value for a dominant color.
dominantColor_hexCode :: Lens' DominantColor (Maybe Text) Source #
The Hex code equivalent of the RGB values for a dominant color.
dominantColor_pixelPercent :: Lens' DominantColor (Maybe Double) Source #
The percentage of image pixels that have a given dominant color.
dominantColor_red :: Lens' DominantColor (Maybe Natural) Source #
The Red RGB value for a dominant color.
dominantColor_simplifiedColor :: Lens' DominantColor (Maybe Text) Source #
One of 12 simplified color names applied to a dominant color.
Emotion
The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.
See: newEmotion
smart constructor.
Emotion' | |
|
Instances
FromJSON Emotion Source # | |
Generic Emotion Source # | |
Read Emotion Source # | |
Show Emotion Source # | |
NFData Emotion Source # | |
Defined in Amazonka.Rekognition.Types.Emotion | |
Eq Emotion Source # | |
Hashable Emotion Source # | |
Defined in Amazonka.Rekognition.Types.Emotion | |
type Rep Emotion Source # | |
Defined in Amazonka.Rekognition.Types.Emotion type Rep Emotion = D1 ('MetaData "Emotion" "Amazonka.Rekognition.Types.Emotion" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Emotion'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "type'") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe EmotionName)))) |
newEmotion :: Emotion Source #
Create a value of Emotion
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Emotion'
, emotion_confidence
- Level of confidence in the determination.
$sel:type':Emotion'
, emotion_type
- Type of emotion detected.
emotion_confidence :: Lens' Emotion (Maybe Double) Source #
Level of confidence in the determination.
emotion_type :: Lens' Emotion (Maybe EmotionName) Source #
Type of emotion detected.
EquipmentDetection
data EquipmentDetection Source #
Information about an item of Personal Protective Equipment (PPE) detected by DetectProtectiveEquipment. For more information, see DetectProtectiveEquipment.
See: newEquipmentDetection
smart constructor.
EquipmentDetection' | |
|
Instances
newEquipmentDetection :: EquipmentDetection Source #
Create a value of EquipmentDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:EquipmentDetection'
, equipmentDetection_boundingBox
- A bounding box surrounding the item of detected PPE.
EquipmentDetection
, equipmentDetection_confidence
- The confidence that Amazon Rekognition has that the bounding box
(BoundingBox
) contains an item of PPE.
$sel:coversBodyPart:EquipmentDetection'
, equipmentDetection_coversBodyPart
- Information about the body part covered by the detected PPE.
$sel:type':EquipmentDetection'
, equipmentDetection_type
- The type of detected PPE.
equipmentDetection_boundingBox :: Lens' EquipmentDetection (Maybe BoundingBox) Source #
A bounding box surrounding the item of detected PPE.
equipmentDetection_confidence :: Lens' EquipmentDetection (Maybe Double) Source #
The confidence that Amazon Rekognition has that the bounding box
(BoundingBox
) contains an item of PPE.
equipmentDetection_coversBodyPart :: Lens' EquipmentDetection (Maybe CoversBodyPart) Source #
Information about the body part covered by the detected PPE.
equipmentDetection_type :: Lens' EquipmentDetection (Maybe ProtectiveEquipmentType) Source #
The type of detected PPE.
EvaluationResult
data EvaluationResult Source #
The evaluation results for the training of a model.
See: newEvaluationResult
smart constructor.
EvaluationResult' | |
|
Instances
newEvaluationResult :: EvaluationResult Source #
Create a value of EvaluationResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:f1Score:EvaluationResult'
, evaluationResult_f1Score
- The F1 score for the evaluation of all labels. The F1 score metric
evaluates the overall precision and recall performance of the model as a
single value. A higher value indicates better precision and recall
performance. A lower score indicates that precision, recall, or both are
performing poorly.
$sel:summary:EvaluationResult'
, evaluationResult_summary
- The S3 bucket that contains the training summary.
evaluationResult_f1Score :: Lens' EvaluationResult (Maybe Double) Source #
The F1 score for the evaluation of all labels. The F1 score metric evaluates the overall precision and recall performance of the model as a single value. A higher value indicates better precision and recall performance. A lower score indicates that precision, recall, or both are performing poorly.
evaluationResult_summary :: Lens' EvaluationResult (Maybe Summary) Source #
The S3 bucket that contains the training summary.
EyeOpen
Indicates whether or not the eyes on the face are open, and the confidence level in the determination.
See: newEyeOpen
smart constructor.
Instances
FromJSON EyeOpen Source # | |
Generic EyeOpen Source # | |
Read EyeOpen Source # | |
Show EyeOpen Source # | |
NFData EyeOpen Source # | |
Defined in Amazonka.Rekognition.Types.EyeOpen | |
Eq EyeOpen Source # | |
Hashable EyeOpen Source # | |
Defined in Amazonka.Rekognition.Types.EyeOpen | |
type Rep EyeOpen Source # | |
Defined in Amazonka.Rekognition.Types.EyeOpen type Rep EyeOpen = D1 ('MetaData "EyeOpen" "Amazonka.Rekognition.Types.EyeOpen" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "EyeOpen'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)))) |
newEyeOpen :: EyeOpen Source #
Create a value of EyeOpen
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:EyeOpen'
, eyeOpen_confidence
- Level of confidence in the determination.
$sel:value:EyeOpen'
, eyeOpen_value
- Boolean value that indicates whether the eyes on the face are open.
eyeOpen_confidence :: Lens' EyeOpen (Maybe Double) Source #
Level of confidence in the determination.
eyeOpen_value :: Lens' EyeOpen (Maybe Bool) Source #
Boolean value that indicates whether the eyes on the face are open.
Eyeglasses
data Eyeglasses Source #
Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.
See: newEyeglasses
smart constructor.
Instances
newEyeglasses :: Eyeglasses Source #
Create a value of Eyeglasses
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Eyeglasses'
, eyeglasses_confidence
- Level of confidence in the determination.
$sel:value:Eyeglasses'
, eyeglasses_value
- Boolean value that indicates whether the face is wearing eye glasses or
not.
eyeglasses_confidence :: Lens' Eyeglasses (Maybe Double) Source #
Level of confidence in the determination.
eyeglasses_value :: Lens' Eyeglasses (Maybe Bool) Source #
Boolean value that indicates whether the face is wearing eye glasses or not.
Face
Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.
See: newFace
smart constructor.
Face' | |
|
Instances
Create a value of Face
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:Face'
, face_boundingBox
- Bounding box of the face.
$sel:confidence:Face'
, face_confidence
- Confidence level that the bounding box contains a face (and not a
different object such as a tree).
$sel:externalImageId:Face'
, face_externalImageId
- Identifier that you assign to all the faces in the input image.
$sel:faceId:Face'
, face_faceId
- Unique identifier that Amazon Rekognition assigns to the face.
$sel:imageId:Face'
, face_imageId
- Unique identifier that Amazon Rekognition assigns to the input image.
$sel:indexFacesModelVersion:Face'
, face_indexFacesModelVersion
- The version of the face detect and storage model that was used when
indexing the face vector.
face_boundingBox :: Lens' Face (Maybe BoundingBox) Source #
Bounding box of the face.
face_confidence :: Lens' Face (Maybe Double) Source #
Confidence level that the bounding box contains a face (and not a different object such as a tree).
face_externalImageId :: Lens' Face (Maybe Text) Source #
Identifier that you assign to all the faces in the input image.
face_faceId :: Lens' Face (Maybe Text) Source #
Unique identifier that Amazon Rekognition assigns to the face.
face_imageId :: Lens' Face (Maybe Text) Source #
Unique identifier that Amazon Rekognition assigns to the input image.
face_indexFacesModelVersion :: Lens' Face (Maybe Text) Source #
The version of the face detect and storage model that was used when indexing the face vector.
FaceDetail
data FaceDetail Source #
Structure containing attributes of the face that the algorithm detected.
A FaceDetail
object contains either the default facial attributes or
all facial attributes. The default attributes are BoundingBox
,
Confidence
, Landmarks
, Pose
, and Quality
.
GetFaceDetection is the only Amazon Rekognition Video stored video
operation that can return a FaceDetail
object with all attributes. To
specify which attributes to return, use the FaceAttributes
input
parameter for StartFaceDetection. The following Amazon Rekognition Video
operations return only the default attributes. The corresponding Start
operations don't have a FaceAttributes
input parameter:
- GetCelebrityRecognition
- GetPersonTracking
- GetFaceSearch
The Amazon Rekognition Image DetectFaces and IndexFaces operations can
return all facial attributes. To specify which attributes to return, use
the Attributes
input parameter for DetectFaces
. For IndexFaces
,
use the DetectAttributes
input parameter.
See: newFaceDetail
smart constructor.
FaceDetail' | |
|
Instances
newFaceDetail :: FaceDetail Source #
Create a value of FaceDetail
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:ageRange:FaceDetail'
, faceDetail_ageRange
- The estimated age range, in years, for the face. Low represents the
lowest estimated age and High represents the highest estimated age.
$sel:beard:FaceDetail'
, faceDetail_beard
- Indicates whether or not the face has a beard, and the confidence level
in the determination.
$sel:boundingBox:FaceDetail'
, faceDetail_boundingBox
- Bounding box of the face. Default attribute.
$sel:confidence:FaceDetail'
, faceDetail_confidence
- Confidence level that the bounding box contains a face (and not a
different object such as a tree). Default attribute.
$sel:emotions:FaceDetail'
, faceDetail_emotions
- The emotions that appear to be expressed on the face, and the confidence
level in the determination. The API is only making a determination of
the physical appearance of a person's face. It is not a determination
of the person’s internal emotional state and should not be used in such
a way. For example, a person pretending to have a sad face might not be
sad emotionally.
$sel:eyeglasses:FaceDetail'
, faceDetail_eyeglasses
- Indicates whether or not the face is wearing eye glasses, and the
confidence level in the determination.
$sel:eyesOpen:FaceDetail'
, faceDetail_eyesOpen
- Indicates whether or not the eyes on the face are open, and the
confidence level in the determination.
$sel:gender:FaceDetail'
, faceDetail_gender
- The predicted gender of a detected face.
$sel:landmarks:FaceDetail'
, faceDetail_landmarks
- Indicates the location of landmarks on the face. Default attribute.
$sel:mouthOpen:FaceDetail'
, faceDetail_mouthOpen
- Indicates whether or not the mouth on the face is open, and the
confidence level in the determination.
$sel:mustache:FaceDetail'
, faceDetail_mustache
- Indicates whether or not the face has a mustache, and the confidence
level in the determination.
$sel:pose:FaceDetail'
, faceDetail_pose
- Indicates the pose of the face as determined by its pitch, roll, and
yaw. Default attribute.
$sel:quality:FaceDetail'
, faceDetail_quality
- Identifies image brightness and sharpness. Default attribute.
$sel:smile:FaceDetail'
, faceDetail_smile
- Indicates whether or not the face is smiling, and the confidence level
in the determination.
$sel:sunglasses:FaceDetail'
, faceDetail_sunglasses
- Indicates whether or not the face is wearing sunglasses, and the
confidence level in the determination.
faceDetail_ageRange :: Lens' FaceDetail (Maybe AgeRange) Source #
The estimated age range, in years, for the face. Low represents the lowest estimated age and High represents the highest estimated age.
faceDetail_beard :: Lens' FaceDetail (Maybe Beard) Source #
Indicates whether or not the face has a beard, and the confidence level in the determination.
faceDetail_boundingBox :: Lens' FaceDetail (Maybe BoundingBox) Source #
Bounding box of the face. Default attribute.
faceDetail_confidence :: Lens' FaceDetail (Maybe Double) Source #
Confidence level that the bounding box contains a face (and not a different object such as a tree). Default attribute.
faceDetail_emotions :: Lens' FaceDetail (Maybe [Emotion]) Source #
The emotions that appear to be expressed on the face, and the confidence level in the determination. The API is only making a determination of the physical appearance of a person's face. It is not a determination of the person’s internal emotional state and should not be used in such a way. For example, a person pretending to have a sad face might not be sad emotionally.
faceDetail_eyeglasses :: Lens' FaceDetail (Maybe Eyeglasses) Source #
Indicates whether or not the face is wearing eye glasses, and the confidence level in the determination.
faceDetail_eyesOpen :: Lens' FaceDetail (Maybe EyeOpen) Source #
Indicates whether or not the eyes on the face are open, and the confidence level in the determination.
faceDetail_gender :: Lens' FaceDetail (Maybe Gender) Source #
The predicted gender of a detected face.
faceDetail_landmarks :: Lens' FaceDetail (Maybe [Landmark]) Source #
Indicates the location of landmarks on the face. Default attribute.
faceDetail_mouthOpen :: Lens' FaceDetail (Maybe MouthOpen) Source #
Indicates whether or not the mouth on the face is open, and the confidence level in the determination.
faceDetail_mustache :: Lens' FaceDetail (Maybe Mustache) Source #
Indicates whether or not the face has a mustache, and the confidence level in the determination.
faceDetail_pose :: Lens' FaceDetail (Maybe Pose) Source #
Indicates the pose of the face as determined by its pitch, roll, and yaw. Default attribute.
faceDetail_quality :: Lens' FaceDetail (Maybe ImageQuality) Source #
Identifies image brightness and sharpness. Default attribute.
faceDetail_smile :: Lens' FaceDetail (Maybe Smile) Source #
Indicates whether or not the face is smiling, and the confidence level in the determination.
faceDetail_sunglasses :: Lens' FaceDetail (Maybe Sunglasses) Source #
Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.
FaceDetection
data FaceDetection Source #
Information about a face detected in a video analysis request and the time the face was detected in the video.
See: newFaceDetection
smart constructor.
Instances
newFaceDetection :: FaceDetection Source #
Create a value of FaceDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:face:FaceDetection'
, faceDetection_face
- The face properties for the detected face.
$sel:timestamp:FaceDetection'
, faceDetection_timestamp
- Time, in milliseconds from the start of the video, that the face was
detected. Note that Timestamp
is not guaranteed to be accurate to the
individual frame where the face first appears.
faceDetection_face :: Lens' FaceDetection (Maybe FaceDetail) Source #
The face properties for the detected face.
faceDetection_timestamp :: Lens' FaceDetection (Maybe Integer) Source #
Time, in milliseconds from the start of the video, that the face was
detected. Note that Timestamp
is not guaranteed to be accurate to the
individual frame where the face first appears.
FaceMatch
Provides face metadata. In addition, it also provides the confidence in the match of this face with the input face.
See: newFaceMatch
smart constructor.
Instances
FromJSON FaceMatch Source # | |
Generic FaceMatch Source # | |
Read FaceMatch Source # | |
Show FaceMatch Source # | |
NFData FaceMatch Source # | |
Defined in Amazonka.Rekognition.Types.FaceMatch | |
Eq FaceMatch Source # | |
Hashable FaceMatch Source # | |
Defined in Amazonka.Rekognition.Types.FaceMatch | |
type Rep FaceMatch Source # | |
Defined in Amazonka.Rekognition.Types.FaceMatch type Rep FaceMatch = D1 ('MetaData "FaceMatch" "Amazonka.Rekognition.Types.FaceMatch" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "FaceMatch'" 'PrefixI 'True) (S1 ('MetaSel ('Just "face") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Face)) :*: S1 ('MetaSel ('Just "similarity") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newFaceMatch :: FaceMatch Source #
Create a value of FaceMatch
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:face:FaceMatch'
, faceMatch_face
- Describes the face properties such as the bounding box, face ID, image
ID of the source image, and external image ID that you assigned.
$sel:similarity:FaceMatch'
, faceMatch_similarity
- Confidence in the match of this face with the input face.
faceMatch_face :: Lens' FaceMatch (Maybe Face) Source #
Describes the face properties such as the bounding box, face ID, image ID of the source image, and external image ID that you assigned.
faceMatch_similarity :: Lens' FaceMatch (Maybe Double) Source #
Confidence in the match of this face with the input face.
FaceRecord
data FaceRecord Source #
Object containing both the face metadata (stored in the backend database), and facial attributes that are detected but aren't stored in the database.
See: newFaceRecord
smart constructor.
FaceRecord' | |
|
Instances
newFaceRecord :: FaceRecord Source #
Create a value of FaceRecord
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:face:FaceRecord'
, faceRecord_face
- Describes the face properties such as the bounding box, face ID, image
ID of the input image, and external image ID that you assigned.
$sel:faceDetail:FaceRecord'
, faceRecord_faceDetail
- Structure containing attributes of the face that the algorithm detected.
faceRecord_face :: Lens' FaceRecord (Maybe Face) Source #
Describes the face properties such as the bounding box, face ID, image ID of the input image, and external image ID that you assigned.
faceRecord_faceDetail :: Lens' FaceRecord (Maybe FaceDetail) Source #
Structure containing attributes of the face that the algorithm detected.
FaceSearchSettings
data FaceSearchSettings Source #
Input face recognition parameters for an Amazon Rekognition stream processor. Includes the collection to use for face recognition and the face attributes to detect. Defining the settings is required in the request parameter for CreateStreamProcessor.
See: newFaceSearchSettings
smart constructor.
FaceSearchSettings' | |
|
Instances
newFaceSearchSettings :: FaceSearchSettings Source #
Create a value of FaceSearchSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:collectionId:FaceSearchSettings'
, faceSearchSettings_collectionId
- The ID of a collection that contains faces that you want to search for.
$sel:faceMatchThreshold:FaceSearchSettings'
, faceSearchSettings_faceMatchThreshold
- Minimum face match confidence score that must be met to return a result
for a recognized face. The default is 80. 0 is the lowest confidence.
100 is the highest confidence. Values between 0 and 100 are accepted,
and values lower than 80 are set to 80.
faceSearchSettings_collectionId :: Lens' FaceSearchSettings (Maybe Text) Source #
The ID of a collection that contains faces that you want to search for.
faceSearchSettings_faceMatchThreshold :: Lens' FaceSearchSettings (Maybe Double) Source #
Minimum face match confidence score that must be met to return a result for a recognized face. The default is 80. 0 is the lowest confidence. 100 is the highest confidence. Values between 0 and 100 are accepted, and values lower than 80 are set to 80.
Gender
The predicted gender of a detected face.
Amazon Rekognition makes gender binary (male/female) predictions based on the physical appearance of a face in a particular image. This kind of prediction is not designed to categorize a person’s gender identity, and you shouldn't use Amazon Rekognition to make such a determination. For example, a male actor wearing a long-haired wig and earrings for a role might be predicted as female.
Using Amazon Rekognition to make gender binary predictions is best suited for use cases where aggregate gender distribution statistics need to be analyzed without identifying specific users. For example, the percentage of female users compared to male users on a social media platform.
We don't recommend using gender binary predictions to make decisions that impact an individual's rights, privacy, or access to services.
See: newGender
smart constructor.
Gender' | |
|
Instances
FromJSON Gender Source # | |
Generic Gender Source # | |
Read Gender Source # | |
Show Gender Source # | |
NFData Gender Source # | |
Defined in Amazonka.Rekognition.Types.Gender | |
Eq Gender Source # | |
Hashable Gender Source # | |
Defined in Amazonka.Rekognition.Types.Gender | |
type Rep Gender Source # | |
Defined in Amazonka.Rekognition.Types.Gender type Rep Gender = D1 ('MetaData "Gender" "Amazonka.Rekognition.Types.Gender" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Gender'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe GenderType)))) |
Create a value of Gender
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Gender'
, gender_confidence
- Level of confidence in the prediction.
$sel:value:Gender'
, gender_value
- The predicted gender of the face.
gender_value :: Lens' Gender (Maybe GenderType) Source #
The predicted gender of the face.
GeneralLabelsSettings
data GeneralLabelsSettings Source #
Contains filters for the object labels returned by DetectLabels. Filters can be inclusive, exclusive, or a combination of both and can be applied to individual labels or entire label categories.
See: newGeneralLabelsSettings
smart constructor.
GeneralLabelsSettings' | |
|
Instances
newGeneralLabelsSettings :: GeneralLabelsSettings Source #
Create a value of GeneralLabelsSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:labelCategoryExclusionFilters:GeneralLabelsSettings'
, generalLabelsSettings_labelCategoryExclusionFilters
- The label categories that should be excluded from the return from
DetectLabels.
$sel:labelCategoryInclusionFilters:GeneralLabelsSettings'
, generalLabelsSettings_labelCategoryInclusionFilters
- The label categories that should be included in the return from
DetectLabels.
$sel:labelExclusionFilters:GeneralLabelsSettings'
, generalLabelsSettings_labelExclusionFilters
- The labels that should be excluded from the return from DetectLabels.
$sel:labelInclusionFilters:GeneralLabelsSettings'
, generalLabelsSettings_labelInclusionFilters
- The labels that should be included in the return from DetectLabels.
generalLabelsSettings_labelCategoryExclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text]) Source #
The label categories that should be excluded from the return from DetectLabels.
generalLabelsSettings_labelCategoryInclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text]) Source #
The label categories that should be included in the return from DetectLabels.
generalLabelsSettings_labelExclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text]) Source #
The labels that should be excluded from the return from DetectLabels.
generalLabelsSettings_labelInclusionFilters :: Lens' GeneralLabelsSettings (Maybe [Text]) Source #
The labels that should be included in the return from DetectLabels.
Geometry
Information about where an object (DetectCustomLabels) or text (DetectText) is located on an image.
See: newGeometry
smart constructor.
Geometry' | |
|
Instances
FromJSON Geometry Source # | |
Generic Geometry Source # | |
Read Geometry Source # | |
Show Geometry Source # | |
NFData Geometry Source # | |
Defined in Amazonka.Rekognition.Types.Geometry | |
Eq Geometry Source # | |
Hashable Geometry Source # | |
Defined in Amazonka.Rekognition.Types.Geometry | |
type Rep Geometry Source # | |
Defined in Amazonka.Rekognition.Types.Geometry type Rep Geometry = D1 ('MetaData "Geometry" "Amazonka.Rekognition.Types.Geometry" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Geometry'" 'PrefixI 'True) (S1 ('MetaSel ('Just "boundingBox") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe BoundingBox)) :*: S1 ('MetaSel ('Just "polygon") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [Point])))) |
newGeometry :: Geometry Source #
Create a value of Geometry
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:Geometry'
, geometry_boundingBox
- An axis-aligned coarse representation of the detected item's location
on the image.
$sel:polygon:Geometry'
, geometry_polygon
- Within the bounding box, a fine-grained polygon around the detected
item.
geometry_boundingBox :: Lens' Geometry (Maybe BoundingBox) Source #
An axis-aligned coarse representation of the detected item's location on the image.
geometry_polygon :: Lens' Geometry (Maybe [Point]) Source #
Within the bounding box, a fine-grained polygon around the detected item.
GroundTruthManifest
data GroundTruthManifest Source #
The S3 bucket that contains an Amazon Sagemaker Ground Truth format manifest file.
See: newGroundTruthManifest
smart constructor.
Instances
newGroundTruthManifest :: GroundTruthManifest Source #
Create a value of GroundTruthManifest
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:GroundTruthManifest'
, groundTruthManifest_s3Object
- Undocumented member.
groundTruthManifest_s3Object :: Lens' GroundTruthManifest (Maybe S3Object) Source #
Undocumented member.
HumanLoopActivationOutput
data HumanLoopActivationOutput Source #
Shows the results of the human in the loop evaluation. If there is no HumanLoopArn, the input did not trigger human review.
See: newHumanLoopActivationOutput
smart constructor.
HumanLoopActivationOutput' | |
|
Instances
newHumanLoopActivationOutput :: HumanLoopActivationOutput Source #
Create a value of HumanLoopActivationOutput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:humanLoopActivationConditionsEvaluationResults:HumanLoopActivationOutput'
, humanLoopActivationOutput_humanLoopActivationConditionsEvaluationResults
- Shows the result of condition evaluations, including those conditions
which activated a human review.
$sel:humanLoopActivationReasons:HumanLoopActivationOutput'
, humanLoopActivationOutput_humanLoopActivationReasons
- Shows if and why human review was needed.
$sel:humanLoopArn:HumanLoopActivationOutput'
, humanLoopActivationOutput_humanLoopArn
- The Amazon Resource Name (ARN) of the HumanLoop created.
humanLoopActivationOutput_humanLoopActivationConditionsEvaluationResults :: Lens' HumanLoopActivationOutput (Maybe Text) Source #
Shows the result of condition evaluations, including those conditions which activated a human review.
humanLoopActivationOutput_humanLoopActivationReasons :: Lens' HumanLoopActivationOutput (Maybe (NonEmpty Text)) Source #
Shows if and why human review was needed.
humanLoopActivationOutput_humanLoopArn :: Lens' HumanLoopActivationOutput (Maybe Text) Source #
The Amazon Resource Name (ARN) of the HumanLoop created.
HumanLoopConfig
data HumanLoopConfig Source #
Sets up the flow definition the image will be sent to if one of the conditions is met. You can also set certain attributes of the image before review.
See: newHumanLoopConfig
smart constructor.
HumanLoopConfig' | |
|
Instances
:: Text | |
-> Text | |
-> HumanLoopConfig |
Create a value of HumanLoopConfig
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:dataAttributes:HumanLoopConfig'
, humanLoopConfig_dataAttributes
- Sets attributes of the input data.
$sel:humanLoopName:HumanLoopConfig'
, humanLoopConfig_humanLoopName
- The name of the human review used for this image. This should be kept
unique within a region.
$sel:flowDefinitionArn:HumanLoopConfig'
, humanLoopConfig_flowDefinitionArn
- The Amazon Resource Name (ARN) of the flow definition. You can create a
flow definition by using the Amazon Sagemaker
CreateFlowDefinition
Operation.
humanLoopConfig_dataAttributes :: Lens' HumanLoopConfig (Maybe HumanLoopDataAttributes) Source #
Sets attributes of the input data.
humanLoopConfig_humanLoopName :: Lens' HumanLoopConfig Text Source #
The name of the human review used for this image. This should be kept unique within a region.
humanLoopConfig_flowDefinitionArn :: Lens' HumanLoopConfig Text Source #
The Amazon Resource Name (ARN) of the flow definition. You can create a flow definition by using the Amazon Sagemaker CreateFlowDefinition Operation.
HumanLoopDataAttributes
data HumanLoopDataAttributes Source #
Allows you to set attributes of the image. Currently, you can declare an image as free of personally identifiable information.
See: newHumanLoopDataAttributes
smart constructor.
HumanLoopDataAttributes' | |
|
Instances
newHumanLoopDataAttributes :: HumanLoopDataAttributes Source #
Create a value of HumanLoopDataAttributes
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:contentClassifiers:HumanLoopDataAttributes'
, humanLoopDataAttributes_contentClassifiers
- Sets whether the input image is free of personally identifiable
information.
humanLoopDataAttributes_contentClassifiers :: Lens' HumanLoopDataAttributes (Maybe [ContentClassifier]) Source #
Sets whether the input image is free of personally identifiable information.
Image
Provides the input image either as bytes or an S3 object.
You pass image bytes to an Amazon Rekognition API operation by using the
Bytes
property. For example, you would use the Bytes
property to
pass an image loaded from a local file system. Image bytes passed by
using the Bytes
property must be base64-encoded. Your code may not
need to encode image bytes if you are using an AWS SDK to call Amazon
Rekognition API operations.
For more information, see Analyzing an Image Loaded from a Local File System in the Amazon Rekognition Developer Guide.
You pass images stored in an S3 bucket to an Amazon Rekognition API
operation by using the S3Object
property. Images stored in an S3
bucket do not need to be base64-encoded.
The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.
If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and then call the operation using the S3Object property.
For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see How Amazon Rekognition works with IAM in the Amazon Rekognition Developer Guide.
See: newImage
smart constructor.
Instances
ToJSON Image Source # | |
Defined in Amazonka.Rekognition.Types.Image | |
Generic Image Source # | |
Read Image Source # | |
Show Image Source # | |
NFData Image Source # | |
Defined in Amazonka.Rekognition.Types.Image | |
Eq Image Source # | |
Hashable Image Source # | |
Defined in Amazonka.Rekognition.Types.Image | |
type Rep Image Source # | |
Defined in Amazonka.Rekognition.Types.Image type Rep Image = D1 ('MetaData "Image" "Amazonka.Rekognition.Types.Image" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Image'" 'PrefixI 'True) (S1 ('MetaSel ('Just "bytes") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Base64)) :*: S1 ('MetaSel ('Just "s3Object") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe S3Object)))) |
Create a value of Image
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:bytes:Image'
, image_bytes
- Blob of image bytes up to 5 MBs.
Note: This Lens
automatically encodes and decodes Base64 data.
The underlying isomorphism will encode to Base64 representation during
serialisation, and decode from Base64 representation during deserialisation.
This Lens
accepts and returns only raw unencoded data.
$sel:s3Object:Image'
, image_s3Object
- Identifies an S3 object as the image source.
image_bytes :: Lens' Image (Maybe ByteString) Source #
Blob of image bytes up to 5 MBs.
Note: This Lens
automatically encodes and decodes Base64 data.
The underlying isomorphism will encode to Base64 representation during
serialisation, and decode from Base64 representation during deserialisation.
This Lens
accepts and returns only raw unencoded data.
ImageQuality
data ImageQuality Source #
Identifies face image brightness and sharpness.
See: newImageQuality
smart constructor.
ImageQuality' | |
|
Instances
newImageQuality :: ImageQuality Source #
Create a value of ImageQuality
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:brightness:ImageQuality'
, imageQuality_brightness
- Value representing brightness of the face. The service returns a value
between 0 and 100 (inclusive). A higher value indicates a brighter face
image.
$sel:sharpness:ImageQuality'
, imageQuality_sharpness
- Value representing sharpness of the face. The service returns a value
between 0 and 100 (inclusive). A higher value indicates a sharper face
image.
imageQuality_brightness :: Lens' ImageQuality (Maybe Double) Source #
Value representing brightness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a brighter face image.
imageQuality_sharpness :: Lens' ImageQuality (Maybe Double) Source #
Value representing sharpness of the face. The service returns a value between 0 and 100 (inclusive). A higher value indicates a sharper face image.
Instance
An instance of a label returned by Amazon Rekognition Image (DetectLabels) or by Amazon Rekognition Video (GetLabelDetection).
See: newInstance
smart constructor.
Instance' | |
|
Instances
FromJSON Instance Source # | |
Generic Instance Source # | |
Read Instance Source # | |
Show Instance Source # | |
NFData Instance Source # | |
Defined in Amazonka.Rekognition.Types.Instance | |
Eq Instance Source # | |
Hashable Instance Source # | |
Defined in Amazonka.Rekognition.Types.Instance | |
type Rep Instance Source # | |
Defined in Amazonka.Rekognition.Types.Instance type Rep Instance = D1 ('MetaData "Instance" "Amazonka.Rekognition.Types.Instance" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Instance'" 'PrefixI 'True) (S1 ('MetaSel ('Just "boundingBox") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe BoundingBox)) :*: (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "dominantColors") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe [DominantColor]))))) |
newInstance :: Instance Source #
Create a value of Instance
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:Instance'
, instance_boundingBox
- The position of the label instance on the image.
$sel:confidence:Instance'
, instance_confidence
- The confidence that Amazon Rekognition has in the accuracy of the
bounding box.
$sel:dominantColors:Instance'
, instance_dominantColors
- The dominant colors found in an individual instance of a label.
instance_boundingBox :: Lens' Instance (Maybe BoundingBox) Source #
The position of the label instance on the image.
instance_confidence :: Lens' Instance (Maybe Double) Source #
The confidence that Amazon Rekognition has in the accuracy of the bounding box.
instance_dominantColors :: Lens' Instance (Maybe [DominantColor]) Source #
The dominant colors found in an individual instance of a label.
KinesisDataStream
data KinesisDataStream Source #
The Kinesis data stream Amazon Rekognition to which the analysis results of an Amazon Rekognition stream processor are streamed. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
See: newKinesisDataStream
smart constructor.
Instances
newKinesisDataStream :: KinesisDataStream Source #
Create a value of KinesisDataStream
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:arn:KinesisDataStream'
, kinesisDataStream_arn
- ARN of the output Amazon Kinesis Data Streams stream.
kinesisDataStream_arn :: Lens' KinesisDataStream (Maybe Text) Source #
ARN of the output Amazon Kinesis Data Streams stream.
KinesisVideoStream
data KinesisVideoStream Source #
Kinesis video stream stream that provides the source streaming video for a Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
See: newKinesisVideoStream
smart constructor.
Instances
newKinesisVideoStream :: KinesisVideoStream Source #
Create a value of KinesisVideoStream
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:arn:KinesisVideoStream'
, kinesisVideoStream_arn
- ARN of the Kinesis video stream stream that streams the source video.
kinesisVideoStream_arn :: Lens' KinesisVideoStream (Maybe Text) Source #
ARN of the Kinesis video stream stream that streams the source video.
KinesisVideoStreamStartSelector
data KinesisVideoStreamStartSelector Source #
Specifies the starting point in a Kinesis stream to start processing. You can use the producer timestamp or the fragment number. One of either producer timestamp or fragment number is required. If you use the producer timestamp, you must put the time in milliseconds. For more information about fragment numbers, see Fragment.
See: newKinesisVideoStreamStartSelector
smart constructor.
KinesisVideoStreamStartSelector' | |
|
Instances
ToJSON KinesisVideoStreamStartSelector Source # | |
Generic KinesisVideoStreamStartSelector Source # | |
Read KinesisVideoStreamStartSelector Source # | |
Show KinesisVideoStreamStartSelector Source # | |
NFData KinesisVideoStreamStartSelector Source # | |
Eq KinesisVideoStreamStartSelector Source # | |
Hashable KinesisVideoStreamStartSelector Source # | |
type Rep KinesisVideoStreamStartSelector Source # | |
Defined in Amazonka.Rekognition.Types.KinesisVideoStreamStartSelector type Rep KinesisVideoStreamStartSelector = D1 ('MetaData "KinesisVideoStreamStartSelector" "Amazonka.Rekognition.Types.KinesisVideoStreamStartSelector" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "KinesisVideoStreamStartSelector'" 'PrefixI 'True) (S1 ('MetaSel ('Just "fragmentNumber") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "producerTimestamp") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)))) |
newKinesisVideoStreamStartSelector :: KinesisVideoStreamStartSelector Source #
Create a value of KinesisVideoStreamStartSelector
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:fragmentNumber:KinesisVideoStreamStartSelector'
, kinesisVideoStreamStartSelector_fragmentNumber
- The unique identifier of the fragment. This value monotonically
increases based on the ingestion order.
$sel:producerTimestamp:KinesisVideoStreamStartSelector'
, kinesisVideoStreamStartSelector_producerTimestamp
- The timestamp from the producer corresponding to the fragment, in
milliseconds, expressed in unix time format.
kinesisVideoStreamStartSelector_fragmentNumber :: Lens' KinesisVideoStreamStartSelector (Maybe Text) Source #
The unique identifier of the fragment. This value monotonically increases based on the ingestion order.
kinesisVideoStreamStartSelector_producerTimestamp :: Lens' KinesisVideoStreamStartSelector (Maybe Natural) Source #
The timestamp from the producer corresponding to the fragment, in milliseconds, expressed in unix time format.
KnownGender
data KnownGender Source #
The known gender identity for the celebrity that matches the provided ID. The known gender identity can be Male, Female, Nonbinary, or Unlisted.
See: newKnownGender
smart constructor.
KnownGender' | |
|
Instances
newKnownGender :: KnownGender Source #
Create a value of KnownGender
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:type':KnownGender'
, knownGender_type
- A string value of the KnownGender info about the Celebrity.
knownGender_type :: Lens' KnownGender (Maybe KnownGenderType) Source #
A string value of the KnownGender info about the Celebrity.
Label
Structure containing details about the detected label, including the name, detected instances, parent labels, and level of confidence.
See: newLabel
smart constructor.
Label' | |
|
Instances
Create a value of Label
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:aliases:Label'
, label_aliases
- A list of potential aliases for a given label.
$sel:categories:Label'
, label_categories
- A list of the categories associated with a given label.
$sel:confidence:Label'
, label_confidence
- Level of confidence.
$sel:instances:Label'
, label_instances
- If Label
represents an object, Instances
contains the bounding boxes
for each instance of the detected object. Bounding boxes are returned
for common object labels such as people, cars, furniture, apparel or
pets.
$sel:name:Label'
, label_name
- The name (label) of the object or scene.
$sel:parents:Label'
, label_parents
- The parent labels for a label. The response includes all ancestor
labels.
label_aliases :: Lens' Label (Maybe [LabelAlias]) Source #
A list of potential aliases for a given label.
label_categories :: Lens' Label (Maybe [LabelCategory]) Source #
A list of the categories associated with a given label.
label_instances :: Lens' Label (Maybe [Instance]) Source #
If Label
represents an object, Instances
contains the bounding boxes
for each instance of the detected object. Bounding boxes are returned
for common object labels such as people, cars, furniture, apparel or
pets.
label_parents :: Lens' Label (Maybe [Parent]) Source #
The parent labels for a label. The response includes all ancestor labels.
LabelAlias
data LabelAlias Source #
A potential alias for a given label.
See: newLabelAlias
smart constructor.
Instances
newLabelAlias :: LabelAlias Source #
Create a value of LabelAlias
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:name:LabelAlias'
, labelAlias_name
- The name of an alias for a given label.
labelAlias_name :: Lens' LabelAlias (Maybe Text) Source #
The name of an alias for a given label.
LabelCategory
data LabelCategory Source #
The category that applies to a given label.
See: newLabelCategory
smart constructor.
Instances
newLabelCategory :: LabelCategory Source #
Create a value of LabelCategory
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:name:LabelCategory'
, labelCategory_name
- The name of a category that applies to a given label.
labelCategory_name :: Lens' LabelCategory (Maybe Text) Source #
The name of a category that applies to a given label.
LabelDetection
data LabelDetection Source #
Information about a label detected in a video analysis request and the time the label was detected in the video.
See: newLabelDetection
smart constructor.
LabelDetection' | |
|
Instances
newLabelDetection :: LabelDetection Source #
Create a value of LabelDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:durationMillis:LabelDetection'
, labelDetection_durationMillis
- The time duration of a segment in milliseconds, i.e. time elapsed from
StartTimestampMillis to EndTimestampMillis.
$sel:endTimestampMillis:LabelDetection'
, labelDetection_endTimestampMillis
- The time in milliseconds defining the end of the timeline segment
containing a continuously detected label.
$sel:label:LabelDetection'
, labelDetection_label
- Details about the detected label.
$sel:startTimestampMillis:LabelDetection'
, labelDetection_startTimestampMillis
- The time in milliseconds defining the start of the timeline segment
containing a continuously detected label.
$sel:timestamp:LabelDetection'
, labelDetection_timestamp
- Time, in milliseconds from the start of the video, that the label was
detected. Note that Timestamp
is not guaranteed to be accurate to the
individual frame where the label first appears.
labelDetection_durationMillis :: Lens' LabelDetection (Maybe Natural) Source #
The time duration of a segment in milliseconds, i.e. time elapsed from StartTimestampMillis to EndTimestampMillis.
labelDetection_endTimestampMillis :: Lens' LabelDetection (Maybe Natural) Source #
The time in milliseconds defining the end of the timeline segment containing a continuously detected label.
labelDetection_label :: Lens' LabelDetection (Maybe Label) Source #
Details about the detected label.
labelDetection_startTimestampMillis :: Lens' LabelDetection (Maybe Natural) Source #
The time in milliseconds defining the start of the timeline segment containing a continuously detected label.
labelDetection_timestamp :: Lens' LabelDetection (Maybe Integer) Source #
Time, in milliseconds from the start of the video, that the label was
detected. Note that Timestamp
is not guaranteed to be accurate to the
individual frame where the label first appears.
LabelDetectionSettings
data LabelDetectionSettings Source #
Contains the specified filters that should be applied to a list of returned GENERAL_LABELS.
See: newLabelDetectionSettings
smart constructor.
Instances
newLabelDetectionSettings :: LabelDetectionSettings Source #
Create a value of LabelDetectionSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:generalLabels:LabelDetectionSettings'
, labelDetectionSettings_generalLabels
- Undocumented member.
labelDetectionSettings_generalLabels :: Lens' LabelDetectionSettings (Maybe GeneralLabelsSettings) Source #
Undocumented member.
Landmark
Indicates the location of the landmark on the face.
See: newLandmark
smart constructor.
Landmark' | |
|
Instances
FromJSON Landmark Source # | |
Generic Landmark Source # | |
Read Landmark Source # | |
Show Landmark Source # | |
NFData Landmark Source # | |
Defined in Amazonka.Rekognition.Types.Landmark | |
Eq Landmark Source # | |
Hashable Landmark Source # | |
Defined in Amazonka.Rekognition.Types.Landmark | |
type Rep Landmark Source # | |
Defined in Amazonka.Rekognition.Types.Landmark type Rep Landmark = D1 ('MetaData "Landmark" "Amazonka.Rekognition.Types.Landmark" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Landmark'" 'PrefixI 'True) (S1 ('MetaSel ('Just "type'") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe LandmarkType)) :*: (S1 ('MetaSel ('Just "x") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "y") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double))))) |
newLandmark :: Landmark Source #
Create a value of Landmark
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:type':Landmark'
, landmark_type
- Type of landmark.
$sel:x:Landmark'
, landmark_x
- The x-coordinate of the landmark expressed as a ratio of the width of
the image. The x-coordinate is measured from the left-side of the image.
For example, if the image is 700 pixels wide and the x-coordinate of the
landmark is at 350 pixels, this value is 0.5.
$sel:y:Landmark'
, landmark_y
- The y-coordinate of the landmark expressed as a ratio of the height of
the image. The y-coordinate is measured from the top of the image. For
example, if the image height is 200 pixels and the y-coordinate of the
landmark is at 50 pixels, this value is 0.25.
landmark_type :: Lens' Landmark (Maybe LandmarkType) Source #
Type of landmark.
landmark_x :: Lens' Landmark (Maybe Double) Source #
The x-coordinate of the landmark expressed as a ratio of the width of the image. The x-coordinate is measured from the left-side of the image. For example, if the image is 700 pixels wide and the x-coordinate of the landmark is at 350 pixels, this value is 0.5.
landmark_y :: Lens' Landmark (Maybe Double) Source #
The y-coordinate of the landmark expressed as a ratio of the height of the image. The y-coordinate is measured from the top of the image. For example, if the image height is 200 pixels and the y-coordinate of the landmark is at 50 pixels, this value is 0.25.
ModerationLabel
data ModerationLabel Source #
Provides information about a single type of inappropriate, unwanted, or offensive content found in an image or video. Each type of moderated content has a label within a hierarchical taxonomy. For more information, see Content moderation in the Amazon Rekognition Developer Guide.
See: newModerationLabel
smart constructor.
ModerationLabel' | |
|
Instances
newModerationLabel :: ModerationLabel Source #
Create a value of ModerationLabel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:ModerationLabel'
, moderationLabel_confidence
- Specifies the confidence that Amazon Rekognition has that the label has
been correctly identified.
If you don't specify the MinConfidence
parameter in the call to
DetectModerationLabels
, the operation returns labels with a confidence
value greater than or equal to 50 percent.
$sel:name:ModerationLabel'
, moderationLabel_name
- The label name for the type of unsafe content detected in the image.
$sel:parentName:ModerationLabel'
, moderationLabel_parentName
- The name for the parent label. Labels at the top level of the hierarchy
have the parent label ""
.
moderationLabel_confidence :: Lens' ModerationLabel (Maybe Double) Source #
Specifies the confidence that Amazon Rekognition has that the label has been correctly identified.
If you don't specify the MinConfidence
parameter in the call to
DetectModerationLabels
, the operation returns labels with a confidence
value greater than or equal to 50 percent.
moderationLabel_name :: Lens' ModerationLabel (Maybe Text) Source #
The label name for the type of unsafe content detected in the image.
moderationLabel_parentName :: Lens' ModerationLabel (Maybe Text) Source #
The name for the parent label. Labels at the top level of the hierarchy
have the parent label ""
.
MouthOpen
Indicates whether or not the mouth on the face is open, and the confidence level in the determination.
See: newMouthOpen
smart constructor.
Instances
FromJSON MouthOpen Source # | |
Generic MouthOpen Source # | |
Read MouthOpen Source # | |
Show MouthOpen Source # | |
NFData MouthOpen Source # | |
Defined in Amazonka.Rekognition.Types.MouthOpen | |
Eq MouthOpen Source # | |
Hashable MouthOpen Source # | |
Defined in Amazonka.Rekognition.Types.MouthOpen | |
type Rep MouthOpen Source # | |
Defined in Amazonka.Rekognition.Types.MouthOpen type Rep MouthOpen = D1 ('MetaData "MouthOpen" "Amazonka.Rekognition.Types.MouthOpen" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "MouthOpen'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)))) |
newMouthOpen :: MouthOpen Source #
Create a value of MouthOpen
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:MouthOpen'
, mouthOpen_confidence
- Level of confidence in the determination.
$sel:value:MouthOpen'
, mouthOpen_value
- Boolean value that indicates whether the mouth on the face is open or
not.
mouthOpen_confidence :: Lens' MouthOpen (Maybe Double) Source #
Level of confidence in the determination.
mouthOpen_value :: Lens' MouthOpen (Maybe Bool) Source #
Boolean value that indicates whether the mouth on the face is open or not.
Mustache
Indicates whether or not the face has a mustache, and the confidence level in the determination.
See: newMustache
smart constructor.
Instances
FromJSON Mustache Source # | |
Generic Mustache Source # | |
Read Mustache Source # | |
Show Mustache Source # | |
NFData Mustache Source # | |
Defined in Amazonka.Rekognition.Types.Mustache | |
Eq Mustache Source # | |
Hashable Mustache Source # | |
Defined in Amazonka.Rekognition.Types.Mustache | |
type Rep Mustache Source # | |
Defined in Amazonka.Rekognition.Types.Mustache type Rep Mustache = D1 ('MetaData "Mustache" "Amazonka.Rekognition.Types.Mustache" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Mustache'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)))) |
newMustache :: Mustache Source #
Create a value of Mustache
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Mustache'
, mustache_confidence
- Level of confidence in the determination.
$sel:value:Mustache'
, mustache_value
- Boolean value that indicates whether the face has a mustache or not.
mustache_confidence :: Lens' Mustache (Maybe Double) Source #
Level of confidence in the determination.
mustache_value :: Lens' Mustache (Maybe Bool) Source #
Boolean value that indicates whether the face has a mustache or not.
NotificationChannel
data NotificationChannel Source #
The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see Calling Amazon Rekognition Video operations. Note that the Amazon SNS topic must have a topic name that begins with AmazonRekognition if you are using the AmazonRekognitionServiceRole permissions policy to access the topic. For more information, see Giving access to multiple Amazon SNS topics.
See: newNotificationChannel
smart constructor.
NotificationChannel' | |
|
Instances
newNotificationChannel Source #
:: Text | |
-> Text | |
-> NotificationChannel |
Create a value of NotificationChannel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:sNSTopicArn:NotificationChannel'
, notificationChannel_sNSTopicArn
- The Amazon SNS topic to which Amazon Rekognition posts the completion
status.
$sel:roleArn:NotificationChannel'
, notificationChannel_roleArn
- The ARN of an IAM role that gives Amazon Rekognition publishing
permissions to the Amazon SNS topic.
notificationChannel_sNSTopicArn :: Lens' NotificationChannel Text Source #
The Amazon SNS topic to which Amazon Rekognition posts the completion status.
notificationChannel_roleArn :: Lens' NotificationChannel Text Source #
The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic.
OutputConfig
data OutputConfig Source #
The S3 bucket and folder location where training output is placed.
See: newOutputConfig
smart constructor.
Instances
newOutputConfig :: OutputConfig Source #
Create a value of OutputConfig
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Bucket:OutputConfig'
, outputConfig_s3Bucket
- The S3 bucket where training output is placed.
$sel:s3KeyPrefix:OutputConfig'
, outputConfig_s3KeyPrefix
- The prefix applied to the training output files.
outputConfig_s3Bucket :: Lens' OutputConfig (Maybe Text) Source #
The S3 bucket where training output is placed.
outputConfig_s3KeyPrefix :: Lens' OutputConfig (Maybe Text) Source #
The prefix applied to the training output files.
Parent
A parent label for a label. A label can have 0, 1, or more parents.
See: newParent
smart constructor.
Instances
FromJSON Parent Source # | |
Generic Parent Source # | |
Read Parent Source # | |
Show Parent Source # | |
NFData Parent Source # | |
Defined in Amazonka.Rekognition.Types.Parent | |
Eq Parent Source # | |
Hashable Parent Source # | |
Defined in Amazonka.Rekognition.Types.Parent | |
type Rep Parent Source # | |
Defined in Amazonka.Rekognition.Types.Parent |
Create a value of Parent
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:name:Parent'
, parent_name
- The name of the parent label.
PersonDetail
data PersonDetail Source #
Details about a person detected in a video analysis request.
See: newPersonDetail
smart constructor.
PersonDetail' | |
|
Instances
newPersonDetail :: PersonDetail Source #
Create a value of PersonDetail
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:PersonDetail'
, personDetail_boundingBox
- Bounding box around the detected person.
$sel:face:PersonDetail'
, personDetail_face
- Face details for the detected person.
$sel:index:PersonDetail'
, personDetail_index
- Identifier for the person detected within a video. Use to keep track of
the person throughout the video. The identifier is not stored by Amazon
Rekognition.
personDetail_boundingBox :: Lens' PersonDetail (Maybe BoundingBox) Source #
Bounding box around the detected person.
personDetail_face :: Lens' PersonDetail (Maybe FaceDetail) Source #
Face details for the detected person.
personDetail_index :: Lens' PersonDetail (Maybe Integer) Source #
Identifier for the person detected within a video. Use to keep track of the person throughout the video. The identifier is not stored by Amazon Rekognition.
PersonDetection
data PersonDetection Source #
Details and path tracking information for a single time a person's path
is tracked in a video. Amazon Rekognition operations that track
people's paths return an array of PersonDetection
objects with
elements for each time a person's path is tracked in a video.
For more information, see GetPersonTracking in the Amazon Rekognition Developer Guide.
See: newPersonDetection
smart constructor.
PersonDetection' | |
|
Instances
newPersonDetection :: PersonDetection Source #
Create a value of PersonDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:person:PersonDetection'
, personDetection_person
- Details about a person whose path was tracked in a video.
$sel:timestamp:PersonDetection'
, personDetection_timestamp
- The time, in milliseconds from the start of the video, that the
person's path was tracked. Note that Timestamp
is not guaranteed to
be accurate to the individual frame where the person's path first
appears.
personDetection_person :: Lens' PersonDetection (Maybe PersonDetail) Source #
Details about a person whose path was tracked in a video.
personDetection_timestamp :: Lens' PersonDetection (Maybe Integer) Source #
The time, in milliseconds from the start of the video, that the
person's path was tracked. Note that Timestamp
is not guaranteed to
be accurate to the individual frame where the person's path first
appears.
PersonMatch
data PersonMatch Source #
Information about a person whose face matches a face(s) in an Amazon
Rekognition collection. Includes information about the faces in the
Amazon Rekognition collection (FaceMatch), information about the person
(PersonDetail), and the time stamp for when the person was detected in a
video. An array of PersonMatch
objects is returned by GetFaceSearch.
See: newPersonMatch
smart constructor.
PersonMatch' | |
|
Instances
newPersonMatch :: PersonMatch Source #
Create a value of PersonMatch
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceMatches:PersonMatch'
, personMatch_faceMatches
- Information about the faces in the input collection that match the face
of a person in the video.
$sel:person:PersonMatch'
, personMatch_person
- Information about the matched person.
$sel:timestamp:PersonMatch'
, personMatch_timestamp
- The time, in milliseconds from the beginning of the video, that the
person was matched in the video.
personMatch_faceMatches :: Lens' PersonMatch (Maybe [FaceMatch]) Source #
Information about the faces in the input collection that match the face of a person in the video.
personMatch_person :: Lens' PersonMatch (Maybe PersonDetail) Source #
Information about the matched person.
personMatch_timestamp :: Lens' PersonMatch (Maybe Integer) Source #
The time, in milliseconds from the beginning of the video, that the person was matched in the video.
Point
The X and Y coordinates of a point on an image or video frame. The X and Y values are ratios of the overall image size or video resolution. For example, if an input image is 700x200 and the values are X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the image.
An array of Point
objects makes up a Polygon
. A Polygon
is
returned by DetectText and by DetectCustomLabels. Polygon
represents a
fine-grained polygon around a detected item. For more information, see
Geometry in the Amazon Rekognition Developer Guide.
See: newPoint
smart constructor.
Instances
FromJSON Point Source # | |
ToJSON Point Source # | |
Defined in Amazonka.Rekognition.Types.Point | |
Generic Point Source # | |
Read Point Source # | |
Show Point Source # | |
NFData Point Source # | |
Defined in Amazonka.Rekognition.Types.Point | |
Eq Point Source # | |
Hashable Point Source # | |
Defined in Amazonka.Rekognition.Types.Point | |
type Rep Point Source # | |
Defined in Amazonka.Rekognition.Types.Point type Rep Point = D1 ('MetaData "Point" "Amazonka.Rekognition.Types.Point" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Point'" 'PrefixI 'True) (S1 ('MetaSel ('Just "x") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "y") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
Create a value of Point
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:x:Point'
, point_x
- The value of the X coordinate for a point on a Polygon
.
$sel:y:Point'
, point_y
- The value of the Y coordinate for a point on a Polygon
.
point_x :: Lens' Point (Maybe Double) Source #
The value of the X coordinate for a point on a Polygon
.
point_y :: Lens' Point (Maybe Double) Source #
The value of the Y coordinate for a point on a Polygon
.
Pose
Indicates the pose of the face as determined by its pitch, roll, and yaw.
See: newPose
smart constructor.
Instances
FromJSON Pose Source # | |
Generic Pose Source # | |
Read Pose Source # | |
Show Pose Source # | |
NFData Pose Source # | |
Defined in Amazonka.Rekognition.Types.Pose | |
Eq Pose Source # | |
Hashable Pose Source # | |
Defined in Amazonka.Rekognition.Types.Pose | |
type Rep Pose Source # | |
Defined in Amazonka.Rekognition.Types.Pose type Rep Pose = D1 ('MetaData "Pose" "Amazonka.Rekognition.Types.Pose" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Pose'" 'PrefixI 'True) (S1 ('MetaSel ('Just "pitch") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: (S1 ('MetaSel ('Just "roll") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "yaw") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double))))) |
Create a value of Pose
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:pitch:Pose'
, pose_pitch
- Value representing the face rotation on the pitch axis.
$sel:roll:Pose'
, pose_roll
- Value representing the face rotation on the roll axis.
$sel:yaw:Pose'
, pose_yaw
- Value representing the face rotation on the yaw axis.
pose_pitch :: Lens' Pose (Maybe Double) Source #
Value representing the face rotation on the pitch axis.
pose_roll :: Lens' Pose (Maybe Double) Source #
Value representing the face rotation on the roll axis.
ProjectDescription
data ProjectDescription Source #
A description of an Amazon Rekognition Custom Labels project. For more information, see DescribeProjects.
See: newProjectDescription
smart constructor.
ProjectDescription' | |
|
Instances
newProjectDescription :: ProjectDescription Source #
Create a value of ProjectDescription
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
ProjectDescription
, projectDescription_creationTimestamp
- The Unix timestamp for the date and time that the project was created.
$sel:datasets:ProjectDescription'
, projectDescription_datasets
- Information about the training and test datasets in the project.
$sel:projectArn:ProjectDescription'
, projectDescription_projectArn
- The Amazon Resource Name (ARN) of the project.
ProjectDescription
, projectDescription_status
- The current status of the project.
projectDescription_creationTimestamp :: Lens' ProjectDescription (Maybe UTCTime) Source #
The Unix timestamp for the date and time that the project was created.
projectDescription_datasets :: Lens' ProjectDescription (Maybe [DatasetMetadata]) Source #
Information about the training and test datasets in the project.
projectDescription_projectArn :: Lens' ProjectDescription (Maybe Text) Source #
The Amazon Resource Name (ARN) of the project.
projectDescription_status :: Lens' ProjectDescription (Maybe ProjectStatus) Source #
The current status of the project.
ProjectPolicy
data ProjectPolicy Source #
Describes a project policy in the response from ListProjectPolicies.
See: newProjectPolicy
smart constructor.
ProjectPolicy' | |
|
Instances
newProjectPolicy :: ProjectPolicy Source #
Create a value of ProjectPolicy
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:creationTimestamp:ProjectPolicy'
, projectPolicy_creationTimestamp
- The Unix datetime for the creation of the project policy.
$sel:lastUpdatedTimestamp:ProjectPolicy'
, projectPolicy_lastUpdatedTimestamp
- The Unix datetime for when the project policy was last updated.
$sel:policyDocument:ProjectPolicy'
, projectPolicy_policyDocument
- The JSON document for the project policy.
$sel:policyName:ProjectPolicy'
, projectPolicy_policyName
- The name of the project policy.
$sel:policyRevisionId:ProjectPolicy'
, projectPolicy_policyRevisionId
- The revision ID of the project policy.
$sel:projectArn:ProjectPolicy'
, projectPolicy_projectArn
- The Amazon Resource Name (ARN) of the project to which the project
policy is attached.
projectPolicy_creationTimestamp :: Lens' ProjectPolicy (Maybe UTCTime) Source #
The Unix datetime for the creation of the project policy.
projectPolicy_lastUpdatedTimestamp :: Lens' ProjectPolicy (Maybe UTCTime) Source #
The Unix datetime for when the project policy was last updated.
projectPolicy_policyDocument :: Lens' ProjectPolicy (Maybe Text) Source #
The JSON document for the project policy.
projectPolicy_policyName :: Lens' ProjectPolicy (Maybe Text) Source #
The name of the project policy.
projectPolicy_policyRevisionId :: Lens' ProjectPolicy (Maybe Text) Source #
The revision ID of the project policy.
projectPolicy_projectArn :: Lens' ProjectPolicy (Maybe Text) Source #
The Amazon Resource Name (ARN) of the project to which the project policy is attached.
ProjectVersionDescription
data ProjectVersionDescription Source #
A description of a version of an Amazon Rekognition Custom Labels model.
See: newProjectVersionDescription
smart constructor.
ProjectVersionDescription' | |
|
Instances
newProjectVersionDescription :: ProjectVersionDescription Source #
Create a value of ProjectVersionDescription
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:billableTrainingTimeInSeconds:ProjectVersionDescription'
, projectVersionDescription_billableTrainingTimeInSeconds
- The duration, in seconds, that you were billed for a successful training
of the model version. This value is only returned if the model version
has been successfully trained.
$sel:creationTimestamp:ProjectVersionDescription'
, projectVersionDescription_creationTimestamp
- The Unix datetime for the date and time that training started.
$sel:evaluationResult:ProjectVersionDescription'
, projectVersionDescription_evaluationResult
- The training results. EvaluationResult
is only returned if training is
successful.
$sel:kmsKeyId:ProjectVersionDescription'
, projectVersionDescription_kmsKeyId
- The identifier for the AWS Key Management Service key (AWS KMS key) that
was used to encrypt the model during training.
$sel:manifestSummary:ProjectVersionDescription'
, projectVersionDescription_manifestSummary
- The location of the summary manifest. The summary manifest provides
aggregate data validation results for the training and test datasets.
$sel:maxInferenceUnits:ProjectVersionDescription'
, projectVersionDescription_maxInferenceUnits
- The maximum number of inference units Amazon Rekognition Custom Labels
uses to auto-scale the model. For more information, see
StartProjectVersion.
$sel:minInferenceUnits:ProjectVersionDescription'
, projectVersionDescription_minInferenceUnits
- The minimum number of inference units used by the model. For more
information, see StartProjectVersion.
$sel:outputConfig:ProjectVersionDescription'
, projectVersionDescription_outputConfig
- The location where training results are saved.
$sel:projectVersionArn:ProjectVersionDescription'
, projectVersionDescription_projectVersionArn
- The Amazon Resource Name (ARN) of the model version.
$sel:sourceProjectVersionArn:ProjectVersionDescription'
, projectVersionDescription_sourceProjectVersionArn
- If the model version was copied from a different project,
SourceProjectVersionArn
contains the ARN of the source model version.
$sel:status:ProjectVersionDescription'
, projectVersionDescription_status
- The current status of the model version.
$sel:statusMessage:ProjectVersionDescription'
, projectVersionDescription_statusMessage
- A descriptive message for an error or warning that occurred.
$sel:testingDataResult:ProjectVersionDescription'
, projectVersionDescription_testingDataResult
- Contains information about the testing results.
$sel:trainingDataResult:ProjectVersionDescription'
, projectVersionDescription_trainingDataResult
- Contains information about the training results.
$sel:trainingEndTimestamp:ProjectVersionDescription'
, projectVersionDescription_trainingEndTimestamp
- The Unix date and time that training of the model ended.
projectVersionDescription_billableTrainingTimeInSeconds :: Lens' ProjectVersionDescription (Maybe Natural) Source #
The duration, in seconds, that you were billed for a successful training of the model version. This value is only returned if the model version has been successfully trained.
projectVersionDescription_creationTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime) Source #
The Unix datetime for the date and time that training started.
projectVersionDescription_evaluationResult :: Lens' ProjectVersionDescription (Maybe EvaluationResult) Source #
The training results. EvaluationResult
is only returned if training is
successful.
projectVersionDescription_kmsKeyId :: Lens' ProjectVersionDescription (Maybe Text) Source #
The identifier for the AWS Key Management Service key (AWS KMS key) that was used to encrypt the model during training.
projectVersionDescription_manifestSummary :: Lens' ProjectVersionDescription (Maybe GroundTruthManifest) Source #
The location of the summary manifest. The summary manifest provides aggregate data validation results for the training and test datasets.
projectVersionDescription_maxInferenceUnits :: Lens' ProjectVersionDescription (Maybe Natural) Source #
The maximum number of inference units Amazon Rekognition Custom Labels uses to auto-scale the model. For more information, see StartProjectVersion.
projectVersionDescription_minInferenceUnits :: Lens' ProjectVersionDescription (Maybe Natural) Source #
The minimum number of inference units used by the model. For more information, see StartProjectVersion.
projectVersionDescription_outputConfig :: Lens' ProjectVersionDescription (Maybe OutputConfig) Source #
The location where training results are saved.
projectVersionDescription_projectVersionArn :: Lens' ProjectVersionDescription (Maybe Text) Source #
The Amazon Resource Name (ARN) of the model version.
projectVersionDescription_sourceProjectVersionArn :: Lens' ProjectVersionDescription (Maybe Text) Source #
If the model version was copied from a different project,
SourceProjectVersionArn
contains the ARN of the source model version.
projectVersionDescription_status :: Lens' ProjectVersionDescription (Maybe ProjectVersionStatus) Source #
The current status of the model version.
projectVersionDescription_statusMessage :: Lens' ProjectVersionDescription (Maybe Text) Source #
A descriptive message for an error or warning that occurred.
projectVersionDescription_testingDataResult :: Lens' ProjectVersionDescription (Maybe TestingDataResult) Source #
Contains information about the testing results.
projectVersionDescription_trainingDataResult :: Lens' ProjectVersionDescription (Maybe TrainingDataResult) Source #
Contains information about the training results.
projectVersionDescription_trainingEndTimestamp :: Lens' ProjectVersionDescription (Maybe UTCTime) Source #
The Unix date and time that training of the model ended.
ProtectiveEquipmentBodyPart
data ProtectiveEquipmentBodyPart Source #
Information about a body part detected by DetectProtectiveEquipment that
contains PPE. An array of ProtectiveEquipmentBodyPart
objects is
returned for each person detected by DetectProtectiveEquipment
.
See: newProtectiveEquipmentBodyPart
smart constructor.
ProtectiveEquipmentBodyPart' | |
|
Instances
newProtectiveEquipmentBodyPart :: ProtectiveEquipmentBodyPart Source #
Create a value of ProtectiveEquipmentBodyPart
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
ProtectiveEquipmentBodyPart
, protectiveEquipmentBodyPart_confidence
- The confidence that Amazon Rekognition has in the detection accuracy of
the detected body part.
$sel:equipmentDetections:ProtectiveEquipmentBodyPart'
, protectiveEquipmentBodyPart_equipmentDetections
- An array of Personal Protective Equipment items detected around a body
part.
$sel:name:ProtectiveEquipmentBodyPart'
, protectiveEquipmentBodyPart_name
- The detected body part.
protectiveEquipmentBodyPart_confidence :: Lens' ProtectiveEquipmentBodyPart (Maybe Double) Source #
The confidence that Amazon Rekognition has in the detection accuracy of the detected body part.
protectiveEquipmentBodyPart_equipmentDetections :: Lens' ProtectiveEquipmentBodyPart (Maybe [EquipmentDetection]) Source #
An array of Personal Protective Equipment items detected around a body part.
protectiveEquipmentBodyPart_name :: Lens' ProtectiveEquipmentBodyPart (Maybe BodyPart) Source #
The detected body part.
ProtectiveEquipmentPerson
data ProtectiveEquipmentPerson Source #
A person detected by a call to DetectProtectiveEquipment. The API
returns all persons detected in the input image in an array of
ProtectiveEquipmentPerson
objects.
See: newProtectiveEquipmentPerson
smart constructor.
ProtectiveEquipmentPerson' | |
|
Instances
newProtectiveEquipmentPerson :: ProtectiveEquipmentPerson Source #
Create a value of ProtectiveEquipmentPerson
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:bodyParts:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_bodyParts
- An array of body parts detected on a person's body (including body
parts without PPE).
$sel:boundingBox:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_boundingBox
- A bounding box around the detected person.
ProtectiveEquipmentPerson
, protectiveEquipmentPerson_confidence
- The confidence that Amazon Rekognition has that the bounding box
contains a person.
$sel:id:ProtectiveEquipmentPerson'
, protectiveEquipmentPerson_id
- The identifier for the detected person. The identifier is only unique
for a single call to DetectProtectiveEquipment
.
protectiveEquipmentPerson_bodyParts :: Lens' ProtectiveEquipmentPerson (Maybe [ProtectiveEquipmentBodyPart]) Source #
An array of body parts detected on a person's body (including body parts without PPE).
protectiveEquipmentPerson_boundingBox :: Lens' ProtectiveEquipmentPerson (Maybe BoundingBox) Source #
A bounding box around the detected person.
protectiveEquipmentPerson_confidence :: Lens' ProtectiveEquipmentPerson (Maybe Double) Source #
The confidence that Amazon Rekognition has that the bounding box contains a person.
protectiveEquipmentPerson_id :: Lens' ProtectiveEquipmentPerson (Maybe Natural) Source #
The identifier for the detected person. The identifier is only unique
for a single call to DetectProtectiveEquipment
.
ProtectiveEquipmentSummarizationAttributes
data ProtectiveEquipmentSummarizationAttributes Source #
Specifies summary attributes to return from a call to
DetectProtectiveEquipment. You can specify which types of PPE to
summarize. You can also specify a minimum confidence value for
detections. Summary information is returned in the Summary
(ProtectiveEquipmentSummary) field of the response from
DetectProtectiveEquipment
. The summary includes which persons in an
image were detected wearing the requested types of person protective
equipment (PPE), which persons were detected as not wearing PPE, and the
persons in which a determination could not be made. For more
information, see ProtectiveEquipmentSummary.
See: newProtectiveEquipmentSummarizationAttributes
smart constructor.
ProtectiveEquipmentSummarizationAttributes' | |
|
Instances
newProtectiveEquipmentSummarizationAttributes Source #
:: Double |
|
-> ProtectiveEquipmentSummarizationAttributes |
Create a value of ProtectiveEquipmentSummarizationAttributes
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minConfidence:ProtectiveEquipmentSummarizationAttributes'
, protectiveEquipmentSummarizationAttributes_minConfidence
- The minimum confidence level for which you want summary information. The
confidence level applies to person detection, body part detection,
equipment detection, and body part coverage. Amazon Rekognition doesn't
return summary information with a confidence lower than this specified value.
There isn't a default value.
Specify a MinConfidence
value that is between 50-100% as
DetectProtectiveEquipment
returns predictions only where the detection
confidence is between 50% - 100%. If you specify a value that is less
than 50%, the results are the same as specifying a value of 50%.
$sel:requiredEquipmentTypes:ProtectiveEquipmentSummarizationAttributes'
, protectiveEquipmentSummarizationAttributes_requiredEquipmentTypes
- An array of personal protective equipment types for which you want
summary information. If a person is detected wearing a required
equipment type, the person's ID is added to the
PersonsWithRequiredEquipment
array field returned in
ProtectiveEquipmentSummary by DetectProtectiveEquipment
.
protectiveEquipmentSummarizationAttributes_minConfidence :: Lens' ProtectiveEquipmentSummarizationAttributes Double Source #
The minimum confidence level for which you want summary information. The confidence level applies to person detection, body part detection, equipment detection, and body part coverage. Amazon Rekognition doesn't return summary information with a confidence lower than this specified value. There isn't a default value.
Specify a MinConfidence
value that is between 50-100% as
DetectProtectiveEquipment
returns predictions only where the detection
confidence is between 50% - 100%. If you specify a value that is less
than 50%, the results are the same as specifying a value of 50%.
protectiveEquipmentSummarizationAttributes_requiredEquipmentTypes :: Lens' ProtectiveEquipmentSummarizationAttributes [ProtectiveEquipmentType] Source #
An array of personal protective equipment types for which you want
summary information. If a person is detected wearing a required
equipment type, the person's ID is added to the
PersonsWithRequiredEquipment
array field returned in
ProtectiveEquipmentSummary by DetectProtectiveEquipment
.
ProtectiveEquipmentSummary
data ProtectiveEquipmentSummary Source #
Summary information for required items of personal protective equipment
(PPE) detected on persons by a call to DetectProtectiveEquipment. You
specify the required type of PPE in the SummarizationAttributes
(ProtectiveEquipmentSummarizationAttributes) input parameter. The
summary includes which persons were detected wearing the required
personal protective equipment (PersonsWithRequiredEquipment
), which
persons were detected as not wearing the required PPE
(PersonsWithoutRequiredEquipment
), and the persons in which a
determination could not be made (PersonsIndeterminate
).
To get a total for each category, use the size of the field array. For
example, to find out how many people were detected as wearing the
specified PPE, use the size of the PersonsWithRequiredEquipment
array.
If you want to find out more about a person, such as the location
(BoundingBox) of the person on the image, use the person ID in each
array element. Each person ID matches the ID field of a
ProtectiveEquipmentPerson object returned in the Persons
array by
DetectProtectiveEquipment
.
See: newProtectiveEquipmentSummary
smart constructor.
ProtectiveEquipmentSummary' | |
|
Instances
newProtectiveEquipmentSummary :: ProtectiveEquipmentSummary Source #
Create a value of ProtectiveEquipmentSummary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:personsIndeterminate:ProtectiveEquipmentSummary'
, protectiveEquipmentSummary_personsIndeterminate
- An array of IDs for persons where it was not possible to determine if
they are wearing personal protective equipment.
$sel:personsWithRequiredEquipment:ProtectiveEquipmentSummary'
, protectiveEquipmentSummary_personsWithRequiredEquipment
- An array of IDs for persons who are wearing detected personal protective
equipment.
$sel:personsWithoutRequiredEquipment:ProtectiveEquipmentSummary'
, protectiveEquipmentSummary_personsWithoutRequiredEquipment
- An array of IDs for persons who are not wearing all of the types of PPE
specified in the RequiredEquipmentTypes
field of the detected personal
protective equipment.
protectiveEquipmentSummary_personsIndeterminate :: Lens' ProtectiveEquipmentSummary (Maybe [Natural]) Source #
An array of IDs for persons where it was not possible to determine if they are wearing personal protective equipment.
protectiveEquipmentSummary_personsWithRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural]) Source #
An array of IDs for persons who are wearing detected personal protective equipment.
protectiveEquipmentSummary_personsWithoutRequiredEquipment :: Lens' ProtectiveEquipmentSummary (Maybe [Natural]) Source #
An array of IDs for persons who are not wearing all of the types of PPE
specified in the RequiredEquipmentTypes
field of the detected personal
protective equipment.
RegionOfInterest
data RegionOfInterest Source #
Specifies a location within the frame that Rekognition checks for
objects of interest such as text, labels, or faces. It uses a
BoundingBox
or Polygon
to set a region of the screen.
A word, face, or label is included in the region if it is more than half in that region. If there is more than one region, the word, face, or label is compared with all regions of the screen. Any object of interest that is more than half in a region is kept in the results.
See: newRegionOfInterest
smart constructor.
RegionOfInterest' | |
|
Instances
newRegionOfInterest :: RegionOfInterest Source #
Create a value of RegionOfInterest
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:boundingBox:RegionOfInterest'
, regionOfInterest_boundingBox
- The box representing a region of interest on screen.
$sel:polygon:RegionOfInterest'
, regionOfInterest_polygon
- Specifies a shape made up of up to 10 Point
objects to define a region
of interest.
regionOfInterest_boundingBox :: Lens' RegionOfInterest (Maybe BoundingBox) Source #
The box representing a region of interest on screen.
regionOfInterest_polygon :: Lens' RegionOfInterest (Maybe [Point]) Source #
Specifies a shape made up of up to 10 Point
objects to define a region
of interest.
S3Destination
data S3Destination Source #
The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation. These results include the name of the stream processor resource, the session ID of the stream processing session, and labeled timestamps and bounding boxes for detected labels.
See: newS3Destination
smart constructor.
S3Destination' | |
|
Instances
newS3Destination :: S3Destination Source #
Create a value of S3Destination
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:bucket:S3Destination'
, s3Destination_bucket
- The name of the Amazon S3 bucket you want to associate with the
streaming video project. You must be the owner of the Amazon S3 bucket.
$sel:keyPrefix:S3Destination'
, s3Destination_keyPrefix
- The prefix value of the location within the bucket that you want the
information to be published to. For more information, see
Using prefixes.
s3Destination_bucket :: Lens' S3Destination (Maybe Text) Source #
The name of the Amazon S3 bucket you want to associate with the streaming video project. You must be the owner of the Amazon S3 bucket.
s3Destination_keyPrefix :: Lens' S3Destination (Maybe Text) Source #
The prefix value of the location within the bucket that you want the information to be published to. For more information, see Using prefixes.
S3Object
Provides the S3 bucket name and object name.
The region for the S3 bucket containing the S3 object must match the region you use for Amazon Rekognition operations.
For Amazon Rekognition to process an S3 object, the user must have permission to access the S3 object. For more information, see How Amazon Rekognition works with IAM in the Amazon Rekognition Developer Guide.
See: newS3Object
smart constructor.
Instances
FromJSON S3Object Source # | |
ToJSON S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object | |
Generic S3Object Source # | |
Read S3Object Source # | |
Show S3Object Source # | |
NFData S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object | |
Eq S3Object Source # | |
Hashable S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object | |
type Rep S3Object Source # | |
Defined in Amazonka.Rekognition.Types.S3Object type Rep S3Object = D1 ('MetaData "S3Object" "Amazonka.Rekognition.Types.S3Object" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "S3Object'" 'PrefixI 'True) (S1 ('MetaSel ('Just "bucket") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: (S1 ('MetaSel ('Just "name") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text)) :*: S1 ('MetaSel ('Just "version") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Text))))) |
newS3Object :: S3Object Source #
Create a value of S3Object
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:bucket:S3Object'
, s3Object_bucket
- Name of the S3 bucket.
$sel:name:S3Object'
, s3Object_name
- S3 object key name.
$sel:version:S3Object'
, s3Object_version
- If the bucket is versioning enabled, you can specify the object version.
s3Object_version :: Lens' S3Object (Maybe Text) Source #
If the bucket is versioning enabled, you can specify the object version.
SegmentDetection
data SegmentDetection Source #
A technical cue or shot detection segment detected in a video. An array
of SegmentDetection
objects containing all segments detected in a
stored video is returned by GetSegmentDetection.
See: newSegmentDetection
smart constructor.
SegmentDetection' | |
|
Instances
newSegmentDetection :: SegmentDetection Source #
Create a value of SegmentDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:durationFrames:SegmentDetection'
, segmentDetection_durationFrames
- The duration of a video segment, expressed in frames.
$sel:durationMillis:SegmentDetection'
, segmentDetection_durationMillis
- The duration of the detected segment in milliseconds.
$sel:durationSMPTE:SegmentDetection'
, segmentDetection_durationSMPTE
- The duration of the timecode for the detected segment in SMPTE format.
$sel:endFrameNumber:SegmentDetection'
, segmentDetection_endFrameNumber
- The frame number at the end of a video segment, using a frame index that
starts with 0.
$sel:endTimecodeSMPTE:SegmentDetection'
, segmentDetection_endTimecodeSMPTE
- The frame-accurate SMPTE timecode, from the start of a video, for the
end of a detected segment. EndTimecode
is in HH:MM:SS:fr format (and
;fr for drop frame-rates).
$sel:endTimestampMillis:SegmentDetection'
, segmentDetection_endTimestampMillis
- The end time of the detected segment, in milliseconds, from the start of
the video. This value is rounded down.
$sel:shotSegment:SegmentDetection'
, segmentDetection_shotSegment
- If the segment is a shot detection, contains information about the shot
detection.
$sel:startFrameNumber:SegmentDetection'
, segmentDetection_startFrameNumber
- The frame number of the start of a video segment, using a frame index
that starts with 0.
$sel:startTimecodeSMPTE:SegmentDetection'
, segmentDetection_startTimecodeSMPTE
- The frame-accurate SMPTE timecode, from the start of a video, for the
start of a detected segment. StartTimecode
is in HH:MM:SS:fr format
(and ;fr for drop frame-rates).
$sel:startTimestampMillis:SegmentDetection'
, segmentDetection_startTimestampMillis
- The start time of the detected segment in milliseconds from the start of
the video. This value is rounded down. For example, if the actual
timestamp is 100.6667 milliseconds, Amazon Rekognition Video returns a
value of 100 millis.
$sel:technicalCueSegment:SegmentDetection'
, segmentDetection_technicalCueSegment
- If the segment is a technical cue, contains information about the
technical cue.
SegmentDetection
, segmentDetection_type
- The type of the segment. Valid values are TECHNICAL_CUE
and SHOT
.
segmentDetection_durationFrames :: Lens' SegmentDetection (Maybe Natural) Source #
The duration of a video segment, expressed in frames.
segmentDetection_durationMillis :: Lens' SegmentDetection (Maybe Natural) Source #
The duration of the detected segment in milliseconds.
segmentDetection_durationSMPTE :: Lens' SegmentDetection (Maybe Text) Source #
The duration of the timecode for the detected segment in SMPTE format.
segmentDetection_endFrameNumber :: Lens' SegmentDetection (Maybe Natural) Source #
The frame number at the end of a video segment, using a frame index that starts with 0.
segmentDetection_endTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text) Source #
The frame-accurate SMPTE timecode, from the start of a video, for the
end of a detected segment. EndTimecode
is in HH:MM:SS:fr format (and
;fr for drop frame-rates).
segmentDetection_endTimestampMillis :: Lens' SegmentDetection (Maybe Integer) Source #
The end time of the detected segment, in milliseconds, from the start of the video. This value is rounded down.
segmentDetection_shotSegment :: Lens' SegmentDetection (Maybe ShotSegment) Source #
If the segment is a shot detection, contains information about the shot detection.
segmentDetection_startFrameNumber :: Lens' SegmentDetection (Maybe Natural) Source #
The frame number of the start of a video segment, using a frame index that starts with 0.
segmentDetection_startTimecodeSMPTE :: Lens' SegmentDetection (Maybe Text) Source #
The frame-accurate SMPTE timecode, from the start of a video, for the
start of a detected segment. StartTimecode
is in HH:MM:SS:fr format
(and ;fr for drop frame-rates).
segmentDetection_startTimestampMillis :: Lens' SegmentDetection (Maybe Integer) Source #
The start time of the detected segment in milliseconds from the start of the video. This value is rounded down. For example, if the actual timestamp is 100.6667 milliseconds, Amazon Rekognition Video returns a value of 100 millis.
segmentDetection_technicalCueSegment :: Lens' SegmentDetection (Maybe TechnicalCueSegment) Source #
If the segment is a technical cue, contains information about the technical cue.
segmentDetection_type :: Lens' SegmentDetection (Maybe SegmentType) Source #
The type of the segment. Valid values are TECHNICAL_CUE
and SHOT
.
SegmentTypeInfo
data SegmentTypeInfo Source #
Information about the type of a segment requested in a call to
StartSegmentDetection. An array of SegmentTypeInfo
objects is returned
by the response from GetSegmentDetection.
See: newSegmentTypeInfo
smart constructor.
SegmentTypeInfo' | |
|
Instances
newSegmentTypeInfo :: SegmentTypeInfo Source #
Create a value of SegmentTypeInfo
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:modelVersion:SegmentTypeInfo'
, segmentTypeInfo_modelVersion
- The version of the model used to detect segments.
$sel:type':SegmentTypeInfo'
, segmentTypeInfo_type
- The type of a segment (technical cue or shot detection).
segmentTypeInfo_modelVersion :: Lens' SegmentTypeInfo (Maybe Text) Source #
The version of the model used to detect segments.
segmentTypeInfo_type :: Lens' SegmentTypeInfo (Maybe SegmentType) Source #
The type of a segment (technical cue or shot detection).
ShotSegment
data ShotSegment Source #
Information about a shot detection segment detected in a video. For more information, see SegmentDetection.
See: newShotSegment
smart constructor.
Instances
newShotSegment :: ShotSegment Source #
Create a value of ShotSegment
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:ShotSegment'
, shotSegment_confidence
- The confidence that Amazon Rekognition Video has in the accuracy of the
detected segment.
$sel:index:ShotSegment'
, shotSegment_index
- An identifier for a shot detection segment detected in a video.
shotSegment_confidence :: Lens' ShotSegment (Maybe Double) Source #
The confidence that Amazon Rekognition Video has in the accuracy of the detected segment.
shotSegment_index :: Lens' ShotSegment (Maybe Natural) Source #
An Identifier for a shot detection segment detected in a video.
Smile
Indicates whether or not the face is smiling, and the confidence level in the determination.
See: newSmile
smart constructor.
Instances
FromJSON Smile Source # | |
Generic Smile Source # | |
Read Smile Source # | |
Show Smile Source # | |
NFData Smile Source # | |
Defined in Amazonka.Rekognition.Types.Smile | |
Eq Smile Source # | |
Hashable Smile Source # | |
Defined in Amazonka.Rekognition.Types.Smile | |
type Rep Smile Source # | |
Defined in Amazonka.Rekognition.Types.Smile type Rep Smile = D1 ('MetaData "Smile" "Amazonka.Rekognition.Types.Smile" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "Smile'" 'PrefixI 'True) (S1 ('MetaSel ('Just "confidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)) :*: S1 ('MetaSel ('Just "value") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Bool)))) |
Create a value of Smile
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Smile'
, smile_confidence
- Level of confidence in the determination.
$sel:value:Smile'
, smile_value
- Boolean value that indicates whether the face is smiling or not.
smile_value :: Lens' Smile (Maybe Bool) Source #
Boolean value that indicates whether the face is smiling or not.
StartSegmentDetectionFilters
data StartSegmentDetectionFilters Source #
Filters applied to the technical cue or shot detection segments. For more information, see StartSegmentDetection.
See: newStartSegmentDetectionFilters
smart constructor.
StartSegmentDetectionFilters' | |
|
Instances
newStartSegmentDetectionFilters :: StartSegmentDetectionFilters Source #
Create a value of StartSegmentDetectionFilters
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:shotFilter:StartSegmentDetectionFilters'
, startSegmentDetectionFilters_shotFilter
- Filters that are specific to shot detections.
$sel:technicalCueFilter:StartSegmentDetectionFilters'
, startSegmentDetectionFilters_technicalCueFilter
- Filters that are specific to technical cues.
startSegmentDetectionFilters_shotFilter :: Lens' StartSegmentDetectionFilters (Maybe StartShotDetectionFilter) Source #
Filters that are specific to shot detections.
startSegmentDetectionFilters_technicalCueFilter :: Lens' StartSegmentDetectionFilters (Maybe StartTechnicalCueDetectionFilter) Source #
Filters that are specific to technical cues.
StartShotDetectionFilter
data StartShotDetectionFilter Source #
Filters for the shot detection segments returned by
GetSegmentDetection
. For more information, see
StartSegmentDetectionFilters.
See: newStartShotDetectionFilter
smart constructor.
StartShotDetectionFilter' | |
|
Instances
newStartShotDetectionFilter :: StartShotDetectionFilter Source #
Create a value of StartShotDetectionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:minSegmentConfidence:StartShotDetectionFilter'
, startShotDetectionFilter_minSegmentConfidence
- Specifies the minimum confidence that Amazon Rekognition Video must have
in order to return a detected segment. Confidence represents how certain
Amazon Rekognition is that a segment is correctly identified. 0 is the
lowest confidence. 100 is the highest confidence. Amazon Rekognition
Video doesn't return any segments with a confidence level lower than
this specified value.
If you don't specify MinSegmentConfidence
, GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
startShotDetectionFilter_minSegmentConfidence :: Lens' StartShotDetectionFilter (Maybe Double) Source #
Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.
If you don't specify MinSegmentConfidence
, GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
StartTechnicalCueDetectionFilter
data StartTechnicalCueDetectionFilter Source #
Filters for the technical segments returned by GetSegmentDetection. For more information, see StartSegmentDetectionFilters.
See: newStartTechnicalCueDetectionFilter
smart constructor.
StartTechnicalCueDetectionFilter' | |
|
Instances
ToJSON StartTechnicalCueDetectionFilter Source # | |
Generic StartTechnicalCueDetectionFilter Source # | |
Read StartTechnicalCueDetectionFilter Source # | |
Show StartTechnicalCueDetectionFilter Source # | |
NFData StartTechnicalCueDetectionFilter Source # | |
Eq StartTechnicalCueDetectionFilter Source # | |
Hashable StartTechnicalCueDetectionFilter Source # | |
type Rep StartTechnicalCueDetectionFilter Source # | |
Defined in Amazonka.Rekognition.Types.StartTechnicalCueDetectionFilter type Rep StartTechnicalCueDetectionFilter = D1 ('MetaData "StartTechnicalCueDetectionFilter" "Amazonka.Rekognition.Types.StartTechnicalCueDetectionFilter" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "StartTechnicalCueDetectionFilter'" 'PrefixI 'True) (S1 ('MetaSel ('Just "blackFrame") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe BlackFrame)) :*: S1 ('MetaSel ('Just "minSegmentConfidence") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Double)))) |
newStartTechnicalCueDetectionFilter :: StartTechnicalCueDetectionFilter Source #
Create a value of StartTechnicalCueDetectionFilter
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:blackFrame:StartTechnicalCueDetectionFilter'
, startTechnicalCueDetectionFilter_blackFrame
- A filter that allows you to control the black frame detection by
specifying the black levels and pixel coverage of black pixels in a
frame. Videos can come from multiple sources, formats, and time periods,
with different standards and varying noise levels for black frames that
need to be accounted for.
$sel:minSegmentConfidence:StartTechnicalCueDetectionFilter'
, startTechnicalCueDetectionFilter_minSegmentConfidence
- Specifies the minimum confidence that Amazon Rekognition Video must have
in order to return a detected segment. Confidence represents how certain
Amazon Rekognition is that a segment is correctly identified. 0 is the
lowest confidence. 100 is the highest confidence. Amazon Rekognition
Video doesn't return any segments with a confidence level lower than
this specified value.
If you don't specify MinSegmentConfidence
, GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
startTechnicalCueDetectionFilter_blackFrame :: Lens' StartTechnicalCueDetectionFilter (Maybe BlackFrame) Source #
A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame. Videos can come from multiple sources, formats, and time periods, with different standards and varying noise levels for black frames that need to be accounted for.
startTechnicalCueDetectionFilter_minSegmentConfidence :: Lens' StartTechnicalCueDetectionFilter (Maybe Double) Source #
Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value.
If you don't specify MinSegmentConfidence
, GetSegmentDetection
returns segments with confidence values greater than or equal to 50
percent.
StartTextDetectionFilters
data StartTextDetectionFilters Source #
Set of optional parameters that let you set the criteria text must meet
to be included in your response. WordFilter
looks at a word's height,
width and minimum confidence. RegionOfInterest
lets you set a specific
region of the screen to look for text in.
See: newStartTextDetectionFilters
smart constructor.
StartTextDetectionFilters' | |
|
Instances
newStartTextDetectionFilters :: StartTextDetectionFilters Source #
Create a value of StartTextDetectionFilters
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:regionsOfInterest:StartTextDetectionFilters'
, startTextDetectionFilters_regionsOfInterest
- Filter focusing on a certain area of the frame. Uses a BoundingBox
object to set the region of the screen.
$sel:wordFilter:StartTextDetectionFilters'
, startTextDetectionFilters_wordFilter
- Filters focusing on qualities of the text, such as confidence or size.
startTextDetectionFilters_regionsOfInterest :: Lens' StartTextDetectionFilters (Maybe [RegionOfInterest]) Source #
Filter focusing on a certain area of the frame. Uses a BoundingBox
object to set the region of the screen.
startTextDetectionFilters_wordFilter :: Lens' StartTextDetectionFilters (Maybe DetectionFilter) Source #
Filters focusing on qualities of the text, such as confidence or size.
StreamProcessingStartSelector
data StreamProcessingStartSelector Source #
This is a required parameter for label detection stream processors and should not be used to start a face search stream processor.
See: newStreamProcessingStartSelector
smart constructor.
StreamProcessingStartSelector' | |
|
Instances
ToJSON StreamProcessingStartSelector Source # | |
Generic StreamProcessingStartSelector Source # | |
Read StreamProcessingStartSelector Source # | |
Show StreamProcessingStartSelector Source # | |
NFData StreamProcessingStartSelector Source # | |
Defined in Amazonka.Rekognition.Types.StreamProcessingStartSelector rnf :: StreamProcessingStartSelector -> () # | |
Eq StreamProcessingStartSelector Source # | |
Hashable StreamProcessingStartSelector Source # | |
type Rep StreamProcessingStartSelector Source # | |
Defined in Amazonka.Rekognition.Types.StreamProcessingStartSelector type Rep StreamProcessingStartSelector = D1 ('MetaData "StreamProcessingStartSelector" "Amazonka.Rekognition.Types.StreamProcessingStartSelector" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "StreamProcessingStartSelector'" 'PrefixI 'True) (S1 ('MetaSel ('Just "kVSStreamStartSelector") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe KinesisVideoStreamStartSelector)))) |
newStreamProcessingStartSelector :: StreamProcessingStartSelector Source #
Create a value of StreamProcessingStartSelector
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:kVSStreamStartSelector:StreamProcessingStartSelector'
, streamProcessingStartSelector_kVSStreamStartSelector
- Specifies the starting point in the stream to start processing. This can
be done with a producer timestamp or a fragment number in a Kinesis
stream.
streamProcessingStartSelector_kVSStreamStartSelector :: Lens' StreamProcessingStartSelector (Maybe KinesisVideoStreamStartSelector) Source #
Specifies the starting point in the stream to start processing. This can be done with a producer timestamp or a fragment number in a Kinesis stream.
StreamProcessingStopSelector
data StreamProcessingStopSelector Source #
Specifies when to stop processing the stream. You can specify a maximum amount of time to process the video.
See: newStreamProcessingStopSelector
smart constructor.
StreamProcessingStopSelector' | |
|
Instances
ToJSON StreamProcessingStopSelector Source # | |
Generic StreamProcessingStopSelector Source # | |
Read StreamProcessingStopSelector Source # | |
Show StreamProcessingStopSelector Source # | |
NFData StreamProcessingStopSelector Source # | |
Defined in Amazonka.Rekognition.Types.StreamProcessingStopSelector rnf :: StreamProcessingStopSelector -> () # | |
Eq StreamProcessingStopSelector Source # | |
Hashable StreamProcessingStopSelector Source # | |
type Rep StreamProcessingStopSelector Source # | |
Defined in Amazonka.Rekognition.Types.StreamProcessingStopSelector type Rep StreamProcessingStopSelector = D1 ('MetaData "StreamProcessingStopSelector" "Amazonka.Rekognition.Types.StreamProcessingStopSelector" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "StreamProcessingStopSelector'" 'PrefixI 'True) (S1 ('MetaSel ('Just "maxDurationInSeconds") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe Natural)))) |
newStreamProcessingStopSelector :: StreamProcessingStopSelector Source #
Create a value of StreamProcessingStopSelector
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:maxDurationInSeconds:StreamProcessingStopSelector'
, streamProcessingStopSelector_maxDurationInSeconds
- Specifies the maximum amount of time in seconds that you want the stream
to be processed. The largest amount of time is 2 minutes. The default is
10 seconds.
streamProcessingStopSelector_maxDurationInSeconds :: Lens' StreamProcessingStopSelector (Maybe Natural) Source #
Specifies the maximum amount of time in seconds that you want the stream to be processed. The largest amount of time is 2 minutes. The default is 10 seconds.
StreamProcessor
data StreamProcessor Source #
An object that recognizes faces or labels in a streaming video. An
Amazon Rekognition stream processor is created by a call to
CreateStreamProcessor. The request parameters for
CreateStreamProcessor
describe the Kinesis video stream source for the
streaming video, face recognition parameters, and where to stream the
analysis results.
See: newStreamProcessor
smart constructor.
Instances
newStreamProcessor :: StreamProcessor Source #
Create a value of StreamProcessor
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:name:StreamProcessor'
, streamProcessor_name
- Name of the Amazon Rekognition stream processor.
$sel:status:StreamProcessor'
, streamProcessor_status
- Current status of the Amazon Rekognition stream processor.
streamProcessor_name :: Lens' StreamProcessor (Maybe Text) Source #
Name of the Amazon Rekognition stream processor.
streamProcessor_status :: Lens' StreamProcessor (Maybe StreamProcessorStatus) Source #
Current status of the Amazon Rekognition stream processor.
StreamProcessorDataSharingPreference
data StreamProcessorDataSharingPreference Source #
Allows you to opt in or opt out to share data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis. Note that if you opt out at the account level this setting is ignored on individual streams.
See: newStreamProcessorDataSharingPreference
smart constructor.
Instances
FromJSON StreamProcessorDataSharingPreference Source # | |
ToJSON StreamProcessorDataSharingPreference Source # | |
Generic StreamProcessorDataSharingPreference Source # | |
Read StreamProcessorDataSharingPreference Source # | |
Show StreamProcessorDataSharingPreference Source # | |
NFData StreamProcessorDataSharingPreference Source # | |
Eq StreamProcessorDataSharingPreference Source # | |
Hashable StreamProcessorDataSharingPreference Source # | |
type Rep StreamProcessorDataSharingPreference Source # | |
Defined in Amazonka.Rekognition.Types.StreamProcessorDataSharingPreference type Rep StreamProcessorDataSharingPreference = D1 ('MetaData "StreamProcessorDataSharingPreference" "Amazonka.Rekognition.Types.StreamProcessorDataSharingPreference" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "StreamProcessorDataSharingPreference'" 'PrefixI 'True) (S1 ('MetaSel ('Just "optIn") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 Bool))) |
newStreamProcessorDataSharingPreference Source #
Create a value of StreamProcessorDataSharingPreference
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:optIn:StreamProcessorDataSharingPreference'
, streamProcessorDataSharingPreference_optIn
- If this option is set to true, you choose to share data with Rekognition
to improve model performance.
streamProcessorDataSharingPreference_optIn :: Lens' StreamProcessorDataSharingPreference Bool Source #
If this option is set to true, you choose to share data with Rekognition to improve model performance.
StreamProcessorInput
data StreamProcessorInput Source #
Information about the source streaming video.
See: newStreamProcessorInput
smart constructor.
StreamProcessorInput' | |
|
Instances
newStreamProcessorInput :: StreamProcessorInput Source #
Create a value of StreamProcessorInput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:kinesisVideoStream:StreamProcessorInput'
, streamProcessorInput_kinesisVideoStream
- The Kinesis video stream input stream for the source streaming video.
streamProcessorInput_kinesisVideoStream :: Lens' StreamProcessorInput (Maybe KinesisVideoStream) Source #
The Kinesis video stream input stream for the source streaming video.
StreamProcessorNotificationChannel
data StreamProcessorNotificationChannel Source #
The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the object detection results and completion status of a video analysis operation.
Amazon Rekognition publishes a notification the first time an object of interest or a person is detected in the video stream. For example, if Amazon Rekognition detects a person at second 2, a pet at second 4, and a person again at second 5, Amazon Rekognition sends 2 object class detected notifications, one for a person at second 2 and one for a pet at second 4.
Amazon Rekognition also publishes an end-of-session notification with a summary when the stream processing session is complete.
See: newStreamProcessorNotificationChannel
smart constructor.
StreamProcessorNotificationChannel' | |
|
Instances
newStreamProcessorNotificationChannel Source #
Create a value of StreamProcessorNotificationChannel
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:sNSTopicArn:StreamProcessorNotificationChannel'
, streamProcessorNotificationChannel_sNSTopicArn
- The Amazon Resource Name (ARN) of the Amazon Simple
Notification Service topic to which Amazon Rekognition posts the
completion status.
streamProcessorNotificationChannel_sNSTopicArn :: Lens' StreamProcessorNotificationChannel Text Source #
The Amazon Resource Name (ARN) of the Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
StreamProcessorOutput
data StreamProcessorOutput Source #
Information about the Amazon Kinesis Data Streams stream to which a Amazon Rekognition Video stream processor streams the results of a video analysis. For more information, see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
See: newStreamProcessorOutput
smart constructor.
StreamProcessorOutput' | |
|
Instances
newStreamProcessorOutput :: StreamProcessorOutput Source #
Create a value of StreamProcessorOutput
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:kinesisDataStream:StreamProcessorOutput'
, streamProcessorOutput_kinesisDataStream
- The Amazon Kinesis Data Streams stream to which the Amazon Rekognition
stream processor streams the analysis results.
$sel:s3Destination:StreamProcessorOutput'
, streamProcessorOutput_s3Destination
- The Amazon S3 bucket location to which Amazon Rekognition publishes the
detailed inference results of a video analysis operation.
streamProcessorOutput_kinesisDataStream :: Lens' StreamProcessorOutput (Maybe KinesisDataStream) Source #
The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results.
streamProcessorOutput_s3Destination :: Lens' StreamProcessorOutput (Maybe S3Destination) Source #
The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation.
StreamProcessorSettings
data StreamProcessorSettings Source #
Input parameters used in a streaming video analyzed by a Amazon
Rekognition stream processor. You can use FaceSearch
to recognize
faces in a streaming video, or you can use ConnectedHome
to detect
labels.
See: newStreamProcessorSettings
smart constructor.
StreamProcessorSettings' | |
|
Instances
newStreamProcessorSettings :: StreamProcessorSettings Source #
Create a value of StreamProcessorSettings
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:connectedHome:StreamProcessorSettings'
, streamProcessorSettings_connectedHome
- Undocumented member.
$sel:faceSearch:StreamProcessorSettings'
, streamProcessorSettings_faceSearch
- Face search settings to use on a streaming video.
streamProcessorSettings_connectedHome :: Lens' StreamProcessorSettings (Maybe ConnectedHomeSettings) Source #
Undocumented member.
streamProcessorSettings_faceSearch :: Lens' StreamProcessorSettings (Maybe FaceSearchSettings) Source #
Face search settings to use on a streaming video.
StreamProcessorSettingsForUpdate
data StreamProcessorSettingsForUpdate Source #
The stream processor settings that you want to update. ConnectedHome
settings can be updated to detect different labels with a different
minimum confidence.
See: newStreamProcessorSettingsForUpdate
smart constructor.
StreamProcessorSettingsForUpdate' | |
|
Instances
ToJSON StreamProcessorSettingsForUpdate Source # | |
Generic StreamProcessorSettingsForUpdate Source # | |
Read StreamProcessorSettingsForUpdate Source # | |
Show StreamProcessorSettingsForUpdate Source # | |
NFData StreamProcessorSettingsForUpdate Source # | |
Eq StreamProcessorSettingsForUpdate Source # | |
Hashable StreamProcessorSettingsForUpdate Source # | |
type Rep StreamProcessorSettingsForUpdate Source # | |
Defined in Amazonka.Rekognition.Types.StreamProcessorSettingsForUpdate type Rep StreamProcessorSettingsForUpdate = D1 ('MetaData "StreamProcessorSettingsForUpdate" "Amazonka.Rekognition.Types.StreamProcessorSettingsForUpdate" "amazonka-rekognition-2.0-EaCrS9R3rWADqefEZvOx5B" 'False) (C1 ('MetaCons "StreamProcessorSettingsForUpdate'" 'PrefixI 'True) (S1 ('MetaSel ('Just "connectedHomeForUpdate") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedStrict) (Rec0 (Maybe ConnectedHomeSettingsForUpdate)))) |
newStreamProcessorSettingsForUpdate :: StreamProcessorSettingsForUpdate Source #
Create a value of StreamProcessorSettingsForUpdate
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:connectedHomeForUpdate:StreamProcessorSettingsForUpdate'
, streamProcessorSettingsForUpdate_connectedHomeForUpdate
- The label detection settings you want to use for your stream processor.
streamProcessorSettingsForUpdate_connectedHomeForUpdate :: Lens' StreamProcessorSettingsForUpdate (Maybe ConnectedHomeSettingsForUpdate) Source #
The label detection settings you want to use for your stream processor.
Summary
The S3 bucket that contains the training summary. The training summary includes aggregated evaluation metrics for the entire testing dataset and metrics for each individual label.
You get the training summary S3 bucket location by calling DescribeProjectVersions.
See: newSummary
smart constructor.
Instances
FromJSON Summary Source # | |
Generic Summary Source # | |
Read Summary Source # | |
Show Summary Source # | |
NFData Summary Source # | |
Defined in Amazonka.Rekognition.Types.Summary | |
Eq Summary Source # | |
Hashable Summary Source # | |
Defined in Amazonka.Rekognition.Types.Summary | |
type Rep Summary Source # | |
Defined in Amazonka.Rekognition.Types.Summary |
newSummary :: Summary Source #
Create a value of Summary
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:Summary'
, summary_s3Object
- Undocumented member.
Sunglasses
data Sunglasses Source #
Indicates whether or not the face is wearing sunglasses, and the confidence level in the determination.
See: newSunglasses
smart constructor.
Instances
newSunglasses :: Sunglasses Source #
Create a value of Sunglasses
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:Sunglasses'
, sunglasses_confidence
- Level of confidence in the determination.
$sel:value:Sunglasses'
, sunglasses_value
- Boolean value that indicates whether the face is wearing sunglasses or
not.
sunglasses_confidence :: Lens' Sunglasses (Maybe Double) Source #
Level of confidence in the determination.
sunglasses_value :: Lens' Sunglasses (Maybe Bool) Source #
Boolean value that indicates whether the face is wearing sunglasses or not.
TechnicalCueSegment
data TechnicalCueSegment Source #
Information about a technical cue segment. For more information, see SegmentDetection.
See: newTechnicalCueSegment
smart constructor.
TechnicalCueSegment' | |
|
Instances
newTechnicalCueSegment :: TechnicalCueSegment Source #
Create a value of TechnicalCueSegment
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:TechnicalCueSegment'
, technicalCueSegment_confidence
- The confidence that Amazon Rekognition Video has in the accuracy of the
detected segment.
$sel:type':TechnicalCueSegment'
, technicalCueSegment_type
- The type of the technical cue.
technicalCueSegment_confidence :: Lens' TechnicalCueSegment (Maybe Double) Source #
The confidence that Amazon Rekognition Video has in the accuracy of the detected segment.
technicalCueSegment_type :: Lens' TechnicalCueSegment (Maybe TechnicalCueType) Source #
The type of the technical cue.
TestingData
data TestingData Source #
The dataset used for testing. Optionally, if AutoCreate
is set, Amazon
Rekognition Custom Labels uses the training dataset to create a test
dataset with a temporary split of the training dataset.
See: newTestingData
smart constructor.
TestingData' | |
|
Instances
newTestingData :: TestingData Source #
Create a value of TestingData
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:assets:TestingData'
, testingData_assets
- The assets used for testing.
$sel:autoCreate:TestingData'
, testingData_autoCreate
- If specified, Amazon Rekognition Custom Labels temporarily splits the
training dataset (80%) to create a test dataset (20%) for the training
job. After training completes, the test dataset is not stored and the
training dataset reverts to its previous size.
testingData_assets :: Lens' TestingData (Maybe [Asset]) Source #
The assets used for testing.
testingData_autoCreate :: Lens' TestingData (Maybe Bool) Source #
If specified, Amazon Rekognition Custom Labels temporarily splits the training dataset (80%) to create a test dataset (20%) for the training job. After training completes, the test dataset is not stored and the training dataset reverts to its previous size.
TestingDataResult
data TestingDataResult Source #
SageMaker Ground Truth format manifest files for the input, output and validation datasets that are used and created during testing.
See: newTestingDataResult
smart constructor.
TestingDataResult' | |
|
Instances
newTestingDataResult :: TestingDataResult Source #
Create a value of TestingDataResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:input:TestingDataResult'
, testingDataResult_input
- The testing dataset that was supplied for training.
$sel:output:TestingDataResult'
, testingDataResult_output
- The subset of the dataset that was actually tested. Some images (assets)
might not be tested due to file formatting and other issues.
$sel:validation:TestingDataResult'
, testingDataResult_validation
- The location of the data validation manifest. The data validation
manifest is created for the test dataset during model training.
testingDataResult_input :: Lens' TestingDataResult (Maybe TestingData) Source #
The testing dataset that was supplied for training.
testingDataResult_output :: Lens' TestingDataResult (Maybe TestingData) Source #
The subset of the dataset that was actually tested. Some images (assets) might not be tested due to file formatting and other issues.
testingDataResult_validation :: Lens' TestingDataResult (Maybe ValidationData) Source #
The location of the data validation manifest. The data validation manifest is created for the test dataset during model training.
TextDetection
data TextDetection Source #
Information about a word or line of text detected by DetectText.
The DetectedText
field contains the text that Amazon Rekognition
detected in the image.
Every word and line has an identifier (Id
). Each word belongs to a
line and has a parent identifier (ParentId
) that identifies the line
of text in which the word appears. The word Id
is also an index for
the word within a line of words.
For more information, see Detecting text in the Amazon Rekognition Developer Guide.
See: newTextDetection
smart constructor.
TextDetection' | |
|
Instances
newTextDetection :: TextDetection Source #
Create a value of TextDetection
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:confidence:TextDetection'
, textDetection_confidence
- The confidence that Amazon Rekognition has in the accuracy of the
detected text and the accuracy of the geometry points around the
detected text.
$sel:detectedText:TextDetection'
, textDetection_detectedText
- The word or line of text recognized by Amazon Rekognition.
$sel:geometry:TextDetection'
, textDetection_geometry
- The location of the detected text on the image. Includes an axis aligned
coarse bounding box surrounding the text and a finer grain polygon for
more accurate spatial information.
$sel:id:TextDetection'
, textDetection_id
- The identifier for the detected text. The identifier is only unique for
a single call to DetectText
.
$sel:parentId:TextDetection'
, textDetection_parentId
- The Parent identifier for the detected text identified by the value of
ID
. If the type of detected text is LINE
, the value of ParentId
is
Null
.
$sel:type':TextDetection'
, textDetection_type
- The type of text that was detected.
textDetection_confidence :: Lens' TextDetection (Maybe Double) Source #
The confidence that Amazon Rekognition has in the accuracy of the detected text and the accuracy of the geometry points around the detected text.
textDetection_detectedText :: Lens' TextDetection (Maybe Text) Source #
The word or line of text recognized by Amazon Rekognition.
textDetection_geometry :: Lens' TextDetection (Maybe Geometry) Source #
The location of the detected text on the image. Includes an axis aligned coarse bounding box surrounding the text and a finer grain polygon for more accurate spatial information.
textDetection_id :: Lens' TextDetection (Maybe Natural) Source #
The identifier for the detected text. The identifier is only unique for
a single call to DetectText
.
textDetection_parentId :: Lens' TextDetection (Maybe Natural) Source #
The Parent identifier for the detected text identified by the value of
ID
. If the type of detected text is LINE
, the value of ParentId
is
Null
.
textDetection_type :: Lens' TextDetection (Maybe TextTypes) Source #
The type of text that was detected.
TextDetectionResult
data TextDetectionResult Source #
Information about text detected in a video. Includes the detected text, the time in milliseconds from the start of the video that the text was detected, and where it was detected on the screen.
See: newTextDetectionResult
smart constructor.
TextDetectionResult' | |
|
Instances
newTextDetectionResult :: TextDetectionResult Source #
Create a value of TextDetectionResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:textDetection:TextDetectionResult'
, textDetectionResult_textDetection
- Details about text detected in a video.
$sel:timestamp:TextDetectionResult'
, textDetectionResult_timestamp
- The time, in milliseconds from the start of the video, that the text was
detected. Note that Timestamp
is not guaranteed to be accurate to the
individual frame where the text first appears.
textDetectionResult_textDetection :: Lens' TextDetectionResult (Maybe TextDetection) Source #
Details about text detected in a video.
textDetectionResult_timestamp :: Lens' TextDetectionResult (Maybe Integer) Source #
The time, in milliseconds from the start of the video, that the text was
detected. Note that Timestamp
is not guaranteed to be accurate to the
individual frame where the text first appears.
TrainingData
data TrainingData Source #
The dataset used for training.
See: newTrainingData
smart constructor.
Instances
newTrainingData :: TrainingData Source #
Create a value of TrainingData
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:assets:TrainingData'
, trainingData_assets
- A Sagemaker GroundTruth manifest file that contains the training images
(assets).
trainingData_assets :: Lens' TrainingData (Maybe [Asset]) Source #
A Sagemaker GroundTruth manifest file that contains the training images (assets).
TrainingDataResult
data TrainingDataResult Source #
Sagemaker Groundtruth format manifest files for the input, output and validation datasets that are used and created during training.
See: newTrainingDataResult
smart constructor.
TrainingDataResult' | |
|
Instances
newTrainingDataResult :: TrainingDataResult Source #
Create a value of TrainingDataResult
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:input:TrainingDataResult'
, trainingDataResult_input
- The training assets that you supplied for training.
$sel:output:TrainingDataResult'
, trainingDataResult_output
- The images (assets) that were actually trained by Amazon Rekognition
Custom Labels.
$sel:validation:TrainingDataResult'
, trainingDataResult_validation
- The location of the data validation manifest. The data validation
manifest is created for the training dataset during model training.
trainingDataResult_input :: Lens' TrainingDataResult (Maybe TrainingData) Source #
The training assets that you supplied for training.
trainingDataResult_output :: Lens' TrainingDataResult (Maybe TrainingData) Source #
The images (assets) that were actually trained by Amazon Rekognition Custom Labels.
trainingDataResult_validation :: Lens' TrainingDataResult (Maybe ValidationData) Source #
The location of the data validation manifest. The data validation manifest is created for the training dataset during model training.
UnindexedFace
data UnindexedFace Source #
A face that IndexFaces detected, but didn't index. Use the Reasons
response attribute to determine why a face wasn't indexed.
See: newUnindexedFace
smart constructor.
UnindexedFace' | |
|
Instances
newUnindexedFace :: UnindexedFace Source #
Create a value of UnindexedFace
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:faceDetail:UnindexedFace'
, unindexedFace_faceDetail
- The structure that contains attributes of a face that
IndexFaces
detected, but didn't index.
$sel:reasons:UnindexedFace'
, unindexedFace_reasons
- An array of reasons that specify why a face wasn't indexed.
- EXTREME_POSE - The face is at a pose that can't be detected. For example, the head is turned too far away from the camera.
- EXCEEDS_MAX_FACES - The number of faces detected is already higher
than that specified by the
MaxFaces
input parameter for IndexFaces
. - LOW_BRIGHTNESS - The image is too dark.
- LOW_SHARPNESS - The image is too blurry.
- LOW_CONFIDENCE - The face was detected with a low confidence.
- SMALL_BOUNDING_BOX - The bounding box around the face is too small.
unindexedFace_faceDetail :: Lens' UnindexedFace (Maybe FaceDetail) Source #
The structure that contains attributes of a face that
IndexFaces
detected, but didn't index.
unindexedFace_reasons :: Lens' UnindexedFace (Maybe [Reason]) Source #
An array of reasons that specify why a face wasn't indexed.
- EXTREME_POSE - The face is at a pose that can't be detected. For example, the head is turned too far away from the camera.
- EXCEEDS_MAX_FACES - The number of faces detected is already higher
than that specified by the
MaxFaces
input parameter for IndexFaces
. - LOW_BRIGHTNESS - The image is too dark.
- LOW_SHARPNESS - The image is too blurry.
- LOW_CONFIDENCE - The face was detected with a low confidence.
- SMALL_BOUNDING_BOX - The bounding box around the face is too small.
ValidationData
data ValidationData Source #
Contains the Amazon S3 bucket location of the validation data for a model training job.
The validation data includes error information for individual JSON Lines in the dataset. For more information, see /Debugging a Failed Model Training/ in the Amazon Rekognition Custom Labels Developer Guide.
You get the ValidationData
object for the training dataset
(TrainingDataResult) and the test dataset (TestingDataResult) by calling
DescribeProjectVersions.
The assets array contains a single Asset object. The GroundTruthManifest field of the Asset object contains the S3 bucket location of the validation data.
See: newValidationData
smart constructor.
Instances
newValidationData :: ValidationData Source #
Create a value of ValidationData
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:assets:ValidationData'
, validationData_assets
- The assets that comprise the validation data.
validationData_assets :: Lens' ValidationData (Maybe [Asset]) Source #
The assets that comprise the validation data.
Video
data Video Source #
Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
operations such as StartLabelDetection use Video
to specify a video
for analysis. The supported file formats are .mp4, .mov and .avi.
See: newVideo
smart constructor.
Instances
ToJSON Video Source # | |
Defined in Amazonka.Rekognition.Types.Video | |
Generic Video Source # | |
Read Video Source # | |
Show Video Source # | |
NFData Video Source # | |
Defined in Amazonka.Rekognition.Types.Video | |
Eq Video Source # | |
Hashable Video Source # | |
Defined in Amazonka.Rekognition.Types.Video | |
type Rep Video Source # | |
Defined in Amazonka.Rekognition.Types.Video |
newVideo :: Video Source #
Create a value of Video
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:s3Object:Video'
, video_s3Object
- The Amazon S3 bucket name and file name for the video.
video_s3Object :: Lens' Video (Maybe S3Object) Source #
The Amazon S3 bucket name and file name for the video.
VideoMetadata
data VideoMetadata Source #
Information about a video that Amazon Rekognition analyzed.
Videometadata
is returned in every page of paginated responses from an
Amazon Rekognition video operation.
See: newVideoMetadata
smart constructor.
VideoMetadata' | |
|
Instances
newVideoMetadata :: VideoMetadata Source #
Create a value of VideoMetadata
with all optional fields omitted.
Use generic-lens or optics to modify other optional fields.
The following record fields are available, with the corresponding lenses provided for backwards compatibility:
$sel:codec:VideoMetadata'
, videoMetadata_codec
- Type of compression used in the analyzed video.
$sel:colorRange:VideoMetadata'
, videoMetadata_colorRange
- A description of the range of luminance values in a video, either
LIMITED (16 to 235) or FULL (0 to 255).
$sel:durationMillis:VideoMetadata'
, videoMetadata_durationMillis
- Length of the video in milliseconds.
$sel:format:VideoMetadata'
, videoMetadata_format
- Format of the analyzed video. Possible values are MP4, MOV and AVI.
$sel:frameHeight:VideoMetadata'
, videoMetadata_frameHeight
- Vertical pixel dimension of the video.
$sel:frameRate:VideoMetadata'
, videoMetadata_frameRate
- Number of frames per second in the video.
$sel:frameWidth:VideoMetadata'
, videoMetadata_frameWidth
- Horizontal pixel dimension of the video.
videoMetadata_codec :: Lens' VideoMetadata (Maybe Text) Source #
Type of compression used in the analyzed video.
videoMetadata_colorRange :: Lens' VideoMetadata (Maybe VideoColorRange) Source #
A description of the range of luminance values in a video, either LIMITED (16 to 235) or FULL (0 to 255).
videoMetadata_durationMillis :: Lens' VideoMetadata (Maybe Natural) Source #
Length of the video in milliseconds.
videoMetadata_format :: Lens' VideoMetadata (Maybe Text) Source #
Format of the analyzed video. Possible values are MP4, MOV and AVI.
videoMetadata_frameHeight :: Lens' VideoMetadata (Maybe Natural) Source #
Vertical pixel dimension of the video.
videoMetadata_frameRate :: Lens' VideoMetadata (Maybe Double) Source #
Number of frames per second in the video.
videoMetadata_frameWidth :: Lens' VideoMetadata (Maybe Natural) Source #
Horizontal pixel dimension of the video.