{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StrictData #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}

-- Derived from AWS service descriptions, licensed under Apache 2.0.

-- |
-- Module      : Amazonka.SageMaker.Types.InputConfig
-- Copyright   : (c) 2013-2023 Brendan Hay
-- License     : Mozilla Public License, v. 2.0.
-- Maintainer  : Brendan Hay
-- Stability   : auto-generated
-- Portability : non-portable (GHC extensions)
module Amazonka.SageMaker.Types.InputConfig where

import qualified Amazonka.Core as Core
import qualified Amazonka.Core.Lens.Internal as Lens
import qualified Amazonka.Data as Data
import qualified Amazonka.Prelude as Prelude
import Amazonka.SageMaker.Types.Framework

-- | Contains information about the location of input model artifacts, the
-- name and shape of the expected data inputs, and the framework in which
-- the model was trained.
--
-- /See:/ 'newInputConfig' smart constructor.
data InputConfig = InputConfig'
  { -- | Specifies the framework version to use. This API field is only supported
    -- for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.
    --
    -- For information about framework versions supported for cloud targets and
    -- edge devices, see
    -- <https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html Cloud Supported Instance Types and Frameworks>
    -- and
    -- <https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html Edge Supported Frameworks>.
    frameworkVersion :: Prelude.Maybe Prelude.Text,
    -- | The S3 path where the model artifacts, which result from model training,
    -- are stored. This path must point to a single gzip compressed tar archive
    -- (.tar.gz suffix).
    s3Uri :: Prelude.Text,
    -- | Specifies the name and shape of the expected data inputs for your
    -- trained model in JSON dictionary form. The data inputs are
    -- InputConfig$Framework specific.
    --
    -- -   @TensorFlow@: You must specify the name and shape (NHWC format) of
    --     the expected data inputs using a dictionary format for your trained
    --     model. The dictionary formats required for the console and CLI are
    --     different.
    --
    --     -   Examples for one input:
    --
    --         -   If using the console, @{\"input\":[1,1024,1024,3]}@
    --
    --         -   If using the CLI, @{\\\"input\\\":[1,1024,1024,3]}@
    --
    --     -   Examples for two inputs:
    --
    --         -   If using the console,
    --             @{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}@
    --
    --         -   If using the CLI,
    --             @{\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}@
    --
    -- -   @KERAS@: You must specify the name and shape (NCHW format) of
    --     expected data inputs using a dictionary format for your trained
    --     model. Note that while Keras model artifacts should be uploaded in
    --     NHWC (channel-last) format, @DataInputConfig@ should be specified in
    --     NCHW (channel-first) format. The dictionary formats required for the
    --     console and CLI are different.
    --
    --     -   Examples for one input:
    --
    --         -   If using the console, @{\"input_1\":[1,3,224,224]}@
    --
    --         -   If using the CLI, @{\\\"input_1\\\":[1,3,224,224]}@
    --
    --     -   Examples for two inputs:
    --
    --         -   If using the console,
    --             @{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} @
    --
    --         -   If using the CLI,
    --             @{\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}@
    --
    -- -   @MXNET\/ONNX\/DARKNET@: You must specify the name and shape (NCHW
    --     format) of the expected data inputs in order using a dictionary
    --     format for your trained model. The dictionary formats required for
    --     the console and CLI are different.
    --
    --     -   Examples for one input:
    --
    --         -   If using the console, @{\"data\":[1,3,1024,1024]}@
    --
    --         -   If using the CLI, @{\\\"data\\\":[1,3,1024,1024]}@
    --
    --     -   Examples for two inputs:
    --
    --         -   If using the console,
    --             @{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} @
    --
    --         -   If using the CLI,
    --             @{\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}@
    --
    -- -   @PyTorch@: You can either specify the name and shape (NCHW format)
    --     of expected data inputs in order using a dictionary format for your
    --     trained model or you can specify the shape only using a list format.
    --     The dictionary formats required for the console and CLI are
    --     different. The list formats for the console and CLI are the same.
    --
    --     -   Examples for one input in dictionary format:
    --
    --         -   If using the console, @{\"input0\":[1,3,224,224]}@
    --
    --         -   If using the CLI, @{\\\"input0\\\":[1,3,224,224]}@
    --
    --     -   Example for one input in list format: @[[1,3,224,224]]@
    --
    --     -   Examples for two inputs in dictionary format:
    --
    --         -   If using the console,
    --             @{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}@
    --
    --         -   If using the CLI,
    --             @{\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]} @
    --
    --     -   Example for two inputs in list format:
    --         @[[1,3,224,224], [1,3,224,224]]@
    --
    -- -   @XGBOOST@: input data name and shape are not needed.
    --
    -- @DataInputConfig@ supports the following parameters for @CoreML@
    -- OutputConfig$TargetDevice (ML Model format):
    --
    -- -   @shape@: Input shape, for example
    --     @{\"input_1\": {\"shape\": [1,224,224,3]}}@. In addition to static
    --     input shapes, CoreML converter supports Flexible input shapes:
    --
    --     -   Range Dimension. You can use the Range Dimension feature if you
    --         know the input shape will be within some specific interval in
    --         that dimension, for example:
    --         @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}@
    --
    --     -   Enumerated shapes. Sometimes, the models are trained to work
    --         only on a select set of inputs. You can enumerate all supported
    --         input shapes, for example:
    --         @{\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}@
    --
    -- -   @default_shape@: Default input shape. You can set a default shape
    --     during conversion for both Range Dimension and Enumerated Shapes.
    --     For example
    --     @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}@
    --
    -- -   @type@: Input type. Allowed values: @Image@ and @Tensor@. By
    --     default, the converter generates an ML Model with inputs of type
    --     Tensor (MultiArray). You can set the input type to @Image@. The
    --     @Image@ input type requires additional input parameters such as @bias@ and
    --     @scale@.
    --
    -- -   @bias@: If the input type is an Image, you need to provide the bias
    --     vector.
    --
    -- -   @scale@: If the input type is an Image, you need to provide a scale
    --     factor.
    --
    -- CoreML @ClassifierConfig@ parameters can be specified using
    -- OutputConfig$CompilerOptions. The CoreML converter supports TensorFlow and
    -- PyTorch models. CoreML conversion examples:
    --
    -- -   Tensor type input:
    --
    --     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}@
    --
    -- -   Tensor type input without input name (PyTorch):
    --
    --     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]@
    --
    -- -   Image type input:
    --
    --     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}@
    --
    --     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
    --
    -- -   Image type input without input name (PyTorch):
    --
    --     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]@
    --
    --     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
    --
    -- Depending on the model format, @DataInputConfig@ requires the following
    -- parameters for @ml_eia2@
    -- <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice OutputConfig:TargetDevice>.
    --
    -- -   For TensorFlow models saved in the SavedModel format, specify the
    --     input names from @signature_def_key@ and the input model shapes for
    --     @DataInputConfig@. Specify the @signature_def_key@ in
    --     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
    --     if the model does not use TensorFlow\'s default signature def key.
    --     For example:
    --
    --     -   @\"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}@
    --
    --     -   @\"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}@
    --
    -- -   For TensorFlow models saved as a frozen graph, specify the input
    --     tensor names and shapes in @DataInputConfig@ and the output tensor
    --     names for @output_names@ in
    --     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
    --     . For example:
    --
    --     -   @\"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}@
    --
    --     -   @\"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}@
    dataInputConfig :: Prelude.Text,
    -- | Identifies the framework in which the model was trained. For example:
    -- TENSORFLOW.
    framework :: Framework
  }
  deriving (Prelude.Eq, Prelude.Read, Prelude.Show, Prelude.Generic)
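
-- When building this record from Haskell code, 'dataInputConfig' should carry
-- the plain JSON shown in the console examples above; the extra backslashes in
-- the CLI examples are shell quoting only. A minimal sketch for a hypothetical
-- single TensorFlow input named "input" (the OverloadedStrings pragma above
-- turns the literal into 'Prelude.Text'):
--
--   exampleDataInput :: Prelude.Text
--   exampleDataInput = "{\"input\":[1,1024,1024,3]}"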

-- |
-- Create a value of 'InputConfig' with all optional fields omitted.
--
-- Use <https://hackage.haskell.org/package/generic-lens generic-lens> or <https://hackage.haskell.org/package/optics optics> to modify other optional fields.
--
-- The following record fields are available, with the corresponding lenses provided
-- for backwards compatibility:
--
-- 'frameworkVersion', 'inputConfig_frameworkVersion' - Specifies the framework version to use. This API field is only supported
-- for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.
--
-- For information about framework versions supported for cloud targets and
-- edge devices, see
-- <https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html Cloud Supported Instance Types and Frameworks>
-- and
-- <https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html Edge Supported Frameworks>.
--
-- 's3Uri', 'inputConfig_s3Uri' - The S3 path where the model artifacts, which result from model training,
-- are stored. This path must point to a single gzip compressed tar archive
-- (.tar.gz suffix).
--
-- 'dataInputConfig', 'inputConfig_dataInputConfig' - Specifies the name and shape of the expected data inputs for your
-- trained model in JSON dictionary form. The data inputs are
-- InputConfig$Framework specific.
--
-- -   @TensorFlow@: You must specify the name and shape (NHWC format) of
--     the expected data inputs using a dictionary format for your trained
--     model. The dictionary formats required for the console and CLI are
--     different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input\":[1,1024,1024,3]}@
--
--         -   If using the CLI, @{\\\"input\\\":[1,1024,1024,3]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}@
--
--         -   If using the CLI,
--             @{\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}@
--
-- -   @KERAS@: You must specify the name and shape (NCHW format) of
--     expected data inputs using a dictionary format for your trained
--     model. Note that while Keras model artifacts should be uploaded in
--     NHWC (channel-last) format, @DataInputConfig@ should be specified in
--     NCHW (channel-first) format. The dictionary formats required for the
--     console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input_1\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input_1\\\":[1,3,224,224]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} @
--
--         -   If using the CLI,
--             @{\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}@
--
-- -   @MXNET\/ONNX\/DARKNET@: You must specify the name and shape (NCHW
--     format) of the expected data inputs in order using a dictionary
--     format for your trained model. The dictionary formats required for
--     the console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"data\":[1,3,1024,1024]}@
--
--         -   If using the CLI, @{\\\"data\\\":[1,3,1024,1024]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} @
--
--         -   If using the CLI,
--             @{\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}@
--
-- -   @PyTorch@: You can either specify the name and shape (NCHW format)
--     of expected data inputs in order using a dictionary format for your
--     trained model or you can specify the shape only using a list format.
--     The dictionary formats required for the console and CLI are
--     different. The list formats for the console and CLI are the same.
--
--     -   Examples for one input in dictionary format:
--
--         -   If using the console, @{\"input0\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input0\\\":[1,3,224,224]}@
--
--     -   Example for one input in list format: @[[1,3,224,224]]@
--
--     -   Examples for two inputs in dictionary format:
--
--         -   If using the console,
--             @{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}@
--
--         -   If using the CLI,
--             @{\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]} @
--
--     -   Example for two inputs in list format:
--         @[[1,3,224,224], [1,3,224,224]]@
--
-- -   @XGBOOST@: input data name and shape are not needed.
--
-- @DataInputConfig@ supports the following parameters for @CoreML@
-- OutputConfig$TargetDevice (ML Model format):
--
-- -   @shape@: Input shape, for example
--     @{\"input_1\": {\"shape\": [1,224,224,3]}}@. In addition to static
--     input shapes, CoreML converter supports Flexible input shapes:
--
--     -   Range Dimension. You can use the Range Dimension feature if you
--         know the input shape will be within some specific interval in
--         that dimension, for example:
--         @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}@
--
--     -   Enumerated shapes. Sometimes, the models are trained to work
--         only on a select set of inputs. You can enumerate all supported
--         input shapes, for example:
--         @{\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}@
--
-- -   @default_shape@: Default input shape. You can set a default shape
--     during conversion for both Range Dimension and Enumerated Shapes.
--     For example
--     @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}@
--
-- -   @type@: Input type. Allowed values: @Image@ and @Tensor@. By
--     default, the converter generates an ML Model with inputs of type
--     Tensor (MultiArray). You can set the input type to @Image@. The
--     @Image@ input type requires additional input parameters such as @bias@ and
--     @scale@.
--
-- -   @bias@: If the input type is an Image, you need to provide the bias
--     vector.
--
-- -   @scale@: If the input type is an Image, you need to provide a scale
--     factor.
--
-- CoreML @ClassifierConfig@ parameters can be specified using
-- OutputConfig$CompilerOptions. The CoreML converter supports TensorFlow and
-- PyTorch models. CoreML conversion examples:
--
-- -   Tensor type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}@
--
-- -   Tensor type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]@
--
-- -   Image type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- -   Image type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- Depending on the model format, @DataInputConfig@ requires the following
-- parameters for @ml_eia2@
-- <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice OutputConfig:TargetDevice>.
--
-- -   For TensorFlow models saved in the SavedModel format, specify the
--     input names from @signature_def_key@ and the input model shapes for
--     @DataInputConfig@. Specify the @signature_def_key@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     if the model does not use TensorFlow\'s default signature def key.
--     For example:
--
--     -   @\"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}@
--
-- -   For TensorFlow models saved as a frozen graph, specify the input
--     tensor names and shapes in @DataInputConfig@ and the output tensor
--     names for @output_names@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     . For example:
--
--     -   @\"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}@
--
-- 'framework', 'inputConfig_framework' - Identifies the framework in which the model was trained. For example:
-- TENSORFLOW.
newInputConfig ::
  -- | 's3Uri'
  Prelude.Text ->
  -- | 'dataInputConfig'
  Prelude.Text ->
  -- | 'framework'
  Framework ->
  InputConfig
newInputConfig pS3Uri_ pDataInputConfig_ pFramework_ =
  InputConfig'
    { frameworkVersion = Prelude.Nothing,
      s3Uri = pS3Uri_,
      dataInputConfig = pDataInputConfig_,
      framework = pFramework_
    }
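
-- A minimal construction sketch; the S3 URI and input JSON are hypothetical,
-- and @Framework_TENSORFLOW@ is assumed to be one of the patterns exported by
-- "Amazonka.SageMaker.Types.Framework":
--
--   exampleInputConfig :: InputConfig
--   exampleInputConfig =
--     newInputConfig
--       "s3://my-bucket/model/model.tar.gz"
--       "{\"input\":[1,1024,1024,3]}"
--       Framework_TENSORFLOW
--
-- 'frameworkVersion' starts out as 'Prelude.Nothing'; it can be filled in
-- afterwards, for example with the 'inputConfig_frameworkVersion' lens below.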

-- | Specifies the framework version to use. This API field is only supported
-- for the MXNet, PyTorch, TensorFlow and TensorFlow Lite frameworks.
--
-- For information about framework versions supported for cloud targets and
-- edge devices, see
-- <https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-cloud.html Cloud Supported Instance Types and Frameworks>
-- and
-- <https://docs.aws.amazon.com/sagemaker/latest/dg/neo-supported-devices-edge-frameworks.html Edge Supported Frameworks>.
inputConfig_frameworkVersion :: Lens.Lens' InputConfig (Prelude.Maybe Prelude.Text)
inputConfig_frameworkVersion = Lens.lens (\InputConfig' {frameworkVersion} -> frameworkVersion) (\s@InputConfig' {} a -> s {frameworkVersion = a} :: InputConfig)
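
-- A usage sketch for the lenses in this module, reusing the hypothetical
-- @exampleInputConfig@ above. It assumes @Lens.view@ and @Lens.set@ are
-- re-exported by "Amazonka.Core.Lens.Internal"; any lens library's @(^.)@ and
-- @(.~)@ would do the same job:
--
--   withVersion :: InputConfig
--   withVersion =
--     Lens.set inputConfig_frameworkVersion (Prelude.Just "2.9") exampleInputConfig
--
--   readVersion :: Prelude.Maybe Prelude.Text
--   readVersion = Lens.view inputConfig_frameworkVersion withVersion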

-- | The S3 path where the model artifacts, which result from model training,
-- are stored. This path must point to a single gzip compressed tar archive
-- (.tar.gz suffix).
inputConfig_s3Uri :: Lens.Lens' InputConfig Prelude.Text
inputConfig_s3Uri = Lens.lens (\InputConfig' {s3Uri} -> s3Uri) (\s@InputConfig' {} a -> s {s3Uri = a} :: InputConfig)

-- | Specifies the name and shape of the expected data inputs for your
-- trained model in JSON dictionary form. The data inputs are
-- InputConfig$Framework specific.
--
-- -   @TensorFlow@: You must specify the name and shape (NHWC format) of
--     the expected data inputs using a dictionary format for your trained
--     model. The dictionary formats required for the console and CLI are
--     different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input\":[1,1024,1024,3]}@
--
--         -   If using the CLI, @{\\\"input\\\":[1,1024,1024,3]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"data1\": [1,28,28,1], \"data2\":[1,28,28,1]}@
--
--         -   If using the CLI,
--             @{\\\"data1\\\": [1,28,28,1], \\\"data2\\\":[1,28,28,1]}@
--
-- -   @KERAS@: You must specify the name and shape (NCHW format) of
--     expected data inputs using a dictionary format for your trained
--     model. Note that while Keras model artifacts should be uploaded in
--     NHWC (channel-last) format, @DataInputConfig@ should be specified in
--     NCHW (channel-first) format. The dictionary formats required for the
--     console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"input_1\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input_1\\\":[1,3,224,224]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"input_1\": [1,3,224,224], \"input_2\":[1,3,224,224]} @
--
--         -   If using the CLI,
--             @{\\\"input_1\\\": [1,3,224,224], \\\"input_2\\\":[1,3,224,224]}@
--
-- -   @MXNET\/ONNX\/DARKNET@: You must specify the name and shape (NCHW
--     format) of the expected data inputs in order using a dictionary
--     format for your trained model. The dictionary formats required for
--     the console and CLI are different.
--
--     -   Examples for one input:
--
--         -   If using the console, @{\"data\":[1,3,1024,1024]}@
--
--         -   If using the CLI, @{\\\"data\\\":[1,3,1024,1024]}@
--
--     -   Examples for two inputs:
--
--         -   If using the console,
--             @{\"var1\": [1,1,28,28], \"var2\":[1,1,28,28]} @
--
--         -   If using the CLI,
--             @{\\\"var1\\\": [1,1,28,28], \\\"var2\\\":[1,1,28,28]}@
--
-- -   @PyTorch@: You can either specify the name and shape (NCHW format)
--     of expected data inputs in order using a dictionary format for your
--     trained model or you can specify the shape only using a list format.
--     The dictionary formats required for the console and CLI are
--     different. The list formats for the console and CLI are the same.
--
--     -   Examples for one input in dictionary format:
--
--         -   If using the console, @{\"input0\":[1,3,224,224]}@
--
--         -   If using the CLI, @{\\\"input0\\\":[1,3,224,224]}@
--
--     -   Example for one input in list format: @[[1,3,224,224]]@
--
--     -   Examples for two inputs in dictionary format:
--
--         -   If using the console,
--             @{\"input0\":[1,3,224,224], \"input1\":[1,3,224,224]}@
--
--         -   If using the CLI,
--             @{\\\"input0\\\":[1,3,224,224], \\\"input1\\\":[1,3,224,224]} @
--
--     -   Example for two inputs in list format:
--         @[[1,3,224,224], [1,3,224,224]]@
--
-- -   @XGBOOST@: input data name and shape are not needed.
--
-- @DataInputConfig@ supports the following parameters for @CoreML@
-- OutputConfig$TargetDevice (ML Model format):
--
-- -   @shape@: Input shape, for example
--     @{\"input_1\": {\"shape\": [1,224,224,3]}}@. In addition to static
--     input shapes, CoreML converter supports Flexible input shapes:
--
--     -   Range Dimension. You can use the Range Dimension feature if you
--         know the input shape will be within some specific interval in
--         that dimension, for example:
--         @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}@
--
--     -   Enumerated shapes. Sometimes, the models are trained to work
--         only on a select set of inputs. You can enumerate all supported
--         input shapes, for example:
--         @{\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}@
--
-- -   @default_shape@: Default input shape. You can set a default shape
--     during conversion for both Range Dimension and Enumerated Shapes.
--     For example
--     @{\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}@
--
-- -   @type@: Input type. Allowed values: @Image@ and @Tensor@. By
--     default, the converter generates an ML Model with inputs of type
--     Tensor (MultiArray). You can set the input type to @Image@. The
--     @Image@ input type requires additional input parameters such as @bias@ and
--     @scale@.
--
-- -   @bias@: If the input type is an Image, you need to provide the bias
--     vector.
--
-- -   @scale@: If the input type is an Image, you need to provide a scale
--     factor.
--
-- CoreML @ClassifierConfig@ parameters can be specified using
-- OutputConfig$CompilerOptions. The CoreML converter supports TensorFlow and
-- PyTorch models. CoreML conversion examples:
--
-- -   Tensor type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3]}}@
--
-- -   Tensor type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224]}]@
--
-- -   Image type input:
--
--     -   @\"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\": [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- -   Image type input without input name (PyTorch):
--
--     -   @\"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\": [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]@
--
--     -   @\"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}@
--
-- Depending on the model format, @DataInputConfig@ requires the following
-- parameters for @ml_eia2@
-- <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-TargetDevice OutputConfig:TargetDevice>.
--
-- -   For TensorFlow models saved in the SavedModel format, specify the
--     input names from @signature_def_key@ and the input model shapes for
--     @DataInputConfig@. Specify the @signature_def_key@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     if the model does not use TensorFlow\'s default signature def key.
--     For example:
--
--     -   @\"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}@
--
-- -   For TensorFlow models saved as a frozen graph, specify the input
--     tensor names and shapes in @DataInputConfig@ and the output tensor
--     names for @output_names@ in
--     <https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_OutputConfig.html#sagemaker-Type-OutputConfig-CompilerOptions OutputConfig:CompilerOptions>
--     . For example:
--
--     -   @\"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}@
--
--     -   @\"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}@
inputConfig_dataInputConfig :: Lens.Lens' InputConfig Prelude.Text
inputConfig_dataInputConfig = Lens.lens (\InputConfig' {dataInputConfig} -> dataInputConfig) (\s@InputConfig' {} a -> s {dataInputConfig = a} :: InputConfig)

-- | Identifies the framework in which the model was trained. For example:
-- TENSORFLOW.
inputConfig_framework :: Lens.Lens' InputConfig Framework
inputConfig_framework = Lens.lens (\InputConfig' {framework} -> framework) (\s@InputConfig' {} a -> s {framework = a} :: InputConfig)

instance Data.FromJSON InputConfig where
  parseJSON =
    Data.withObject
      "InputConfig"
      ( \x ->
          InputConfig'
            Prelude.<$> (x Data..:? "FrameworkVersion")
            Prelude.<*> (x Data..: "S3Uri")
            Prelude.<*> (x Data..: "DataInputConfig")
            Prelude.<*> (x Data..: "Framework")
      )
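
-- A decoding sketch, assuming 'Data.FromJSON' is the aeson class re-exported
-- through "Amazonka.Data" so that plain aeson decoding applies; the payload is
-- hypothetical:
--
--   import qualified Data.Aeson as Aeson
--
--   decoded :: Prelude.Either Prelude.String InputConfig
--   decoded =
--     Aeson.eitherDecode
--       "{\"S3Uri\":\"s3://my-bucket/model.tar.gz\",\"DataInputConfig\":\"{\\\"input\\\":[1,1024,1024,3]}\",\"Framework\":\"TENSORFLOW\"}"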

instance Prelude.Hashable InputConfig where
  hashWithSalt _salt InputConfig' {..} =
    _salt
      `Prelude.hashWithSalt` frameworkVersion
      `Prelude.hashWithSalt` s3Uri
      `Prelude.hashWithSalt` dataInputConfig
      `Prelude.hashWithSalt` framework

instance Prelude.NFData InputConfig where
  rnf InputConfig' {..} =
    Prelude.rnf frameworkVersion
      `Prelude.seq` Prelude.rnf s3Uri
      `Prelude.seq` Prelude.rnf dataInputConfig
      `Prelude.seq` Prelude.rnf framework

instance Data.ToJSON InputConfig where
  toJSON InputConfig' {..} =
    Data.object
      ( Prelude.catMaybes
          [ ("FrameworkVersion" Data..=)
              Prelude.<$> frameworkVersion,
            Prelude.Just ("S3Uri" Data..= s3Uri),
            Prelude.Just
              ("DataInputConfig" Data..= dataInputConfig),
            Prelude.Just ("Framework" Data..= framework)
          ]
      )
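
-- An encoding sketch: serialising the hypothetical @exampleInputConfig@ from
-- above (again assuming 'Data.ToJSON' is the aeson class) produces an object
-- with the AWS field names, omitting @FrameworkVersion@ while it is
-- 'Prelude.Nothing':
--
--   {"S3Uri":"s3://my-bucket/model/model.tar.gz",
--    "DataInputConfig":"{\"input\":[1,1024,1024,3]}",
--    "Framework":"TENSORFLOW"}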