Safe Haskell | None |
---|---|
Language | Haskell2010 |
Synopsis
- data Analysis = Analysis {}
- newtype Tokenizer = Tokenizer Text
- data AnalyzerDefinition = AnalyzerDefinition {}
- data CharFilterDefinition
- data TokenizerDefinition
- data Ngram = Ngram {
- ngramMinGram :: Int
- ngramMaxGram :: Int
- ngramTokenChars :: [TokenChar]
- }
- data TokenChar
- data TokenFilterDefinition
- = TokenFilterDefinitionLowercase (Maybe Language)
- | TokenFilterDefinitionUppercase (Maybe Language)
- | TokenFilterDefinitionApostrophe
- | TokenFilterDefinitionReverse
- | TokenFilterDefinitionSnowball Language
- | TokenFilterDefinitionShingle Shingle
- | TokenFilterDefinitionStemmer Language
- | TokenFilterDefinitionStop (Either Language [StopWord])
- | TokenFilterDefinitionEdgeNgram NgramFilter (Maybe EdgeNgramFilterSide)
- | TokenFilterDefinitionNgram NgramFilter
- | TokenFilterTruncate Int
- data NgramFilter = NgramFilter {}
- ngramFilterToPairs :: NgramFilter -> [Pair]
- ngramFilterFromJSONObject :: Object -> Parser NgramFilter
- data EdgeNgramFilterSide
- data Language
- = Arabic
- | Armenian
- | Basque
- | Bengali
- | Brazilian
- | Bulgarian
- | Catalan
- | Cjk
- | Czech
- | Danish
- | Dutch
- | English
- | Finnish
- | French
- | Galician
- | German
- | German2
- | Greek
- | Hindi
- | Hungarian
- | Indonesian
- | Irish
- | Italian
- | Kp
- | Latvian
- | Lithuanian
- | Lovins
- | Norwegian
- | Persian
- | Porter
- | Portuguese
- | Romanian
- | Russian
- | Sorani
- | Spanish
- | Swedish
- | Thai
- | Turkish
- languageToText :: Language -> Text
- languageFromText :: Text -> Maybe Language
- data Shingle = Shingle {}
Documentation
Instances
Eq Analysis Source # | |
Show Analysis Source # | |
Generic Analysis Source # | |
ToJSON Analysis Source # | |
Defined in Database.Bloodhound.Internal.Analysis | |
FromJSON Analysis Source # | |
type Rep Analysis Source # | |
Defined in Database.Bloodhound.Internal.Analysis type Rep Analysis = D1 ('MetaData "Analysis" "Database.Bloodhound.Internal.Analysis" "bloodhound-0.20.0.2-9g12ggjhChv8jJT8C6bH2X" 'False) (C1 ('MetaCons "Analysis" 'PrefixI 'True) ((S1 ('MetaSel ('Just "analysisAnalyzer") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 (Map Text AnalyzerDefinition)) :*: S1 ('MetaSel ('Just "analysisTokenizer") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 (Map Text TokenizerDefinition))) :*: (S1 ('MetaSel ('Just "analysisTokenFilter") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 (Map Text TokenFilterDefinition)) :*: S1 ('MetaSel ('Just "analysisCharFilter") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 (Map Text CharFilterDefinition))))) |
data AnalyzerDefinition Source #
Instances
data CharFilterDefinition Source #
Character filters are used to preprocess the stream of characters before it is passed to the tokenizer.
Instances
Eq CharFilterDefinition Source # | |
Defined in Database.Bloodhound.Internal.Analysis (==) :: CharFilterDefinition -> CharFilterDefinition -> Bool # (/=) :: CharFilterDefinition -> CharFilterDefinition -> Bool # | |
Show CharFilterDefinition Source # | |
Defined in Database.Bloodhound.Internal.Analysis showsPrec :: Int -> CharFilterDefinition -> ShowS # show :: CharFilterDefinition -> String # showList :: [CharFilterDefinition] -> ShowS # | |
ToJSON CharFilterDefinition Source # | |
Defined in Database.Bloodhound.Internal.Analysis toJSON :: CharFilterDefinition -> Value # toEncoding :: CharFilterDefinition -> Encoding # toJSONList :: [CharFilterDefinition] -> Value # toEncodingList :: [CharFilterDefinition] -> Encoding # | |
FromJSON CharFilterDefinition Source # | |
Defined in Database.Bloodhound.Internal.Analysis parseJSON :: Value -> Parser CharFilterDefinition # parseJSONList :: Value -> Parser [CharFilterDefinition] # |
data TokenizerDefinition Source #
Instances
Ngram | |
|
Instances
Eq Ngram Source # | |
Show Ngram Source # | |
Generic Ngram Source # | |
type Rep Ngram Source # | |
Defined in Database.Bloodhound.Internal.Analysis type Rep Ngram = D1 ('MetaData "Ngram" "Database.Bloodhound.Internal.Analysis" "bloodhound-0.20.0.2-9g12ggjhChv8jJT8C6bH2X" 'False) (C1 ('MetaCons "Ngram" 'PrefixI 'True) (S1 ('MetaSel ('Just "ngramMinGram") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 Int) :*: (S1 ('MetaSel ('Just "ngramMaxGram") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 Int) :*: S1 ('MetaSel ('Just "ngramTokenChars") 'NoSourceUnpackedness 'NoSourceStrictness 'DecidedLazy) (Rec0 [TokenChar])))) |
Instances
Eq TokenChar Source # | |
Show TokenChar Source # | |
Generic TokenChar Source # | |
ToJSON TokenChar Source # | |
Defined in Database.Bloodhound.Internal.Analysis | |
FromJSON TokenChar Source # | |
type Rep TokenChar Source # | |
Defined in Database.Bloodhound.Internal.Analysis type Rep TokenChar = D1 ('MetaData "TokenChar" "Database.Bloodhound.Internal.Analysis" "bloodhound-0.20.0.2-9g12ggjhChv8jJT8C6bH2X" 'False) ((C1 ('MetaCons "TokenLetter" 'PrefixI 'False) (U1 :: Type -> Type) :+: C1 ('MetaCons "TokenDigit" 'PrefixI 'False) (U1 :: Type -> Type)) :+: (C1 ('MetaCons "TokenWhitespace" 'PrefixI 'False) (U1 :: Type -> Type) :+: (C1 ('MetaCons "TokenPunctuation" 'PrefixI 'False) (U1 :: Type -> Type) :+: C1 ('MetaCons "TokenSymbol" 'PrefixI 'False) (U1 :: Type -> Type)))) |
data TokenFilterDefinition Source #
Token filters are used to create custom analyzers.
Instances
data NgramFilter Source #
Instances
ngramFilterToPairs :: NgramFilter -> [Pair] Source #
data EdgeNgramFilterSide Source #
Instances
The set of languages that can be passed to various analyzers,
filters, etc. in Elasticsearch. Most data types in this module
that have a Language
field are actually only able to
handle a subset of these languages. Consult the official
Elasticsearch documentation to see what is actually supported.
Instances
languageToText :: Language -> Text Source #