stdio-0.2.0.0: A simple and high performance IO toolkit for Haskell

Copyright: (c) Dong Han 2017-2018
License: BSD
Maintainer: winterland1989@gmail.com
Stability: experimental
Portability: non-portable
Safe Haskell: None
Language: Haskell2010

Std.Data.Text

Contents

Description

A Text simply wraps a Bytes value that contains UTF-8 encoded code points; you can use validate / validateMaybe to construct a Text from raw bytes.
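
As an illustrative sketch (assuming the module is imported qualified as T), construction goes through pack for String literals or validate for raw UTF-8 Bytes:

import qualified Std.Data.Text as T

-- build a Text from a String
hello :: T.Text
hello = T.pack "hello"

-- validating the UTF-8 bytes extracted from a Text yields an equal Text
roundTrip :: T.Text -> Bool
roundTrip t = T.validate (T.getUTF8Bytes t) == t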

Synopsis

Text type

data Text Source #

Text represented as UTF-8 encoded Bytes

Instances
Eq Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

(==) :: Text -> Text -> Bool #

(/=) :: Text -> Text -> Bool #

Ord Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

compare :: Text -> Text -> Ordering #

(<) :: Text -> Text -> Bool #

(<=) :: Text -> Text -> Bool #

(>) :: Text -> Text -> Bool #

(>=) :: Text -> Text -> Bool #

max :: Text -> Text -> Text #

min :: Text -> Text -> Text #

Read Text Source # 
Instance details

Defined in Std.Data.Text.Base

Show Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

showsPrec :: Int -> Text -> ShowS #

show :: Text -> String #

showList :: [Text] -> ShowS #

IsString Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

fromString :: String -> Text #

Semigroup Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

(<>) :: Text -> Text -> Text #

sconcat :: NonEmpty Text -> Text #

stimes :: Integral b => b -> Text -> Text #

Monoid Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

mempty :: Text #

mappend :: Text -> Text -> Text #

mconcat :: [Text] -> Text #

NFData Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

rnf :: Text -> () #

Hashable Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

hashWithSalt :: Int -> Text -> Int

hash :: Text -> Int

Arbitrary Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

arbitrary :: Gen Text

shrink :: Text -> [Text]

CoArbitrary Text Source # 
Instance details

Defined in Std.Data.Text.Base

Methods

coarbitrary :: Text -> Gen b -> Gen b

ToText Text Source #

To stay in sync with the Show instance's escaping rules, we reuse show here, so it won't be as fast as a memcpy.

Instance details

Defined in Std.Data.TextBuilder

FromValue Text Source # 
Instance details

Defined in Std.Data.JSON.Base

EncodeJSON Text Source # 
Instance details

Defined in Std.Data.JSON.Base

Methods

encodeJSON :: Text -> Builder () Source #

ToValue Text Source # 
Instance details

Defined in Std.Data.JSON.Base

Methods

toValue :: Text -> Value Source #

LEON Text Source # 
Instance details

Defined in Std.Data.LEON

FromValue a => FromValue (FlatMap Text a) Source #

The default instance prefers the later key.

Instance details

Defined in Std.Data.JSON.Base

FromValue a => FromValue (HashMap Text a) Source #

The default instance prefers the later key.

Instance details

Defined in Std.Data.JSON.Base

Methods

fromValue :: Value -> Converter (HashMap Text a) Source #

EncodeJSON a => EncodeJSON (FlatMap Text a) Source # 
Instance details

Defined in Std.Data.JSON.Base

EncodeJSON a => EncodeJSON (HashMap Text a) Source # 
Instance details

Defined in Std.Data.JSON.Base

Methods

encodeJSON :: HashMap Text a -> Builder () Source #

ToValue a => ToValue (FlatMap Text a) Source # 
Instance details

Defined in Std.Data.JSON.Base

Methods

toValue :: FlatMap Text a -> Value Source #

ToValue a => ToValue (HashMap Text a) Source # 
Instance details

Defined in Std.Data.JSON.Base

Methods

toValue :: HashMap Text a -> Value Source #

getUTF8Bytes :: Text -> Bytes Source #

Extract UTF-8 encoded Bytes from Text

validate :: HasCallStack => Bytes -> Text Source #

O(n) Validate that a sequence of bytes is UTF-8 encoded.

Throws an error if an invalid code point is encountered.

Basic creating

empty :: Text Source #

O(1). Empty text.

singleton :: Char -> Text Source #

O(1). Single char text.

copy :: Text -> Text Source #

O(n). Copy a text from slice.

Building text

replicate :: Int -> Char -> Text Source #

O(n) Replicate a char n times.

cycleN :: Int -> Text -> Text Source #

O(n*m) cycleN repeats a text n times.
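
For illustration (assuming OverloadedStrings):

replicate 3 'x' == "xxx"
cycleN 2 "ab"   == "abab"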

Conversion between list

pack :: String -> Text Source #

O(n) Convert a String into a Text.

Alias for packN defaultInitSize; will be rewritten to a memcpy if possible.

packN :: Int -> String -> Text Source #

O(n) Convert a list into a text with an approximate size (in bytes, not code points).

If the encoded bytes length is larger than the size given, we simply double the buffer size and continue building.

This function is a good consumer in the sense of build/foldr fusion.
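
As an illustrative sketch (not from the original docs): the size hint only affects the initial buffer allocation, never the result, e.g.

packN 8 "hello world" == pack "hello world"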

packR :: String -> Text Source #

O(n) Alias for packRN defaultInitSize.

packRN :: Int -> String -> Text Source #

O(n) packN in reverse order.

This function is a good consumer in the sense of build/foldr fusion.

unpack :: Text -> String Source #

O(n) Convert text to a char list.

Unpacking is done lazily, i.e. we retain a reference to the array until all elements are consumed.

This function is a good producer in the sense of build/foldr fusion.

unpackR :: Text -> String Source #

O(n) Convert text to a list in reverse order.

This function is a good producer in the sense of build/foldr fusion.
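
For illustration (assuming OverloadedStrings):

unpack  "abc" == "abc"
unpackR "abc" == "cba"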

Conversion between codepoint vector

fromVector :: PrimVector Char -> Text Source #

O(n) convert from a char vector.

toVector :: Text -> PrimVector Char Source #

O(n) convert to a char vector.
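
An illustrative round-trip property sketch:

fromVector (toVector t) == t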

Basic interface

null :: Text -> Bool Source #

O(1) Test whether a text is empty.

length :: Text -> Int Source #

O(n) The char length of a text.

append :: Text -> Text -> Text Source #

O(m+n)

There's no need to guard against empty texts yourself; we do that for you, so appending an empty text is a no-op.

map' :: (Char -> Char) -> Text -> Text Source #

O(n) map f t is the Text obtained by applying f to each char of t. Performs replacement on invalid scalar values.

imap' :: (Int -> Char -> Char) -> Text -> Text Source #

Strict mapping with index.
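
For illustration (toUpper here is Data.Char.toUpper, and OverloadedStrings is assumed):

map'  Data.Char.toUpper "hello"                                   == "HELLO"
imap' (\i c -> if even i then Data.Char.toUpper c else c) "hello" == "HeLlO"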

foldl' :: (b -> Char -> b) -> b -> Text -> b Source #

Strict left to right fold.

ifoldl' :: (b -> Int -> Char -> b) -> b -> Text -> b Source #

Strict left to right fold with index.

foldr' :: (Char -> b -> b) -> b -> Text -> b Source #

Strict right to left fold

ifoldr' :: (Int -> Char -> b -> b) -> b -> Text -> b Source #

Strict right to left fold with index.

NOTE: the index counts from 0 at the left end, not backwards from the right.
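
For illustration (assuming OverloadedStrings):

foldl'  (\n _ -> n + 1) 0 "abc"               == 3
ifoldl' (\acc i c -> acc ++ [(i, c)]) [] "ab" == [(0,'a'),(1,'b')]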

concat :: [Text] -> Text Source #

O(n) Concatenate a list of text.

Note: concat has to force the entire list in order to filter out empty texts and calculate the total length for allocation.

concatMap :: (Char -> Text) -> Text -> Text Source #

Map a function over a text and concatenate the results

Special folds

count :: Char -> Text -> Int Source #

O(n) count returns the number of occurrences of a char in a text.

all :: (Char -> Bool) -> Text -> Bool Source #

O(n) Applied to a predicate and text, all determines if all chars of the text satisfy the predicate.

any :: (Char -> Bool) -> Text -> Bool Source #

O(n) Applied to a predicate and a text, any determines if any chars of the text satisfy the predicate.
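
For illustration (isLower is Data.Char.isLower; OverloadedStrings assumed):

count 's' "mississippi"       == 4
all Data.Char.isLower "hello" == True
any (== ' ') "hello"          == False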

Searching by equality

elem :: Char -> Text -> Bool Source #

O(n) elem tests whether the given char occurs in the given text.

notElem :: Char -> Text -> Bool Source #

O(n) not . elem

Slice manipulation

cons :: Char -> Text -> Text Source #

O(n) cons is analogous to (:) for lists, but of different complexity, as it requires making a copy.

snoc :: Text -> Char -> Text Source #

O(n) Append a char to the end of a text.

uncons :: Text -> Maybe (Char, Text) Source #

O(1) Extract the head and tail of a text, return Nothing if it is empty.

unsnoc :: Text -> Maybe (Text, Char) Source #

O(1) Extract the init and last of a text, return Nothing if text is empty.
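
For illustration (assuming OverloadedStrings):

cons 'h' "ello" == "hello"
snoc "hell" 'o' == "hello"
uncons "hello"  == Just ('h', "ello")
unsnoc "hello"  == Just ("hell", 'o')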

headMaybe :: Text -> Maybe Char Source #

O(1) Extract the first char of a text.

tailMayEmpty :: Text -> Text Source #

O(1) Extract the chars after the head of a text.

NOTE: tailMayEmpty returns an empty text when given an empty text.

lastMaybe :: Text -> Maybe Char Source #

O(1) Extract the last char of a text.

initMayEmpty :: Text -> Text Source #

O(1) Extract the chars before the last one.

NOTE: initMayEmpty returns an empty text when given an empty text.

inits :: Text -> [Text] Source #

O(n) Return all initial segments of the given text, empty first.

tails :: Text -> [Text] Source #

O(n) Return all final segments of the given text, whole text first.
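
For illustration (assuming OverloadedStrings):

inits "ab" == ["", "a", "ab"]
tails "ab" == ["ab", "b", ""]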

take :: Int -> Text -> Text Source #

O(1) take n, applied to a text xs, returns the prefix of xs of length n, or xs itself if n > length xs.

drop :: Int -> Text -> Text Source #

O(1) drop n xs returns the suffix of xs after the first n chars, or the empty text if n > length xs.

takeR :: Int -> Text -> Text Source #

O(1) takeR n, applied to a text xs, returns the suffix of xs of length n, or xs itself if n > length xs.

dropR :: Int -> Text -> Text Source #

O(1) dropR n xs returns the prefix of xs before the last n chars, or the empty text if n > length xs.

slice :: Int -> Int -> Text -> Text Source #

O(1) Extract a sub-range text with the given start index and length.

This function is total, just like take/drop; an index or length that exceeds the range is ignored, e.g.

slice 1 3 "hello"   == "ell"
slice -1 -1 "hello" == ""
slice -2 2 "hello"  == ""
slice 2 10 "hello"  == "llo"

This holds for all x and y: slice x y vs == (drop x . take (x+y)) vs

splitAt :: Int -> Text -> (Text, Text) Source #

O(n) splitAt n xs is equivalent to (take n xs, drop n xs).

takeWhile :: (Char -> Bool) -> Text -> Text Source #

O(n) Applied to a predicate p and a text t, returns the longest prefix (possibly empty) of t of elements that satisfy p.

takeWhileR :: (Char -> Bool) -> Text -> Text Source #

O(n) Applied to a predicate p and a text t, returns the longest suffix (possibly empty) of t of elements that satisfy p.

dropWhile :: (Char -> Bool) -> Text -> Text Source #

O(n) Applied to a predicate p and a text vs, returns the suffix (possibly empty) remaining after takeWhile p vs.

dropWhileR :: (Char -> Bool) -> Text -> Text Source #

O(n) Applied to a predicate p and a text vs, returns the prefix (possibly empty) remaining before takeWhileR p vs.

dropAround :: (Char -> Bool) -> Text -> Text Source #

O(n) dropAround f = dropWhile f . dropWhileR f
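
For illustration (assuming OverloadedStrings):

dropWhile  (== ' ') "  hi  " == "hi  "
dropWhileR (== ' ') "  hi  " == "  hi"
dropAround (== ' ') "  hi  " == "hi"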

break :: (Char -> Bool) -> Text -> (Text, Text) Source #

O(n) Split the text into the longest prefix of elements that do not satisfy the predicate and the rest without copying.

span :: (Char -> Bool) -> Text -> (Text, Text) Source #

O(n) Split the text into the longest prefix of elements that satisfy the predicate and the rest without copying.

breakR :: (Char -> Bool) -> Text -> (Text, Text) Source #

breakR behaves like break but from the end of the text.

breakR p == spanR (not.p)

spanR :: (Char -> Bool) -> Text -> (Text, Text) Source #

spanR behaves like span but from the end of the text.

breakOn :: Text -> Text -> (Text, Text) Source #

Break a text on a subtext, returning a pair of the part of the text prior to the match, and the rest of the text, e.g.

break "wor" "hello, world" = ("hello, ", "world")

breakOnAll Source #

Arguments

:: Text

needle to search for

-> Text

haystack in which to search

-> [(Text, Text)] 

O(n+m) Find all non-overlapping instances of needle in haystack. Each element of the returned list consists of a pair:

  • The entire string prior to the kth match (i.e. the prefix)
  • The kth match, followed by the remainder of the string

Examples:

breakOnAll "::" ""
==> []
breakOnAll "" "abc"
==> [("a", "bc"), ("ab", "c"), ("abc", "/")]

The result list is lazy; the search is performed as you force the list.

group :: Text -> [Text] Source #

The group function takes a text and returns a list of texts such that the concatenation of the result is equal to the argument. Moreover, each sublist in the result contains only equal elements. For example,

group "Mississippi" = ["M","i","ss","i","ss","i","pp","i"]

It is a special case of groupBy, which allows the programmer to supply their own equality test.

groupBy :: (Char -> Char -> Bool) -> Text -> [Text] Source #

The groupBy function is the generalised version of group; it takes a user-supplied equality test.

stripPrefix :: Text -> Text -> Maybe Text Source #

O(n) The stripPrefix function takes two texts and returns Just the remainder of the second iff the first is its prefix, and otherwise Nothing.

stripSuffix :: Text -> Text -> Maybe Text Source #

O(n) The stripSuffix function takes two texts and returns Just the remainder of the second iff the first is its suffix, and otherwise Nothing.
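
For illustration (assuming OverloadedStrings):

stripPrefix "foo" "foobar" == Just "bar"
stripPrefix "bar" "foobar" == Nothing
stripSuffix "bar" "foobar" == Just "foo"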

split :: Char -> Text -> [Text] Source #

O(n) Break a text into pieces separated by the delimiter char, consuming the delimiter, i.e.

split '\n' "a\nb\nd\ne" == ["a","b","d","e"]
split 'a'  "aXaXaXa"    == ["","X","X","X",""]
split 'x'  "x"          == ["",""]

and

intercalate (singleton c) . split c == id
split == splitWith . (==)

NOTE: this function's behavior differs from bytestring's; see #56.

splitWith :: (Char -> Bool) -> Text -> [Text] Source #

O(n) Splits a text into components delimited by separators, where the predicate returns True for a separator char. The resulting components do not contain the separators. Two adjacent separators result in an empty component in the output, e.g.

splitWith (=='a') "aabbaca" == ["","","bb","c",""]
splitWith (=='a') ""        == [""]

splitOn :: Text -> Text -> [Text] Source #

O(m+n) Break haystack into pieces separated by needle.

Note: An empty needle will essentially split haystack element by element.

Examples:

>>> splitOn "\r\n" "a\r\nb\r\nd\r\ne"
["a","b","d","e"]
>>> splitOn "aaa"  "aaaXaaaXaaaXaaa"
["","X","X","X",""]
>>> splitOn "x"  "x"
["",""]

and

intercalate s . splitOn s         == id
splitOn (singleton c)             == split (==c)

isPrefixOf :: Text -> Text -> Bool Source #

The isPrefixOf function returns True if the first argument is a prefix of the second.

isSuffixOf :: Text -> Text -> Bool Source #

O(n) The isSuffixOf function takes two texts and returns True if the first is a suffix of the second.

isInfixOf :: Text -> Text -> Bool Source #

Check whether one text is a subtext of another.

needle isInfixOf haystack === null needle || indices needle haystack /= [].

commonPrefix :: Text -> Text -> (Text, Text, Text) Source #

O(n) Find the longest non-empty common prefix of two strings and return it, along with the suffixes of each string at which they no longer match. e.g.

>>> commonPrefix "foobar" "fooquux"
("foo","bar","quux")
>>> commonPrefix "veeble" "fetzer"
("","veeble","fetzer")

words :: Text -> [Text] Source #

O(n) Breaks a text up into a list of words, delimited by Unicode space characters.

lines :: Text -> [Text] Source #

O(n) Breaks a text up into a list of lines, delimited by ASCII newlines ('\n').

unwords :: [Text] -> Text Source #

O(n) Joins words with an ASCII space.

unlines :: [Text] -> Text Source #

O(n) Joins lines with an ASCII newline ('\n').
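
For illustration (assuming OverloadedStrings; edge cases such as repeated or trailing delimiters follow the descriptions above):

words "hello world"        == ["hello", "world"]
lines "a\nb\nc"            == ["a", "b", "c"]
unwords ["hello", "world"] == "hello world"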

padLeft :: Int -> Char -> Text -> Text Source #

Add padding to the left so that the whole text's length is at least n.

padRight :: Int -> Char -> Text -> Text Source #

Add padding to the right so that the whole text's length is at least n.
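
For illustration (assuming OverloadedStrings):

padLeft  5 '0' "42"    == "00042"
padRight 5 '.' "ab"    == "ab..."
padLeft  2 '0' "12345" == "12345"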

Transform

reverse :: Text -> Text Source #

O(n) Reverse the characters of a string.

intersperse :: Char -> Text -> Text Source #

O(n) The intersperse function takes a character and places it between the characters of a Text. Performs replacement on invalid scalar values.

intercalate :: Text -> [Text] -> Text Source #

O(n) The intercalate function takes a Text and a list of Texts and concatenates the list after interspersing the first argument between each element of the list.
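
For illustration (assuming OverloadedStrings):

intersperse ',' "abc"            == "a,b,c"
intercalate ", " ["a", "b", "c"] == "a, b, c"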

transpose :: [Text] -> [Text] Source #

The transpose function transposes the rows and columns of its text argument.

Search

element-wise search

find Source #

Arguments

:: (Char -> Bool) 
-> Text 
-> (Int, Int, Maybe Char)

(char index, byte index, matching char)

O(n) Find the first char matching the predicate in a text, searching from left to right; if there is none, the returned indices point to the end of the byte slice.

findR :: (Char -> Bool) -> Text -> (Int, Int, Maybe Char) Source #

O(n) Find the first char matching the predicate in a text, searching from right to left; if there is none, the returned indices point to the end of the byte slice.
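
An illustrative sketch of the returned triple for ASCII input, where char index and byte index coincide (the not-found indices follow the description above):

find (== 'b') "abc" == (1, 1, Just 'b')
find (== 'x') "abc" == (3, 3, Nothing)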

filter :: (Char -> Bool) -> Text -> Text Source #

O(n) filter, applied to a predicate and a text, returns a text containing those chars that satisfy the predicate.

partition :: (Char -> Bool) -> Text -> (Text, Text) Source #

O(n) The partition function takes a predicate and a text, and returns a pair of texts whose code points do and do not satisfy the predicate, respectively; i.e.,

partition p txt == (filter p txt, filter (not . p) txt)

Unicode processing

normalization

data NormalizationResult Source #

Instances
Eq NormalizationResult Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Ord NormalizationResult Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Show NormalizationResult Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Generic NormalizationResult Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Associated Types

type Rep NormalizationResult :: Type -> Type #

type Rep NormalizationResult Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

type Rep NormalizationResult = D1 (MetaData "NormalizationResult" "Std.Data.Text.UTF8Rewind" "stdio-0.2.0.0-5YmH16k3vlO7FAtmF9PgfB" False) (C1 (MetaCons "NormalizedYes" PrefixI False) (U1 :: Type -> Type) :+: (C1 (MetaCons "NormalizedMaybe" PrefixI False) (U1 :: Type -> Type) :+: C1 (MetaCons "NormalizedNo" PrefixI False) (U1 :: Type -> Type)))

data NormalizeMode Source #

These are the Unicode Normalization Forms:

Form                         | Description
---------------------------- | ---------------------------------------------
Normalization Form D (NFD)   | Canonical decomposition
Normalization Form C (NFC)   | Canonical decomposition, followed by canonical composition
Normalization Form KD (NFKD) | Compatibility decomposition
Normalization Form KC (NFKC) | Compatibility decomposition, followed by canonical composition

Constructors

NFC 
NFKC 
NFD 
NFKD 
Instances
Eq NormalizeMode Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Ord NormalizeMode Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Show NormalizeMode Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Generic NormalizeMode Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Associated Types

type Rep NormalizeMode :: Type -> Type #

type Rep NormalizeMode Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

type Rep NormalizeMode = D1 (MetaData "NormalizeMode" "Std.Data.Text.UTF8Rewind" "stdio-0.2.0.0-5YmH16k3vlO7FAtmF9PgfB" False) ((C1 (MetaCons "NFC" PrefixI False) (U1 :: Type -> Type) :+: C1 (MetaCons "NFKC" PrefixI False) (U1 :: Type -> Type)) :+: (C1 (MetaCons "NFD" PrefixI False) (U1 :: Type -> Type) :+: C1 (MetaCons "NFKD" PrefixI False) (U1 :: Type -> Type)))

isNormalized :: Text -> NormalizationResult Source #

Check if a string is stable in the NFC (Normalization Form C).

isNormalizedTo :: NormalizeMode -> Text -> NormalizationResult Source #

Check if a string is stable in the specified Unicode Normalization Form.

This function can be used as a preprocessing step before attempting to normalize a string. Normalization is a very expensive process; it is often cheaper to first determine whether the string is unstable in the requested normalization form.

The result of the check will be YES if the string is stable and MAYBE or NO if it is unstable. If the result is MAYBE, the string does not necessarily need to be normalized.

If the result is unstable, the offset parameter is set to the offset for the first unstable code point. If the string is stable, the offset is equivalent to the length of the string in bytes.

For more information, please review Unicode Standard Annex #15 - Unicode Normalization Forms.

normalize :: Text -> Text Source #

Normalize a string to NFC (Normalization Form C).

normalizeTo :: NormalizeMode -> Text -> Text Source #

Normalize a string to the specified Unicode Normalization Form.

The Unicode standard defines two standards for equivalence between characters: canonical and compatibility equivalence. Canonically equivalent characters and sequences represent the same abstract character and must be rendered with the same appearance and behavior. Compatibility-equivalent characters have a weaker equivalence and may be rendered differently.

Unicode Normalization Forms are formally defined standards that can be used to test whether any two strings of characters are equivalent to each other. This equivalence may be canonical or compatibility.

The algorithm puts all combining marks into a specified order and uses the rules for decomposition and composition to transform the string into one of four Unicode Normalization Forms. A binary comparison can then be used to determine equivalence.
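
As an illustrative sketch of canonical equivalence: the precomposed U+00E9 and the decomposed sequence U+0065 U+0301 share the same NFC form, so

normalize (pack "\x00E9") == normalize (pack "e\x0301")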

Case conversion

data Locale Source #

Locale for case mapping.

Instances
Eq Locale Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Methods

(==) :: Locale -> Locale -> Bool #

(/=) :: Locale -> Locale -> Bool #

Ord Locale Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Show Locale Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Generic Locale Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Associated Types

type Rep Locale :: Type -> Type #

Methods

from :: Locale -> Rep Locale x #

to :: Rep Locale x -> Locale #

type Rep Locale Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

type Rep Locale = D1 (MetaData "Locale" "Std.Data.Text.UTF8Rewind" "stdio-0.2.0.0-5YmH16k3vlO7FAtmF9PgfB" True) (C1 (MetaCons "Locale" PrefixI False) (S1 (MetaSel (Nothing :: Maybe Symbol) NoSourceUnpackedness NoSourceStrictness DecidedLazy) (Rec0 CSize)))

caseFold :: Text -> Text Source #

Remove case distinction from UTF-8 encoded text with default locale.

caseFoldWith :: Locale -> Text -> Text Source #

Remove case distinction from UTF-8 encoded text.

Case folding is the process of eliminating differences between code points concerning case mapping. It is most commonly used for comparing strings in a case-insensitive manner. Conversion is fully compliant with the Unicode 7.0 standard.

Although similar to lowercasing text, there are significant differences. For one, case folding does _not_ take locale into account when converting. In some cases, case folding can be up to 20% faster than lowercasing the same text, but the result cannot be treated as correct lowercased text.

Only two locale-specific exceptions are made when case folding text. In Turkish, U+0049 LATIN CAPITAL LETTER I maps to U+0131 LATIN SMALL LETTER DOTLESS I, and U+0130 LATIN CAPITAL LETTER I WITH DOT ABOVE maps to U+0069 LATIN SMALL LETTER I.

Although most code points can be case folded without changing length, there are notable exceptions. For example, U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE) maps to "U+0069 U+0307" (LATIN SMALL LETTER I and COMBINING DOT ABOVE) when converted to lowercase.

Only a handful of scripts make a distinction between upper- and lowercase. In addition to modern scripts, such as Latin, Greek, Armenian and Cyrillic, a few historic or archaic scripts have case. The vast majority of scripts do not have case distinctions.
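
A common use of case folding is case-insensitive comparison; a minimal sketch (ciEq is a hypothetical helper, not part of this module):

ciEq :: Text -> Text -> Bool
ciEq a b = caseFold a == caseFold b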

toLower :: Text -> Text Source #

Convert UTF-8 encoded text to lowercase with default locale.

toLowerWith :: Locale -> Text -> Text Source #

Convert UTF-8 encoded text to lowercase.

This function allows conversion of UTF-8 encoded strings to lowercase without first changing the encoding to UTF-32. Conversion is fully compliant with the Unicode 7.0 standard.

Although most code points can be converted to lowercase without changing length, there are notable exceptions. For example, U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE) maps to "U+0069 U+0307" (LATIN SMALL LETTER I and COMBINING DOT ABOVE) when converted to lowercase.

Only a handful of scripts make a distinction between upper- and lowercase. In addition to modern scripts, such as Latin, Greek, Armenian and Cyrillic, a few historic or archaic scripts have case. The vast majority of scripts do not have case distinctions.

Case mapping is not reversible. That is, toUpper(toLower(x)) != toLower(toUpper(x)).

Certain code points (or combinations of code points) apply rules based on the locale. For more information about these exceptional code points, please refer to the Unicode standard: ftp://ftp.unicode.org/Public/UNIDATA/SpecialCasing.txt

toUpper :: Text -> Text Source #

Convert UTF-8 encoded text to uppercase with default locale.

toUpperWith :: Locale -> Text -> Text Source #

Convert UTF-8 encoded text to uppercase.

Conversion is fully compliant with the Unicode 7.0 standard.

Although most code points can be converted without changing length, there are notable exceptions. For example, U+00DF (LATIN SMALL LETTER SHARP S) maps to "U+0053 U+0053" (LATIN CAPITAL LETTER S and LATIN CAPITAL LETTER S) when converted to uppercase.

Only a handful of scripts make a distinction between upper and lowercase. In addition to modern scripts, such as Latin, Greek, Armenian and Cyrillic, a few historic or archaic scripts have case. The vast majority of scripts do not have case distinctions.

Case mapping is not reversible. That is, toUpper(toLower(x)) != toLower(toUpper(x)).

Certain code points (or combinations of code points) apply rules based on the locale. For more information about these exceptional code points, please refer to the Unicode standard: ftp://ftp.unicode.org/Public/UNIDATA/SpecialCasing.txt

toTitle :: Text -> Text Source #

Convert UTF-8 encoded text to titlecase with default locale.

toTitleWith :: Locale -> Text -> Text Source #

Convert UTF-8 encoded text to titlecase.

This function allows conversion of UTF-8 encoded strings to titlecase. Conversion is fully compliant with the Unicode 7.0 standard.

Titlecase requires a bit more explanation than uppercase and lowercase, because it is not a common text transformation. Titlecase uses uppercase for the first letter of each word and lowercase for the rest. Words are defined as "collections of code points with general category Lu, Ll, Lt, Lm or Lo according to the Unicode database".

Effectively, any type of punctuation can break up a word, even if this is not grammatically valid. This happens because the titlecasing algorithm does not and cannot take grammar rules into account.

Text                                 | Titlecase
-------------------------------------|-------------------------------------
The running man                      | The Running Man
NATO Alliance                        | Nato Alliance
You're amazing at building libraries | You'Re Amazing At Building Libraries

Although most code points can be converted to titlecase without changing length, there are notable exceptions. For example, U+00DF (LATIN SMALL LETTER SHARP S) maps to "U+0053 U+0073" (LATIN CAPITAL LETTER S and LATIN SMALL LETTER S) when converted to titlecase.

Certain code points (or combinations of code points) apply rules based on the locale. For more information about these exceptional code points, please refer to the Unicode standard: ftp://ftp.unicode.org/Public/UNIDATA/SpecialCasing.txt

Unicode category

isCategory :: Category -> Text -> Bool Source #

Check if the input string conforms to the category specified by the flags.

This function can be used to check if the code points in a string are part of a category. Valid flags are members of the "list of categories". The category for a code point is defined as part of the entry in UnicodeData.txt, the data file for the Unicode code point database.

By default, the function will treat grapheme clusters as a single code point. This means that the following string:

Code point | Canonical combining class | General category      | Name
---------- | ------------------------- | --------------------- | ----------------------
U+0045     | 0                         | Lu (Uppercase letter) | LATIN CAPITAL LETTER E
U+0300     | 230                       | Mn (Non-spacing mark) | COMBINING GRAVE ACCENT

Will match with categoryLetterUppercase in its entirety, because the COMBINING GRAVE ACCENT is treated as part of the grapheme cluster. This is useful when e.g. creating a text parser, because you do not have to normalize the text first.

If this is undesired behavior, specify the UTF8_CATEGORY_IGNORE_GRAPHEME_CLUSTER flag.

In order to maintain backwards compatibility with POSIX functions like isdigit and isspace, compatibility flags have been provided. Note, however, that the result is only guaranteed to be correct for code points in the Basic Latin range, between U+0000 and U+007F. Combining a compatibility flag with a regular category flag will result in undefined behavior.

spanCategory :: Category -> Text -> (Text, Text) Source #

Try to match as many code points with the matching category flags as possible and return the prefix and suffix.

data Category Source #

Unicode categories. See isCategory; categories can be combined with bitwise or.

Instances
Eq Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Ord Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Show Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Generic Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

Associated Types

type Rep Category :: Type -> Type #

Methods

from :: Category -> Rep Category x #

to :: Rep Category x -> Category #

Bits Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

FiniteBits Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

type Rep Category Source # 
Instance details

Defined in Std.Data.Text.UTF8Rewind

type Rep Category = D1 (MetaData "Category" "Std.Data.Text.UTF8Rewind" "stdio-0.2.0.0-5YmH16k3vlO7FAtmF9PgfB" True) (C1 (MetaCons "Category" PrefixI False) (S1 (MetaSel (Nothing :: Maybe Symbol) NoSourceUnpackedness NoSourceStrictness DecidedLazy) (Rec0 CSize)))