mirror of
https://github.com/GrammaticalFramework/gf-core.git
synced 2026-05-16 06:32:51 -06:00
changed names of resource-1.3; added a note on homepage on release
This commit is contained in:
115
src/GF/Text/Lexing.hs
Normal file
115
src/GF/Text/Lexing.hs
Normal file
@@ -0,0 +1,115 @@
|
||||
module GF.Text.Lexing (stringOp) where
|
||||
|
||||
import GF.Text.Transliterations
|
||||
import GF.Text.UTF8
|
||||
|
||||
import Data.Char
|
||||
import Data.List (intersperse)
|
||||
|
||||
-- lexers and unlexers - they work on space-separated word strings
|
||||
|
||||
-- | Look up a named string operation: lexers, unlexers, HTML wrapping,
-- character encodings, and (as a fallback) transliterations.
-- Returns 'Nothing' for unknown names.
stringOp :: String -> Maybe (String -> String)
stringOp name = case name of
  "chars"       -> Just $ appLexer (filter (not . all isSpace) . map return)
  "lextext"     -> Just $ appLexer lexText
  "lexcode"     -> Just $ appLexer lexCode   -- fixed: previously dispatched to lexText by mistake
  "lexmixed"    -> Just $ appLexer lexMixed
  "words"       -> Just $ appLexer words
  "bind"        -> Just $ appUnlexer bindTok
  "unchars"     -> Just $ appUnlexer concat  -- fixed: was misspelled "uncars"; inverse of "chars"
  "unlextext"   -> Just $ appUnlexer unlexText
  "unlexcode"   -> Just $ appUnlexer unlexCode
  "unlexmixed"  -> Just $ appUnlexer unlexMixed
  "unwords"     -> Just $ appUnlexer unwords
  "to_html"     -> Just wrapHTML
  "to_utf8"     -> Just encodeUTF8
  "from_utf8"   -> Just decodeUTF8
  "to_cp1251"   -> Just encodeCP1251
  "from_cp1251" -> Just decodeCP1251
  _             -> transliterate name
|
||||
|
||||
-- | Lift a lexer to a String operation: tokenize, drop empty tokens,
-- and join the result with single spaces.
appLexer :: (String -> [String]) -> String -> String
appLexer lexer input = unwords [tok | tok <- lexer input, not (null tok)]
|
||||
|
||||
-- | Lift an unlexer to a String operation, applied line by line:
-- each input line is split into words, unlexed, and terminated by '\n'.
appUnlexer :: ([String] -> String) -> String -> String
appUnlexer unlexer input = unlines [unlexer (words line) | line <- lines input]
|
||||
|
||||
-- | Wrap plain text in a minimal HTML page, turning line breaks into <br>.
wrapHTML :: String -> String
wrapHTML src = unlines (["<html>", "<body>"] ++ body ++ ["</body>", "</html>"])
  where
    body = intersperse "<br>" (lines src)
|
||||
|
||||
-- | Tokenize running text: punctuation characters become single-character
-- tokens, whitespace separates, everything else groups into words.
lexText :: String -> [String]
lexText s@(c:rest)
  | isPunct c = [c] : lexText rest
  | isSpace c = lexText rest
  | otherwise =
      let (word, more) = break (\x -> isSpace x || isPunct x) s
      in word : lexText more
lexText s = [s]  -- empty input yields one empty token (filtered out by appLexer)
|
||||
|
||||
-- | Haskell-style lexer via the Prelude 'lex'; usable for much code.
-- Stops (returning the tokens so far) when 'lex' yields no nonempty token.
lexCode :: String -> [String]
lexCode input =
  case lex input of
    [(token@(_:_), rest)] -> token : lexCode rest
    _                     -> []
|
||||
|
||||
-- | LaTeX-style lexer: text outside @$...$@ is lexed with 'lexText',
-- text inside with 'lexCode'; each @$@ delimiter is kept as its own token.
lexMixed :: String -> [String]
lexMixed = concat . go False
  where
    go _ [] = []
    go inCode str =
      case break (== '$') str of
        (chunk, [])          -> [tokenize inCode chunk]
        (chunk, dollar:rest) ->
          tokenize inCode chunk : [[dollar]] : go (not inCode) rest
    tokenize inCode = if inCode then lexCode else lexText
|
||||
|
||||
-- | Join tokens with spaces, except that a "&+" token glues its
-- neighbours together with no space (and disappears itself).
bindTok :: [String] -> String
bindTok []            = ""
bindTok [tok]         = tok
bindTok (tok:"&+":ts) = tok ++ bindTok ts
bindTok (tok:ts)      = tok ++ " " ++ bindTok ts
|
||||
|
||||
-- | Join word tokens with spaces; a single-character punctuation token
-- attaches directly to the preceding word.
unlexText :: [String] -> String
unlexText []  = []
unlexText [w] = w
unlexText (w:[c]:rest)
  | isPunct c =
      if null rest
        then w ++ [c]
        else w ++ [c] ++ " " ++ unlexText rest
unlexText (w:ws) = w ++ " " ++ unlexText ws
|
||||
|
||||
-- | Join code tokens with spaces, but glue parenthesis tokens to the
-- following text and closing brackets to the preceding token.
unlexCode :: [String] -> String
unlexCode []    = []
unlexCode [tok] = tok
unlexCode (tok:toks)
  | [c] <- tok, isParen c          = c : unlexCode toks
  | ([c]:_) <- toks, isClosing c   = tok ++ unlexCode toks
  | otherwise                      = tok ++ " " ++ unlexCode toks
|
||||
|
||||
|
||||
-- | Unlex alternating text/code segments separated by "$" tokens:
-- outside the "$"s 'unlexText' is used, inside 'unlexCode'.
unlexMixed :: [String] -> String
unlexMixed = concat . go False
  where
    go _ [] = []
    go inCode toks =
      case break (== "$") toks of
        (seg, [])     -> [render inCode seg]
        (seg, d:rest) -> render inCode seg : boundary inCode d : go (not inCode) rest
    render inCode = if inCode then unlexCode else unlexText
    -- keep a single space on the text side of the "$" delimiter
    boundary inCode d = if inCode then d ++ " " else " " ++ d
|
||||
|
||||
-- Character classes used by the lexers and unlexers above.
isPunct   c = c `elem` ".?!,:;"
isParen   c = c `elem` "()[]{}"
isClosing c = c `elem` ")]}"
|
||||
|
||||
|
||||
-- might be in a file of its own: Windows Cyrillic, used in Bulgarian resource
|
||||
|
||||
-- | Decode Windows CP-1251 (byte range 0xC0-0xFF) into Unicode Cyrillic
-- (U+0410-U+044F); all other characters pass through unchanged.
decodeCP1251 :: String -> String
decodeCP1251 = map toUnicode
  where
    toUnicode c
      | '\192' <= c && c <= '\255' = chr (ord c + 848)
      | otherwise                  = c
|
||||
|
||||
-- | Encode Unicode Cyrillic (U+0410-U+044F) as Windows CP-1251 bytes
-- (0xC0-0xFF); all other characters pass through unchanged.
encodeCP1251 :: String -> String
encodeCP1251 = map fromUnicode
  where
    fromUnicode c =
      let n = ord c
      in if n >= 1040 && n <= 1103 then chr (n - 848) else c
|
||||
|
||||
97
src/GF/Text/Transliterations.hs
Normal file
97
src/GF/Text/Transliterations.hs
Normal file
@@ -0,0 +1,97 @@
|
||||
module GF.Text.Transliterations (transliterate,transliteration,characterTable) where
|
||||
|
||||
import GF.Text.UTF8
|
||||
|
||||
import Data.Char
|
||||
import qualified Data.Map as Map
|
||||
|
||||
-- transliterations between ASCII and a Unicode character set
|
||||
|
||||
-- current transliterations: devanagari, thai
|
||||
|
||||
-- to add a new one: define the Unicode range and the corresponding ASCII strings,
|
||||
-- which may be one or two characters long
|
||||
|
||||
-- conventions to be followed:
|
||||
-- each character is either [letter] or [letter+nonletter]
|
||||
-- when using a sparse range of unicodes, mark missing codes as "-" in transliterations
|
||||
-- characters can be invisible: ignored in translation to unicode
|
||||
|
||||
-- | Resolve a "to_<name>" or "from_<name>" operation into the
-- corresponding transliteration function, if <name> is known.
transliterate :: String -> Maybe (String -> String)
transliterate s =
  case splitAt 5 s of
    ("from_", rest) -> fmap appTransFromUnicode (transliteration rest)
    _ ->
      case splitAt 3 s of
        ("to_", rest) -> fmap appTransToUnicode (transliteration rest)
        _             -> Nothing
|
||||
|
||||
-- | Transliterations known by name.
transliteration :: String -> Maybe Transliteration
transliteration name = lookup name table
  where
    table =
      [ ("devanagari", transDevanagari)
      , ("thai",       transThai)
      ]
|
||||
|
||||
-- | Render the Unicode-to-ASCII table of a transliteration as text,
-- one "| code | UTF-8 char | ascii |" row per mapped code point.
characterTable :: Transliteration -> String
characterTable tr = unlines (map row (Map.assocs (trans_from_unicode tr)))
  where
    row (code, ascii) =
      unwords ["|", show code, "|", encodeUTF8 [toEnum code], "|", ascii, "|"]
|
||||
|
||||
-- | A bidirectional mapping between ASCII token strings and Unicode
-- code points, plus tokens that are ignored when translating to Unicode.
data Transliteration = Trans {
  -- ASCII token (one or two characters) -> Unicode code point
  trans_to_unicode :: Map.Map String Int,
  -- inverse direction: Unicode code point -> ASCII token
  trans_from_unicode :: Map.Map Int String,
  -- tokens dropped in translation to Unicode (e.g. ["a"] for Devanagari below)
  invisible_chars :: [String]
  }
|
||||
|
||||
-- | Translate ASCII token text to Unicode: drop the invisible tokens,
-- map each remaining token to its code point, and pass unmapped tokens
-- through unchanged.
appTransToUnicode :: Transliteration -> String -> String
appTransToUnicode tr input = concatMap convert visibleToks
  where
    visibleToks = [t | t <- unchar input, t `notElem` invisible_chars tr]
    convert t = maybe t (\u -> [toEnum u]) (Map.lookup t (trans_to_unicode tr))
|
||||
|
||||
-- | Translate Unicode text to ASCII tokens; characters with no mapping
-- become "?".
appTransFromUnicode :: Transliteration -> String -> String
appTransFromUnicode tr = concatMap convert
  where
    convert ch = maybe "?" id (Map.lookup (fromEnum ch) (trans_from_unicode tr))
|
||||
|
||||
|
||||
-- | Build a 'Transliteration' from parallel lists of ASCII tokens and
-- Unicode code points. "-" tokens mark unassigned code points and are
-- skipped in both directions; no tokens are invisible by default.
mkTransliteration :: [String] -> [Int] -> Transliteration
mkTransliteration toks codes =
  Trans (Map.fromList pairs) (Map.fromList [(u, t) | (t, u) <- pairs]) []
  where
    pairs = [(t, u) | (t, u) <- zip toks codes, t /= "-"]
|
||||
|
||||
|
||||
-- | Split input into transliteration tokens: a token is one character,
-- extended by a following non-letter non-space character; spaces are
-- tokens of their own.
unchar :: String -> [String]
unchar []  = []
unchar [c] = [[c]]
unchar (c:d:rest)
  | isAlpha d = [c] : unchar (d:rest)
  | isSpace d = [c] : [d] : unchar rest
  | otherwise = [c,d] : unchar rest
|
||||
|
||||
-- | Transliteration for Thai, covering the Unicode block starting at
-- U+0E00. Tokens are listed in code-point order; "-" marks code points
-- with no ASCII token ('mkTransliteration' skips them, and zip truncates
-- to the shorter of the two lists).
transThai :: Transliteration
transThai = mkTransliteration allTrans allCodes where
  allTrans = words $
    "- k k1 - k2 - k3 g c c1 c2 s' c3 y' d' t' " ++
    "t1 t2 t3 n' d t t4 t5 t6 n b p p1 f p2 f' " ++
    "p3 m y r - l - w s- s. s h l' O h' - " ++
    "a. a a: a+ i i: v v: u u: - - - - - - " ++
    "e e' o: a% a& L R S T1 T2 T3 T4 K - - - " ++
    "N0 N1 N2 N3 N4 N5 N6 N7 N8 N9 - - - - - - "
  allCodes = [0x0e00 .. 0x0e7f]
|
||||
|
||||
-- | Transliteration for Devanagari, covering code points U+0901-U+094C
-- in order; "-" marks code points with no ASCII token. The token "a" is
-- invisible: it is dropped when translating to Unicode (the inherent
-- vowel has no separate character).
transDevanagari :: Transliteration
transDevanagari = (mkTransliteration allTrans allCodes){invisible_chars = ["a"]} where
  allTrans = words $
    "M N - - " ++
    "a- A- i- I- u- U- R- - - - e- E- - - o- O- " ++
    "k K g G N: c C j J n: t. T. d. D. n. t " ++
    "T d D n - p P b B m y r - l - - v " ++
    "S s. s h - - r. - A i I u U R - - " ++
    "- e E o O "
  allCodes = [0x0901 .. 0x094c]
|
||||
|
||||
48
src/GF/Text/UTF8.hs
Normal file
48
src/GF/Text/UTF8.hs
Normal file
@@ -0,0 +1,48 @@
|
||||
----------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : UTF8
|
||||
-- Maintainer : AR
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- > CVS $Date: 2005/04/21 16:23:42 $
|
||||
-- > CVS $Author: bringert $
|
||||
-- > CVS $Revision: 1.5 $
|
||||
--
|
||||
-- From the Char module supplied with HBC.
|
||||
-- code by Thomas Hallgren (Jul 10 1999)
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
module GF.Text.UTF8 (decodeUTF8, encodeUTF8) where
|
||||
|
||||
-- | Decode a UTF-8 byte sequence (one byte per input Char) into a
-- Unicode string. Handles 1-, 2- and 3-byte sequences; a malformed
-- sequence ends decoding and the remaining input is passed through
-- unchanged (AR workaround 22/6/2006 instead of raising an error).
-- (The original header comment said "encode"; this function decodes.)
decodeUTF8 :: String -> String
decodeUTF8 [] = []
decodeUTF8 str@(b0:rest0)
  | b0 < '\x80' = b0 : decodeUTF8 rest0
  | b1:rest1 <- rest0, isLead2 b0, isCont b1 =
      toEnum ((fromEnum b0 `mod` 0x20) * 0x40 + fromEnum b1 `mod` 0x40)
        : decodeUTF8 rest1
  | b1:b2:rest2 <- rest0, isLead3 b0, isCont b1, isCont b2 =
      toEnum ((fromEnum b0 `mod` 0x10) * 0x1000
              + (fromEnum b1 `mod` 0x40) * 0x40
              + fromEnum b2 `mod` 0x40)
        : decodeUTF8 rest2
  | otherwise = str  -- malformed input: return the rest unchanged
  where
    isLead2 c = '\xc0' <= c && c <= '\xdf'  -- lead byte of a 2-byte sequence
    isLead3 c = '\xe0' <= c && c <= '\xef'  -- lead byte of a 3-byte sequence
    isCont  c = '\x80' <= c && c <= '\xbf'  -- continuation byte
|
||||
|
||||
-- | Encode a Unicode string as UTF-8, one byte per output Char.
-- Covers code points below U+10000 (1-3 bytes). Note that NUL falls
-- into the 2-byte branch, yielding the overlong 0xC0 0x80 form
-- ("modified UTF-8"), as in the original implementation.
encodeUTF8 :: String -> String
encodeUTF8 = concatMap enc
  where
    enc c
      | c > '\x0000' && c < '\x0080' =
          [c]                                     -- plain ASCII, one byte
      | c < '\x0800' =
          let i = fromEnum c
          in [ toEnum (0xc0 + i `div` 0x40)
             , toEnum (0x80 + i `mod` 0x40) ]
      | otherwise =
          let i = fromEnum c
          in [ toEnum (0xe0 + i `div` 0x1000)
             , toEnum (0x80 + (i `mod` 0x1000) `div` 0x40)
             , toEnum (0x80 + i `mod` 0x40) ]
|
||||
Reference in New Issue
Block a user