1
0
forked from GitHub/gf-core

reorganize the directories under src, and rescue the JavaScript interpreter from deprecation

This commit is contained in:
krasimir
2009-12-13 18:50:29 +00:00
parent d88a865faf
commit f85232947e
189 changed files with 2 additions and 2 deletions

View File

@@ -0,0 +1,77 @@
module GF.Text.CP1250 where
import Data.Char
-- | Decode a CP1250 (Windows Central European) string to Unicode.
-- Bytes with no entry in the translation table are passed through
-- unchanged.
decodeCP1250 = map decodeChar
  where
    decodeChar b = maybe b id (lookup b toUnicode)
    -- CP1250 byte -> Unicode character, for the codes that differ.
    toUnicode =
      [ ('\x80', '\x20AC'), ('\x82', '\x201A'), ('\x84', '\x201E')
      , ('\x85', '\x2026'), ('\x86', '\x2020'), ('\x87', '\x2021')
      , ('\x89', '\x2030'), ('\x8A', '\x0160'), ('\x8B', '\x2039')
      , ('\x8C', '\x015A'), ('\x8D', '\x0164'), ('\x8E', '\x017D')
      , ('\x8F', '\x0179'), ('\x91', '\x2018'), ('\x92', '\x2019')
      , ('\x93', '\x201C'), ('\x94', '\x201D'), ('\x95', '\x2022')
      , ('\x96', '\x2013'), ('\x97', '\x2014'), ('\x99', '\x2122')
      , ('\x9A', '\x0161'), ('\x9B', '\x203A'), ('\x9C', '\x015B')
      , ('\x9D', '\x0165'), ('\x9E', '\x017E'), ('\x9F', '\x017A')
      , ('\xA1', '\x02C7'), ('\xA5', '\x0104'), ('\xB9', '\x0105')
      , ('\xBC', '\x013D'), ('\xBE', '\x013E')
      ]
-- | Encode a Unicode string as CP1250.  Characters with no entry in the
-- translation table (including those above 0xFF) are passed through
-- unchanged.
encodeCP1250 = map encodeChar
  where
    encodeChar u = maybe u id (lookup u fromUnicode)
    -- Unicode character -> CP1250 byte, for the codes that differ.
    fromUnicode =
      [ ('\x20AC', '\x80'), ('\x201A', '\x82'), ('\x201E', '\x84')
      , ('\x2026', '\x85'), ('\x2020', '\x86'), ('\x2021', '\x87')
      , ('\x2030', '\x89'), ('\x0160', '\x8A'), ('\x2039', '\x8B')
      , ('\x015A', '\x8C'), ('\x0164', '\x8D'), ('\x017D', '\x8E')
      , ('\x0179', '\x8F'), ('\x2018', '\x91'), ('\x2019', '\x92')
      , ('\x201C', '\x93'), ('\x201D', '\x94'), ('\x2022', '\x95')
      , ('\x2013', '\x96'), ('\x2014', '\x97'), ('\x2122', '\x99')
      , ('\x0161', '\x9A'), ('\x203A', '\x9B'), ('\x015B', '\x9C')
      , ('\x0165', '\x9D'), ('\x017E', '\x9E'), ('\x017A', '\x9F')
      , ('\x02C7', '\xA1'), ('\x0104', '\xA5'), ('\x0105', '\xB9')
      , ('\x013D', '\xBC'), ('\x013E', '\xBE')
      ]

View File

@@ -0,0 +1,74 @@
module GF.Text.CP1251 where
import Data.Char
-- | Decode a CP1251 (Windows Cyrillic) string to Unicode.  The main
-- Cyrillic letters occupy the contiguous block 0xC0..0xFF and are shifted
-- by a constant offset; the remaining letters are table-driven.  Bytes
-- with no mapping pass through unchanged.
decodeCP1251 = map decodeChar
  where
    decodeChar b
      | b >= '\xC0' && b <= '\xFF' = chr (ord b + (0x410 - 0xC0)) -- А..я
      | otherwise                  = maybe b id (lookup b toUnicode)
    toUnicode =
      [ ('\xA8', '\x0401') -- cyrillic capital letter IO (Ё)
      , ('\x80', '\x0402'), ('\x81', '\x0403'), ('\xAA', '\x0404')
      , ('\xBD', '\x0405'), ('\xB2', '\x0406'), ('\xAF', '\x0407')
      , ('\xA3', '\x0408'), ('\x8A', '\x0409'), ('\x8C', '\x040A')
      , ('\x8E', '\x040B'), ('\x8D', '\x040C'), ('\xA1', '\x040E')
      , ('\x8F', '\x040F')
      , ('\xB8', '\x0451') -- cyrillic small letter IO (ё)
      , ('\x90', '\x0452'), ('\x83', '\x0453'), ('\xBA', '\x0454')
      , ('\xBE', '\x0455'), ('\xB3', '\x0456'), ('\xBF', '\x0457')
      , ('\xBC', '\x0458'), ('\x9A', '\x0459'), ('\x9C', '\x045A')
      , ('\x9E', '\x045B'), ('\x9D', '\x045C'), ('\xA2', '\x045E')
      , ('\x9F', '\x045F'), ('\xA5', '\x0490'), ('\xB4', '\x0491')
      ]
-- | Encode a Unicode string as CP1251.  Inverse of 'decodeCP1251':
-- the contiguous Cyrillic block 0x410..0x44F is shifted back to
-- 0xC0..0xFF; the remaining letters are table-driven.  Characters with
-- no mapping pass through unchanged.
encodeCP1251 = map encodeChar
  where
    encodeChar u
      | cp >= 0x410 && cp <= 0x44F = chr (cp - (0x410 - 0xC0))
      | otherwise                  = maybe u id (lookup u fromUnicode)
      where cp = ord u
    fromUnicode =
      [ ('\x0401', '\xA8') -- cyrillic capital letter IO (Ё)
      , ('\x0402', '\x80'), ('\x0403', '\x81'), ('\x0404', '\xAA')
      , ('\x0405', '\xBD'), ('\x0406', '\xB2'), ('\x0407', '\xAF')
      , ('\x0408', '\xA3'), ('\x0409', '\x8A'), ('\x040A', '\x8C')
      , ('\x040B', '\x8E'), ('\x040C', '\x8D'), ('\x040E', '\xA1')
      , ('\x040F', '\x8F')
      , ('\x0451', '\xB8') -- cyrillic small letter IO (ё)
      , ('\x0452', '\x90'), ('\x0453', '\x83'), ('\x0454', '\xBA')
      , ('\x0455', '\xBE'), ('\x0456', '\xB3'), ('\x0457', '\xBF')
      , ('\x0458', '\xBC'), ('\x0459', '\x9A'), ('\x045A', '\x9C')
      , ('\x045B', '\x9E'), ('\x045C', '\x9D'), ('\x045E', '\xA2')
      , ('\x045F', '\x9F'), ('\x0490', '\xA5'), ('\x0491', '\xB4')
      ]

View File

@@ -0,0 +1,6 @@
module GF.Text.CP1252 where
import Data.Char
-- | Decode a CP1252 (Windows Western European) string to Unicode.
--
-- Bug fix: the previous implementation was the identity, i.e. it treated
-- CP1252 as Latin-1.  Unlike Latin-1, CP1252 assigns printable characters
-- to most of the 0x80..0x9F range (Euro sign, curly quotes, dashes, Š, Œ,
-- ™, ...); those codes are translated here.  The five codes CP1252 leaves
-- undefined (0x81, 0x8D, 0x8F, 0x90, 0x9D) pass through unchanged, as do
-- all other characters.
decodeCP1252 = map decodeChar
  where decodeChar c = maybe c id (lookup c cp1252ToUnicode)

-- | Encode a Unicode string as CP1252.  Characters in the CP1252
-- 0x80..0x9F repertoire are mapped back to their byte values; characters
-- up to 0xFF are kept as-is (as before); anything else becomes '?'
-- (preserving the original fallback behaviour).
encodeCP1252 = map encodeChar
  where
    encodeChar x =
      case lookup x unicodeToCP1252 of
        Just b  -> b
        Nothing -> if x <= '\255' then x else '?'

-- CP1252 byte <-> Unicode mappings for the 0x80..0x9F window,
-- per the Unicode Consortium mapping table CP1252.TXT.
cp1252ToUnicode :: [(Char, Char)]
cp1252ToUnicode =
  [ ('\x80', '\x20AC'), ('\x82', '\x201A'), ('\x83', '\x0192')
  , ('\x84', '\x201E'), ('\x85', '\x2026'), ('\x86', '\x2020')
  , ('\x87', '\x2021'), ('\x88', '\x02C6'), ('\x89', '\x2030')
  , ('\x8A', '\x0160'), ('\x8B', '\x2039'), ('\x8C', '\x0152')
  , ('\x8E', '\x017D'), ('\x91', '\x2018'), ('\x92', '\x2019')
  , ('\x93', '\x201C'), ('\x94', '\x201D'), ('\x95', '\x2022')
  , ('\x96', '\x2013'), ('\x97', '\x2014'), ('\x98', '\x02DC')
  , ('\x99', '\x2122'), ('\x9A', '\x0161'), ('\x9B', '\x203A')
  , ('\x9C', '\x0153'), ('\x9E', '\x017E'), ('\x9F', '\x0178')
  ]

unicodeToCP1252 :: [(Char, Char)]
unicodeToCP1252 = [ (u, b) | (b, u) <- cp1252ToUnicode ]

View File

@@ -0,0 +1,21 @@
module GF.Text.Coding where
import GF.Infra.Option
import GF.Text.UTF8
import GF.Text.CP1250
import GF.Text.CP1251
import GF.Text.CP1252
-- | Select the encoder for the given encoding; unknown encodings are
-- passed through unchanged.
encodeUnicode UTF_8   = encodeUTF8
encodeUnicode CP_1250 = encodeCP1250
encodeUnicode CP_1251 = encodeCP1251
encodeUnicode CP_1252 = encodeCP1252
encodeUnicode _       = id
-- | Select the decoder for the given encoding; unknown encodings are
-- passed through unchanged.
decodeUnicode UTF_8   = decodeUTF8
decodeUnicode CP_1250 = decodeCP1250
decodeUnicode CP_1251 = decodeCP1251
decodeUnicode CP_1252 = decodeCP1252
decodeUnicode _       = id

View File

@@ -0,0 +1,131 @@
module GF.Text.Lexing (stringOp,opInEnv) where
import GF.Text.Transliterations
import GF.Text.UTF8
import GF.Text.CP1251
import Data.Char
import Data.List (intersperse)
-- lexers and unlexers - they work on space-separated word strings
-- | Look up a named string operation; names not in the table are tried
-- as transliteration names (@to_X@ / @from_X@).
stringOp :: String -> Maybe (String -> String)
stringOp name =
  case lookup name ops of
    Just op -> Just op
    Nothing -> transliterate name
  where
    ops =
      [ ("chars",      appLexer (filter (not . all isSpace) . map return))
      , ("lextext",    appLexer lexText)
      , ("lexcode",    appLexer lexCode)
      , ("lexmixed",   appLexer lexMixed)
      , ("words",      appLexer words)
      , ("bind",       appUnlexer bindTok)
      , ("unchars",    appUnlexer concat)
      , ("unlextext",  appUnlexer unlexText)
      , ("unlexcode",  appUnlexer unlexCode)
      , ("unlexmixed", appUnlexer unlexMixed)
      , ("unwords",    appUnlexer unwords)
      , ("to_html",    wrapHTML)
      , ("to_utf8",    encodeUTF8)
      , ("from_utf8",  decodeUTF8)
      , ("to_cp1251",  encodeCP1251)
      , ("from_cp1251", decodeCP1251)
      ]
-- perform op in environments beg--end, e.g. between "--"
--- suboptimal implementation
-- | Apply @op@ only to the text lying between the delimiters @beg@ and
-- @end@; text outside the delimiters (and the delimiters themselves) is
-- left untouched.
opInEnv :: String -> String -> (String -> String) -> (String -> String)
opInEnv beg end op = concat . altern False . chop (lbeg, beg) [] where
  -- Split the input into an alternating list of chunks and delimiter
  -- markers.  @mk@ is the delimiter currently searched for (its length
  -- and its text); @s0@ accumulates the current chunk in reverse.
  chop mk@(lg, mark) s0 s =
    let (tag,rest) = splitAt lg s in
    if tag==mark then (reverse s0) : mark : chop (switch mk) [] rest
    else case s of
      c:cs -> chop mk (c:s0) cs
      [] -> [reverse s0]
  -- After finding @beg@ look for @end@, and vice versa.
  switch (lg,mark) = if mark==beg then (lend,end) else (lbeg,beg)
  (lbeg,lend) = (length beg, length end)
  -- Walk the chunk list, tracking with @m@ whether we are inside a
  -- beg--end environment, and apply @op@ to the inside chunks only.
  altern m ts = case ts of
    t:ws | not m && t==beg -> t : altern True ws
    t:ws | m && t==end -> t : altern False ws
    t:ws -> (if m then op t else t) : altern m ws
    [] -> []
-- | Run a lexer and re-join the non-empty tokens with single spaces.
appLexer :: (String -> [String]) -> String -> String
appLexer f s = unwords [t | t <- f s, not (null t)]
-- | Run an unlexer line by line: each input line is split into words,
-- passed to @f@, and the results are re-joined with newlines.
appUnlexer :: ([String] -> String) -> String -> String
appUnlexer f s = unlines [f (words ln) | ln <- lines s]
-- | Wrap plain text in a minimal UTF-8 HTML page, turning line breaks
-- into @\<br\>@ elements.
wrapHTML :: String -> String
wrapHTML s = unlines (header ++ intersperse "<br>" (lines s) ++ footer)
  where
    header = [ "<html>"
             , "<head>"
             , "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />"
             , "</head>"
             , "<body>"
             ]
    footer = ["</body>","</html>"]
-- | Tokenize running text: each punctuation character becomes its own
-- token, whitespace separates words, and the word following a
-- sentence-final punctuation mark (as well as the very first word) is
-- lower-cased.
lexText :: String -> [String]
lexText = lowerFirst . tokens
  where
    tokens s = case s of
      c:cs | isMajorPunct c -> [c] : lowerFirst (tokens cs)
      c:cs | isMinorPunct c -> [c] : tokens cs
      c:cs | isSpace c      -> tokens cs
      _:_ -> let (w, rest) = break (\x -> isSpace x || isPunct x) s
             in w : tokens rest
      _ -> [s]
    -- Lower-case the first character of the first token, if any.
    lowerFirst ts = case ts of
      (c:cs):ws -> (toLower c : cs) : ws
      _         -> ts
-- | Haskell lexer (via the Prelude's 'lex'), usable for much code.
-- Stops at the first position where 'lex' yields no non-empty token.
lexCode :: String -> [String]
lexCode input =
  case lex input of
    [(tok, rest)] | not (null tok) -> tok : lexCode rest
    _                              -> []
-- | LaTeX-style lexer: text outside @$...$@ is lexed as text, text
-- inside as code; each @$@ delimiter becomes its own token.
lexMixed :: String -> [String]
lexMixed = concat . go False
  where
    go inMath s
      | null s    = []
      | otherwise =
          case break (== '$') s of
            (chunk, [])       -> [lexer inMath chunk]
            (chunk, dollar:m) -> lexer inMath chunk : [[dollar]] : go (not inMath) m
    lexer inMath = if inMath then lexCode else lexText
-- | Join tokens with spaces, except that a @&+@ token glues its two
-- neighbours together with no space.
bindTok :: [String] -> String
bindTok []            = ""
bindTok [w]           = w
bindTok (w:"&+":rest) = w ++ bindTok rest
bindTok (w:rest)      = w ++ " " ++ bindTok rest
-- | Re-join text tokens: punctuation attaches to the preceding word,
-- the word after sentence-final punctuation is capitalized, and so is
-- the first word.  NOTE: the clause order below is significant — the
-- isPunct case must precede the isMajorPunct/isMinorPunct cases so that
-- trailing punctuation gets no following space.
unlexText :: [String] -> String
unlexText = cap . unlext where
  unlext s = case s of
    w:[] -> w
    -- final punctuation: attach, no trailing space
    w:[c]:[] | isPunct c -> w ++ [c]
    -- sentence-final punctuation: capitalize what follows
    w:[c]:cs | isMajorPunct c -> w ++ [c] ++ " " ++ cap (unlext cs)
    w:[c]:cs | isMinorPunct c -> w ++ [c] ++ " " ++ unlext cs
    w:ws -> w ++ " " ++ unlext ws
    _ -> []
  -- Capitalize the first character of a string, if any.
  cap s = case s of
    c:cs -> toUpper c : cs
    _ -> s
-- | Re-join code tokens: no space after parentheses/brackets and none
-- before closing ones.  NOTE: clause order is significant — the paren
-- cases must be tried before the general word case.
unlexCode :: [String] -> String
unlexCode s = case s of
  w:[] -> w
  -- an opening or closing bracket token: no space after it
  [c]:cs | isParen c -> [c] ++ unlexCode cs
  -- next token is a closing bracket: no space before it
  w:cs@([c]:_) | isClosing c -> w ++ unlexCode cs
  w:ws -> w ++ " " ++ unlexCode ws
  _ -> []
-- | Inverse of 'lexMixed': tokens outside @$...$@ are unlexed as text,
-- tokens inside as code; the @$@ delimiters are re-attached with a space
-- on the text side only.
unlexMixed :: [String] -> String
unlexMixed = concat . go False
  where
    go inMath ts
      | null ts   = []
      | otherwise =
          case break (== "$") ts of
            (chunk, [])       -> [unlexer inMath chunk]
            (chunk, dollar:m) ->
              unlexer inMath chunk : spaced inMath dollar : go (not inMath) m
    unlexer inMath = if inMath then unlexCode else unlexText
    -- closing a math environment: space after the "$"; opening: before.
    spaced inMath d = if inMath then d ++ " " else " " ++ d
-- Character classes used by the lexers and unlexers above.
isPunct      c = c `elem` ".?!,:;"
isMajorPunct c = c `elem` ".?!"   -- sentence-final
isMinorPunct c = c `elem` ",:;"   -- clause-internal
isParen      c = c `elem` "()[]{}"
isClosing    c = c `elem` ")]}"

View File

@@ -0,0 +1,206 @@
module GF.Text.Transliterations (
transliterate,
transliteration,
characterTable,
transliterationPrintNames
) where
import GF.Text.UTF8
import Data.Char
import Numeric
import qualified Data.Map as Map
-- transliterations between ASCII and a Unicode character set
-- current transliterations: devanagari, thai
-- to add a new one: define the Unicode range and the corresponding ASCII strings,
-- which may be one or more characters long
-- conventions to be followed:
-- each character is either [letter] or [letter+nonletters]
-- when using a sparse range of unicodes, mark missing codes as "-" in transliterations
-- characters can be invisible: ignored in translation to unicode
-- | Resolve a @to_X@ / @from_X@ operation name to the corresponding
-- transliteration function, if the scheme @X@ exists.
transliterate :: String -> Maybe (String -> String)
transliterate s
  | take 5 s == "from_" = fmap appTransFromUnicode (transliteration (drop 5 s))
  | take 3 s == "to_"   = fmap appTransToUnicode   (transliteration (drop 3 s))
  | otherwise           = Nothing
-- | Look up a transliteration scheme by its short name.
transliteration :: String -> Maybe Transliteration
transliteration = flip Map.lookup allTransliterations
-- | All available transliteration schemes, keyed by short name.
-- Uses 'Map.fromList' rather than 'Map.fromAscList': fromAscList's
-- precondition (strictly ascending keys) is unchecked, so a new entry
-- inserted out of alphabetical order would silently build a corrupt map,
-- while fromList is order-independent.
allTransliterations = Map.fromList [
  ("ancientgreek", transAncientGreek),
  ("arabic", transArabic),
  ("devanagari", transDevanagari),
  ("greek", transGreek),
  ("hebrew", transHebrew),
  ("persian", transPersian),
  ("telugu", transTelugu),
  ("thai", transThai)
  ---- "urdu", transUrdu
  ]
-- | (short name, print name) pairs, used in command options and help.
transliterationPrintNames = Map.toList (Map.map printname allTransliterations)
-- | Render a transliteration's from-unicode table, one row per entry:
-- @| hexcode | character | ascii transcription |@.
characterTable :: Transliteration -> String
characterTable t = unlines (map row (Map.assocs (trans_from_unicode t)))
  where
    row (code, ascii) = unwords ["|", showHex code "", "|", [toEnum code], "|", ascii, "|"]
-- | A bidirectional mapping between ASCII transcription tokens and
-- Unicode code points.  NOTE: field order matters — 'Trans' is applied
-- positionally in 'mkTransliteration'.
data Transliteration = Trans {
  trans_to_unicode   :: Map.Map String Int, -- ^ ASCII token -> Unicode code point
  trans_from_unicode :: Map.Map Int String, -- ^ Unicode code point -> ASCII token
  invisible_chars    :: [String],           -- ^ tokens ignored when translating to Unicode
  printname          :: String              -- ^ human-readable name, used in help texts
  }
-- | Translate an ASCII transcription to Unicode: split the input into
-- transcription tokens, drop the invisible ones, and map each remaining
-- token through the table (unknown tokens are kept verbatim).
appTransToUnicode :: Transliteration -> String -> String
appTransToUnicode trans =
  concatMap toUni . filter (`notElem` invisible_chars trans) . unchar
  where
    toUni t =
      case Map.lookup t (trans_to_unicode trans) of
        Just u  -> [toEnum u]
        Nothing -> t
-- | Translate Unicode text to its ASCII transcription; characters
-- without a table entry become "?".
appTransFromUnicode :: Transliteration -> String -> String
appTransFromUnicode trans = concatMap fromUni
  where
    fromUni ch =
      case Map.lookup (fromEnum ch) (trans_from_unicode trans) of
        Just ascii -> ascii
        Nothing    -> "?"
-- | Build a transliteration from parallel lists of ASCII tokens and
-- Unicode code points.  A token of "-" marks a hole in the table and
-- produces no entry in either direction.  No invisible characters are
-- set; use a record update on the result for that.
mkTransliteration :: String -> [String] -> [Int] -> Transliteration
mkTransliteration name tokens codes =
  Trans (Map.fromList toPairs) (Map.fromList fromPairs) [] name
  where
    toPairs   = [(t, u) | (t, u) <- zip tokens codes, t /= "-"]
    fromPairs = [(u, t) | (u, t) <- zip codes tokens, t /= "-"]
-- | Split a string into transcription tokens: each token is a letter
-- optionally followed by a run of non-letter, non-space modifier
-- characters; each space is a token of its own.
unchar :: String -> [String]
unchar [] = []
unchar [c] = [[c]]
unchar (c:d:rest)
  | isAlpha d = [c] : unchar (d:rest)
  | isSpace d = [c] : [d] : unchar rest
  | otherwise = (c : d : mods) : unchar remainder
  where
    (mods, remainder) = break (\x -> isAlpha x || isSpace x) rest
-- | Thai transliteration: one ASCII token per code point in the Unicode
-- block 0x0E00..0x0E7F; "-" marks positions with no transcription.
transThai :: Transliteration
transThai = mkTransliteration "Thai" allTrans allCodes where
  allTrans = words $
    "- k k1 - k2 - k3 g c c1 c2 s' c3 y' d' t' " ++
    "t1 t2 t3 n' d t t4 t5 t6 n b p p1 f p2 f' " ++
    "p3 m y r - l - w s- s. s h l' O h' - " ++
    "a. a a: a+ i i: v v: u u: - - - - - - " ++
    "e e' o: a% a& L R S T1 T2 T3 T4 K - - - " ++
    "N0 N1 N2 N3 N4 N5 N6 N7 N8 N9 - - - - - - "
  allCodes = [0x0e00 .. 0x0e7f]
-- | Devanagari transliteration for the Unicode range 0x0900..0x095F.
-- The token "a" is marked invisible: it is ignored when translating to
-- Unicode (see the invisible-characters convention at the top of this
-- module).
transDevanagari :: Transliteration
transDevanagari =
  (mkTransliteration "Devanagari"
    allTransUrduHindi allCodes){invisible_chars = ["a"]} where
    allCodes = [0x0900 .. 0x095f]
    allTransUrduHindi = words $
      "- M N - - a- A- i- I- u- U- R- - - - e- " ++
      "E- - - o- O- k K g G N: c C j J n: t. " ++
      "T. d. D. n. t T d D n - p P b B m y " ++
      "r - l - - v S s. s h - - r: - A i " ++
      "I u U R - - - e E o O - - - - - " ++
      "- - - - - - - - - - - z r. - - - "
-- | Urdu transliteration — currently a placeholder: it reuses the
-- Devanagari code range (see TODO below) and is not registered in
-- 'allTransliterations'.
-- NOTE(review): this refers to allTransUrduHindi, which appears to be
-- bound under transDevanagari's where clause — confirm it is actually in
-- scope here.
transUrdu :: Transliteration
transUrdu =
  (mkTransliteration "Urdu" allTransUrduHindi allCodes){invisible_chars = ["a"]} where
    allCodes = [0x0900 .. 0x095f] ---- TODO: this is devanagari
-- | Arabic transliteration over a sparse set of Unicode ranges; the
-- trailing comments on each line give the covered code points.
transArabic :: Transliteration
transArabic = mkTransliteration "Arabic" allTrans allCodes where
  allTrans = words $
    " V A: A? w? A- y? A b t. t v g H K d " ++  -- 0621 - 062f
    "W r z s C S D T Z c G " ++                 -- 0630 - 063a
    " f q k l m n h w y. y a. u. i. a u " ++    -- 0641 - 064f
    "i v2 o a: V+ V- i: a+ " ++                 -- 0650 - 0657
    "A* "                                       -- 0671 (used by AED)
  allCodes = [0x0621..0x062f] ++ [0x0630..0x063a] ++
             [0x0641..0x064f] ++ [0x0650..0x0657] ++ [0x0671]
transPersian :: Transliteration
transPersian = (mkTransliteration "Persian/Farsi" allTrans allCodes)
{invisible_chars = ["a","u","i"]} where
allTrans = words $
" V A: A? w? A- y? A b t. t t- j H K d " ++ -- 0621 - 062f
"W r z s C S D T Z c G " ++ -- 0630 - 063a
" f q k l m n h v y. y a. u. i. a u " ++ -- 0641 - 064f
"i v2 o a: V+ V- i: a+ " ++ -- 0650 - 0657
"p c^ J g "
allCodes = [0x0621..0x062f] ++ [0x0630..0x063a] ++
[0x0641..0x064f] ++ [0x0650..0x0657] ++
[0x067e,0x0686,0x0698,0x06af]
-- | Unvocalized Hebrew transliteration for the Unicode range
-- 0x05D0..0x05F4; "-" marks positions with no transcription.
transHebrew :: Transliteration
transHebrew = mkTransliteration "unvocalized Hebrew" allTrans allCodes where
  allTrans = words $
    "A b g d h w z H T y K k l M m N " ++
    "n S O P p Z. Z q r s t - - - - - " ++
    "w2 w3 y2 g1 g2"
  allCodes = [0x05d0..0x05f4]
-- | Telugu transliteration: one token per code point in the Unicode
-- block 0x0C00..0x0C7F; "-" marks positions with no transcription.
transTelugu :: Transliteration
transTelugu = mkTransliteration "Telugu" allTrans allCodes where
  allTrans = words $
    "- c1 c2 c3 - A A: I I: U U: R_ L_ - E E: " ++
    "A' - O O: A_ k k. g g. n. c c. j j. n' T " ++
    "T. d d. N t t. d d. n - p p. b b. m y " ++
    "r R l L - v s' S s h - - - c5 a: i " ++
    "i: u u: r_ r. - e e: a' - o o: a_ c6 - - " ++
    "- - - - - c7 c8 z Z - - - - - - - " ++
    "R+ L+ l+ l* - - n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 "
  allCodes = [0x0c00 .. 0x0c7f]
-- | Modern Greek transliteration for the Unicode block 0x0380..0x03CF;
-- "-" marks positions with no transcription.
transGreek :: Transliteration
transGreek = mkTransliteration "modern Greek" allTrans allCodes where
  allTrans = words $
    "- - - - - - A' - E' H' I' - O' - Y' W' " ++
    "i= A B G D E Z H V I K L M N X O " ++
    "P R - S T Y F C Q W I- Y- a' e' h' i' " ++
    "y= a b g d e z h v i k l m n x o " ++
    "p r s* s t y f c q w i- y- o' y' w' - "
  allCodes = [0x0380 .. 0x03cf]
-- | Ancient (polytonic) Greek transliteration: the basic Greek block
-- 0x0380..0x03CF followed by Greek Extended 0x1F00..0x1FFF (breathings,
-- accents, iota subscripts).  The trailing comments give the starting
-- code point of each Greek Extended row; "-" marks unmapped positions.
transAncientGreek :: Transliteration
transAncientGreek = mkTransliteration "ancient Greek" allTrans allCodes where
  allTrans = words $
    "- - - - - - - - - - - - - - - - " ++
    "i= A B G D E Z H V I K L M N X O " ++
    "P R - S T Y F C Q W I- Y- - - - - " ++
    "y= a b g d e z h v i k l m n x o " ++
    "p r s* s t y f c q w i- y- - - - - " ++
    "a) a( a)` a(` a)' a(' a)~ a(~ A) A( A)` A(` A)' A(' A)~ A(~ " ++
    "e) e( e)` e(` e)' e(' - - E) E( E)` E(` E)' E(' - - " ++
    "h) h( h)` h(` h)' h(' h)~ h(~ H) H( H)` H(` H)' H(' H)~ H(~ " ++
    "i) i( i)` i(` i)' i(' i)~ i(~ I) I( I)` I(` I)' I(' I)~ I(~ " ++
    "o) o( o)` o(` o)' o(' - - O) O( O)` O(` O)' O(' - - " ++
    "y) y( y)` y(` y)' y(' y)~ y(~ - Y( - Y(` - Y(' - Y(~ " ++
    "w) w( w)` w(` w)' w(' w)~ w(~ W) W( W)` W(` W)' W(' W)~ W(~ " ++
    "a` a' e` e' h` h' i` i' o` o' y` y' w` w' - - " ++
    "a|( a|) a|)` a|(` a|)' a|(' a|)~ a|(~ - - - - - - - - " ++ -- 1f80-
    "h|( h|) h|)` h|(` h|)' h|(' h|)~ h|(~ - - - - - - - - " ++ -- 1f90-
    "w|( w|) w|)` w|(` w|)' w|(' w|)~ w|(~ - - - - - - - - " ++ -- 1fa0-
    "a. a_ a|` a| a|' - a~ a|~ - - - - - - - - " ++ -- 1fb0-
    "- - h|` h| h|' - h~ h|~ - - - - - - - - " ++ -- 1fc0-
    "i. i_ i=` i=' - - i~ i=~ - - - - - - - - " ++ -- 1fd0-
    "y. y_ y=` y=' r) r( y~ y|~ - - - - - - - - " ++ -- 1fe0-
    "- - w|` w| w|' - w~ w|~ - - - - - - - - " -- 1ff0-
  allCodes = [0x0380 .. 0x03cf] ++ [0x1f00 .. 0x1fff]

View File

@@ -0,0 +1,48 @@
----------------------------------------------------------------------
-- |
-- Module : UTF8
-- Maintainer : AR
-- Stability : (stable)
-- Portability : (portable)
--
-- > CVS $Date: 2005/04/21 16:23:42 $
-- > CVS $Author: bringert $
-- > CVS $Revision: 1.5 $
--
-- From the Char module supplied with HBC.
-- code by Thomas Hallgren (Jul 10 1999)
-----------------------------------------------------------------------------
module GF.Text.UTF8 (decodeUTF8, encodeUTF8) where
-- | Decode a string that was encoded with the UTF-8
-- method back into a Unicode string.
-- Handles 1- to 4-byte sequences (the original stopped at 3 bytes, so
-- characters above U+FFFF fell into the raw-passthrough workaround).
-- Malformed input is still passed through unchanged.
decodeUTF8 :: String -> String
decodeUTF8 "" = ""
decodeUTF8 (c:cs) | c < '\x80' = c : decodeUTF8 cs
decodeUTF8 (c:c':cs) | '\xc0' <= c && c <= '\xdf' &&
                       '\x80' <= c' && c' <= '\xbf' =
  toEnum ((fromEnum c `mod` 0x20) * 0x40 + fromEnum c' `mod` 0x40) : decodeUTF8 cs
decodeUTF8 (c:c':c'':cs) | '\xe0' <= c && c <= '\xef' &&
                           '\x80' <= c' && c' <= '\xbf' &&
                           '\x80' <= c'' && c'' <= '\xbf' =
  toEnum ((fromEnum c `mod` 0x10 * 0x1000) + (fromEnum c' `mod` 0x40) * 0x40 + fromEnum c'' `mod` 0x40) : decodeUTF8 cs
-- 4-byte sequences: code points U+10000..U+10FFFF (lead byte 0xF0..0xF4).
decodeUTF8 (c:c':c'':c''':cs) | '\xf0' <= c && c <= '\xf4' &&
                                '\x80' <= c' && c' <= '\xbf' &&
                                '\x80' <= c'' && c'' <= '\xbf' &&
                                '\x80' <= c''' && c''' <= '\xbf' &&
                                cp <= 0x10FFFF =
  toEnum cp : decodeUTF8 cs
  where cp = (fromEnum c `mod` 0x08) * 0x40000 +
             (fromEnum c' `mod` 0x40) * 0x1000 +
             (fromEnum c'' `mod` 0x40) * 0x40 +
             fromEnum c''' `mod` 0x40
decodeUTF8 s = s ---- AR workaround 22/6/2006: pass malformed input through
----decodeUTF8 _ = error "UniChar.decodeUTF8: bad data"
-- | Encode a Unicode string with the UTF-8 method, one byte per output
-- 'Char'.  Handles 1- to 4-byte sequences; previously characters at or
-- above U+10000 overflowed the 3-byte branch and produced invalid bytes.
-- NOTE(review): NUL is deliberately kept on the two-byte path (0xC0 0x80,
-- the "modified UTF-8" convention) — preserved from the original; confirm
-- intent before changing.
encodeUTF8 :: String -> String
encodeUTF8 "" = ""
encodeUTF8 (c:cs)
  | c > '\x0000' && c < '\x0080' = c : rest
  | c < '\x0800' =                                       -- 2-byte sequence
      toEnum (0xc0 + i `div` 0x40) :
      toEnum (0x80 + i `mod` 0x40) :
      rest
  | c < '\x10000' =                                      -- 3-byte sequence
      toEnum (0xe0 + i `div` 0x1000) :
      toEnum (0x80 + (i `mod` 0x1000) `div` 0x40) :
      toEnum (0x80 + i `mod` 0x40) :
      rest
  | otherwise =                                          -- 4-byte sequence
      toEnum (0xf0 + i `div` 0x40000) :
      toEnum (0x80 + (i `mod` 0x40000) `div` 0x1000) :
      toEnum (0x80 + (i `mod` 0x1000) `div` 0x40) :
      toEnum (0x80 + i `mod` 0x40) :
      rest
  where
    i = fromEnum c
    rest = encodeUTF8 cs