mirror of
https://github.com/GrammaticalFramework/gf-core.git
synced 2026-05-06 17:52:51 -06:00
changed names of resource-1.3; added a note on homepage on release
This commit is contained in:
64
src/PGF/BuildParser.hs
Normal file
64
src/PGF/BuildParser.hs
Normal file
@@ -0,0 +1,64 @@
|
||||
---------------------------------------------------------------------
|
||||
-- |
|
||||
-- Maintainer : Krasimir Angelov
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- FCFG parsing, parser information
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
module PGF.BuildParser where
|
||||
|
||||
import GF.Data.SortedList
|
||||
import GF.Data.Assoc
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Parsing.FCFG.Utilities
|
||||
|
||||
import Data.Array
|
||||
import Data.Maybe
|
||||
import qualified Data.Map as Map
|
||||
import qualified Data.Set as Set
|
||||
import Debug.Trace
|
||||
|
||||
|
||||
------------------------------------------------------------
|
||||
-- parser information
|
||||
|
||||
-- | The terminal that a rule's first linearization slot starts with,
-- if any: a singleton list when slot 0 begins with an 'FSymTok',
-- empty otherwise (including when the slot itself is empty).
getLeftCornerTok (FRule _ _ _ _ lins)
  | not (inRange (bounds firstSlot) 0) = []
  | otherwise =
      case firstSlot ! 0 of
        FSymTok tok -> [tok]
        _           -> []
  where
    firstSlot = lins ! 0
|
||||
|
||||
-- | The category of the argument that a rule's first linearization
-- slot starts with, if any: a singleton list when slot 0 begins with
-- an 'FSymCat', empty otherwise (including when the slot is empty).
getLeftCornerCat (FRule _ _ args _ lins)
  | not (inRange (bounds firstSlot) 0) = []
  | otherwise =
      case firstSlot ! 0 of
        FSymCat _ d -> [args !! d]
        _           -> []
  where
    firstSlot = lins ! 0
|
||||
|
||||
-- | Precompute the lookup tables used by the FCFG parsers: the rules
-- are numbered into a dense array and indexed by result category
-- (top-down), by left-corner category and by left-corner token.
buildParserInfo :: FGrammar -> ParserInfo
buildParserInfo (grammar,startup) = -- trace (unlines [prt (x,Set.toList set) | (x,set) <- Map.toList leftcornFilter]) $
  ParserInfo { allRules = allrules
             , topdownRules = topdownrules
-- , emptyRules = emptyrules
             , epsilonRules = epsilonrules
             , leftcornerCats = leftcorncats
             , leftcornerTokens = leftcorntoks
             , grammarCats = grammarcats
             , grammarToks = grammartoks
             , startupCats = startup
             }
  where -- every rule gets a dense numeric id: its array index
        allrules = listArray (0,length grammar-1) grammar
        -- result category -> ids of the rules producing it
        topdownrules = accumAssoc id [(cat, ruleid) | (ruleid, FRule _ _ _ cat _) <- assocs allrules]
        -- rules whose first linearization slot is empty
        epsilonrules = [ ruleid | (ruleid, FRule _ _ _ _ lins) <- assocs allrules,
                                  not (inRange (bounds (lins ! 0)) 0) ]
        -- left-corner category/token -> ids of the rules starting with it
        leftcorncats = accumAssoc id [ (cat, ruleid) | (ruleid, rule) <- assocs allrules, cat <- getLeftCornerCat rule ]
        leftcorntoks = accumAssoc id [ (tok, ruleid) | (ruleid, rule) <- assocs allrules, tok <- getLeftCornerTok rule ]
        -- all categories that occur as rule results
        grammarcats = aElems topdownrules
        -- all terminals that occur anywhere in the grammar, deduplicated
        grammartoks = nubsort [t | (FRule _ _ _ _ lins) <- grammar, lin <- elems lins, FSymTok t <- elems lin]
|
||||
18
src/PGF/CId.hs
Normal file
18
src/PGF/CId.hs
Normal file
@@ -0,0 +1,18 @@
|
||||
module PGF.CId (CId(..), wildCId, mkCId, prCId) where
|
||||
|
||||
import Data.ByteString.Char8 as BS
|
||||
|
||||
-- | An abstract data type that represents
-- function identifier in PGF.
-- (Also used for category, language and flag names throughout PGF,
-- cf. the 'Map.Map CId' fields of 'PGF.Data.PGF'.)
newtype CId = CId BS.ByteString deriving (Eq,Ord,Show)
|
||||
|
||||
-- | The wildcard identifier @_@; used e.g. as the abstract name of the
-- empty grammar (cf. 'emptyPGF' and 'unionPGF' in "PGF.Data").
wildCId :: CId
wildCId = CId (BS.singleton '_')
|
||||
|
||||
-- | Creates a new identifier from 'String'
mkCId :: String -> CId
mkCId = CId . BS.pack
|
||||
|
||||
-- | Renders the identifier as 'String'
prCId :: CId -> String
prCId (CId raw) = BS.unpack raw
|
||||
171
src/PGF/Check.hs
Normal file
171
src/PGF/Check.hs
Normal file
@@ -0,0 +1,171 @@
|
||||
module PGF.Check (checkPGF) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Macros
|
||||
import GF.Data.ErrM
|
||||
|
||||
import qualified Data.Map as Map
|
||||
import Control.Monad
|
||||
import Debug.Trace
|
||||
|
||||
-- | Type-check the linearization terms of every concrete syntax in the
-- grammar.  Returns the grammar (with possibly rewritten lin terms)
-- together with a flag that is 'True' iff all checks passed; individual
-- failures are only reported via 'trace' (see 'msg' below).
checkPGF :: PGF -> Err (PGF,Bool)
checkPGF pgf = do
  (cs,bs) <- mapM (checkConcrete pgf)
               (Map.assocs (concretes pgf)) >>= return . unzip
  -- keys come from 'Map.assocs' unchanged, so they are still ascending
  return (pgf {concretes = Map.fromAscList cs}, and bs)
|
||||
|
||||
|
||||
-- errors are non-fatal; replace with 'fail' to change this
-- (prints the message via 'Debug.Trace.trace' and then succeeds)
msg s = trace s (return ())
|
||||
|
||||
-- | Monadic version of 'all': run a monadic predicate over a list and
-- conjoin the results.  Note that, unlike a short-circuiting 'all',
-- the action is run on every element.
andMapM :: Monad m => (a -> m Bool) -> [a] -> m Bool
andMapM f xs = liftM and (mapM f xs)
|
||||
|
||||
-- | Tag a failed check with a context label: when the Bool in the
-- result is 'False' the label is printed via 'msg'; the value itself
-- is passed through unchanged in either case.
labelBoolErr :: String -> Err (x,Bool) -> Err (x,Bool)
labelBoolErr ms iob = do
  (x,b) <- iob
  if b then return (x,b) else (msg ms >> return (x,b))
|
||||
|
||||
|
||||
-- | Check all linearization rules of one concrete syntax; the Bool is
-- 'True' iff every rule checked out.  Failures are labelled with the
-- language name.
checkConcrete :: PGF -> (CId,Concr) -> Err ((CId,Concr),Bool)
checkConcrete pgf (lang,cnc) =
  labelBoolErr ("happened in language " ++ prCId lang) $ do
    (rs,bs) <- mapM checkl (Map.assocs (lins cnc)) >>= return . unzip
    -- keys are unchanged, so 'fromAscList' on the 'assocs' order is safe
    return ((lang,cnc{lins = Map.fromAscList rs}),and bs)
 where
   checkl = checkLin pgf lang
|
||||
|
||||
-- | Check one linearization rule against the lin type derived from the
-- function's abstract type.  Failures are labelled with the function
-- name.
checkLin :: PGF -> CId -> (CId,Term) -> Err ((CId,Term),Bool)
checkLin pgf lang (f,t) =
  labelBoolErr ("happened in function " ++ prCId f) $ do
    (t',b) <- checkTerm (lintype pgf lang f) t --- $ inline pgf lang t
    return ((f,t'),b)
|
||||
|
||||
-- | Infer the concrete type of a lin term, given the types of the
-- rule's arguments (indexed by 'V').  The (possibly rewritten) term is
-- returned together with its type.
inferTerm :: [CType] -> Term -> Err (Term,CType)
inferTerm args trm = case trm of
  -- token constant: type Str
  K _ -> returnt str
  -- integer constant: its own parameter type
  C i -> returnt $ ints i
  -- argument variable: look up its declared type, checking the index
  V i -> do
    testErr (i < length args) ("too large index " ++ show i)
    returnt $ args !! i
  -- concatenation: every part must have type Str
  S ts -> do
    (ts',tys) <- mapM infer ts >>= return . unzip
    let tys' = filter (/=str) tys
    testErr (null tys')
      ("expected Str in " ++ show trm ++ " not " ++ unwords (map show tys'))
    return (S ts',str)
  -- record/tuple: its type is the tuple of the field types
  R ts -> do
    (ts',tys) <- mapM infer ts >>= return . unzip
    return $ (R ts',tuple tys)
  -- projection/selection t ! u
  P t u -> do
    (t',tt) <- infer t
    (u',tu) <- infer u
    case tt of
      R tys -> case tu of
        -- selecting with a whole record: expand into a chain of
        -- single-field selections and re-infer
        R vs -> infer $ foldl P t' [P u' (C i) | i <- [0 .. length vs - 1]]
        --- R [v] -> infer $ P t v
        --- R (v:vs) -> infer $ P (head tys) (R vs)

        -- record projection: the index must be statically known
        C i -> do
          testErr (i < length tys)
            ("required more than " ++ show i ++ " fields in " ++ show (R tys))
          return (P t' u', tys !! i) -- record: index must be known
        -- table selection: all branch types must agree
        -- NOTE(review): 'head tys' is partial when the table is empty
        _ -> do
          let typ = head tys
          testErr (all (==typ) tys) ("different types in table " ++ show trm)
          return (P t' u', typ) -- table: types must be same
      _ -> Bad $ "projection from " ++ show t ++ " : " ++ show tt
  -- empty variants: fall back to 'tm0' (defined elsewhere; presumably
  -- a metavariable default -- TODO confirm)
  FV [] -> returnt tm0 ----
  -- variants: all alternatives must have compatible types
  FV (t:ts) -> do
    (t',ty) <- infer t
    (ts',tys) <- mapM infer ts >>= return . unzip
    testErr (all (eqType ty) tys) ("different types in variants " ++ show trm)
    return (FV (t':ts'),ty)
  -- prefix-glued token: type of the glued-onto term
  W s r -> infer r
  _ -> Bad ("no type inference for " ++ show trm)
 where
   returnt ty = return (trm,ty)
   infer = inferTerm args
|
||||
|
||||
-- | Check a lin term against its expected lin type.  Type mismatches
-- and inference failures are reported via 'msg' (non-fatal) and
-- flagged with 'False' in the result; the term is never rejected.
checkTerm :: LinType -> Term -> Err (Term,Bool)
checkTerm (args,val) trm = case inferTerm args trm of
  Ok (t,ty) -> if eqType ty val
                 then return (t,True)
                 else do
                   msg ("term: " ++ show trm ++
                        "\nexpected type: " ++ show val ++
                        "\ninferred type: " ++ show ty)
                   return (t,False)
  Bad s -> do
    msg s
    return (trm,False)
|
||||
|
||||
-- | Compatibility of an inferred concrete type with an expected one.
-- Not symmetric: a parameter range C k fits any larger range C n
-- (run-time correctness only), and an unknown TM fits anything.
eqType :: CType -> CType -> Bool
eqType (C k)  (C n)  = k <= n -- only run-time corr.
eqType (R rs) (R ts) = length rs == length ts && and [eqType r t | (r,t) <- zip rs ts]
eqType (TM _) _      = True ---- for variants [] ; not safe
eqType inferred expected = inferred == expected
|
||||
|
||||
-- should be in a generic module, but not in the run-time DataGFCC

-- | A concrete (linearization) type, represented as a 'Term':
-- @S []@ for Str, @C n@ for a parameter range, @R@ for tuples.
type CType = Term

-- | Lin type of a function: argument types plus value type.
type LinType = ([CType],CType)

-- | Tuple type former.
tuple :: [CType] -> CType
tuple = R

-- | Parameter type of the given size.
ints :: Int -> CType
ints = C

-- | The string type.
str :: CType
str = S []
|
||||
|
||||
-- | Compute the lin type of a function from its abstract type, by
-- looking up the lincat of each argument category and of the value
-- category.
lintype :: PGF -> CId -> CId -> LinType
lintype pgf lang fun = case typeSkeleton (lookType pgf fun) of
  (cs,c) -> (map vlinc cs, linc c) ---- HOAS
 where
   linc = lookLincat pgf lang
   -- a higher-order argument with i bound variables gets i extra Str
   -- fields, one per variable (cf. the Abs case in PGF.Linearize)
   vlinc (0,c) = linc c
   -- NOTE(review): partial -- only the R case is matched; a non-record
   -- lincat for a higher-order argument is a pattern-match failure
   vlinc (i,c) = case linc c of
     R ts -> R (ts ++ replicate i str)
|
||||
|
||||
-- | Recursively replace every function reference 'F' by the referenced
-- linearization, yielding a self-contained term.  (Currently unused in
-- checking -- see the commented-out call in 'checkLin'.)
inline :: PGF -> CId -> Term -> Term
inline pgf lang t = case t of
  F c -> inl $ look c
  _ -> composSafeOp inl t
 where
   inl = inline pgf lang
   look = lookLin pgf lang
|
||||
|
||||
-- | Apply a monadic transformation to the immediate subterms of a
-- term; leaf constructors (K, V, C, F, TM) are returned unchanged.
composOp :: Monad m => (Term -> m Term) -> Term -> m Term
composOp f trm = case trm of
  R ts  -> do ts' <- mapM f ts
              return (R ts')
  S ts  -> do ts' <- mapM f ts
              return (S ts')
  FV ts -> do ts' <- mapM f ts
              return (FV ts')
  P t u -> do t' <- f t
              u' <- f u
              return (P t' u')
  W s t -> do t' <- f t
              return (W s t')
  _     -> return trm
|
||||
|
||||
-- | Pure version of 'composOp', run in the Maybe monad.  The lifted
-- function always succeeds, so the result is always Just and the
-- 'maybe undefined' default is never reached.
composSafeOp :: (Term -> Term) -> Term -> Term
composSafeOp f = maybe undefined id . composOp (return . f)
|
||||
|
||||
-- from GF.Data.Oper

-- | Turn a 'Maybe' into an 'Err', using the message for 'Nothing'.
maybeErr :: String -> Maybe a -> Err a
maybeErr reason Nothing  = Bad reason
maybeErr _      (Just a) = Ok a
|
||||
|
||||
-- | Succeed iff the condition holds; otherwise fail with the message.
testErr :: Bool -> String -> Err ()
testErr cond reason
  | cond      = return ()
  | otherwise = Bad reason
|
||||
|
||||
-- | Extract the value from an 'Err', with a default for the Bad case.
errVal :: a -> Err a -> a
errVal a = err (const a) id

-- | Append a context note to the message of a failing 'Err'.
-- (The parameter shadows the top-level 'msg' above; intentional here.)
errIn :: String -> Err a -> Err a
errIn msg = err (\s -> Bad (s ++ "\nOCCURRED IN\n" ++ msg)) return
|
||||
|
||||
-- | Case analysis on 'Err' -- the analogue of 'either'.
err :: (String -> b) -> (a -> b) -> Err a -> b
err onBad onOk e =
  case e of
    Ok a  -> onOk a
    Bad s -> onBad s
|
||||
201
src/PGF/Data.hs
Normal file
201
src/PGF/Data.hs
Normal file
@@ -0,0 +1,201 @@
|
||||
module PGF.Data where
|
||||
|
||||
import PGF.CId
|
||||
import GF.Text.UTF8
|
||||
import GF.Data.Assoc
|
||||
|
||||
import qualified Data.Map as Map
|
||||
import Data.List
|
||||
import Data.Array
|
||||
|
||||
-- internal datatypes for PGF
|
||||
|
||||
-- | An abstract data type representing multilingual grammar
|
||||
-- in Portable Grammar Format.
|
||||
data PGF = PGF {
|
||||
absname :: CId ,
|
||||
cncnames :: [CId] ,
|
||||
gflags :: Map.Map CId String, -- value of a global flag
|
||||
abstract :: Abstr ,
|
||||
concretes :: Map.Map CId Concr
|
||||
}
|
||||
|
||||
data Abstr = Abstr {
|
||||
aflags :: Map.Map CId String, -- value of a flag
|
||||
funs :: Map.Map CId (Type,Expr), -- type and def of a fun
|
||||
cats :: Map.Map CId [Hypo], -- context of a cat
|
||||
catfuns :: Map.Map CId [CId] -- funs to a cat (redundant, for fast lookup)
|
||||
}
|
||||
|
||||
data Concr = Concr {
|
||||
cflags :: Map.Map CId String, -- value of a flag
|
||||
lins :: Map.Map CId Term, -- lin of a fun
|
||||
opers :: Map.Map CId Term, -- oper generated by subex elim
|
||||
lincats :: Map.Map CId Term, -- lin type of a cat
|
||||
lindefs :: Map.Map CId Term, -- lin default of a cat
|
||||
printnames :: Map.Map CId Term, -- printname of a cat or a fun
|
||||
paramlincats :: Map.Map CId Term, -- lin type of cat, with printable param names
|
||||
parser :: Maybe ParserInfo -- parser
|
||||
}
|
||||
|
||||
data Type =
|
||||
DTyp [Hypo] CId [Expr]
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
data Literal =
|
||||
LStr String -- ^ string constant
|
||||
| LInt Integer -- ^ integer constant
|
||||
| LFlt Double -- ^ floating point constant
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
-- | The tree is an evaluated expression in the abstract syntax
|
||||
-- of the grammar. The type is especially restricted to not
|
||||
-- allow unapplied lambda abstractions. The tree is used directly
|
||||
-- from the linearizer and is produced directly from the parser.
|
||||
data Tree =
|
||||
Abs [CId] Tree -- ^ lambda abstraction. The list of variables is non-empty
|
||||
| Var CId -- ^ variable
|
||||
| Fun CId [Tree] -- ^ function application
|
||||
| Lit Literal -- ^ literal
|
||||
| Meta Int -- ^ meta variable
|
||||
deriving (Show, Eq, Ord)
|
||||
|
||||
-- | An expression represents a potentially unevaluated expression
|
||||
-- in the abstract syntax of the grammar. It can be evaluated with
|
||||
-- the 'expr2tree' function and then linearized or it can be used
|
||||
-- directly in the dependent types.
|
||||
data Expr =
|
||||
EAbs CId Expr -- ^ lambda abstraction
|
||||
| EApp Expr Expr -- ^ application
|
||||
| ELit Literal -- ^ literal
|
||||
| EMeta Int -- ^ meta variable
|
||||
| EVar CId -- ^ variable or function reference
|
||||
| EEq [Equation] -- ^ lambda function defined as a set of equations with pattern matching
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
-- | Linearization terms of the concrete syntax (as consumed by
-- "PGF.Linearize" and checked by "PGF.Check"):
data Term =
   R [Term]    -- ^ record/tuple of fields
 | P Term Term -- ^ projection/selection: first term indexed by second
 | S [Term]    -- ^ sequence (concatenation) of string-valued terms
 | K Tokn      -- ^ token constant
 | V Int       -- ^ rule-argument variable, by position
 | C Int       -- ^ integer constant (parameter value / field index)
 | F CId       -- ^ reference to a named lin or oper
 | FV [Term]   -- ^ free variation between alternatives
 | W String Term -- ^ string prefix glued onto a term
 | TM String   -- ^ metavariable / unknown (printed as its payload)
  deriving (Eq,Ord,Show)
|
||||
|
||||
data Tokn =
|
||||
KS String
|
||||
| KP [String] [Alternative]
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
data Alternative =
|
||||
Alt [String] [String]
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
data Hypo =
|
||||
Hyp CId Type
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
-- | The equation is used to define lambda function as a sequence
|
||||
-- of equations with pattern matching. The list of 'Expr' represents
|
||||
-- the patterns and the second 'Expr' is the function body for this
|
||||
-- equation.
|
||||
data Equation =
|
||||
Equ [Expr] Expr
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
|
||||
type FToken = String
|
||||
type FCat = Int
|
||||
type FIndex = Int
|
||||
data FSymbol
|
||||
= FSymCat {-# UNPACK #-} !FIndex {-# UNPACK #-} !Int
|
||||
| FSymTok FToken
|
||||
type Profile = [Int]
|
||||
type FPointPos = Int
|
||||
type FGrammar = ([FRule], Map.Map CId [FCat])
|
||||
data FRule = FRule CId [Profile] [FCat] FCat (Array FIndex (Array FPointPos FSymbol))
|
||||
|
||||
type RuleId = Int
|
||||
|
||||
data ParserInfo
|
||||
= ParserInfo { allRules :: Array RuleId FRule
|
||||
, topdownRules :: Assoc FCat [RuleId]
|
||||
-- ^ used in 'GF.Parsing.MCFG.Active' (Earley):
|
||||
-- , emptyRules :: [RuleId]
|
||||
, epsilonRules :: [RuleId]
|
||||
-- ^ used in 'GF.Parsing.MCFG.Active' (Kilbury):
|
||||
, leftcornerCats :: Assoc FCat [RuleId]
|
||||
, leftcornerTokens :: Assoc FToken [RuleId]
|
||||
-- ^ used in 'GF.Parsing.MCFG.Active' (Kilbury):
|
||||
, grammarCats :: [FCat]
|
||||
, grammarToks :: [FToken]
|
||||
, startupCats :: Map.Map CId [FCat]
|
||||
}
|
||||
|
||||
|
||||
fcatString, fcatInt, fcatFloat, fcatVar :: Int
|
||||
fcatString = (-1)
|
||||
fcatInt = (-2)
|
||||
fcatFloat = (-3)
|
||||
fcatVar = (-4)
|
||||
|
||||
|
||||
-- print statistics
|
||||
|
||||
statGFCC :: PGF -> String
|
||||
statGFCC pgf = unlines [
|
||||
"Abstract\t" ++ prCId (absname pgf),
|
||||
"Concretes\t" ++ unwords (map prCId (cncnames pgf)),
|
||||
"Categories\t" ++ unwords (map prCId (Map.keys (cats (abstract pgf))))
|
||||
]
|
||||
|
||||
-- merge two PGFs; if the abstract names differ, the first argument is
-- returned unchanged (no error is reported); otherwise the second
-- argument's concretes take priority

unionPGF :: PGF -> PGF -> PGF
unionPGF one two = case absname one of
  n | n == wildCId -> two -- extending empty grammar
    | n == absname two -> one { -- extending grammar with same abstract
        concretes = Map.union (concretes two) (concretes one),
        cncnames = union (cncnames two) (cncnames one)
       }
  _ -> one -- abstracts don't match ---- print error msg
|
||||
|
||||
-- | The empty multilingual grammar: wildcard abstract name and no
-- concretes.  Forcing its 'abstract' field is an error.
emptyPGF :: PGF
emptyPGF = PGF {
  absname = wildCId,
  cncnames = [] ,
  gflags = Map.empty,
  abstract = error "empty grammar, no abstract",
  concretes = Map.empty
  }
|
||||
|
||||
-- encode identifiers and strings in UTF8

-- | UTF-8-encode all string tokens in every concrete syntax
-- (lins and opers only; see the TODO below for identifiers and flags).
utf8GFCC :: PGF -> PGF
utf8GFCC pgf = pgf {
  concretes = Map.map u8concr (concretes pgf)
  }
 where
  u8concr cnc = cnc {
    lins = Map.map u8term (lins cnc),
    opers = Map.map u8term (opers cnc)
    }
  u8term = convertStringsInTerm encodeUTF8
|
||||
|
||||
---- TODO: convert identifiers and flags

-- | Apply a string transformation to every token in a term.
-- NOTE(review): only KS tokens and W prefixes are converted; the
-- strings inside a KP token are left untouched -- confirm intended.
convertStringsInTerm conv t = case t of
  K (KS s) -> K (KS (conv s))
  W s r -> W (conv s) (convs r)
  R ts -> R $ map convs ts
  S ts -> S $ map convs ts
  FV ts -> FV $ map convs ts
  P u v -> P (convs u) (convs v)
  _ -> t
 where
   convs = convertStringsInTerm conv
|
||||
|
||||
203
src/PGF/Expr.hs
Normal file
203
src/PGF/Expr.hs
Normal file
@@ -0,0 +1,203 @@
|
||||
module PGF.Expr(readTree, showTree, pTree, ppTree,
|
||||
readExpr, showExpr, pExpr, ppExpr,
|
||||
|
||||
tree2expr, expr2tree,
|
||||
|
||||
-- needed in the typechecker
|
||||
Value(..), Env, eval,
|
||||
|
||||
-- helpers
|
||||
pIdent,pStr
|
||||
) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
|
||||
import Data.Char
|
||||
import Data.Maybe
|
||||
import Control.Monad
|
||||
import qualified Text.PrettyPrint as PP
|
||||
import qualified Text.ParserCombinators.ReadP as RP
|
||||
import qualified Data.Map as Map
|
||||
|
||||
|
||||
-- | parses 'String' as an expression
-- ('Nothing' when the parse fails or is ambiguous)
readTree :: String -> Maybe Tree
readTree s = case [x | (x,cs) <- RP.readP_to_S (pTree False) s, all isSpace cs] of
  [x] -> Just x
  _   -> Nothing

-- | renders expression as 'String'
showTree :: Tree -> String
showTree = PP.render . ppTree 0

-- | parses 'String' as an expression
-- ('Nothing' when the parse fails or is ambiguous)
readExpr :: String -> Maybe Expr
readExpr s = case [x | (x,cs) <- RP.readP_to_S pExpr s, all isSpace cs] of
  [x] -> Just x
  _   -> Nothing

-- | renders expression as 'String'
showExpr :: Expr -> String
showExpr = PP.render . ppExpr 0
|
||||
|
||||
|
||||
-----------------------------------------------------
|
||||
-- Parsing
|
||||
-----------------------------------------------------
|
||||
|
||||
pTrees :: RP.ReadP [Tree]
|
||||
pTrees = liftM2 (:) (pTree True) pTrees RP.<++ (RP.skipSpaces >> return [])
|
||||
|
||||
pTree :: Bool -> RP.ReadP Tree
|
||||
pTree isNested = RP.skipSpaces >> (pParen RP.<++ pAbs RP.<++ pApp RP.<++ fmap Lit pLit RP.<++ pMeta)
|
||||
where
|
||||
pParen = RP.between (RP.char '(') (RP.char ')') (pTree False)
|
||||
pAbs = do xs <- RP.between (RP.char '\\') (RP.skipSpaces >> RP.string "->") (RP.sepBy1 (RP.skipSpaces >> pCId) (RP.skipSpaces >> RP.char ','))
|
||||
t <- pTree False
|
||||
return (Abs xs t)
|
||||
pApp = do f <- pCId
|
||||
ts <- (if isNested then return [] else pTrees)
|
||||
return (Fun f ts)
|
||||
pMeta = do RP.char '?'
|
||||
n <- fmap read (RP.munch1 isDigit)
|
||||
return (Meta n)
|
||||
|
||||
pExpr :: RP.ReadP Expr
|
||||
pExpr = RP.skipSpaces >> (pAbs RP.<++ pTerm RP.<++ pEqs)
|
||||
where
|
||||
pTerm = fmap (foldl1 EApp) (RP.sepBy1 pFactor RP.skipSpaces)
|
||||
|
||||
pFactor = fmap EVar pCId
|
||||
RP.<++ fmap ELit pLit
|
||||
RP.<++ pMeta
|
||||
RP.<++ RP.between (RP.char '(') (RP.char ')') pExpr
|
||||
|
||||
pAbs = do xs <- RP.between (RP.char '\\') (RP.skipSpaces >> RP.string "->") (RP.sepBy1 (RP.skipSpaces >> pCId) (RP.skipSpaces >> RP.char ','))
|
||||
e <- pExpr
|
||||
return (foldr EAbs e xs)
|
||||
|
||||
pMeta = do RP.char '?'
|
||||
n <- fmap read (RP.munch1 isDigit)
|
||||
return (EMeta n)
|
||||
|
||||
pEqs = fmap EEq $
|
||||
RP.between (RP.skipSpaces >> RP.char '{')
|
||||
(RP.skipSpaces >> RP.char '}')
|
||||
(RP.sepBy1 (RP.skipSpaces >> pEq)
|
||||
(RP.skipSpaces >> RP.string ";"))
|
||||
|
||||
pEq = do pats <- (RP.sepBy1 pExpr RP.skipSpaces)
|
||||
RP.skipSpaces >> RP.string "=>"
|
||||
e <- pExpr
|
||||
return (Equ pats e)
|
||||
|
||||
pLit :: RP.ReadP Literal
|
||||
pLit = pNum RP.<++ liftM LStr pStr
|
||||
|
||||
pNum = do x <- RP.munch1 isDigit
|
||||
((RP.char '.' >> RP.munch1 isDigit >>= \y -> return (LFlt (read (x++"."++y))))
|
||||
RP.<++
|
||||
(return (LInt (read x))))
|
||||
|
||||
-- | Parse a double-quoted string literal; a backslash escapes the
-- following character, so @\\\"@ and @\\\\@ work inside the literal.
pStr :: RP.ReadP String
pStr = RP.char '"' >> (RP.manyTill (pEsc RP.<++ RP.get) (RP.char '"'))
  where
    -- drop the backslash, keep the escaped character verbatim
    pEsc = RP.char '\\' >> RP.get
|
||||
|
||||
pCId = fmap mkCId pIdent
|
||||
|
||||
-- | Parse an identifier: a letter or underscore, followed by any
-- number of alphanumerics, underscores and primes.
pIdent :: RP.ReadP String
pIdent = liftM2 (:) (RP.satisfy isIdentFirst) (RP.munch isIdentRest)
  where
    isIdentFirst c = c == '_' || isLetter c
    isIdentRest c = c == '_' || c == '\'' || isAlphaNum c
|
||||
|
||||
|
||||
-----------------------------------------------------
|
||||
-- Printing
|
||||
-----------------------------------------------------
|
||||
|
||||
ppTree d (Abs xs t) = ppParens (d > 0) (PP.char '\\' PP.<>
|
||||
PP.hsep (PP.punctuate PP.comma (map (PP.text . prCId) xs)) PP.<+>
|
||||
PP.text "->" PP.<+>
|
||||
ppTree 0 t)
|
||||
ppTree d (Fun f []) = PP.text (prCId f)
|
||||
ppTree d (Fun f ts) = ppParens (d > 0) (PP.text (prCId f) PP.<+> PP.hsep (map (ppTree 1) ts))
|
||||
ppTree d (Lit l) = ppLit l
|
||||
ppTree d (Meta n) = PP.char '?' PP.<> PP.int n
|
||||
ppTree d (Var id) = PP.text (prCId id)
|
||||
|
||||
|
||||
-- | Pretty-print an expression at precedence level @d@ (0 = top level;
-- higher levels add parentheses around abstractions and applications).
ppExpr d (EAbs x e) = let (xs,e1) = getVars (EAbs x e)
                      in ppParens (d > 0) (PP.char '\\' PP.<>
                                           PP.hsep (PP.punctuate PP.comma (map (PP.text . prCId) xs)) PP.<+>
                                           PP.text "->" PP.<+>
                                           ppExpr 0 e1)
                      where
                        -- collect consecutive lambdas into one binder list
                        getVars (EAbs x e) = let (xs,e1) = getVars e in (x:xs,e1)
                        getVars e          = ([],e)
ppExpr d (EApp e1 e2) = ppParens (d > 1) ((ppExpr 1 e1) PP.<+> (ppExpr 2 e2))
ppExpr d (ELit l)     = ppLit l
-- no space after '?': 'pMeta' in 'pExpr' only accepts '?' immediately
-- followed by digits, and 'ppTree' prints metas the same way
ppExpr d (EMeta n)    = PP.char '?' PP.<> PP.int n
ppExpr d (EVar f)     = PP.text (prCId f)
ppExpr d (EEq eqs)    = PP.braces (PP.sep (PP.punctuate PP.semi (map ppEquation eqs)))
|
||||
|
||||
ppEquation (Equ pats e) = PP.hsep (map (ppExpr 2) pats) PP.<+> PP.text "=>" PP.<+> ppExpr 0 e
|
||||
|
||||
ppLit (LStr s) = PP.text (show s)
|
||||
ppLit (LInt n) = PP.integer n
|
||||
ppLit (LFlt d) = PP.double d
|
||||
|
||||
-- | Wrap a document in parentheses when the flag is set.
ppParens :: Bool -> PP.Doc -> PP.Doc
ppParens needed doc = if needed then PP.parens doc else doc
|
||||
|
||||
|
||||
-----------------------------------------------------
|
||||
-- Evaluation
|
||||
-----------------------------------------------------
|
||||
|
||||
-- | Converts a tree to expression.
|
||||
tree2expr :: Tree -> Expr
|
||||
tree2expr (Fun x ts) = foldl EApp (EVar x) (map tree2expr ts)
|
||||
tree2expr (Lit l) = ELit l
|
||||
tree2expr (Meta n) = EMeta n
|
||||
tree2expr (Abs xs t) = foldr EAbs (tree2expr t) xs
|
||||
tree2expr (Var x) = EVar x
|
||||
|
||||
-- | Converts an expression to tree. If the expression
|
||||
-- contains unevaluated applications they will be applied.
|
||||
expr2tree :: Expr -> Tree
|
||||
expr2tree e = value2tree (eval Map.empty e) [] []
|
||||
where
|
||||
value2tree (VApp v1 v2) xs ts = value2tree v1 xs (value2tree v2 [] []:ts)
|
||||
value2tree (VVar x) xs ts = ret xs (fun xs x ts)
|
||||
value2tree (VMeta n) xs [] = ret xs (Meta n)
|
||||
value2tree (VLit l) xs [] = ret xs (Lit l)
|
||||
value2tree (VClosure env (EAbs x e)) xs [] = value2tree (eval (Map.insert x (VVar x) env) e) (x:xs) []
|
||||
|
||||
fun xs x ts
|
||||
| x `elem` xs = Var x
|
||||
| otherwise = Fun x ts
|
||||
|
||||
ret [] t = t
|
||||
ret xs t = Abs (reverse xs) t
|
||||
|
||||
data Value
|
||||
= VGen Int
|
||||
| VApp Value Value
|
||||
| VVar CId
|
||||
| VMeta Int
|
||||
| VLit Literal
|
||||
| VClosure Env Expr
|
||||
|
||||
type Env = Map.Map CId Value
|
||||
|
||||
-- | Evaluate an expression to weak head normal form under an
-- environment; unapplied lambdas become closures, unbound variables
-- stay as 'VVar'.
-- NOTE(review): there is no case for 'EEq' -- evaluating an equation
-- set is a pattern-match failure; confirm callers never pass one.
eval :: Env -> Expr -> Value
eval env (EVar x)     = fromMaybe (VVar x) (Map.lookup x env)
eval env (EApp e1 e2) = apply (eval env e1) (eval env e2)
eval env (EAbs x e)   = VClosure env (EAbs x e)
eval env (EMeta k)    = VMeta k
eval env (ELit l)     = VLit l
|
||||
|
||||
-- | Apply one value to another: beta-reduce a closure over a lambda,
-- otherwise build a neutral application node.
apply :: Value -> Value -> Value
apply fun arg =
  case fun of
    VClosure env (EAbs x body) -> eval (Map.insert x arg env) body
    _                          -> VApp fun arg
|
||||
70
src/PGF/Generate.hs
Normal file
70
src/PGF/Generate.hs
Normal file
@@ -0,0 +1,70 @@
|
||||
module PGF.Generate where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Macros
|
||||
|
||||
import qualified Data.Map as M
|
||||
import System.Random
|
||||
|
||||
-- generate an infinite list of trees exhaustively
|
||||
generate :: PGF -> CId -> Maybe Int -> [Tree]
|
||||
generate pgf cat dp = concatMap (\i -> gener i cat) depths
|
||||
where
|
||||
gener 0 c = [Fun f [] | (f, ([],_)) <- fns c]
|
||||
gener i c = [
|
||||
tr |
|
||||
(f, (cs,_)) <- fns c,
|
||||
let alts = map (gener (i-1)) cs,
|
||||
ts <- combinations alts,
|
||||
let tr = Fun f ts,
|
||||
depth tr >= i
|
||||
]
|
||||
fns c = [(f,catSkeleton ty) | (f,ty) <- functionsToCat pgf c]
|
||||
depths = maybe [0 ..] (\d -> [0..d]) dp
|
||||
|
||||
-- generate an infinite list of trees randomly
|
||||
genRandom :: StdGen -> PGF -> CId -> [Tree]
|
||||
genRandom gen pgf cat = genTrees (randomRs (0.0, 1.0 :: Double) gen) cat where
|
||||
|
||||
timeout = 47 -- give up
|
||||
|
||||
genTrees ds0 cat =
|
||||
let (ds,ds2) = splitAt (timeout+1) ds0 -- for time out, else ds
|
||||
(t,k) = genTree ds cat
|
||||
in (if k>timeout then id else (t:))
|
||||
(genTrees ds2 cat) -- else (drop k ds)
|
||||
|
||||
genTree rs = gett rs where
|
||||
gett ds cid | cid == mkCId "String" = (Lit (LStr "foo"), 1)
|
||||
gett ds cid | cid == mkCId "Int" = (Lit (LInt 12345), 1)
|
||||
gett [] _ = (Lit (LStr "TIMEOUT"), 1) ----
|
||||
gett ds cat = case fns cat of
|
||||
[] -> (Meta 0,1)
|
||||
fs -> let
|
||||
d:ds2 = ds
|
||||
(f,args) = getf d fs
|
||||
(ts,k) = getts ds2 args
|
||||
in (Fun f ts, k+1)
|
||||
getf d fs = let lg = (length fs) in
|
||||
fs !! (floor (d * fromIntegral lg))
|
||||
getts ds cats = case cats of
|
||||
c:cs -> let
|
||||
(t, k) = gett ds c
|
||||
(ts,ks) = getts (drop k ds) cs
|
||||
in (t:ts, k + ks)
|
||||
_ -> ([],0)
|
||||
|
||||
fns cat = [(f,(fst (catSkeleton ty))) | (f,ty) <- functionsToCat pgf cat]
|
||||
|
||||
|
||||
{-
|
||||
-- brute-force parsing method; only returns the first result
|
||||
-- note: you cannot throw away rules with unknown words from the grammar
|
||||
-- because it is not known which field in each rule may match the input
|
||||
|
||||
searchParse :: Int -> PGF -> CId -> [String] -> [Exp]
|
||||
searchParse i pgf cat ws = [t | t <- gen, s <- lins t, words s == ws] where
|
||||
gen = take i $ generate pgf cat
|
||||
lins t = [linearize pgf lang t | lang <- cncnames pgf]
|
||||
-}
|
||||
99
src/PGF/Linearize.hs
Normal file
99
src/PGF/Linearize.hs
Normal file
@@ -0,0 +1,99 @@
|
||||
module PGF.Linearize (linearizes,realize,realizes,linTree) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Macros
|
||||
|
||||
import qualified Data.Map as Map
|
||||
import Data.List
|
||||
|
||||
import Debug.Trace
|
||||
|
||||
-- linearization and computation of concrete PGF Terms
|
||||
|
||||
linearizes :: PGF -> CId -> Tree -> [String]
|
||||
linearizes pgf lang = realizes . linTree pgf lang
|
||||
|
||||
realize :: Term -> String
|
||||
realize = concat . take 1 . realizes
|
||||
|
||||
realizes :: Term -> [String]
|
||||
realizes = map (unwords . untokn) . realizest
|
||||
|
||||
realizest :: Term -> [[Tokn]]
|
||||
realizest trm = case trm of
|
||||
R ts -> realizest (ts !! 0)
|
||||
S ss -> map concat $ combinations $ map realizest ss
|
||||
K t -> [[t]]
|
||||
W s t -> [[KS (s ++ r)] | [KS r] <- realizest t]
|
||||
FV ts -> concatMap realizest ts
|
||||
TM s -> [[KS s]]
|
||||
_ -> [[KS $ "REALIZE_ERROR " ++ show trm]] ---- debug
|
||||
|
||||
untokn :: [Tokn] -> [String]
|
||||
untokn ts = case ts of
|
||||
KP d _ : [] -> d
|
||||
KP d vs : ws -> let ss@(s:_) = untokn ws in sel d vs s ++ ss
|
||||
KS s : ws -> s : untokn ws
|
||||
[] -> []
|
||||
where
|
||||
sel d vs w = case [v | Alt v cs <- vs, any (\c -> isPrefixOf c w) cs] of
|
||||
v:_ -> v
|
||||
_ -> d
|
||||
|
||||
linTree :: PGF -> CId -> Tree -> Term
|
||||
linTree pgf lang = lin
|
||||
where
|
||||
lin (Abs xs e ) = case lin e of
|
||||
R ts -> R $ ts ++ (Data.List.map (kks . prCId) xs)
|
||||
TM s -> R $ (TM s) : (Data.List.map (kks . prCId) xs)
|
||||
lin (Fun fun es) = comp (map lin es) $ look fun
|
||||
lin (Lit (LStr s)) = R [kks (show s)] -- quoted
|
||||
lin (Lit (LInt i)) = R [kks (show i)]
|
||||
lin (Lit (LFlt d)) = R [kks (show d)]
|
||||
lin (Var x) = TM (prCId x)
|
||||
lin (Meta i) = TM (show i)
|
||||
|
||||
comp = compute pgf lang
|
||||
look = lookLin pgf lang
|
||||
|
||||
|
||||
compute :: PGF -> CId -> [Term] -> Term -> Term
|
||||
compute pgf lang args = comp where
|
||||
comp trm = case trm of
|
||||
P r p -> proj (comp r) (comp p)
|
||||
W s t -> W s (comp t)
|
||||
R ts -> R $ map comp ts
|
||||
V i -> idx args i -- already computed
|
||||
F c -> comp $ look c -- not computed (if contains argvar)
|
||||
FV ts -> FV $ map comp ts
|
||||
S ts -> S $ filter (/= S []) $ map comp ts
|
||||
_ -> trm
|
||||
|
||||
look = lookOper pgf lang
|
||||
|
||||
idx xs i = if i > length xs - 1
|
||||
then trace
|
||||
("too large " ++ show i ++ " for\n" ++ unlines (map show xs) ++ "\n") tm0
|
||||
else xs !! i
|
||||
|
||||
proj r p = case (r,p) of
|
||||
(_, FV ts) -> FV $ map (proj r) ts
|
||||
(FV ts, _ ) -> FV $ map (\t -> proj t p) ts
|
||||
(W s t, _) -> kks (s ++ getString (proj t p))
|
||||
_ -> comp $ getField r (getIndex p)
|
||||
|
||||
getString t = case t of
|
||||
K (KS s) -> s
|
||||
_ -> error ("ERROR in grammar compiler: string from "++ show t) "ERR"
|
||||
|
||||
getIndex t = case t of
|
||||
C i -> i
|
||||
TM _ -> 0 -- default value for parameter
|
||||
_ -> trace ("ERROR in grammar compiler: index from " ++ show t) 666
|
||||
|
||||
getField t i = case t of
|
||||
R rs -> idx rs i
|
||||
TM s -> TM s
|
||||
_ -> error ("ERROR in grammar compiler: field from " ++ show t) t
|
||||
|
||||
139
src/PGF/Macros.hs
Normal file
139
src/PGF/Macros.hs
Normal file
@@ -0,0 +1,139 @@
|
||||
module PGF.Macros where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import Control.Monad
|
||||
import qualified Data.Map as Map
|
||||
import qualified Data.Array as Array
|
||||
import Data.Maybe
|
||||
import Data.List
|
||||
|
||||
-- operations for manipulating PGF grammars and objects
|
||||
|
||||
-- | Linearization term of a function in the given concrete syntax
-- ('tm0' when the function has no linearization).
lookLin :: PGF -> CId -> CId -> Term
lookLin pgf lang fun = lookMap tm0 fun (lins concr)
  where concr = lookMap (error "no lang") lang (concretes pgf)

-- | Definition of an auxiliary oper in the given concrete syntax.
lookOper :: PGF -> CId -> CId -> Term
lookOper pgf lang fun = lookMap tm0 fun (opers concr)
  where concr = lookMap (error "no lang") lang (concretes pgf)

-- | Linearization type of a category in the given concrete syntax.
lookLincat :: PGF -> CId -> CId -> Term
lookLincat pgf lang cat = lookMap tm0 cat (lincats concr)
  where concr = lookMap (error "no lang") lang (concretes pgf)

-- | Parameter linearization type of a category in the given concrete syntax.
lookParamLincat :: PGF -> CId -> CId -> Term
lookParamLincat pgf lang cat = lookMap tm0 cat (paramlincats concr)
  where concr = lookMap (error "no lang") lang (concretes pgf)

-- | Print name of an identifier in the given concrete syntax.
lookPrintName :: PGF -> CId -> CId -> Term
lookPrintName pgf lang fun = lookMap tm0 fun (printnames concr)
  where concr = lookMap (error "no lang") lang (concretes pgf)

-- | Type of an abstract function; 'error' if it is not declared.
lookType :: PGF -> CId -> Type
lookType pgf f = fst (lookMap err f (funs (abstract pgf)))
  where err = error ("lookType " ++ show f)

-- | Value category of an abstract function.
lookValCat :: PGF -> CId -> CId
lookValCat pgf = valCat . lookType pgf

-- | Parser tables of a concrete syntax, if it was compiled with a parser.
lookParser :: PGF -> CId -> Maybe ParserInfo
lookParser pgf lang = parser =<< Map.lookup lang (concretes pgf)

-- | Plain FCFG view of the parser tables, if present.
lookFCFG :: PGF -> CId -> Maybe FGrammar
lookFCFG pgf lang = fmap toFGrammar (lookParser pgf lang)
  where
    toFGrammar :: ParserInfo -> FGrammar
    toFGrammar pinfo = (Array.elems (allRules pinfo), startupCats pinfo)

-- | The @startcat@ flag, taken first from the global flags and then from the
-- abstract flags; defaults to @\"S\"@.
lookStartCat :: PGF -> String
lookStartCat pgf =
  case mapMaybe (Map.lookup (mkCId "startcat")) [gflags pgf, aflags (abstract pgf)] of
    (cat:_) -> cat
    []      -> "S"

-- | Global flag value, @\"?\"@ when undefined.
lookGlobalFlag :: PGF -> CId -> String
lookGlobalFlag pgf f = lookMap "?" f (gflags pgf)

-- | Abstract-syntax flag value, @\"?\"@ when undefined.
lookAbsFlag :: PGF -> CId -> String
lookAbsFlag pgf f = lookMap "?" f (aflags (abstract pgf))

-- | Concrete syntax by name; 'error' when the grammar has no such language.
lookConcr :: PGF -> CId -> Concr
lookConcr pgf cnc = lookMap missing cnc (concretes pgf)
  where missing = error ("Missing concrete syntax: " ++ prCId cnc)

-- | Concrete-syntax flag value, if set.
lookConcrFlag :: PGF -> CId -> CId -> Maybe String
lookConcrFlag pgf lang f = Map.lookup f (cflags (lookConcr pgf lang))
|
||||
-- | All declared functions whose value category is the given one,
-- paired with their types.
functionsToCat :: PGF -> CId -> [(CId,Type)]
functionsToCat pgf cat = mapMaybe withType (lookMap [] cat (catfuns abstr))
  where
    abstr = abstract pgf
    -- keep only functions that actually have a type declaration
    withType f = case Map.lookup f (funs abstr) of
                   Just (ty,_) -> Just (f,ty)
                   Nothing     -> Nothing

-- | Abstract functions with no linearization in the given language.
missingLins :: PGF -> CId -> [CId]
missingLins pgf lang = filter (not . hasLin pgf lang) allFuns
  where allFuns = Map.keys (funs (abstract pgf))

-- | Does the function have a linearization in the given language?
hasLin :: PGF -> CId -> CId -> Bool
hasLin pgf lang f = Map.member f (lins (lookConcr pgf lang))

-- | Keep only the abstract functions and categories satisfying the predicate.
---- restrict concrs also, might be needed
restrictPGF :: (CId -> Bool) -> PGF -> PGF
restrictPGF cond pgf =
  pgf { abstract = abstr { funs = keep (funs abstr)
                         , cats = keep (cats abstr)
                         } }
  where
    keep  = Map.filterWithKey (\c _ -> cond c)
    abstr = abstract pgf
||||
|
||||
-- | Depth of a tree: abstractions are transparent, applications add one level,
-- leaves count as one.
depth :: Tree -> Int
depth t = case t of
  Abs _ b  -> depth b
  Fun _ ts -> 1 + maximum (0 : map depth ts)
  _        -> 1

-- | Context-free function type from argument and value categories.
cftype :: [CId] -> CId -> Type
cftype args val = DTyp (map argHyp args) val []
  where argHyp arg = Hyp wildCId (cftype [] arg)

-- | Argument categories and value category of a type.
catSkeleton :: Type -> ([CId],CId)
catSkeleton (DTyp hyps val _) = ([valCat ty | Hyp _ ty <- hyps], val)

-- | Like 'catSkeleton', but each argument carries its own context length.
typeSkeleton :: Type -> ([(Int,CId)],CId)
typeSkeleton (DTyp hyps val _) = ([(contextLength ty, valCat ty) | Hyp _ ty <- hyps], val)

-- | Value category of a type.
valCat :: Type -> CId
valCat (DTyp _ val _) = val

-- | Number of hypotheses (arguments) of a type.
contextLength :: Type -> Int
contextLength (DTyp hyps _ _) = length hyps

primNotion :: Expr
primNotion = EEq []

-- | Default linearization term for an identifier.
term0 :: CId -> Term
term0 c = TM (prCId c)

-- | Default/unknown linearization term.
tm0 :: Term
tm0 = TM "?"

-- | String literal as a linearization term.
kks :: String -> Term
kks s = K (KS s)

-- lookup with default value
lookMap :: (Show i, Ord i) => a -> i -> Map.Map i a -> a
lookMap d = Map.findWithDefault d
|
||||
--- from Operations
|
||||
-- | Cartesian product of a list of choice lists: one result per way of
-- picking one element from each inner list (same as 'sequence' on lists).
combinations :: [[a]] -> [[a]]
combinations = foldr pick [[]]
  where pick choices rest = [c:u | c <- choices, u <- rest]
||||
|
||||
|
||||
32
src/PGF/Morphology.hs
Normal file
32
src/PGF/Morphology.hs
Normal file
@@ -0,0 +1,32 @@
|
||||
module PGF.Morphology where
|
||||
|
||||
import PGF.ShowLinearize (collectWords)
|
||||
import PGF.Data
|
||||
import PGF.CId
|
||||
|
||||
import qualified Data.Map as Map
|
||||
import Data.List (intersperse)
|
||||
|
||||
-- these 4 definitions depend on the datastructure used

-- | Full-form lexicon: surface word mapped to its (lemma, analysis) pairs.
type Morpho = Map.Map String [(Lemma,Analysis)]

-- | All analyses of a word; the empty list for unknown words.
lookupMorpho :: Morpho -> String -> [(Lemma,Analysis)]
lookupMorpho mo s = Map.findWithDefault noAnalysis s mo

-- | Build the full-form lexicon for one language from all word forms
-- collected out of the grammar.
buildMorpho :: PGF -> CId -> Morpho
buildMorpho pgf lang = Map.fromListWith (++) (collectWords pgf lang)

-- | Render the whole lexicon, one word per entry.
prFullFormLexicon :: Morpho -> String
prFullFormLexicon mo =
  unlines [ w ++ " : " ++ prMorphoAnalysis ts | (w,ts) <- Map.assocs mo ]

-- | Render the analyses of one word, one per line.
prMorphoAnalysis :: [(Lemma,Analysis)] -> String
prMorphoAnalysis lps = unlines [ l ++ " " ++ p | (l,p) <- lps ]

type Lemma    = String
type Analysis = String

-- | Result for unknown words.
noAnalysis :: [(Lemma,Analysis)]
noAnalysis = []
||||
|
||||
40
src/PGF/Parsing/FCFG.hs
Normal file
40
src/PGF/Parsing/FCFG.hs
Normal file
@@ -0,0 +1,40 @@
|
||||
----------------------------------------------------------------------
|
||||
-- |
|
||||
-- Maintainer : Krasimir Angelov
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- FCFG parsing
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
module PGF.Parsing.FCFG
|
||||
(buildParserInfo,ParserInfo,parseFCFG) where
|
||||
|
||||
import GF.Data.ErrM
|
||||
import GF.Data.Assoc
|
||||
import GF.Data.SortedList
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Macros
|
||||
import PGF.BuildParser
|
||||
import PGF.Parsing.FCFG.Utilities
|
||||
import qualified PGF.Parsing.FCFG.Active as Active
|
||||
import qualified PGF.Parsing.FCFG.Incremental as Incremental
|
||||
|
||||
import qualified Data.Map as Map
|
||||
|
||||
----------------------------------------------------------------------
|
||||
-- parsing
|
||||
|
||||
-- main parsing function
|
||||
|
||||
-- | Dispatch on the parsing strategy name and run the corresponding parser.
parseFCFG :: String      -- ^ parsing strategy
          -> ParserInfo  -- ^ compiled grammar (fcfg)
          -> CId         -- ^ starting category
          -> [String]    -- ^ input tokens
          -> Err [Tree]  -- ^ resulting GF terms
parseFCFG strategy pinfo start toks =
  case strategy of
    "bottomup"    -> return (Active.parse "b" pinfo start toks)
    "topdown"     -> return (Active.parse "t" pinfo start toks)
    "incremental" -> return (Incremental.parse pinfo start toks)
    _             -> fail ("FCFG parsing strategy not defined: " ++ strategy)
||||
189
src/PGF/Parsing/FCFG/Active.hs
Normal file
189
src/PGF/Parsing/FCFG/Active.hs
Normal file
@@ -0,0 +1,189 @@
|
||||
----------------------------------------------------------------------
|
||||
-- |
|
||||
-- Maintainer : Krasimir Angelov
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- MCFG parsing, the active algorithm
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
module PGF.Parsing.FCFG.Active (parse) where
|
||||
|
||||
import GF.Data.Assoc
|
||||
import GF.Data.SortedList
|
||||
import GF.Data.Utilities
|
||||
import qualified GF.Data.MultiMap as MM
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Parsing.FCFG.Utilities
|
||||
|
||||
import Control.Monad (guard)
|
||||
|
||||
import qualified Data.List as List
|
||||
import qualified Data.Map as Map
|
||||
import qualified Data.Set as Set
|
||||
import Data.Array
|
||||
|
||||
----------------------------------------------------------------------
|
||||
-- * parsing
|
||||
|
||||
-- | Final edge for a start category spanning the whole input;
-- an empty input is represented by 'EmptyRange'.
makeFinalEdge cat i j
  | i == 0 && j == 0 = (cat, [EmptyRange])
  | otherwise        = (cat, [makeRange i j])

-- | Run the active parser.  The starting category is expanded to the list of
-- possible startup categories of the compiled grammar.
parse :: String -> ParserInfo -> CId -> [FToken] -> [Tree]
parse strategy pinfo start toks =
  nubsort (concatMap forest2trees filteredForests)
  where
    inTokens = input toks
    starts   = Map.findWithDefault [] start (startupCats pinfo)
    (i,j)    = inputBounds inTokens

    -- final items -> syntax chart -> forests, with argument profiles applied
    schart          = xchart2syntaxchart chart pinfo
    finalEdges      = [makeFinalEdge cat i j | cat <- starts]
    forests         = chart2forests schart (const False) finalEdges
    filteredForests = concatMap applyProfileToForest forests

    chart = process strategy pinfo inTokens axioms emptyXChart
    axioms | isBU strategy = literals pinfo inTokens ++ initialBU pinfo inTokens
           | isTD strategy = literals pinfo inTokens ++ initialTD pinfo starts inTokens

-- strategy flags: "b" = bottom-up (Kilbury), "t" = top-down (Earley)
isBU s = s == "b"
isTD s = s == "t"
||||
|
||||
-- used in prediction: a fresh syntax node for a rule whose children
-- are all still unfound (one empty range record per argument)
emptyChildren :: RuleId -> ParserInfo -> SyntaxNode RuleId RangeRec
emptyChildren ruleid pinfo = SNode ruleid (map (const []) rhs)
  where FRule _ _ rhs _ _ = allRules pinfo ! ruleid
||||
|
||||
-- | Exhaust the agenda: pop one (category, item) pair at a time and combine
-- it with the chart (scan / predict / complete), queueing any new items.
-- The strategy string selects top-down ("t") or bottom-up ("b") prediction.
process :: String -> ParserInfo -> Input FToken -> [(FCat,Item)] -> XChart FCat -> XChart FCat
process strategy pinfo toks [] chart = chart
process strategy pinfo toks ((c,item):items) chart = process strategy pinfo toks items $! univRule c item chart
  where
    -- Active item: the dot sits at position ppos of linearization row lbl.
    -- NOTE(review): the 'where' clause below rebinds 'cat', 'args' and 'lins'
    -- from the item's own rule, shadowing the 'cat' parameter.
    univRule cat item@(Active found rng lbl ppos node@(SNode ruleid recs)) chart
      | inRange (bounds lin) ppos =
          case lin ! ppos of
            -- next symbol is an argument category
            FSymCat r d ->
              let c = args !! d
              in case recs !! d of
                   -- argument d not found yet: combine with existing final
                   -- items for it and, top-down, predict its rules
                   [] -> case insertXChart chart item c of
                           Nothing    -> chart   -- duplicate item: nothing to do
                           Just chart ->
                             let items = do item@(Final found' _) <- lookupXChartFinal chart c
                                            rng <- concatRange rng (found' !! r)
                                            return (c, Active found rng lbl (ppos+1) (SNode ruleid (updateNth (const found') d recs)))
                                         ++
                                         do guard (isTD strategy)
                                            ruleid <- topdownRules pinfo ? c
                                            return (c, Active [] EmptyRange 0 0 (emptyChildren ruleid pinfo))
                             in process strategy pinfo toks items chart
                   -- argument d already found: just advance over its range
                   found' -> let items = do rng <- concatRange rng (found' !! r)
                                            return (c, Active found rng lbl (ppos+1) node)
                             in process strategy pinfo toks items chart
            -- next symbol is a token: scan it at every input position
            -- where it occurs adjacently
            FSymTok tok -> let items = do t_rng <- inputToken toks ? tok
                                          rng' <- concatRange rng t_rng
                                          return (cat, Active found rng' lbl (ppos+1) node)
                           in process strategy pinfo toks items chart
      | otherwise =
          -- current row finished: start the next row, or emit a final item
          -- when all rows are done
          if inRange (bounds lins) (lbl+1)
            then univRule cat (Active (rng:found) EmptyRange (lbl+1) 0 node) chart
            else univRule cat (Final (reverse (rng:found)) node) chart
      where
        (FRule _ _ args cat lins) = allRules pinfo ! ruleid
        lin = lins ! lbl
    -- Final item: complete all waiting active items and, bottom-up,
    -- left-corner predict new rules from this category.
    univRule cat item@(Final found' node) chart =
      case insertXChart chart item cat of
        Nothing    -> chart   -- duplicate item: nothing to do
        Just chart ->
          let items = do (Active found rng l ppos node@(SNode ruleid _)) <- lookupXChartAct chart cat
                         let FRule _ _ args _ lins = allRules pinfo ! ruleid
                             FSymCat r d = lins ! l ! ppos
                         rng <- concatRange rng (found' !! r)
                         return (args !! d, Active found rng l (ppos+1) (updateChildren node d found'))
                      ++
                      do guard (isBU strategy)
                         ruleid <- leftcornerCats pinfo ? cat
                         let FRule _ _ args _ lins = allRules pinfo ! ruleid
                             FSymCat r d = lins ! 0 ! 0
                         return (args !! d, Active [] (found' !! r) 0 1 (updateChildren (emptyChildren ruleid pinfo) d found'))

              -- record the found range record for child i of a node
              updateChildren :: SyntaxNode RuleId RangeRec -> Int -> RangeRec -> SyntaxNode RuleId RangeRec
              updateChildren (SNode ruleid recs) i rec = SNode ruleid $! updateNth (const rec) i recs
          in process strategy pinfo toks items chart
|
||||
----------------------------------------------------------------------
|
||||
-- * XChart
|
||||
|
||||
-- | A chart item: either a partially recognized rule application (Active)
-- or a completely recognized constituent (Final).
data Item
  = Active RangeRec                  -- ranges already found for finished rows (reversed)
           Range                     -- range covered so far in the current row
           {-# UNPACK #-} !FIndex    -- current linearization row
           {-# UNPACK #-} !FPointPos -- dot position inside the row
           (SyntaxNode RuleId RangeRec)  -- the rule with its children's range records
  | Final RangeRec (SyntaxNode RuleId RangeRec)
  deriving (Eq, Ord)

-- | The chart: active and final items, both indexed by category.
data XChart c = XChart !(MM.MultiMap c Item) !(MM.MultiMap c Item)

emptyXChart :: Ord c => XChart c
emptyXChart = XChart MM.empty MM.empty

-- | Insert an item under a category; 'Nothing' when the item was already
-- present (used by 'process' to avoid re-deriving duplicates).
insertXChart (XChart actives finals) item@(Active _ _ _ _ _) c =
  case MM.insert' c item actives of
    Nothing      -> Nothing
    Just actives -> Just (XChart actives finals)

insertXChart (XChart actives finals) item@(Final _ _) c =
  case MM.insert' c item finals of
    Nothing     -> Nothing
    Just finals -> Just (XChart actives finals)

-- All active / final items stored under a category.
lookupXChartAct   (XChart actives finals) c = actives MM.! c
lookupXChartFinal (XChart actives finals) c = finals MM.! c
|
||||
|
||||
-- | Convert the final items of the chart into a syntax chart keyed by
-- (category, range record).  Rule names are paired with their argument
-- profiles so that 'applyProfileToForest' can later undo argument
-- suppression/duplication.
xchart2syntaxchart :: XChart FCat -> ParserInfo -> SyntaxChart (CId,[Profile]) (FCat,RangeRec)
xchart2syntaxchart (XChart actives finals) pinfo =
  accumAssoc groupSyntaxNodes $
  [ case node of
      -- NOTE(review): the 'cat' bound from FRule here shadows the 'cat'
      -- bound by the list comprehension below.
      SNode ruleid rrecs -> let FRule fun prof rhs cat _ = allRules pinfo ! ruleid
                            in ((cat,found), SNode (fun,prof) (zip rhs rrecs))
      SString s -> ((cat,found), SString s)
      SInt n    -> ((cat,found), SInt n)
      SFloat f  -> ((cat,found), SFloat f)
  | (cat, Final found node) <- MM.toList finals
  ]
||||
|
||||
-- | One final item per input token that is unknown to the grammar, lexed as
-- an integer, float or string literal.
literals :: ParserInfo -> Input FToken -> [(FCat,Item)]
literals pinfo toks =
  [ (c, Final [rng] node)
  | (t,rngs) <- aAssocs (inputToken toks)
  , t `notElem` grammarToks pinfo
  , rng <- rngs
  , let (c,node) = lexer t
  ]
  where
    -- classify a token: integer first, then float, else plain string
    lexer t
      | [(n,"")] <- reads t = (fcatInt,    SInt (n::Integer))
      | [(f,"")] <- reads t = (fcatFloat,  SFloat (f::Double))
      | otherwise           = (fcatString, SString t)
|
||||
|
||||
----------------------------------------------------------------------
|
||||
-- Earley --
|
||||
|
||||
-- called with all starting categories
-- | Axioms for top-down (Earley) parsing: one dotted rule at position 0
-- for every rule of every starting category.
initialTD :: ParserInfo -> [FCat] -> Input FToken -> [(FCat,Item)]
initialTD pinfo starts toks =
  [ (cat, Active [] (Range 0 0) 0 0 (emptyChildren ruleid pinfo))
  | cat <- starts
  , ruleid <- topdownRules pinfo ? cat
  ]


----------------------------------------------------------------------
-- Kilbury --

-- | Axioms for bottom-up (Kilbury) parsing: one item per occurrence of a
-- grammar token in the input, plus one item per epsilon rule.
initialBU :: ParserInfo -> Input FToken -> [(FCat,Item)]
initialBU pinfo toks = scanned ++ empties
  where
    scanned = [ (cat, Active [] rng 0 1 (emptyChildren ruleid pinfo))
              | (tok,rngs) <- aAssocs (inputToken toks)
              , ruleid <- leftcornerTokens pinfo ? tok
              , let FRule _ _ _ cat _ = allRules pinfo ! ruleid
              , rng <- rngs
              ]
    empties = [ (cat, Active [] EmptyRange 0 0 (emptyChildren ruleid pinfo))
              | ruleid <- epsilonRules pinfo
              , let FRule _ _ _ cat _ = allRules pinfo ! ruleid
              ]
||||
187
src/PGF/Parsing/FCFG/Incremental.hs
Normal file
187
src/PGF/Parsing/FCFG/Incremental.hs
Normal file
@@ -0,0 +1,187 @@
|
||||
{-# LANGUAGE BangPatterns #-}
|
||||
module PGF.Parsing.FCFG.Incremental
|
||||
( ParseState
|
||||
, initState
|
||||
, nextState
|
||||
, getCompletions
|
||||
, extractExps
|
||||
, parse
|
||||
) where
|
||||
|
||||
import Data.Array
|
||||
import Data.Array.Base (unsafeAt)
|
||||
import Data.List (isPrefixOf, foldl')
|
||||
import Data.Maybe (fromMaybe)
|
||||
import qualified Data.Map as Map
|
||||
import qualified Data.IntMap as IntMap
|
||||
import qualified Data.Set as Set
|
||||
import Control.Monad
|
||||
|
||||
import GF.Data.Assoc
|
||||
import GF.Data.SortedList
|
||||
import qualified GF.Data.MultiMap as MM
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Parsing.FCFG.Utilities
|
||||
import Debug.Trace
|
||||
|
||||
-- | Batch interface to the incremental parser: feed all tokens in order and
-- extract the trees of the starting category.
parse :: ParserInfo -> CId -> [FToken] -> [Tree]
parse pinfo start toks =
  let final = foldl' nextState (initState pinfo start) toks
  in extractExps final start
||||
|
||||
-- | The initial parse state: one dotted rule per linearization row of every
-- top-down rule for the start category, and a shared forest seeded with one
-- passive node per grammar rule, keyed by its result category.
initState :: ParserInfo -> CId -> ParseState
initState pinfo start = State pinfo chart0 (Set.fromList seeds)
  where
    seeds = [ Active 0 lbl 0 ruleid args cat
            | c <- Map.findWithDefault [] start (startupCats pinfo)
            , ruleid <- topdownRules pinfo ? c
            , let FRule _ _ args cat lins = allRules pinfo ! ruleid
            , lbl <- indices lins
            ]

    forest0 = IntMap.fromListWith Set.union
                [ (cat, Set.singleton (Passive ruleid args))
                | (ruleid, FRule _ _ args cat _) <- assocs (allRules pinfo)
                ]

    -- first fresh forest-node id: one past the largest pre-seeded key
    nextFid = case IntMap.maxViewWithKey forest0 of
                Just ((fid,_), _) -> fid+1
                Nothing           -> 0

    chart0 = Chart MM.empty [] Map.empty forest0 nextFid 0
||||
|
||||
-- | From the current state and the next token,
-- compute a new state where the token
-- is consumed and the current position is shifted by one.
nextState :: ParseState -> String -> ParseState
nextState (State pinfo chart items) t = State pinfo chart' items'
  where
    (items',chart1) = process add (allRules pinfo) (Set.toList items) (Set.empty,chart)
    -- push the current active map onto the history and advance the offset
    chart' = chart1 { active  = MM.empty
                    , actives = active chart1 : actives chart1
                    , passive = Map.empty
                    , offset  = offset chart1 + 1
                    }
    -- keep only the items scanning exactly the consumed token
    add tok item set
      | tok == t  = Set.insert item set
      | otherwise = set
||||
|
||||
-- | If the next token is not known but only its (possibly empty) prefix,
-- compute the possible next words together with the state reached after each
-- of them.  Used for word completion in the GF interpreter.
getCompletions :: ParseState -> String -> Map.Map String ParseState
getCompletions (State pinfo chart items) w = fmap (State pinfo chart2) compls
  where
    (compls,chart1) = process add (allRules pinfo) (Set.toList items) (MM.empty,chart)
    chart2 = chart1 { active  = MM.empty
                    , actives = active chart1 : actives chart1
                    , passive = Map.empty
                    , offset  = offset chart1 + 1
                    }
    -- collect every scannable token extending the prefix
    add tok item m
      | w `isPrefixOf` tok = fromMaybe m (MM.insert' tok item m)
      | otherwise          = m
||||
|
||||
-- | Extract all complete trees of the start category from the current state.
-- A final 'process' pass (with a no-op accumulator) completes any pending
-- items before the forest is read off.
--
-- Fix: the original bound @Map.lookup@ / @IntMap.lookup@ results directly in
-- the list monad, which only typechecks with the old monadic
-- @lookup :: Monad m => ...@ from pre-0.5 containers.  The lookups are now
-- bound via list-monad pattern matches (@Just x <- [...]@), which behaves
-- identically (a miss yields @[]@) and works with modern containers too.
extractExps :: ParseState -> CId -> [Tree]
extractExps (State pinfo chart items) start = exps
  where
    (_,st) = process (\_ _ -> id) (allRules pinfo) (Set.toList items) ((),chart)

    exps = nubsort $ do
      c <- Map.findWithDefault [] start (startupCats pinfo)
      ruleid <- topdownRules pinfo ? c
      let FRule _ _ _ _ lins = allRules pinfo ! ruleid
      lbl <- indices lins
      -- only rows completed from position 0 span the whole input
      Just fid <- [Map.lookup (PK c lbl 0) (passive st)]
      go Set.empty fid

    -- unfold the shared forest into trees, cutting cycles with the
    -- visited set 'rec'
    go rec fid
      | Set.member fid rec = mzero
      | otherwise = do Just set <- [IntMap.lookup fid (forest st)]
                       Passive ruleid args <- Set.toList set
                       let FRule fn _ _ _ _ = allRules pinfo ! ruleid
                       if fn == wildCId
                         -- coercion node: skip to its single child
                         -- (assumes args is non-empty here — TODO confirm)
                         then go (Set.insert fid rec) (head args)
                         else do args <- mapM (go (Set.insert fid rec)) args
                                 return (Fun fn args)
||||
|
||||
-- | The worker of the incremental parser.  Processes the agenda of active
-- items against the chart; 'fn' folds every scannable token/item pair into
-- the accumulator 'acc' (used for shifting and for completion lists).
process fn !rules [] acc_chart = acc_chart
process fn !rules (item:items) acc_chart = univRule item acc_chart
  where
    univRule (Active j lbl ppos ruleid args fid0) acc_chart@(acc,chart)
      | inRange (bounds lin) ppos =
          -- the dot is inside the current row: combine or scan
          case unsafeAt lin ppos of
            FSymCat r d -> let !fid = args !! d
                           in case MM.insert' (AK fid r) item (active chart) of
                                Nothing     -> process fn rules items $ acc_chart
                                -- NOTE(review): the 'Just id' binding below
                                -- shadows Prelude.id; the 'Nothing -> id'
                                -- branches are identity continuations.
                                Just actCat -> (case Map.lookup (PK fid r k) (passive chart) of
                                                  Nothing -> id
                                                  Just id -> process fn rules [Active j lbl (ppos+1) ruleid (updateAt d id args) fid0]) $
                                               (case IntMap.lookup fid (forest chart) of
                                                  Nothing  -> id
                                                  Just set -> process fn rules (Set.fold (\(Passive ruleid args) -> (:) (Active k r 0 ruleid args fid)) [] set)) $
                                               process fn rules items $
                                               (acc,chart{active=actCat})
            FSymTok tok -> process fn rules items $
                           (fn tok (Active j lbl (ppos+1) ruleid args fid0) acc,chart)
      | otherwise =
          -- the row is finished: record a passive item, reusing the forest
          -- node when one already exists for this (category,row,start)
          case Map.lookup (PK fid0 lbl j) (passive chart) of
            Nothing -> let fid = nextId chart
                       in process fn rules [Active j' lbl (ppos+1) ruleid (updateAt d fid args) fidc
                                           | Active j' lbl ppos ruleid args fidc <- ((active chart:actives chart) !! (k-j)) MM.! (AK fid0 lbl),
                                             let FSymCat _ d = unsafeAt (rhs ruleid lbl) ppos] $
                          process fn rules items $
                          (acc,chart{passive=Map.insert (PK fid0 lbl j) fid (passive chart)
                                    ,forest =IntMap.insert fid (Set.singleton (Passive ruleid args)) (forest chart)
                                    ,nextId =nextId chart+1
                                    })
            Just id -> process fn rules items $
                       (acc,chart{forest = IntMap.insertWith Set.union id (Set.singleton (Passive ruleid args)) (forest chart)})
      where
        !lin = rhs ruleid lbl
        !k = offset chart

    -- the lbl-th linearization row of a rule
    rhs ruleid lbl = unsafeAt lins lbl
      where
        (FRule _ _ _ cat lins) = unsafeAt rules ruleid
||||
|
||||
-- | Replace the element at the given position; all other elements (and
-- positions outside the list) are unchanged.
updateAt :: Int -> a -> [a] -> [a]
updateAt nr x = zipWith pick [0..]
  where pick i y = if i == nr then x else y
||||
|
||||
|
||||
-- | A dotted rule: start offset of the current row, the row and the dot
-- position within it, the rule, its (partly resolved) argument categories,
-- and the result category.
data Active
  = Active {-# UNPACK #-} !Int        -- start offset of the current row
           {-# UNPACK #-} !FIndex     -- current linearization row
           {-# UNPACK #-} !FPointPos  -- dot position inside the row
           {-# UNPACK #-} !RuleId
           [FCat]                     -- argument categories, updated as found
           {-# UNPACK #-} !FCat       -- result category
  deriving (Eq,Show,Ord)

-- | A completed rule application stored in the shared forest.
data Passive
  = Passive {-# UNPACK #-} !RuleId
            [FCat]
  deriving (Eq,Ord,Show)

-- | Key for active items: (category, linearization row).
data ActiveKey
  = AK {-# UNPACK #-} !FCat
       {-# UNPACK #-} !FIndex
  deriving (Eq,Ord,Show)

-- | Key for passive items: (category, linearization row, start offset).
data PassiveKey
  = PK {-# UNPACK #-} !FCat
       {-# UNPACK #-} !FIndex
       {-# UNPACK #-} !Int
  deriving (Eq,Ord,Show)


-- | An abstract data type whose values represent
-- the current state in an incremental parser.
data ParseState = State ParserInfo Chart (Set.Set Active)

data Chart
  = Chart
    { active  :: MM.MultiMap ActiveKey Active     -- active items at the current position
    , actives :: [MM.MultiMap ActiveKey Active]   -- earlier positions, newest first
    , passive :: Map.Map PassiveKey FCat          -- completed rows -> forest node id
    , forest  :: IntMap.IntMap (Set.Set Passive)  -- the shared parse forest
    , nextId  :: {-# UNPACK #-} !FCat             -- next fresh forest node id
    , offset  :: {-# UNPACK #-} !Int              -- current input position
    }
|
||||
187
src/PGF/Parsing/FCFG/Utilities.hs
Normal file
187
src/PGF/Parsing/FCFG/Utilities.hs
Normal file
@@ -0,0 +1,187 @@
|
||||
----------------------------------------------------------------------
|
||||
-- |
|
||||
-- Maintainer : PL
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- > CVS $Date: 2005/05/13 12:40:19 $
|
||||
-- > CVS $Author: peb $
|
||||
-- > CVS $Revision: 1.6 $
|
||||
--
|
||||
-- Basic type declarations and functions for grammar formalisms
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
|
||||
module PGF.Parsing.FCFG.Utilities where
|
||||
|
||||
import Control.Monad
|
||||
import Data.Array
|
||||
import Data.List (groupBy)
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import GF.Data.Assoc
|
||||
import GF.Data.Utilities (sameLength, foldMerge, splitBy)
|
||||
|
||||
|
||||
------------------------------------------------------------
|
||||
-- ranges as single pairs
|
||||
|
||||
-- | One range per linearization row of a constituent.
type RangeRec = [Range]

-- | @Range i j@ covers input positions between i and j;
-- 'EmptyRange' is the span of an epsilon constituent.
data Range = Range {-# UNPACK #-} !Int {-# UNPACK #-} !Int
           | EmptyRange
	     deriving (Eq, Ord)

makeRange :: Int -> Int -> Range
makeRange = Range

-- | Concatenate two adjacent ranges; yields @[]@ when they do not meet.
-- 'EmptyRange' acts as the unit.
concatRange :: Range -> Range -> [Range]
concatRange EmptyRange rng = return rng
concatRange rng EmptyRange = return rng
concatRange (Range i j) (Range j' k) = [Range i k | j==j']

-- | Left endpoint.  NOTE(review): partial — no equation for 'EmptyRange';
-- callers must not apply it to empty ranges.
minRange :: Range -> Int
minRange (Range i j) = i

-- | Right endpoint.  NOTE(review): partial — no equation for 'EmptyRange'.
maxRange :: Range -> Int
maxRange (Range i j) = j
|
||||
|
||||
------------------------------------------------------------
|
||||
-- * representations of input tokens
|
||||
|
||||
-- | The parser's view of the input: its bounds and, for every token,
-- the list of ranges where it occurs.
data Input t = MkInput { inputBounds :: (Int, Int),
                         inputToken :: Assoc t [Range]
                       }

-- | Input where each position holds exactly one token.
input :: Ord t => [t] -> Input t
input toks = MkInput (0, length toks) occurrences
  where
    occurrences = accumAssoc id [ (tok, makeRange i (i+1)) | (i,tok) <- zip [0..] toks ]

-- | Input where each position may hold several alternative tokens.
inputMany :: Ord t => [[t]] -> Input t
inputMany tokss = MkInput (0, length tokss) occurrences
  where
    occurrences = accumAssoc id [ (tok, makeRange i (i+1)) | (i,ts) <- zip [0..] tokss, tok <- ts ]
||||
|
||||
|
||||
------------------------------------------------------------
|
||||
-- * representations of syntactical analyses
|
||||
|
||||
-- ** charts as finite maps over edges
|
||||
|
||||
-- | A chart mapping edges to their possible analyses.
-- The values of the chart, a list of key-daughters pairs,
-- has unique keys. In essence, it is a map from 'n' to daughters.
-- The daughters should be a set (not necessarily sorted) of rhs's.
type SyntaxChart n e = Assoc e [SyntaxNode n [e]]
||||
|
||||
-- | One analysis in the chart: a rule application over daughter edges,
-- a literal, or a metavariable.
data SyntaxNode n e = SMeta
		    | SNode n [e]
		    | SString String
		    | SInt    Integer
		    | SFloat  Double
		      deriving (Eq,Ord)

-- | Merge maximal runs of adjacent 'SNode's with the same name into a single
-- node collecting all their daughter lists; other nodes pass through.
--
-- Fix: the original inner helper matched only @[]@ and @SNode …@, so any
-- literal node immediately following an 'SNode' run crashed with a pattern-
-- match failure, and 'SMeta' was not handled by the outer case at all.
-- Both now simply terminate the run / pass through.
groupSyntaxNodes :: Ord n => [SyntaxNode n e] -> [SyntaxNode n [e]]
groupSyntaxNodes nodes = case nodes of
  []                -> []
  SNode n0 es0 : xs -> let (ess,rest) = sameName n0 xs
                       in SNode n0 (es0:ess) : groupSyntaxNodes rest
  SMeta        : xs -> SMeta     : groupSyntaxNodes xs
  SString s    : xs -> SString s : groupSyntaxNodes xs
  SInt n       : xs -> SInt n    : groupSyntaxNodes xs
  SFloat f     : xs -> SFloat f  : groupSyntaxNodes xs
  where
    -- collect the daughter lists of the leading nodes named n0
    sameName n0 (SNode n es : xs)
      | n0 == n = let (ess,rest) = sameName n0 xs in (es:ess,rest)
    sameName _ xs = ([],xs)
||||
|
||||
-- ** syntax forests

-- | A packed representation of a set of syntax trees.
data SyntaxForest n = FMeta
		    | FNode n [[SyntaxForest n]]
		      -- ^ The outer list should be a set (not necessarily sorted)
		      -- of possible alternatives. Ie. the outer list
		      -- is a disjunctive node, and the inner lists
		      -- are (conjunctive) concatenative nodes
		    | FString String
		    | FInt    Integer
		    | FFloat  Double
		      deriving (Eq, Ord, Show)

instance Functor SyntaxForest where
    fmap f forest = case forest of
      FNode n alts -> FNode (f n) (map (map (fmap f)) alts)
      FString s    -> FString s
      FInt n       -> FInt n
      FFloat x     -> FFloat x
      FMeta        -> FMeta

-- | The label of a forest node, if it has one.
forestName :: SyntaxForest n -> Maybe n
forestName forest = case forest of
  FNode n _ -> Just n
  _         -> Nothing
|
||||
|
||||
-- | Unify a whole list of forests, starting from 'FMeta' (the unit).
unifyManyForests :: (Monad m, Eq n) => [SyntaxForest n] -> m (SyntaxForest n)
unifyManyForests = foldM unifyForests FMeta

-- | Two forests can be unified if either is 'FMeta', or both have the same
-- parent and all children can be unified; otherwise unification 'fail's.
unifyForests :: (Monad m, Eq n) => SyntaxForest n -> SyntaxForest n -> m (SyntaxForest n)
unifyForests FMeta forest = return forest
unifyForests forest FMeta = return forest
unifyForests (FNode name1 alts1) (FNode name2 alts2)
  | name1 == name2 && not (null merged) = return (FNode name1 merged)
  where
    -- pairwise unification of every alternative of each side
    merged = [ forests
             | fs1 <- alts1, fs2 <- alts2,
               sameLength fs1 fs2,
               forests <- zipWithM unifyForests fs1 fs2 ]
unifyForests (FString s1) (FString s2)
  | s1 == s2 = return (FString s1)
unifyForests (FInt n1) (FInt n2)
  | n1 == n2 = return (FInt n1)
unifyForests (FFloat f1) (FFloat f2)
  | f1 == f2 = return (FFloat f1)
unifyForests _ _ = fail "forest unification failure"
||||
|
||||
|
||||
-- ** conversions between representations
|
||||
|
||||
chart2forests :: (Ord n, Ord e) =>
                 SyntaxChart n e    -- ^ The complete chart
              -> (e -> Bool)        -- ^ When is an edge 'FMeta'?
              -> [e]                -- ^ The starting edges
              -> [SyntaxForest n]   -- ^ The result has unique keys, ie. all 'n' are joined together.
                                    -- In essence, the result is a map from 'n' to forest daughters
chart2forests chart isMeta = concatMap (edge2forests [])
    where -- 'edges' is the path already visited, used to cut cyclic derivations
          edge2forests edges edge
	      | isMeta edge        = [FMeta]
	      | edge `elem` edges  = []   -- cycle: drop this derivation
	      | otherwise          = map (item2forest (edge:edges)) $ chart ? edge
	  item2forest edges (SMeta)               = FMeta
	  item2forest edges (SNode name children) =
              -- each conjunctive daughter list expands to all combinations
              -- of its edges' forests
              FNode name $ children >>= mapM (edge2forests edges)
	  item2forest edges (SString s) = FString s
	  item2forest edges (SInt n)    = FInt n
	  item2forest edges (SFloat f)  = FFloat f
|
||||
|
||||
-- | Undo the argument profiles recorded during grammar compilation:
-- reorder/duplicate children according to each profile, and splice away
-- coercion nodes (named 'wildCId').
applyProfileToForest :: SyntaxForest (CId,[Profile]) -> [SyntaxForest CId]
applyProfileToForest (FNode (fun,profiles) children)
  | fun == wildCId = concat chForests   -- coercion: expose children directly
  | otherwise      = [ FNode fun chForests | not (null chForests) ]
  where chForests = concat [ mapM (unifyManyForests . map (forests !!)) profiles |
                             forests0 <- children,
                             forests <- mapM applyProfileToForest forests0 ]
applyProfileToForest (FString s) = [FString s]
applyProfileToForest (FInt n)    = [FInt n]
applyProfileToForest (FFloat f)  = [FFloat f]
applyProfileToForest (FMeta)     = [FMeta]
||||
|
||||
|
||||
-- | Expand a packed forest into all trees it represents; metavariables
-- become @Meta 0@.
forest2trees :: SyntaxForest CId -> [Tree]
forest2trees forest = case forest of
  FNode n alts -> [ Fun n ts | children <- alts, ts <- mapM forest2trees children ]
  FString s    -> [Lit (LStr s)]
  FInt n       -> [Lit (LInt n)]
  FFloat f     -> [Lit (LFlt f)]
  FMeta        -> [Meta 0]
||||
67
src/PGF/Quiz.hs
Normal file
67
src/PGF/Quiz.hs
Normal file
@@ -0,0 +1,67 @@
|
||||
----------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module      : PGF.Quiz (formerly TeachYourself)
|
||||
-- Maintainer : AR
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- > CVS $Date: 2005/04/21 16:46:13 $
|
||||
-- > CVS $Author: bringert $
|
||||
-- > CVS $Revision: 1.7 $
|
||||
--
|
||||
-- translation and morphology quiz. AR 10\/5\/2000 -- 12\/4\/2002 -- 14\/6\/2008
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
module PGF.Quiz (
|
||||
mkQuiz,
|
||||
translationList,
|
||||
morphologyList
|
||||
) where
|
||||
|
||||
import PGF
|
||||
import PGF.ShowLinearize
|
||||
|
||||
import GF.Data.Operations
|
||||
import GF.Infra.UseIO
|
||||
|
||||
import System.Random
|
||||
|
||||
import Data.List (nub)
|
||||
|
||||
-- translation and morphology quiz. AR 10/5/2000 -- 12/4/2002
|
||||
|
||||
-- generic quiz function
|
||||
|
||||
-- | Run an interactive quiz: each question is paired with an answer checker
-- built from the list of accepted answers.
mkQuiz :: String -> [(String,[String])] -> IO ()
mkQuiz msg tts = teachDialogue [ (q, mkAnswer as) | (q,as) <- tts ] msg

-- | Random translation exercises: each question is a linearization in the
-- input language, the answers are linearizations of all its parse homonyms
-- in the output language.
translationList ::
  PGF -> Language -> Language -> Category -> Int -> IO [(String,[String])]
translationList pgf ig og cat number = do
  ts <- fmap (take number) (generateRandom pgf cat)
  return (map mkOne ts)
  where
    mkOne t = ( norml (linearize pgf ig t)
              , map (norml . linearize pgf og) (homonyms t) )
    homonyms = nub . parse pgf ig cat . linearize pgf ig

-- | Random morphology exercises: ask for a randomly chosen inflection form
-- of randomly generated trees.
morphologyList :: PGF -> Language -> Category -> Int -> IO [(String,[String])]
morphologyList pgf ig cat number = do
  ts  <- fmap (take (max 1 number)) (generateRandom pgf cat)
  gen <- newStdGen
  let tables = map (tabularLinearize pgf (mkCId ig)) ts
      -- NOTE(review): 'head' is partial here; assumes at least one tree
      -- was generated and its table is non-empty — TODO confirm
      size   = length (head tables)
      forms  = take number (randomRs (0,size-1) gen)
  return [ (head (snd (head pws)) +++ par, ws)
         | (pws,i) <- zip tables forms
         , let (par,ws) = pws !! i ]
||||
|
||||
-- | compare answer to the list of right answers, increase score and give feedback
|
||||
mkAnswer :: [String] -> String -> (Integer, String)
|
||||
mkAnswer as s = if (elem (norml s) as)
|
||||
then (1,"Yes.")
|
||||
else (0,"No, not" +++ s ++ ", but" ++++ unlines as)
|
||||
|
||||
norml :: String -> String
|
||||
norml = unwords . words
|
||||
|
||||
14
src/PGF/Raw/Abstract.hs
Normal file
14
src/PGF/Raw/Abstract.hs
Normal file
@@ -0,0 +1,14 @@
|
||||
module PGF.Raw.Abstract where
|
||||
|
||||
data Grammar =
|
||||
Grm [RExp]
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
data RExp =
|
||||
App String [RExp]
|
||||
| AInt Integer
|
||||
| AStr String
|
||||
| AFlt Double
|
||||
| AMet
|
||||
deriving (Eq,Ord,Show)
|
||||
|
||||
248
src/PGF/Raw/Convert.hs
Normal file
248
src/PGF/Raw/Convert.hs
Normal file
@@ -0,0 +1,248 @@
|
||||
module PGF.Raw.Convert (toPGF,fromPGF) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Raw.Abstract
|
||||
import PGF.BuildParser (buildParserInfo)
|
||||
import PGF.Parsing.FCFG.Utilities
|
||||
|
||||
import qualified Data.Array as Array
|
||||
import qualified Data.Map as Map
|
||||
|
||||
pgfMajorVersion, pgfMinorVersion :: Integer
|
||||
(pgfMajorVersion, pgfMinorVersion) = (1,0)
|
||||
|
||||
-- convert parsed grammar to internal PGF
|
||||
|
||||
toPGF :: Grammar -> PGF
|
||||
toPGF (Grm [
|
||||
App "pgf" (AInt v1 : AInt v2 : App a []:cs),
|
||||
App "flags" gfs,
|
||||
ab@(
|
||||
App "abstract" [
|
||||
App "fun" fs,
|
||||
App "cat" cts
|
||||
]),
|
||||
App "concrete" ccs
|
||||
]) = PGF {
|
||||
absname = mkCId a,
|
||||
cncnames = [mkCId c | App c [] <- cs],
|
||||
gflags = Map.fromAscList [(mkCId f,v) | App f [AStr v] <- gfs],
|
||||
abstract =
|
||||
let
|
||||
aflags = Map.fromAscList [(mkCId f,v) | App f [AStr v] <- gfs]
|
||||
lfuns = [(mkCId f,(toType typ,toExp def)) | App f [typ, def] <- fs]
|
||||
funs = Map.fromAscList lfuns
|
||||
lcats = [(mkCId c, Prelude.map toHypo hyps) | App c hyps <- cts]
|
||||
cats = Map.fromAscList lcats
|
||||
catfuns = Map.fromAscList
|
||||
[(cat,[f | (f, (DTyp _ c _,_)) <- lfuns, c==cat]) | (cat,_) <- lcats]
|
||||
in Abstr aflags funs cats catfuns,
|
||||
concretes = Map.fromAscList [(mkCId lang, toConcr ts) | App lang ts <- ccs]
|
||||
}
|
||||
where
|
||||
|
||||
toConcr :: [RExp] -> Concr
|
||||
toConcr = foldl add (Concr {
|
||||
cflags = Map.empty,
|
||||
lins = Map.empty,
|
||||
opers = Map.empty,
|
||||
lincats = Map.empty,
|
||||
lindefs = Map.empty,
|
||||
printnames = Map.empty,
|
||||
paramlincats = Map.empty,
|
||||
parser = Nothing
|
||||
})
|
||||
where
|
||||
add :: Concr -> RExp -> Concr
|
||||
add cnc (App "flags" ts) = cnc { cflags = Map.fromAscList [(mkCId f,v) | App f [AStr v] <- ts] }
|
||||
add cnc (App "lin" ts) = cnc { lins = mkTermMap ts }
|
||||
add cnc (App "oper" ts) = cnc { opers = mkTermMap ts }
|
||||
add cnc (App "lincat" ts) = cnc { lincats = mkTermMap ts }
|
||||
add cnc (App "lindef" ts) = cnc { lindefs = mkTermMap ts }
|
||||
add cnc (App "printname" ts) = cnc { printnames = mkTermMap ts }
|
||||
add cnc (App "param" ts) = cnc { paramlincats = mkTermMap ts }
|
||||
add cnc (App "parser" ts) = cnc { parser = Just (toPInfo ts) }
|
||||
|
||||
toPInfo :: [RExp] -> ParserInfo
|
||||
toPInfo [App "rules" rs, App "startupcats" cs] = buildParserInfo (rules, cats)
|
||||
where
|
||||
rules = map toFRule rs
|
||||
cats = Map.fromList [(mkCId c, map expToInt fs) | App c fs <- cs]
|
||||
|
||||
toFRule :: RExp -> FRule
|
||||
toFRule (App "rule"
|
||||
[n,
|
||||
App "cats" (rt:at),
|
||||
App "R" ls]) = FRule fun prof args res lins
|
||||
where
|
||||
(fun,prof) = toFName n
|
||||
args = map expToInt at
|
||||
res = expToInt rt
|
||||
lins = mkArray [mkArray [toSymbol s | s <- l] | App "S" l <- ls]
|
||||
|
||||
toFName :: RExp -> (CId,[Profile])
|
||||
toFName (App "_A" [x]) = (wildCId, [[expToInt x]])
|
||||
toFName (App f ts) = (mkCId f, map toProfile ts)
|
||||
where
|
||||
toProfile :: RExp -> Profile
|
||||
toProfile AMet = []
|
||||
toProfile (App "_A" [t]) = [expToInt t]
|
||||
toProfile (App "_U" ts) = [expToInt t | App "_A" [t] <- ts]
|
||||
|
||||
toSymbol :: RExp -> FSymbol
|
||||
toSymbol (App "P" [n,l]) = FSymCat (expToInt l) (expToInt n)
|
||||
toSymbol (AStr t) = FSymTok t
|
||||
|
||||
toType :: RExp -> Type
|
||||
toType e = case e of
|
||||
App cat [App "H" hypos, App "X" exps] ->
|
||||
DTyp (map toHypo hypos) (mkCId cat) (map toExp exps)
|
||||
_ -> error $ "type " ++ show e
|
||||
|
||||
toHypo :: RExp -> Hypo
|
||||
toHypo e = case e of
|
||||
App x [typ] -> Hyp (mkCId x) (toType typ)
|
||||
_ -> error $ "hypo " ++ show e
|
||||
|
||||
toExp :: RExp -> Expr
|
||||
toExp e = case e of
|
||||
App "Abs" [App x [], exp] -> EAbs (mkCId x) (toExp exp)
|
||||
App "App" [e1,e2] -> EApp (toExp e1) (toExp e2)
|
||||
App "Eq" eqs -> EEq [Equ (map toExp ps) (toExp v) | App "E" (v:ps) <- eqs]
|
||||
App "Var" [App i []] -> EVar (mkCId i)
|
||||
AMet -> EMeta 0
|
||||
AInt i -> ELit (LInt i)
|
||||
AFlt i -> ELit (LFlt i)
|
||||
AStr i -> ELit (LStr i)
|
||||
_ -> error $ "exp " ++ show e
|
||||
|
||||
toTerm :: RExp -> Term
|
||||
toTerm e = case e of
|
||||
App "R" es -> R (map toTerm es)
|
||||
App "S" es -> S (map toTerm es)
|
||||
App "FV" es -> FV (map toTerm es)
|
||||
App "P" [e,v] -> P (toTerm e) (toTerm v)
|
||||
App "W" [AStr s,v] -> W s (toTerm v)
|
||||
App "A" [AInt i] -> V (fromInteger i)
|
||||
App f [] -> F (mkCId f)
|
||||
AInt i -> C (fromInteger i)
|
||||
AMet -> TM "?"
|
||||
AStr s -> K (KS s) ----
|
||||
_ -> error $ "term " ++ show e
|
||||
|
||||
------------------------------
|
||||
--- from internal to parser --
|
||||
------------------------------
|
||||
|
||||
fromPGF :: PGF -> Grammar
|
||||
fromPGF pgf0 = Grm [
|
||||
App "pgf" (AInt pgfMajorVersion:AInt pgfMinorVersion
|
||||
: App (prCId (absname pgf)) [] : map (flip App [] . prCId) (cncnames pgf)),
|
||||
App "flags" [App (prCId f) [AStr v] | (f,v) <- Map.toList (gflags pgf `Map.union` aflags apgf)],
|
||||
App "abstract" [
|
||||
App "fun" [App (prCId f) [fromType t,fromExp d] | (f,(t,d)) <- Map.toList (funs apgf)],
|
||||
App "cat" [App (prCId f) (map fromHypo hs) | (f,hs) <- Map.toList (cats apgf)]
|
||||
],
|
||||
App "concrete" [App (prCId lang) (fromConcrete c) | (lang,c) <- Map.toList (concretes pgf)]
|
||||
]
|
||||
where
|
||||
pgf = utf8GFCC pgf0
|
||||
apgf = abstract pgf
|
||||
fromConcrete cnc = [
|
||||
App "flags" [App (prCId f) [AStr v] | (f,v) <- Map.toList (cflags cnc)],
|
||||
App "lin" [App (prCId f) [fromTerm v] | (f,v) <- Map.toList (lins cnc)],
|
||||
App "oper" [App (prCId f) [fromTerm v] | (f,v) <- Map.toList (opers cnc)],
|
||||
App "lincat" [App (prCId f) [fromTerm v] | (f,v) <- Map.toList (lincats cnc)],
|
||||
App "lindef" [App (prCId f) [fromTerm v] | (f,v) <- Map.toList (lindefs cnc)],
|
||||
App "printname" [App (prCId f) [fromTerm v] | (f,v) <- Map.toList (printnames cnc)],
|
||||
App "param" [App (prCId f) [fromTerm v] | (f,v) <- Map.toList (paramlincats cnc)]
|
||||
] ++ maybe [] (\p -> [fromPInfo p]) (parser cnc)
|
||||
|
||||
fromType :: Type -> RExp
|
||||
fromType e = case e of
|
||||
DTyp hypos cat exps ->
|
||||
App (prCId cat) [
|
||||
App "H" (map fromHypo hypos),
|
||||
App "X" (map fromExp exps)]
|
||||
|
||||
fromHypo :: Hypo -> RExp
|
||||
fromHypo e = case e of
|
||||
Hyp x typ -> App (prCId x) [fromType typ]
|
||||
|
||||
fromExp :: Expr -> RExp
|
||||
fromExp e = case e of
|
||||
EAbs x exp -> App "Abs" [App (prCId x) [], fromExp exp]
|
||||
EApp e1 e2 -> App "App" [fromExp e1, fromExp e2]
|
||||
EVar x -> App "Var" [App (prCId x) []]
|
||||
ELit (LStr s) -> AStr s
|
||||
ELit (LFlt d) -> AFlt d
|
||||
ELit (LInt i) -> AInt (toInteger i)
|
||||
EMeta _ -> AMet ----
|
||||
EEq eqs ->
|
||||
App "Eq" [App "E" (map fromExp (v:ps)) | Equ ps v <- eqs]
|
||||
|
||||
fromTerm :: Term -> RExp
|
||||
fromTerm e = case e of
|
||||
R es -> App "R" (map fromTerm es)
|
||||
S es -> App "S" (map fromTerm es)
|
||||
FV es -> App "FV" (map fromTerm es)
|
||||
P e v -> App "P" [fromTerm e, fromTerm v]
|
||||
W s v -> App "W" [AStr s, fromTerm v]
|
||||
C i -> AInt (toInteger i)
|
||||
TM _ -> AMet
|
||||
F f -> App (prCId f) []
|
||||
V i -> App "A" [AInt (toInteger i)]
|
||||
K (KS s) -> AStr s ----
|
||||
K (KP d vs) -> App "FV" (str d : [str v | Alt v _ <- vs]) ----
|
||||
where
|
||||
str v = App "S" (map AStr v)
|
||||
|
||||
-- ** Parsing info
|
||||
|
||||
fromPInfo :: ParserInfo -> RExp
|
||||
fromPInfo p = App "parser" [
|
||||
App "rules" [fromFRule rule | rule <- Array.elems (allRules p)],
|
||||
App "startupcats" [App (prCId f) (map intToExp cs) | (f,cs) <- Map.toList (startupCats p)]
|
||||
]
|
||||
|
||||
fromFRule :: FRule -> RExp
|
||||
fromFRule (FRule fun prof args res lins) =
|
||||
App "rule" [fromFName (fun,prof),
|
||||
App "cats" (intToExp res:map intToExp args),
|
||||
App "R" [App "S" [fromSymbol s | s <- Array.elems l] | l <- Array.elems lins]
|
||||
]
|
||||
|
||||
fromFName :: (CId,[Profile]) -> RExp
|
||||
fromFName (f,ps) | f == wildCId = fromProfile (head ps)
|
||||
| otherwise = App (prCId f) (map fromProfile ps)
|
||||
where
|
||||
fromProfile :: Profile -> RExp
|
||||
fromProfile [] = AMet
|
||||
fromProfile [x] = daughter x
|
||||
fromProfile args = App "_U" (map daughter args)
|
||||
|
||||
daughter n = App "_A" [intToExp n]
|
||||
|
||||
fromSymbol :: FSymbol -> RExp
|
||||
fromSymbol (FSymCat l n) = App "P" [intToExp n, intToExp l]
|
||||
fromSymbol (FSymTok t) = AStr t
|
||||
|
||||
-- ** Utilities
|
||||
|
||||
mkTermMap :: [RExp] -> Map.Map CId Term
|
||||
mkTermMap ts = Map.fromAscList [(mkCId f,toTerm v) | App f [v] <- ts]
|
||||
|
||||
mkArray :: [a] -> Array.Array Int a
|
||||
mkArray xs = Array.listArray (0, length xs - 1) xs
|
||||
|
||||
expToInt :: Integral a => RExp -> a
|
||||
expToInt (App "neg" [AInt i]) = fromIntegral (negate i)
|
||||
expToInt (AInt i) = fromIntegral i
|
||||
|
||||
expToStr :: RExp -> String
|
||||
expToStr (AStr s) = s
|
||||
|
||||
intToExp :: Integral a => a -> RExp
|
||||
intToExp x | x < 0 = App "neg" [AInt (fromIntegral (negate x))]
|
||||
| otherwise = AInt (fromIntegral x)
|
||||
101
src/PGF/Raw/Parse.hs
Normal file
101
src/PGF/Raw/Parse.hs
Normal file
@@ -0,0 +1,101 @@
|
||||
module PGF.Raw.Parse (parseGrammar) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Raw.Abstract
|
||||
|
||||
import Control.Monad
|
||||
import Data.Char
|
||||
import qualified Data.ByteString.Char8 as BS
|
||||
|
||||
parseGrammar :: String -> IO Grammar
|
||||
parseGrammar s = case runP pGrammar s of
|
||||
Just (x,"") -> return x
|
||||
_ -> fail "Parse error"
|
||||
|
||||
pGrammar :: P Grammar
|
||||
pGrammar = liftM Grm pTerms
|
||||
|
||||
pTerms :: P [RExp]
|
||||
pTerms = liftM2 (:) (pTerm 1) pTerms <++ (skipSpaces >> return [])
|
||||
|
||||
pTerm :: Int -> P RExp
|
||||
pTerm n = skipSpaces >> (pParen <++ pApp <++ pNum <++ pStr <++ pMeta)
|
||||
where pParen = between (char '(') (char ')') (pTerm 0)
|
||||
pApp = liftM2 App pIdent (if n == 0 then pTerms else return [])
|
||||
pStr = char '"' >> liftM AStr (manyTill (pEsc <++ get) (char '"'))
|
||||
pEsc = char '\\' >> get
|
||||
pNum = do x <- munch1 isDigit
|
||||
((char '.' >> munch1 isDigit >>= \y -> return (AFlt (read (x++"."++y))))
|
||||
<++
|
||||
return (AInt (read x)))
|
||||
pMeta = char '?' >> return AMet
|
||||
pIdent = liftM2 (:) (satisfy isIdentFirst) (munch isIdentRest)
|
||||
isIdentFirst c = c == '_' || isAlpha c
|
||||
isIdentRest c = c == '_' || c == '\'' || isAlphaNum c
|
||||
|
||||
-- Parser combinators with only left-biased choice
|
||||
|
||||
newtype P a = P { runP :: String -> Maybe (a,String) }
|
||||
|
||||
instance Monad P where
|
||||
return x = P (\ts -> Just (x,ts))
|
||||
P p >>= f = P (\ts -> p ts >>= \ (x,ts') -> runP (f x) ts')
|
||||
fail _ = pfail
|
||||
|
||||
instance MonadPlus P where
|
||||
mzero = pfail
|
||||
mplus = (<++)
|
||||
|
||||
|
||||
get :: P Char
|
||||
get = P (\ts -> case ts of
|
||||
[] -> Nothing
|
||||
c:cs -> Just (c,cs))
|
||||
|
||||
look :: P String
|
||||
look = P (\ts -> Just (ts,ts))
|
||||
|
||||
(<++) :: P a -> P a -> P a
|
||||
P p <++ P q = P (\ts -> p ts `mplus` q ts)
|
||||
|
||||
pfail :: P a
|
||||
pfail = P (\ts -> Nothing)
|
||||
|
||||
satisfy :: (Char -> Bool) -> P Char
|
||||
satisfy p = do c <- get
|
||||
if p c then return c else pfail
|
||||
|
||||
char :: Char -> P Char
|
||||
char c = satisfy (c==)
|
||||
|
||||
string :: String -> P String
|
||||
string this = look >>= scan this
|
||||
where
|
||||
scan [] _ = return this
|
||||
scan (x:xs) (y:ys) | x == y = get >> scan xs ys
|
||||
scan _ _ = pfail
|
||||
|
||||
skipSpaces :: P ()
|
||||
skipSpaces = look >>= skip
|
||||
where
|
||||
skip (c:s) | isSpace c = get >> skip s
|
||||
skip _ = return ()
|
||||
|
||||
manyTill :: P a -> P end -> P [a]
|
||||
manyTill p end = scan
|
||||
where scan = (end >> return []) <++ liftM2 (:) p scan
|
||||
|
||||
munch :: (Char -> Bool) -> P String
|
||||
munch p = munch1 p <++ return []
|
||||
|
||||
munch1 :: (Char -> Bool) -> P String
|
||||
munch1 p = liftM2 (:) (satisfy p) (munch p)
|
||||
|
||||
choice :: [P a] -> P a
|
||||
choice = msum
|
||||
|
||||
between :: P open -> P close -> P a -> P a
|
||||
between open close p = do open
|
||||
x <- p
|
||||
close
|
||||
return x
|
||||
35
src/PGF/Raw/Print.hs
Normal file
35
src/PGF/Raw/Print.hs
Normal file
@@ -0,0 +1,35 @@
|
||||
module PGF.Raw.Print (printTree) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Raw.Abstract
|
||||
|
||||
import Data.List (intersperse)
|
||||
import Numeric (showFFloat)
|
||||
import qualified Data.ByteString.Char8 as BS
|
||||
|
||||
printTree :: Grammar -> String
|
||||
printTree g = prGrammar g ""
|
||||
|
||||
prGrammar :: Grammar -> ShowS
|
||||
prGrammar (Grm xs) = prRExpList xs
|
||||
|
||||
prRExp :: Int -> RExp -> ShowS
|
||||
prRExp _ (App x []) = showString x
|
||||
prRExp n (App x xs) = p (showString x . showChar ' ' . prRExpList xs)
|
||||
where p s = if n == 0 then s else showChar '(' . s . showChar ')'
|
||||
prRExp _ (AInt x) = shows x
|
||||
prRExp _ (AStr x) = showChar '"' . concatS (map mkEsc x) . showChar '"'
|
||||
prRExp _ (AFlt x) = showFFloat Nothing x
|
||||
prRExp _ AMet = showChar '?'
|
||||
|
||||
mkEsc :: Char -> ShowS
|
||||
mkEsc s = case s of
|
||||
'"' -> showString "\\\""
|
||||
'\\' -> showString "\\\\"
|
||||
_ -> showChar s
|
||||
|
||||
prRExpList :: [RExp] -> ShowS
|
||||
prRExpList = concatS . intersperse (showChar ' ') . map (prRExp 1)
|
||||
|
||||
concatS :: [ShowS] -> ShowS
|
||||
concatS = foldr (.) id
|
||||
105
src/PGF/ShowLinearize.hs
Normal file
105
src/PGF/ShowLinearize.hs
Normal file
@@ -0,0 +1,105 @@
|
||||
module PGF.ShowLinearize (
|
||||
collectWords,
|
||||
tableLinearize,
|
||||
recordLinearize,
|
||||
termLinearize,
|
||||
tabularLinearize,
|
||||
allLinearize
|
||||
) where
|
||||
|
||||
import PGF.CId
|
||||
import PGF.Data
|
||||
import PGF.Macros
|
||||
import PGF.Linearize
|
||||
|
||||
import GF.Data.Operations
|
||||
import Data.List
|
||||
import qualified Data.Map as Map
|
||||
|
||||
-- printing linearizations in different ways with source parameters
|
||||
|
||||
-- internal representation, only used internally in this module
|
||||
data Record =
|
||||
RR [(String,Record)]
|
||||
| RT [(String,Record)]
|
||||
| RFV [Record]
|
||||
| RS String
|
||||
| RCon String
|
||||
|
||||
prRecord :: Record -> String
|
||||
prRecord = prr where
|
||||
prr t = case t of
|
||||
RR fs -> concat $
|
||||
"{" :
|
||||
(intersperse ";" (map (\ (l,v) -> unwords [l,"=", prr v]) fs)) ++ ["}"]
|
||||
RT fs -> concat $
|
||||
"table {" :
|
||||
(intersperse ";" (map (\ (l,v) -> unwords [l,"=>",prr v]) fs)) ++ ["}"]
|
||||
RFV ts -> concat $
|
||||
"variants {" : (intersperse ";" (map prr ts)) ++ ["}"]
|
||||
RS s -> prQuotedString s
|
||||
RCon s -> s
|
||||
|
||||
-- uses the encoding of record types in PGF.paramlincat
|
||||
mkRecord :: Term -> Term -> Record
|
||||
mkRecord typ trm = case (typ,trm) of
|
||||
(_, FV ts) -> RFV $ map (mkRecord typ) ts
|
||||
(R rs, R ts) -> RR [(str lab, mkRecord ty t) | (P lab ty, t) <- zip rs ts]
|
||||
(S [FV ps,ty],R ts) -> RT [(str par, mkRecord ty t) | (par, t) <- zip ps ts]
|
||||
(_,W s (R ts)) -> mkRecord typ (R [K (KS (s ++ u)) | K (KS u) <- ts])
|
||||
(FV ps, C i) -> RCon $ str $ ps !! i
|
||||
(S [], _) -> RS $ str trm
|
||||
_ -> RS $ show trm ---- printTree trm
|
||||
where
|
||||
str = realize
|
||||
|
||||
-- show all branches, without labels and params
|
||||
allLinearize :: (String -> String) -> PGF -> CId -> Tree -> String
|
||||
allLinearize unlex pgf lang = concat . map (unlex . pr) . tabularLinearize pgf lang where
|
||||
pr (p,vs) = unlines vs
|
||||
|
||||
-- show all branches, with labels and params
|
||||
tableLinearize :: (String -> String) -> PGF -> CId -> Tree -> String
|
||||
tableLinearize unlex pgf lang = unlines . map pr . tabularLinearize pgf lang where
|
||||
pr (p,vs) = p +++ ":" +++ unwords (intersperse "|" (map unlex vs))
|
||||
|
||||
-- create a table from labels+params to variants
|
||||
tabularLinearize :: PGF -> CId -> Tree -> [(String,[String])]
|
||||
tabularLinearize pgf lang = branches . recLinearize pgf lang where
|
||||
branches r = case r of
|
||||
RR fs -> [( b,s) | (lab,t) <- fs, (b,s) <- branches t]
|
||||
RT fs -> [(lab +++ b,s) | (lab,t) <- fs, (b,s) <- branches t]
|
||||
RFV rs -> [([], ss) | (_,ss) <- concatMap branches rs]
|
||||
RS s -> [([], [s])]
|
||||
RCon _ -> []
|
||||
|
||||
-- show record in GF-source-like syntax
|
||||
recordLinearize :: PGF -> CId -> Tree -> String
|
||||
recordLinearize pgf lang = prRecord . recLinearize pgf lang
|
||||
|
||||
-- create a GF-like record, forming the basis of all functions above
|
||||
recLinearize :: PGF -> CId -> Tree -> Record
|
||||
recLinearize pgf lang tree = mkRecord typ $ linTree pgf lang tree where
|
||||
typ = case tree of
|
||||
Fun f _ -> lookParamLincat pgf lang $ valCat $ lookType pgf f
|
||||
|
||||
-- show PGF term
|
||||
termLinearize :: PGF -> CId -> Tree -> String
|
||||
termLinearize pgf lang = show . linTree pgf lang
|
||||
|
||||
|
||||
-- for Morphology: word, lemma, tags
|
||||
collectWords :: PGF -> CId -> [(String, [(String,String)])]
|
||||
collectWords pgf lang =
|
||||
concatMap collOne
|
||||
[(f,c,0) | (f,(DTyp [] c _,_)) <- Map.toList $ funs $ abstract pgf]
|
||||
where
|
||||
collOne (f,c,i) =
|
||||
fromRec f [prCId c] (recLinearize pgf lang (Fun f (replicate i (Meta 888))))
|
||||
fromRec f v r = case r of
|
||||
RR rs -> concat [fromRec f v t | (_,t) <- rs]
|
||||
RT rs -> concat [fromRec f (p:v) t | (p,t) <- rs]
|
||||
RFV rs -> concatMap (fromRec f v) rs
|
||||
RS s -> [(s,[(prCId f,unwords (reverse v))])]
|
||||
RCon c -> [] ---- inherent
|
||||
|
||||
48
src/PGF/VisualizeTree.hs
Normal file
48
src/PGF/VisualizeTree.hs
Normal file
@@ -0,0 +1,48 @@
|
||||
----------------------------------------------------------------------
|
||||
-- |
|
||||
-- Module : VisualizeTree
|
||||
-- Maintainer : AR
|
||||
-- Stability : (stable)
|
||||
-- Portability : (portable)
|
||||
--
|
||||
-- > CVS $Date:
|
||||
-- > CVS $Author:
|
||||
-- > CVS $Revision:
|
||||
--
|
||||
-- Print a graph of an abstract syntax tree in Graphviz DOT format
|
||||
-- Based on BB's VisualizeGrammar
|
||||
-- FIXME: change this to use GF.Visualization.Graphviz,
|
||||
-- instead of rolling its own.
|
||||
-----------------------------------------------------------------------------
|
||||
|
||||
module PGF.VisualizeTree ( visualizeTrees
|
||||
) where
|
||||
|
||||
import PGF.CId (prCId)
|
||||
import PGF.Data
|
||||
import PGF.Macros (lookValCat)
|
||||
|
||||
visualizeTrees :: PGF -> (Bool,Bool) -> [Tree] -> String
|
||||
visualizeTrees pgf funscats = unlines . map (prGraph False . tree2graph pgf funscats)
|
||||
|
||||
tree2graph :: PGF -> (Bool,Bool) -> Tree -> [String]
|
||||
tree2graph pgf (funs,cats) = prf [] where
|
||||
prf ps t = case t of
|
||||
Fun cid trees ->
|
||||
let (nod,lab) = prn ps cid in
|
||||
(nod ++ " [label = " ++ lab ++ ", style = \"solid\", shape = \"plaintext\"] ;") :
|
||||
[ pra (j:ps) nod t | (j,t) <- zip [0..] trees] ++
|
||||
concat [prf (j:ps) t | (j,t) <- zip [0..] trees]
|
||||
prn ps cid =
|
||||
let
|
||||
fun = if funs then prCId cid else ""
|
||||
cat = if cats then prCat cid else ""
|
||||
colon = if funs && cats then " : " else ""
|
||||
lab = "\"" ++ fun ++ colon ++ cat ++ "\""
|
||||
in (show(show (ps :: [Int])),lab)
|
||||
pra i nod t@(Fun cid _) = nod ++ arr ++ fst (prn i cid) ++ " [style = \"solid\"];"
|
||||
arr = " -- " -- if digr then " -> " else " -- "
|
||||
prCat = prCId . lookValCat pgf
|
||||
|
||||
prGraph digr ns = concat $ map (++"\n") $ [graph ++ "{\n"] ++ ns ++ ["}"] where
|
||||
graph = if digr then "digraph" else "graph"
|
||||
13
src/PGF/doc/Eng.gf
Normal file
13
src/PGF/doc/Eng.gf
Normal file
@@ -0,0 +1,13 @@
|
||||
concrete Eng of Ex = {
|
||||
lincat
|
||||
S = {s : Str} ;
|
||||
NP = {s : Str ; n : Num} ;
|
||||
VP = {s : Num => Str} ;
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin
|
||||
Pred np vp = {s = np.s ++ vp.s ! np.n} ;
|
||||
She = {s = "she" ; n = Sg} ;
|
||||
They = {s = "they" ; n = Pl} ;
|
||||
Sleep = {s = table {Sg => "sleeps" ; Pl => "sleep"}} ;
|
||||
}
|
||||
8
src/PGF/doc/Ex.gf
Normal file
8
src/PGF/doc/Ex.gf
Normal file
@@ -0,0 +1,8 @@
|
||||
abstract Ex = {
|
||||
cat
|
||||
S ; NP ; VP ;
|
||||
fun
|
||||
Pred : NP -> VP -> S ;
|
||||
She, They : NP ;
|
||||
Sleep : VP ;
|
||||
}
|
||||
13
src/PGF/doc/Swe.gf
Normal file
13
src/PGF/doc/Swe.gf
Normal file
@@ -0,0 +1,13 @@
|
||||
concrete Swe of Ex = {
|
||||
lincat
|
||||
S = {s : Str} ;
|
||||
NP = {s : Str} ;
|
||||
VP = {s : Str} ;
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin
|
||||
Pred np vp = {s = np.s ++ vp.s} ;
|
||||
She = {s = "hon"} ;
|
||||
They = {s = "de"} ;
|
||||
Sleep = {s = "sover"} ;
|
||||
}
|
||||
64
src/PGF/doc/Test.gf
Normal file
64
src/PGF/doc/Test.gf
Normal file
@@ -0,0 +1,64 @@
|
||||
-- to test GFCC compilation
|
||||
|
||||
flags coding=utf8 ;
|
||||
|
||||
cat S ; NP ; N ; VP ;
|
||||
|
||||
fun Pred : NP -> VP -> S ;
|
||||
fun Pred2 : NP -> VP -> NP -> S ;
|
||||
fun Det, Dets : N -> NP ;
|
||||
fun Mina, Sina, Me, Te : NP ;
|
||||
fun Raha, Paska, Pallo : N ;
|
||||
fun Puhua, Munia, Sanoa : VP ;
|
||||
|
||||
param Person = P1 | P2 | P3 ;
|
||||
param Number = Sg | Pl ;
|
||||
param Case = Nom | Part ;
|
||||
|
||||
param NForm = NF Number Case ;
|
||||
param VForm = VF Number Person ;
|
||||
|
||||
lincat N = Noun ;
|
||||
lincat VP = Verb ;
|
||||
|
||||
oper Noun = {s : NForm => Str} ;
|
||||
oper Verb = {s : VForm => Str} ;
|
||||
|
||||
lincat NP = {s : Case => Str ; a : {n : Number ; p : Person}} ;
|
||||
|
||||
lin Pred np vp = {s = np.s ! Nom ++ vp.s ! VF np.a.n np.a.p} ;
|
||||
lin Pred2 np vp ob = {s = np.s ! Nom ++ vp.s ! VF np.a.n np.a.p ++ ob.s ! Part} ;
|
||||
lin Det no = {s = \\c => no.s ! NF Sg c ; a = {n = Sg ; p = P3}} ;
|
||||
lin Dets no = {s = \\c => no.s ! NF Pl c ; a = {n = Pl ; p = P3}} ;
|
||||
lin Mina = {s = table Case ["minä" ; "minua"] ; a = {n = Sg ; p = P1}} ;
|
||||
lin Te = {s = table Case ["te" ; "teitä"] ; a = {n = Pl ; p = P2}} ;
|
||||
lin Sina = {s = table Case ["sinä" ; "sinua"] ; a = {n = Sg ; p = P2}} ;
|
||||
lin Me = {s = table Case ["me" ; "meitä"] ; a = {n = Pl ; p = P1}} ;
|
||||
|
||||
lin Raha = mkN "raha" ;
|
||||
lin Paska = mkN "paska" ;
|
||||
lin Pallo = mkN "pallo" ;
|
||||
lin Puhua = mkV "puhu" ;
|
||||
lin Munia = mkV "muni" ;
|
||||
lin Sanoa = mkV "sano" ;
|
||||
|
||||
oper mkN : Str -> Noun = \raha -> {
|
||||
s = table {
|
||||
NF Sg Nom => raha ;
|
||||
NF Sg Part => raha + "a" ;
|
||||
NF Pl Nom => raha + "t" ;
|
||||
NF Pl Part => Predef.tk 1 raha + "oja"
|
||||
}
|
||||
} ;
|
||||
|
||||
oper mkV : Str -> Verb = \puhu -> {
|
||||
s = table {
|
||||
VF Sg P1 => puhu + "n" ;
|
||||
VF Sg P2 => puhu + "t" ;
|
||||
VF Sg P3 => puhu + Predef.dp 1 puhu ;
|
||||
VF Pl P1 => puhu + "mme" ;
|
||||
VF Pl P2 => puhu + "tte" ;
|
||||
VF Pl P3 => puhu + "vat"
|
||||
}
|
||||
} ;
|
||||
|
||||
809
src/PGF/doc/gfcc.html
Normal file
809
src/PGF/doc/gfcc.html
Normal file
@@ -0,0 +1,809 @@
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
|
||||
<HTML>
|
||||
<HEAD>
|
||||
<META NAME="generator" CONTENT="http://txt2tags.sf.net">
|
||||
<TITLE>The GFCC Grammar Format</TITLE>
|
||||
</HEAD><BODY BGCOLOR="white" TEXT="black">
|
||||
<P ALIGN="center"><CENTER><H1>The GFCC Grammar Format</H1>
|
||||
<FONT SIZE="4">
|
||||
<I>Aarne Ranta</I><BR>
|
||||
October 5, 2007
|
||||
</FONT></CENTER>
|
||||
|
||||
<P>
|
||||
Author's address:
|
||||
<A HREF="http://www.cs.chalmers.se/~aarne"><CODE>http://www.cs.chalmers.se/~aarne</CODE></A>
|
||||
</P>
|
||||
<P>
|
||||
History:
|
||||
</P>
|
||||
<UL>
|
||||
<LI>5 Oct 2007: new, better structured GFCC with full expressive power
|
||||
<LI>19 Oct: translation of lincats, new figures on C++
|
||||
<LI>3 Oct 2006: first version
|
||||
</UL>
|
||||
|
||||
<H2>What is GFCC</H2>
|
||||
<P>
|
||||
GFCC is a low-level format for GF grammars. Its aim is to contain the minimum
|
||||
that is needed to process GF grammars at runtime. This minimality has three
|
||||
advantages:
|
||||
</P>
|
||||
<UL>
|
||||
<LI>compact grammar files and run-time objects
|
||||
<LI>time and space efficient processing
|
||||
<LI>simple definition of interpreters
|
||||
</UL>
|
||||
|
||||
<P>
|
||||
Thus we also want to call GFCC the <B>portable grammar format</B>.
|
||||
</P>
|
||||
<P>
|
||||
The idea is that all embedded GF applications use GFCC.
|
||||
The GF system would be primarily used as a compiler and as a grammar
|
||||
development tool.
|
||||
</P>
|
||||
<P>
|
||||
Since GFCC is implemented in BNFC, a parser of the format is readily
|
||||
available for C, C++, C#, Haskell, Java, and OCaml. Also an XML
|
||||
representation can be generated in BNFC. A
|
||||
<A HREF="../">reference implementation</A>
|
||||
of linearization and some other functions has been written in Haskell.
|
||||
</P>
|
||||
<H2>GFCC vs. GFC</H2>
|
||||
<P>
|
||||
GFCC is aimed to replace GFC as the run-time grammar format. GFC was designed
|
||||
to be a run-time format, but also to
|
||||
support separate compilation of grammars, i.e.
|
||||
to store the results of compiling
|
||||
individual GF modules. But this means that GFC has to contain extra information,
|
||||
such as type annotations, which is only needed in compilation and not at
|
||||
run-time. In particular, the pattern matching syntax and semantics of GFC is
|
||||
complex and therefore difficult to implement in new platforms.
|
||||
</P>
|
||||
<P>
|
||||
Actually, GFC is planned to be omitted also as the target format of
|
||||
separate compilation, where plain GF (type annotated and partially evaluated)
|
||||
will be used instead. GFC provides only marginal advantages as a target format
|
||||
compared with GF, and it is therefore just extra weight to carry around this
|
||||
format.
|
||||
</P>
|
||||
<P>
|
||||
The main differences of GFCC compared with GFC (and GF) can be summarized as follows:
|
||||
</P>
|
||||
<UL>
|
||||
<LI>there are no modules, and therefore no qualified names
|
||||
<LI>a GFCC grammar is multilingual, and consists of a common abstract syntax
|
||||
together with one concrete syntax per language
|
||||
<LI>records and tables are replaced by arrays
|
||||
<LI>record labels and parameter values are replaced by integers
|
||||
<LI>record projection and table selection are replaced by array indexing
|
||||
<LI>even though the format does support dependent types and higher-order abstract
|
||||
syntax, there is no interpreted yet that does this
|
||||
</UL>
|
||||
|
||||
<P>
|
||||
Here is an example of a GF grammar, consisting of three modules,
|
||||
as translated to GFCC. The representations are aligned; thus they do not completely
|
||||
reflect the order of judgements in GFCC files, which have different orders of
|
||||
blocks of judgements, and alphabetical sorting.
|
||||
</P>
|
||||
<PRE>
|
||||
grammar Ex(Eng,Swe);
|
||||
|
||||
abstract Ex = { abstract {
|
||||
cat cat
|
||||
S ; NP ; VP ; NP[]; S[]; VP[];
|
||||
fun fun
|
||||
Pred : NP -> VP -> S ; Pred=[(($ 0! 1),(($ 1! 0)!($ 0! 0)))];
|
||||
She, They : NP ; She=[0,"she"];
|
||||
Sleep : VP ; They=[1,"they"];
|
||||
Sleep=[["sleeps","sleep"]];
|
||||
} } ;
|
||||
|
||||
concrete Eng of Ex = { concrete Eng {
|
||||
lincat lincat
|
||||
S = {s : Str} ; S=[()];
|
||||
NP = {s : Str ; n : Num} ; NP=[1,()];
|
||||
VP = {s : Num => Str} ; VP=[[(),()]];
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin lin
|
||||
Pred np vp = { Pred=[(($ 0! 1),(($ 1! 0)!($ 0! 0)))];
|
||||
s = np.s ++ vp.s ! np.n} ;
|
||||
She = {s = "she" ; n = Sg} ; She=[0,"she"];
|
||||
They = {s = "they" ; n = Pl} ; They = [1, "they"];
|
||||
Sleep = {s = table { Sleep=[["sleeps","sleep"]];
|
||||
Sg => "sleeps" ;
|
||||
Pl => "sleep"
|
||||
}
|
||||
} ;
|
||||
} } ;
|
||||
|
||||
concrete Swe of Ex = { concrete Swe {
|
||||
lincat lincat
|
||||
S = {s : Str} ; S=[()];
|
||||
NP = {s : Str} ; NP=[()];
|
||||
VP = {s : Str} ; VP=[()];
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin lin
|
||||
Pred np vp = { Pred = [(($0!0),($1!0))];
|
||||
s = np.s ++ vp.s} ;
|
||||
She = {s = "hon"} ; She = ["hon"];
|
||||
They = {s = "de"} ; They = ["de"];
|
||||
Sleep = {s = "sover"} ; Sleep = ["sover"];
|
||||
} } ;
|
||||
</PRE>
|
||||
<P></P>
|
||||
<H2>The syntax of GFCC files</H2>
|
||||
<P>
|
||||
The complete BNFC grammar, from which
|
||||
the rules in this section are taken, is in the file
|
||||
<A HREF="../DataGFCC.cf"><CODE>GF/GFCC/GFCC.cf</CODE></A>.
|
||||
</P>
|
||||
<H3>Top level</H3>
|
||||
<P>
|
||||
A grammar has a header telling the name of the abstract syntax
|
||||
(often specifying an application domain), and the names of
|
||||
the concrete languages. The abstract syntax and the concrete
|
||||
syntaxes themselves follow.
|
||||
</P>
|
||||
<PRE>
|
||||
Grm. Grammar ::=
|
||||
"grammar" CId "(" [CId] ")" ";"
|
||||
Abstract ";"
|
||||
[Concrete] ;
|
||||
|
||||
Abs. Abstract ::=
|
||||
"abstract" "{"
|
||||
"flags" [Flag]
|
||||
"fun" [FunDef]
|
||||
"cat" [CatDef]
|
||||
"}" ;
|
||||
|
||||
Cnc. Concrete ::=
|
||||
"concrete" CId "{"
|
||||
"flags" [Flag]
|
||||
"lin" [LinDef]
|
||||
"oper" [LinDef]
|
||||
"lincat" [LinDef]
|
||||
"lindef" [LinDef]
|
||||
"printname" [LinDef]
|
||||
"}" ;
|
||||
</PRE>
|
||||
<P>
|
||||
This syntax organizes each module to a sequence of <B>fields</B>, such
|
||||
as flags, linearizations, operations, linearization types, etc.
|
||||
It is envisaged that particular applications can ignore some
|
||||
of the fields, typically so that earlier fields are more
|
||||
important than later ones.
|
||||
</P>
|
||||
<P>
|
||||
The judgement forms have the following syntax.
|
||||
</P>
|
||||
<PRE>
|
||||
Flg. Flag ::= CId "=" String ;
|
||||
Cat. CatDef ::= CId "[" [Hypo] "]" ;
|
||||
Fun. FunDef ::= CId ":" Type "=" Exp ;
|
||||
Lin. LinDef ::= CId "=" Term ;
|
||||
</PRE>
|
||||
<P>
|
||||
For the run-time system, the reference implementation in Haskell
|
||||
uses a structure that gives efficient look-up:
|
||||
</P>
|
||||
<PRE>
|
||||
data GFCC = GFCC {
|
||||
absname :: CId ,
|
||||
cncnames :: [CId] ,
|
||||
abstract :: Abstr ,
|
||||
concretes :: Map CId Concr
|
||||
}
|
||||
|
||||
data Abstr = Abstr {
|
||||
aflags :: Map CId String, -- value of a flag
|
||||
funs :: Map CId (Type,Exp), -- type and def of a fun
|
||||
cats :: Map CId [Hypo], -- context of a cat
|
||||
catfuns :: Map CId [CId] -- funs yielding a cat (redundant, for fast lookup)
|
||||
}
|
||||
|
||||
data Concr = Concr {
|
||||
flags :: Map CId String, -- value of a flag
|
||||
lins :: Map CId Term, -- lin of a fun
|
||||
opers :: Map CId Term, -- oper generated by subex elim
|
||||
lincats :: Map CId Term, -- lin type of a cat
|
||||
lindefs :: Map CId Term, -- lin default of a cat
|
||||
printnames :: Map CId Term -- printname of a cat or a fun
|
||||
}
|
||||
</PRE>
|
||||
<P>
|
||||
These definitions are from <A HREF="../DataGFCC.hs"><CODE>GF/GFCC/DataGFCC.hs</CODE></A>.
|
||||
</P>
|
||||
<P>
|
||||
Identifiers (<CODE>CId</CODE>) are like <CODE>Ident</CODE> in GF, except that
|
||||
the compiler produces constants prefixed with <CODE>_</CODE> in
|
||||
the common subterm elimination optimization.
|
||||
</P>
|
||||
<PRE>
|
||||
token CId (('_' | letter) (letter | digit | '\'' | '_')*) ;
|
||||
</PRE>
|
||||
<P></P>
|
||||
<H3>Abstract syntax</H3>
|
||||
<P>
|
||||
Types are first-order function types built from argument type
|
||||
contexts and value types, which are applications of
|
||||
category symbols. Syntax trees (<CODE>Exp</CODE>) are
|
||||
rose trees with nodes consisting of a head (<CODE>Atom</CODE>) and
|
||||
bound variables (<CODE>CId</CODE>).
|
||||
</P>
|
||||
<PRE>
|
||||
DTyp. Type ::= "[" [Hypo] "]" CId [Exp] ;
|
||||
DTr. Exp ::= "[" "(" [CId] ")" Atom [Exp] "]" ;
|
||||
Hyp. Hypo ::= CId ":" Type ;
|
||||
</PRE>
|
||||
<P>
|
||||
The head Atom is either a function
|
||||
constant, a bound variable, or a metavariable, or a string, integer, or float
|
||||
literal.
|
||||
</P>
|
||||
<PRE>
|
||||
AC. Atom ::= CId ;
|
||||
AS. Atom ::= String ;
|
||||
AI. Atom ::= Integer ;
|
||||
AF. Atom ::= Double ;
|
||||
AM. Atom ::= "?" Integer ;
|
||||
</PRE>
|
||||
<P>
|
||||
The context-free types and trees of the "old GFCC" are special
|
||||
cases, which can be defined as follows:
|
||||
</P>
|
||||
<PRE>
|
||||
Typ. Type ::= [CId] "->" CId
|
||||
Typ args val = DTyp [Hyp (CId "_") arg | arg <- args] val
|
||||
|
||||
Tr. Exp ::= "(" CId [Exp] ")"
|
||||
Tr fun exps = DTr [] fun exps
|
||||
</PRE>
|
||||
<P>
|
||||
To store semantic (<CODE>def</CODE>) definitions by cases, the following expression
|
||||
form is provided, but it is only meaningful in the last field of a function
|
||||
declaration in an abstract syntax:
|
||||
</P>
|
||||
<PRE>
|
||||
EEq. Exp ::= "{" [Equation] "}" ;
|
||||
Equ. Equation ::= [Exp] "->" Exp ;
|
||||
</PRE>
|
||||
<P>
|
||||
Notice that expressions are used to encode patterns. Primitive notions
|
||||
(the default semantics in GF) are encoded as empty sets of equations
|
||||
(<CODE>[]</CODE>). For a constructor (canonical form) of a category <CODE>C</CODE>, we
|
||||
aim to use the encoding as the application <CODE>(_constr C)</CODE>.
|
||||
</P>
|
||||
<H3>Concrete syntax</H3>
|
||||
<P>
|
||||
Linearization terms (<CODE>Term</CODE>) are built as follows.
|
||||
Constructor names are shown to make the later code
|
||||
examples readable.
|
||||
</P>
|
||||
<PRE>
|
||||
R. Term ::= "[" [Term] "]" ; -- array (record/table)
|
||||
P. Term ::= "(" Term "!" Term ")" ; -- access to field (projection/selection)
|
||||
S. Term ::= "(" [Term] ")" ; -- concatenated sequence
|
||||
K. Term ::= Tokn ; -- token
|
||||
V. Term ::= "$" Integer ; -- argument (subtree)
|
||||
C. Term ::= Integer ; -- array index (label/parameter value)
|
||||
FV. Term ::= "[|" [Term] "|]" ; -- free variation
|
||||
TM. Term ::= "?" ; -- linearization of metavariable
|
||||
</PRE>
|
||||
<P>
|
||||
Tokens are strings or (maybe obsolescent) prefix-dependent
|
||||
variant lists.
|
||||
</P>
|
||||
<PRE>
|
||||
KS. Tokn ::= String ;
|
||||
KP. Tokn ::= "[" "pre" [String] "[" [Variant] "]" "]" ;
|
||||
Var. Variant ::= [String] "/" [String] ;
|
||||
</PRE>
|
||||
<P>
|
||||
Two special forms of terms are introduced by the compiler
|
||||
as optimizations. They can in principle be eliminated, but
|
||||
their presence makes grammars much more compact. Their semantics
|
||||
will be explained in a later section.
|
||||
</P>
|
||||
<PRE>
|
||||
F. Term ::= CId ; -- global constant
|
||||
W. Term ::= "(" String "+" Term ")" ; -- prefix + suffix table
|
||||
</PRE>
|
||||
<P>
|
||||
There is also a deprecated form of "record parameter alias",
|
||||
</P>
|
||||
<PRE>
|
||||
RP. Term ::= "(" Term "@" Term ")"; -- DEPRECATED
|
||||
</PRE>
|
||||
<P>
|
||||
which will be removed when the migration to new GFCC is complete.
|
||||
</P>
|
||||
<H2>The semantics of concrete syntax terms</H2>
|
||||
<P>
|
||||
The code in this section is from <A HREF="../Linearize.hs"><CODE>GF/GFCC/Linearize.hs</CODE></A>.
|
||||
</P>
|
||||
<H3>Linearization and realization</H3>
|
||||
<P>
|
||||
The linearization algorithm is essentially the same as in
|
||||
GFC: a tree is linearized by evaluating its linearization term
|
||||
in the environment of the linearizations of the subtrees.
|
||||
Literal atoms are linearized in the obvious way.
|
||||
The function also needs to know the language (i.e. concrete syntax)
|
||||
in which linearization is performed.
|
||||
</P>
|
||||
<PRE>
|
||||
linExp :: GFCC -> CId -> Exp -> Term
|
||||
linExp gfcc lang tree@(DTr _ at trees) = case at of
|
||||
AC fun -> comp (Prelude.map lin trees) $ look fun
|
||||
AS s -> R [kks (show s)] -- quoted
|
||||
AI i -> R [kks (show i)]
|
||||
AF d -> R [kks (show d)]
|
||||
AM -> TM
|
||||
where
|
||||
lin = linExp gfcc lang
|
||||
comp = compute gfcc lang
|
||||
look = lookLin gfcc lang
|
||||
</PRE>
|
||||
<P>
|
||||
TODO: bindings must be supported.
|
||||
</P>
|
||||
<P>
|
||||
The result of linearization is usually a record, which is realized as
|
||||
a string using the following algorithm.
|
||||
</P>
|
||||
<PRE>
|
||||
realize :: Term -> String
|
||||
realize trm = case trm of
|
||||
R (t:_) -> realize t
|
||||
S ss -> unwords $ Prelude.map realize ss
|
||||
K (KS s) -> s
|
||||
K (KP s _) -> unwords s ---- prefix choice TODO
|
||||
W s t -> s ++ realize t
|
||||
FV (t:_) -> realize t
|
||||
TM -> "?"
|
||||
</PRE>
|
||||
<P>
|
||||
Notice that realization always picks the first field of a record.
|
||||
If a linearization type has more than one field, the first field
|
||||
does not necessarily contain the desired string.
|
||||
Also notice that the order of record fields in GFCC is not necessarily
|
||||
the same as in GF source.
|
||||
</P>
|
||||
<H3>Term evaluation</H3>
|
||||
<P>
|
||||
Evaluation follows call-by-value order, with two environments
|
||||
needed:
|
||||
</P>
|
||||
<UL>
|
||||
<LI>the grammar (a concrete syntax) to give the global constants
|
||||
<LI>an array of terms to give the subtree linearizations
|
||||
</UL>
|
||||
|
||||
<P>
|
||||
The code is presented in one-level pattern matching, to
|
||||
enable reimplementations in languages that do not permit
|
||||
deep patterns (such as Java and C++).
|
||||
</P>
|
||||
<PRE>
|
||||
compute :: GFCC -> CId -> [Term] -> Term -> Term
|
||||
compute gfcc lang args = comp where
|
||||
comp trm = case trm of
|
||||
P r p -> proj (comp r) (comp p)
|
||||
W s t -> W s (comp t)
|
||||
R ts -> R $ Prelude.map comp ts
|
||||
V i -> idx args (fromInteger i) -- already computed
|
||||
F c -> comp $ look c -- not computed (if contains V)
|
||||
FV ts -> FV $ Prelude.map comp ts
|
||||
S ts -> S $ Prelude.filter (/= S []) $ Prelude.map comp ts
|
||||
_ -> trm
|
||||
|
||||
look = lookOper gfcc lang
|
||||
|
||||
idx xs i = xs !! i
|
||||
|
||||
proj r p = case (r,p) of
|
||||
(_, FV ts) -> FV $ Prelude.map (proj r) ts
|
||||
(W s t, _) -> kks (s ++ getString (proj t p))
|
||||
_ -> comp $ getField r (getIndex p)
|
||||
|
||||
getString t = case t of
|
||||
K (KS s) -> s
|
||||
_ -> trace ("ERROR in grammar compiler: string from "++ show t) "ERR"
|
||||
|
||||
getIndex t = case t of
|
||||
C i -> fromInteger i
|
||||
RP p _ -> getIndex p
|
||||
TM -> 0 -- default value for parameter
|
||||
_ -> trace ("ERROR in grammar compiler: index from " ++ show t) 0
|
||||
|
||||
getField t i = case t of
|
||||
R rs -> idx rs i
|
||||
RP _ r -> getField r i
|
||||
TM -> TM
|
||||
_ -> trace ("ERROR in grammar compiler: field from " ++ show t) t
|
||||
</PRE>
|
||||
<P></P>
|
||||
<H3>The special term constructors</H3>
|
||||
<P>
|
||||
The three forms introduced by the compiler may need a special
|
||||
explanation.
|
||||
</P>
|
||||
<P>
|
||||
Global constants
|
||||
</P>
|
||||
<PRE>
|
||||
Term ::= CId ;
|
||||
</PRE>
|
||||
<P>
|
||||
are shorthands for complex terms. They are produced by the
|
||||
compiler by (iterated) <B>common subexpression elimination</B>.
|
||||
They are often more powerful than hand-devised code sharing in the source
|
||||
code. They could be computed off-line by replacing each identifier by
|
||||
its definition.
|
||||
</P>
|
||||
<P>
|
||||
<B>Prefix-suffix tables</B>
|
||||
</P>
|
||||
<PRE>
|
||||
Term ::= "(" String "+" Term ")" ;
|
||||
</PRE>
|
||||
<P>
|
||||
represent tables of word forms divided to the longest common prefix
|
||||
and its array of suffixes. In the example grammar above, we have
|
||||
</P>
|
||||
<PRE>
|
||||
Sleep = [("sleep" + ["s",""])]
|
||||
</PRE>
|
||||
<P>
|
||||
which in fact is equal to the array of full forms
|
||||
</P>
|
||||
<PRE>
|
||||
["sleeps", "sleep"]
|
||||
</PRE>
|
||||
<P>
|
||||
The power of this construction comes from the fact that suffix sets
|
||||
tend to be repeated in a language, and can therefore be collected
|
||||
by common subexpression elimination. It is this technique that
|
||||
explains the used syntax rather than the more accurate
|
||||
</P>
|
||||
<PRE>
|
||||
"(" String "+" [String] ")"
|
||||
</PRE>
|
||||
<P>
|
||||
since we want the suffix part to be a <CODE>Term</CODE> for the optimization to
|
||||
take effect.
|
||||
</P>
|
||||
<H2>Compiling to GFCC</H2>
|
||||
<P>
|
||||
Compilation to GFCC is performed by the GF grammar compiler, and
|
||||
GFCC interpreters need not know what it does. For grammar writers,
|
||||
however, it might be interesting to know what happens to the grammars
|
||||
in the process.
|
||||
</P>
|
||||
<P>
|
||||
The compilation phases are the following
|
||||
</P>
|
||||
<OL>
|
||||
<LI>type check and partially evaluate GF source
|
||||
<LI>create a symbol table mapping the GF parameter and record types to
|
||||
fixed-size arrays, and parameter values and record labels to integers
|
||||
<LI>traverse the linearization rules replacing parameters and labels by integers
|
||||
<LI>reorganize the created GF grammar so that it has just one abstract syntax
|
||||
and one concrete syntax per language
|
||||
<LI>TODO: apply UTF8 encoding to the grammar, if not yet applied (this is told by the
|
||||
<CODE>coding</CODE> flag)
|
||||
<LI>translate the GF grammar object to a GFCC grammar object, using a simple
|
||||
compositional mapping
|
||||
<LI>perform the word-suffix optimization on GFCC linearization terms
|
||||
<LI>perform subexpression elimination on each concrete syntax module
|
||||
<LI>print out the GFCC code
|
||||
</OL>
|
||||
|
||||
<H3>Problems in GFCC compilation</H3>
|
||||
<P>
|
||||
Two major problems had to be solved in compiling GF to GFCC:
|
||||
</P>
|
||||
<UL>
|
||||
<LI>consistent order of tables and records, to permit the array translation
|
||||
<LI>run-time variables in complex parameter values.
|
||||
</UL>
|
||||
|
||||
<P>
|
||||
The current implementation is still experimental and may fail
|
||||
to generate correct code. Any errors remaining are likely to be
|
||||
related to the two problems just mentioned.
|
||||
</P>
|
||||
<P>
|
||||
The order problem is solved in slightly different ways for tables and records.
|
||||
In both cases, <B>eta expansion</B> is used to establish a
|
||||
canonical order. Tables are ordered by applying the preorder induced
|
||||
by <CODE>param</CODE> definitions. Records are ordered by sorting them by labels.
|
||||
This means that
|
||||
e.g. the <CODE>s</CODE> field will in general no longer appear as the first
|
||||
field, even if it does so in the GF source code. But relying on the
|
||||
order of fields in a labelled record would be misplaced anyway.
|
||||
</P>
|
||||
<P>
|
||||
The canonical form of records is further complicated by lock fields,
|
||||
i.e. dummy fields of form <CODE>lock_C = <></CODE>, which are added to grammar
|
||||
libraries to force intensionality of linearization types. The problem
|
||||
is that the absence of a lock field only generates a warning, not
|
||||
an error. Therefore a GF grammar can contain objects of the same
|
||||
type with and without a lock field. This problem was solved in GFCC
|
||||
generation by just removing all lock fields (defined as fields whose
|
||||
type is the empty record type). This has the further advantage of
|
||||
(slightly) reducing the grammar size. More importantly, it is safe
|
||||
to remove lock fields, because they are never used in computation,
|
||||
and because intensional types are only needed in grammars reused
|
||||
as libraries, not in grammars used at runtime.
|
||||
</P>
|
||||
<P>
|
||||
While the order problem is rather bureaucratic in nature, run-time
|
||||
variables are an interesting problem. They arise in the presence
|
||||
of complex parameter values, created by argument-taking constructors
|
||||
and parameter records. To give an example, consider the GF parameter
|
||||
type system
|
||||
</P>
|
||||
<PRE>
|
||||
Number = Sg | Pl ;
|
||||
Person = P1 | P2 | P3 ;
|
||||
Agr = Ag Number Person ;
|
||||
</PRE>
|
||||
<P>
|
||||
The values can be translated to integers in the expected way,
|
||||
</P>
|
||||
<PRE>
|
||||
Sg = 0, Pl = 1
|
||||
P1 = 0, P2 = 1, P3 = 2
|
||||
Ag Sg P1 = 0, Ag Sg P2 = 1, Ag Sg P3 = 2,
|
||||
Ag Pl P1 = 3, Ag Pl P2 = 4, Ag Pl P3 = 5
|
||||
</PRE>
|
||||
<P>
|
||||
However, an argument of <CODE>Agr</CODE> can be a run-time variable, as in
|
||||
</P>
|
||||
<PRE>
|
||||
Ag np.n P3
|
||||
</PRE>
|
||||
<P>
|
||||
This expression must first be translated to a case expression,
|
||||
</P>
|
||||
<PRE>
|
||||
case np.n of {
|
||||
0 => 2 ;
|
||||
1 => 5
|
||||
}
|
||||
</PRE>
|
||||
<P>
|
||||
which can then be translated to the GFCC term
|
||||
</P>
|
||||
<PRE>
|
||||
([2,5] ! ($0 ! $1))
|
||||
</PRE>
|
||||
<P>
|
||||
assuming that the variable <CODE>np</CODE> is the first argument and that its
|
||||
<CODE>Number</CODE> field is the second in the record.
|
||||
</P>
|
||||
<P>
|
||||
This transformation of course has to be performed recursively, since
|
||||
there can be several run-time variables in a parameter value:
|
||||
</P>
|
||||
<PRE>
|
||||
Ag np.n np.p
|
||||
</PRE>
|
||||
<P>
|
||||
A similar transformation would be possible to deal with the double
|
||||
role of parameter records discussed above. Thus the type
|
||||
</P>
|
||||
<PRE>
|
||||
RNP = {n : Number ; p : Person}
|
||||
</PRE>
|
||||
<P>
|
||||
could be uniformly translated into the set <CODE>{0,1,2,3,4,5}</CODE>
|
||||
as <CODE>Agr</CODE> above. Selections would be simple instances of indexing.
|
||||
But any projection from the record should be translated into
|
||||
a case expression,
|
||||
</P>
|
||||
<PRE>
|
||||
rnp.n ===>
|
||||
case rnp of {
|
||||
0 => 0 ;
|
||||
1 => 0 ;
|
||||
2 => 0 ;
|
||||
3 => 1 ;
|
||||
4 => 1 ;
|
||||
5 => 1
|
||||
}
|
||||
</PRE>
|
||||
<P>
|
||||
To avoid the code bloat resulting from this, we have chosen to
|
||||
deal with records by a <B>currying</B> transformation:
|
||||
</P>
|
||||
<PRE>
|
||||
table {n : Number ; p : Person} {... ...}
|
||||
===>
|
||||
table Number {Sg => table Person {...} ; table Person {...}}
|
||||
</PRE>
|
||||
<P>
|
||||
This is performed when GFCC is generated. Selections with
|
||||
records have to be treated likewise,
|
||||
</P>
|
||||
<PRE>
|
||||
t ! r ===> t ! r.n ! r.p
|
||||
</PRE>
|
||||
<P></P>
|
||||
<H3>The representation of linearization types</H3>
|
||||
<P>
|
||||
Linearization types (<CODE>lincat</CODE>) are not needed when generating with
|
||||
GFCC, but they have been added to enable parser generation directly from
|
||||
GFCC. The linearization type definitions are shown as a part of the
|
||||
concrete syntax, by using terms to represent types. Here is the table
|
||||
showing how different linearization types are encoded.
|
||||
</P>
|
||||
<PRE>
|
||||
P* = max(P) -- parameter type
|
||||
{r1 : T1 ; ... ; rn : Tn}* = [T1*,...,Tn*] -- record
|
||||
(P => T)* = [T* ,...,T*] -- table, size(P) cases
|
||||
Str* = ()
|
||||
</PRE>
|
||||
<P>
|
||||
For example, the linearization type <CODE>present/CatEng.NP</CODE> is
|
||||
translated as follows:
|
||||
</P>
|
||||
<PRE>
|
||||
NP = {
|
||||
a : { -- 6 = 2*3 values
|
||||
n : {ParamX.Number} ; -- 2 values
|
||||
p : {ParamX.Person} -- 3 values
|
||||
} ;
|
||||
s : {ResEng.Case} => Str -- 3 values
|
||||
}
|
||||
|
||||
__NP = [[1,2],[(),(),()]]
|
||||
</PRE>
|
||||
<P></P>
|
||||
<H3>Running the compiler and the GFCC interpreter</H3>
|
||||
<P>
|
||||
GFCC generation is a part of the
|
||||
<A HREF="http://www.cs.chalmers.se/Cs/Research/Language-technology/darcs/GF/doc/darcs.html">developers' version</A>
|
||||
of GF since September 2006. To invoke the compiler, the flag
|
||||
<CODE>-printer=gfcc</CODE> to the command
|
||||
<CODE>pm = print_multi</CODE> is used. It is wise to recompile the grammar from
|
||||
source, since previously compiled libraries may not obey the canonical
|
||||
order of records.
|
||||
Here is an example, performed in
|
||||
<A HREF="../../../../../examples/bronzeage">example/bronzeage</A>.
|
||||
</P>
|
||||
<PRE>
|
||||
i -src -path=.:prelude:resource-1.0/* -optimize=all_subs BronzeageEng.gf
|
||||
i -src -path=.:prelude:resource-1.0/* -optimize=all_subs BronzeageGer.gf
|
||||
strip
|
||||
pm -printer=gfcc | wf bronze.gfcc
|
||||
</PRE>
|
||||
<P>
|
||||
There is also an experimental batch compiler, which does not use the GFC
|
||||
format or the record aliases. It can be produced by
|
||||
</P>
|
||||
<PRE>
|
||||
make gfc
|
||||
</PRE>
|
||||
<P>
|
||||
in <CODE>GF/src</CODE>, and invoked by
|
||||
</P>
|
||||
<PRE>
|
||||
gfc --make FILES
|
||||
</PRE>
|
||||
<P></P>
|
||||
<H2>The reference interpreter</H2>
|
||||
<P>
|
||||
The reference interpreter written in Haskell consists of the following files:
|
||||
</P>
|
||||
<PRE>
|
||||
-- source file for BNFC
|
||||
GFCC.cf -- labelled BNF grammar of gfcc
|
||||
|
||||
-- files generated by BNFC
|
||||
AbsGFCC.hs -- abstract syntax datatypes
|
||||
ErrM.hs -- error monad used internally
|
||||
LexGFCC.hs -- lexer of gfcc files
|
||||
ParGFCC.hs -- parser of gfcc files and syntax trees
|
||||
PrintGFCC.hs -- printer of gfcc files and syntax trees
|
||||
|
||||
-- hand-written files
|
||||
DataGFCC.hs -- grammar datatype, post-parser grammar creation
|
||||
Linearize.hs -- linearization and evaluation
|
||||
Macros.hs -- utilities abstracting away from GFCC datatypes
|
||||
Generate.hs -- random and exhaustive generation, generate-and-test parsing
|
||||
API.hs -- functionalities accessible in embedded GF applications
|
||||
Generate.hs -- random and exhaustive generation
|
||||
Shell.hs -- main function - a simple command interpreter
|
||||
</PRE>
|
||||
<P>
|
||||
It is included in the
|
||||
<A HREF="http://www.cs.chalmers.se/Cs/Research/Language-technology/darcs/GF/doc/darcs.html">developers' version</A>
|
||||
of GF, in the subdirectories <A HREF="../"><CODE>GF/src/GF/GFCC</CODE></A> and
|
||||
<A HREF="../../Devel"><CODE>GF/src/GF/Devel</CODE></A>.
|
||||
</P>
|
||||
<P>
|
||||
As of September 2007, default parsing in main GF uses GFCC (implemented by Krasimir
|
||||
Angelov). The interpreter uses the relevant modules
|
||||
</P>
|
||||
<PRE>
|
||||
GF/Conversions/SimpleToFCFG.hs -- generate parser from GFCC
|
||||
GF/Parsing/FCFG.hs -- run the parser
|
||||
</PRE>
|
||||
<P></P>
|
||||
<P>
|
||||
To compile the interpreter, type
|
||||
</P>
|
||||
<PRE>
|
||||
make gfcc
|
||||
</PRE>
|
||||
<P>
|
||||
in <CODE>GF/src</CODE>. To run it, type
|
||||
</P>
|
||||
<PRE>
|
||||
./gfcc <GFCC-file>
|
||||
</PRE>
|
||||
<P>
|
||||
The available commands are
|
||||
</P>
|
||||
<UL>
|
||||
<LI><CODE>gr <Cat> <Int></CODE>: generate a number of random trees in category.
|
||||
and show their linearizations in all languages
|
||||
<LI><CODE>grt <Cat> <Int></CODE>: generate a number of random trees in category.
|
||||
and show the trees and their linearizations in all languages
|
||||
<LI><CODE>gt <Cat> <Int></CODE>: generate a number of trees in category from smallest,
|
||||
and show their linearizations in all languages
|
||||
<LI><CODE>gtt <Cat> <Int></CODE>: generate a number of trees in category from smallest,
|
||||
and show the trees and their linearizations in all languages
|
||||
<LI><CODE>p <Lang> <Cat> <String></CODE>: parse a string into a set of trees
|
||||
<LI><CODE>lin <Tree></CODE>: linearize tree in all languages, also showing full records
|
||||
<LI><CODE>q</CODE>: terminate the system cleanly
|
||||
</UL>
|
||||
|
||||
<H2>Embedded formats</H2>
|
||||
<UL>
|
||||
<LI>JavaScript: compiler of linearization and abstract syntax
|
||||
<P></P>
|
||||
<LI>Haskell: compiler of abstract syntax and interpreter with parsing,
|
||||
linearization, and generation
|
||||
<P></P>
|
||||
<LI>C: compiler of linearization (old GFCC)
|
||||
<P></P>
|
||||
<LI>C++: embedded interpreter supporting linearization (old GFCC)
|
||||
</UL>
|
||||
|
||||
<H2>Some things to do</H2>
|
||||
<P>
|
||||
Support for dependent types, higher-order abstract syntax, and
|
||||
semantic definition in GFCC generation and interpreters.
|
||||
</P>
|
||||
<P>
|
||||
Replacing the entire GF shell by one based on GFCC.
|
||||
</P>
|
||||
<P>
|
||||
Interpreter in Java.
|
||||
</P>
|
||||
<P>
|
||||
Hand-written parsers for GFCC grammars to reduce code size
|
||||
(and efficiency?) of interpreters.
|
||||
</P>
|
||||
<P>
|
||||
Binary format and/or file compression of GFCC output.
|
||||
</P>
|
||||
<P>
|
||||
Syntax editor based on GFCC.
|
||||
</P>
|
||||
<P>
|
||||
Rewriting of resource libraries in order to exploit the
|
||||
word-suffix sharing better (depth-one tables, as in FM).
|
||||
</P>
|
||||
|
||||
<!-- html code generated by txt2tags 2.3 (http://txt2tags.sf.net) -->
|
||||
<!-- cmdline: txt2tags -thtml gfcc.txt -->
|
||||
</BODY></HTML>
|
||||
712
src/PGF/doc/gfcc.txt
Normal file
712
src/PGF/doc/gfcc.txt
Normal file
@@ -0,0 +1,712 @@
|
||||
The GFCC Grammar Format
|
||||
Aarne Ranta
|
||||
December 14, 2007
|
||||
|
||||
Author's address:
|
||||
[``http://www.cs.chalmers.se/~aarne`` http://www.cs.chalmers.se/~aarne]
|
||||
|
||||
% to compile: txt2tags -thtml --toc gfcc.txt
|
||||
|
||||
History:
|
||||
- 14 Dec 2007: simpler, Lisp-like concrete syntax of GFCC
|
||||
- 5 Oct 2007: new, better structured GFCC with full expressive power
|
||||
- 19 Oct: translation of lincats, new figures on C++
|
||||
- 3 Oct 2006: first version
|
||||
|
||||
|
||||
==What is GFCC==
|
||||
|
||||
GFCC is a low-level format for GF grammars. Its aim is to contain the minimum
|
||||
that is needed to process GF grammars at runtime. This minimality has three
|
||||
advantages:
|
||||
- compact grammar files and run-time objects
|
||||
- time and space efficient processing
|
||||
- simple definition of interpreters
|
||||
|
||||
|
||||
Thus we also want to call GFCC the **portable grammar format**.
|
||||
|
||||
The idea is that all embedded GF applications use GFCC.
|
||||
The GF system would be primarily used as a compiler and as a grammar
|
||||
development tool.
|
||||
|
||||
Since GFCC is implemented in BNFC, a parser of the format is readily
|
||||
available for C, C++, C#, Haskell, Java, and OCaml. Also an XML
|
||||
representation can be generated in BNFC. A
|
||||
[reference implementation ../]
|
||||
of linearization and some other functions has been written in Haskell.
|
||||
|
||||
|
||||
==GFCC vs. GFC==
|
||||
|
||||
GFCC is aimed to replace GFC as the run-time grammar format. GFC was designed
|
||||
to be a run-time format, but also to
|
||||
support separate compilation of grammars, i.e.
|
||||
to store the results of compiling
|
||||
individual GF modules. But this means that GFC has to contain extra information,
|
||||
such as type annotations, which is only needed in compilation and not at
|
||||
run-time. In particular, the pattern matching syntax and semantics of GFC is
|
||||
complex and therefore difficult to implement in new platforms.
|
||||
|
||||
Actually, GFC is planned to be omitted also as the target format of
|
||||
separate compilation, where plain GF (type annotated and partially evaluated)
|
||||
will be used instead. GFC provides only marginal advantages as a target format
|
||||
compared with GF, and it is therefore just extra weight to carry around this
|
||||
format.
|
||||
|
||||
The main differences of GFCC compared with GFC (and GF) can be
|
||||
summarized as follows:
|
||||
- there are no modules, and therefore no qualified names
|
||||
- a GFCC grammar is multilingual, and consists of a common abstract syntax
|
||||
together with one concrete syntax per language
|
||||
- records and tables are replaced by arrays
|
||||
- record labels and parameter values are replaced by integers
|
||||
- record projection and table selection are replaced by array indexing
|
||||
- even though the format does support dependent types and higher-order abstract
|
||||
syntax, there is no interpreter yet that does this
|
||||
|
||||
|
||||
|
||||
Here is an example of a GF grammar, consisting of three modules,
|
||||
as translated to GFCC. The representations are aligned;
|
||||
thus they do not completely
|
||||
reflect the order of judgements in GFCC files, which have different orders of
|
||||
blocks of judgements, and alphabetical sorting.
|
||||
```
|
||||
grammar Ex(Eng,Swe);
|
||||
|
||||
abstract Ex = { abstract {
|
||||
cat cat
|
||||
S ; NP ; VP ; NP[]; S[]; VP[];
|
||||
fun fun
|
||||
Pred : NP -> VP -> S ; Pred=[(($ 0! 1),(($ 1! 0)!($ 0! 0)))];
|
||||
She, They : NP ; She=[0,"she"];
|
||||
Sleep : VP ; They=[1,"they"];
|
||||
Sleep=[["sleeps","sleep"]];
|
||||
} } ;
|
||||
|
||||
concrete Eng of Ex = { concrete Eng {
|
||||
lincat lincat
|
||||
S = {s : Str} ; S=[()];
|
||||
NP = {s : Str ; n : Num} ; NP=[1,()];
|
||||
VP = {s : Num => Str} ; VP=[[(),()]];
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin lin
|
||||
Pred np vp = { Pred=[(($ 0! 1),(($ 1! 0)!($ 0! 0)))];
|
||||
s = np.s ++ vp.s ! np.n} ;
|
||||
She = {s = "she" ; n = Sg} ; She=[0,"she"];
|
||||
They = {s = "they" ; n = Pl} ; They = [1, "they"];
|
||||
Sleep = {s = table { Sleep=[["sleeps","sleep"]];
|
||||
Sg => "sleeps" ;
|
||||
Pl => "sleep"
|
||||
}
|
||||
} ;
|
||||
} } ;
|
||||
|
||||
concrete Swe of Ex = { concrete Swe {
|
||||
lincat lincat
|
||||
S = {s : Str} ; S=[()];
|
||||
NP = {s : Str} ; NP=[()];
|
||||
VP = {s : Str} ; VP=[()];
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin lin
|
||||
Pred np vp = { Pred = [(($0!0),($1!0))];
|
||||
s = np.s ++ vp.s} ;
|
||||
She = {s = "hon"} ; She = ["hon"];
|
||||
They = {s = "de"} ; They = ["de"];
|
||||
Sleep = {s = "sover"} ; Sleep = ["sover"];
|
||||
} } ;
|
||||
```
|
||||
|
||||
==The syntax of GFCC files==
|
||||
|
||||
The complete BNFC grammar, from which
|
||||
the rules in this section are taken, is in the file
|
||||
[``GF/GFCC/GFCC.cf`` ../DataGFCC.cf].
|
||||
|
||||
|
||||
===Top level===
|
||||
|
||||
A grammar has a header telling the name of the abstract syntax
|
||||
(often specifying an application domain), and the names of
|
||||
the concrete languages. The abstract syntax and the concrete
|
||||
syntaxes themselves follow.
|
||||
```
|
||||
Grm. Grammar ::=
|
||||
"grammar" CId "(" [CId] ")" ";"
|
||||
Abstract ";"
|
||||
[Concrete] ;
|
||||
|
||||
Abs. Abstract ::=
|
||||
"abstract" "{"
|
||||
"flags" [Flag]
|
||||
"fun" [FunDef]
|
||||
"cat" [CatDef]
|
||||
"}" ;
|
||||
|
||||
Cnc. Concrete ::=
|
||||
"concrete" CId "{"
|
||||
"flags" [Flag]
|
||||
"lin" [LinDef]
|
||||
"oper" [LinDef]
|
||||
"lincat" [LinDef]
|
||||
"lindef" [LinDef]
|
||||
"printname" [LinDef]
|
||||
"}" ;
|
||||
```
|
||||
This syntax organizes each module into a sequence of **fields**, such
|
||||
as flags, linearizations, operations, linearization types, etc.
|
||||
It is envisaged that particular applications can ignore some
|
||||
of the fields, typically so that earlier fields are more
|
||||
important than later ones.
|
||||
|
||||
The judgement forms have the following syntax.
|
||||
```
|
||||
Flg. Flag ::= CId "=" String ;
|
||||
Cat. CatDef ::= CId "[" [Hypo] "]" ;
|
||||
Fun. FunDef ::= CId ":" Type "=" Exp ;
|
||||
Lin. LinDef ::= CId "=" Term ;
|
||||
```
|
||||
For the run-time system, the reference implementation in Haskell
|
||||
uses a structure that gives efficient look-up:
|
||||
```
|
||||
data GFCC = GFCC {
|
||||
absname :: CId ,
|
||||
cncnames :: [CId] ,
|
||||
abstract :: Abstr ,
|
||||
concretes :: Map CId Concr
|
||||
}
|
||||
|
||||
data Abstr = Abstr {
|
||||
aflags :: Map CId String, -- value of a flag
|
||||
funs :: Map CId (Type,Exp), -- type and def of a fun
|
||||
cats :: Map CId [Hypo], -- context of a cat
|
||||
catfuns :: Map CId [CId] -- funs yielding a cat (redundant, for fast lookup)
|
||||
}
|
||||
|
||||
data Concr = Concr {
|
||||
flags :: Map CId String, -- value of a flag
|
||||
lins :: Map CId Term, -- lin of a fun
|
||||
opers :: Map CId Term, -- oper generated by subex elim
|
||||
lincats :: Map CId Term, -- lin type of a cat
|
||||
lindefs :: Map CId Term, -- lin default of a cat
|
||||
printnames :: Map CId Term -- printname of a cat or a fun
|
||||
}
|
||||
```
|
||||
These definitions are from [``GF/GFCC/DataGFCC.hs`` ../DataGFCC.hs].
|
||||
|
||||
Identifiers (``CId``) are like ``Ident`` in GF, except that
|
||||
the compiler produces constants prefixed with ``_`` in
|
||||
the common subterm elimination optimization.
|
||||
```
|
||||
token CId (('_' | letter) (letter | digit | '\'' | '_')*) ;
|
||||
```
|
||||
|
||||
|
||||
===Abstract syntax===
|
||||
|
||||
Types are first-order function types built from argument type
|
||||
contexts and value types, which are applications of
|
||||
category symbols. Syntax trees (``Exp``) are
|
||||
rose trees with nodes consisting of a head (``Atom``) and
|
||||
bound variables (``CId``).
|
||||
```
|
||||
DTyp. Type ::= "[" [Hypo] "]" CId [Exp] ;
|
||||
DTr. Exp ::= "[" "(" [CId] ")" Atom [Exp] "]" ;
|
||||
Hyp. Hypo ::= CId ":" Type ;
|
||||
```
|
||||
The head Atom is either a function
|
||||
constant, a bound variable, or a metavariable, or a string, integer, or float
|
||||
literal.
|
||||
```
|
||||
AC. Atom ::= CId ;
|
||||
AS. Atom ::= String ;
|
||||
AI. Atom ::= Integer ;
|
||||
AF. Atom ::= Double ;
|
||||
AM. Atom ::= "?" Integer ;
|
||||
```
|
||||
The context-free types and trees of the "old GFCC" are special
|
||||
cases, which can be defined as follows:
|
||||
```
|
||||
Typ. Type ::= [CId] "->" CId
|
||||
Typ args val = DTyp [Hyp (CId "_") arg | arg <- args] val
|
||||
|
||||
Tr. Exp ::= "(" CId [Exp] ")"
|
||||
Tr fun exps = DTr [] fun exps
|
||||
```
|
||||
To store semantic (``def``) definitions by cases, the following expression
|
||||
form is provided, but it is only meaningful in the last field of a function
|
||||
declaration in an abstract syntax:
|
||||
```
|
||||
EEq. Exp ::= "{" [Equation] "}" ;
|
||||
Equ. Equation ::= [Exp] "->" Exp ;
|
||||
```
|
||||
Notice that expressions are used to encode patterns. Primitive notions
|
||||
(the default semantics in GF) are encoded as empty sets of equations
|
||||
(``[]``). For a constructor (canonical form) of a category ``C``, we
|
||||
aim to use the encoding as the application ``(_constr C)``.
|
||||
|
||||
|
||||
|
||||
===Concrete syntax===
|
||||
|
||||
Linearization terms (``Term``) are built as follows.
|
||||
Constructor names are shown to make the later code
|
||||
examples readable.
|
||||
```
|
||||
R. Term ::= "[" [Term] "]" ; -- array (record/table)
|
||||
P. Term ::= "(" Term "!" Term ")" ; -- access to field (projection/selection)
|
||||
S. Term ::= "(" [Term] ")" ; -- concatenated sequence
|
||||
K. Term ::= Tokn ; -- token
|
||||
V. Term ::= "$" Integer ; -- argument (subtree)
|
||||
C. Term ::= Integer ; -- array index (label/parameter value)
|
||||
FV. Term ::= "[|" [Term] "|]" ; -- free variation
|
||||
TM. Term ::= "?" ; -- linearization of metavariable
|
||||
```
|
||||
Tokens are strings or (maybe obsolescent) prefix-dependent
|
||||
variant lists.
|
||||
```
|
||||
KS. Tokn ::= String ;
|
||||
KP. Tokn ::= "[" "pre" [String] "[" [Variant] "]" "]" ;
|
||||
Var. Variant ::= [String] "/" [String] ;
|
||||
```
|
||||
Two special forms of terms are introduced by the compiler
|
||||
as optimizations. They can in principle be eliminated, but
|
||||
their presence makes grammars much more compact. Their semantics
|
||||
will be explained in a later section.
|
||||
```
|
||||
F. Term ::= CId ; -- global constant
|
||||
W. Term ::= "(" String "+" Term ")" ; -- prefix + suffix table
|
||||
```
|
||||
There is also a deprecated form of "record parameter alias",
|
||||
```
|
||||
RP. Term ::= "(" Term "@" Term ")"; -- DEPRECATED
|
||||
```
|
||||
which will be removed when the migration to new GFCC is complete.
|
||||
|
||||
|
||||
|
||||
==The semantics of concrete syntax terms==
|
||||
|
||||
The code in this section is from [``GF/GFCC/Linearize.hs`` ../Linearize.hs].
|
||||
|
||||
|
||||
===Linearization and realization===
|
||||
|
||||
The linearization algorithm is essentially the same as in
|
||||
GFC: a tree is linearized by evaluating its linearization term
|
||||
in the environment of the linearizations of the subtrees.
|
||||
Literal atoms are linearized in the obvious way.
|
||||
The function also needs to know the language (i.e. concrete syntax)
|
||||
in which linearization is performed.
|
||||
```
|
||||
linExp :: GFCC -> CId -> Exp -> Term
|
||||
linExp gfcc lang tree@(DTr _ at trees) = case at of
|
||||
AC fun -> comp (Prelude.map lin trees) $ look fun
|
||||
AS s -> R [kks (show s)] -- quoted
|
||||
AI i -> R [kks (show i)]
|
||||
AF d -> R [kks (show d)]
|
||||
AM -> TM
|
||||
where
|
||||
lin = linExp gfcc lang
|
||||
comp = compute gfcc lang
|
||||
look = lookLin gfcc lang
|
||||
```
|
||||
TODO: bindings must be supported.
|
||||
|
||||
The result of linearization is usually a record, which is realized as
|
||||
a string using the following algorithm.
|
||||
```
|
||||
realize :: Term -> String
|
||||
realize trm = case trm of
|
||||
R (t:_) -> realize t
|
||||
S ss -> unwords $ Prelude.map realize ss
|
||||
K (KS s) -> s
|
||||
K (KP s _) -> unwords s ---- prefix choice TODO
|
||||
W s t -> s ++ realize t
|
||||
FV (t:_) -> realize t
|
||||
TM -> "?"
|
||||
```
|
||||
Notice that realization always picks the first field of a record.
|
||||
If a linearization type has more than one field, the first field
|
||||
does not necessarily contain the desired string.
|
||||
Also notice that the order of record fields in GFCC is not necessarily
|
||||
the same as in GF source.
|
||||
|
||||
|
||||
===Term evaluation===
|
||||
|
||||
Evaluation follows call-by-value order, with two environments
|
||||
needed:
|
||||
- the grammar (a concrete syntax) to give the global constants
|
||||
- an array of terms to give the subtree linearizations
|
||||
|
||||
|
||||
The code is presented in one-level pattern matching, to
|
||||
enable reimplementations in languages that do not permit
|
||||
deep patterns (such as Java and C++).
|
||||
```
|
||||
compute :: GFCC -> CId -> [Term] -> Term -> Term
|
||||
compute gfcc lang args = comp where
|
||||
comp trm = case trm of
|
||||
P r p -> proj (comp r) (comp p)
|
||||
W s t -> W s (comp t)
|
||||
R ts -> R $ Prelude.map comp ts
|
||||
V i -> idx args (fromInteger i) -- already computed
|
||||
F c -> comp $ look c -- not computed (if contains V)
|
||||
FV ts -> FV $ Prelude.map comp ts
|
||||
S ts -> S $ Prelude.filter (/= S []) $ Prelude.map comp ts
|
||||
_ -> trm
|
||||
|
||||
look = lookOper gfcc lang
|
||||
|
||||
idx xs i = xs !! i
|
||||
|
||||
proj r p = case (r,p) of
|
||||
(_, FV ts) -> FV $ Prelude.map (proj r) ts
|
||||
(FV ts, _ ) -> FV $ Prelude.map (\t -> proj t p) ts
|
||||
(W s t, _) -> kks (s ++ getString (proj t p))
|
||||
_ -> comp $ getField r (getIndex p)
|
||||
|
||||
getString t = case t of
|
||||
K (KS s) -> s
|
||||
_ -> trace ("ERROR in grammar compiler: string from "++ show t) "ERR"
|
||||
|
||||
getIndex t = case t of
|
||||
C i -> fromInteger i
|
||||
RP p _ -> getIndex p
|
||||
TM -> 0 -- default value for parameter
|
||||
_ -> trace ("ERROR in grammar compiler: index from " ++ show t) 0
|
||||
|
||||
getField t i = case t of
|
||||
R rs -> idx rs i
|
||||
RP _ r -> getField r i
|
||||
TM -> TM
|
||||
_ -> trace ("ERROR in grammar compiler: field from " ++ show t) t
|
||||
```
|
||||
|
||||
===The special term constructors===
|
||||
|
||||
The two forms introduced by the compiler may need a special
|
||||
explanation.
|
||||
|
||||
Global constants
|
||||
```
|
||||
Term ::= CId ;
|
||||
```
|
||||
are shorthands for complex terms. They are produced by the
|
||||
compiler by (iterated) **common subexpression elimination**.
|
||||
They are often more powerful than hand-devised code sharing in the source
|
||||
code. They could be computed off-line by replacing each identifier by
|
||||
its definition.
|
||||
|
||||
**Prefix-suffix tables**
|
||||
```
|
||||
Term ::= "(" String "+" Term ")" ;
|
||||
```
|
||||
represent tables of word forms divided to the longest common prefix
|
||||
and its array of suffixes. In the example grammar above, we have
|
||||
```
|
||||
Sleep = [("sleep" + ["s",""])]
|
||||
```
|
||||
which in fact is equal to the array of full forms
|
||||
```
|
||||
["sleeps", "sleep"]
|
||||
```
|
||||
The power of this construction comes from the fact that suffix sets
|
||||
tend to be repeated in a language, and can therefore be collected
|
||||
by common subexpression elimination. It is this technique that
|
||||
explains the syntax used rather than the more accurate
|
||||
```
|
||||
"(" String "+" [String] ")"
|
||||
```
|
||||
since we want the suffix part to be a ``Term`` for the optimization to
|
||||
take effect.
|
||||
|
||||
|
||||
|
||||
==Compiling to GFCC==
|
||||
|
||||
Compilation to GFCC is performed by the GF grammar compiler, and
|
||||
GFCC interpreters need not know what it does. For grammar writers,
|
||||
however, it might be interesting to know what happens to the grammars
|
||||
in the process.
|
||||
|
||||
The compilation phases are the following
|
||||
+ type check and partially evaluate GF source
|
||||
+ create a symbol table mapping the GF parameter and record types to
|
||||
fixed-size arrays, and parameter values and record labels to integers
|
||||
+ traverse the linearization rules replacing parameters and labels by integers
|
||||
+ reorganize the created GF grammar so that it has just one abstract syntax
|
||||
and one concrete syntax per language
|
||||
+ TODO: apply UTF8 encoding to the grammar, if not yet applied (this is told by the
|
||||
``coding`` flag)
|
||||
+ translate the GF grammar object to a GFCC grammar object, using a simple
|
||||
compositional mapping
|
||||
+ perform the word-suffix optimization on GFCC linearization terms
|
||||
+ perform subexpression elimination on each concrete syntax module
|
||||
+ print out the GFCC code
|
||||
|
||||
|
||||
|
||||
|
||||
===Problems in GFCC compilation===
|
||||
|
||||
Two major problems had to be solved in compiling GF to GFCC:
|
||||
- consistent order of tables and records, to permit the array translation
|
||||
- run-time variables in complex parameter values.
|
||||
|
||||
|
||||
The current implementation is still experimental and may fail
|
||||
to generate correct code. Any errors remaining are likely to be
|
||||
related to the two problems just mentioned.
|
||||
|
||||
The order problem is solved in slightly different ways for tables and records.
|
||||
In both cases, **eta expansion** is used to establish a
|
||||
canonical order. Tables are ordered by applying the preorder induced
|
||||
by ``param`` definitions. Records are ordered by sorting them by labels.
|
||||
This means that
|
||||
e.g. the ``s`` field will in general no longer appear as the first
|
||||
field, even if it does so in the GF source code. But relying on the
|
||||
order of fields in a labelled record would be misplaced anyway.
|
||||
|
||||
The canonical form of records is further complicated by lock fields,
|
||||
i.e. dummy fields of form ``lock_C = <>``, which are added to grammar
|
||||
libraries to force intensionality of linearization types. The problem
|
||||
is that the absence of a lock field only generates a warning, not
|
||||
an error. Therefore a GF grammar can contain objects of the same
|
||||
type with and without a lock field. This problem was solved in GFCC
|
||||
generation by just removing all lock fields (defined as fields whose
|
||||
type is the empty record type). This has the further advantage of
|
||||
(slightly) reducing the grammar size. More importantly, it is safe
|
||||
to remove lock fields, because they are never used in computation,
|
||||
and because intensional types are only needed in grammars reused
|
||||
as libraries, not in grammars used at runtime.
|
||||
|
||||
While the order problem is rather bureaucratic in nature, run-time
|
||||
variables are an interesting problem. They arise in the presence
|
||||
of complex parameter values, created by argument-taking constructors
|
||||
and parameter records. To give an example, consider the GF parameter
|
||||
type system
|
||||
```
|
||||
Number = Sg | Pl ;
|
||||
Person = P1 | P2 | P3 ;
|
||||
Agr = Ag Number Person ;
|
||||
```
|
||||
The values can be translated to integers in the expected way,
|
||||
```
|
||||
Sg = 0, Pl = 1
|
||||
P1 = 0, P2 = 1, P3 = 2
|
||||
Ag Sg P1 = 0, Ag Sg P2 = 1, Ag Sg P3 = 2,
|
||||
Ag Pl P1 = 3, Ag Pl P2 = 4, Ag Pl P3 = 5
|
||||
```
|
||||
However, an argument of ``Agr`` can be a run-time variable, as in
|
||||
```
|
||||
Ag np.n P3
|
||||
```
|
||||
This expression must first be translated to a case expression,
|
||||
```
|
||||
case np.n of {
|
||||
0 => 2 ;
|
||||
1 => 5
|
||||
}
|
||||
```
|
||||
which can then be translated to the GFCC term
|
||||
```
|
||||
([2,5] ! ($0 ! $1))
|
||||
```
|
||||
assuming that the variable ``np`` is the first argument and that its
|
||||
``Number`` field is the second in the record.
|
||||
|
||||
This transformation of course has to be performed recursively, since
|
||||
there can be several run-time variables in a parameter value:
|
||||
```
|
||||
Ag np.n np.p
|
||||
```
|
||||
A similar transformation would be possible to deal with the double
|
||||
role of parameter records discussed above. Thus the type
|
||||
```
|
||||
RNP = {n : Number ; p : Person}
|
||||
```
|
||||
could be uniformly translated into the set ``{0,1,2,3,4,5}``
|
||||
as ``Agr`` above. Selections would be simple instances of indexing.
|
||||
But any projection from the record should be translated into
|
||||
a case expression,
|
||||
```
|
||||
rnp.n ===>
|
||||
case rnp of {
|
||||
0 => 0 ;
|
||||
1 => 0 ;
|
||||
2 => 0 ;
|
||||
3 => 1 ;
|
||||
4 => 1 ;
|
||||
5 => 1
|
||||
}
|
||||
```
|
||||
To avoid the code bloat resulting from this, we have chosen to
|
||||
deal with records by a **currying** transformation:
|
||||
```
|
||||
table {n : Number ; p : Person} {... ...}
|
||||
===>
|
||||
table Number {Sg => table Person {...} ; Pl => table Person {...}}
|
||||
```
|
||||
This is performed when GFCC is generated. Selections with
|
||||
records have to be treated likewise,
|
||||
```
|
||||
t ! r ===> t ! r.n ! r.p
|
||||
```
|
||||
|
||||
|
||||
===The representation of linearization types===
|
||||
|
||||
Linearization types (``lincat``) are not needed when generating with
|
||||
GFCC, but they have been added to enable parser generation directly from
|
||||
GFCC. The linearization type definitions are shown as a part of the
|
||||
concrete syntax, by using terms to represent types. Here is the table
|
||||
showing how different linearization types are encoded.
|
||||
```
|
||||
P* = max(P) -- parameter type
|
||||
{r1 : T1 ; ... ; rn : Tn}* = [T1*,...,Tn*] -- record
|
||||
(P => T)* = [T* ,...,T*] -- table, size(P) cases
|
||||
Str* = ()
|
||||
```
|
||||
For example, the linearization type ``present/CatEng.NP`` is
|
||||
translated as follows:
|
||||
```
|
||||
NP = {
|
||||
a : { -- 6 = 2*3 values
|
||||
n : {ParamX.Number} ; -- 2 values
|
||||
p : {ParamX.Person} -- 3 values
|
||||
} ;
|
||||
s : {ResEng.Case} => Str -- 3 values
|
||||
}
|
||||
|
||||
__NP = [[1,2],[(),(),()]]
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
===Running the compiler and the GFCC interpreter===
|
||||
|
||||
GFCC generation is a part of the
|
||||
[developers' version http://www.cs.chalmers.se/Cs/Research/Language-technology/darcs/GF/doc/darcs.html]
|
||||
of GF since September 2006. To invoke the compiler, the flag
|
||||
``-printer=gfcc`` to the command
|
||||
``pm = print_multi`` is used. It is wise to recompile the grammar from
|
||||
source, since previously compiled libraries may not obey the canonical
|
||||
order of records.
|
||||
Here is an example, performed in
|
||||
[example/bronzeage ../../../../../examples/bronzeage].
|
||||
```
|
||||
i -src -path=.:prelude:resource-1.0/* -optimize=all_subs BronzeageEng.gf
|
||||
i -src -path=.:prelude:resource-1.0/* -optimize=all_subs BronzeageGer.gf
|
||||
strip
|
||||
pm -printer=gfcc | wf bronze.gfcc
|
||||
```
|
||||
There is also an experimental batch compiler, which does not use the GFC
|
||||
format or the record aliases. It can be produced by
|
||||
```
|
||||
make gfc
|
||||
```
|
||||
in ``GF/src``, and invoked by
|
||||
```
|
||||
gfc --make FILES
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
==The reference interpreter==
|
||||
|
||||
The reference interpreter written in Haskell consists of the following files:
|
||||
```
|
||||
-- source file for BNFC
|
||||
GFCC.cf -- labelled BNF grammar of gfcc
|
||||
|
||||
-- files generated by BNFC
|
||||
AbsGFCC.hs -- abstract syntax datatypes
|
||||
ErrM.hs -- error monad used internally
|
||||
LexGFCC.hs -- lexer of gfcc files
|
||||
ParGFCC.hs -- parser of gfcc files and syntax trees
|
||||
PrintGFCC.hs -- printer of gfcc files and syntax trees
|
||||
|
||||
-- hand-written files
|
||||
DataGFCC.hs -- grammar datatype, post-parser grammar creation
|
||||
Linearize.hs -- linearization and evaluation
|
||||
Macros.hs -- utilities abstracting away from GFCC datatypes
|
||||
Generate.hs -- random and exhaustive generation, generate-and-test parsing
|
||||
API.hs -- functionalities accessible in embedded GF applications
|
||||
Generate.hs -- random and exhaustive generation
|
||||
Shell.hs -- main function - a simple command interpreter
|
||||
```
|
||||
It is included in the
|
||||
[developers' version http://www.cs.chalmers.se/Cs/Research/Language-technology/darcs/GF/doc/darcs.html]
|
||||
of GF, in the subdirectories [``GF/src/GF/GFCC`` ../] and
|
||||
[``GF/src/GF/Devel`` ../../Devel].
|
||||
|
||||
As of September 2007, default parsing in main GF uses GFCC (implemented by Krasimir
|
||||
Angelov). The interpreter uses the relevant modules
|
||||
```
|
||||
GF/Conversions/SimpleToFCFG.hs -- generate parser from GFCC
|
||||
GF/Parsing/FCFG.hs -- run the parser
|
||||
```
|
||||
|
||||
|
||||
To compile the interpreter, type
|
||||
```
|
||||
make gfcc
|
||||
```
|
||||
in ``GF/src``. To run it, type
|
||||
```
|
||||
./gfcc <GFCC-file>
|
||||
```
|
||||
The available commands are
|
||||
- ``gr <Cat> <Int>``: generate a number of random trees in category,
|
||||
and show their linearizations in all languages
|
||||
- ``grt <Cat> <Int>``: generate a number of random trees in category,
|
||||
and show the trees and their linearizations in all languages
|
||||
- ``gt <Cat> <Int>``: generate a number of trees in category from smallest,
|
||||
and show their linearizations in all languages
|
||||
- ``gtt <Cat> <Int>``: generate a number of trees in category from smallest,
|
||||
and show the trees and their linearizations in all languages
|
||||
- ``p <Lang> <Cat> <String>``: parse a string into a set of trees
|
||||
- ``lin <Tree>``: linearize tree in all languages, also showing full records
|
||||
- ``q``: terminate the system cleanly
|
||||
|
||||
|
||||
|
||||
==Embedded formats==
|
||||
|
||||
- JavaScript: compiler of linearization and abstract syntax
|
||||
|
||||
- Haskell: compiler of abstract syntax and interpreter with parsing,
|
||||
linearization, and generation
|
||||
|
||||
- C: compiler of linearization (old GFCC)
|
||||
|
||||
- C++: embedded interpreter supporting linearization (old GFCC)
|
||||
|
||||
|
||||
|
||||
==Some things to do==
|
||||
|
||||
Support for dependent types, higher-order abstract syntax, and
|
||||
semantic definition in GFCC generation and interpreters.
|
||||
|
||||
Replacing the entire GF shell by one based on GFCC.
|
||||
|
||||
Interpreter in Java.
|
||||
|
||||
Hand-written parsers for GFCC grammars to reduce code size
|
||||
(and efficiency?) of interpreters.
|
||||
|
||||
Binary format and/or file compression of GFCC output.
|
||||
|
||||
Syntax editor based on GFCC.
|
||||
|
||||
Rewriting of resource libraries in order to exploit the
|
||||
word-suffix sharing better (depth-one tables, as in FM).
|
||||
|
||||
50
src/PGF/doc/old-GFCC.cf
Normal file
50
src/PGF/doc/old-GFCC.cf
Normal file
@@ -0,0 +1,50 @@
|
||||
Grm. Grammar ::= Header ";" Abstract ";" [Concrete] ;
|
||||
Hdr. Header ::= "grammar" CId "(" [CId] ")" ;
|
||||
Abs. Abstract ::= "abstract" "{" [AbsDef] "}" ;
|
||||
Cnc. Concrete ::= "concrete" CId "{" [CncDef] "}" ;
|
||||
|
||||
Fun. AbsDef ::= CId ":" Type "=" Exp ;
|
||||
--AFl. AbsDef ::= "%" CId "=" String ; -- flag
|
||||
Lin. CncDef ::= CId "=" Term ;
|
||||
--CFl. CncDef ::= "%" CId "=" String ; -- flag
|
||||
|
||||
Typ. Type ::= [CId] "->" CId ;
|
||||
Tr. Exp ::= "(" Atom [Exp] ")" ;
|
||||
AC. Atom ::= CId ;
|
||||
AS. Atom ::= String ;
|
||||
AI. Atom ::= Integer ;
|
||||
AF. Atom ::= Double ;
|
||||
AM. Atom ::= "?" ;
|
||||
trA. Exp ::= Atom ;
|
||||
define trA a = Tr a [] ;
|
||||
|
||||
R. Term ::= "[" [Term] "]" ; -- record/table
|
||||
P. Term ::= "(" Term "!" Term ")" ; -- projection/selection
|
||||
S. Term ::= "(" [Term] ")" ; -- sequence with ++
|
||||
K. Term ::= Tokn ; -- token
|
||||
V. Term ::= "$" Integer ; -- argument
|
||||
C. Term ::= Integer ; -- parameter value/label
|
||||
F. Term ::= CId ; -- global constant
|
||||
FV. Term ::= "[|" [Term] "|]" ; -- free variation
|
||||
W. Term ::= "(" String "+" Term ")" ; -- prefix + suffix table
|
||||
RP. Term ::= "(" Term "@" Term ")"; -- record parameter alias
|
||||
TM. Term ::= "?" ; -- lin of metavariable
|
||||
|
||||
L. Term ::= "(" CId "->" Term ")" ; -- lambda abstracted table
|
||||
BV. Term ::= "#" CId ; -- lambda-bound variable
|
||||
|
||||
KS. Tokn ::= String ;
|
||||
KP. Tokn ::= "[" "pre" [String] "[" [Variant] "]" "]" ;
|
||||
Var. Variant ::= [String] "/" [String] ;
|
||||
|
||||
|
||||
terminator Concrete ";" ;
|
||||
terminator AbsDef ";" ;
|
||||
terminator CncDef ";" ;
|
||||
separator CId "," ;
|
||||
separator Term "," ;
|
||||
terminator Exp "" ;
|
||||
terminator String "" ;
|
||||
separator Variant "," ;
|
||||
|
||||
token CId (('_' | letter) (letter | digit | '\'' | '_')*) ;
|
||||
656
src/PGF/doc/old-gfcc.txt
Normal file
656
src/PGF/doc/old-gfcc.txt
Normal file
@@ -0,0 +1,656 @@
|
||||
The GFCC Grammar Format
|
||||
Aarne Ranta
|
||||
October 19, 2006
|
||||
|
||||
Author's address:
|
||||
[``http://www.cs.chalmers.se/~aarne`` http://www.cs.chalmers.se/~aarne]
|
||||
|
||||
% to compile: txt2tags -thtml --toc gfcc.txt
|
||||
|
||||
History:
|
||||
- 19 Oct: translation of lincats, new figures on C++
|
||||
- 3 Oct 2006: first version
|
||||
|
||||
|
||||
==What is GFCC==
|
||||
|
||||
GFCC is a low-level format for GF grammars. Its aim is to contain the minimum
|
||||
that is needed to process GF grammars at runtime. This minimality has three
|
||||
advantages:
|
||||
- compact grammar files and run-time objects
|
||||
- time and space efficient processing
|
||||
- simple definition of interpreters
|
||||
|
||||
|
||||
The idea is that all embedded GF applications are compiled to GFCC.
|
||||
The GF system would be primarily used as a compiler and as a grammar
|
||||
development tool.
|
||||
|
||||
Since GFCC is implemented in BNFC, a parser of the format is readily
|
||||
available for C, C++, Haskell, Java, and OCaml. Also an XML
|
||||
representation is generated in BNFC. A
|
||||
[reference implementation ../]
|
||||
of linearization and some other functions has been written in Haskell.
|
||||
|
||||
|
||||
==GFCC vs. GFC==
|
||||
|
||||
GFCC is aimed to replace GFC as the run-time grammar format. GFC was designed
|
||||
to be a run-time format, but also to
|
||||
support separate compilation of grammars, i.e.
|
||||
to store the results of compiling
|
||||
individual GF modules. But this means that GFC has to contain extra information,
|
||||
such as type annotations, which is only needed in compilation and not at
|
||||
run-time. In particular, the pattern matching syntax and semantics of GFC is
|
||||
complex and therefore difficult to implement in new platforms.
|
||||
|
||||
The main differences of GFCC compared with GFC can be summarized as follows:
|
||||
- there are no modules, and therefore no qualified names
|
||||
- a GFCC grammar is multilingual, and consists of a common abstract syntax
|
||||
together with one concrete syntax per language
|
||||
- records and tables are replaced by arrays
|
||||
- record labels and parameter values are replaced by integers
|
||||
- record projection and table selection are replaced by array indexing
|
||||
- there is (so far) no support for dependent types or higher-order abstract
|
||||
syntax (which would be easy to add, but make interpreters much more difficult
|
||||
to write)
|
||||
|
||||
|
||||
Here is an example of a GF grammar, consisting of three modules,
|
||||
as translated to GFCC. The representations are aligned, with the exceptions
|
||||
due to the alphabetical sorting of GFCC grammars.
|
||||
```
|
||||
grammar Ex(Eng,Swe);
|
||||
|
||||
abstract Ex = { abstract {
|
||||
cat
|
||||
S ; NP ; VP ;
|
||||
fun
|
||||
Pred : NP -> VP -> S ; Pred : NP,VP -> S = (Pred);
|
||||
She, They : NP ; She : -> NP = (She);
|
||||
Sleep : VP ; Sleep : -> VP = (Sleep);
|
||||
They : -> NP = (They);
|
||||
} } ;
|
||||
|
||||
concrete Eng of Ex = { concrete Eng {
|
||||
lincat
|
||||
S = {s : Str} ;
|
||||
NP = {s : Str ; n : Num} ;
|
||||
VP = {s : Num => Str} ;
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin
|
||||
Pred np vp = { Pred = [(($0!1),(($1!0)!($0!0)))];
|
||||
s = np.s ++ vp.s ! np.n} ;
|
||||
She = {s = "she" ; n = Sg} ; She = [0, "she"];
|
||||
They = {s = "they" ; n = Pl} ;
|
||||
Sleep = {s = table { Sleep = [("sleep" + ["s",""])];
|
||||
Sg => "sleeps" ;
|
||||
Pl => "sleep" They = [1, "they"];
|
||||
} } ;
|
||||
} ;
|
||||
}
|
||||
|
||||
concrete Swe of Ex = { concrete Swe {
|
||||
lincat
|
||||
S = {s : Str} ;
|
||||
NP = {s : Str} ;
|
||||
VP = {s : Str} ;
|
||||
param
|
||||
Num = Sg | Pl ;
|
||||
lin
|
||||
Pred np vp = { Pred = [(($0!0),($1!0))];
|
||||
s = np.s ++ vp.s} ;
|
||||
She = {s = "hon"} ; She = ["hon"];
|
||||
They = {s = "de"} ; They = ["de"];
|
||||
Sleep = {s = "sover"} ; Sleep = ["sover"];
|
||||
} } ;
|
||||
```
|
||||
|
||||
==The syntax of GFCC files==
|
||||
|
||||
===Top level===
|
||||
|
||||
A grammar has a header telling the name of the abstract syntax
|
||||
(often specifying an application domain), and the names of
|
||||
the concrete languages. The abstract syntax and the concrete
|
||||
syntaxes themselves follow.
|
||||
```
|
||||
Grammar ::= Header ";" Abstract ";" [Concrete] ;
|
||||
Header ::= "grammar" CId "(" [CId] ")" ;
|
||||
Abstract ::= "abstract" "{" [AbsDef] "}" ;
|
||||
Concrete ::= "concrete" CId "{" [CncDef] "}" ;
|
||||
```
|
||||
Abstract syntax judgements give typings and semantic definitions.
|
||||
Concrete syntax judgements give linearizations.
|
||||
```
|
||||
AbsDef ::= CId ":" Type "=" Exp ;
|
||||
CncDef ::= CId "=" Term ;
|
||||
```
|
||||
Also flags are possible, local to each "module" (i.e. abstract and concretes).
|
||||
```
|
||||
AbsDef ::= "%" CId "=" String ;
|
||||
CncDef ::= "%" CId "=" String ;
|
||||
```
|
||||
For the run-time system, the reference implementation in Haskell
|
||||
uses a structure that gives efficient look-up:
|
||||
```
|
||||
data GFCC = GFCC {
|
||||
absname :: CId ,
|
||||
cncnames :: [CId] ,
|
||||
abstract :: Abstr ,
|
||||
concretes :: Map CId Concr
|
||||
}
|
||||
|
||||
data Abstr = Abstr {
|
||||
funs :: Map CId Type, -- find the type of a fun
|
||||
cats :: Map CId [CId] -- find the funs giving a cat
|
||||
}
|
||||
|
||||
type Concr = Map CId Term
|
||||
```
|
||||
|
||||
|
||||
===Abstract syntax===
|
||||
|
||||
Types are first-order function types built from
|
||||
category symbols. Syntax trees (``Exp``) are
|
||||
rose trees with the head (``Atom``) either a function
|
||||
constant, a metavariable, or a string, integer, or float
|
||||
literal.
|
||||
```
|
||||
Type ::= [CId] "->" CId ;
|
||||
Exp ::= "(" Atom [Exp] ")" ;
|
||||
Atom ::= CId ; -- function constant
|
||||
Atom ::= "?" ; -- metavariable
|
||||
Atom ::= String ; -- string literal
|
||||
Atom ::= Integer ; -- integer literal
|
||||
Atom ::= Double ; -- float literal
|
||||
```
|
||||
|
||||
|
||||
===Concrete syntax===
|
||||
|
||||
Linearization terms (``Term``) are built as follows.
|
||||
Constructor names are shown to make the later code
|
||||
examples readable.
|
||||
```
|
||||
R. Term ::= "[" [Term] "]" ; -- array
|
||||
P. Term ::= "(" Term "!" Term ")" ; -- access to indexed field
|
||||
S. Term ::= "(" [Term] ")" ; -- sequence with ++
|
||||
K. Term ::= Tokn ; -- token
|
||||
V. Term ::= "$" Integer ; -- argument
|
||||
C. Term ::= Integer ; -- array index
|
||||
FV. Term ::= "[|" [Term] "|]" ; -- free variation
|
||||
TM. Term ::= "?" ; -- linearization of metavariable
|
||||
```
|
||||
Tokens are strings or (maybe obsolescent) prefix-dependent
|
||||
variant lists.
|
||||
```
|
||||
KS. Tokn ::= String ;
|
||||
KP. Tokn ::= "[" "pre" [String] "[" [Variant] "]" "]" ;
|
||||
Var. Variant ::= [String] "/" [String] ;
|
||||
```
|
||||
Three special forms of terms are introduced by the compiler
|
||||
as optimizations. They can in principle be eliminated, but
|
||||
their presence makes grammars much more compact. Their semantics
|
||||
will be explained in a later section.
|
||||
```
|
||||
F. Term ::= CId ; -- global constant
|
||||
W. Term ::= "(" String "+" Term ")" ; -- prefix + suffix table
|
||||
RP. Term ::= "(" Term "@" Term ")"; -- record parameter alias
|
||||
```
|
||||
Identifiers are like ``Ident`` in GF and GFC, except that
|
||||
the compiler produces constants prefixed with ``_`` in
|
||||
the common subterm elimination optimization.
|
||||
```
|
||||
token CId (('_' | letter) (letter | digit | '\'' | '_')*) ;
|
||||
```
|
||||
|
||||
|
||||
==The semantics of concrete syntax terms==
|
||||
|
||||
===Linearization and realization===
|
||||
|
||||
The linearization algorithm is essentially the same as in
|
||||
GFC: a tree is linearized by evaluating its linearization term
|
||||
in the environment of the linearizations of the subtrees.
|
||||
Literal atoms are linearized in the obvious way.
|
||||
The function also needs to know the language (i.e. concrete syntax)
|
||||
in which linearization is performed.
|
||||
```
|
||||
linExp :: GFCC -> CId -> Exp -> Term
|
||||
linExp mcfg lang tree@(Tr at trees) = case at of
|
||||
AC fun -> comp (Prelude.map lin trees) $ look fun
|
||||
AS s -> R [kks (show s)] -- quoted
|
||||
AI i -> R [kks (show i)]
|
||||
AF d -> R [kks (show d)]
|
||||
AM -> TM
|
||||
where
|
||||
lin = linExp mcfg lang
|
||||
comp = compute mcfg lang
|
||||
look = lookLin mcfg lang
|
||||
```
|
||||
The result of linearization is usually a record, which is realized as
|
||||
a string using the following algorithm.
|
||||
```
|
||||
realize :: Term -> String
|
||||
realize trm = case trm of
|
||||
R (t:_) -> realize t
|
||||
S ss -> unwords $ Prelude.map realize ss
|
||||
K (KS s) -> s
|
||||
K (KP s _) -> unwords s ---- prefix choice TODO
|
||||
W s t -> s ++ realize t
|
||||
FV (t:_) -> realize t
|
||||
TM -> "?"
|
||||
```
|
||||
Since the order of record fields is not necessarily
|
||||
the same as in GF source,
|
||||
this realization does not work securely for
|
||||
categories whose lincats have more than one field.
|
||||
|
||||
|
||||
===Term evaluation===
|
||||
|
||||
Evaluation follows call-by-value order, with two environments
|
||||
needed:
|
||||
- the grammar (a concrete syntax) to give the global constants
|
||||
- an array of terms to give the subtree linearizations
|
||||
|
||||
|
||||
The code is presented in one-level pattern matching, to
|
||||
enable reimplementations in languages that do not permit
|
||||
deep patterns (such as Java and C++).
|
||||
```
|
||||
compute :: GFCC -> CId -> [Term] -> Term -> Term
|
||||
compute mcfg lang args = comp where
|
||||
comp trm = case trm of
|
||||
P r p -> proj (comp r) (comp p)
|
||||
RP i t -> RP (comp i) (comp t)
|
||||
W s t -> W s (comp t)
|
||||
R ts -> R $ Prelude.map comp ts
|
||||
V i -> idx args (fromInteger i) -- already computed
|
||||
F c -> comp $ look c -- not computed (if contains V)
|
||||
FV ts -> FV $ Prelude.map comp ts
|
||||
S ts -> S $ Prelude.filter (/= S []) $ Prelude.map comp ts
|
||||
_ -> trm
|
||||
|
||||
look = lookLin mcfg lang
|
||||
|
||||
idx xs i = xs !! i
|
||||
|
||||
proj r p = case (r,p) of
|
||||
(_, FV ts) -> FV $ Prelude.map (proj r) ts
|
||||
(W s t, _) -> kks (s ++ getString (proj t p))
|
||||
_ -> comp $ getField r (getIndex p)
|
||||
|
||||
getString t = case t of
|
||||
K (KS s) -> s
|
||||
_ -> trace ("ERROR in grammar compiler: string from "++ show t) "ERR"
|
||||
|
||||
getIndex t = case t of
|
||||
C i -> fromInteger i
|
||||
RP p _ -> getIndex p
|
||||
TM -> 0 -- default value for parameter
|
||||
_ -> trace ("ERROR in grammar compiler: index from " ++ show t) 0
|
||||
|
||||
getField t i = case t of
|
||||
R rs -> idx rs i
|
||||
RP _ r -> getField r i
|
||||
TM -> TM
|
||||
_ -> trace ("ERROR in grammar compiler: field from " ++ show t) t
|
||||
```
|
||||
|
||||
===The special term constructors===
|
||||
|
||||
The three forms introduced by the compiler may need a special
|
||||
explanation.
|
||||
|
||||
Global constants
|
||||
```
|
||||
Term ::= CId ;
|
||||
```
|
||||
are shorthands for complex terms. They are produced by the
|
||||
compiler by (iterated) common subexpression elimination.
|
||||
They are often more powerful than hand-devised code sharing in the source
|
||||
code. They could be computed off-line by replacing each identifier by
|
||||
its definition.
|
||||
|
||||
Prefix-suffix tables
|
||||
```
|
||||
Term ::= "(" String "+" Term ")" ;
|
||||
```
|
||||
represent tables of word forms divided to the longest common prefix
|
||||
and its array of suffixes. In the example grammar above, we have
|
||||
```
|
||||
Sleep = [("sleep" + ["s",""])]
|
||||
```
|
||||
which in fact is equal to the array of full forms
|
||||
```
|
||||
["sleeps", "sleep"]
|
||||
```
|
||||
The power of this construction comes from the fact that suffix sets
|
||||
tend to be repeated in a language, and can therefore be collected
|
||||
by common subexpression elimination. It is this technique that
|
||||
explains the used syntax rather than the more accurate
|
||||
```
|
||||
"(" String "+" [String] ")"
|
||||
```
|
||||
since we want the suffix part to be a ``Term`` for the optimization to
|
||||
take effect.
|
||||
|
||||
The most curious construct of GFCC is the parameter array alias,
|
||||
```
|
||||
Term ::= "(" Term "@" Term ")";
|
||||
```
|
||||
This form is used as the value of parameter records, such as the type
|
||||
```
|
||||
{n : Number ; p : Person}
|
||||
```
|
||||
The problem with parameter records is their double role.
|
||||
They can be used like parameter values, as indices in selection,
|
||||
```
|
||||
VP.s ! {n = Sg ; p = P3}
|
||||
```
|
||||
but also as records, from which parameters can be projected:
|
||||
```
|
||||
{n = Sg ; p = P3}.n
|
||||
```
|
||||
Whichever use is selected as primary, a prohibitively complex
|
||||
case expression must be generated at compilation to GFCC to get the
|
||||
other use. The adopted
|
||||
solution is to generate a pair containing both a parameter value index
|
||||
and an array of indices of record fields. For instance, if we have
|
||||
```
|
||||
param Number = Sg | Pl ; Person = P1 | P2 | P3 ;
|
||||
```
|
||||
we get the encoding
|
||||
```
|
||||
{n = Sg ; p = P3} ---> (2 @ [0,2])
|
||||
```
|
||||
The GFCC computation rules are essentially
|
||||
```
|
||||
(t ! (i @ _)) = (t ! i)
|
||||
((_ @ r) ! j) = (r ! j)
|
||||
```
|
||||
|
||||
|
||||
==Compiling to GFCC==
|
||||
|
||||
Compilation to GFCC is performed by the GF grammar compiler, and
|
||||
GFCC interpreters need not know what it does. For grammar writers,
|
||||
however, it might be interesting to know what happens to the grammars
|
||||
in the process.
|
||||
|
||||
The compilation phases are the following
|
||||
+ translate GF source to GFC, as always in GF
|
||||
+ undo GFC back-end optimizations
|
||||
+ perform the ``values`` optimization to normalize tables
|
||||
+ create a symbol table mapping the GFC parameter and record types to
|
||||
fixed-size arrays, and parameter values and record labels to integers
|
||||
+ traverse the linearization rules replacing parameters and labels by integers
|
||||
+ reorganize the created GFC grammar so that it has just one abstract syntax
|
||||
and one concrete syntax per language
|
||||
+ apply UTF8 encoding to the grammar, if not yet applied (this is told by the
|
||||
``coding`` flag)
|
||||
+ translate the GFC syntax tree to a GFCC syntax tree, using a simple
|
||||
compositional mapping
|
||||
+ perform the word-suffix optimization on GFCC linearization terms
|
||||
+ perform subexpression elimination on each concrete syntax module
|
||||
+ print out the GFCC code
|
||||
|
||||
|
||||
Notice that a major part of the compilation is done within GFC, so that
|
||||
GFC-related tasks (such as parser generation) could be performed by
|
||||
using the old algorithms.
|
||||
|
||||
|
||||
===Problems in GFCC compilation===
|
||||
|
||||
Two major problems had to be solved in compiling GFC to GFCC:
|
||||
- consistent order of tables and records, to permit the array translation
|
||||
- run-time variables in complex parameter values.
|
||||
|
||||
|
||||
The current implementation is still experimental and may fail
|
||||
to generate correct code. Any errors remaining are likely to be
|
||||
related to the two problems just mentioned.
|
||||
|
||||
The order problem is solved in different ways for tables and records.
|
||||
For tables, the ``values`` optimization of GFC already manages to
|
||||
maintain a canonical order. But this order can be destroyed by the
|
||||
``share`` optimization. To make sure that GFCC compilation works properly,
|
||||
it is safest to recompile the GF grammar by using the ``values``
|
||||
optimization flag.
|
||||
|
||||
Records can be canonically ordered by sorting them by labels.
|
||||
In fact, this was done in connection with the GFCC work as a part
|
||||
of the GFC generation, to guarantee consistency. This means that
|
||||
e.g. the ``s`` field will in general no longer appear as the first
|
||||
field, even if it does so in the GF source code. But relying on the
|
||||
order of fields in a labelled record would be misplaced anyway.
|
||||
|
||||
The canonical form of records is further complicated by lock fields,
|
||||
i.e. dummy fields of form ``lock_C = <>``, which are added to grammar
|
||||
libraries to force intensionality of linearization types. The problem
|
||||
is that the absence of a lock field only generates a warning, not
|
||||
an error. Therefore a GFC grammar can contain objects of the same
|
||||
type with and without a lock field. This problem was solved in GFCC
|
||||
generation by just removing all lock fields (defined as fields whose
|
||||
type is the empty record type). This has the further advantage of
|
||||
(slightly) reducing the grammar size. More importantly, it is safe
|
||||
to remove lock fields, because they are never used in computation,
|
||||
and because intensional types are only needed in grammars reused
|
||||
as libraries, not in grammars used at runtime.
|
||||
|
||||
While the order problem is rather bureaucratic in nature, run-time
|
||||
variables are an interesting problem. They arise in the presence
|
||||
of complex parameter values, created by argument-taking constructors
|
||||
and parameter records. To give an example, consider the GF parameter
|
||||
type system
|
||||
```
|
||||
Number = Sg | Pl ;
|
||||
Person = P1 | P2 | P3 ;
|
||||
Agr = Ag Number Person ;
|
||||
```
|
||||
The values can be translated to integers in the expected way,
|
||||
```
|
||||
Sg = 0, Pl = 1
|
||||
P1 = 0, P2 = 1, P3 = 2
|
||||
Ag Sg P1 = 0, Ag Sg P2 = 1, Ag Sg P3 = 2,
|
||||
Ag Pl P1 = 3, Ag Pl P2 = 4, Ag Pl P3 = 5
|
||||
```
|
||||
However, an argument of ``Agr`` can be a run-time variable, as in
|
||||
```
|
||||
Ag np.n P3
|
||||
```
|
||||
This expression must first be translated to a case expression,
|
||||
```
|
||||
case np.n of {
|
||||
0 => 2 ;
|
||||
1 => 5
|
||||
}
|
||||
```
|
||||
which can then be translated to the GFCC term
|
||||
```
|
||||
([2,5] ! ($0 ! $1))
|
||||
```
|
||||
assuming that the variable ``np`` is the first argument and that its
|
||||
``Number`` field is the second in the record.
|
||||
|
||||
This transformation of course has to be performed recursively, since
|
||||
there can be several run-time variables in a parameter value:
|
||||
```
|
||||
Ag np.n np.p
|
||||
```
|
||||
A similar transformation would be possible to deal with the double
|
||||
role of parameter records discussed above. Thus the type
|
||||
```
|
||||
RNP = {n : Number ; p : Person}
|
||||
```
|
||||
could be uniformly translated into the set ``{0,1,2,3,4,5}``
|
||||
as ``Agr`` above. Selections would be simple instances of indexing.
|
||||
But any projection from the record should be translated into
|
||||
a case expression,
|
||||
```
|
||||
rnp.n ===>
|
||||
case rnp of {
|
||||
0 => 0 ;
|
||||
1 => 0 ;
|
||||
2 => 0 ;
|
||||
3 => 1 ;
|
||||
4 => 1 ;
|
||||
5 => 1
|
||||
}
|
||||
```
|
||||
To avoid the code bloat resulting from this, we chose the alias representation
|
||||
which is easy enough to deal with in interpreters.
|
||||
|
||||
|
||||
===The representation of linearization types===
|
||||
|
||||
Linearization types (``lincat``) are not needed when generating with
|
||||
GFCC, but they have been added to enable parser generation directly from
|
||||
GFCC. The linearization type definitions are shown as a part of the
|
||||
concrete syntax, by using terms to represent types. Here is the table
|
||||
showing how different linearization types are encoded.
|
||||
```
|
||||
P* = size(P) -- parameter type
|
||||
{_ : I ; __ : R}* = (I* @ R*) -- record of parameters
|
||||
{r1 : T1 ; ... ; rn : Tn}* = [T1*,...,Tn*] -- other record
|
||||
(P => T)* = [T* ,...,T*] -- size(P) times
|
||||
Str* = ()
|
||||
```
|
||||
The category symbols are prefixed with two underscores (``__``).
|
||||
For example, the linearization type ``present/CatEng.NP`` is
|
||||
translated as follows:
|
||||
```
|
||||
NP = {
|
||||
a : { -- 6 = 2*3 values
|
||||
n : {ParamX.Number} ; -- 2 values
|
||||
p : {ParamX.Person} -- 3 values
|
||||
} ;
|
||||
s : {ResEng.Case} => Str -- 3 values
|
||||
}
|
||||
|
||||
__NP = [(6@[2,3]),[(),(),()]]
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
===Running the compiler and the GFCC interpreter===
|
||||
|
||||
GFCC generation is a part of the
|
||||
[developers' version http://www.cs.chalmers.se/Cs/Research/Language-technology/darcs/GF/doc/darcs.html]
|
||||
of GF since September 2006. To invoke the compiler, the flag
|
||||
``-printer=gfcc`` to the command
|
||||
``pm = print_multi`` is used. It is wise to recompile the grammar from
|
||||
source, since previously compiled libraries may not obey the canonical
|
||||
order of records. Running ``strip`` on the grammar before
|
||||
GFCC translation removes unnecessary interface references.
|
||||
Here is an example, performed in
|
||||
[example/bronzeage ../../../../../examples/bronzeage].
|
||||
```
|
||||
i -src -path=.:prelude:resource-1.0/* -optimize=all_subs BronzeageEng.gf
|
||||
i -src -path=.:prelude:resource-1.0/* -optimize=all_subs BronzeageGer.gf
|
||||
strip
|
||||
pm -printer=gfcc | wf bronze.gfcc
|
||||
```
|
||||
|
||||
|
||||
|
||||
==The reference interpreter==
|
||||
|
||||
The reference interpreter written in Haskell consists of the following files:
|
||||
```
|
||||
-- source file for BNFC
|
||||
GFCC.cf -- labelled BNF grammar of gfcc
|
||||
|
||||
-- files generated by BNFC
|
||||
AbsGFCC.hs -- abstract syntax of gfcc
|
||||
ErrM.hs -- error monad used internally
|
||||
LexGFCC.hs -- lexer of gfcc files
|
||||
ParGFCC.hs -- parser of gfcc files and syntax trees
|
||||
PrintGFCC.hs -- printer of gfcc files and syntax trees
|
||||
|
||||
-- hand-written files
|
||||
DataGFCC.hs -- post-parser grammar creation, linearization and evaluation
|
||||
GenGFCC.hs -- random and exhaustive generation, generate-and-test parsing
|
||||
RunGFCC.hs -- main function - a simple command interpreter
|
||||
```
|
||||
It is included in the
|
||||
[developers' version http://www.cs.chalmers.se/Cs/Research/Language-technology/darcs/GF/doc/darcs.html]
|
||||
of GF, in the subdirectory [``GF/src/GF/Canon/GFCC`` ../].
|
||||
|
||||
To compile the interpreter, type
|
||||
```
|
||||
make gfcc
|
||||
```
|
||||
in ``GF/src``. To run it, type
|
||||
```
|
||||
./gfcc <GFCC-file>
|
||||
```
|
||||
The available commands are
|
||||
- ``gr <Cat> <Int>``: generate a number of random trees in category.
|
||||
and show their linearizations in all languages
|
||||
- ``grt <Cat> <Int>``: generate a number of random trees in category.
|
||||
and show the trees and their linearizations in all languages
|
||||
- ``gt <Cat> <Int>``: generate a number of trees in category from smallest,
|
||||
and show their linearizations in all languages
|
||||
- ``gtt <Cat> <Int>``: generate a number of trees in category from smallest,
|
||||
and show the trees and their linearizations in all languages
|
||||
- ``p <Int> <Cat> <String>``: "parse", i.e. generate trees until match or
|
||||
until the given number have been generated
|
||||
- ``<Tree>``: linearize tree in all languages, also showing full records
|
||||
- ``quit``: terminate the system cleanly
|
||||
|
||||
|
||||
==Interpreter in C++==
|
||||
|
||||
A base-line interpreter in C++ has been started.
|
||||
Its main functionality is random generation of trees and linearization of them.
|
||||
|
||||
Here are some results from running the different interpreters, compared
|
||||
to running the same grammar in GF, saved in ``.gfcm`` format.
|
||||
The grammar contains the English, German, and Norwegian
|
||||
versions of Bronzeage. The experiment was carried out on
|
||||
an Ubuntu Linux laptop with a 1.5 GHz Intel Centrino processor.
|
||||
|
||||
|| | GF | gfcc(hs) | gfcc++ |
|
||||
| program size | 7249k | 803k | 113k
|
||||
| grammar size | 336k | 119k | 119k
|
||||
| read grammar | 1150ms | 510ms | 100ms
|
||||
| generate 222 | 9500ms | 450ms | 800ms
|
||||
| memory | 21M | 10M | 20M
|
||||
|
||||
|
||||
|
||||
To summarize:
|
||||
- going from GF to gfcc is a major win in both code size and efficiency
|
||||
- going from Haskell to C++ interpreter is not a win yet, because of a space
|
||||
leak in the C++ version
|
||||
|
||||
|
||||
|
||||
==Some things to do==
|
||||
|
||||
Interpreter in Java.
|
||||
|
||||
Parsing via MCFG
|
||||
- the FCFG format can possibly be simplified
|
||||
- parser grammars should be saved in files to make interpreters easier
|
||||
|
||||
|
||||
Hand-written parsers for GFCC grammars to reduce code size
|
||||
(and perhaps improve the efficiency?) of interpreters.
|
||||
|
||||
Binary format and/or file compression of GFCC output.
|
||||
|
||||
Syntax editor based on GFCC.
|
||||
|
||||
Rewriting of resource libraries in order to exploit the
|
||||
word-suffix sharing better (depth-one tables, as in FM).
|
||||
|
||||
|
||||
|
||||
180
src/PGF/doc/syntax.txt
Normal file
180
src/PGF/doc/syntax.txt
Normal file
@@ -0,0 +1,180 @@
|
||||
GFCC Syntax
|
||||
|
||||
|
||||
==Syntax of GFCC files==
|
||||
|
||||
The parser syntax is very simple, as defined in BNF:
|
||||
```
|
||||
Grm. Grammar ::= [RExp] ;
|
||||
|
||||
App. RExp ::= "(" CId [RExp] ")" ;
|
||||
AId. RExp ::= CId ;
|
||||
AInt. RExp ::= Integer ;
|
||||
AStr. RExp ::= String ;
|
||||
AFlt. RExp ::= Double ;
|
||||
AMet. RExp ::= "?" ;
|
||||
|
||||
terminator RExp "" ;
|
||||
|
||||
token CId (('_' | letter) (letter | digit | '\'' | '_')*) ;
|
||||
```
|
||||
While a parser and a printer can be generated for many languages
|
||||
from this grammar by using the BNF Converter, a parser is also
|
||||
easy to write by hand using recursive descent.
|
||||
|
||||
|
||||
==Syntax of well-formed GFCC code==
|
||||
|
||||
Here is a summary of well-formed syntax,
|
||||
with a comment on the semantics of each construction.
|
||||
```
|
||||
Grammar ::=
|
||||
("grammar" CId CId*) -- abstract syntax name and concrete syntax names
|
||||
"(" "flags" Flag* ")" -- global and abstract flags
|
||||
"(" "abstract" Abstract ")" -- abstract syntax
|
||||
"(" "concrete" Concrete* ")" -- concrete syntaxes
|
||||
|
||||
Abstract ::=
|
||||
"(" "fun" FunDef* ")" -- function definitions
|
||||
"(" "cat" CatDef* ")" -- category definitions
|
||||
|
||||
Concrete ::=
|
||||
"(" CId -- language name
|
||||
"flags" Flag* -- concrete flags
|
||||
"lin" LinDef* -- linearization rules
|
||||
"oper" LinDef* -- operations (macros)
|
||||
"lincat" LinDef* -- linearization type definitions
|
||||
"lindef" LinDef* -- linearization default definitions
|
||||
"printname" LinDef* -- printname definitions
|
||||
"param" LinDef* -- lincats with labels and parameter value names
|
||||
")"
|
||||
|
||||
Flag ::= "(" CId String ")" -- flag and value
|
||||
FunDef ::= "(" CId Type Exp ")" -- function, type, and definition
|
||||
CatDef ::= "(" CId Hypo* ")" -- category and context
|
||||
LinDef ::= "(" CId Term ")" -- function and definition
|
||||
|
||||
Type ::=
|
||||
"(" CId -- value category
|
||||
"(" "H" Hypo* ")" -- argument context
|
||||
"(" "X" Exp* ")" ")" -- arguments (of dependent value type)
|
||||
|
||||
Exp ::=
|
||||
"(" CId -- function
|
||||
"(" "B" CId* ")" -- bindings
|
||||
"(" "X" Exp* ")" ")" -- arguments
|
||||
| CId -- variable
|
||||
| "?" -- metavariable
|
||||
| "(" "Eq" Equation* ")" -- group of pattern equations
|
||||
| Integer -- integer literal (non-negative)
|
||||
| Float -- floating-point literal (non-negative)
|
||||
| String -- string literal (in double quotes)
|
||||
|
||||
Hypo ::= "(" CId Type ")" -- variable and type
|
||||
|
||||
Equation ::= "(" "E" Exp Exp* ")" -- value and pattern list
|
||||
|
||||
Term ::=
|
||||
"(" "R" Term* ")" -- array (record or table)
|
||||
| "(" "S" Term* ")" -- concatenated sequence
|
||||
| "(" "FV" Term* ")" -- free variant list
|
||||
| "(" "P" Term Term ")" -- access to index (projection or selection)
|
||||
| "(" "W" String Term ")" -- token prefix with suffix list
|
||||
| "(" "A" Integer ")" -- pointer to subtree
|
||||
| String -- token (in double quotes)
|
||||
| Integer -- index in array
|
||||
| CId -- macro constant
|
||||
| "?" -- metavariable
|
||||
```
|
||||
|
||||
|
||||
==GFCC interpreter==
|
||||
|
||||
The first phase in interpreting GFCC is to parse a GFCC file and
|
||||
build an internal abstract syntax representation, as specified
|
||||
in the previous section.
|
||||
|
||||
With this representation, linearization can be performed by
|
||||
a straightforward function from expressions (``Exp``) to terms
|
||||
(``Term``). All expressions except groups of pattern equations
|
||||
can be linearized.
|
||||
|
||||
Here is a reference Haskell implementation of linearization:
|
||||
```
|
||||
linExp :: GFCC -> CId -> Exp -> Term
|
||||
linExp gfcc lang tree@(DTr _ at trees) = case at of
|
||||
AC fun -> comp (map lin trees) $ look fun
|
||||
AS s -> R [K (show s)] -- quoted
|
||||
AI i -> R [K (show i)]
|
||||
AF d -> R [K (show d)]
|
||||
AM -> TM
|
||||
where
|
||||
lin = linExp gfcc lang
|
||||
comp = compute gfcc lang
|
||||
look = lookLin gfcc lang
|
||||
```
|
||||
TODO: bindings must be supported.
|
||||
|
||||
Terms resulting from linearization are evaluated in
|
||||
call-by-value order, with two environments needed:
|
||||
- the grammar (a concrete syntax) to give the global constants
|
||||
- an array of terms to give the subtree linearizations
|
||||
|
||||
|
||||
The Haskell implementation works as follows:
|
||||
```
|
||||
compute :: GFCC -> CId -> [Term] -> Term -> Term
|
||||
compute gfcc lang args = comp where
|
||||
comp trm = case trm of
|
||||
P r p -> proj (comp r) (comp p)
|
||||
W s t -> W s (comp t)
|
||||
R ts -> R $ map comp ts
|
||||
V i -> idx args (fromInteger i) -- already computed
|
||||
F c -> comp $ look c -- not computed (if contains V)
|
||||
FV ts -> FV $ Prelude.map comp ts
|
||||
S ts -> S $ Prelude.filter (/= S []) $ Prelude.map comp ts
|
||||
_ -> trm
|
||||
|
||||
look = lookOper gfcc lang
|
||||
|
||||
idx xs i = xs !! i
|
||||
|
||||
proj r p = case (r,p) of
|
||||
(_, FV ts) -> FV $ Prelude.map (proj r) ts
|
||||
(FV ts, _ ) -> FV $ Prelude.map (\t -> proj t p) ts
|
||||
(W s t, _) -> kks (s ++ getString (proj t p))
|
||||
_ -> comp $ getField r (getIndex p)
|
||||
|
||||
getString t = case t of
|
||||
K (KS s) -> s
|
||||
_ -> trace ("ERROR in grammar compiler: string from "++ show t) "ERR"
|
||||
|
||||
getIndex t = case t of
|
||||
C i -> fromInteger i
|
||||
RP p _ -> getIndex p
|
||||
TM -> 0 -- default value for parameter
|
||||
_ -> trace ("ERROR in grammar compiler: index from " ++ show t) 0
|
||||
|
||||
getField t i = case t of
|
||||
R rs -> idx rs i
|
||||
RP _ r -> getField r i
|
||||
TM -> TM
|
||||
_ -> trace ("ERROR in grammar compiler: field from " ++ show t) t
|
||||
```
|
||||
The result of linearization is usually a record, which is realized as
|
||||
a string using the following algorithm.
|
||||
```
|
||||
realize :: Term -> String
|
||||
realize trm = case trm of
|
||||
R (t:_) -> realize t
|
||||
S ss -> unwords $ map realize ss
|
||||
K s -> s
|
||||
W s t -> s ++ realize t
|
||||
FV (t:_) -> realize t -- TODO: all variants
|
||||
TM -> "?"
|
||||
```
|
||||
Notice that realization always picks the first field of a record.
|
||||
If a linearization type has more than one field, the first field
|
||||
does not necessarily contain the desired string.
|
||||
Also notice that the order of record fields in GFCC is not necessarily
|
||||
the same as in GF source.
|
||||
Reference in New Issue
Block a user