Restoring old functionality

This commit is contained in:
aarne
2004-03-24 15:09:06 +00:00
parent 8f829331f6
commit 4a34119ad0
19 changed files with 738 additions and 139 deletions

View File

@@ -10,7 +10,7 @@ Highlights, preliminary version
<p>
13/10/2003 - 25/11
13/10/2003 - 25/11 - 24/3/2004
<p>
@@ -35,8 +35,10 @@ An accurate <a href="DocGF.ps.gz">language specification</a> is now available.
<li> Hierarchic structure (<tt>extend</tt>) + cross-cutting reuse (<tt>open</tt>)
<li> Separate compilation, one module per file
<li> Reuse of <tt>abstract</tt>+<tt>concrete</tt> as <tt>resource</tt>
<li> New (experimental) module types: <tt>transfer</tt>,
<li> New module types:
<tt>interface</tt>, <tt>instance</tt>, <tt>incomplete</tt>.
<li> New experimental module types: <tt>transfer</tt>,
<tt>union</tt>.
<h4>Canonical format GFC</h4>
@@ -46,6 +48,7 @@ An accurate <a href="DocGF.ps.gz">language specification</a> is now available.
<h4>New features in expression language</h4>
<li> Disjunctive patterns <tt>P | ... | Q</tt>.
<li> String patterns <tt>"foo"</tt>.
<li> Binding token <tt>&+</tt> to glue separate tokens at unlexing phase,
and unlexer to resolve this.
<li> New syntax alternatives for local definitions: <tt>let</tt> without
@@ -75,53 +78,47 @@ An accurate <a href="DocGF.ps.gz">language specification</a> is now available.
<!-- NEW -->
<h2>Status (25/11/2003)</h2>
<h2>Status (24/3/2004)</h2>
Grammar compiler, editor GUIs, and shell work.
<p>
The updated <tt>HelpFile</tt> (accessible through <tt>h</tt> command)
marks unsupported but expected features with <tt>*</tt>.
<p>
GF1 grammars can be automatically translated to GF2 (although result not as good
as manual, since indentation and comments are destroyed). The results can be
saved in GF2 files, but this is not necessary.
<p>
Example grammars and resource libraries are in the process of
being converted. There will be a new API with
It is also possible to write a GF2 grammar back to GF1.
<p>
Example grammars and resource libraries have been
converted. There is a new resource API with
many new constructions. The new versions lie in <tt>grammars/newresource</tt>.
English and Swedish resources are up-to-date.
In the old API version, <tt>grammars/resource</tt>, the other languages
are up-to-date.
<p>
A make facility works, finding out which modules have to be recompiled.
There is some room for improvement.
<br>
<b>Bug</b>.
Sometimes the grammar compiler gets confused if there are many
grammars open simultaneously. Then the advice is to empty
the environment (using <tt>e</tt>) and compile grammar one by one.
When read from <tt>gfc</tt> versions, there should be no problems
to have several grammars simultaneously.
<p>
The module type <tt>grammar</tt> (to build multilingual grammars) not yet
implemented.
<tt>transfer</tt> modules have to be called by flags
<tt>transfer</tt> modules have to be called by flags.
<p>
Abstract modules (<tt>interface</tt>, <tt>instance</tt>, and <tt>incomplete</tt>)
have not been stress-tested, but work in the examples in
<tt>resource/romance</tt> and <tt>resource/french</tt>.
<p>
Soundness checking of module dependencies and completeness is not
complete.
complete. This means that some errors may show up too late.
<!-- NEW -->
@@ -167,6 +164,15 @@ line, e.g.
<p>
To write a GF2 grammar back to GF1 (as one big file), use the command
<pre>
> pg -old
</pre>
<p>
GF2 has more reserved words than GF 1.2. When old files are read, a preprocessor
replaces every identifier that has the shape of a new reserved word
with a variant where the last letter is replaced by <tt>Z</tt>, e.g.
@@ -311,6 +317,10 @@ compare the modification times of each <tt>gf</tt> and <tt>gfc</tt> file:
<li> if <tt>gfc</tt> is later, just read in the module
</ul>
</ol>
Inside the GF shell, also time stamps of modules read into memory are
taken into account. Thus a module need not be read from a file if the
module is in the memory and the file has not been modified.
<!-- NEW -->

View File

@@ -1,6 +1,6 @@
-- language-independent prelude facilities
resource Prelude = open (Predef = Predef) in {
resource Prelude = {
oper
-- to construct records and tables
@@ -71,16 +71,20 @@ oper
E0 => E1 ; E1 => E2 ; _ => Emore} ;
-- these were defined in Predef before
oper isNil : Tok -> Bool = \b -> pbool2bool (Predef.eqStr [] b) ;
isNil : Tok -> Bool = \b -> pbool2bool (Predef.eqStr [] b) ;
oper ifTok : (A : Type) -> Tok -> Tok -> A -> A -> A = \A,t,u,a,b ->
ifTok : (A : Type) -> Tok -> Tok -> A -> A -> A = \A,t,u,a,b ->
case Predef.eqStr t u of {Predef.PTrue => a ; Predef.PFalse => b} ;
-- so we need an interface
oper pbool2bool : Predef.PBool -> Bool = \b -> case b of {
pbool2bool : Predef.PBool -> Bool = \b -> case b of {
Predef.PFalse => False ; Predef.PTrue => True
} ;
init : Tok -> Tok = Predef.tk 1 ;
last : Tok -> Tok = Predef.dp 1 ;
-- bind together two tokens in the lexer, either obligatorily or optionally
oper

View File

@@ -6,6 +6,7 @@ import PGrammar
import TypeCheck
import Compile
import ShellState
import GetGrammar
import Modules
import Option
@@ -36,13 +37,19 @@ string2annotTree gr m = annotate gr . string2absTerm (prt m) ---- prt
---string2paramList st = map (renameTrm (lookupConcrete st) . patt2term) . pPattList
shellStateFromFiles :: Options -> ShellState -> FilePath -> IOE ShellState
shellStateFromFiles opts st file | fileSuffix file == "gfcm" = do
(_,_,cgr) <- compileOne opts (compileEnvShSt st []) file
ioeErr $ updateShellState opts st (cgr,(emptyMGrammar,[]))
shellStateFromFiles opts st file = do
let osb = if oElem showOld opts
then addOptions (options [beVerbose]) opts -- for old, no emit
else addOptions (options [beVerbose, emitCode]) opts -- for new, do
grts <- compileModule osb st file
ioeErr $ updateShellState opts st grts
--- liftM (changeModTimes rts) $ grammar2shellState opts gr
shellStateFromFiles opts st file = case fileSuffix file of
"cf" -> do
let opts' = addOptions (options [beVerbose]) opts
sgr <- getCFGrammar opts' file
ioeIO $ print sgr -----
return st
"gfcm" -> do
(_,_,cgr) <- compileOne opts (compileEnvShSt st []) file
ioeErr $ updateShellState opts st (cgr,(emptyMGrammar,[]))
_ -> do
let osb = if oElem showOld opts
then addOptions (options [beVerbose]) opts -- for old, no emit
else addOptions (options [beVerbose, emitCode]) opts -- for new,do
grts <- compileModule osb st file
ioeErr $ updateShellState opts st grts
--- liftM (changeModTimes rts) $ grammar2shellState opts gr

View File

@@ -68,6 +68,10 @@ varCFFun = mkCFFun . AV
consCFFun :: CIdent -> CFFun
consCFFun = mkCFFun . AC
-- standard way of making cf fun
string2CFFun :: String -> String -> CFFun
string2CFFun m c = consCFFun $ mkCIdent m c
stringCFFun :: String -> CFFun
stringCFFun = mkCFFun . AS
@@ -80,6 +84,9 @@ dummyCFFun = varCFFun $ identC "_" --- used in lexer-by-need rules
cfFun2String :: CFFun -> String
cfFun2String (CFFun (f,_)) = prt f
cfFun2Ident :: CFFun -> Ident
cfFun2Ident (CFFun (f,_)) = identC $ prt_ f ---
cfFun2Profile :: CFFun -> Profile
cfFun2Profile (CFFun (_,p)) = p
@@ -131,6 +138,9 @@ moduleOfCFCat (CFCat (CIQ m _, _)) = m
cfCat2Cat :: CFCat -> (Ident,Ident)
cfCat2Cat (CFCat (CIQ m c,_)) = (m,c)
cfCat2Ident :: CFCat -> Ident
cfCat2Ident = snd . cfCat2Cat
lexCFCat :: CFCat -> CFCat
lexCFCat cat = ident2CFCat (uncurry CIQ (cfCat2Cat cat)) (identC "*")

50
src/GF/CF/CFtoGrammar.hs Normal file
View File

@@ -0,0 +1,50 @@
module CFtoGrammar where
import Ident
import Grammar
import qualified AbsGF as A
import qualified GrammarToSource as S
import Macros
import CF
import CFIdent
import PPrCF
import Operations
import List (nub)
import Char (isSpace)
-- 26/1/2000 -- 18/4 -- 24/3/2004
cf2grammar :: CF -> [A.TopDef]
cf2grammar cf = concatMap S.trAnyDef (abs ++ conc) where
rules = rulesOfCF cf
abs = cats ++ funs
conc = lintypes ++ lins
cats = [(cat, AbsCat (yes []) (yes [])) |
cat <- nub (concat (map cf2cat rules))] ----notPredef cat
lintypes = [] ----[(cat, CncCat (yes) nope Nothing) | (cat,AbsCat _ _) <- cats]
(funs,lins) = unzip (map cf2rule rules)
cf2cat :: CFRule -> [Ident]
cf2cat (_,(cat, items)) = map cfCat2Ident $ cat : [c | CFNonterm c <- items]
cf2rule :: CFRule -> ((Ident,Info),(Ident,Info))
cf2rule (fun, (cat, items)) = (def,ldef) where
f = cfFun2Ident fun
def = (f, AbsFun (yes (mkProd (args', Cn (cfCat2Ident cat), []))) nope)
args0 = zip (map (mkIdent "x") [0..]) items
args = [(v, Cn (cfCat2Ident c)) | (v, CFNonterm c) <- args0]
args' = [(zIdent "_", Cn (cfCat2Ident c)) | (_, CFNonterm c) <- args0]
ldef = (f, CncFun
Nothing
(yes (mkAbs (map fst args)
(mkRecord linLabel [foldconcat (map mkIt args0)])))
nope)
mkIt (v, CFNonterm _) = P (Vr v) (linLabel 0)
mkIt (_, CFTerm (RegAlts [a])) = K a
mkIt _ = K "" --- regexp not recognized in input CF ; use EBNF for this
foldconcat [] = K ""
foldconcat tt = foldr1 C tt

View File

@@ -6,6 +6,8 @@ import CFIdent
import AbsGFC
import PrGrammar
import Char
-- printing and parsing CF grammars, rules, and trees AR 26/1/2000 -- 9/6/2003
---- use the Print class instead!
@@ -42,18 +44,25 @@ prRegExp (RegAlts tt) = case tt of
[t] -> prQuotedString t
_ -> prParenth (prTList " | " (map prQuotedString tt))
{- ----
-- rules have an amazingly easy parser, if we use the format
-- fun. C -> item1 item2 ... where unquoted items are treated as cats
-- Actually would be nice to add profiles to this.
getCFRule :: String -> Maybe CFRule
getCFRule s = getcf (wrds s) where
getCFRule :: String -> String -> Err CFRule
getCFRule mo s = getcf (wrds s) where
getcf ww | length ww > 2 && ww !! 2 `elem` ["->", "::="] =
Just (string2CFFun (init fun), (string2CFCat cat, map mkIt its)) where
Ok (string2CFFun mo (init fun), (string2CFCat mo cat, map mkIt its)) where
fun : cat : _ : its = words s
mkIt ('"':w@(_:_)) = atomCFTerm (string2CFTok (init w))
mkIt w = CFNonterm (string2CFCat w)
getcf _ = Nothing
mkIt w = CFNonterm (string2CFCat mo w)
getcf _ = Bad "invalid rule"
wrds = takeWhile (/= ";") . words -- to permit semicolon in the end
-}
pCF :: String -> String -> Err CF
pCF mo s = do
rules <- mapM (getCFRule mo) $ filter isRule $ lines s
return $ rules2CF rules
where
isRule line = case line of
'-':'-':_ -> False
_ -> not $ all isSpace line

View File

@@ -13,6 +13,7 @@ import LookAbs
import Macros
import ReservedWords ----
import PatternMatch
import AppPredefined
import Operations
import CheckM
@@ -207,6 +208,8 @@ computeLType gr t = do
where
comp ty = case ty of
Q m _ | m == cPredef -> return ty
Q m ident -> do
ty' <- checkErr (lookupResDef gr m ident)
if ty' == ty then return ty else comp ty' --- is this necessary to test?
@@ -256,6 +259,8 @@ checkReservedId x = let c = prt x in
inferLType :: SourceGrammar -> Term -> Check (Term, Type)
inferLType gr trm = case trm of
Q m ident | m==cPredef -> termWith trm $ checkErr (typPredefined ident)
Q m ident -> checks [
termWith trm $ checkErr (lookupResType gr m ident)
,
@@ -616,6 +621,7 @@ checkEqLType env t u trm = do
---- this should be made in Rename
(Q m a, Q n b) | a == b -> elem m (allExtendsPlus env n)
|| elem n (allExtendsPlus env m)
|| m == n --- for Predef
(QC m a, QC n b) | a == b -> elem m (allExtendsPlus env n)
|| elem n (allExtendsPlus env m)
(QC m a, Q n b) | a == b -> elem m (allExtendsPlus env n)

View File

@@ -16,6 +16,9 @@ import Option
import ParGF
import qualified LexGF as L
import PPrCF
import CFtoGrammar
import ReadFiles ----
import List (nub)
@@ -81,3 +84,11 @@ oldLexer = map change . L.tokens where
new = words $ "abstract concrete interface incomplete " ++
"instance out open resource reuse transfer union with where"
getCFGrammar :: Options -> FilePath -> IOE SourceGrammar
getCFGrammar opts file = do
let mo = takeWhile (/='-') file
s <- ioeIO $ readFileIf file
cf <- ioeErr $ pCF mo file
defs <- return $ cf2grammar cf
let g = A.OldGr A.NoIncl defs
ioeErr $ transOldGrammar opts file g

View File

@@ -6,6 +6,7 @@ import Modules
import Ident
import Macros
import PrGrammar
import AppPredefined
import Lookup
import Extend
import Operations
@@ -56,6 +57,7 @@ renameIdentTerm env@(act,imps) t =
Cn c -> do
f <- lookupTreeMany prt opens c
return $ f c
Q m' c | m' == cPredef {- && isInPredefined c -} -> return t
Q m' c -> do
m <- lookupErr m' qualifs
f <- lookupTree prt c m

View File

@@ -3,12 +3,34 @@ module AppPredefined where
import Operations
import Grammar
import Ident
import PrGrammar (prt)
import Macros
import PrGrammar (prt,prtBad)
---- import PGrammar (pTrm)
-- predefined function type signatures and definitions. AR 12/3/2003.
---- typPredefined :: Term -> Err Type
isInPredefined :: Ident -> Bool
isInPredefined = err (const True) (const False) . typPredefined
typPredefined :: Ident -> Err Type
typPredefined c@(IC f) = case f of
"Int" -> return typePType
"PBool" -> return typePType
--- "PFalse" -> -- hidden
--- "PTrue" ->
"dp" -> return $ mkFunType [cnPredef "Int",typeTok] typeTok
"drop" -> return $ mkFunType [cnPredef "Int",typeTok] typeTok
"eqInt" -> return $ mkFunType [cnPredef "Int",cnPredef "Int"] (cnPredef "PBool")
"eqStr" -> return $ mkFunType [typeTok,typeTok] (cnPredef "PBool")
"length" -> return $ mkFunType [typeTok] (cnPredef "Int")
"occur" -> return $ mkFunType [typeTok,typeTok] (cnPredef "PBool")
"plus" -> return $ mkFunType [cnPredef "Int",cnPredef "Int"] (cnPredef "PInt")
---- "read" -> (P : Type) -> Tok -> P
---- "show" -> (P : Type) -> P -> Tok
"take" -> return $ mkFunType [cnPredef "Int",typeTok] typeTok
"tk" -> return $ mkFunType [cnPredef "Int",typeTok] typeTok
_ -> prtBad "unknown in Predef:" c
typPredefined c = prtBad "unknown in Predef:" c
appPredefined :: Term -> Term
appPredefined t = case t of

View File

@@ -40,6 +40,12 @@ qq (m,c) = Q m c
typeForm = qTypeForm ---- no need to dist any more
cPredef :: Ident
cPredef = identC "Predef"
cnPredef :: String -> Term
cnPredef f = Q cPredef (identC f)
typeFormCnc :: Type -> Err (Context, Type)
typeFormCnc t = case t of
Prod x a b -> do

View File

@@ -13,7 +13,7 @@ import API
import IOGrammar
import Compile
---- import GFTex
---- import TeachYourself -- also a subshell
import TeachYourself -- also a subshell
import ShellState
import Option
@@ -180,7 +180,6 @@ execC co@(comm, opts0) sa@((st,(h,_)),a) = case comm of
justOutput (putStrLn (err id prt (
string2srcTerm src m t >>= Co.computeConcrete src))) sa
{- ----
CTranslationQuiz il ol -> justOutput (teachTranslation opts (sgr il) (sgr ol)) sa
CTranslationList il ol n -> do
qs <- transTrainList opts (sgr il) (sgr ol) (toInteger n)
@@ -190,14 +189,14 @@ execC co@(comm, opts0) sa@((st,(h,_)),a) = case comm of
CMorphoList n -> do
qs <- useIOE [] $ morphoTrainList opts gro (toInteger n)
returnArg (AString $ foldr (+++++) [] [unlines (s:ss) | (s,ss) <- qs]) sa
-}
CReadFile file -> returnArgIO (readFileIf file >>= return . AString) sa
CWriteFile file -> justOutputArg (writeFile file) sa
CAppendFile file -> justOutputArg (appendFile file) sa
CSpeakAloud -> justOutputArg (speechGenerate opts) sa
CSystemCommand s -> justOutput (system s >> return ()) sa
----- CPutString -> changeArg (opSS2CommandArg (optStringCommand opts gro)) sa
----- CShowTerm -> changeArg (opTS2CommandArg (optPrintTerm opts gro) . s2t) sa
CPutString -> changeArg (opSS2CommandArg (optStringCommand opts gro)) sa
----- CShowTerm -> changeArg (opTS2CommandArg (optPrintTerm opts gro) . s2t) sa
CSetFlag -> changeState (addGlobalOptions opts0) sa
---- deprec! CSetLocalFlag lang -> changeState (addLocalOptions lang opts0) sa
@@ -211,7 +210,10 @@ execC co@(comm, opts0) sa@((st,(h,_)),a) = case comm of
CPrintInformation c -> justOutput (useIOE () $ showInformation opts st c) sa
CPrintLanguages -> justOutput
(putStrLn $ unwords $ map prLanguage $ allLanguages st) sa
CPrintMultiGrammar -> returnArg (AString (prCanonGrammar (canModules st))) sa
CPrintMultiGrammar -> do
sa' <- changeState purgeShellState sa
returnArg (AString (prCanonGrammar (canModules st))) sa'
---- CPrintGramlet -> returnArg (AString (Gr.prGramlet st)) sa
---- CPrintCanonXML -> returnArg (AString (Canon.prCanonXML st False)) sa
---- CPrintCanonXMLStruct -> returnArg (AString (Canon.prCanonXML st True)) sa

View File

@@ -35,7 +35,7 @@ pCommandLine s = pFirst (chks s) where
pCommandOpt :: [String] -> (Command, Options, [CommandArg])
pCommandOpt (w:ws) = let
(os, co) = getOptions "-" ws
(comm, args) = pCommand (w:co)
(comm, args) = pCommand (abbrevCommand w:co)
in
(comm, os, args)
pCommandOpt s = (CVoid, noOptions, [AError "no parse"])
@@ -45,6 +45,15 @@ pInputString s = case s of
('"':_:_) -> [AString (init (tail s))]
_ -> [AError "illegal string"]
-- command rl can be written remove_language etc.
abbrevCommand :: String -> String
abbrevCommand = hds . words . map u2sp where
u2sp c = if c=='_' then ' ' else c
hds s = case s of
[w@[_,_]] -> w
_ -> map head s
pCommand :: [String] -> (Command, [CommandArg])
pCommand ws = case ws of
@@ -81,6 +90,7 @@ pCommand ws = case ws of
"ps" : s -> aString CPutString s
"st" : s -> aTerm CShowTerm s
"!" : s -> aUnit (CSystemCommand (unwords s))
"sc" : s -> aUnit (CSystemCommand (unwords s))
"sf" : l : [] -> aUnit (CSetLocalFlag (language l))
"sf" : [] -> aUnit CSetFlag

View File

@@ -0,0 +1,71 @@
-- | Interactive translation and morphology quizzes for the GF shell.
--
-- NOTE(review): extracted from a diff rendering that has stripped all
-- leading indentation; re-indent against the repository copy before
-- compiling.
module TeachYourself where
import ShellState
import API
import Linear
import PrGrammar
import Option
import Arch (myStdGen)
import Operations
import UseIO
import Random --- (randoms) --- bad import for hbc
import System
-- translation and morphology quiz. AR 10/5/2000 -- 12/4/2002
-- | Run an interactive translation quiz from grammar @ig@ to grammar @og@,
-- drawing up to 'infinity' precomputed question/answer pairs.
teachTranslation :: Options -> GFGrammar -> GFGrammar -> IO ()
teachTranslation opts ig og = do
tts <- transTrainList opts ig og infinity
let qas = [ (q, mkAnswer as) | (q,as) <- tts]
teachDialogue qas "Welcome to GF Translation Quiz."
-- | Generate @number@ random trees and pair each source-language
-- linearization with all target-language linearizations of its homonyms.
transTrainList ::
Options -> GFGrammar -> GFGrammar -> Integer -> IO [(String,[String])]
transTrainList opts ig og number = do
ts <- randomTreesIO opts ig (fromInteger number)
return $ map mkOne $ ts
where
cat = firstCatOpts opts ig
mkOne t = (norml (linearize ig t),map (norml . linearize og) (homonyms ig cat t))
-- | Run an interactive morphology quiz on grammar @ig@.
teachMorpho :: Options -> GFGrammar -> IO ()
teachMorpho opts ig = useIOE () $ do
tts <- morphoTrainList opts ig infinity
let qas = [ (q, mkAnswer as) | (q,as) <- tts]
ioeIO $ teachDialogue qas "Welcome to GF Morphology Quiz."
-- | Build @number@ morphology questions: for each random tree pick one
-- random row of its full linearization table, ask for that form given
-- the base form and the parameter names.
morphoTrainList :: Options -> GFGrammar -> Integer -> IOE [(String,[String])]
morphoTrainList opts ig number = do
ts <- ioeIO $ randomTreesIO opts ig (fromInteger number)
gen <- ioeIO $ myStdGen (fromInteger number)
mkOnes gen ts
where
mkOnes gen (t:ts) = do
psss <- ioeErr $ allLinTables gr cnc t
let pss = concat $ map snd $ concat psss
-- pick one random (parameters, strings) row from the flattened table
let (i,gen') = randomR (0, length pss - 1) gen
(ps,ss) <- ioeErr $ pss !? i
-- row 0 supplies the base form shown in the question
(_,ss0) <- ioeErr $ pss !? 0
let bas = concat $ take 1 ss0
more <- mkOnes gen' ts
return $ (bas +++ ":" +++ unwords (map prt_ ps), return (concat ss)) : more
mkOnes gen [] = return []
gr = grammar ig
cnc = cncId ig
-- compare answer to the list of right answers, increase score and give feedback
-- | Score an answer against the accepted list: 1 point and "Yes." on a
-- (whitespace-normalized) match, otherwise 0 with the correct answers.
mkAnswer :: [String] -> String -> (Integer, String)
mkAnswer as s = if (elem (norml s) as)
then (1,"Yes.")
else (0,"No, not" +++ s ++ ", but" ++++ unlines as)
-- | Normalize whitespace so answers compare modulo spacing.
norml = unwords . words
--- the maximal number of precompiled quiz problems
infinity :: Integer
infinity = 123

View File

@@ -148,7 +148,7 @@ allLinsAsRec gr c t = linearizeNoMark gr c t >>= expandLinTables gr >>= allLinVa
-- the value is a list of structures arranged as records of tables of strings
-- only taking into account string fields
allLinTables :: CanonGrammar ->Ident ->A.Tree -> Err [[(Label,[([Patt],[String])])]]
allLinTables :: CanonGrammar ->Ident ->A.Tree ->Err [[(Label,[([Patt],[String])])]]
allLinTables gr c t = do
r' <- allLinsAsRec gr c t
mapM (mapM getS) r'

374
src/HelpFile Normal file
View File

@@ -0,0 +1,374 @@
-- GF help file updated for GF 2.0, 24/3/2004.
-- *: Commands and options marked with * are not yet implemented.
--
-- Each command has a long and a short name, options, and zero or more
-- arguments. Commands are sorted by functionality. The short name is
-- given first.
-- commands that change the state
i, import: i File
Reads a grammar from File and compiles it into a GF runtime grammar.
Files "include"d in File are read recursively, nubbing repetitions.
If a grammar with the same language name is already in the state,
it is overwritten - but only if compilation succeeds.
The grammar parser depends on the file name suffix:
.gf normal GF source
.gfc canonical GF
.gfr precompiled GF resource
.gfcm multilingual canonical GF
*.ebnf Extended BNF format
*.cf Context-free (BNF) format
options:
-old old: parse in GF<2.0 format
-v verbose: give lots of messages
-s silent: don't give error messages
-opt perform branch-sharing optimization
*-src source: ignore precompiled gfc and gfr files
-nocf don't build context-free grammar (thus no parser)
-nocheckcirc don't eliminate circular rules from CF
-cflexer build an optimized parser with separate lexer trie
flags:
-abs set the name used for abstract syntax (with -old option)
-cnc set the name used for concrete syntax (with -old option)
-res set the name used for resource (with -old option)
* rl, remove_language: rl Language
Takes away the language from the state.
e, empty: e
Takes away all languages and resets all global flags.
sf, set_flags: sf Language? Flag*
The values of the Flags are set for Language. If no language
is specified, the flags are set globally.
s, strip: s
Prune the state by removing source and resource modules.
-- commands that give information about the state
pg, print_grammar: pg
Prints the actual grammar (overridden by the -lang=X flag).
The -printer=X flag sets the format in which the grammar is
written.
N.B. since grammars are compiled when imported, this command
generally does not show the grammar in the same format as the
source. In particular, the -printer=latex is not supported.
Use the command tg -printer=latex File to print the source
grammar in LaTeX.
options:
-utf8 apply UTF8-encoding to the grammar
flags:
-printer
-lang
pm, print_multigrammar: pm
Prints the current multilingual grammar into a .gfcm file.
(Automatically executes the strip command (s) before doing this.)
po, print_options: po
Print what modules there are in the state. Also
prints those flag values in the current state that differ from defaults.
pl, print_languages: pl
Prints the names of currently available languages.
pi, print_info: pi Ident
Prints information on the identifier.
-- commands that execute and show the session history
eh, execute_history: eh File
Executes commands in the file.
ph, print_history: ph
Prints the commands issued during the GF session.
The result is readable by the eh command.
HINT: write "ph | wf foo.hist" to save the history.
-- linearization, parsing, translation, and computation
l, linearize: l PattList? Tree
Shows all linearization forms of Tree by the actual grammar
(which is overridden by the -lang flag).
The pattern list has the form [P, ... ,Q] where P,...,Q follow GF
syntax for patterns. All those forms are generated that match with the
pattern list. Too short lists are filled with variables in the end.
Only the -table flag is available if a pattern list is specified.
HINT: see GF language specification for the syntax of Pattern and Term.
You can also copy and paste parsing results.
options:
-table show parameters
-struct bracketed form
-record record, i.e. explicit GF concrete syntax term
flags:
-lang linearize in this grammar
-number give this number of forms at most
-unlexer filter output through unlexer
p, parse: p String
Shows all Trees returned for String by the actual
grammar (overridden by the -lang flag), in the category S (overridden
by the -cat flag).
options:
-n non-strict: tolerates morphological errors
-ign ignore unknown words when parsing
-raw return context-free terms in raw form
-v verbose: give more information if parsing fails
flags:
-cat parse in this category
-lang parse in this grammar
-lexer filter input through this lexer
-parser use this context-free parsing method
-number return this many results at most
tt, test_tokenizer: tt String
Show the token list sent to the parser when String is parsed.
HINT: can be useful when debugging the parser.
flags:
-lexer use this lexer
cc, compute_concrete: cc Ident Term
Compute a term by concrete syntax definitions.
The identifier Ident is a resource module name
needed to resolve constant.
N.B. You need the flag -src when importing the grammar, if you want
the oper definitions to be retained after compilation; otherwise this
command does not expand oper constants.
N.B.' The resulting Term is not a term in the sense of abstract syntax,
and hence not a valid input to a Tree-demanding command.
t, translate: t Lang Lang String
Parses String in Lang1 and linearizes the resulting Trees in Lang2.
flags:
-cat
-lexer
-parser
gr, generate_random: gr
Generates a random Tree.
flags:
-cat generate in this category
-lang use the abstract syntax of this grammar
-number generate this number of trees
-depth use this number of search steps at most
ma, morphologically_analyse: ma String
Runs morphological analysis on each word in String and displays
the results line by line.
options:
-short show analyses in bracketed words, instead of separate lines
flags:
-lang
-- elementary generation of Strings and Trees
ps, put_string: ps String
Returns its argument String, like Unix echo.
HINT. The strength of ps comes from the possibility to receive the
argument from a pipeline, and altering it by the -filter flag.
flags:
-filter filter the result through this string processor
-length cut the string after this number of characters
pt, put_tree: pt Tree
Returns its argument Tree, like a specialized Unix echo.
HINT. The strength of pt comes from the possibility to receive
the argument from a pipeline, and altering it by the -transform flag.
flags:
-transform transform the result by this term processor
-number generate this number of terms at most
* st, show_tree: st Tree
Prints the tree as a string. Unlike pt, this command cannot be
used in a pipe to produce a tree, since its output is a string.
flags:
-printer show the tree in a special format (-printer=xml supported)
-- subshells
es, editing_session: es
Opens an interactive editing session.
N.B. Exit from a Fudget session is to the Unix shell, not to GF.
options:
-f Fudget GUI (necessary for Unicode; only available in X Window System)
ts, translation_session: ts
Translates input lines from any of the actual languages to any other one.
To exit, type a full stop (.) alone on a line.
N.B. Exit from a Fudget session is to the Unix shell, not to GF.
HINT: Set -parser and -lexer locally in each grammar.
options:
-f Fudget GUI (necessary for Unicode; only available in X Window System)
flags:
-cat
tq, translation_quiz: tq Lang Lang
Random-generates translation exercises from Lang1 to Lang2,
keeping score of success.
To interrupt, type a full stop (.) alone on a line.
HINT: Set -parser and -lexer locally in each grammar.
flags:
-cat
tl, translation_list: tl Lang Lang Int
Random-generates a list of Int translation exercises from Lang1 to Lang2.
HINT: use wf to save the exercises in a file.
flags:
-cat
mq, morphology_quiz: mq
Random-generates morphological exercises,
keeping score of success.
To interrupt, type a full stop (.) alone on a line.
HINT: use printname judgements in your grammar to
produce nice expressions for desired forms.
flags:
-cat
-lang
ml, morphology_list: ml Int
Random-generates a list of Int morphological exercises,
keeping score of success.
HINT: use wf to save the exercises in a file.
flags:
-cat
-lang
-- IO related commands
rf, read_file: rf File
Returns the contents of File as a String; error if File does not exist.
wf, write_file: wf File String
Writes String into File; File is created if it does not exist.
N.B. the command overwrites File without a warning.
af, append_file: af File String
Writes String into the end of File; File is created if it does not exist.
* tg, transform_grammar: tg File
Reads File, parses as a grammar,
but instead of compiling further, prints it.
The environment is not changed. When parsing the grammar, the same file
name suffixes are supported as in the i command.
HINT: use this command to print the grammar in
another format (the -printer flag); pipe it to wf to save this format.
flags:
-printer (only -printer=latex supported currently)
* cl, convert_latex: cl File
Reads File, which is expected to be in LaTeX form.
Three environments are treated in special ways:
\begGF - \end{verbatim}, which contains GF judgements,
\begTGF - \end{verbatim}, which contains a GF expression (displayed)
\begInTGF - \end{verbatim}, which contains a GF expressions (inlined).
Moreover, certain macros should be included in the file; you can
get those macros by applying 'tg -printer=latex foo.gf' to any grammar
foo.gf. Notice that the same File can be imported as a GF grammar,
consisting of all the judgements in \begGF environments.
HINT: pipe with 'wf Foo.tex' to generate a new Latex file.
sa, speak_aloud: sa String
Uses the Festival speech generator to produce speech for String.
The command supports Festival's language flag, which is sent verbatim
to Festival, e.g. -language=spanish. Omitting this flag gives the
system-dependent default voice (often British English).
flags:
-language
h, help: h
Displays this help message.
q, quit: q
Exits GF.
HINT: you can use 'ph | wf history' to save your session.
!, system_command: ! String
Issues a system command. No value is returned to GF.
-- Flags. The availability of flags is defined separately for each command.
-cat: category in which parsing is performed.
The default is S.
-depth: the search depth in e.g. random generation.
The default depends on application.
-filter: operation performed on a string. The default is identity.
-filter=identity no change
-filter=erase erase the text
-filter=take100 show the first 100 characters
-filter=length show the length of the string
-filter=text format as text (punctuation, capitalization)
-filter=code format as code (spacing, indentation)
-filter=latexfile embed in a LaTeX file
-lang: grammar used when executing a grammar-dependent command.
The default is the last-imported grammar.
-language: voice used by Festival as its --language flag in the sa command.
The default is system-dependent.
-length: the maximum number of characters shown of a string.
The default is unlimited.
-lexer: tokenization transforming a string into lexical units for a parser.
The default is words.
-lexer=words tokens are separated by spaces or newlines
-lexer=literals like words, but GF integer and string literals recognized
-lexer=vars like words, but "x","x_...","$...$" as vars, "?..." as meta
-lexer=chars each character is a token
-lexer=code use Haskell's lex
-lexer=text with conventions on punctuation and capital letters
-lexer=codelit like code, but treat unknown words as string literals
-lexer=textlit like text, but treat unknown words as string literals
-lexer=codeC use a C-like lexer
-number: the maximum number of generated items in a list.
The default is unlimited.
-parser: Context-free parsing algorithm. The default is chart.
-parser=earley Earley algorithm
-parser=chart bottom-up chart parser
-printer: format in which the grammar is printed. The default is gf.
-printer=gf GF grammar
-printer=cf context-free grammar
*-printer=happy source file for Happy parser generator
*-printer=srg speech recognition grammar
*-printer=haskell abstract syntax in Haskell, with transl to/from GF
-printer=morpho full-form lexicon, long format
*-printer=latex LaTeX file (for the tg command)
-printer=fullform full-form lexicon, short format
*-printer=xml XML: DTD for the pg command, object for st
-printer=old old GF: file readable by GF 1.2
-startcat: like -cat, but used in grammars (to avoid clash with keyword cat)
-transform: transformation performed on a syntax tree. The default is identity.
-transform=identity no change
-transform=compute compute by using definitions in the grammar
-transform=typecheck return the term only if it is type-correct
-transform=solve solve metavariables as derived refinements
-transform=context solve metavariables by unique refinements as variables
-transform=delete replace the term by metavariable
-unlexer: untokenization transforming linearization output into a string.
The default is unwords.
-unlexer=unwords space-separated token list (like unwords)
-unlexer=text format as text: punctuation, capitals, paragraph <p>
-unlexer=code format as code (spacing, indentation)
-unlexer=textlit like text, but remove string literal quotes
-unlexer=codelit like code, but remove string literal quotes
-unlexer=concat remove all spaces
-unlexer=bind like identity, but bind at "&+"
-- *: Commands and options marked with * are not yet implemented.

View File

@@ -1,6 +1,13 @@
module HelpFile where
txtHelpFile =
"\n-- GF help file updated for GF 2.0, 24/3/2004." ++
"\n-- *: Commands and options marked with * are not yet implemented." ++
"\n--" ++
"\n-- Each command has a long and a short name, options, and zero or more" ++
"\n-- arguments. Commands are sorted by functionality. The short name is" ++
"\n-- given first." ++
"\n" ++
"\n-- commands that change the state" ++
"\n" ++
"\ni, import: i File" ++
@@ -9,37 +16,42 @@ txtHelpFile =
"\n If a grammar with the same language name is already in the state," ++
"\n it is overwritten - but only if compilation succeeds. " ++
"\n The grammar parser depends on the file name suffix:" ++
"\n .gf normal GF source " ++
"\n .gfl LaTeX file with grammar in \\begGF..\\end{verbatim} environments" ++
"\n .tex LaTeX file with grammar in \\begGF..\\end{verbatim} environments" ++
"\n .gfc already optimized - skip compilation and type checking" ++
"\n .gfhc already compiled (a Haskell data object)" ++
"\n .ebnf EBNF format" ++
"\n .cf Context-free format" ++
"\n .gf normal GF source" ++
"\n .gfc canonical GF" ++
"\n .gfr precompiled GF resource " ++
"\n .gfcm multilingual canonical GF" ++
"\n *.ebnf Extended BNF format" ++
"\n *.cf Context-free (BNF) format" ++
"\n options:" ++
"\n -old old: parse in GF<2.0 format" ++
"\n -v verbose: give lots of messages " ++
"\n -s silent: don't give error messages" ++
"\n -opt perform branch-sharing optimization" ++
"\n -retain retain oper and lintype definitions" ++
"\n *-src source: ignore precompiled gfc and gfr files " ++
"\n -nocf don't build context-free grammar (thus no parser)" ++
"\n -nocheckcirc don't eliminate circular rules from CF " ++
"\n -nocirc do eliminate circ rules (default; currently just explicit ones)" ++
"\n -cflexer build an optimized parser with separate lexer trie" ++
"\n flags:" ++
"\n -lang set the name used for the grammar in the session" ++
"\n" ++
"\nrl, remove language: rl Language" ++
"\n -abs set the name used for abstract syntax (with -old option)" ++
"\n -cnc set the name used for concrete syntax (with -old option)" ++
"\n -res set the name used for resource (with -old option)" ++
"\n " ++
"\nrl, remove_language: rl Language" ++
"\n Takes away the language from the state." ++
"\n" ++
"\ne, empty state: e" ++
"\ne, empty: e" ++
"\n Takes away all languages and resets all global flags." ++
"\n" ++
"\nsf, set flags: sf Language? Flag*" ++
"\nsf, set_flags: sf Language? Flag*" ++
"\n The values of the Flags are set for Language. If no language" ++
"\n is specified, the flags are set globally." ++
"\n" ++
"\ns, strip: s" ++
"\n Prune the state by removing source and resource modules." ++
"\n" ++
"\n-- commands that give information about the state" ++
"\n" ++
"\npg, print grammar: pg" ++
"\npg, print_grammar: pg" ++
"\n Prints the actual grammar (overridden by the -lang=X flag)." ++
"\n The -printer=X flag sets the format in which the grammar is" ++
"\n written." ++
@@ -50,31 +62,29 @@ txtHelpFile =
"\n grammar in LaTeX." ++
"\n options:" ++
"\n -utf8 apply UTF8-encoding to the grammar" ++
"\n" ++
"\n flags: " ++
"\n -printer" ++
"\n -lang" ++
"\n" ++
"\npm, print_multigrammar: pm" ++
"\n Prints the current multilingual grammar into a .gfcm file." ++
"\n " ++
"\npo, print_options: po" ++
"\n Print what modules there are in the state. Also" ++
"\n prints those flag values in the current state that differ from defaults." ++
"\n" ++
"\npm, print multigrammar: pm" ++
"\n Prints the current multilingual grammar into a Haskell file" ++
"\n in a canonical format (usable by the canonical GF editor)." ++
"\n options:" ++
"\n -opt perform branch-sharing optimization (should not have been done at import)" ++
"\n" ++
"\npo, print options: po" ++
"\n Prints those flag values in the current state that differ from defaults." ++
"\n" ++
"\npl, print languages: pl" ++
"\npl, print_languages: pl" ++
"\n Prints the names of currently available languages." ++
"\n" ++
"\npi, print_info: pi Ident" ++
"\n Prints information on the identifier." ++
"\n" ++
"\n-- commands that execute and show the session history" ++
"\n" ++
"\neh, execute history: eh File" ++
"\neh, execute_history: eh File" ++
"\n Executes commands in the file." ++
"\n" ++
"\nph, print history; ph" ++
"\nph, print_history: ph" ++
"\n Prints the commands issued during the GF session." ++
"\n The result is readable by the eh command." ++
"\n HINT: write \"ph | wf foo.hist\" to save the history." ++
@@ -116,21 +126,21 @@ txtHelpFile =
"\n -parser use this context-free parsing method" ++
"\n -number return this many results at most" ++
"\n" ++
"\ntt, test tokenizer: tt String" ++
"\ntt, test_tokenizer: tt String" ++
"\n Show the token list sent to the parser when String is parsed." ++
"\n HINT: can be useful when debugging the parser." ++
"\n flags: " ++
"\n -lexer use this lexer" ++
"\n" ++
"\ncc, compute concrete: cc Term" ++
"\n Compute a term by concrete syntax definitions. " ++
"\n N.B. You need the flag -retain when importing the grammar, if you want " ++
"\ncc, compute_concrete: cc Ident Term" ++
"\n Compute a term by concrete syntax definitions." ++
"\n The identifier Ident is a resource module name " ++
"\n needed to resolve constants. " ++
"\n N.B. You need the flag -src when importing the grammar, if you want " ++
"\n the oper definitions to be retained after compilation; otherwise this" ++
"\n command does not expand oper constants." ++
"\n N.B.' The resulting Term is not a term in the sense of abstract syntax," ++
"\n and hence not a valid input to a Tree-demanding command." ++
"\n flags:" ++
"\n -lang" ++
"\n" ++
"\nt, translate: t Lang Lang String" ++
"\n Parses String in Lang1 and linearizes the resulting Trees in Lang2." ++
@@ -139,7 +149,7 @@ txtHelpFile =
"\n -lexer" ++
"\n -parser" ++
"\n" ++
"\ngr, generate random: gr" ++
"\ngr, generate_random: gr" ++
"\n Generates a random Tree." ++
"\n flags:" ++
"\n -cat generate in this category" ++
@@ -147,7 +157,7 @@ txtHelpFile =
"\n -number generate this number of trees" ++
"\n -depth use this number of search steps at most" ++
"\n" ++
"\nma, morphologically analyse: ma String" ++
"\nma, morphologically_analyse: ma String" ++
"\n Runs morphological analysis on each word in String and displays" ++
"\n the results line by line." ++
"\n options:" ++
@@ -158,43 +168,38 @@ txtHelpFile =
"\n" ++
"\n-- elementary generation of Strings and Trees" ++
"\n" ++
"\nps, put string: ps String" ++
"\nps, put_string: ps String" ++
"\n Returns its argument String, like Unix echo." ++
"\n HINT. The strength of ps comes from the possibility to receive the argument" ++
"\n from a pipeline, and altering it by the -filter flag." ++
"\n HINT. The strength of ps comes from the possibility to receive the " ++
"\n argument from a pipeline, and altering it by the -filter flag." ++
"\n flags:" ++
"\n -filter filter the result through this string processor " ++
"\n -length cut the string after this number of characters" ++
"\n" ++
"\npt, put tree: pt Tree" ++
"\npt, put_tree: pt Tree" ++
"\n Returns its argument Tree, like a specialized Unix echo." ++
"\n HINT. The strength of pt comes from the possibility to receive the argument" ++
"\n from a pipeline, and altering it by the -transform flag." ++
"\n HINT. The strength of pt comes from the possibility to receive " ++
"\n the argument from a pipeline, and altering it by the -transform flag." ++
"\n flags:" ++
"\n -transform transform the result by this term processor" ++
"\n -number generate this number of terms at most" ++
"\n" ++
"\nst, show tree: st Tree" ++
"\nst, show_tree: st Tree" ++
"\n Prints the tree as a string. Unlike pt, this command cannot be" ++
"\n used in a pipe to produce a tree, since its output is a string." ++
"\n flags:" ++
"\n -printer show the tree in a special format (-printer=xml supported)" ++
"\n" ++
"\nwt, wrap tree: wt Fun Tree" ++
"\n Returns its argument Tree wrapped in the function Fun." ++
"\n flags:" ++
"\n -c compute the resulting tree" ++
"\n" ++
"\n" ++
"\n-- subshells" ++
"\n" ++
"\nes, editing session: es" ++
"\nes, editing_session: es" ++
"\n Opens an interactive editing session." ++
"\n N.B. Exit from a Fudget session is to the Unix shell, not to GF. " ++
"\n options:" ++
"\n -f Fudget GUI (necessary for Unicode; only available in X Window System)" ++
"\n" ++
"\nts, translation session: ts" ++
"\nts, translation_session: ts" ++
"\n Translates input lines from any of the actual languages to any other one." ++
"\n To exit, type a full stop (.) alone on a line." ++
"\n N.B. Exit from a Fudget session is to the Unix shell, not to GF. " ++
@@ -204,7 +209,7 @@ txtHelpFile =
"\n flags:" ++
"\n -cat" ++
"\n" ++
"\ntq, translation quiz: tq Lang Lang" ++
"\n* tq, translation_quiz: tq Lang Lang" ++
"\n Random-generates translation exercises from Lang1 to Lang2," ++
"\n keeping score of success." ++
"\n To interrupt, type a full stop (.) alone on a line." ++
@@ -212,13 +217,13 @@ txtHelpFile =
"\n flags:" ++
"\n -cat" ++
"\n" ++
"\ntl, translation list: tl Lang Lang Int" ++
"\n* tl, translation_list: tl Lang Lang Int" ++
"\n Random-generates a list of Int translation exercises from Lang1 to Lang2." ++
"\n HINT: use wf to save the exercises in a file." ++
"\n flags:" ++
"\n -cat" ++
"\n" ++
"\nmq, morphology quiz: mq" ++
"\n* mq, morphology_quiz: mq" ++
"\n Random-generates morphological exercises," ++
"\n keeping score of success." ++
"\n To interrupt, type a full stop (.) alone on a line." ++
@@ -228,7 +233,7 @@ txtHelpFile =
"\n -cat" ++
"\n -lang" ++
"\n" ++
"\nml, morphology list: tl Int" ++
"\n* ml, morphology_list: ml Int" ++
"\n Random-generates a list of Int morphological exercises," ++
"\n keeping score of success." ++
"\n HINT: use wf to save the exercises in a file." ++
@@ -239,38 +244,39 @@ txtHelpFile =
"\n" ++
"\n-- IO related commands" ++
"\n" ++
"\nrf, read file: rf File" ++
"\nrf, read_file: rf File" ++
"\n Returns the contents of File as a String; error if File does not exist." ++
"\n" ++
"\nwf, write file: wf File String" ++
"\nwf, write_file: wf File String" ++
"\n Writes String into File; File is created if it does not exist." ++
"\n N.B. the command overwrites File without a warning." ++
"\n" ++
"\naf, append file: af File" ++
"\naf, append_file: af File" ++
"\n Writes String into the end of File; File is created if it does not exist." ++
"\n" ++
"\ntg, transform grammar: tg File" ++
"\n Reads File, parses as a grammar, but instead of compiling further, prints it. " ++
"\n* tg, transform_grammar: tg File" ++
"\n Reads File, parses as a grammar, " ++
"\n but instead of compiling further, prints it. " ++
"\n The environment is not changed. When parsing the grammar, the same file" ++
"\n name suffixes are supported as in the i command." ++
"\n HINT: use this command to print the grammar in another format (the -printer" ++
"\n flag); pipe it to wf to save this format." ++
"\n HINT: use this command to print the grammar in " ++
"\n another format (the -printer flag); pipe it to wf to save this format." ++
"\n flags:" ++
"\n -printer (only -printer=latex supported currently)" ++
"\n" ++
"\ncl, convert latex: cl File" ++
"\n* cl, convert_latex: cl File" ++
"\n Reads File, which is expected to be in LaTeX form." ++
"\n Two environments are treated in special ways:" ++
"\n \\begGF - \\end{verbatim}, which contains GF judgements," ++
"\n \\begTGF - \\end{verbatim}, which contains a GF expression (displayed), and" ++
"\n \\begInTGF - \\end{verbatim}, which contains a GF expressions (inlined)." ++
"\n Three environments are treated in special ways:" ++
"\n \\begGF - \\end{verbatim}, which contains GF judgements," ++
"\n \\begTGF - \\end{verbatim}, which contains a GF expression (displayed)" ++
"\n \\begInTGF - \\end{verbatim}, which contains a GF expressions (inlined)." ++
"\n Moreover, certain macros should be included in the file; you can" ++
"\n get those macros by applying 'tg -printer=latex foo.gf' to any grammar" ++
"\n foo.gf. Notice that the same File can be imported as a GF grammar," ++
"\n consisting of all the judgements in \\begGF environments." ++
"\n HINT: pipe with 'wf Foo.tex' to generate a new Latex file." ++
"\n" ++
"\nsa, speak aloud: sa String" ++
"\nsa, speak_aloud: sa String" ++
"\n Uses the Festival speech generator to produce speech for String." ++
"\n The command supports Festival's language flag, which is sent verbatim" ++
"\n to Festival, e.g. -language=spanish. Omitting this flag gives the " ++
@@ -285,7 +291,7 @@ txtHelpFile =
"\n Exits GF." ++
"\n HINT: you can use 'ph | wf history' to save your session." ++
"\n" ++
"\n!, system command: ! String" ++
"\n!, system_command: ! String" ++
"\n Issues a system command. No value is returned to GF." ++
"\n" ++
"\n" ++
@@ -338,21 +344,16 @@ txtHelpFile =
"\n-printer: format in which the grammar is printed. The default is gf." ++
"\n -printer=gf GF grammar" ++
"\n -printer=cf context-free grammar" ++
"\n -printer=resource resource grammar (cat+lincat, fun+lin --> oper)" ++
"\n -printer=resourcetypes resource grammar type signatures" ++
"\n -printer=resourcedefs resource grammar operation definitions" ++
"\n -printer=happy source file for Happy parser generator" ++
"\n -printer=srg speech recognition grammar" ++
"\n -printer=canon grammar compiled into a canonical form, Haskell module" ++
"\n -printer=canonOpt canonical form, with branch-sharing optimization" ++
"\n -printer=gfhs compiled grammar as Haskell data object" ++
"\n -printer=haskell abstract syntax in Haskell, with translations to/from GF" ++
"\n *-printer=happy source file for Happy parser generator" ++
"\n *-printer=srg speech recognition grammar" ++
"\n *-printer=haskell abstract syntax in Haskell, with transl to/from GF" ++
"\n -printer=morpho full-form lexicon, long format" ++
"\n -printer=latex LaTeX file (for the tg command)" ++
"\n *-printer=latex LaTeX file (for the tg command)" ++
"\n -printer=fullform full-form lexicon, short format" ++
"\n -printer=xml XML: DTD for the pg command, object for st" ++
"\n *-printer=xml XML: DTD for the pg command, object for st" ++
"\n -printer=old old GF: file readable by GF 1.2" ++
"\n" ++
"\n-startcat: like -cat, but used in grammars (to avoid clash with the keyword cat)" ++
"\n-startcat: like -cat, but used in grammars (to avoid clash with keyword cat)" ++
"\n" ++
"\n-transform: transformation performed on a syntax tree. The default is identity." ++
"\n -transform=identity no change" ++
@@ -361,16 +362,16 @@ txtHelpFile =
"\n -transform=solve solve metavariables as derived refinements" ++
"\n -transform=context solve metavariables by unique refinements as variables" ++
"\n -transform=delete replace the term by metavariable" ++
"\n -transform=predcalc generating sentences from predicate calculus formulas" ++
"\n" ++
"\n-unlexer: untokenization transforming linearization output into a string." ++
"\n The default is unwords." ++
"\n -unlexer=unwords space-separated token list (like unwords)" ++
"\n -unlexer=text format as text: punctuation, capitalization, paragraph <p>" ++
"\n -unlexer=text format as text: punctuation, capitals, paragraph <p>" ++
"\n -unlexer=code format as code (spacing, indentation)" ++
"\n -unlexer=textlit like text, but remove string literal quotes" ++
"\n -unlexer=codelit like code, but remove string literal quotes" ++
"\n -unlexer=concat remove all spaces" ++
"\n -unlexer=bind like identity, but bind at \"&+\"" ++
"\n" ++
"\n-- *: Commands and options marked with * are not yet implemented." ++
[]

View File

@@ -7,8 +7,10 @@ GHCINCLUDEGFT=-iapi -icompile -igrammar -iinfra -ishell -isource -icanonical -iu
WINDOWSINCLUDE=-ifor-windows -iapi -icompile -igrammar -iinfra -ishell -isource -icanonical -iuseGrammar -icf -iparsing -iparsers
all:
make today ; make ghc
make today ; make fud
ghc:
make nofud
fud:
$(GHMAKE) $(GHCFLAGS) $(GHCINCLUDE) $(GHCFUDFLAG) --make GF.hs -o gf2+ ; strip gf2+ ; mv gf2+ ../bin/
gft:
$(GHMAKE) $(GHCFLAGS) $(GHCINCLUDENOFUD) -itranslate --make translate/GFT.hs -o gft ; strip gft ; mv gft ../bin/
@@ -32,3 +34,5 @@ today:
runhugs util/MkToday
javac:
cd java ; javac *.java ; cd ..
help:
cd util ; runhugs MkHelpFile ; mv HelpFile.hs .. ; cd ..

View File

@@ -1 +1 @@
module Today where today = "Tue Mar 23 10:59:42 CET 2004"
module Today where today = "Wed Mar 24 16:54:35 CET 2004"