From ec8ce0243e8905f955b0ed732873f4eb866541e6 Mon Sep 17 00:00:00 2001
From: Jake Wheat <jakewheat@tutanota.com>
Date: Tue, 9 Jan 2024 00:07:47 +0000
Subject: [PATCH] switch from literate to regular haskell source

---
 Language/SQL/SimpleSQL/Combinators.hs         |  117 +
 Language/SQL/SimpleSQL/Combinators.lhs        |  107 -
 Language/SQL/SimpleSQL/Dialect.hs             |  553 ++++
 Language/SQL/SimpleSQL/Dialect.lhs            |  551 ----
 Language/SQL/SimpleSQL/Errors.hs              |   53 +
 Language/SQL/SimpleSQL/Errors.lhs             |   51 -
 Language/SQL/SimpleSQL/Lex.hs                 |  751 +++++
 Language/SQL/SimpleSQL/Lex.lhs                |  717 -----
 Language/SQL/SimpleSQL/Parse.hs               | 2288 ++++++++++++++++
 Language/SQL/SimpleSQL/Parse.lhs              | 2188 ---------------
 Language/SQL/SimpleSQL/Pretty.hs              |  848 ++++++
 Language/SQL/SimpleSQL/Pretty.lhs             |  844 ------
 Language/SQL/SimpleSQL/Syntax.hs              |  744 +++++
 Language/SQL/SimpleSQL/Syntax.lhs             |  738 -----
 Makefile                                      |   12 +-
 TODO                                          |    2 +-
 simple-sql-parser.cabal                       |    6 +-
 tools/Filter.hs                               |   35 +
 tools/Filter.lhs                              |   35 -
 tools/FilterSpaces.hs                         |   24 +
 tools/FilterSpaces.lhs                        |   24 -
 tools/Fixity.hs                               |  720 +++++
 tools/Fixity.lhs                              |  702 -----
 tools/Language/SQL/SimpleSQL/CreateIndex.hs   |   17 +
 tools/Language/SQL/SimpleSQL/CreateIndex.lhs  |   17 -
 tools/Language/SQL/SimpleSQL/CustomDialect.hs |   27 +
 .../Language/SQL/SimpleSQL/CustomDialect.lhs  |   27 -
 .../Language/SQL/SimpleSQL/EmptyStatement.hs  |   20 +
 .../Language/SQL/SimpleSQL/EmptyStatement.lhs |   20 -
 .../{ErrorMessages.lhs => ErrorMessages.hs}   |  138 +-
 tools/Language/SQL/SimpleSQL/FullQueries.hs   |   39 +
 tools/Language/SQL/SimpleSQL/FullQueries.lhs  |   39 -
 tools/Language/SQL/SimpleSQL/GroupBy.hs       |  237 ++
 tools/Language/SQL/SimpleSQL/GroupBy.lhs      |  235 --
 tools/Language/SQL/SimpleSQL/LexerTests.hs    |  343 +++
 tools/Language/SQL/SimpleSQL/LexerTests.lhs   |  335 ---
 tools/Language/SQL/SimpleSQL/MySQL.hs         |   42 +
 tools/Language/SQL/SimpleSQL/MySQL.lhs        |   40 -
 tools/Language/SQL/SimpleSQL/Odbc.hs          |   52 +
 tools/Language/SQL/SimpleSQL/Odbc.lhs         |   52 -
 tools/Language/SQL/SimpleSQL/Oracle.hs        |   29 +
 tools/Language/SQL/SimpleSQL/Oracle.lhs       |   29 -
 tools/Language/SQL/SimpleSQL/Postgres.hs      |  278 ++
 tools/Language/SQL/SimpleSQL/Postgres.lhs     |  274 --
 .../SQL/SimpleSQL/QueryExprComponents.hs      |  211 ++
 .../SQL/SimpleSQL/QueryExprComponents.lhs     |  209 --
 tools/Language/SQL/SimpleSQL/QueryExprs.hs    |   26 +
 tools/Language/SQL/SimpleSQL/QueryExprs.lhs   |   24 -
 .../SQL/SimpleSQL/SQL2011AccessControl.hs     |  329 +++
 .../SQL/SimpleSQL/SQL2011AccessControl.lhs    |  315 ---
 .../{SQL2011Bits.lhs => SQL2011Bits.hs}       |   74 +-
 ...ulation.lhs => SQL2011DataManipulation.hs} |  150 +-
 .../{SQL2011Queries.lhs => SQL2011Queries.hs} | 2426 +++++++++--------
 .../{SQL2011Schema.lhs => SQL2011Schema.hs}   | 1024 +++----
 tools/Language/SQL/SimpleSQL/ScalarExprs.hs   |  432 +++
 tools/Language/SQL/SimpleSQL/ScalarExprs.lhs  |  428 ---
 tools/Language/SQL/SimpleSQL/TableRefs.hs     |  107 +
 tools/Language/SQL/SimpleSQL/TableRefs.lhs    |  105 -
 tools/Language/SQL/SimpleSQL/TestTypes.hs     |   43 +
 tools/Language/SQL/SimpleSQL/TestTypes.lhs    |   37 -
 tools/Language/SQL/SimpleSQL/Tests.hs         |  175 ++
 tools/Language/SQL/SimpleSQL/Tests.lhs        |  171 --
 tools/Language/SQL/SimpleSQL/Tpch.hs          |  685 +++++
 tools/Language/SQL/SimpleSQL/Tpch.lhs         |  683 -----
 tools/RunTests.hs                             |    8 +
 tools/RunTests.lhs                            |    8 -
 tools/ShowErrors.hs                           |    7 +
 tools/ShowErrors.lhs                          |    7 -
 tools/SimpleSqlParserTool.hs                  |   95 +
 tools/SimpleSqlParserTool.lhs                 |   93 -
 website/AddLinks.hs                           |   34 +
 website/AddLinks.lhs                          |   34 -
 website/RenderTestCases.hs                    |   77 +
 website/RenderTestCases.lhs                   |   77 -
 74 files changed, 11498 insertions(+), 10996 deletions(-)
 create mode 100644 Language/SQL/SimpleSQL/Combinators.hs
 delete mode 100644 Language/SQL/SimpleSQL/Combinators.lhs
 create mode 100644 Language/SQL/SimpleSQL/Dialect.hs
 delete mode 100644 Language/SQL/SimpleSQL/Dialect.lhs
 create mode 100644 Language/SQL/SimpleSQL/Errors.hs
 delete mode 100644 Language/SQL/SimpleSQL/Errors.lhs
 create mode 100644 Language/SQL/SimpleSQL/Lex.hs
 delete mode 100644 Language/SQL/SimpleSQL/Lex.lhs
 create mode 100644 Language/SQL/SimpleSQL/Parse.hs
 delete mode 100644 Language/SQL/SimpleSQL/Parse.lhs
 create mode 100644 Language/SQL/SimpleSQL/Pretty.hs
 delete mode 100644 Language/SQL/SimpleSQL/Pretty.lhs
 create mode 100644 Language/SQL/SimpleSQL/Syntax.hs
 delete mode 100644 Language/SQL/SimpleSQL/Syntax.lhs
 create mode 100644 tools/Filter.hs
 delete mode 100644 tools/Filter.lhs
 create mode 100644 tools/FilterSpaces.hs
 delete mode 100644 tools/FilterSpaces.lhs
 create mode 100644 tools/Fixity.hs
 delete mode 100644 tools/Fixity.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/CreateIndex.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/CreateIndex.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/CustomDialect.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/CustomDialect.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/EmptyStatement.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/EmptyStatement.lhs
 rename tools/Language/SQL/SimpleSQL/{ErrorMessages.lhs => ErrorMessages.hs} (55%)
 create mode 100644 tools/Language/SQL/SimpleSQL/FullQueries.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/FullQueries.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/GroupBy.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/GroupBy.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/LexerTests.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/LexerTests.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/MySQL.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/MySQL.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/Odbc.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/Odbc.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/Oracle.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/Oracle.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/Postgres.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/Postgres.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/QueryExprComponents.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/QueryExprComponents.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/QueryExprs.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/QueryExprs.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/SQL2011AccessControl.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/SQL2011AccessControl.lhs
 rename tools/Language/SQL/SimpleSQL/{SQL2011Bits.lhs => SQL2011Bits.hs} (79%)
 rename tools/Language/SQL/SimpleSQL/{SQL2011DataManipulation.lhs => SQL2011DataManipulation.hs} (74%)
 rename tools/Language/SQL/SimpleSQL/{SQL2011Queries.lhs => SQL2011Queries.hs} (64%)
 rename tools/Language/SQL/SimpleSQL/{SQL2011Schema.lhs => SQL2011Schema.hs} (59%)
 create mode 100644 tools/Language/SQL/SimpleSQL/ScalarExprs.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/ScalarExprs.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/TableRefs.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/TableRefs.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/TestTypes.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/TestTypes.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/Tests.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/Tests.lhs
 create mode 100644 tools/Language/SQL/SimpleSQL/Tpch.hs
 delete mode 100644 tools/Language/SQL/SimpleSQL/Tpch.lhs
 create mode 100644 tools/RunTests.hs
 delete mode 100644 tools/RunTests.lhs
 create mode 100644 tools/ShowErrors.hs
 delete mode 100644 tools/ShowErrors.lhs
 create mode 100644 tools/SimpleSqlParserTool.hs
 delete mode 100644 tools/SimpleSqlParserTool.lhs
 create mode 100644 website/AddLinks.hs
 delete mode 100644 website/AddLinks.lhs
 create mode 100644 website/RenderTestCases.hs
 delete mode 100644 website/RenderTestCases.lhs

diff --git a/Language/SQL/SimpleSQL/Combinators.hs b/Language/SQL/SimpleSQL/Combinators.hs
new file mode 100644
index 0000000..6c9e178
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Combinators.hs
@@ -0,0 +1,117 @@
+
+-- | This module contains some generic combinators used in the
+-- parser. None of the parsing which relies on the local lexers is
+-- in this module. Some of these combinators have been taken from
+-- parser combinator libraries other than Parsec.
+
+module Language.SQL.SimpleSQL.Combinators
+    (optionSuffix
+    ,(<??>)
+    ,(<??.>)
+    ,(<??*>)
+    ,(<$$>)
+    ,(<$$$>)
+    ,(<$$$$>)
+    ,(<$$$$$>)
+    ) where
+
+import Control.Applicative ((<**>))
+import Text.Parsec (option,many)
+import Text.Parsec.String (GenParser)
+
+{-
+a possible issue with the option suffix is that it enforces left
+associativity when chaining it recursively. Have to review
+all these uses and figure out if any should be right associative
+instead, and create an alternative suffix parser
+
+This function style is not good, and should be replaced with chain and
+<??> which has a different type
+-}
+
+optionSuffix :: (a -> GenParser t s a) -> a -> GenParser t s a
+optionSuffix p a = option a (p a)
+
+
+{-
+parses an optional postfix element and applies its result to its left
+hand result, taken from uu-parsinglib
+
+TODO: make sure the precedence higher than <|> and lower than the
+other operators so it can be used nicely
+-}
+
+(<??>) :: GenParser t s a -> GenParser t s (a -> a) -> GenParser t s a
+p <??> q = p <**> option id q
+
+
+{-
+Help with left factored parsers. <$$> is like an analogy with <**>:
+
+f <$> a <*> b
+
+is like
+
+a <**> (b <$$> f)
+
+f <$> a <*> b <*> c
+
+is like
+
+a <**> (b <**> (c <$$$> f))
+-}
+
+(<$$>) :: Applicative f =>
+      f b -> (a -> b -> c) -> f (a -> c)
+(<$$>) pa c = pa <**> pure (flip c)
+
+(<$$$>) :: Applicative f =>
+          f c -> (a -> b -> c -> t) -> f (b -> a -> t)
+p <$$$> c = p <**> pure (flip3 c)
+
+(<$$$$>) :: Applicative f =>
+          f d -> (a -> b -> c -> d -> t) -> f (c -> b -> a -> t)
+p <$$$$> c = p <**> pure (flip4 c)
+
+(<$$$$$>) :: Applicative f =>
+          f e -> (a -> b -> c -> d -> e -> t) -> f (d -> c -> b -> a -> t)
+p <$$$$$> c = p <**> pure (flip5 c)
+
+{-
+Surely no-one would write code like this seriously?
+
+
+composing suffix parsers, not sure about the name. This is used to add
+a second or more suffix parser contingent on the first suffix parser
+succeeding.
+-}
+
+(<??.>) :: GenParser t s (a -> a) -> GenParser t s (a -> a) -> GenParser t s (a -> a)
+(<??.>) pa pb = (.) `c` pa <*> option id pb
+  -- todo: fix this mess
+  where c = (<$>) . flip
+
+
+-- 0 to many repeated applications of suffix parser
+
+(<??*>) :: GenParser t s a -> GenParser t s (a -> a) -> GenParser t s a
+p <??*> q = foldr ($) <$> p <*> (reverse <$> many q)
+
+
+{-
+These are to help with left factored parsers:
+
+a <**> (b <**> (c <**> pure (flip3 ctor)))
+
+Not sure the names are correct, but they follow a pattern with flip
+a <**> (b <**> pure (flip ctor))
+-}
+
+flip3 :: (a -> b -> c -> t) -> c -> b -> a -> t
+flip3 f a b c = f c b a
+
+flip4 :: (a -> b -> c -> d -> t) -> d -> c -> b -> a -> t
+flip4 f a b c d = f d c b a
+
+flip5 :: (a -> b -> c -> d -> e -> t) -> e -> d -> c -> b -> a -> t
+flip5 f a b c d e = f e d c b a
diff --git a/Language/SQL/SimpleSQL/Combinators.lhs b/Language/SQL/SimpleSQL/Combinators.lhs
deleted file mode 100644
index 446453f..0000000
--- a/Language/SQL/SimpleSQL/Combinators.lhs
+++ /dev/null
@@ -1,107 +0,0 @@
-
-> -- | This module contains some generic combinators used in the
-> -- parser. None of the parsing which relies on the local lexers is
-> -- in this module. Some of these combinators have been taken from
-> -- other parser combinator libraries other than Parsec.
-
-> module Language.SQL.SimpleSQL.Combinators
->     (optionSuffix
->     ,(<??>)
->     ,(<??.>)
->     ,(<??*>)
->     ,(<$$>)
->     ,(<$$$>)
->     ,(<$$$$>)
->     ,(<$$$$$>)
->     ) where
-
-> import Control.Applicative ((<**>))
-> import Text.Parsec (option,many)
-> import Text.Parsec.String (GenParser)
-
-a possible issue with the option suffix is that it enforces left
-associativity when chaining it recursively. Have to review
-all these uses and figure out if any should be right associative
-instead, and create an alternative suffix parser
-
-This function style is not good, and should be replaced with chain and
-<??> which has a different type
-
-> optionSuffix :: (a -> GenParser t s a) -> a -> GenParser t s a
-> optionSuffix p a = option a (p a)
-
-
-parses an optional postfix element and applies its result to its left
-hand result, taken from uu-parsinglib
-
-TODO: make sure the precedence higher than <|> and lower than the
-other operators so it can be used nicely
-
-> (<??>) :: GenParser t s a -> GenParser t s (a -> a) -> GenParser t s a
-> p <??> q = p <**> option id q
-
-
-Help with left factored parsers. <$$> is like an analogy with <**>:
-
-f <$> a <*> b
-
-is like
-
-a <**> (b <$$> f)
-
-f <$> a <*> b <*> c
-
-is like
-
-a <**> (b <**> (c <$$$> f))
-
-> (<$$>) :: Applicative f =>
->       f b -> (a -> b -> c) -> f (a -> c)
-> (<$$>) pa c = pa <**> pure (flip c)
-
-> (<$$$>) :: Applicative f =>
->           f c -> (a -> b -> c -> t) -> f (b -> a -> t)
-> p <$$$> c = p <**> pure (flip3 c)
-
-> (<$$$$>) :: Applicative f =>
->           f d -> (a -> b -> c -> d -> t) -> f (c -> b -> a -> t)
-> p <$$$$> c = p <**> pure (flip4 c)
-
-> (<$$$$$>) :: Applicative f =>
->           f e -> (a -> b -> c -> d -> e -> t) -> f (d -> c -> b -> a -> t)
-> p <$$$$$> c = p <**> pure (flip5 c)
-
-Surely no-one would write code like this seriously?
-
-
-composing suffix parsers, not sure about the name. This is used to add
-a second or more suffix parser contingent on the first suffix parser
-succeeding.
-
-> (<??.>) :: GenParser t s (a -> a) -> GenParser t s (a -> a) -> GenParser t s (a -> a)
-> (<??.>) pa pb = (.) `c` pa <*> option id pb
->   -- todo: fix this mess
->   where c = (<$>) . flip
-
-
-0 to many repeated applications of suffix parser
-
-> (<??*>) :: GenParser t s a -> GenParser t s (a -> a) -> GenParser t s a
-> p <??*> q = foldr ($) <$> p <*> (reverse <$> many q)
-
-
-These are to help with left factored parsers:
-
-a <**> (b <**> (c <**> pure (flip3 ctor)))
-
-Not sure the names are correct, but they follow a pattern with flip
-a <**> (b <**> pure (flip ctor))
-
-> flip3 :: (a -> b -> c -> t) -> c -> b -> a -> t
-> flip3 f a b c = f c b a
-
-> flip4 :: (a -> b -> c -> d -> t) -> d -> c -> b -> a -> t
-> flip4 f a b c d = f d c b a
-
-> flip5 :: (a -> b -> c -> d -> e -> t) -> e -> d -> c -> b -> a -> t
-> flip5 f a b c d e = f e d c b a
diff --git a/Language/SQL/SimpleSQL/Dialect.hs b/Language/SQL/SimpleSQL/Dialect.hs
new file mode 100644
index 0000000..4c54e11
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Dialect.hs
@@ -0,0 +1,553 @@
+
+
+-- Data types to represent different dialect options
+
+{-# LANGUAGE DeriveDataTypeable #-}
+module Language.SQL.SimpleSQL.Dialect
+    (Dialect(..)
+    ,ansi2011
+    ,mysql
+    ,postgres
+    ,oracle
+    ,sqlserver
+    ) where
+
+import Data.Data
+
+-- | Used to set the dialect used for parsing and pretty printing,
+-- very unfinished at the moment.
+--
+-- The keyword handling works as follows:
+--
+-- There is a list of reserved keywords. These will never parse as
+-- anything other than as a keyword, unless they are in one of the
+-- other lists.
+--
+-- There is a list of \'identifier\' keywords. These are reserved
+-- keywords, with an exception that they will parse as an
+-- identifier in a scalar expression. They won't parse as
+-- identifiers in other places, e.g. column names or aliases.
+--
+-- There is a list of \'app\' keywords. These are reserved keywords,
+-- with an exception that they will also parse in an \'app-like\'
+-- construct - a regular function call, or any of the aggregate and
+-- window variations.
+--
+-- There is a list of special type names. This list serves two
+-- purposes - it is a list of the reserved keywords which are also
+-- type names, and it is a list of all the multi word type names.
+--
+-- Every keyword should appear in the keywords lists, and then you can
+-- add them to the other lists if you want exceptions. Most things
+-- that refer to functions, types or variables that are keywords in
+-- the ansi standard, can be removed from the keywords lists
+-- completely with little effect. With most of the actual SQL
+-- keywords, removing them from the keyword list will result in
+-- lots of valid syntax no longer parsing (and probably bad parse
+-- error messages too).
+--
+-- In the code, all special syntax which looks identical to regular
+-- identifiers or function calls (apart from the name), is treated
+-- like a regular identifier or function call.
+--
+-- It's easy to break the parser by removing the wrong words from
+-- the keywords list or adding the wrong words to the other lists.
+
+data Dialect = Dialect
+    { -- | reserved keywords
+     diKeywords :: [String]
+      -- | keywords with identifier exception
+    ,diIdentifierKeywords :: [String]
+      -- | keywords with app exception
+    ,diAppKeywords :: [String]
+     -- | keywords with type exception plus all the type names which
+     -- are multiple words
+    ,diSpecialTypeNames :: [String]
+     -- | allow ansi fetch first syntax
+    ,diFetchFirst :: Bool
+     -- | allow limit keyword (mysql, postgres,
+     -- ...)
+    ,diLimit :: Bool
+     -- | allow parsing ODBC syntax
+    ,diOdbc :: Bool
+     -- | allow quoting identifiers with \`backquotes\`
+    ,diBackquotedIden :: Bool
+     -- | allow quoting identifiers with [square brackets]
+    ,diSquareBracketQuotedIden :: Bool
+     -- | allow identifiers with a leading at @example
+    ,diAtIdentifier :: Bool
+     -- | allow identifiers with a leading \# \#example
+    ,diHashIdentifier :: Bool
+     -- | allow positional identifiers like this: $1 
+    ,diPositionalArg :: Bool
+     -- | allow postgres style dollar strings
+    ,diDollarString :: Bool
+     -- | allow strings with an e - e"example"
+    ,diEString :: Bool
+     -- | allow postgres style symbols
+    ,diPostgresSymbols :: Bool
+     -- | allow sql server style symbols
+    ,diSqlServerSymbols :: Bool
+     -- | allow sql server style for CONVERT function in format CONVERT(data_type(length), expression, style)
+    ,diConvertFunction :: Bool
+     -- | allow creating autoincrement columns
+    ,diAutoincrement :: Bool
+     -- | allow omitting the comma between constraint clauses
+    ,diNonCommaSeparatedConstraints :: Bool
+    }
+               deriving (Eq,Show,Read,Data,Typeable)
+
+-- | ansi sql 2011 dialect
+ansi2011 :: Dialect
+ansi2011 = Dialect {diKeywords = ansi2011ReservedKeywords
+                   ,diIdentifierKeywords = []
+                   ,diAppKeywords = ["set"]
+                   ,diSpecialTypeNames = ansi2011TypeNames
+                   ,diFetchFirst = True
+                   ,diLimit = False
+                   ,diOdbc = False
+                   ,diBackquotedIden = False
+                   ,diSquareBracketQuotedIden = False
+                   ,diAtIdentifier = False
+                   ,diHashIdentifier = False
+                   ,diPositionalArg = False
+                   ,diDollarString = False
+                   ,diEString = False
+                   ,diPostgresSymbols = False
+                   ,diSqlServerSymbols = False
+                   ,diConvertFunction = False                     
+                   ,diAutoincrement = False
+                   ,diNonCommaSeparatedConstraints = False
+                   }
+
+-- | mysql dialect
+mysql :: Dialect
+mysql = addLimit ansi2011 {diFetchFirst = False
+                          ,diBackquotedIden = True
+                          }
+
+-- | postgresql dialect
+postgres :: Dialect
+postgres = addLimit ansi2011 {diPositionalArg = True
+                             ,diDollarString = True
+                             ,diEString = True
+                             ,diPostgresSymbols = True}
+
+-- | oracle dialect
+oracle :: Dialect
+oracle = ansi2011 -- {}
+
+-- | microsoft sql server dialect
+sqlserver :: Dialect
+sqlserver = ansi2011 {diSquareBracketQuotedIden = True
+                     ,diAtIdentifier = True
+                     ,diHashIdentifier = True
+                     ,diOdbc = True
+                     ,diSqlServerSymbols = True
+                     ,diConvertFunction = True}
+
+addLimit :: Dialect -> Dialect
+addLimit d = d {diKeywords = "limit": diKeywords d
+               ,diLimit = True}
+
+
+{-
+The keyword handling is quite strong - an alternative way to do it
+would be to have as few keywords as possible, and only require them
+to be quoted when this is needed to resolve a parsing ambiguity.
+
+I don't think this is a good idea for genuine keywords (it probably is
+for all the 'fake' keywords in the standard - things which are
+essentially function names, or predefined variable names, or type
+names, etc.).
+
+1. working out exactly when each keyword would need to be quoted is
+quite error prone, and might change as the parser implementation is
+maintained - which would be terrible for users
+
+2. it's not user friendly for the user to deal with a whole load of
+special cases - either something is a keyword, then you know you must
+always quote it, or it isn't, then you know you never need to quote
+it
+
+3. I think not having exceptions makes for better error messages for
+the user, and a better sql code maintenance experience.
+
+This might not match actual existing SQL products that well, some of
+which I think have idiosyncratic rules about when a keyword must be
+quoted. If you want to match one of these dialects exactly with this
+parser, I think it will be a lot of work.
+-}
+
+ansi2011ReservedKeywords :: [String]
+ansi2011ReservedKeywords =
+    [--"abs" -- function
+     "all" -- keyword only?
+    ,"allocate" -- keyword
+    ,"alter" -- keyword
+    ,"and" -- keyword
+    --,"any" -- keyword? and function
+    ,"are" -- keyword
+    ,"array" -- keyword, and used in some special places, like array[...], and array(subquery)
+    --,"array_agg" -- function
+    -- ,"array_max_cardinality" -- function
+    ,"as" -- keyword
+    ,"asensitive" -- keyword
+    ,"asymmetric" -- keyword
+    ,"at" -- keyword
+    ,"atomic" -- keyword
+    ,"authorization" -- keyword
+    --,"avg" -- function
+    ,"begin" -- keyword
+    --,"begin_frame" -- identifier
+    --,"begin_partition" -- identifier
+    ,"between" -- keyword
+    ,"bigint" -- type
+    ,"binary" -- type
+    ,"blob" -- type
+    ,"boolean" -- type
+    ,"both" -- keyword
+    ,"by" -- keyword
+    ,"call" -- keyword
+    ,"called" -- keyword
+    -- ,"cardinality" -- function + identifier?
+    ,"cascaded" -- keyword
+    ,"case" -- keyword
+    ,"cast" -- special function
+    -- ,"ceil" -- function
+    -- ,"ceiling" -- function
+    ,"char"  -- type (+ keyword?)
+    --,"char_length" -- function
+    ,"character" -- type
+    --,"character_length" -- function
+    ,"check" -- keyword
+    ,"clob" -- type
+    ,"close" -- keyword
+    -- ,"coalesce" -- function
+    ,"collate" -- keyword
+    --,"collect" -- function
+    ,"column" -- keyword
+    ,"commit" -- keyword
+    ,"condition" -- keyword
+    ,"connect" -- keyword
+    ,"constraint" --keyword
+    --,"contains" -- keyword?
+    --,"convert" -- function?
+    --,"corr" -- function
+    ,"corresponding" --keyword
+    --,"count" --function
+    --,"covar_pop" -- function
+    --,"covar_samp" --function
+    ,"create" -- keyword
+    ,"cross" -- keyword
+    ,"cube" -- keyword
+    --,"cume_dist" -- function
+    ,"current" -- keyword
+    -- ,"current_catalog" --identifier?
+    --,"current_date" -- identifier
+    --,"current_default_transform_group"  -- identifier
+    --,"current_path"  -- identifier
+    --,"current_role"  -- identifier
+    -- ,"current_row"  -- identifier
+    -- ,"current_schema"  -- identifier
+    -- ,"current_time"  -- identifier
+    --,"current_timestamp"  -- identifier
+    --,"current_transform_group_for_type"  -- identifier, or keyword?
+    --,"current_user" -- identifier
+    ,"cursor" -- keyword
+    ,"cycle" --keyword
+    ,"date" -- type
+    --,"day" -- keyword? - the parser needs it to not be a keyword to parse extract at the moment
+    ,"deallocate" -- keyword
+    ,"dec" -- type
+    ,"decimal" -- type
+    ,"declare" -- keyword
+    --,"default" -- identifier + keyword
+    ,"delete" -- keyword
+    --,"dense_rank" -- function
+    ,"deref" -- keyword
+    ,"describe"  -- keyword
+    ,"deterministic"
+    ,"disconnect"
+    ,"distinct"
+    ,"double"
+    ,"drop"
+    ,"dynamic"
+    ,"each"
+    --,"element"
+    ,"else"
+    ,"end"
+    -- ,"end_frame"  -- identifier
+    -- ,"end_partition"  -- identifier
+    ,"end-exec" -- no idea what this is
+    ,"equals"
+    ,"escape"
+    --,"every"
+    ,"except"
+    ,"exec"
+    ,"execute"
+    ,"exists"
+    ,"exp"
+    ,"external"
+    ,"extract"
+    --,"false"
+    ,"fetch"
+    ,"filter"
+    -- ,"first_value"
+    ,"float"
+    --,"floor"
+    ,"for"
+    ,"foreign"
+    -- ,"frame_row"  -- identifier
+    ,"free"
+    ,"from"
+    ,"full"
+    ,"function"
+    --,"fusion"
+    ,"get"
+    ,"global"
+    ,"grant"
+    ,"group"
+    --,"grouping"
+    ,"groups"
+    ,"having"
+    ,"hold"
+    --,"hour"
+    ,"identity"
+    ,"in"
+    ,"indicator"
+    ,"inner"
+    ,"inout"
+    ,"insensitive"
+    ,"insert"
+    ,"int"
+    ,"integer"
+    ,"intersect"
+    --,"intersection"
+    ,"interval"
+    ,"into"
+    ,"is"
+    ,"join"
+    --,"lag"
+    ,"language"
+    ,"large"
+    --,"last_value"
+    ,"lateral"
+    --,"lead"
+    ,"leading"
+    ,"left"
+    ,"like"
+    ,"like_regex"
+    --,"ln"
+    ,"local"
+    ,"localtime"
+    ,"localtimestamp"
+    --,"lower"
+    ,"match"
+    --,"max"
+    ,"member"
+    ,"merge"
+    ,"method"
+    --,"min"
+    --,"minute"
+    --,"mod"
+    ,"modifies"
+    --,"module"
+    --,"month"
+    ,"multiset"
+    ,"national"
+    ,"natural"
+    ,"nchar"
+    ,"nclob"
+    ,"new"
+    ,"no"
+    ,"none"
+    ,"normalize"
+    ,"not"
+    --,"nth_value"
+    ,"ntile"
+    --,"null"
+    --,"nullif"
+    ,"numeric"
+    ,"octet_length"
+    ,"occurrences_regex"
+    ,"of"
+    ,"offset"
+    ,"old"
+    ,"on"
+    ,"only"
+    ,"open"
+    ,"or"
+    ,"order"
+    ,"out"
+    ,"outer"
+    ,"over"
+    ,"overlaps"
+    ,"overlay"
+    ,"parameter"
+    ,"partition"
+    ,"percent"
+    --,"percent_rank"
+    --,"percentile_cont"
+    --,"percentile_disc"
+    ,"period"
+    ,"portion"
+    ,"position"
+    ,"position_regex"
+    --,"power"
+    ,"precedes"
+    ,"precision"
+    ,"prepare"
+    ,"primary"
+    ,"procedure"
+    ,"range"
+    --,"rank"
+    ,"reads"
+    ,"real"
+    ,"recursive"
+    ,"ref"
+    ,"references"
+    ,"referencing"
+    --,"regr_avgx"
+    --,"regr_avgy"
+    --,"regr_count"
+    --,"regr_intercept"
+    --,"regr_r2"
+    --,"regr_slope"
+    --,"regr_sxx"
+    --,"regr_sxy"
+    --,"regr_syy"
+    ,"release"
+    ,"result"
+    ,"return"
+    ,"returns"
+    ,"revoke"
+    ,"right"
+    ,"rollback"
+    ,"rollup"
+    --,"row"
+    --,"row_number"
+    ,"rows"
+    ,"savepoint"
+    ,"scope"
+    ,"scroll"
+    ,"search"
+    --,"second"
+    ,"select"
+    ,"sensitive"
+    --,"session_user"
+    ,"set"
+    ,"similar"
+    ,"smallint"
+    --,"some"
+    ,"specific"
+    ,"specifictype"
+    ,"sql"
+    ,"sqlexception"
+    ,"sqlstate"
+    ,"sqlwarning"
+    --,"sqrt"
+    --,"start"
+    ,"static"
+    --,"stddev_pop"
+    --,"stddev_samp"
+    ,"submultiset"
+    --,"substring"
+    ,"substring_regex"
+    ,"succeeds"
+    --,"sum"
+    ,"symmetric"
+    ,"system"
+    --,"system_time"
+    --,"system_user"
+    ,"table"
+    ,"tablesample"
+    ,"then"
+    ,"time"
+    ,"timestamp"
+    ,"timezone_hour"
+    ,"timezone_minute"
+    ,"to"
+    ,"trailing"
+    ,"translate"
+    ,"translate_regex"
+    ,"translation"
+    ,"treat"
+    ,"trigger"
+    ,"truncate"
+    --,"trim"
+    --,"trim_array"
+    --,"true"
+    ,"uescape"
+    ,"union"
+    ,"unique"
+    --,"unknown"
+    ,"unnest"
+    ,"update"
+    ,"upper"
+    --,"user"
+    ,"using"
+    --,"value"
+    ,"values"
+    ,"value_of"
+    --,"var_pop"
+    --,"var_samp"
+    ,"varbinary"
+    ,"varchar"
+    ,"varying"
+    ,"versioning"
+    ,"when"
+    ,"whenever"
+    ,"where"
+    --,"width_bucket"
+    ,"window"
+    ,"with"
+    ,"within"
+    ,"without"
+    --,"year"
+    ]
+
+
+ansi2011TypeNames :: [String]
+ansi2011TypeNames =
+    ["double precision"
+    ,"character varying"
+    ,"char varying"
+    ,"character large object"
+    ,"char large object"
+    ,"national character"
+    ,"national char"
+    ,"national character varying"
+    ,"national char varying"
+    ,"national character large object"
+    ,"nchar large object"
+    ,"nchar varying"
+    ,"bit varying"
+    ,"binary large object"
+    ,"binary varying"
+        -- reserved keyword typenames:
+    ,"array"
+    ,"bigint"
+    ,"binary"
+    ,"blob"
+    ,"boolean"
+    ,"char"
+    ,"character"
+    ,"clob"
+    ,"date"
+    ,"dec"
+    ,"decimal"
+    ,"double"
+    ,"float"
+    ,"int"
+    ,"integer"
+    ,"nchar"
+    ,"nclob"
+    ,"numeric"
+    ,"real"
+    ,"smallint"
+    ,"time"
+    ,"timestamp"
+    ,"varchar"
+    ,"varbinary"
+    ]
diff --git a/Language/SQL/SimpleSQL/Dialect.lhs b/Language/SQL/SimpleSQL/Dialect.lhs
deleted file mode 100644
index 30c2d08..0000000
--- a/Language/SQL/SimpleSQL/Dialect.lhs
+++ /dev/null
@@ -1,551 +0,0 @@
-
-
-Data types to represent different dialect options
-
-> {-# LANGUAGE DeriveDataTypeable #-}
-> module Language.SQL.SimpleSQL.Dialect
->     (Dialect(..)
->     ,ansi2011
->     ,mysql
->     ,postgres
->     ,oracle
->     ,sqlserver
->     ) where
-
-> import Data.Data
-
-> -- | Used to set the dialect used for parsing and pretty printing,
-> -- very unfinished at the moment.
-> --
-> -- The keyword handling works as follows:
-> --
-> -- There is a list of reserved keywords. These will never parse as
-> -- anything other than as a keyword, unless they are in one of the
-> -- other lists.
-> --
-> -- There is a list of \'identifier\' keywords. These are reserved
-> -- keywords, with an exception that they will parse as an
-> -- identifier in a scalar expression. They won't parse as
-> -- identifiers in other places, e.g. column names or aliases.
-> --
-> -- There is a list of \'app\' keywords. These are reserved keywords,
-> -- with an exception that they will also parse in an \'app-like\'
-> -- construct - a regular function call, or any of the aggregate and
-> -- window variations.
-> --
-> -- There is a list of special type names. This list serves two
-> -- purposes - it is a list of the reserved keywords which are also
-> -- type names, and it is a list of all the multi word type names.
-> --
-> -- Every keyword should appear in the keywords lists, and then you can
-> -- add them to the other lists if you want exceptions. Most things
-> -- that refer to functions, types or variables that are keywords in
-> -- the ansi standard, can be removed from the keywords lists
-> -- completely with little effect. With most of the actual SQL
-> -- keywords, removing them from the keyword list will result in
-> -- lots of valid syntax no longer parsing (and probably bad parse
-> -- error messages too).
-> --
-> -- In the code, all special syntax which looks identical to regular
-> -- identifiers or function calls (apart from the name), is treated
-> -- like a regular identifier or function call.
-> --
-> -- It's easy to break the parser by removing the wrong words from
-> -- the keywords list or adding the wrong words to the other lists.
->
-> data Dialect = Dialect
->     { -- | reserved keywords
->      diKeywords :: [String]
->       -- | keywords with identifier exception
->     ,diIdentifierKeywords :: [String]
->       -- | keywords with app exception
->     ,diAppKeywords :: [String]
->      -- | keywords with type exception plus all the type names which
->      -- are multiple words
->     ,diSpecialTypeNames :: [String]
->      -- | allow ansi fetch first syntax
->     ,diFetchFirst :: Bool
->      -- | allow limit keyword (mysql, postgres,
->      -- ...)
->     ,diLimit :: Bool
->      -- | allow parsing ODBC syntax
->     ,diOdbc :: Bool
->      -- | allow quoting identifiers with \`backquotes\`
->     ,diBackquotedIden :: Bool
->      -- | allow quoting identifiers with [square brackets]
->     ,diSquareBracketQuotedIden :: Bool
->      -- | allow identifiers with a leading at @example
->     ,diAtIdentifier :: Bool
->      -- | allow identifiers with a leading \# \#example
->     ,diHashIdentifier :: Bool
->      -- | allow positional identifiers like this: $1 
->     ,diPositionalArg :: Bool
->      -- | allow postgres style dollar strings
->     ,diDollarString :: Bool
->      -- | allow strings with an e - e"example"
->     ,diEString :: Bool
->      -- | allow postgres style symbols
->     ,diPostgresSymbols :: Bool
->      -- | allow sql server style symbols
->     ,diSqlServerSymbols :: Bool
->      -- | allow sql server style for CONVERT function in format CONVERT(data_type(length), expression, style)
->     ,diConvertFunction :: Bool
->      -- | allow creating autoincrement columns
->     ,diAutoincrement :: Bool
->      -- | allow omitting the comma between constraint clauses
->     ,diNonCommaSeparatedConstraints :: Bool
->     }
->                deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | ansi sql 2011 dialect
-> ansi2011 :: Dialect
-> ansi2011 = Dialect {diKeywords = ansi2011ReservedKeywords
->                    ,diIdentifierKeywords = []
->                    ,diAppKeywords = ["set"]
->                    ,diSpecialTypeNames = ansi2011TypeNames
->                    ,diFetchFirst = True
->                    ,diLimit = False
->                    ,diOdbc = False
->                    ,diBackquotedIden = False
->                    ,diSquareBracketQuotedIden = False
->                    ,diAtIdentifier = False
->                    ,diHashIdentifier = False
->                    ,diPositionalArg = False
->                    ,diDollarString = False
->                    ,diEString = False
->                    ,diPostgresSymbols = False
->                    ,diSqlServerSymbols = False
->                    ,diConvertFunction = False                     
->                    ,diAutoincrement = False
->                    ,diNonCommaSeparatedConstraints = False
->                    }
-
-> -- | mysql dialect
-> mysql :: Dialect
-> mysql = addLimit ansi2011 {diFetchFirst = False
->                           ,diBackquotedIden = True
->                           }
-
-> -- | postgresql dialect
-> postgres :: Dialect
-> postgres = addLimit ansi2011 {diPositionalArg = True
->                              ,diDollarString = True
->                              ,diEString = True
->                              ,diPostgresSymbols = True}
-
-> -- | oracle dialect
-> oracle :: Dialect
-> oracle = ansi2011 -- {}
-
-> -- | microsoft sql server dialect
-> sqlserver :: Dialect
-> sqlserver = ansi2011 {diSquareBracketQuotedIden = True
->                      ,diAtIdentifier = True
->                      ,diHashIdentifier = True
->                      ,diOdbc = True
->                      ,diSqlServerSymbols = True
->                      ,diConvertFunction = True}
-
-> addLimit :: Dialect -> Dialect
-> addLimit d = d {diKeywords = "limit": diKeywords d
->                ,diLimit = True}
-
-
-The keyword handling is quite strong - an alternative way to do it
-would be to have as few keywords as possible, and only require them
-to be quoted when this is needed to resolve a parsing ambiguity.
-
-I don't think this is a good idea for genuine keywords (it probably is
-for all the 'fake' keywords in the standard - things which are
-essentially function names, or predefined variable names, or type
-names, eetc.).
-
-1. working out exactly when each keyword would need to be quoted is
-quite error prone, and might change as the parser implementation is
-maintained - which would be terrible for users
-
-2. it's not user friendly for the user to deal with a whole load of
-special cases - either something is a keyword, then you know you must
-always quote it, or it isn't, then you know you never need to quote
-it
-
-3. I think not having exceptions makes for better error messages for
-the user, and a better sql code maintenance experience.
-
-This might not match actual existing SQL products that well, some of
-which I think have idiosyncratic rules about when a keyword must be
-quoted. If you want to match one of these dialects exactly with this
-parser, I think it will be a lot of work.
-
-> ansi2011ReservedKeywords :: [String]
-> ansi2011ReservedKeywords =
->     [--"abs" -- function
->      "all" -- keyword only?
->     ,"allocate" -- keyword
->     ,"alter" -- keyword
->     ,"and" -- keyword
->     --,"any" -- keyword? and function
->     ,"are" -- keyword
->     ,"array" -- keyword, and used in some special places, like array[...], and array(subquery)
->     --,"array_agg" -- function
->     -- ,"array_max_cardinality" -- function
->     ,"as" -- keyword
->     ,"asensitive" -- keyword
->     ,"asymmetric" -- keyword
->     ,"at" -- keyword
->     ,"atomic" -- keyword
->     ,"authorization" -- keyword
->     --,"avg" -- function
->     ,"begin" -- keyword
->     --,"begin_frame" -- identifier
->     --,"begin_partition" -- identifier
->     ,"between" -- keyword
->     ,"bigint" -- type
->     ,"binary" -- type
->     ,"blob" -- type
->     ,"boolean" -- type
->     ,"both" -- keyword
->     ,"by" -- keyword
->     ,"call" -- keyword
->     ,"called" -- keyword
->     -- ,"cardinality" -- function + identifier?
->     ,"cascaded" -- keyword
->     ,"case" -- keyword
->     ,"cast" -- special function
->     -- ,"ceil" -- function
->     -- ,"ceiling" -- function
->     ,"char"  -- type (+ keyword?)
->     --,"char_length" -- function
->     ,"character" -- type
->     --,"character_length" -- function
->     ,"check" -- keyword
->     ,"clob" -- type
->     ,"close" -- keyword
->     -- ,"coalesce" -- function
->     ,"collate" -- keyword
->     --,"collect" -- function
->     ,"column" -- keyword
->     ,"commit" -- keyword
->     ,"condition" -- keyword
->     ,"connect" -- keyword
->     ,"constraint" --keyword
->     --,"contains" -- keyword?
->     --,"convert" -- function?
->     --,"corr" -- function
->     ,"corresponding" --keyword
->     --,"count" --function
->     --,"covar_pop" -- function
->     --,"covar_samp" --function
->     ,"create" -- keyword
->     ,"cross" -- keyword
->     ,"cube" -- keyword
->     --,"cume_dist" -- function
->     ,"current" -- keyword
->     -- ,"current_catalog" --identifier?
->     --,"current_date" -- identifier
->     --,"current_default_transform_group"  -- identifier
->     --,"current_path"  -- identifier
->     --,"current_role"  -- identifier
->     -- ,"current_row"  -- identifier
->     -- ,"current_schema"  -- identifier
->     -- ,"current_time"  -- identifier
->     --,"current_timestamp"  -- identifier
->     --,"current_transform_group_for_type"  -- identifier, or keyword?
->     --,"current_user" -- identifier
->     ,"cursor" -- keyword
->     ,"cycle" --keyword
->     ,"date" -- type
->     --,"day" -- keyword? - the parser needs it to not be a keyword to parse extract at the moment
->     ,"deallocate" -- keyword
->     ,"dec" -- type
->     ,"decimal" -- type
->     ,"declare" -- keyword
->     --,"default" -- identifier + keyword
->     ,"delete" -- keyword
->     --,"dense_rank" -- functino
->     ,"deref" -- keyword
->     ,"describe"  -- keyword
->     ,"deterministic"
->     ,"disconnect"
->     ,"distinct"
->     ,"double"
->     ,"drop"
->     ,"dynamic"
->     ,"each"
->     --,"element"
->     ,"else"
->     ,"end"
->     -- ,"end_frame"  -- identifier
->     -- ,"end_partition"  -- identifier
->     ,"end-exec" -- no idea what this is
->     ,"equals"
->     ,"escape"
->     --,"every"
->     ,"except"
->     ,"exec"
->     ,"execute"
->     ,"exists"
->     ,"exp"
->     ,"external"
->     ,"extract"
->     --,"false"
->     ,"fetch"
->     ,"filter"
->     -- ,"first_value"
->     ,"float"
->     --,"floor"
->     ,"for"
->     ,"foreign"
->     -- ,"frame_row"  -- identifier
->     ,"free"
->     ,"from"
->     ,"full"
->     ,"function"
->     --,"fusion"
->     ,"get"
->     ,"global"
->     ,"grant"
->     ,"group"
->     --,"grouping"
->     ,"groups"
->     ,"having"
->     ,"hold"
->     --,"hour"
->     ,"identity"
->     ,"in"
->     ,"indicator"
->     ,"inner"
->     ,"inout"
->     ,"insensitive"
->     ,"insert"
->     ,"int"
->     ,"integer"
->     ,"intersect"
->     --,"intersection"
->     ,"interval"
->     ,"into"
->     ,"is"
->     ,"join"
->     --,"lag"
->     ,"language"
->     ,"large"
->     --,"last_value"
->     ,"lateral"
->     --,"lead"
->     ,"leading"
->     ,"left"
->     ,"like"
->     ,"like_regex"
->     --,"ln"
->     ,"local"
->     ,"localtime"
->     ,"localtimestamp"
->     --,"lower"
->     ,"match"
->     --,"max"
->     ,"member"
->     ,"merge"
->     ,"method"
->     --,"min"
->     --,"minute"
->     --,"mod"
->     ,"modifies"
->     --,"module"
->     --,"month"
->     ,"multiset"
->     ,"national"
->     ,"natural"
->     ,"nchar"
->     ,"nclob"
->     ,"new"
->     ,"no"
->     ,"none"
->     ,"normalize"
->     ,"not"
->     --,"nth_value"
->     ,"ntile"
->     --,"null"
->     --,"nullif"
->     ,"numeric"
->     ,"octet_length"
->     ,"occurrences_regex"
->     ,"of"
->     ,"offset"
->     ,"old"
->     ,"on"
->     ,"only"
->     ,"open"
->     ,"or"
->     ,"order"
->     ,"out"
->     ,"outer"
->     ,"over"
->     ,"overlaps"
->     ,"overlay"
->     ,"parameter"
->     ,"partition"
->     ,"percent"
->     --,"percent_rank"
->     --,"percentile_cont"
->     --,"percentile_disc"
->     ,"period"
->     ,"portion"
->     ,"position"
->     ,"position_regex"
->     --,"power"
->     ,"precedes"
->     ,"precision"
->     ,"prepare"
->     ,"primary"
->     ,"procedure"
->     ,"range"
->     --,"rank"
->     ,"reads"
->     ,"real"
->     ,"recursive"
->     ,"ref"
->     ,"references"
->     ,"referencing"
->     --,"regr_avgx"
->     --,"regr_avgy"
->     --,"regr_count"
->     --,"regr_intercept"
->     --,"regr_r2"
->     --,"regr_slope"
->     --,"regr_sxx"
->     --,"regr_sxy"
->     --,"regr_syy"
->     ,"release"
->     ,"result"
->     ,"return"
->     ,"returns"
->     ,"revoke"
->     ,"right"
->     ,"rollback"
->     ,"rollup"
->     --,"row"
->     --,"row_number"
->     ,"rows"
->     ,"savepoint"
->     ,"scope"
->     ,"scroll"
->     ,"search"
->     --,"second"
->     ,"select"
->     ,"sensitive"
->     --,"session_user"
->     ,"set"
->     ,"similar"
->     ,"smallint"
->     --,"some"
->     ,"specific"
->     ,"specifictype"
->     ,"sql"
->     ,"sqlexception"
->     ,"sqlstate"
->     ,"sqlwarning"
->     --,"sqrt"
->     --,"start"
->     ,"static"
->     --,"stddev_pop"
->     --,"stddev_samp"
->     ,"submultiset"
->     --,"substring"
->     ,"substring_regex"
->     ,"succeeds"
->     --,"sum"
->     ,"symmetric"
->     ,"system"
->     --,"system_time"
->     --,"system_user"
->     ,"table"
->     ,"tablesample"
->     ,"then"
->     ,"time"
->     ,"timestamp"
->     ,"timezone_hour"
->     ,"timezone_minute"
->     ,"to"
->     ,"trailing"
->     ,"translate"
->     ,"translate_regex"
->     ,"translation"
->     ,"treat"
->     ,"trigger"
->     ,"truncate"
->     --,"trim"
->     --,"trim_array"
->     --,"true"
->     ,"uescape"
->     ,"union"
->     ,"unique"
->     --,"unknown"
->     ,"unnest"
->     ,"update"
->     ,"upper"
->     --,"user"
->     ,"using"
->     --,"value"
->     ,"values"
->     ,"value_of"
->     --,"var_pop"
->     --,"var_samp"
->     ,"varbinary"
->     ,"varchar"
->     ,"varying"
->     ,"versioning"
->     ,"when"
->     ,"whenever"
->     ,"where"
->     --,"width_bucket"
->     ,"window"
->     ,"with"
->     ,"within"
->     ,"without"
->     --,"year"
->     ]
-
-
-> ansi2011TypeNames :: [String]
-> ansi2011TypeNames =
->     ["double precision"
->     ,"character varying"
->     ,"char varying"
->     ,"character large object"
->     ,"char large object"
->     ,"national character"
->     ,"national char"
->     ,"national character varying"
->     ,"national char varying"
->     ,"national character large object"
->     ,"nchar large object"
->     ,"nchar varying"
->     ,"bit varying"
->     ,"binary large object"
->     ,"binary varying"
->         -- reserved keyword typenames:
->     ,"array"
->     ,"bigint"
->     ,"binary"
->     ,"blob"
->     ,"boolean"
->     ,"char"
->     ,"character"
->     ,"clob"
->     ,"date"
->     ,"dec"
->     ,"decimal"
->     ,"double"
->     ,"float"
->     ,"int"
->     ,"integer"
->     ,"nchar"
->     ,"nclob"
->     ,"numeric"
->     ,"real"
->     ,"smallint"
->     ,"time"
->     ,"timestamp"
->     ,"varchar"
->     ,"varbinary"
->     ]
diff --git a/Language/SQL/SimpleSQL/Errors.hs b/Language/SQL/SimpleSQL/Errors.hs
new file mode 100644
index 0000000..6bd67bd
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Errors.hs
@@ -0,0 +1,53 @@
+
+-- | helpers to work with parsec errors more nicely
+module Language.SQL.SimpleSQL.Errors
+    (ParseError(..)
+    --,formatError
+    ,convParseError
+    ) where
+
+import Text.Parsec (sourceColumn,sourceLine,sourceName,errorPos)
+import qualified Text.Parsec as P (ParseError)
+
+-- | Type to represent parse errors.
+data ParseError = ParseError
+                  {peErrorString :: String
+                   -- ^ contains the error message
+                  ,peFilename :: FilePath
+                   -- ^ filename location for the error
+                  ,pePosition :: (Int,Int)
+                   -- ^ line number and column number location for the error
+                  ,peFormattedError :: String
+                   -- ^ formatted error with the position, error
+                   -- message and source context
+                  } deriving (Eq,Show)
+
+convParseError :: String -> P.ParseError -> ParseError
+convParseError src e =
+    ParseError
+    {peErrorString = show e
+    ,peFilename = sourceName p
+    ,pePosition = (sourceLine p, sourceColumn p)
+    ,peFormattedError = formatError src e}
+  where
+    p = errorPos e
+
+{-
+format the error more nicely: emacs format for positioning, plus
+context
+-}
+
+formatError :: String -> P.ParseError -> String
+formatError src e =
+    sourceName p ++ ":" ++ show (sourceLine p)
+    ++ ":" ++ show (sourceColumn p) ++ ":"
+    ++ context
+    ++ show e
+  where
+    context =
+        let lns = take 1 $ drop (sourceLine p - 1) $ lines src
+        in case lns of
+             [x] -> "\n" ++ x ++ "\n"
+                    ++ replicate (sourceColumn p - 1) ' ' ++ "^\n"
+             _ -> ""
+    p = errorPos e
diff --git a/Language/SQL/SimpleSQL/Errors.lhs b/Language/SQL/SimpleSQL/Errors.lhs
deleted file mode 100644
index a6da3a9..0000000
--- a/Language/SQL/SimpleSQL/Errors.lhs
+++ /dev/null
@@ -1,51 +0,0 @@
-
-> -- | helpers to work with parsec errors more nicely
-> module Language.SQL.SimpleSQL.Errors
->     (ParseError(..)
->     --,formatError
->     ,convParseError
->     ) where
-
-> import Text.Parsec (sourceColumn,sourceLine,sourceName,errorPos)
-> import qualified Text.Parsec as P (ParseError)
-
-> -- | Type to represent parse errors.
-> data ParseError = ParseError
->                   {peErrorString :: String
->                    -- ^ contains the error message
->                   ,peFilename :: FilePath
->                    -- ^ filename location for the error
->                   ,pePosition :: (Int,Int)
->                    -- ^ line number and column number location for the error
->                   ,peFormattedError :: String
->                    -- ^ formatted error with the position, error
->                    -- message and source context
->                   } deriving (Eq,Show)
-
-> convParseError :: String -> P.ParseError -> ParseError
-> convParseError src e =
->     ParseError
->     {peErrorString = show e
->     ,peFilename = sourceName p
->     ,pePosition = (sourceLine p, sourceColumn p)
->     ,peFormattedError = formatError src e}
->   where
->     p = errorPos e
-
-format the error more nicely: emacs format for positioning, plus
-context
-
-> formatError :: String -> P.ParseError -> String
-> formatError src e =
->     sourceName p ++ ":" ++ show (sourceLine p)
->     ++ ":" ++ show (sourceColumn p) ++ ":"
->     ++ context
->     ++ show e
->   where
->     context =
->         let lns = take 1 $ drop (sourceLine p - 1) $ lines src
->         in case lns of
->              [x] -> "\n" ++ x ++ "\n"
->                     ++ replicate (sourceColumn p - 1) ' ' ++ "^\n"
->              _ -> ""
->     p = errorPos e
diff --git a/Language/SQL/SimpleSQL/Lex.hs b/Language/SQL/SimpleSQL/Lex.hs
new file mode 100644
index 0000000..1a400e8
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Lex.hs
@@ -0,0 +1,751 @@
+
+{-
+The parser uses a separate lexer for three reasons:
+
+1. sql syntax is very awkward to parse, the separate lexer makes it
+easier to handle this in most places (in some places it makes it
+harder or impossible, the fix is to switch to something better than
+parsec)
+
+2. using a separate lexer gives a huge speed boost because it reduces
+backtracking. (We could get this by making the parsing code a lot more
+complex also.)
+
+3. we can test the lexer relatively exhaustively, then even when we
+don't do nearly as comprehensive testing on the syntax level, we still
+have a relatively high assurance of the low level of bugs. This is
+much more difficult to get parity with when testing the syntax parser
+directly without the separately testing lexing stage.
+-}
+
+-- | Lexer for SQL.
+{-# LANGUAGE TupleSections #-}
+module Language.SQL.SimpleSQL.Lex
+    (Token(..)
+    ,lexSQL
+    ,prettyToken
+    ,prettyTokens
+    ,ParseError(..)
+    ,tokenListWillPrintAndLex
+    ,ansi2011
+    ) where
+
+import Language.SQL.SimpleSQL.Dialect
+
+import Text.Parsec (option,string,manyTill,anyChar
+                   ,try,string,many1,oneOf,digit,(<|>),choice,char,eof
+                   ,many,runParser,lookAhead,satisfy
+                   ,setPosition,getPosition
+                   ,setSourceColumn,setSourceLine
+                   ,sourceName, setSourceName
+                   ,sourceLine, sourceColumn
+                   ,notFollowedBy)
+import Language.SQL.SimpleSQL.Combinators
+import Language.SQL.SimpleSQL.Errors
+import Control.Applicative hiding ((<|>), many)
+import Data.Char
+import Control.Monad
+import Prelude hiding (takeWhile)
+import Text.Parsec.String (Parser)
+import Data.Maybe
+
+
+-- | Represents a lexed token
+data Token
+    -- | A symbol (in ansi dialect) is one of the following
+    --
+    -- * multi char symbols <> \<= \>= != ||
+    -- * single char symbols: * + -  < >  ^ / %  ~ & | ? ( ) [ ] , ; ( )
+    --
+    = Symbol String
+
+    -- | This is an identifier or keyword. The first field is
+    -- the quotes used, or nothing if no quotes were used. The quotes
+    -- can be " or u& or something dialect specific like []
+    | Identifier (Maybe (String,String)) String
+
+    -- | This is a prefixed variable symbol, such as :var, @var or #var
+    -- (only :var is used in ansi dialect)
+    | PrefixedVariable Char String
+
+    -- | This is a positional arg identifier e.g. $1
+    | PositionalArg Int
+
+    -- | This is a string literal. The first two fields are the
+    -- start and end quotes, which are usually both ', but can be
+    -- the character set (one of nNbBxX, or u&, U&), or a dialect
+    -- specific string quoting (such as $$ in postgres)
+    | SqlString String String String
+
+    -- | A number literal (integral or otherwise), stored in original format
+    -- unchanged
+    | SqlNumber String
+
+    -- | Whitespace, one or more of space, tab or newline.
+    | Whitespace String
+
+    -- | A commented line using --, contains every character starting with the
+    -- \'--\' and including the terminating newline character if there is one
+    -- - this will be missing if the last line in the source is a line comment
+    -- with no trailing newline
+    | LineComment String
+
+    -- | A block comment, \/* stuff *\/, includes the comment delimiters
+    | BlockComment String
+
+      deriving (Eq,Show)
+
+
+
+-- | Pretty printing, if you lex a bunch of tokens, then pretty
+-- print them, you should get back exactly the same string
+prettyToken :: Dialect -> Token -> String
+prettyToken _ (Symbol s) = s
+prettyToken _ (Identifier Nothing t) = t
+prettyToken _ (Identifier (Just (q1,q2)) t) = q1 ++ t ++ q2
+prettyToken _ (PrefixedVariable c p) = c:p
+prettyToken _ (PositionalArg p) = '$':show p
+prettyToken _ (SqlString s e t) = s ++ t ++ e
+prettyToken _ (SqlNumber r) = r
+prettyToken _ (Whitespace t) = t
+prettyToken _ (LineComment l) = l
+prettyToken _ (BlockComment c) = c
+
+prettyTokens :: Dialect -> [Token] -> String
+prettyTokens d ts = concat $ map (prettyToken d) ts
+
+-- TODO: try to make all parsers applicative only
+
+-- | Lex some SQL to a list of tokens.
+lexSQL :: Dialect
+                  -- ^ dialect of SQL to use
+               -> FilePath
+                  -- ^ filename to use in error messages
+               -> Maybe (Int,Int)
+                  -- ^ line number and column number of the first character
+                  -- in the source to use in error messages
+               -> String
+                  -- ^ the SQL source to lex
+               -> Either ParseError [((String,Int,Int),Token)]
+lexSQL dialect fn' p src =
+    let (l',c') = fromMaybe (1,1) p
+    in either (Left . convParseError src) Right
+       $ runParser (setPos (fn',l',c') *> many (sqlToken dialect) <* eof) () fn' src
+  where
+    setPos (fn,l,c) = do
+        fmap (flip setSourceName fn
+               . flip setSourceLine l
+               . flip setSourceColumn c) getPosition
+          >>= setPosition
+
+-- | parser for a sql token
+sqlToken :: Dialect -> Parser ((String,Int,Int),Token)
+sqlToken d = do
+    p' <- getPosition
+    let p = (sourceName p',sourceLine p', sourceColumn p')
+
+{-
+The order of parsers is important: strings and quoted identifiers can
+start out looking like normal identifiers, so we try to parse these
+first and use a little bit of try. Line and block comments start like
+symbols, so we try these before symbol. Numbers can start with a . so
+this is also tried before symbol (a .1 will be parsed as a number, but
+. otherwise will be parsed as a symbol).
+-}
+
+    (p,) <$> choice [sqlString d
+                    ,identifier d
+                    ,lineComment d
+                    ,blockComment d
+                    ,sqlNumber d
+                    ,positionalArg d
+                    ,dontParseEndBlockComment d
+                    ,prefixedVariable d
+                    ,symbol d
+                    ,sqlWhitespace d]
+
+{-
+Parses identifiers:
+
+simple_identifier_23
+u&"unicode quoted identifier"
+"quoted identifier"
+"quoted identifier "" with double quote char"
+`mysql quoted identifier`
+-}
+
+identifier :: Dialect -> Parser Token
+identifier d =
+    choice
+    [quotedIden
+    ,unicodeQuotedIden
+    ,regularIden
+    ,guard (diBackquotedIden d) >> mySqlQuotedIden
+    ,guard (diSquareBracketQuotedIden d) >> sqlServerQuotedIden
+    ]
+  where
+    regularIden = Identifier Nothing <$> identifierString
+    quotedIden = Identifier (Just ("\"","\"")) <$> qidenPart
+    mySqlQuotedIden = Identifier (Just ("`","`"))
+                      <$> (char '`' *> takeWhile1 (/='`') <* char '`')
+    sqlServerQuotedIden = Identifier (Just ("[","]"))
+                          <$> (char '[' *> takeWhile1 (`notElem` "[]") <* char ']')
+    -- try is used here to avoid a conflict with identifiers
+    -- and quoted strings which also start with a 'u'
+    unicodeQuotedIden = Identifier
+                        <$> (f <$> try (oneOf "uU" <* string "&"))
+                        <*> qidenPart
+      where f x = Just (x: "&\"", "\"")
+    qidenPart = char '"' *> qidenSuffix ""
+    qidenSuffix t = do
+        s <- takeTill (=='"')
+        void $ char '"'
+        -- deal with "" as literal double quote character
+        choice [do
+                void $ char '"'
+                qidenSuffix $ concat [t,s,"\"\""]
+               ,return $ concat [t,s]]
+
+
+-- This parses a valid identifier without quotes.
+
+identifierString :: Parser String
+identifierString =
+    startsWith (\c -> c == '_' || isAlpha c) isIdentifierChar
+
+-- this can be moved to the dialect at some point
+
+isIdentifierChar :: Char -> Bool
+isIdentifierChar c = c == '_' || isAlphaNum c
+
+-- use try because : and @ can be part of other things also
+
+prefixedVariable :: Dialect -> Parser Token
+prefixedVariable  d = try $ choice
+    [PrefixedVariable <$> char ':' <*> identifierString
+    ,guard (diAtIdentifier d) >>
+     PrefixedVariable <$> char '@' <*> identifierString
+    ,guard (diHashIdentifier d) >>
+     PrefixedVariable <$> char '#' <*> identifierString
+    ]
+
+positionalArg :: Dialect -> Parser Token
+positionalArg d =
+  guard (diPositionalArg d) >>
+  -- use try to avoid ambiguities with other syntax which starts with dollar
+  PositionalArg <$> try (char '$' *> (read <$> many1 digit))
+
+
+{-
+Parse a SQL string. Examples:
+
+'basic string'
+'string with '' a quote'
+n'international text'
+b'binary string'
+x'hexadecimal string'
+-}
+
+
+sqlString :: Dialect -> Parser Token
+sqlString d = dollarString <|> csString <|> normalString
+  where
+    dollarString = do
+        guard $ diDollarString d
+        -- use try because of ambiguity with symbols and with
+        -- positional arg
+        delim <- (\x -> concat ["$",x,"$"])
+                 <$> try (char '$' *> option "" identifierString <* char '$')
+        SqlString delim delim  <$> manyTill anyChar (try $ string delim)
+    normalString = SqlString "'" "'" <$> (char '\'' *> normalStringSuffix False "")
+    normalStringSuffix allowBackslash t = do
+        s <- takeTill $ if allowBackslash
+                        then (`elem` "'\\")
+                        else (== '\'')
+        -- deal with '' or \' as literal quote character
+        choice [do
+                ctu <- choice ["''" <$ try (string "''")
+                              ,"\\'" <$ string "\\'"
+                              ,"\\" <$ char '\\']
+                normalStringSuffix allowBackslash $ concat [t,s,ctu]
+               ,concat [t,s] <$ char '\'']
+    -- try is used to avoid conflicts with
+    -- identifiers which can start with n,b,x,u
+    -- once we read the quote type and the starting '
+    -- then we commit to a string
+    -- it's possible that this will reject some valid syntax
+    -- but only pathological stuff, and I think the improved
+    -- error messages and user predictability make it a good
+    -- pragmatic choice
+    csString
+      | diEString d =
+        choice [SqlString <$> try (string "e'" <|> string "E'")
+                          <*> return "'" <*> normalStringSuffix True ""
+               ,csString']
+      | otherwise = csString'
+    csString' = SqlString
+                <$> try cs
+                <*> return "'"
+                <*> normalStringSuffix False ""
+    csPrefixes = "nNbBxX"
+    cs = choice $ (map (\x -> string ([x] ++ "'")) csPrefixes)
+                  ++ [string "u&'"
+                     ,string "U&'"]
+
+{-
+numbers
+
+digits
+digits.[digits][e[+-]digits]
+[digits].digits[e[+-]digits]
+digitse[+-]digits
+
+where digits is one or more decimal digits (0 through 9). At least one
+digit must be before or after the decimal point, if one is used. At
+least one digit must follow the exponent marker (e), if one is
+present. There cannot be any spaces or other characters embedded in
+the constant. Note that any leading plus or minus sign is not actually
+considered part of the constant; it is an operator applied to the
+constant.
+-}
+
+sqlNumber :: Dialect -> Parser Token
+sqlNumber d =
+    SqlNumber <$> completeNumber
+    -- this is for definitely avoiding possibly ambiguous source
+    <* choice [-- special case to allow e.g. 1..2
+               guard (diPostgresSymbols d)
+               *> (void $ lookAhead $ try $ string "..")
+                  <|> void (notFollowedBy (oneOf "eE."))
+              ,notFollowedBy (oneOf "eE.")
+              ]
+  where
+    completeNumber =
+      (int <??> (pp dot <??.> pp int)
+      -- try is used in case we read a dot
+      -- and it isn't part of a number
+      -- if there are any following digits, then we commit
+      -- to it being a number and not something else
+      <|> try ((++) <$> dot <*> int))
+      <??> pp expon
+
+    int = many1 digit
+    -- make sure we don't parse two adjacent dots in a number
+    -- special case for postgresql, we backtrack if we see two adjacent dots
+    -- to parse 1..2, but in other dialects we commit to the failure
+    dot = let p = string "." <* notFollowedBy (char '.')
+          in if diPostgresSymbols d
+             then try p
+             else p
+    expon = (:) <$> oneOf "eE" <*> sInt
+    sInt = (++) <$> option "" (string "+" <|> string "-") <*> int
+    pp = (<$$> (++))
+
+{-
+Symbols
+
+A symbol is an operator, or one of the misc symbols which include:
+. .. := : :: ( ) ? ; , { } (for odbc)
+
+The postgresql operator syntax allows a huge range of operators
+compared with ansi and other dialects
+-}
+
+symbol :: Dialect -> Parser Token
+symbol d  = Symbol <$> choice (concat
+   [dots
+   ,if diPostgresSymbols d
+    then postgresExtraSymbols
+    else []
+   ,miscSymbol
+   ,if diOdbc d then odbcSymbol else []
+   ,if diPostgresSymbols d
+    then generalizedPostgresqlOperator
+    else basicAnsiOps
+   ])
+ where
+   dots = [many1 (char '.')]
+   odbcSymbol = [string "{", string "}"]
+   postgresExtraSymbols =
+       [try (string ":=")
+        -- parse :: and : and avoid allowing ::: or more
+       ,try (string "::" <* notFollowedBy (char ':'))
+       ,try (string ":" <* notFollowedBy (char ':'))]
+   miscSymbol = map (string . (:[])) $
+                case () of
+                    _ | diSqlServerSymbols d -> ",;():?"
+                      | diPostgresSymbols d -> "[],;()"
+                      | otherwise -> "[],;():?"
+
+{-
+try is used because most of the first characters of the two character
+symbols can also be part of a single character symbol
+-}
+
+   basicAnsiOps = map (try . string) [">=","<=","!=","<>"]
+                  ++ map (string . (:[])) "+-^*/%~&<>="
+                  ++ pipes
+   pipes = -- what about using many1 (char '|'), then it will
+           -- fail in the parser? Not sure exactly how
+           -- standalone the lexer should be
+           [char '|' *>
+            choice ["||" <$ char '|' <* notFollowedBy (char '|')
+                   ,return "|"]]
+
+{-
+postgresql generalized operators
+
+this includes the custom operators that postgres supports,
+plus all the standard operators which could be custom operators
+according to their grammar
+
+rules
+
+An operator name is a sequence of up to NAMEDATALEN-1 (63 by default) characters from the following list:
+
++ - * / < > = ~ ! @ # % ^ & | ` ?
+
+There are a few restrictions on operator names, however:
+-- and /* cannot appear anywhere in an operator name, since they will be taken as the start of a comment.
+
+A multiple-character operator name cannot end in + or -, unless the name also contains at least one of these characters:
+
+~ ! @ # % ^ & | ` ?
+
+which allows the last character of a multi character symbol to be + or
+-
+-}
+
+generalizedPostgresqlOperator :: [Parser String]
+generalizedPostgresqlOperator = [singlePlusMinus,opMoreChars]
+  where
+    allOpSymbols = "+-*/<>=~!@#%^&|`?"
+    -- these are the symbols which, when part of a multi character
+    -- operator, permit the operator to end with a + or - symbol
+    exceptionOpSymbols = "~!@#%^&|`?"
+
+    -- special case for parsing a single + or - symbol
+    singlePlusMinus = try $ do
+      c <- oneOf "+-"
+      notFollowedBy $ oneOf allOpSymbols
+      return [c]
+
+    -- this is used when we are parsing a potentially multi symbol
+    -- operator and we have already seen one of the 'exception chars'
+    -- and so we can end with a + or -
+    moreOpCharsException = do
+       c <- oneOf (filter (`notElem` "-/*") allOpSymbols)
+            -- make sure we don't parse a comment starting token
+            -- as part of an operator
+            <|> try (char '/' <* notFollowedBy (char '*'))
+            <|> try (char '-' <* notFollowedBy (char '-'))
+            -- and make sure we don't parse a block comment end
+            -- as part of another symbol
+            <|> try (char '*' <* notFollowedBy (char '/'))
+       (c:) <$> option [] moreOpCharsException
+
+    opMoreChars = choice
+       [-- parse an exception char, now we can finish with a + -
+        (:)
+        <$> oneOf exceptionOpSymbols
+        <*> option [] moreOpCharsException
+       ,(:)
+        <$> (-- parse +, make sure it isn't the last symbol
+             try (char '+' <* lookAhead (oneOf allOpSymbols))
+             <|> -- parse -, make sure it isn't the last symbol
+                 -- or the start of a -- comment
+             try (char '-'
+                  <* notFollowedBy (char '-')
+                  <* lookAhead (oneOf allOpSymbols))
+             <|> -- parse / check it isn't the start of a /* comment
+             try (char '/' <* notFollowedBy (char '*'))
+             <|> -- make sure we don't parse */ as part of a symbol
+             try (char '*' <* notFollowedBy (char '/'))
+             <|> -- any other ansi operator symbol
+             oneOf "<>=")
+        <*> option [] opMoreChars
+       ]
+
+sqlWhitespace :: Dialect -> Parser Token
+sqlWhitespace _ = Whitespace <$> many1 (satisfy isSpace)
+
+lineComment :: Dialect -> Parser Token
+lineComment _ =
+    (\s -> LineComment $ concat ["--",s]) <$>
+    -- try is used here in case we see a - symbol
+    -- once we read two -- then we commit to the comment token
+    (try (string "--") *> (
+        -- todo: there must be a better way to do this
+     conc <$> manyTill anyChar (lookAhead lineCommentEnd) <*> lineCommentEnd))
+  where
+    conc a Nothing = a
+    conc a (Just b) = a ++ b
+    lineCommentEnd =
+        Just "\n" <$ char '\n'
+        <|> Nothing <$ eof
+
+{-
+Try is used in the block comment for the two symbol bits because we
+want to backtrack if we read the first symbol but the second symbol
+isn't there.
+-}
+
+blockComment :: Dialect -> Parser Token
+blockComment _ =
+    (\s -> BlockComment $ concat ["/*",s]) <$>
+    (try (string "/*") *> commentSuffix 0)
+  where
+    commentSuffix :: Int -> Parser String
+    commentSuffix n = do
+      -- read until a possible end comment or nested comment
+      x <- takeWhile (\e -> e /= '/' && e /= '*')
+      choice [-- close comment: if the nesting is 0, done
+              -- otherwise recurse on commentSuffix
+              try (string "*/") *> let t = concat [x,"*/"]
+                                   in if n == 0
+                                      then return t
+                                      else (\s -> concat [t,s]) <$> commentSuffix (n - 1)
+              -- nested comment, recurse
+             ,try (string "/*") *> ((\s -> concat [x,"/*",s]) <$> commentSuffix (n + 1))
+              -- not an end comment or nested comment, continue
+             ,(\c s -> x ++ [c] ++ s) <$> anyChar <*> commentSuffix n]
+
+
+{-
+This is to improve user experience: provide an error if we see */
+outside a comment. This could potentially break postgres ops with */
+in them (which is a stupid thing to do). In other cases, the user
+should write * / instead (I can't think of any cases when this would
+be valid syntax though).
+-}
+
+dontParseEndBlockComment :: Dialect -> Parser Token
+dontParseEndBlockComment _ =
+    -- don't use try, then it should commit to the error
+    try (string "*/") *> fail "comment end without comment start"
+
+
+-- Some helper combinators
+
+startsWith :: (Char -> Bool) -> (Char -> Bool) -> Parser String
+startsWith p ps = do
+  c <- satisfy p
+  choice [(:) c <$> (takeWhile1 ps)
+         ,return [c]]
+
+takeWhile1 :: (Char -> Bool) -> Parser String
+takeWhile1 p = many1 (satisfy p)
+
+takeWhile :: (Char -> Bool) -> Parser String
+takeWhile p = many (satisfy p)
+
+takeTill :: (Char -> Bool) -> Parser String
+takeTill p = manyTill anyChar (peekSatisfy p)
+
+peekSatisfy :: (Char -> Bool) -> Parser ()
+peekSatisfy p = void $ lookAhead (satisfy p)
+
+{-
+This utility function will accurately report whether, if the two tokens
+are pretty printed, they should lex back to the same two tokens. This
+function is used in testing (and can be used in other places), and
+must not be implemented by actually trying to print both tokens and
+then lex them back from a single string (because then we would have
+the risk of thinking two tokens cannot be together when there is bug
+in the lexer, which the testing is supposed to find).
+
+maybe do some quick checking to make sure this function only gives
+true negatives: check pairs which return false actually fail to lex or
+give different symbols in return: could use quickcheck for this
+
+a good sanity test for this function is to change it to always return
+true, then check that the automated tests return the same number of
+successes. I don't think it succeeds this test at the moment
+-}
+
+-- | Utility function to tell you if a list of tokens
+-- will pretty print then lex back to the same set of tokens.
+-- Used internally, might be useful for generating SQL via lexical tokens.
+tokenListWillPrintAndLex :: Dialect -> [Token] -> Bool
+tokenListWillPrintAndLex _ [] = True
+tokenListWillPrintAndLex _ [_] = True
+tokenListWillPrintAndLex d (a:b:xs) =
+    tokensWillPrintAndLex d a b && tokenListWillPrintAndLex d (b:xs)
+
+tokensWillPrintAndLex :: Dialect -> Token -> Token -> Bool
+tokensWillPrintAndLex d a b
+
+{-
+a : followed by an identifier character will look like a host param
+followed by = or : makes a different symbol
+-}
+
+    | Symbol ":" <- a
+    , checkFirstBChar (\x -> isIdentifierChar x || x `elem` ":=") = False
+
+{-
+two symbols next to each other will fail if the symbols can combine and
+(possibly just the prefix) look like a different symbol
+-}
+
+    | diPostgresSymbols d
+    , Symbol a' <- a
+    , Symbol b' <- b
+    , b' `notElem` ["+", "-"] || or (map (`elem` a') "~!@#%^&|`?") = False
+
+{-
+check two adjacent symbols in non postgres where the combination
+possibilities are much more limited. This is ansi behaviour, it might
+be different when the other dialects are done properly
+-}
+
+   | Symbol a' <- a
+   , Symbol b' <- b
+   , (a',b') `elem` [("<",">")
+                    ,("<","=")
+                    ,(">","=")
+                    ,("!","=")
+                    ,("|","|")
+                    ,("||","|")
+                    ,("|","||")
+                    ,("||","||")
+                    ,("<",">=")
+                    ] = False
+
+-- two whitespaces will be combined
+
+   | Whitespace {} <- a
+   , Whitespace {} <- b = False
+
+-- line comment without a newline at the end will eat the next token
+
+   | LineComment {} <- a
+   , checkLastAChar (/='\n') = False
+
+{-
+check the last character of the first token and the first character of
+the second token forming a comment start or end symbol
+-}
+
+   | let f '-' '-' = True
+         f '/' '*' = True
+         f '*' '/' = True
+         f _ _ = False
+     in checkBorderChars f = False
+
+{-
+a symbol will absorb a following .
+TODO: not 100% on this always being bad
+-}
+
+   | Symbol {} <- a
+   , checkFirstBChar (=='.') = False
+
+-- cannot follow a symbol ending in : with another token starting with :
+
+   | let f ':' ':' = True
+         f _ _ = False
+     in checkBorderChars f = False
+
+-- unquoted identifier followed by an identifier letter
+
+   | Identifier Nothing _ <- a
+   , checkFirstBChar isIdentifierChar = False
+
+-- a quoted identifier using ", followed by a " will fail
+
+   | Identifier (Just (_,"\"")) _ <- a
+   , checkFirstBChar (=='"') = False
+
+-- prefixed variable followed by an identifier char will be absorbed
+
+   | PrefixedVariable {} <- a
+   , checkFirstBChar isIdentifierChar = False
+
+-- a positional arg will absorb a following digit
+
+   | PositionalArg {} <- a
+   , checkFirstBChar isDigit = False
+
+-- a string ending with ' followed by a token starting with ' will be absorbed
+
+   | SqlString _ "'" _ <- a
+   , checkFirstBChar (=='\'') = False
+
+-- a number followed by a . will fail or be absorbed
+
+   | SqlNumber {} <- a
+   , checkFirstBChar (=='.') = False
+
+-- a number followed by an e or E will fail or be absorbed
+
+   | SqlNumber {} <- a
+   , checkFirstBChar (\x -> x =='e' || x == 'E') = False
+
+-- two numbers next to each other will fail or be absorbed
+
+   | SqlNumber {} <- a
+   , SqlNumber {} <- b = False
+
+
+   | otherwise = True
+
+  where
+    prettya = prettyToken d a
+    prettyb = prettyToken d b
+    -- helper function to run a predicate on the
+    -- last character of the first token and the first
+    -- character of the second token
+    checkBorderChars f
+        | (_:_) <- prettya
+        , (fb:_) <- prettyb
+        , la <- last prettya
+        = f la fb
+    checkBorderChars _ = False
+    checkFirstBChar f = case prettyb of
+                          (b':_) -> f b'
+                          _ -> False
+    checkLastAChar f = case prettya of
+                          (_:_) -> f $ last prettya
+                          _ -> False
+
+
+
+
+{-
+TODO:
+
+make the tokenswill print more dialect accurate. Maybe add symbol
+  chars and identifier chars to the dialect definition and use them from
+  here
+
+start adding negative / different parse dialect tests
+
+add token tables and tests for oracle, sql server
+review existing tables
+
+look for refactoring opportunities, especially the token
+generation tables in the tests
+
+do some user documentation on lexing, and lexing/dialects
+
+start thinking about a more separated design for the dialect handling
+
+lexing tests are starting to take a really long time, so split the
+tests so it is much easier to run all the tests except the lexing
+tests which only need to be run when working on the lexer (which
+should be relatively uncommon), or doing a commit or finishing off a
+series of commits,
+
+start writing the error message tests:
+  generate/write a large number of syntax errors
+  create a table with the source and the error message
+  try to compare some different versions of code to compare the
+    quality of the error messages by hand
+
+  get this checked in so improvements and regressions in the error
+    message quality can be tracked a little more easily (although it will
+    still be manual)
+
+try again to add annotation to the ast
+-}
diff --git a/Language/SQL/SimpleSQL/Lex.lhs b/Language/SQL/SimpleSQL/Lex.lhs
deleted file mode 100644
index 0cd940d..0000000
--- a/Language/SQL/SimpleSQL/Lex.lhs
+++ /dev/null
@@ -1,717 +0,0 @@
-
-The parser uses a separate lexer for two reasons:
-
-1. sql syntax is very awkward to parse, the separate lexer makes it
-easier to handle this in most places (in some places it makes it
-harder or impossible, the fix is to switch to something better than
-parsec)
-
-2. using a separate lexer gives a huge speed boost because it reduces
-backtracking. (We could get this by making the parsing code a lot more
-complex also.)
-
-3. we can test the lexer relatively exhaustively, then even when we
-don't do nearly as comprehensive testing on the syntax level, we still
-have a relatively high assurance of the low level of bugs. This is
-much more difficult to get parity with when testing the syntax parser
-directly without the separately testing lexing stage.
-
-> -- | Lexer for SQL.
-> {-# LANGUAGE TupleSections #-}
-> module Language.SQL.SimpleSQL.Lex
->     (Token(..)
->     ,lexSQL
->     ,prettyToken
->     ,prettyTokens
->     ,ParseError(..)
->     ,tokenListWillPrintAndLex
->     ,ansi2011
->     ) where
-
-> import Language.SQL.SimpleSQL.Dialect
-
-> import Text.Parsec (option,string,manyTill,anyChar
->                    ,try,string,many1,oneOf,digit,(<|>),choice,char,eof
->                    ,many,runParser,lookAhead,satisfy
->                    ,setPosition,getPosition
->                    ,setSourceColumn,setSourceLine
->                    ,sourceName, setSourceName
->                    ,sourceLine, sourceColumn
->                    ,notFollowedBy)
-> import Language.SQL.SimpleSQL.Combinators
-> import Language.SQL.SimpleSQL.Errors
-> import Control.Applicative hiding ((<|>), many)
-> import Data.Char
-> import Control.Monad
-> import Prelude hiding (takeWhile)
-> import Text.Parsec.String (Parser)
-> import Data.Maybe
-
-
-> -- | Represents a lexed token
-> data Token
->     -- | A symbol (in ansi dialect) is one of the following
->     --
->     -- * multi char symbols <> \<= \>= != ||
->     -- * single char symbols: * + -  < >  ^ / %  ~ & | ? ( ) [ ] , ; ( )
->     --
->     = Symbol String
->
->     -- | This is an identifier or keyword. The first field is
->     -- the quotes used, or nothing if no quotes were used. The quotes
->     -- can be " or u& or something dialect specific like []
->     | Identifier (Maybe (String,String)) String
->
->     -- | This is a prefixed variable symbol, such as :var, @var or #var
->     -- (only :var is used in ansi dialect)
->     | PrefixedVariable Char String
->
->     -- | This is a positional arg identifier e.g. $1
->     | PositionalArg Int
->
->     -- | This is a string literal. The first two fields are the --
->     -- start and end quotes, which are usually both ', but can be
->     -- the character set (one of nNbBxX, or u&, U&), or a dialect
->     -- specific string quoting (such as $$ in postgres)
->     | SqlString String String String
->
->     -- | A number literal (integral or otherwise), stored in original format
->     -- unchanged
->     | SqlNumber String
->
->     -- | Whitespace, one or more of space, tab or newline.
->     | Whitespace String
->
->     -- | A commented line using --, contains every character starting with the
->     -- \'--\' and including the terminating newline character if there is one
->     -- - this will be missing if the last line in the source is a line comment
->     -- with no trailing newline
->     | LineComment String
->
->     -- | A block comment, \/* stuff *\/, includes the comment delimiters
->     | BlockComment String
->
->       deriving (Eq,Show)
-
-
-
-> -- | Pretty printing, if you lex a bunch of tokens, then pretty
-> -- print them, should should get back exactly the same string
-> prettyToken :: Dialect -> Token -> String
-> prettyToken _ (Symbol s) = s
-> prettyToken _ (Identifier Nothing t) = t
-> prettyToken _ (Identifier (Just (q1,q2)) t) = q1 ++ t ++ q2
-> prettyToken _ (PrefixedVariable c p) = c:p
-> prettyToken _ (PositionalArg p) = '$':show p
-> prettyToken _ (SqlString s e t) = s ++ t ++ e
-> prettyToken _ (SqlNumber r) = r
-> prettyToken _ (Whitespace t) = t
-> prettyToken _ (LineComment l) = l
-> prettyToken _ (BlockComment c) = c
-
-> prettyTokens :: Dialect -> [Token] -> String
-> prettyTokens d ts = concat $ map (prettyToken d) ts
-
-TODO: try to make all parsers applicative only
-
-> -- | Lex some SQL to a list of tokens.
-> lexSQL :: Dialect
->                   -- ^ dialect of SQL to use
->                -> FilePath
->                   -- ^ filename to use in error messages
->                -> Maybe (Int,Int)
->                   -- ^ line number and column number of the first character
->                   -- in the source to use in error messages
->                -> String
->                   -- ^ the SQL source to lex
->                -> Either ParseError [((String,Int,Int),Token)]
-> lexSQL dialect fn' p src =
->     let (l',c') = fromMaybe (1,1) p
->     in either (Left . convParseError src) Right
->        $ runParser (setPos (fn',l',c') *> many (sqlToken dialect) <* eof) () fn' src
->   where
->     setPos (fn,l,c) = do
->         fmap (flip setSourceName fn
->                . flip setSourceLine l
->                . flip setSourceColumn c) getPosition
->           >>= setPosition
-
-> -- | parser for a sql token
-> sqlToken :: Dialect -> Parser ((String,Int,Int),Token)
-> sqlToken d = do
->     p' <- getPosition
->     let p = (sourceName p',sourceLine p', sourceColumn p')
-
-The order of parsers is important: strings and quoted identifiers can
-start out looking like normal identifiers, so we try to parse these
-first and use a little bit of try. Line and block comments start like
-symbols, so we try these before symbol. Numbers can start with a . so
-this is also tried before symbol (a .1 will be parsed as a number, but
-. otherwise will be parsed as a symbol).
-
->     (p,) <$> choice [sqlString d
->                     ,identifier d
->                     ,lineComment d
->                     ,blockComment d
->                     ,sqlNumber d
->                     ,positionalArg d
->                     ,dontParseEndBlockComment d
->                     ,prefixedVariable d
->                     ,symbol d
->                     ,sqlWhitespace d]
-
-Parses identifiers:
-
-simple_identifier_23
-u&"unicode quoted identifier"
-"quoted identifier"
-"quoted identifier "" with double quote char"
-`mysql quoted identifier`
-
-> identifier :: Dialect -> Parser Token
-> identifier d =
->     choice
->     [quotedIden
->     ,unicodeQuotedIden
->     ,regularIden
->     ,guard (diBackquotedIden d) >> mySqlQuotedIden
->     ,guard (diSquareBracketQuotedIden d) >> sqlServerQuotedIden
->     ]
->   where
->     regularIden = Identifier Nothing <$> identifierString
->     quotedIden = Identifier (Just ("\"","\"")) <$> qidenPart
->     mySqlQuotedIden = Identifier (Just ("`","`"))
->                       <$> (char '`' *> takeWhile1 (/='`') <* char '`')
->     sqlServerQuotedIden = Identifier (Just ("[","]"))
->                           <$> (char '[' *> takeWhile1 (`notElem` "[]") <* char ']')
->     -- try is used here to avoid a conflict with identifiers
->     -- and quoted strings which also start with a 'u'
->     unicodeQuotedIden = Identifier
->                         <$> (f <$> try (oneOf "uU" <* string "&"))
->                         <*> qidenPart
->       where f x = Just (x: "&\"", "\"")
->     qidenPart = char '"' *> qidenSuffix ""
->     qidenSuffix t = do
->         s <- takeTill (=='"')
->         void $ char '"'
->         -- deal with "" as literal double quote character
->         choice [do
->                 void $ char '"'
->                 qidenSuffix $ concat [t,s,"\"\""]
->                ,return $ concat [t,s]]
-
-
-This parses a valid identifier without quotes.
-
-> identifierString :: Parser String
-> identifierString =
->     startsWith (\c -> c == '_' || isAlpha c) isIdentifierChar
-
-this can be moved to the dialect at some point
-
-> isIdentifierChar :: Char -> Bool
-> isIdentifierChar c = c == '_' || isAlphaNum c
-
-use try because : and @ can be part of other things also
-
-> prefixedVariable :: Dialect -> Parser Token
-> prefixedVariable  d = try $ choice
->     [PrefixedVariable <$> char ':' <*> identifierString
->     ,guard (diAtIdentifier d) >>
->      PrefixedVariable <$> char '@' <*> identifierString
->     ,guard (diHashIdentifier d) >>
->      PrefixedVariable <$> char '#' <*> identifierString
->     ]
-
-> positionalArg :: Dialect -> Parser Token
-> positionalArg d =
->   guard (diPositionalArg d) >>
->   -- use try to avoid ambiguities with other syntax which starts with dollar
->   PositionalArg <$> try (char '$' *> (read <$> many1 digit))
-
-
-Parse a SQL string. Examples:
-
-'basic string'
-'string with '' a quote'
-n'international text'
-b'binary string'
-x'hexidecimal string'
-
-
-> sqlString :: Dialect -> Parser Token
-> sqlString d = dollarString <|> csString <|> normalString
->   where
->     dollarString = do
->         guard $ diDollarString d
->         -- use try because of ambiguity with symbols and with
->         -- positional arg
->         delim <- (\x -> concat ["$",x,"$"])
->                  <$> try (char '$' *> option "" identifierString <* char '$')
->         SqlString delim delim  <$> manyTill anyChar (try $ string delim)
->     normalString = SqlString "'" "'" <$> (char '\'' *> normalStringSuffix False "")
->     normalStringSuffix allowBackslash t = do
->         s <- takeTill $ if allowBackslash
->                         then (`elem` "'\\")
->                         else (== '\'')
->         -- deal with '' or \' as literal quote character
->         choice [do
->                 ctu <- choice ["''" <$ try (string "''")
->                               ,"\\'" <$ string "\\'"
->                               ,"\\" <$ char '\\']
->                 normalStringSuffix allowBackslash $ concat [t,s,ctu]
->                ,concat [t,s] <$ char '\'']
->     -- try is used to to avoid conflicts with
->     -- identifiers which can start with n,b,x,u
->     -- once we read the quote type and the starting '
->     -- then we commit to a string
->     -- it's possible that this will reject some valid syntax
->     -- but only pathalogical stuff, and I think the improved
->     -- error messages and user predictability make it a good
->     -- pragmatic choice
->     csString
->       | diEString d =
->         choice [SqlString <$> try (string "e'" <|> string "E'")
->                           <*> return "'" <*> normalStringSuffix True ""
->                ,csString']
->       | otherwise = csString'
->     csString' = SqlString
->                 <$> try cs
->                 <*> return "'"
->                 <*> normalStringSuffix False ""
->     csPrefixes = "nNbBxX"
->     cs = choice $ (map (\x -> string ([x] ++ "'")) csPrefixes)
->                   ++ [string "u&'"
->                      ,string "U&'"]
-
-numbers
-
-digits
-digits.[digits][e[+-]digits]
-[digits].digits[e[+-]digits]
-digitse[+-]digits
-
-where digits is one or more decimal digits (0 through 9). At least one
-digit must be before or after the decimal point, if one is used. At
-least one digit must follow the exponent marker (e), if one is
-present. There cannot be any spaces or other characters embedded in
-the constant. Note that any leading plus or minus sign is not actually
-considered part of the constant; it is an operator applied to the
-constant.
-
-> sqlNumber :: Dialect -> Parser Token
-> sqlNumber d =
->     SqlNumber <$> completeNumber
->     -- this is for definitely avoiding possibly ambiguous source
->     <* choice [-- special case to allow e.g. 1..2
->                guard (diPostgresSymbols d)
->                *> (void $ lookAhead $ try $ string "..")
->                   <|> void (notFollowedBy (oneOf "eE."))
->               ,notFollowedBy (oneOf "eE.")
->               ]
->   where
->     completeNumber =
->       (int <??> (pp dot <??.> pp int)
->       -- try is used in case we read a dot
->       -- and it isn't part of a number
->       -- if there are any following digits, then we commit
->       -- to it being a number and not something else
->       <|> try ((++) <$> dot <*> int))
->       <??> pp expon
-
->     int = many1 digit
->     -- make sure we don't parse two adjacent dots in a number
->     -- special case for postgresql, we backtrack if we see two adjacent dots
->     -- to parse 1..2, but in other dialects we commit to the failure
->     dot = let p = string "." <* notFollowedBy (char '.')
->           in if diPostgresSymbols d
->              then try p
->              else p
->     expon = (:) <$> oneOf "eE" <*> sInt
->     sInt = (++) <$> option "" (string "+" <|> string "-") <*> int
->     pp = (<$$> (++))
-
-Symbols
-
-A symbol is an operator, or one of the misc symbols which include:
-. .. := : :: ( ) ? ; , { } (for odbc)
-
-The postgresql operator syntax allows a huge range of operators
-compared with ansi and other dialects
-
-> symbol :: Dialect -> Parser Token
-> symbol d  = Symbol <$> choice (concat
->    [dots
->    ,if diPostgresSymbols d
->     then postgresExtraSymbols
->     else []
->    ,miscSymbol
->    ,if diOdbc d then odbcSymbol else []
->    ,if diPostgresSymbols d
->     then generalizedPostgresqlOperator
->     else basicAnsiOps
->    ])
->  where
->    dots = [many1 (char '.')]
->    odbcSymbol = [string "{", string "}"]
->    postgresExtraSymbols =
->        [try (string ":=")
->         -- parse :: and : and avoid allowing ::: or more
->        ,try (string "::" <* notFollowedBy (char ':'))
->        ,try (string ":" <* notFollowedBy (char ':'))]
->    miscSymbol = map (string . (:[])) $
->                 case () of
->                     _ | diSqlServerSymbols d -> ",;():?"
->                       | diPostgresSymbols d -> "[],;()"
->                       | otherwise -> "[],;():?"
-
-try is used because most of the first characters of the two character
-symbols can also be part of a single character symbol
-
->    basicAnsiOps = map (try . string) [">=","<=","!=","<>"]
->                   ++ map (string . (:[])) "+-^*/%~&<>="
->                   ++ pipes
->    pipes = -- what about using many1 (char '|'), then it will
->            -- fail in the parser? Not sure exactly how
->            -- standalone the lexer should be
->            [char '|' *>
->             choice ["||" <$ char '|' <* notFollowedBy (char '|')
->                    ,return "|"]]
-
-postgresql generalized operators
-
-this includes the custom operators that postgres supports,
-plus all the standard operators which could be custom operators
-according to their grammar
-
-rules
-
-An operator name is a sequence of up to NAMEDATALEN-1 (63 by default) characters from the following list:
-
-+ - * / < > = ~ ! @ # % ^ & | ` ?
-
-There are a few restrictions on operator names, however:
--- and /* cannot appear anywhere in an operator name, since they will be taken as the start of a comment.
-
-A multiple-character operator name cannot end in + or -, unless the name also contains at least one of these characters:
-
-~ ! @ # % ^ & | ` ?
-
-which allows the last character of a multi character symbol to be + or
--
-
-> generalizedPostgresqlOperator :: [Parser String]
-> generalizedPostgresqlOperator = [singlePlusMinus,opMoreChars]
->   where
->     allOpSymbols = "+-*/<>=~!@#%^&|`?"
->     -- these are the symbols when if part of a multi character
->     -- operator permit the operator to end with a + or - symbol
->     exceptionOpSymbols = "~!@#%^&|`?"
-
->     -- special case for parsing a single + or - symbol
->     singlePlusMinus = try $ do
->       c <- oneOf "+-"
->       notFollowedBy $ oneOf allOpSymbols
->       return [c]
-
->     -- this is used when we are parsing a potentially multi symbol
->     -- operator and we have alread seen one of the 'exception chars'
->     -- and so we can end with a + or -
->     moreOpCharsException = do
->        c <- oneOf (filter (`notElem` "-/*") allOpSymbols)
->             -- make sure we don't parse a comment starting token
->             -- as part of an operator
->             <|> try (char '/' <* notFollowedBy (char '*'))
->             <|> try (char '-' <* notFollowedBy (char '-'))
->             -- and make sure we don't parse a block comment end
->             -- as part of another symbol
->             <|> try (char '*' <* notFollowedBy (char '/'))
->        (c:) <$> option [] moreOpCharsException
-
->     opMoreChars = choice
->        [-- parse an exception char, now we can finish with a + -
->         (:)
->         <$> oneOf exceptionOpSymbols
->         <*> option [] moreOpCharsException
->        ,(:)
->         <$> (-- parse +, make sure it isn't the last symbol
->              try (char '+' <* lookAhead (oneOf allOpSymbols))
->              <|> -- parse -, make sure it isn't the last symbol
->                  -- or the start of a -- comment
->              try (char '-'
->                   <* notFollowedBy (char '-')
->                   <* lookAhead (oneOf allOpSymbols))
->              <|> -- parse / check it isn't the start of a /* comment
->              try (char '/' <* notFollowedBy (char '*'))
->              <|> -- make sure we don't parse */ as part of a symbol
->              try (char '*' <* notFollowedBy (char '/'))
->              <|> -- any other ansi operator symbol
->              oneOf "<>=")
->         <*> option [] opMoreChars
->        ]
-
-> sqlWhitespace :: Dialect -> Parser Token
-> sqlWhitespace _ = Whitespace <$> many1 (satisfy isSpace)
-
-> lineComment :: Dialect -> Parser Token
-> lineComment _ =
->     (\s -> LineComment $ concat ["--",s]) <$>
->     -- try is used here in case we see a - symbol
->     -- once we read two -- then we commit to the comment token
->     (try (string "--") *> (
->         -- todo: there must be a better way to do this
->      conc <$> manyTill anyChar (lookAhead lineCommentEnd) <*> lineCommentEnd))
->   where
->     conc a Nothing = a
->     conc a (Just b) = a ++ b
->     lineCommentEnd =
->         Just "\n" <$ char '\n'
->         <|> Nothing <$ eof
-
-Try is used in the block comment for the two symbol bits because we
-want to backtrack if we read the first symbol but the second symbol
-isn't there.
-
-> blockComment :: Dialect -> Parser Token
-> blockComment _ =
->     (\s -> BlockComment $ concat ["/*",s]) <$>
->     (try (string "/*") *> commentSuffix 0)
->   where
->     commentSuffix :: Int -> Parser String
->     commentSuffix n = do
->       -- read until a possible end comment or nested comment
->       x <- takeWhile (\e -> e /= '/' && e /= '*')
->       choice [-- close comment: if the nesting is 0, done
->               -- otherwise recurse on commentSuffix
->               try (string "*/") *> let t = concat [x,"*/"]
->                                    in if n == 0
->                                       then return t
->                                       else (\s -> concat [t,s]) <$> commentSuffix (n - 1)
->               -- nested comment, recurse
->              ,try (string "/*") *> ((\s -> concat [x,"/*",s]) <$> commentSuffix (n + 1))
->               -- not an end comment or nested comment, continue
->              ,(\c s -> x ++ [c] ++ s) <$> anyChar <*> commentSuffix n]
-
-
-This is to improve user experience: provide an error if we see */
-outside a comment. This could potentially break postgres ops with */
-in them (which is a stupid thing to do). In other cases, the user
-should write * / instead (I can't think of any cases when this would
-be valid syntax though).
-
-> dontParseEndBlockComment :: Dialect -> Parser Token
-> dontParseEndBlockComment _ =
->     -- don't use try, then it should commit to the error
->     try (string "*/") *> fail "comment end without comment start"
-
-
-Some helper combinators
-
-> startsWith :: (Char -> Bool) -> (Char -> Bool) -> Parser String
-> startsWith p ps = do
->   c <- satisfy p
->   choice [(:) c <$> (takeWhile1 ps)
->          ,return [c]]
-
-> takeWhile1 :: (Char -> Bool) -> Parser String
-> takeWhile1 p = many1 (satisfy p)
-
-> takeWhile :: (Char -> Bool) -> Parser String
-> takeWhile p = many (satisfy p)
-
-> takeTill :: (Char -> Bool) -> Parser String
-> takeTill p = manyTill anyChar (peekSatisfy p)
-
-> peekSatisfy :: (Char -> Bool) -> Parser ()
-> peekSatisfy p = void $ lookAhead (satisfy p)
-
-This utility function will accurately report if the two tokens are
-pretty printed, if they should lex back to the same two tokens. This
-function is used in testing (and can be used in other places), and
-must not be implemented by actually trying to print both tokens and
-then lex them back from a single string (because then we would have
-the risk of thinking two tokens cannot be together when there is bug
-in the lexer, which the testing is supposed to find).
-
-maybe do some quick checking to make sure this function only gives
-true negatives: check pairs which return false actually fail to lex or
-give different symbols in return: could use quickcheck for this
-
-a good sanity test for this function is to change it to always return
-true, then check that the automated tests return the same number of
-successes. I don't think it succeeds this test at the moment
-
-> -- | Utility function to tell you if a list of tokens
-> -- will pretty print then lex back to the same set of tokens.
-> -- Used internally, might be useful for generating SQL via lexical tokens.
-> tokenListWillPrintAndLex :: Dialect -> [Token] -> Bool
-> tokenListWillPrintAndLex _ [] = True
-> tokenListWillPrintAndLex _ [_] = True
-> tokenListWillPrintAndLex d (a:b:xs) =
->     tokensWillPrintAndLex d a b && tokenListWillPrintAndLex d (b:xs)
-
-> tokensWillPrintAndLex :: Dialect -> Token -> Token -> Bool
-> tokensWillPrintAndLex d a b
-
-a : followed by an identifier character will look like a host param
-followed by = or : makes a different symbol
-
->     | Symbol ":" <- a
->     , checkFirstBChar (\x -> isIdentifierChar x || x `elem` ":=") = False
-
-two symbols next to eachother will fail if the symbols can combine and
-(possibly just the prefix) look like a different symbol
-
->     | diPostgresSymbols d
->     , Symbol a' <- a
->     , Symbol b' <- b
->     , b' `notElem` ["+", "-"] || or (map (`elem` a') "~!@#%^&|`?") = False
-
-check two adjacent symbols in non postgres where the combination
-possibilities are much more limited. This is ansi behaviour, it might
-be different when the other dialects are done properly
-
->    | Symbol a' <- a
->    , Symbol b' <- b
->    , (a',b') `elem` [("<",">")
->                     ,("<","=")
->                     ,(">","=")
->                     ,("!","=")
->                     ,("|","|")
->                     ,("||","|")
->                     ,("|","||")
->                     ,("||","||")
->                     ,("<",">=")
->                     ] = False
-
-two whitespaces will be combined
-
->    | Whitespace {} <- a
->    , Whitespace {} <- b = False
-
-line comment without a newline at the end will eat the next token
-
->    | LineComment {} <- a
->    , checkLastAChar (/='\n') = False
-
-check the last character of the first token and the first character of
-the second token forming a comment start or end symbol
-
->    | let f '-' '-' = True
->          f '/' '*' = True
->          f '*' '/' = True
->          f _ _ = False
->      in checkBorderChars f = False
-
-a symbol will absorb a following .
-TODO: not 100% on this always being bad
-
->    | Symbol {} <- a
->    , checkFirstBChar (=='.') = False
-
-cannot follow a symbol ending in : with another token starting with :
-
->    | let f ':' ':' = True
->          f _ _ = False
->      in checkBorderChars f = False
-
-unquoted identifier followed by an identifier letter
-
->    | Identifier Nothing _ <- a
->    , checkFirstBChar isIdentifierChar = False
-
-a quoted identifier using ", followed by a " will fail
-
->    | Identifier (Just (_,"\"")) _ <- a
->    , checkFirstBChar (=='"') = False
-
-prefixed variable followed by an identifier char will be absorbed
-
->    | PrefixedVariable {} <- a
->    , checkFirstBChar isIdentifierChar = False
-
-a positional arg will absorb a following digit
-
->    | PositionalArg {} <- a
->    , checkFirstBChar isDigit = False
-
-a string ending with ' followed by a token starting with ' will be absorbed
-
->    | SqlString _ "'" _ <- a
->    , checkFirstBChar (=='\'') = False
-
-a number followed by a . will fail or be absorbed
-
->    | SqlNumber {} <- a
->    , checkFirstBChar (=='.') = False
-
-a number followed by an e or E will fail or be absorbed
-
->    | SqlNumber {} <- a
->    , checkFirstBChar (\x -> x =='e' || x == 'E') = False
-
-two numbers next to eachother will fail or be absorbed
-
->    | SqlNumber {} <- a
->    , SqlNumber {} <- b = False
-
-
->    | otherwise = True
-
->   where
->     prettya = prettyToken d a
->     prettyb = prettyToken d b
->     -- helper function to run a predicate on the
->     -- last character of the first token and the first
->     -- character of the second token
->     checkBorderChars f
->         | (_:_) <- prettya
->         , (fb:_) <- prettyb
->         , la <- last prettya
->         = f la fb
->     checkBorderChars _ = False
->     checkFirstBChar f = case prettyb of
->                           (b':_) -> f b'
->                           _ -> False
->     checkLastAChar f = case prettya of
->                           (_:_) -> f $ last prettya
->                           _ -> False
-
-
-
-
-TODO:
-
-make the tokenswill print more dialect accurate. Maybe add symbol
-  chars and identifier chars to the dialect definition and use them from
-  here
-
-start adding negative / different parse dialect tests
-
-add token tables and tests for oracle, sql server
-review existing tables
-
-look for refactoring opportunities, especially the token
-generation tables in the tests
-
-do some user documentation on lexing, and lexing/dialects
-
-start thinking about a more separated design for the dialect handling
-
-lexing tests are starting to take a really long time, so split the
-tests so it is much easier to run all the tests except the lexing
-tests which only need to be run when working on the lexer (which
-should be relatively uncommon), or doing a commit or finishing off a
-series of commits,
-
-start writing the error message tests:
-  generate/write a large number of syntax errors
-  create a table with the source and the error message
-  try to compare some different versions of code to compare the
-    quality of the error messages by hand
-
-  get this checked in so improvements and regressions in the error
-    message quality can be tracked a little more easily (although it will
-    still be manual)
-
-try again to add annotation to the ast
diff --git a/Language/SQL/SimpleSQL/Parse.hs b/Language/SQL/SimpleSQL/Parse.hs
new file mode 100644
index 0000000..1813911
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Parse.hs
@@ -0,0 +1,2288 @@
+
+{-
+= TOC:
+
+notes
+Public api
+Names - parsing identifiers
+Typenames
+Scalar expressions
+  simple literals
+  star, param
+  parens expression, row constructor and scalar subquery
+  case, cast, exists, unique, array/ multiset constructor
+  typed literal, app, special function, aggregate, window function
+  suffixes: in, between, quantified comparison, match predicate, array
+    subscript, escape, collate
+  operators
+  scalar expression top level
+  helpers
+query expressions
+  select lists
+  from clause
+  other table expression clauses:
+    where, group by, having, order by, offset and fetch
+  common table expressions
+  query expression
+  set operations
+lexers
+utilities
+
+= Notes about the code
+
+The lexers appear at the bottom of the file. There is an attempt at a clear
+separation between the lexers and the other parsers, which only use the
+lexers; this isn't 100% complete at the moment and needs fixing.
+
+== Left factoring
+
+The parsing code is aggressively left factored, and try is avoided as
+much as possible. Try is avoided because:
+
+ * when it is overused it makes the code hard to follow
+ * when it is overused it makes the parsing code harder to debug
+ * it makes the parser error messages much worse
+
+The code could be made a bit simpler with a few extra 'trys', but this
+isn't done because of the impact on the parser error
+messages. Apparently it can also help the speed but this hasn't been
+looked into.
+
+== Parser error messages
+
+A lot of care has been given to generating good parser error messages
+for invalid syntax. There are a few utils below which partially help
+in this area.
+
+There is a set of crafted bad expressions in ErrorMessages.hs; these
+are used to gauge the quality of the error messages and monitor
+regressions by hand. The use of <?> is limited as much as possible:
+each instance should justify itself by improving an actual error
+message.
+
+There is also a plan to write a really simple expression parser which
+doesn't do precedence and associativity, and then fix these with a pass
+over the ast. I don't think there is any other way to sanely handle
+the common prefixes between many infix and postfix multiple keyword
+operators, and some other ambiguities also. This should help a lot in
+generating good error messages also.
+
+Both the left factoring and error message work are greatly complicated
+by the large number of shared prefixes of the various elements in SQL
+syntax.
+
+== Main left factoring issues
+
+There are three big areas which are tricky to left factor:
+
+ * typenames
+ * scalar expressions which can start with an identifier
+ * infix and suffix operators
+
+=== typenames
+
+There are a number of variations of typename syntax. The standard
+deals with this by switching on the name of the type which is parsed
+first. This code doesn't do this currently, but might in the
+future. Taking the approach in the standard grammar will limit the
+extensibility of the parser and might affect the ease of adapting to
+support other sql dialects.
+
+=== identifier scalar expressions
+
+There are a lot of scalar expression nodes which start with
+identifiers, and can't be distinguished until the tokens after the
+initial identifier are parsed. Using try to implement these variations is very
+simple but makes the code much harder to debug and makes the parser
+error messages really bad.
+
+Here is a list of these nodes:
+
+ * identifiers
+ * function application
+ * aggregate application
+ * window application
+ * typed literal: typename 'literal string'
+ * interval literal which is like the typed literal with some extras
+
+There is further ambiguity e.g. with typed literals with precision,
+functions, aggregates, etc. - these are an identifier, followed by
+parens comma separated scalar expressions or something similar, and it
+is only later that we can find a token which tells us which flavour it
+is.
+
+There is also a set of nodes which start with an identifier/keyword
+but can commit since no other syntax can start the same way:
+
+ * case
+ * cast
+ * exists, unique subquery
+ * array constructor
+ * multiset constructor
+ * all the special syntax functions: extract, position, substring,
+  convert, translate, overlay, trim, etc.
+
+The interval literal mentioned above is treated in this group at the
+moment: if we see 'interval' we parse it either as a full interval
+literal or a typed literal only.
+
+Some items in this list might have to be fixed in the future, e.g. to
+support standard 'substring(a from 3 for 5)' as well as regular
+function substring syntax 'substring(a,3,5)' at the same time.
+
+The work in left factoring all this is mostly done, but there is still
+a substantial bit to complete and this is by far the most difficult
+bit. At the moment, the work around is to use try, the downsides of
+which is the poor parsing error messages.
+
+=== infix and suffix operators
+
+== permissiveness
+
+The parser is very permissive in many ways. This departs from the
+standard which is able to eliminate a number of possibilities just in
+the grammar, which this parser allows. This is done for a number of
+reasons:
+
+ * it makes the parser simple - less variations
+ * it should allow for dialects and extensibility more easily in the
+  future (e.g. new infix binary operators with custom precedence)
+ * many things which are effectively checked in the grammar in the
+  standard, can be checked using a typechecker or other simple static
+  analysis
+
+To use this code as a front end for a sql engine, or as a sql validity
+checker, you will need to do a lot of checks on the ast. A
+typechecker/static checker plus annotation to support being a compiler
+front end is planned but not likely to happen too soon.
+
+Some of the areas this affects:
+
+typenames: the variation of the type name should switch on the actual
+name given according to the standard, but this code only does this for
+the special case of interval type names. E.g. you can write 'int
+collate C' or 'int(15,2)' and this will parse as a character type name
+or a precision scale type name instead of being rejected.
+
+scalar expressions: every variation on scalar expressions uses the same
+parser/syntax. This means we don't try to stop non boolean valued
+expressions in boolean valued contexts in the parser. Another area
+this affects is that we allow general scalar expressions in group by,
+whereas the standard only allows column names with optional collation.
+
+These are all areas which are specified (roughly speaking) in the
+syntax rather than the semantics in the standard, and we are not
+fixing them in the syntax but leaving them till the semantic checking
+(which doesn't exist in this code at this time).
+-}
+
+{-# LANGUAGE TupleSections #-}
+-- | This is the module with the parser functions.
+module Language.SQL.SimpleSQL.Parse
+    (parseQueryExpr
+    ,parseScalarExpr
+    ,parseStatement
+    ,parseStatements
+    ,ParseError(..)) where
+
+import Control.Monad.Identity (Identity)
+import Control.Monad (guard, void)
+import Control.Applicative ((<**>))
+import Data.Char (toLower, isDigit)
+import Text.Parsec (setPosition,setSourceColumn,setSourceLine,getPosition
+                   ,option,between,sepBy,sepBy1
+                   ,try,many,many1,(<|>),choice,eof
+                   ,optionMaybe,optional,runParser
+                   ,chainl1, chainr1,(<?>))
+import Text.Parsec.Perm (permute,(<$?>), (<|?>))
+import Text.Parsec.Prim (getState, token)
+import Text.Parsec.Pos (newPos)
+import qualified Text.Parsec.Expr as E
+import Data.List (intercalate,sort,groupBy)
+import Data.Function (on)
+import Data.Maybe
+import Text.Parsec.String (GenParser)
+
+import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.Combinators
+import Language.SQL.SimpleSQL.Errors
+import Language.SQL.SimpleSQL.Dialect
+import qualified Language.SQL.SimpleSQL.Lex as L
+
+
+-- = Public API
+
+-- | Parses a query expr, trailing semicolon optional.
+parseQueryExpr :: Dialect
+                  -- ^ dialect of SQL to use
+               -> FilePath
+                  -- ^ filename to use in error messages
+               -> Maybe (Int,Int)
+                  -- ^ line number and column number of the first character
+                  -- in the source to use in error messages
+               -> String
+                  -- ^ the SQL source to parse
+               -> Either ParseError QueryExpr
+parseQueryExpr = wrapParse topLevelQueryExpr
+
+-- | Parses a statement, trailing semicolon optional.
+parseStatement :: Dialect
+                  -- ^ dialect of SQL to use
+               -> FilePath
+                  -- ^ filename to use in error messages
+               -> Maybe (Int,Int)
+                  -- ^ line number and column number of the first character
+                  -- in the source to use in error messages
+               -> String
+                  -- ^ the SQL source to parse
+               -> Either ParseError Statement
+parseStatement = wrapParse topLevelStatement
+
+
+-- | Parses a list of statements, with semi colons between
+-- them. The final semicolon is optional.
+parseStatements :: Dialect
+                  -- ^ dialect of SQL to use
+                -> FilePath
+                   -- ^ filename to use in error messages
+                -> Maybe (Int,Int)
+                   -- ^ line number and column number of the first character
+                   -- in the source to use in error messages
+                -> String
+                   -- ^ the SQL source to parse
+                -> Either ParseError [Statement]
+parseStatements = wrapParse statements
+
+-- | Parses a scalar expression.
+parseScalarExpr :: Dialect
+                   -- ^ dialect of SQL to use
+                -> FilePath
+                   -- ^ filename to use in error messages
+                -> Maybe (Int,Int)
+                   -- ^ line number and column number of the first character
+                   -- in the source to use in error messages
+                -> String
+                   -- ^ the SQL source to parse
+                -> Either ParseError ScalarExpr
+parseScalarExpr = wrapParse scalarExpr
+
+{-
+This helper function takes the parser given and:
+
+sets the position when parsing
+automatically skips leading whitespace
+checks the parser parses all the input using eof
+converts the error return to the nice wrapper
+-}
+
+wrapParse :: Parser a
+          -> Dialect
+          -> FilePath
+          -> Maybe (Int,Int)
+          -> String
+          -> Either ParseError a
+wrapParse parser d f p src = do
+    let (l,c) = fromMaybe (1,1) p
+    lx <- L.lexSQL d f (Just (l,c)) src
+    either (Left . convParseError src) Right
+      $ runParser (setPos p *> parser <* eof)
+                  d f $ filter keep lx
+  where
+    setPos Nothing = pure ()
+    setPos (Just (l,c)) = fmap up getPosition >>= setPosition
+      where up = flip setSourceColumn c . flip setSourceLine l
+    keep (_,L.Whitespace {}) = False
+    keep (_,L.LineComment {}) = False
+    keep (_,L.BlockComment {}) = False
+    keep _ = True
+
+
+{-
+------------------------------------------------
+
+= Names
+
+Names represent identifiers and a few other things. The parser here
+handles regular identifiers, dotted chain identifiers, quoted
+identifiers and unicode quoted identifiers.
+
+Dots: dots in identifier chains are parsed here and represented in the
+Iden constructor usually. If parts of the chains are non identifier
+scalar expressions, then this is represented by a BinOp "."
+instead. Dotted chain identifiers which appear in other contexts (such
+as function names, table names) are represented as [Name] only.
+
+Identifier grammar:
+
+unquoted:
+underscore <|> letter : many (underscore <|> alphanum)
+
+example
+_example123
+
+quoted:
+
+double quote, many (non quote character or two double quotes
+together), double quote
+
+"example quoted"
+"example with "" quote"
+
+unicode quoted is the same as quoted in this parser, except it starts
+with U& or u&
+
+u&"example quoted"
+-}
+
+name :: Parser Name
+name = do
+    d <- getState
+    uncurry Name <$> identifierTok (blacklist d)
+
+-- todo: replace (:[]) with a named function all over
+
+names :: Parser [Name]
+names = reverse <$> (((:[]) <$> name) <??*> anotherName)
+  -- can't use a simple chain here since we
+  -- want to wrap the . + name in a try
+  -- this will change when this is left factored
+  where
+    anotherName :: Parser ([Name] -> [Name])
+    anotherName = try ((:) <$> (symbol "." *> name))
+
+{-
+= Type Names
+
+Typenames are used in casts, and also in the typed literal syntax,
+which is a typename followed by a string literal.
+
+Here are the grammar notes:
+
+== simple type name
+
+just an identifier chain or a multi word identifier (this is a fixed
+list of possibilities, e.g. as 'character varying', see below in the
+parser code for the exact list).
+
+<simple-type-name> ::= <identifier-chain>
+     | multiword-type-identifier
+
+== Precision type name
+
+<precision-type-name> ::= <simple-type-name> <left paren> <unsigned-int> <right paren>
+
+e.g. char(5)
+
+note: above and below every where a simple type name can appear, this
+means a single identifier/quoted or a dotted chain, or a multi word
+identifier
+
+== Precision scale type name
+
+<precision-type-name> ::= <simple-type-name> <left paren> <unsigned-int> <comma> <unsigned-int> <right paren>
+
+e.g. decimal(15,2)
+
+== Lob type name
+
+this is a variation on the precision type name with some extra info on
+the units:
+
+<lob-type-name> ::=
+   <simple-type-name> <left paren> <unsigned integer> [ <multiplier> ] [ <char length units> ] <right paren>
+
+<multiplier>    ::=   K | M | G
+<char length units>    ::=   CHARACTERS | CODE_UNITS | OCTETS
+
+(if both multiplier and char length units are missing, then this will
+parse as a precision type name)
+
+e.g.
+clob(5M octets)
+
+== char type name
+
+this is a simple type with optional precision which allows the
+character set or the collation to appear as a suffix:
+
+<char type name> ::=
+    <simple type name>
+    [ <left paren> <unsigned-int> <right paren> ]
+    [ CHARACTER SET <identifier chain> ]
+    [ COLLATE <identifier chain> ]
+
+e.g.
+
+char(5) character set my_charset collate my_collation
+
+= Time typename
+
+this is typename with optional precision and either 'with time zone'
+or 'without time zone' suffix, e.g.:
+
+<datetime type> ::=
+    [ <left paren> <unsigned-int> <right paren> ]
+    <with or without time zone>
+<with or without time zone> ::= WITH TIME ZONE | WITHOUT TIME ZONE
+
+
+= row type name
+
+<row type> ::=
+    ROW <left paren> <field definition> [ { <comma> <field definition> }... ] <right paren>
+
+<field definition> ::= <identifier> <type name>
+
+e.g.
+row(a int, b char(5))
+
+= interval type name
+
+<interval type> ::= INTERVAL <interval datetime field> [TO <interval datetime field>]
+
+<interval datetime field> ::=
+  <datetime field> [ <left paren> <unsigned int> [ <comma> <unsigned int> ] <right paren> ]
+
+= array type name
+
+<array type> ::= <data type> ARRAY [ <left bracket> <unsigned integer> <right bracket> ]
+
+= multiset type name
+
+<multiset type>    ::=   <data type> MULTISET
+
+A type name will parse into the 'smallest' constructor it will fit in
+syntactically, e.g. a clob(5) will parse to a precision type name, not
+a lob type name.
+
+Unfortunately, to improve the error messages, there is a lot of (left)
+factoring in this function, and it is a little dense.
+-}
+
+typeName :: Parser TypeName
+typeName =
+    (rowTypeName <|> intervalTypeName <|> otherTypeName)
+    <??*> tnSuffix
+  where
+    rowTypeName =
+        RowTypeName <$> (keyword_ "row" *> parens (commaSep1 rowField))
+    rowField = (,) <$> name <*> typeName
+    ----------------------------
+    intervalTypeName =
+        keyword_ "interval" *>
+        (uncurry IntervalTypeName <$> intervalQualifier)
+    ----------------------------
+    otherTypeName =
+        nameOfType <**>
+            (typeNameWithParens
+             <|> pure Nothing <**> (timeTypeName <|> charTypeName)
+             <|> pure TypeName)
+    nameOfType = reservedTypeNames <|> names
+    charTypeName = charSet <**> (option [] tcollate <$$$$> CharTypeName)
+                   <|> pure [] <**> (tcollate <$$$$> CharTypeName)
+    typeNameWithParens =
+        (openParen *> unsignedInteger)
+        <**> (closeParen *> precMaybeSuffix
+              <|> (precScaleTypeName <|> precLengthTypeName) <* closeParen)
+    precMaybeSuffix = (. Just) <$> (timeTypeName <|> charTypeName)
+                      <|> pure (flip PrecTypeName)
+    precScaleTypeName = (comma *> unsignedInteger) <$$$> PrecScaleTypeName
+    precLengthTypeName =
+        Just <$> lobPrecSuffix
+        <**> (optionMaybe lobUnits <$$$$> PrecLengthTypeName)
+        <|> pure Nothing <**> ((Just <$> lobUnits) <$$$$> PrecLengthTypeName)
+    timeTypeName = tz <$$$> TimeTypeName
+    ----------------------------
+    lobPrecSuffix = PrecK <$ keyword_ "k"
+                    <|> PrecM <$ keyword_ "m"
+                    <|> PrecG <$ keyword_ "g"
+                    <|> PrecT <$ keyword_ "t"
+                    <|> PrecP <$ keyword_ "p"
+    lobUnits = PrecCharacters <$ keyword_ "characters"
+               -- char and byte are the oracle spelling
+               -- todo: move these to oracle dialect
+               <|> PrecCharacters <$ keyword_ "char"
+               <|> PrecOctets <$ keyword_ "octets"
+               <|> PrecOctets <$ keyword_ "byte"
+    tz = True <$ keywords_ ["with", "time","zone"]
+         <|> False <$ keywords_ ["without", "time","zone"]
+    charSet = keywords_ ["character", "set"] *> names
+    tcollate = keyword_ "collate" *> names
+    ----------------------------
+    tnSuffix = multiset <|> array
+    multiset = MultisetTypeName <$ keyword_ "multiset"
+    array = keyword_ "array" *>
+        (optionMaybe (brackets unsignedInteger) <$$> ArrayTypeName)
+    ----------------------------
+    -- this parser handles the fixed set of multi word
+    -- type names, plus all the type names which are
+    -- reserved words
+    reservedTypeNames = do
+        d <- getState
+        (:[]) . Name Nothing . unwords <$> makeKeywordTree (diSpecialTypeNames d)
+        
+
+{-
+= Scalar expressions
+
+== simple literals
+
+See the stringToken lexer below for notes on string literal syntax.
+-}
+
+stringLit :: Parser ScalarExpr
+stringLit = (\(s,e,t) -> StringLit s e t) <$> stringTokExtend
+
+numberLit :: Parser ScalarExpr
+numberLit = NumLit <$> sqlNumberTok False
+
+simpleLiteral :: Parser ScalarExpr
+simpleLiteral = numberLit <|> stringLit
+
+{-
+== star, param, host param
+
+=== star
+
+used in select *, select x.*, and agg(*) variations, and some other
+places as well. The parser doesn't attempt to check that the star is
+in a valid context, it parses it OK in any scalar expression context.
+-}
+
+star :: Parser ScalarExpr
+star = Star <$ symbol "*"
+
+{-
+== parameter
+
+unnamed parameter or named parameter
+use in e.g. select * from t where a = ?
+select x from t where x > :param
+-}
+
+parameter :: Parser ScalarExpr
+parameter = choice
+    [Parameter <$ questionMark
+    ,HostParameter
+     <$> hostParamTok
+     <*> optionMaybe (keyword "indicator" *> hostParamTok)]
+
+-- == positional arg
+
+positionalArg :: Parser ScalarExpr
+positionalArg = PositionalArg <$> positionalArgTok
+
+{-
+== parens
+
+scalar expression parens, row ctor and scalar subquery
+-}
+
+parensExpr :: Parser ScalarExpr
+parensExpr = parens $ choice
+    [SubQueryExpr SqSq <$> queryExpr
+    ,ctor <$> commaSep1 scalarExpr]
+  where
+    ctor [a] = Parens a
+    ctor as = SpecialOp [Name Nothing "rowctor"] as
+
+{-
+== case, cast, exists, unique, array/multiset constructor, interval
+
+All of these start with a fixed keyword which is reserved, so no other
+syntax can start with the same keyword.
+
+=== case expression
+-}
+
+caseExpr :: Parser ScalarExpr
+caseExpr =
+    Case <$> (keyword_ "case" *> optionMaybe scalarExpr)
+         <*> many1 whenClause
+         <*> optionMaybe elseClause
+         <* keyword_ "end"
+  where
+   whenClause = (,) <$> (keyword_ "when" *> commaSep1 scalarExpr)
+                    <*> (keyword_ "then" *> scalarExpr)
+   elseClause = keyword_ "else" *> scalarExpr
+
+{-
+=== cast
+
+cast: cast(expr as type)
+-}
+
+cast :: Parser ScalarExpr
+cast = keyword_ "cast" *>
+       parens (Cast <$> scalarExpr
+                    <*> (keyword_ "as" *> typeName))
+
+{-
+=== convert
+
+convertSqlServer: SqlServer dialect CONVERT(data_type(length), expression, style)
+-}
+
+convertSqlServer :: Parser ScalarExpr
+convertSqlServer = guardDialect diConvertFunction
+                   *> keyword_ "convert" *>
+                   parens (Convert <$> typeName <*> (comma *> scalarExpr)
+                      <*> optionMaybe (comma *> unsignedInteger))
+
+{-
+=== exists, unique
+
+subquery expression:
+[exists|unique] (queryexpr)
+-}
+
+subquery :: Parser ScalarExpr
+subquery = SubQueryExpr <$> sqkw <*> parens queryExpr
+  where
+    sqkw = SqExists <$ keyword_ "exists" <|> SqUnique <$ keyword_ "unique"
+
+-- === array/multiset constructor
+
+arrayCtor :: Parser ScalarExpr
+arrayCtor = keyword_ "array" >>
+    choice
+    [ArrayCtor <$> parens queryExpr
+    ,Array (Iden [Name Nothing "array"]) <$> brackets (commaSep scalarExpr)]
+
+{-
+As far as I can tell, table(query expr) is just syntax sugar for
+multiset(query expr). It must be there for compatibility or something.
+-}
+
+multisetCtor :: Parser ScalarExpr
+multisetCtor =
+    choice
+    [keyword_ "multiset" >>
+     choice
+     [MultisetQueryCtor <$> parens queryExpr
+     ,MultisetCtor <$> brackets (commaSep scalarExpr)]
+    ,keyword_ "table" >>
+     MultisetQueryCtor <$> parens queryExpr]
+
+nextValueFor :: Parser ScalarExpr
+nextValueFor = keywords_ ["next","value","for"] >>
+    NextValueFor <$> names
+
+{-
+=== interval
+
+interval literals are a special case and we follow the grammar less
+permissively here
+
+parse SQL interval literals, something like
+interval '5' day (3)
+or
+interval '5' month
+
+if the literal looks like this:
+interval 'something'
+
+then it is parsed as a regular typed literal. It must have a
+interval-datetime-field suffix to parse as an intervallit
+
+It uses try because of a conflict with interval type names: todo, fix
+this. also fix the monad -> applicative
+-}
+
+-- | Parse an interval literal such as @interval '5' day (3)@. Without
+-- a datetime-field suffix (@interval 'something'@) it parses as a
+-- regular typed literal instead; a leading sign is only allowed when
+-- the qualifier is present. Wrapped in try because of a conflict with
+-- interval type names (see the comment above).
+intervalLit :: Parser ScalarExpr
+intervalLit = try (keyword_ "interval" >> do
+    s <- optionMaybe $ choice [Plus <$ symbol_ "+"
+                              ,Minus <$ symbol_ "-"]
+    lit <- singleQuotesOnlyStringTok
+    q <- optionMaybe intervalQualifier
+    mkIt s lit q)
+  where
+    -- no sign, no qualifier: fall back to a plain typed literal
+    mkIt Nothing val Nothing = pure $ TypedLit (TypeName [Name Nothing "interval"]) val
+    mkIt s val (Just (a,b)) = pure $ IntervalLit s val a b
+    mkIt (Just {}) _val Nothing = fail "cannot use sign without interval qualifier"
+
+{-
+== typed literal, app, special, aggregate, window, iden
+
+All of these start with identifiers (some of the special functions
+start with reserved keywords).
+
+they are all variations on suffixes on the basic identifier parser
+
+The windows is a suffix on the app parser
+
+=== iden prefix term
+
+all the scalar expressions which start with an identifier
+
+(todo: really put all of them here instead of just some of them)
+-}
+
+-- | Scalar expressions which start with an identifier: typed literals
+-- (@typename 'string'@), plain identifiers, and function application,
+-- plus dialect-specific keywords that may act as idens or apps.
+idenExpr :: Parser ScalarExpr
+idenExpr =
+    -- todo: work out how to left factor this
+    try (TypedLit <$> typeName <*> singleQuotesOnlyStringTok)
+    <|> (names <**> option Iden app)
+    <|> keywordFunctionOrIden
+  where
+    -- special cases for keywords that can be parsed as an iden or app
+    -- (driven by diIdentifierKeywords / diAppKeywords in the dialect)
+    keywordFunctionOrIden = try $ do
+        x <- unquotedIdentifierTok [] Nothing
+        d <- getState
+        let i = map toLower x `elem` diIdentifierKeywords d
+            a = map toLower x `elem` diAppKeywords d
+        case () of
+            _  | i && a -> pure [Name Nothing x] <**> option Iden app
+               | i -> pure (Iden [Name Nothing x])
+               | a -> pure [Name Nothing x] <**> app
+               | otherwise -> fail ""
+
+
+{-
+=== special
+
+These are keyword operators which don't look like normal prefix,
+postfix or infix binary operators. They mostly look like function
+application but with keywords in the argument list instead of commas
+to separate the arguments.
+
+the special op keywords
+parse an operator which is
+operatorname(firstArg keyword0 arg0 keyword1 arg1 etc.)
+-}
+
+-- | Whether a special-op-keyword operator takes an argument before
+-- the keyword arguments.
+data SpecialOpKFirstArg = SOKNone      -- ^ no argument before the keywords
+                        | SOKOptional  -- ^ leading argument may be omitted
+                        | SOKMandatory -- ^ leading argument is required
+
+-- | Parse an operator of the form
+-- @operatorname(firstArg keyword0 arg0 keyword1 arg1 ...)@
+-- (e.g. extract, position, substring). Optional keyword args that are
+-- absent are dropped from the result via catMaybes.
+specialOpK :: String -- name of the operator
+           -> SpecialOpKFirstArg -- has a first arg without a keyword
+           -> [(String,Bool)] -- the other args with their keywords
+                              -- and whether they are optional
+           -> Parser ScalarExpr
+specialOpK opName firstArg kws =
+    keyword_ opName >> do
+    void openParen
+    let pfa = do
+              e <- scalarExpr
+              -- check we haven't parsed the first
+              -- keyword as an identifier
+              case (e,kws) of
+                  (Iden [Name Nothing i], (k,_):_)
+                      | map toLower i == k ->
+                          fail $ "cannot use keyword here: " ++ i
+                  _ -> return ()
+              pure e
+    fa <- case firstArg of
+         SOKNone -> pure Nothing
+         SOKOptional -> optionMaybe (try pfa)
+         SOKMandatory -> Just <$> pfa
+    as <- mapM parseArg kws
+    void closeParen
+    pure $ SpecialOpK [Name Nothing opName] fa $ catMaybes as
+  where
+    parseArg (nm,mand) =
+        let p = keyword_ nm >> scalarExpr
+        in fmap (nm,) <$> if mand
+                          then Just <$> p
+                          else optionMaybe (try p)
+
+{-
+The actual operators:
+
+EXTRACT( date_part FROM expression )
+
+POSITION( string1 IN string2 )
+
+SUBSTRING(extraction_string FROM starting_position [FOR length]
+[COLLATE collation_name])
+
+CONVERT(char_value USING conversion_char_name)
+
+TRANSLATE(char_value USING translation_name)
+
+OVERLAY(string PLACING embedded_string FROM start
+[FOR length])
+
+TRIM( [ [{LEADING | TRAILING | BOTH}] [removal_char] FROM ]
+target_string
+[COLLATE collation_name] )
+-}
+
+-- | All the special-op-keyword operators; each one is wrapped in try
+-- since they all share the function-application shape.
+specialOpKs :: Parser ScalarExpr
+specialOpKs = choice $ map try
+    [extract, position, substring, convert, translate, overlay, trim]
+
+-- | EXTRACT( date_part FROM expression )
+extract :: Parser ScalarExpr
+extract = specialOpK "extract" SOKMandatory [("from", True)]
+
+-- | POSITION( string1 IN string2 )
+position :: Parser ScalarExpr
+position = specialOpK "position" SOKMandatory [("in", True)]
+
+{-
+strictly speaking, the substring must have at least one of from and
+for, but the parser doesn't enforce this
+-}
+
+-- | SUBSTRING( str FROM starting_position [FOR length] )
+substring :: Parser ScalarExpr
+substring = specialOpK "substring" SOKMandatory
+                [("from", False),("for", False)]
+
+-- | CONVERT( char_value USING conversion_char_name )
+convert :: Parser ScalarExpr
+convert = specialOpK "convert" SOKMandatory [("using", True)]
+
+
+-- | TRANSLATE( char_value USING translation_name )
+translate :: Parser ScalarExpr
+translate = specialOpK "translate" SOKMandatory [("using", True)]
+
+-- | OVERLAY( string PLACING embedded_string FROM start [FOR length] )
+overlay :: Parser ScalarExpr
+overlay = specialOpK "overlay" SOKMandatory
+                [("placing", True),("from", True),("for", False)]
+
+{-
+trim is too different because of the optional char, so a custom parser:
+the defaults 'both' and ' ' are filled in when either part is missing
+in the source
+-}
+
+-- | TRIM( [ [{LEADING|TRAILING|BOTH}] [removal_char] FROM ] target ).
+-- Custom parser because of the optional removal char: the defaults
+-- "both" and ' ' are filled in when either part is missing in the
+-- source.
+trim :: Parser ScalarExpr
+trim =
+    keyword "trim" >>
+    parens (mkTrim
+            <$> option "both" sides
+            <*> option " " singleQuotesOnlyStringTok
+            <*> (keyword_ "from" *> scalarExpr))
+  where
+    sides = choice ["leading" <$ keyword_ "leading"
+                   ,"trailing" <$ keyword_ "trailing"
+                   ,"both" <$ keyword_ "both"]
+    -- both args are always present at this point (option has filled in
+    -- the defaults), so build the arg list directly instead of running
+    -- catMaybes over a list of Justs
+    mkTrim fa ch fr =
+      SpecialOpK [Name Nothing "trim"] Nothing
+          [(fa,StringLit "'" "'" ch)
+          ,("from", fr)]
+
+{-
+=== app, aggregate, window
+
+This parses all these variations:
+normal function application with just a csv of scalar exprs
+aggregate variations (distinct, order by in parens, filter and where
+  suffixes)
+window apps (fn/agg followed by over)
+
+This code is also a little dense like the typename code because of
+left factoring, later they will even have to be partially combined
+together.
+-}
+
+-- | Function application and its aggregate/window variations, parsed
+-- as a suffix on an already-parsed name. Heavily left factored from
+-- the open paren onwards: plain app, aggregates (distinct / order by
+-- in parens / filter), within group, and the window suffix.
+app :: Parser ([Name] -> ScalarExpr)
+app =
+    openParen *> choice
+    [duplicates
+     <**> (commaSep1 scalarExpr
+           <**> (((option [] orderBy) <* closeParen)
+                 <**> (optionMaybe afilter <$$$$$> AggregateApp)))
+     -- separate cases with no all or distinct which must have at
+     -- least one scalar expr
+    ,commaSep1 scalarExpr
+     <**> choice
+          [closeParen *> choice
+                         [window
+                         ,withinGroup
+                         ,(Just <$> afilter) <$$$> aggAppWithoutDupeOrd
+                         ,pure (flip App)]
+          ,orderBy <* closeParen
+           <**> (optionMaybe afilter <$$$$> aggAppWithoutDupe)]
+     -- no scalarExprs: duplicates and order by not allowed
+    ,([] <$ closeParen) <**> option (flip App) (window <|> withinGroup)
+    ]
+  where
+    -- aggregate app with no quantifier and no order by
+    aggAppWithoutDupeOrd n es f = AggregateApp n SQDefault es [] f
+    -- aggregate app with no quantifier
+    aggAppWithoutDupe n = AggregateApp n SQDefault
+
+-- | Aggregate filter clause: @filter (where expr)@.
+afilter :: Parser ScalarExpr
+afilter = keyword_ "filter" *> parens (keyword_ "where" *> scalarExpr)
+
+-- | Ordered-set aggregate suffix: @within group (order by ...)@.
+withinGroup :: Parser ([ScalarExpr] -> [Name] -> ScalarExpr)
+withinGroup =
+    (keywords_ ["within", "group"] *> parens orderBy) <$$$> AggregateAppGroup
+
+{-
+==== window
+
+parse a window call as a suffix of a regular function call
+this looks like this:
+functionname(args) over ([partition by ids] [order by orderitems])
+
+No support for explicit frames yet.
+
+TODO: add window support for other aggregate variations, needs some
+changes to the syntax also
+-}
+
+-- | Window suffix on a function application:
+-- @over ([partition by ...] [order by ...] [frame clause])@.
+window :: Parser ([ScalarExpr] -> [Name] -> ScalarExpr)
+window =
+  keyword_ "over" *> openParen *> option [] partitionBy
+  <**> (option [] orderBy
+        <**> (((optionMaybe frameClause) <* closeParen) <$$$$$> WindowApp))
+  where
+    partitionBy = keywords_ ["partition","by"] *> commaSep1 scalarExpr
+    frameClause =
+        frameRowsRange -- TODO: this 'and' could be an issue
+        <**> (choice [(keyword_ "between" *> frameLimit True)
+                      <**> ((keyword_ "and" *> frameLimit True)
+                            <$$$> FrameBetween)
+                      -- maybe this should still use a b expression
+                      -- for consistency
+                     ,frameLimit False <**> pure (flip FrameFrom)])
+    frameRowsRange = FrameRows <$ keyword_ "rows"
+                     <|> FrameRange <$ keyword_ "range"
+    -- useB: in "between ... and ..." frames, a bare and must not be
+    -- parsed as a binary operator, so the b expression variant is used
+    frameLimit useB =
+        choice
+        [Current <$ keywords_ ["current", "row"]
+         -- todo: create an automatic left factor for stuff like this
+        ,keyword_ "unbounded" *>
+         choice [UnboundedPreceding <$ keyword_ "preceding"
+                ,UnboundedFollowing <$ keyword_ "following"]
+        ,(if useB then scalarExprB else scalarExpr)
+         <**> (Preceding <$ keyword_ "preceding"
+               <|> Following <$ keyword_ "following")
+        ]
+
+{-
+== suffixes
+
+These are all generic suffixes on any scalar expr
+
+=== in
+
+in: two variations:
+a in (expr0, expr1, ...)
+a in (queryexpr)
+-}
+
+-- | Postfix in predicate, two variants:
+-- @a [not] in (expr0, expr1, ...)@ and @a [not] in (queryexpr)@.
+inSuffix :: Parser (ScalarExpr -> ScalarExpr)
+inSuffix =
+    mkIn <$> inty
+         <*> parens (InQueryExpr <$> queryExpr
+                     <|> InList <$> commaSep1 scalarExpr)
+  where
+    inty = True <$ keyword_ "in"
+           <|> False <$ keywords_ ["not","in"]
+    mkIn isIn vals lhs = In isIn lhs vals
+
+{-
+=== between
+
+between:
+expr between expr and expr
+
+There is a complication when parsing between - when parsing the second
+expression it is ambiguous when you hit an 'and' whether it is a
+binary operator or part of the between. This code follows what
+postgres does, which might be standard across SQL implementations,
+which is that you can't have a binary and operator in the middle
+expression in a between unless it is wrapped in parens. The 'bExpr
+parsing' is used to create an alternative scalar expression parser which
+is identical to the normal one except it doesn't recognise the binary
+and operator. This is the call to scalarExprB.
+-}
+
+-- | Postfix between predicate: @a [not] between b and c@. The two
+-- operand expressions use 'scalarExprB' so a bare @and@ binds to the
+-- between instead of being parsed as a binary operator.
+betweenSuffix :: Parser (ScalarExpr -> ScalarExpr)
+betweenSuffix =
+    -- compose into the function before fmapping once, rather than
+    -- the double fmap (f <$> g <$> x) through the function functor
+    makeOp . Name Nothing
+           <$> opName
+           <*> scalarExprB
+           <*> (keyword_ "and" *> scalarExprB)
+  where
+    -- "not between" needs try so a failed match doesn't eat the "not"
+    opName = choice
+             ["between" <$ keyword_ "between"
+             ,"not between" <$ try (keywords_ ["not","between"])]
+    makeOp n b c = \a -> SpecialOp [n] [a,b,c]
+
+{-
+=== quantified comparison
+
+a = any (select * from t)
+-}
+
+-- | Quantified comparison suffix: @a = any (queryexpr)@ and friends,
+-- with any/some/all and the six comparison symbols.
+quantifiedComparisonSuffix :: Parser (ScalarExpr -> ScalarExpr)
+quantifiedComparisonSuffix = do
+    c <- comp
+    cq <- compQuan
+    q <- parens queryExpr
+    pure $ \v -> QuantifiedComparison v [c] cq q
+  where
+    -- note: "<=" and "<" (and ">=" / ">") are ordered longest first
+    comp = Name Nothing <$> choice (map symbol
+           ["=", "<>", "<=", "<", ">", ">="])
+    compQuan = choice
+               [CPAny <$ keyword_ "any"
+               ,CPSome <$ keyword_ "some"
+               ,CPAll <$ keyword_ "all"]
+
+{-
+=== match
+
+a match (select a from t)
+-}
+
+-- | Postfix match predicate: @a match [unique] (queryexpr)@.
+matchPredicateSuffix :: Parser (ScalarExpr -> ScalarExpr)
+matchPredicateSuffix = do
+    keyword_ "match"
+    isUnique <- option False (True <$ keyword_ "unique")
+    qe <- parens queryExpr
+    pure $ \lhs -> Match lhs isUnique qe
+
+-- === array subscript
+
+-- | Postfix array subscript: @a[expr, ...]@.
+arraySuffix :: Parser (ScalarExpr -> ScalarExpr)
+arraySuffix = flip Array <$> brackets (commaSep scalarExpr)
+
+{-
+=== escape
+
+It is going to be really difficult to support an arbitrary character
+for the escape now there is a separate lexer ...
+
+TODO: this needs fixing. Escape is only part of other nodes, and not a
+separate suffix.
+-}
+
+{-escapeSuffix :: Parser (ScalarExpr -> ScalarExpr)
+escapeSuffix = do
+    ctor <- choice
+            [Escape <$ keyword_ "escape"
+            ,UEscape <$ keyword_ "uescape"]
+    c <- escapeChar
+    pure $ \v -> ctor v c
+  where
+    escapeChar :: Parser Char
+    escapeChar = (identifierTok [] Nothing <|> symbolTok Nothing) >>= oneOnly
+    oneOnly :: String -> Parser Char
+    oneOnly c = case c of
+                   [c'] -> return c'
+                   _ -> fail "escape char must be single char"
+-}
+
+-- === collate
+
+-- | Postfix collate: @a collate collation_name@.
+collateSuffix :: Parser (ScalarExpr -> ScalarExpr)
+collateSuffix = flip Collate <$> (keyword_ "collate" *> names)
+
+{-
+== odbc syntax
+
+the parser supports three kinds of odbc syntax, two of which are
+scalar expressions (the other is a variation on joins)
+-}
+
+
+-- | odbc scalar expression syntax, in braces: the time literals
+-- @{d '...'}@, @{t '...'}@, @{ts '...'}@ and function calls @{fn ...}@.
+odbcExpr :: Parser ScalarExpr
+odbcExpr = between (symbol "{") (symbol "}")
+           (odbcTimeLit <|> odbcFunc)
+  where
+    odbcTimeLit =
+        OdbcLiteral <$> choice [OLDate <$ keyword "d"
+                               ,OLTime <$ keyword "t"
+                               ,OLTimestamp <$ keyword "ts"]
+                    <*> singleQuotesOnlyStringTok
+    -- todo: this parser is too general, the expr part
+    -- should be only a function call (from a whitelist of functions)
+    -- or the extract operator
+    odbcFunc = OdbcFunc <$> (keyword "fn" *> scalarExpr)
+
+{-
+==  operators
+
+The 'regular' operators in this parsing and in the abstract syntax are
+unary prefix, unary postfix and binary infix operators. The operators
+can be symbols (a + b), single keywords (a and b) or multiple keywords
+(a is similar to b).
+
+TODO: carefully review the precedences and associativities.
+
+TODO: to fix the parsing completely, I think will need to parse
+without precedence and associativity and fix up afterwards, since SQL
+syntax is way too messy. It might be possible to avoid this if we
+wanted to avoid extensibility and to not be concerned with parse error
+messages, but both of these are too important.
+-}
+
+-- | The operator table for scalar expressions, tightest precedence
+-- first. The Bool selects the 'b expression' variant which omits the
+-- binary and operator (used inside between and frame bounds).
+opTable :: Bool -> [[E.Operator [Token] ParseState Identity ScalarExpr]]
+opTable bExpr =
+        [-- parse match and quantified comparisons as postfix ops
+          -- todo: left factor the quantified comparison with regular
+          -- binary comparison, somehow
+         [E.Postfix $ try quantifiedComparisonSuffix
+         ,E.Postfix matchPredicateSuffix
+         ]
+
+        ,[binarySym "." E.AssocLeft]
+
+        ,[postfix' arraySuffix
+         ,postfix' collateSuffix]
+
+        ,[prefixSym "+", prefixSym "-"]
+
+        ,[binarySym "^" E.AssocLeft]
+
+        ,[binarySym "*" E.AssocLeft
+         ,binarySym "/" E.AssocLeft
+         ,binarySym "%" E.AssocLeft]
+
+        ,[binarySym "+" E.AssocLeft
+         ,binarySym "-" E.AssocLeft]
+
+        ,[binarySym "||" E.AssocRight
+         ,prefixSym "~"
+         ,binarySym "&" E.AssocRight
+         ,binarySym "|" E.AssocRight]
+
+        ,[binaryKeyword "overlaps" E.AssocNone]
+
+        ,[binaryKeyword "like" E.AssocNone
+         -- have to use try with inSuffix because of a conflict
+         -- with 'in' in position function, and not between
+         -- between also has a try in it to deal with 'not'
+         -- ambiguity
+         ,E.Postfix $ try inSuffix
+         ,E.Postfix betweenSuffix]
+         -- todo: figure out where to put the try?
+         ++ [binaryKeywords $ makeKeywordTree
+             ["not like"
+             ,"is similar to"
+             ,"is not similar to"]]
+         ++ [multisetBinOp]
+
+        ,[binarySym "<" E.AssocNone
+         ,binarySym ">" E.AssocNone
+         ,binarySym ">=" E.AssocNone
+         ,binarySym "<=" E.AssocNone
+         ,binarySym "!=" E.AssocRight
+         ,binarySym "<>" E.AssocRight
+         ,binarySym "=" E.AssocRight]
+
+        ,[postfixKeywords $ makeKeywordTree
+             ["is null"
+             ,"is not null"
+             ,"is true"
+             ,"is not true"
+             ,"is false"
+             ,"is not false"
+             ,"is unknown"
+             ,"is not unknown"]]
+         ++ [binaryKeywords $ makeKeywordTree
+             ["is distinct from"
+             ,"is not distinct from"]]
+
+        ,[prefixKeyword "not"]
+
+        -- the b expression variant omits binary and (see betweenSuffix)
+        ,if bExpr then [] else [binaryKeyword "and" E.AssocLeft]
+
+        ,[binaryKeyword "or" E.AssocLeft]
+
+       ]
+  where
+    binarySym nm assoc = binary (symbol_ nm) nm assoc
+    binaryKeyword nm assoc = binary (keyword_ nm) nm assoc
+    -- multi-keyword infix ops, e.g. "is not similar to"
+    binaryKeywords p =
+        E.Infix (do
+                 o <- try p
+                 pure (\a b -> BinOp a [Name Nothing $ unwords o] b))
+            E.AssocNone
+    postfixKeywords p =
+      postfix' $ do
+          o <- try p
+          pure $ PostfixOp [Name Nothing $ unwords o]
+    binary p nm assoc =
+      E.Infix (p >> pure (\a b -> BinOp a [Name Nothing nm] b)) assoc
+    multisetBinOp = E.Infix (do
+        keyword_ "multiset"
+        o <- choice [Union <$ keyword_ "union"
+                    ,Intersect <$ keyword_ "intersect"
+                    ,Except <$ keyword_ "except"]
+        d <- option SQDefault duplicates
+        pure (\a b -> MultisetBinOp a o d b))
+          E.AssocLeft
+    prefixKeyword nm = prefix (keyword_ nm) nm
+    prefixSym nm = prefix (symbol_ nm) nm
+    prefix p nm = prefix' (p >> pure (PrefixOp [Name Nothing nm]))
+    -- hack from here
+    -- http://stackoverflow.com/questions/10475337/parsec-expr-repeated-prefix-postfix-operator-not-supported
+    -- not implemented properly yet
+    -- I don't think this will be enough for all cases
+    -- at least it works for 'not not a'
+    -- ok: "x is not true is not true"
+    -- no work: "x is not true is not null"
+    prefix'  p = E.Prefix  . chainl1 p $ pure       (.)
+    postfix' p = E.Postfix . chainl1 p $ pure (flip (.))
+
+{-
+== scalar expression top level
+
+This parses most of the scalar exprs. The order of the parsers and use
+of try is carefully done to make everything work. It is a little
+fragile and could at least do with some heavy explanation. Update: the
+'try's have migrated into the individual parsers, they still need
+documenting/fixing.
+-}
+
+-- | Top level scalar expression parser: the operator table over terms.
+scalarExpr :: Parser ScalarExpr
+scalarExpr = E.buildExpressionParser (opTable False) term
+
+term :: Parser ScalarExpr
+term = choice [simpleLiteral
+              ,parameter
+              ,positionalArg
+              ,star
+              ,parensExpr
+              ,caseExpr
+              ,cast
+              ,convertSqlServer
+              ,arrayCtor
+              ,multisetCtor
+              ,nextValueFor
+              ,subquery
+              ,intervalLit
+              ,specialOpKs
+              ,idenExpr
+              ,odbcExpr]
+       <?> "scalar expression"
+
+-- expose the b expression for window frame clause range between
+
+-- | Scalar expression without the binary and operator; used for the
+-- middle of between and for window frame bounds.
+scalarExprB :: Parser ScalarExpr
+scalarExprB = E.buildExpressionParser (opTable True) term
+
+{-
+== helper parsers
+
+This is used in interval literals and in interval type names.
+-}
+
+-- | Interval qualifier: one or two datetime fields, each with an
+-- optional parenthesised precision, e.g. @day (3)@ or @day to second@.
+-- Used in interval literals and in interval type names.
+intervalQualifier :: Parser (IntervalTypeField,Maybe IntervalTypeField)
+intervalQualifier =
+    (,) <$> intervalField
+        <*> optionMaybe (keyword_ "to" *> intervalField)
+  where
+    intervalField =
+        Itf
+        <$> datetimeField
+        <*> optionMaybe
+            (parens ((,) <$> unsignedInteger
+                         <*> optionMaybe (comma *> unsignedInteger)))
+
+{-
+TODO: use datetime field in extract also
+use a data type for the datetime field?
+-}
+
+-- | One of the datetime field keywords (year .. second), returned as
+-- the keyword text.
+datetimeField :: Parser String
+datetimeField = choice (map keyword ["year","month","day"
+                                    ,"hour","minute","second"])
+                <?> "datetime field"
+
+{-
+This is used in multiset operations (scalar expr), selects (query expr)
+and set operations (query expr).
+-}
+
+-- | Parse the all/distinct set quantifier; used in multiset operations
+-- (scalar expr), selects (query expr) and set operations (query expr).
+duplicates :: Parser SetQuantifier
+duplicates =
+    choice [All <$ keyword_ "all"
+           -- keyword_ for consistency: the keyword text is discarded
+           ,Distinct <$ keyword_ "distinct"]
+
+{-
+-------------------------------------------------
+
+= query expressions
+
+== select lists
+-}
+
+-- | One select-list item: an expression with an optional [as] alias.
+selectItem :: Parser (ScalarExpr,Maybe Name)
+selectItem = (,) <$> scalarExpr <*> optionMaybe als
+  where als = optional (keyword_ "as") *> name
+
+-- | The comma separated select list.
+selectList :: Parser [(ScalarExpr,Maybe Name)]
+selectList = commaSep1 selectItem
+
+{-
+== from
+
+Here is the rough grammar for joins
+
+tref
+(cross | [natural] ([inner] | (left | right | full) [outer])) join
+tref
+[on expr | using (...)]
+
+TODO: either use explicit 'operator precedence' parsers or build
+expression parser for the 'tref operators' such as joins, lateral,
+aliases.
+-}
+
+from :: Parser [TableRef]
+from = keyword_ "from" *> commaSep1 tref
+  where
+    -- TODO: use P (a->) for the join tref suffix
+    -- chainl or buildexpressionparser
+    tref = nonJoinTref >>= optionSuffix joinTrefSuffix
+    nonJoinTref = choice
+        [parens $ choice
+             [TRQueryExpr <$> queryExpr
+             ,TRParens <$> tref]
+        ,TRLateral <$> (keyword_ "lateral"
+                        *> nonJoinTref)
+        ,do
+         n <- names
+         choice [TRFunction n
+                 <$> parens (commaSep scalarExpr)
+                ,pure $ TRSimple n]
+         -- todo: I think you can only have outer joins inside the oj,
+         -- not sure.
+        ,TROdbc <$> (symbol "{" *> keyword_ "oj" *> tref <* symbol "}")
+        ] <??> aliasSuffix
+    aliasSuffix = fromAlias <$$> TRAlias
+    joinTrefSuffix t =
+        (TRJoin t <$> option False (True <$ keyword_ "natural")
+                  <*> joinType
+                  <*> nonJoinTref
+                  <*> optionMaybe joinCondition)
+        >>= optionSuffix joinTrefSuffix
+
+{-
+TODO: factor the join stuff to produce better error messages (and make
+it more readable)
+-}
+
+-- | The join kind keywords; a plain @join@ is parsed as inner, and
+-- left/right/full allow an optional @outer@.
+joinType :: Parser JoinType
+joinType = choice
+    [JCross <$ keyword_ "cross" <* keyword_ "join"
+    ,JInner <$ keyword_ "inner" <* keyword_ "join"
+    ,JLeft <$ keyword_ "left"
+           <* optional (keyword_ "outer")
+           <* keyword_ "join"
+    ,JRight <$ keyword_ "right"
+            <* optional (keyword_ "outer")
+            <* keyword_ "join"
+    ,JFull <$ keyword_ "full"
+           <* optional (keyword_ "outer")
+           <* keyword_ "join"
+    ,JInner <$ keyword_ "join"]
+
+-- | Join condition: @on expr@ or @using (col, ...)@.
+joinCondition :: Parser JoinCondition
+joinCondition =
+    JoinOn <$> (keyword_ "on" *> scalarExpr)
+    <|> JoinUsing <$> (keyword_ "using" *> parens (commaSep1 name))
+
+-- | Table alias in from: @[as] name [(col, ...)]@.
+fromAlias :: Parser Alias
+fromAlias = Alias <$> tableAlias <*> columnAliases
+  where
+    tableAlias = optional (keyword_ "as") *> name
+    columnAliases = optionMaybe $ parens $ commaSep1 name
+
+{-
+== simple other parts
+
+Parsers for where, group by, having, order by and limit, which are
+pretty trivial.
+-}
+
+-- | The where clause: @where expr@.
+whereClause :: Parser ScalarExpr
+whereClause = keyword_ "where" >> scalarExpr
+
+-- | group by clause, including cube, rollup, grouping sets, and
+-- parenthesised grouping expressions (all mutually recursive).
+groupByClause :: Parser [GroupingExpr]
+groupByClause = keywords_ ["group","by"] *> commaSep1 groupingExpression
+  where
+    groupingExpression = choice
+      [keyword_ "cube" >>
+       Cube <$> parens (commaSep groupingExpression)
+      ,keyword_ "rollup" >>
+       Rollup <$> parens (commaSep groupingExpression)
+      ,GroupingParens <$> parens (commaSep groupingExpression)
+      ,keywords_ ["grouping", "sets"] >>
+       GroupingSets <$> parens (commaSep groupingExpression)
+      ,SimpleGroup <$> scalarExpr
+      ]
+
+having :: Parser ScalarExpr
+having = keyword_ "having" *> scalarExpr
+
+orderBy :: Parser [SortSpec]
+orderBy = keywords_ ["order","by"] *> commaSep1 ob
+  where
+    ob = SortSpec
+         <$> scalarExpr
+         <*> option DirDefault (choice [Asc <$ keyword_ "asc"
+                                       ,Desc <$ keyword_ "desc"])
+         <*> option NullsOrderDefault
+             -- todo: left factor better
+             (keyword_ "nulls" >>
+                    choice [NullsFirst <$ keyword "first"
+                           ,NullsLast <$ keyword "last"])
+
+{-
+allows offset and fetch in either order
++ postgresql offset without row(s) and limit instead of fetch also
+-}
+
+-- | offset and fetch, allowed in either order (permute); either may
+-- be absent, defaulting to Nothing.
+offsetFetch :: Parser (Maybe ScalarExpr, Maybe ScalarExpr)
+offsetFetch = permute ((,) <$?> (Nothing, Just <$> offset)
+                           <|?> (Nothing, Just <$> fetch))
+
+offset :: Parser ScalarExpr
+offset = keyword_ "offset" *> scalarExpr
+         <* option () (choice [keyword_ "rows"
+                              ,keyword_ "row"])
+
+fetch :: Parser ScalarExpr
+fetch = fetchFirst <|> limit
+  where
+    fetchFirst = guardDialect diFetchFirst
+                 *> fs *> scalarExpr <* ro
+    fs = makeKeywordTree ["fetch first", "fetch next"]
+    ro = makeKeywordTree ["rows only", "row only"]
+    -- todo: not in ansi sql dialect
+    limit = guardDialect diLimit *>
+            keyword_ "limit" *> scalarExpr
+
+-- == common table expressions
+
+with :: Parser QueryExpr
+with = keyword_ "with" >>
+    With <$> option False (True <$ keyword_ "recursive")
+         <*> commaSep1 withQuery <*> queryExpr
+  where
+    withQuery = (,) <$> (withAlias <* keyword_ "as")
+                    <*> parens queryExpr
+    withAlias = Alias <$> name <*> columnAliases
+    columnAliases = optionMaybe $ parens $ commaSep1 name
+
+
+{-
+== query expression
+
+This parser parses any query expression variant: normal select, cte,
+and union, etc..
+-}
+
+queryExpr :: Parser QueryExpr
+queryExpr = choice
+    [with
+    ,chainr1 (choice [values,table, select]) setOp]
+  where
+    select = keyword_ "select" >>
+        mkSelect
+        <$> option SQDefault duplicates
+        <*> selectList
+        <*> optionMaybe tableExpression
+    mkSelect d sl Nothing =
+        makeSelect{qeSetQuantifier = d, qeSelectList = sl}
+    mkSelect d sl (Just (TableExpression f w g h od ofs fe)) =
+        Select d sl f w g h od ofs fe
+    values = keyword_ "values"
+             >> Values <$> commaSep (parens (commaSep scalarExpr))
+    table = keyword_ "table" >> Table <$> names
+
+{-
+local data type to help with parsing the bit after the select list,
+called 'table expression' in the ansi sql grammar. Maybe this should
+be in the public syntax?
+-}
+
+-- | Local helper type for the bit after the select list, called
+-- 'table expression' in the ansi sql grammar (see comment above).
+data TableExpression
+    = TableExpression
+      {_teFrom :: [TableRef]
+      ,_teWhere :: Maybe ScalarExpr
+      ,_teGroupBy :: [GroupingExpr]
+      ,_teHaving :: Maybe ScalarExpr
+      ,_teOrderBy :: [SortSpec]
+      ,_teOffset :: Maybe ScalarExpr
+      ,_teFetchFirst :: Maybe ScalarExpr}
+
+-- | from + optional where/group by/having/order by/offset-fetch,
+-- collected into the local 'TableExpression' type.
+tableExpression :: Parser TableExpression
+tableExpression = mkTe <$> from
+                       <*> optionMaybe whereClause
+                       <*> option [] groupByClause
+                       <*> optionMaybe having
+                       <*> option [] orderBy
+                       <*> offsetFetch
+ where
+    mkTe f w g h od (ofs,fe) =
+        TableExpression f w g h od ofs fe
+
+setOp :: Parser (QueryExpr -> QueryExpr -> QueryExpr)
+setOp = cq
+        <$> setOpK
+        <*> option SQDefault duplicates
+        <*> corr
+  where
+    cq o d c q0 q1 = QueryExprSetOp q0 o d c q1
+    setOpK = choice [Union <$ keyword_ "union"
+                    ,Intersect <$ keyword_ "intersect"
+                    ,Except <$ keyword_ "except"]
+            <?> "set operator"
+    corr = option Respectively (Corresponding <$ keyword_ "corresponding")
+
+
+{-
+wrapper for query expr which ignores optional trailing semicolon.
+
+TODO: change style
+-}
+
+-- | Query expr wrapper which ignores an optional trailing semicolon.
+topLevelQueryExpr :: Parser QueryExpr
+topLevelQueryExpr = queryExpr <??> (id <$ semi)
+
+-- | Top level entry point for a single statement.
+topLevelStatement :: Parser Statement
+topLevelStatement = statement
+
+{-
+-------------------------
+
+= Statements
+-}
+
+-- | Any supported statement, without trailing semicolon handling.
+-- create/alter/drop are left factored on their leading keyword.
+statementWithoutSemicolon :: Parser Statement
+statementWithoutSemicolon = choice
+    [keyword_ "create" *> choice [createSchema
+                                 ,createTable
+                                 ,createIndex
+                                 ,createView
+                                 ,createDomain
+                                 ,createSequence
+                                 ,createRole
+                                 ,createAssertion]
+    ,keyword_ "alter" *> choice [alterTable
+                                ,alterDomain
+                                ,alterSequence]
+    ,keyword_ "drop" *> choice [dropSchema
+                               ,dropTable
+                               ,dropView
+                               ,dropDomain
+                               ,dropSequence
+                               ,dropRole
+                               ,dropAssertion]
+    ,delete
+    ,truncateSt
+    ,insert
+    ,update
+    ,startTransaction
+    ,savepoint
+    ,releaseSavepoint
+    ,commit
+    ,rollback
+    ,grant
+    ,revoke
+    -- last: a query expression as a statement
+    ,SelectStatement <$> queryExpr
+    ]
+
+statement :: Parser Statement
+statement = statementWithoutSemicolon <* optional semi <|> semi *> pure EmptyStatement
+
+-- | create schema (the leading create keyword has already been
+-- consumed by the statement dispatcher).
+createSchema :: Parser Statement
+createSchema =
+    CreateSchema <$> (keyword_ "schema" *> names)
+
+createTable :: Parser Statement
+createTable = do 
+  d <- getState
+  let 
+    parseColumnDef = TableColumnDef <$> columnDef 
+    parseConstraintDef = uncurry TableConstraintDef <$> tableConstraintDef
+    separator = if diNonCommaSeparatedConstraints d
+      then optional comma
+      else void comma
+    constraints = sepBy parseConstraintDef separator
+    entries = ((:) <$> parseColumnDef <*> ((comma >> entries) <|> pure [])) <|> constraints
+
+  keyword_ "table" >>
+    CreateTable
+    <$> names 
+    <*> parens entries
+
+-- | create [unique] index name on table (cols); the Bool records
+-- uniqueness.
+createIndex :: Parser Statement
+createIndex =
+    CreateIndex
+    <$> ((keyword_ "index" >> pure False) <|>
+         (keywords_ ["unique", "index"] >> pure True))
+    <*> names
+    <*> (keyword_ "on" >> names)
+    <*> parens (commaSep1 name)
+
+-- | A column definition in create table: name, type, optional
+-- default / generation / identity clause, then any column constraints.
+columnDef :: Parser ColumnDef
+columnDef = ColumnDef <$> name <*> typeName
+            <*> optionMaybe defaultClause
+            -- option [] (many1 p) is identically many p
+            <*> many colConstraintDef
+  where
+    defaultClause = choice [
+        keyword_ "default" >>
+        DefaultClause <$> scalarExpr
+        -- todo: left factor
+       ,try (keywords_ ["generated","always","as"] >>
+             GenerationClause <$> parens scalarExpr)
+       ,keyword_ "generated" >>
+        IdentityColumnSpec
+        <$> (GeneratedAlways <$ keyword_ "always"
+             <|> GeneratedByDefault <$ keywords_ ["by", "default"])
+        <*> (keywords_ ["as", "identity"] *>
+             option [] (parens sequenceGeneratorOptions))
+       ]
+
+-- | A table-level constraint with an optional constraint name:
+-- unique, primary key, check, or foreign key references.
+tableConstraintDef :: Parser (Maybe [Name], TableConstraint)
+tableConstraintDef =
+    (,)
+    <$> (optionMaybe (keyword_ "constraint" *> names))
+    <*> (unique <|> primaryKey <|> check <|> references)
+  where
+    unique = keyword_ "unique" >>
+        TableUniqueConstraint <$> parens (commaSep1 name)
+    primaryKey = keywords_ ["primary", "key"] >>
+        TablePrimaryKeyConstraint <$> parens (commaSep1 name)
+    check = keyword_ "check" >> TableCheckConstraint <$> parens scalarExpr
+    references = keywords_ ["foreign", "key"] >>
+        (\cs ft ftcs m (u,d) -> TableReferencesConstraint cs ft ftcs m u d)
+        <$> parens (commaSep1 name)
+        <*> (keyword_ "references" *> names)
+        <*> optionMaybe (parens $ commaSep1 name)
+        <*> refMatch
+        <*> refActions
+
+-- | Optional @match full|partial|simple@ for references constraints;
+-- defaults when absent.
+refMatch :: Parser ReferenceMatch
+refMatch = option DefaultReferenceMatch
+            (keyword_ "match" *>
+             choice [MatchFull <$ keyword_ "full"
+                    ,MatchPartial <$ keyword_ "partial"
+                    ,MatchSimple <$ keyword_ "simple"])
+-- | Optional @on update@ / @on delete@ referential actions, allowed
+-- in either order (permute); each defaults when absent.
+refActions :: Parser (ReferentialAction,ReferentialAction)
+refActions = permute ((,) <$?> (DefaultReferentialAction, onUpdate)
+                          <|?> (DefaultReferentialAction, onDelete))
+  where
+    -- todo: left factor?
+    onUpdate = try (keywords_ ["on", "update"]) *> referentialAction
+    onDelete = try (keywords_ ["on", "delete"]) *> referentialAction
+    referentialAction = choice [
+         RefCascade <$ keyword_ "cascade"
+         -- todo: left factor?
+        ,RefSetNull <$ try (keywords_ ["set", "null"])
+        ,RefSetDefault <$ try (keywords_ ["set", "default"])
+        ,RefRestrict <$ keyword_ "restrict"
+        ,RefNoAction <$ keywords_ ["no", "action"]]
+
+-- | A single column constraint with an optional constraint name:
+-- null, not null, unique, primary key, check, or references.
+colConstraintDef :: Parser ColConstraintDef
+colConstraintDef =
+    ColConstraintDef
+    <$> optionMaybe (keyword_ "constraint" *> names)
+    <*> (nullable <|> notNull <|> unique <|> primaryKey <|> check <|> references)
+  where
+    -- keyword_ for consistency: the keyword text is discarded
+    nullable = ColNullableConstraint <$ keyword_ "null"
+    notNull = ColNotNullConstraint <$ keywords_ ["not", "null"]
+    unique = ColUniqueConstraint <$ keyword_ "unique"
+    primaryKey = do
+      keywords_ ["primary", "key"]
+      d <- getState
+      -- autoincrement only recognised when the dialect enables it
+      autoincrement <- if diAutoincrement d
+        then optionMaybe (keyword_ "autoincrement")
+        else pure Nothing
+      pure $ ColPrimaryKeyConstraint $ isJust autoincrement
+    check = keyword_ "check" >> ColCheckConstraint <$> parens scalarExpr
+    references = keyword_ "references" >>
+        (\t c m (ou,od) -> ColReferencesConstraint t c m ou od)
+        <$> names
+        <*> optionMaybe (parens name)
+        <*> refMatch
+        <*> refActions
+
+-- slightly hacky parser for signed integers: an optional +/- symbol
+-- token followed by an unsigned integer token
+
+signedInteger :: Parser Integer
+signedInteger =
+    (*) <$> option 1 (1 <$ symbol "+" <|> (-1) <$ symbol "-")
+    <*> unsignedInteger
+
+-- | Parse sequence generator options (start with, increment by,
+-- maxvalue, cycle, ...) in any order, each appearing at most once.
+sequenceGeneratorOptions :: Parser [SequenceGeneratorOption]
+sequenceGeneratorOptions =
+         -- todo: could try to combine exclusive options
+         -- such as cycle and nocycle
+         -- sort out options which are sometimes not allowed
+         -- as datatype, and restart with
+    permute ((\a b c d e f g h j k -> catMaybes [a,b,c,d,e,f,g,h,j,k])
+                  <$?> nj startWith
+                  <|?> nj dataType
+                  <|?> nj restart
+                  <|?> nj incrementBy
+                  <|?> nj maxValue
+                  <|?> nj noMaxValue
+                  <|?> nj minValue
+                  <|?> nj noMinValue
+                  <|?> nj scycle
+                  <|?> nj noCycle
+                 )
+  where
+    -- nj: an optional permutation slot, Nothing when the option is absent
+    nj p = (Nothing,Just <$> p)
+    startWith = keywords_ ["start", "with"] >>
+                SGOStartWith <$> signedInteger
+    dataType = keyword_ "as" >>
+               SGODataType <$> typeName
+    restart = keyword_ "restart" >>
+              SGORestart <$> optionMaybe (keyword_ "with" *> signedInteger)
+    incrementBy = keywords_ ["increment", "by"] >>
+                SGOIncrementBy <$> signedInteger
+    maxValue = keyword_ "maxvalue" >>
+                SGOMaxValue <$> signedInteger
+    noMaxValue = SGONoMaxValue <$ try (keywords_ ["no","maxvalue"])
+    minValue = keyword_ "minvalue" >>
+                SGOMinValue <$> signedInteger
+    noMinValue = SGONoMinValue <$ try (keywords_ ["no","minvalue"])
+    scycle = SGOCycle <$ keyword_ "cycle"
+    noCycle = SGONoCycle <$ try (keywords_ ["no","cycle"])
+
+
+-- | Parse an alter table statement (the leading "alter" is presumably
+-- consumed by the dispatching statement parser -- this starts at the
+-- "table" keyword).
+alterTable :: Parser Statement
+alterTable = keyword_ "table" >>
+    -- the choices have been ordered so that it works
+    AlterTable <$> names <*> choice [addConstraint
+                                    ,dropConstraint
+                                    ,addColumnDef
+                                    ,alterColumn
+                                    ,dropColumn
+                                    ]
+  where
+    addColumnDef = try (keyword_ "add"
+                        *> optional (keyword_ "column")) >>
+                   AddColumnDef <$> columnDef
+    alterColumn = keyword_ "alter" >> optional (keyword_ "column") >>
+                  name <**> choice [setDefault
+                                   ,dropDefault
+                                   ,setNotNull
+                                   ,dropNotNull
+                                   ,setDataType]
+    setDefault :: Parser (Name -> AlterTableAction)
+    -- todo: left factor
+    setDefault = try (keywords_ ["set","default"]) >>
+                 scalarExpr <$$> AlterColumnSetDefault
+    dropDefault = AlterColumnDropDefault <$ try (keywords_ ["drop","default"])
+    setNotNull = AlterColumnSetNotNull <$ try (keywords_ ["set","not","null"])
+    dropNotNull = AlterColumnDropNotNull <$ try (keywords_ ["drop","not","null"])
+    setDataType = try (keywords_ ["set","data","type"]) >>
+                  typeName <$$> AlterColumnSetDataType
+    dropColumn = try (keyword_ "drop" *> optional (keyword_ "column")) >>
+                 DropColumn <$> name <*> dropBehaviour
+    -- todo: left factor, this try is especially bad
+    addConstraint = try (keyword_ "add" >>
+        uncurry AddTableConstraintDef <$> tableConstraintDef)
+    dropConstraint = try (keywords_ ["drop","constraint"]) >>
+        DropTableConstraintDef <$> names <*> dropBehaviour
+
+
+-- | Parse drop schema (starting at the "schema" keyword; the leading
+-- "drop" is presumably consumed by the caller -- same for the other
+-- create/drop parsers below).
+dropSchema :: Parser Statement
+dropSchema = keyword_ "schema" >>
+    DropSchema <$> names <*> dropBehaviour
+
+-- | Parse drop table.
+dropTable :: Parser Statement
+dropTable = keyword_ "table" >>
+    DropTable <$> names <*> dropBehaviour
+
+-- | Parse create [recursive] view, with optional column list and
+-- optional with check option suffix.
+createView :: Parser Statement
+createView =
+    CreateView
+    <$> (option False (True <$ keyword_ "recursive") <* keyword_ "view")
+    <*> names
+    <*> optionMaybe (parens (commaSep1 name))
+    <*> (keyword_ "as" *> queryExpr)
+    <*> optionMaybe (choice [
+            -- todo: left factor
+            DefaultCheckOption <$ try (keywords_ ["with", "check", "option"])
+           ,CascadedCheckOption <$ try (keywords_ ["with", "cascaded", "check", "option"])
+           ,LocalCheckOption <$ try (keywords_ ["with", "local", "check", "option"])
+            ])
+
+-- | Parse drop view.
+dropView :: Parser Statement
+dropView = keyword_ "view" >>
+    DropView <$> names <*> dropBehaviour
+
+-- | Parse create domain: name, base type, optional default, and any
+-- number of (optionally named) check constraints.
+createDomain :: Parser Statement
+createDomain = keyword_ "domain" >>
+    CreateDomain
+    <$> names
+    <*> (optional (keyword_ "as") *> typeName)
+    <*> optionMaybe (keyword_ "default" *> scalarExpr)
+    <*> many con
+  where
+    con = (,) <$> optionMaybe (keyword_ "constraint" *> names)
+          <*> (keyword_ "check" *> parens scalarExpr)
+
+-- | Parse alter domain: set default, add constraint, or drop
+-- default/constraint.
+alterDomain :: Parser Statement
+alterDomain = keyword_ "domain" >>
+    AlterDomain
+    <$> names
+    <*> (setDefault <|> constraint
+         <|> (keyword_ "drop" *> (dropDefault <|> dropConstraint)))
+  where
+    setDefault = keywords_ ["set", "default"] >> ADSetDefault <$> scalarExpr
+    constraint = keyword_ "add" >>
+       ADAddConstraint
+       <$> optionMaybe (keyword_ "constraint" *> names)
+       <*> (keyword_ "check" *> parens scalarExpr)
+    dropDefault = ADDropDefault <$ keyword_ "default"
+    dropConstraint = keyword_ "constraint" >> ADDropConstraint <$> names
+
+-- | Parse drop domain.
+dropDomain :: Parser Statement
+dropDomain = keyword_ "domain" >>
+    DropDomain <$> names <*> dropBehaviour
+
+-- | Parse create sequence with its generator options.
+createSequence :: Parser Statement
+createSequence = keyword_ "sequence" >>
+    CreateSequence
+    <$> names
+    <*> sequenceGeneratorOptions
+
+-- | Parse alter sequence with its generator options.
+alterSequence :: Parser Statement
+alterSequence = keyword_ "sequence" >>
+    AlterSequence
+    <$> names
+    <*> sequenceGeneratorOptions
+
+-- | Parse drop sequence.
+dropSequence :: Parser Statement
+dropSequence = keyword_ "sequence" >>
+    DropSequence <$> names <*> dropBehaviour
+
+-- | Parse create assertion: name plus a parenthesised check expression.
+createAssertion :: Parser Statement
+createAssertion = keyword_ "assertion" >>
+    CreateAssertion
+    <$> names
+    <*> (keyword_ "check" *> parens scalarExpr)
+
+
+-- | Parse drop assertion.
+dropAssertion :: Parser Statement
+dropAssertion = keyword_ "assertion" >>
+    DropAssertion <$> names <*> dropBehaviour
+
+{-
+-----------------
+
+= dml
+-}
+
+-- | Parse delete from: table name, optional alias, optional where.
+delete :: Parser Statement
+delete = keywords_ ["delete","from"] >>
+    Delete
+    <$> names
+    <*> optionMaybe (optional (keyword_ "as") *> name)
+    <*> optionMaybe (keyword_ "where" *> scalarExpr)
+
+-- | Parse truncate table, with optional continue/restart identity.
+truncateSt :: Parser Statement
+truncateSt = keywords_ ["truncate", "table"] >>
+    Truncate
+    <$> names
+    <*> option DefaultIdentityRestart
+        (ContinueIdentity <$ keywords_ ["continue","identity"]
+         <|> RestartIdentity <$ keywords_ ["restart","identity"])
+
+-- | Parse insert into: optional column list, then either default
+-- values or a source query expression (values lists presumably come
+-- through queryExpr).
+insert :: Parser Statement
+insert = keywords_ ["insert", "into"] >>
+    Insert
+    <$> names
+    <*> optionMaybe (parens $ commaSep1 name)
+    <*> (DefaultInsertValues <$ keywords_ ["default", "values"]
+         <|> InsertQuery <$> queryExpr)
+
+-- | Parse update: table, optional alias, set clauses, optional where.
+update :: Parser Statement
+update = keywords_ ["update"] >>
+    Update
+    <$> names
+    <*> optionMaybe (optional (keyword_ "as") *> name)
+    <*> (keyword_ "set" *> commaSep1 setClause)
+    <*> optionMaybe (keyword_ "where" *> scalarExpr)
+  where
+    setClause = multipleSet <|> singleSet
+    -- (a,b) = (x,y) style multiple assignment
+    multipleSet = SetMultiple
+                  <$> parens (commaSep1 names)
+                  <*> (symbol "=" *> parens (commaSep1 scalarExpr))
+    singleSet = Set
+                <$> names
+                <*> (symbol "=" *> scalarExpr)
+
+-- | Parse the optional restrict/cascade suffix of drop statements.
+dropBehaviour :: Parser DropBehaviour
+dropBehaviour =
+    option DefaultDropBehaviour
+    (Restrict <$ keyword_ "restrict"
+    <|> Cascade <$ keyword_ "cascade")
+
+{-
+-----------------------------
+
+= transaction management
+-}
+
+-- | Parse start transaction.
+startTransaction :: Parser Statement
+startTransaction = StartTransaction <$ keywords_ ["start","transaction"]
+
+-- | Parse savepoint <name>.
+savepoint :: Parser Statement
+savepoint = keyword_ "savepoint" >>
+    Savepoint <$> name
+
+-- | Parse release savepoint <name>.
+releaseSavepoint :: Parser Statement
+releaseSavepoint = keywords_ ["release","savepoint"] >>
+    ReleaseSavepoint <$> name
+
+-- | Parse commit, with optional "work" noise word.
+commit :: Parser Statement
+commit = Commit <$ keyword_ "commit" <* optional (keyword_ "work")
+
+-- | Parse rollback [work] [to savepoint <name>].
+rollback :: Parser Statement
+rollback = keyword_ "rollback" >> optional (keyword_ "work") >>
+    Rollback <$> optionMaybe (keywords_ ["to", "savepoint"] *> name)
+
+
+{-
+------------------------------
+
+= Access control
+
+TODO: fix try at the 'on'
+-}
+
+-- | Parse grant: either granting privileges on an object, or
+-- granting roles. The try backtracks out of the privilege form
+-- (see the TODO above about fixing this at the 'on').
+grant :: Parser Statement
+grant = keyword_ "grant" >> (try priv <|> role)
+  where
+    priv = GrantPrivilege
+           <$> commaSep privilegeAction
+           <*> (keyword_ "on" *> privilegeObject)
+           <*> (keyword_ "to" *> commaSep name)
+           <*> option WithoutGrantOption
+               (WithGrantOption <$ keywords_ ["with","grant","option"])
+    role = GrantRole
+           <$> commaSep name
+           <*> (keyword_ "to" *> commaSep name)
+           <*> option WithoutAdminOption
+               (WithAdminOption <$ keywords_ ["with","admin","option"])
+
+-- | Parse create role <name>.
+createRole :: Parser Statement
+createRole = keyword_ "role" >>
+    CreateRole <$> name
+
+-- | Parse drop role <name>.
+dropRole :: Parser Statement
+dropRole = keyword_ "role" >>
+    DropRole <$> name
+
+-- TODO: fix try at the 'on'
+
+-- | Parse revoke: mirror of 'grant' -- either revoking privileges
+-- on an object, or revoking roles, with a backtracking try between
+-- the two forms.
+revoke :: Parser Statement
+revoke = keyword_ "revoke" >> (try priv <|> role)
+  where
+    priv = RevokePrivilege
+           <$> option NoGrantOptionFor
+               (GrantOptionFor <$ keywords_ ["grant","option","for"])
+           <*> commaSep privilegeAction
+           <*> (keyword_ "on" *> privilegeObject)
+           <*> (keyword_ "from" *> commaSep name)
+           <*> dropBehaviour
+    role = RevokeRole
+           <$> option NoAdminOptionFor
+               (AdminOptionFor <$ keywords_ ["admin","option", "for"])
+           <*> commaSep name
+           <*> (keyword_ "from" *> commaSep name)
+           <*> dropBehaviour
+
+-- | Parse a privilege action for grant/revoke; select, insert,
+-- update and references take an optional column list.
+privilegeAction :: Parser PrivilegeAction
+privilegeAction = choice
+    [PrivAll <$ keywords_ ["all","privileges"]
+    ,keyword_ "select" >>
+     PrivSelect <$> option [] (parens $ commaSep name)
+    ,PrivDelete <$ keyword_ "delete"
+    ,PrivUsage <$ keyword_ "usage"
+    ,PrivTrigger <$ keyword_ "trigger"
+    ,PrivExecute <$ keyword_ "execute"
+    ,keyword_ "insert" >>
+     PrivInsert <$> option [] (parens $ commaSep name)
+    ,keyword_ "update" >>
+     PrivUpdate <$> option [] (parens $ commaSep name)
+    ,keyword_ "references" >>
+     PrivReferences <$> option [] (parens $ commaSep name)
+    ]
+
+-- | Parse the object of a grant/revoke; a bare name defaults to a
+-- table (the "table" keyword is optional).
+privilegeObject :: Parser PrivilegeObject
+privilegeObject = choice
+    [keyword_ "domain" >> PrivDomain <$> names
+    ,keyword_ "type" >> PrivType <$> names
+    ,keyword_ "sequence" >> PrivSequence <$> names
+    ,keywords_ ["specific","function"] >> PrivFunction <$> names
+    ,optional (keyword_ "table") >> PrivTable <$> names
+    ]
+
+
+{-
+----------------------------
+
+wrapper to parse a series of statements. They must be separated by
+semicolon, but for the last statement, the trailing semicolon is
+optional.
+-}
+
+-- | Parse a series of statements. NOTE(review): the comment above
+-- says semicolon-separated with the final one optional -- presumably
+-- 'statement' itself consumes the separator; confirm against its
+-- definition.
+statements :: Parser [Statement]
+statements = many statement
+
+{-
+----------------------------------------------
+
+= multi keyword helper
+
+This helper is to help parsing multiple options of multiple keywords
+with similar prefixes, e.g. parsing 'is null' and 'is not null'.
+
+use to left factor/ improve:
+typed literal and general identifiers
+not like, not in, not between operators
+help with factoring keyword functions and other app-likes
+the join keyword sequences
+fetch first/next
+row/rows only
+
+There is probably a simpler way of doing this but I am a bit
+thick.
+-}
+
+-- | Parse one of a set of multi-keyword sequences sharing prefixes
+-- (e.g. "is null" / "is not null"), committing keyword by keyword,
+-- and return the sequence of keywords matched.
+makeKeywordTree :: [String] -> Parser [String]
+makeKeywordTree sets =
+    parseTrees (sort $ map words sets)
+  where
+    parseTrees :: [[String]] -> Parser [String]
+    parseTrees ws = do
+      -- group alternatives by their first keyword (input is sorted,
+      -- so equal heads are adjacent)
+      let gs :: [[[String]]]
+          gs = groupBy ((==) `on` safeHead) ws
+      choice $ map parseGroup gs
+    parseGroup :: [[String]] -> Parser [String]
+    parseGroup l@((k:_):_) = do
+        keyword_ k
+        let tls = mapMaybe safeTail l
+            pr = (k:) <$> parseTrees tls
+        -- if some alternative ends here, matching just the prefix is ok
+        if any null tls
+          then pr <|> pure [k]
+          else pr
+    parseGroup _ = guard False >> error "impossible"
+    safeHead (x:_) = Just x
+    safeHead [] = Nothing
+    safeTail (_:x) = Just x
+    safeTail [] = Nothing
+
+{-
+------------------------------------------------
+
+= lexing
+
+TODO: push checks into here:
+keyword blacklists
+unsigned integer match
+symbol matching
+keyword matching
+-}
+
+-- | Match a string lexer token, returning (start quote, end quote,
+-- contents).
+stringTok :: Parser (String,String,String)
+stringTok = mytoken (\tok ->
+    case tok of
+      L.SqlString s e t -> Just (s,e,t)
+      _ -> Nothing)
+
+-- | Match only strings quoted with plain single quotes, returning
+-- the contents.
+singleQuotesOnlyStringTok :: Parser String
+singleQuotesOnlyStringTok = mytoken (\tok ->
+    case tok of
+      L.SqlString "'" "'" t -> Just t
+      _ -> Nothing)
+
+{-
+This is to support SQL strings where you can write
+'part of a string' ' another part'
+and it will parse as a single string
+
+It is only allowed when all the strings are quoted with ' atm.
+-}
+
+-- | Parse a string token, concatenating any directly following
+-- string tokens ('part one' 'part two' parses as one string); only
+-- applies when all parts use plain single quotes.
+stringTokExtend :: Parser (String,String,String)
+stringTokExtend = do
+    (s,e,x) <- stringTok
+    choice [
+         do
+         guard (s == "'" && e == "'")
+         (s',e',y) <- stringTokExtend
+         guard (s' == "'" && e' == "'")
+         return $ (s,e,x ++ y)
+        ,return (s,e,x)
+        ]
+
+-- | Match a prefixed-variable token (e.g. :host_param), returning
+-- the prefix character and name as one string.
+hostParamTok :: Parser String
+hostParamTok = mytoken (\tok ->
+    case tok of
+      L.PrefixedVariable c p -> Just (c:p)
+      _ -> Nothing)
+
+-- | Match a positional argument token (e.g. $1), returning its number.
+positionalArgTok :: Parser Int
+positionalArgTok = mytoken (\tok ->
+    case tok of
+      L.PositionalArg p -> Just p
+      _ -> Nothing)
+
+
+-- | Match a number token; when the flag is True only accept numbers
+-- consisting entirely of digits (i.e. unsigned integers).
+sqlNumberTok :: Bool -> Parser String
+sqlNumberTok intOnly = mytoken (\tok ->
+    case tok of
+      L.SqlNumber p | not intOnly || all isDigit p -> Just p
+      _ -> Nothing)
+
+
+-- | Match a symbol token; Nothing matches any symbol, Just s matches
+-- that exact symbol.
+symbolTok :: Maybe String -> Parser String
+symbolTok sym = mytoken (\tok ->
+    case (sym,tok) of
+      (Nothing, L.Symbol p) -> Just p
+      (Just s, L.Symbol p) | s == p -> Just p
+      _ -> Nothing)
+
+-- | Match an identifier token, rejecting unquoted identifiers whose
+-- lowercased name is in the blacklist (quoted identifiers always match).
+identifierTok :: [String] -> Parser (Maybe (String,String), String)
+identifierTok blackList = mytoken (\tok ->
+    case tok of
+      L.Identifier q@(Just {}) p -> Just (q,p)
+      L.Identifier q p | map toLower p `notElem` blackList -> Just (q,p)
+      _ -> Nothing)
+
+-- | Match an unquoted identifier token: with Nothing, any identifier
+-- not in the blacklist; with Just k, only the keyword k (case
+-- insensitive, blacklist not consulted).
+unquotedIdentifierTok :: [String] -> Maybe String -> Parser String
+unquotedIdentifierTok blackList kw = mytoken (\tok ->
+    case (kw,tok) of
+      (Nothing, L.Identifier Nothing p) | map toLower p `notElem` blackList -> Just p
+      (Just k, L.Identifier Nothing p) | k == map toLower p -> Just p
+      _ -> Nothing)
+
+-- | Lift a partial function on lexer tokens into a parser, using the
+-- token's recorded (file,line,col) for source positions.
+mytoken :: (L.Token -> Maybe a) -> Parser a
+mytoken test = token showToken posToken testToken
+  where
+    showToken (_,tok)   = show tok
+    posToken  ((a,b,c),_)  = newPos a b c
+    testToken (_,tok)   = test tok
+
+-- | Parse an unsigned integer literal.
+unsignedInteger :: Parser Integer
+unsignedInteger = read <$> sqlNumberTok True <?> "natural number"
+
+-- todo: work out the symbol parsing better
+
+-- | Parse the exact symbol given, labelled with itself for errors.
+symbol :: String -> Parser String
+symbol s = symbolTok (Just s) <?> s
+
+-- | Parse a one-character symbol, returning the character.
+singleCharSymbol :: Char -> Parser Char
+singleCharSymbol c = c <$ symbol [c]
+
+questionMark :: Parser Char
+questionMark = singleCharSymbol '?' <?> "question mark"
+
+openParen :: Parser Char
+openParen = singleCharSymbol '('
+
+closeParen :: Parser Char
+closeParen = singleCharSymbol ')'
+
+openBracket :: Parser Char
+openBracket = singleCharSymbol '['
+
+closeBracket :: Parser Char
+closeBracket = singleCharSymbol ']'
+
+
+comma :: Parser Char
+comma = singleCharSymbol ','
+
+semi :: Parser Char
+semi = singleCharSymbol ';'
+
+-- = helper functions
+
+-- | Parse the given keyword (an unquoted identifier matched case
+-- insensitively), labelled with the keyword for error messages.
+keyword :: String -> Parser String
+keyword k = unquotedIdentifierTok [] (Just k) <?> k
+
+-- helper function to improve error messages
+
+-- | Parse a sequence of keywords, labelled as a single space-joined
+-- phrase for error messages.
+keywords_ :: [String] -> Parser ()
+keywords_ ks = mapM_ keyword_ ks <?> unwords ks
+
+
+parens :: Parser a -> Parser a
+parens = between openParen closeParen
+
+brackets :: Parser a -> Parser a
+brackets = between openBracket closeBracket
+
+commaSep :: Parser a -> Parser [a]
+commaSep = (`sepBy` comma)
+
+keyword_ :: String -> Parser ()
+keyword_ = void . keyword
+
+symbol_ :: String -> Parser ()
+symbol_ = void . symbol
+
+commaSep1 :: Parser a -> Parser [a]
+commaSep1 = (`sepBy1` comma)
+
+-- | The names which may not be used as unquoted identifiers in the
+-- given dialect (see the discussion below).
+blacklist :: Dialect -> [String]
+blacklist d = diKeywords d
+
+{-
+These blacklisted names are mostly needed when we parse something with
+an optional alias, e.g. select a a from t. If we write select a from
+t, we have to make sure the from isn't parsed as an alias. I'm not
+sure what other places strictly need the blacklist, and in theory it
+could be tuned differently for each place the identifierString/
+identifier parsers are used to only blacklist the bare
+minimum. Something like this might be needed for dialect support, even
+if it is pretty silly to use a keyword as an unquoted identifier when
+there is a quoting syntax as well.
+
+The standard has a weird mix of reserved keywords and unreserved
+keywords (I'm not sure what exactly being an unreserved keyword
+means).
+
+The current approach tries to have everything which is a keyword only
+in the keyword list - so it can only be used in some other context if
+quoted. If something is a 'ansi keyword', but appears only as an
+identifier or function name for instance in the syntax (or something
+that looks identical to this), then it isn't treated as a keyword at
+all. When there is some overlap (e.g. 'set'), then there is either
+special case parsing code to handle this (in the case of set), or it
+is not treated as a keyword (not perfect, but if it more or less
+works, ok for now).
+
+An exception to this is the standard type names are considered as
+keywords at the moment, with a special case in the type parser to
+make this work. Maybe this isn't necessary or is a bad idea.
+
+It is possible to have a problem if you remove something which is a
+keyword from this list, and still want to parse statements using it
+as a keyword - for instance, removing things like 'from' or 'as',
+will likely mean many things don't parse anymore.
+
+
+
+-----------
+
+Used to make the dialect available during parsing so different parsers
+can be used for different dialects. Not sure if this is the best way
+to do it, but it's convenient
+-}
+
+-- | Parser user state: the dialect is threaded through so parsers
+-- can vary behaviour per dialect.
+type ParseState = Dialect
+
+-- | A lexer token paired with its (file,line,column) position.
+type Token = ((String,Int,Int),L.Token)
+
+type Parser = GenParser Token ParseState
+
+-- | Succeed only when the predicate holds for the current dialect.
+guardDialect :: (Dialect -> Bool) -> Parser ()
+guardDialect f = do
+    d <- getState
+    guard (f d)
+
+{-
+The dialect stuff could also be used for custom options: e.g. to only
+parse dml for instance.
+-}
diff --git a/Language/SQL/SimpleSQL/Parse.lhs b/Language/SQL/SimpleSQL/Parse.lhs
deleted file mode 100644
index 29c098c..0000000
--- a/Language/SQL/SimpleSQL/Parse.lhs
+++ /dev/null
@@ -1,2188 +0,0 @@
-
-= TOC:
-
-notes
-Public api
-Names - parsing identifiers
-Typenames
-Scalar expressions
-  simple literals
-  star, param
-  parens expression, row constructor and scalar subquery
-  case, cast, exists, unique, array/ multiset constructor
-  typed literal, app, special function, aggregate, window function
-  suffixes: in, between, quantified comparison, match predicate, array
-    subscript, escape, collate
-  operators
-  scalar expression top level
-  helpers
-query expressions
-  select lists
-  from clause
-  other table expression clauses:
-    where, group by, having, order by, offset and fetch
-  common table expressions
-  query expression
-  set operations
-lexers
-utilities
-
-= Notes about the code
-
-The lexers appear at the bottom of the file. There tries to be a clear
-separation between the lexers and the other parser which only use the
-lexers, this isn't 100% complete at the moment and needs fixing.
-
-== Left factoring
-
-The parsing code is aggressively left factored, and try is avoided as
-much as possible. Try is avoided because:
-
- * when it is overused it makes the code hard to follow
- * when it is overused it makes the parsing code harder to debug
- * it makes the parser error messages much worse
-
-The code could be made a bit simpler with a few extra 'trys', but this
-isn't done because of the impact on the parser error
-messages. Apparently it can also help the speed but this hasn't been
-looked into.
-
-== Parser error messages
-
-A lot of care has been given to generating good parser error messages
-for invalid syntax. There are a few utils below which partially help
-in this area.
-
-There is a set of crafted bad expressions in ErrorMessages.lhs, these
-are used to guage the quality of the error messages and monitor
-regressions by hand. The use of <?> is limited as much as possible:
-each instance should justify itself by improving an actual error
-message.
-
-There is also a plan to write a really simple expression parser which
-doesn't do precedence and associativity, and the fix these with a pass
-over the ast. I don't think there is any other way to sanely handle
-the common prefixes between many infix and postfix multiple keyword
-operators, and some other ambiguities also. This should help a lot in
-generating good error messages also.
-
-Both the left factoring and error message work are greatly complicated
-by the large number of shared prefixes of the various elements in SQL
-syntax.
-
-== Main left factoring issues
-
-There are three big areas which are tricky to left factor:
-
- * typenames
- * scalar expressions which can start with an identifier
- * infix and suffix operators
-
-=== typenames
-
-There are a number of variations of typename syntax. The standard
-deals with this by switching on the name of the type which is parsed
-first. This code doesn't do this currently, but might in the
-future. Taking the approach in the standard grammar will limit the
-extensibility of the parser and might affect the ease of adapting to
-support other sql dialects.
-
-=== identifier scalar expressions
-
-There are a lot of scalar expression nodes which start with
-identifiers, and can't be distinguished the tokens after the initial
-identifier are parsed. Using try to implement these variations is very
-simple but makes the code much harder to debug and makes the parser
-error messages really bad.
-
-Here is a list of these nodes:
-
- * identifiers
- * function application
- * aggregate application
- * window application
- * typed literal: typename 'literal string'
- * interval literal which is like the typed literal with some extras
-
-There is further ambiguity e.g. with typed literals with precision,
-functions, aggregates, etc. - these are an identifier, followed by
-parens comma separated scalar expressions or something similar, and it
-is only later that we can find a token which tells us which flavour it
-is.
-
-There is also a set of nodes which start with an identifier/keyword
-but can commit since no other syntax can start the same way:
-
- * case
- * cast
- * exists, unique subquery
- * array constructor
- * multiset constructor
- * all the special syntax functions: extract, position, substring,
-  convert, translate, overlay, trim, etc.
-
-The interval literal mentioned above is treated in this group at the
-moment: if we see 'interval' we parse it either as a full interval
-literal or a typed literal only.
-
-Some items in this list might have to be fixed in the future, e.g. to
-support standard 'substring(a from 3 for 5)' as well as regular
-function substring syntax 'substring(a,3,5) at the same time.
-
-The work in left factoring all this is mostly done, but there is still
-a substantial bit to complete and this is by far the most difficult
-bit. At the moment, the work around is to use try, the downsides of
-which is the poor parsing error messages.
-
-=== infix and suffix operators
-
-== permissiveness
-
-The parser is very permissive in many ways. This departs from the
-standard which is able to eliminate a number of possibilities just in
-the grammar, which this parser allows. This is done for a number of
-reasons:
-
- * it makes the parser simple - less variations
- * it should allow for dialects and extensibility more easily in the
-  future (e.g. new infix binary operators with custom precedence)
- * many things which are effectively checked in the grammar in the
-  standard, can be checked using a typechecker or other simple static
-  analysis
-
-To use this code as a front end for a sql engine, or as a sql validity
-checker, you will need to do a lot of checks on the ast. A
-typechecker/static checker plus annotation to support being a compiler
-front end is planned but not likely to happen too soon.
-
-Some of the areas this affects:
-
-typenames: the variation of the type name should switch on the actual
-name given according to the standard, but this code only does this for
-the special case of interval type names. E.g. you can write 'int
-collate C' or 'int(15,2)' and this will parse as a character type name
-or a precision scale type name instead of being rejected.
-
-scalar expressions: every variation on scalar expressions uses the same
-parser/syntax. This means we don't try to stop non boolean valued
-expressions in boolean valued contexts in the parser. Another area
-this affects is that we allow general scalar expressions in group by,
-whereas the standard only allows column names with optional collation.
-
-These are all areas which are specified (roughly speaking) in the
-syntax rather than the semantics in the standard, and we are not
-fixing them in the syntax but leaving them till the semantic checking
-(which doesn't exist in this code at this time).
-
-> {-# LANGUAGE TupleSections #-}
-> -- | This is the module with the parser functions.
-> module Language.SQL.SimpleSQL.Parse
->     (parseQueryExpr
->     ,parseScalarExpr
->     ,parseStatement
->     ,parseStatements
->     ,ParseError(..)) where
-
-> import Control.Monad.Identity (Identity)
-> import Control.Monad (guard, void)
-> import Control.Applicative ((<**>))
-> import Data.Char (toLower, isDigit)
-> import Text.Parsec (setPosition,setSourceColumn,setSourceLine,getPosition
->                    ,option,between,sepBy,sepBy1
->                    ,try,many,many1,(<|>),choice,eof
->                    ,optionMaybe,optional,runParser
->                    ,chainl1, chainr1,(<?>))
-> import Text.Parsec.Perm (permute,(<$?>), (<|?>))
-> import Text.Parsec.Prim (getState, token)
-> import Text.Parsec.Pos (newPos)
-> import qualified Text.Parsec.Expr as E
-> import Data.List (intercalate,sort,groupBy)
-> import Data.Function (on)
-> import Data.Maybe
-> import Text.Parsec.String (GenParser)
-
-> import Language.SQL.SimpleSQL.Syntax
-> import Language.SQL.SimpleSQL.Combinators
-> import Language.SQL.SimpleSQL.Errors
-> import Language.SQL.SimpleSQL.Dialect
-> import qualified Language.SQL.SimpleSQL.Lex as L
-
-
-= Public API
-
-> -- | Parses a query expr, trailing semicolon optional.
-> parseQueryExpr :: Dialect
->                   -- ^ dialect of SQL to use
->                -> FilePath
->                   -- ^ filename to use in error messages
->                -> Maybe (Int,Int)
->                   -- ^ line number and column number of the first character
->                   -- in the source to use in error messages
->                -> String
->                   -- ^ the SQL source to parse
->                -> Either ParseError QueryExpr
-> parseQueryExpr = wrapParse topLevelQueryExpr
-
-> -- | Parses a statement, trailing semicolon optional.
-> parseStatement :: Dialect
->                   -- ^ dialect of SQL to use
->                -> FilePath
->                   -- ^ filename to use in error messages
->                -> Maybe (Int,Int)
->                   -- ^ line number and column number of the first character
->                   -- in the source to use in error messages
->                -> String
->                   -- ^ the SQL source to parse
->                -> Either ParseError Statement
-> parseStatement = wrapParse topLevelStatement
-
-
-> -- | Parses a list of statements, with semi colons between
-> -- them. The final semicolon is optional.
-> parseStatements :: Dialect
->                   -- ^ dialect of SQL to use
->                 -> FilePath
->                    -- ^ filename to use in error messages
->                 -> Maybe (Int,Int)
->                    -- ^ line number and column number of the first character
->                    -- in the source to use in error messages
->                 -> String
->                    -- ^ the SQL source to parse
->                 -> Either ParseError [Statement]
-> parseStatements = wrapParse statements
-
-> -- | Parses a scalar expression.
-> parseScalarExpr :: Dialect
->                    -- ^ dialect of SQL to use
->                 -> FilePath
->                    -- ^ filename to use in error messages
->                 -> Maybe (Int,Int)
->                    -- ^ line number and column number of the first character
->                    -- in the source to use in error messages
->                 -> String
->                    -- ^ the SQL source to parse
->                 -> Either ParseError ScalarExpr
-> parseScalarExpr = wrapParse scalarExpr
-
-This helper function takes the parser given and:
-
-sets the position when parsing
-automatically skips leading whitespace
-checks the parser parses all the input using eof
-converts the error return to the nice wrapper
-
-> wrapParse :: Parser a
->           -> Dialect
->           -> FilePath
->           -> Maybe (Int,Int)
->           -> String
->           -> Either ParseError a
-> wrapParse parser d f p src = do
->     let (l,c) = fromMaybe (1,1) p
->     lx <- L.lexSQL d f (Just (l,c)) src
->     either (Left . convParseError src) Right
->       $ runParser (setPos p *> parser <* eof)
->                   d f $ filter keep lx
->   where
->     setPos Nothing = pure ()
->     setPos (Just (l,c)) = fmap up getPosition >>= setPosition
->       where up = flip setSourceColumn c . flip setSourceLine l
->     keep (_,L.Whitespace {}) = False
->     keep (_,L.LineComment {}) = False
->     keep (_,L.BlockComment {}) = False
->     keep _ = True
-
-
-------------------------------------------------
-
-= Names
-
-Names represent identifiers and a few other things. The parser here
-handles regular identifiers, dotten chain identifiers, quoted
-identifiers and unicode quoted identifiers.
-
-Dots: dots in identifier chains are parsed here and represented in the
-Iden constructor usually. If parts of the chains are non identifier
-scalar expressions, then this is represented by a BinOp "."
-instead. Dotten chain identifiers which appear in other contexts (such
-as function names, table names, are represented as [Name] only.
-
-Identifier grammar:
-
-unquoted:
-underscore <|> letter : many (underscore <|> alphanum
-
-example
-_example123
-
-quoted:
-
-double quote, many (non quote character or two double quotes
-together), double quote
-
-"example quoted"
-"example with "" quote"
-
-unicode quoted is the same as quoted in this parser, except it starts
-with U& or u&
-
-u&"example quoted"
-
-> name :: Parser Name
-> name = do
->     d <- getState
->     uncurry Name <$> identifierTok (blacklist d)
-
-todo: replace (:[]) with a named function all over
-
-> -- | parse a dotted chain of one or more names, e.g. a.b.c
-> names :: Parser [Name]
-> names = reverse <$> (((:[]) <$> name) <??*> anotherName)
->   -- can't use a simple chain here since we
->   -- want to wrap the . + name in a try
->   -- this will change when this is left factored
->   where
->     -- parse one more ".name" component; components are consed on in
->     -- parse order, hence the reverse above
->     anotherName :: Parser ([Name] -> [Name])
->     anotherName = try ((:) <$> (symbol "." *> name))
-
-= Type Names
-
-Typenames are used in casts, and also in the typed literal syntax,
-which is a typename followed by a string literal.
-
-Here are the grammar notes:
-
-== simple type name
-
-just an identifier chain or a multi word identifier (this is a fixed
-list of possibilities, e.g. as 'character varying', see below in the
-parser code for the exact list).
-
-<simple-type-name> ::= <identifier-chain>
-     | multiword-type-identifier
-
-== Precision type name
-
-<precision-type-name> ::= <simple-type-name> <left paren> <unsigned-int> <right paren>
-
-e.g. char(5)
-
-note: above and below, everywhere a simple type name can appear, this
-means a single identifier/quoted or a dotted chain, or a multi word
-identifier
-
-== Precision scale type name
-
-<precision-scale-type-name> ::= <simple-type-name> <left paren> <unsigned-int> <comma> <unsigned-int> <right paren>
-
-e.g. decimal(15,2)
-
-== Lob type name
-
-this is a variation on the precision type name with some extra info on
-the units:
-
-<lob-type-name> ::=
-   <simple-type-name> <left paren> <unsigned integer> [ <multiplier> ] [ <char length units> ] <right paren>
-
-<multiplier>    ::=   K | M | G
-<char length units>    ::=   CHARACTERS | CODE_UNITS | OCTETS
-
-(if both multiplier and char length units are missing, then this will
-parse as a precision type name)
-
-e.g.
-clob(5M octets)
-
-== char type name
-
-this is a simple type with optional precision which allows the
-character set or the collation to appear as a suffix:
-
-<char type name> ::=
-    <simple type name>
-    [ <left paren> <unsigned-int> <right paren> ]
-    [ CHARACTER SET <identifier chain> ]
-    [ COLLATE <identifier chain> ]
-
-e.g.
-
-char(5) character set my_charset collate my_collation
-
-= Time typename
-
-this is typename with optional precision and either 'with time zone'
-or 'without time zone' suffix, e.g.:
-
-<datetime type> ::=
-    [ <left paren> <unsigned-int> <right paren> ]
-    <with or without time zone>
-<with or without time zone> ::= WITH TIME ZONE | WITHOUT TIME ZONE
-
-= row type name
-
-<row type> ::=
-    ROW <left paren> <field definition> [ { <comma> <field definition> }... ] <right paren>
-
-<field definition> ::= <identifier> <type name>
-
-e.g.
-row(a int, b char(5))
-
-= interval type name
-
-<interval type> ::= INTERVAL <interval datetime field> [TO <interval datetime field>]
-
-<interval datetime field> ::=
-  <datetime field> [ <left paren> <unsigned int> [ <comma> <unsigned int> ] <right paren> ]
-
-= array type name
-
-<array type> ::= <data type> ARRAY [ <left bracket> <unsigned integer> <right bracket> ]
-
-= multiset type name
-
-<multiset type>    ::=   <data type> MULTISET
-
-A type name will parse into the 'smallest' constructor it will fit in
-syntactically, e.g. a clob(5) will parse to a precision type name, not
-a lob type name.
-
-Unfortunately, to improve the error messages, there is a lot of (left)
-factoring in this function, and it is a little dense.
-
-> -- | Parse a type name as used in casts and typed literals. A type
-> -- name parses into the smallest constructor it fits syntactically;
-> -- the code is heavily left factored for error messages, see the
-> -- grammar notes above.
-> typeName :: Parser TypeName
-> typeName =
->     (rowTypeName <|> intervalTypeName <|> otherTypeName)
->     <??*> tnSuffix
->   where
->     rowTypeName =
->         RowTypeName <$> (keyword_ "row" *> parens (commaSep1 rowField))
->     rowField = (,) <$> name <*> typeName
->     ----------------------------
->     intervalTypeName =
->         keyword_ "interval" *>
->         (uncurry IntervalTypeName <$> intervalQualifier)
->     ----------------------------
->     -- everything else: plain, precision, lob, char and time type
->     -- names, left factored on the shared name + optional parens
->     otherTypeName =
->         nameOfType <**>
->             (typeNameWithParens
->              <|> pure Nothing <**> (timeTypeName <|> charTypeName)
->              <|> pure TypeName)
->     nameOfType = reservedTypeNames <|> names
->     charTypeName = charSet <**> (option [] tcollate <$$$$> CharTypeName)
->                    <|> pure [] <**> (tcollate <$$$$> CharTypeName)
->     typeNameWithParens =
->         (openParen *> unsignedInteger)
->         <**> (closeParen *> precMaybeSuffix
->               <|> (precScaleTypeName <|> precLengthTypeName) <* closeParen)
->     precMaybeSuffix = (. Just) <$> (timeTypeName <|> charTypeName)
->                       <|> pure (flip PrecTypeName)
->     precScaleTypeName = (comma *> unsignedInteger) <$$$> PrecScaleTypeName
->     precLengthTypeName =
->         Just <$> lobPrecSuffix
->         <**> (optionMaybe lobUnits <$$$$> PrecLengthTypeName)
->         <|> pure Nothing <**> ((Just <$> lobUnits) <$$$$> PrecLengthTypeName)
->     timeTypeName = tz <$$$> TimeTypeName
->     ----------------------------
->     -- lob precision multiplier suffix on the number, e.g. clob(5M ...)
->     lobPrecSuffix = PrecK <$ keyword_ "k"
->                     <|> PrecM <$ keyword_ "m"
->                     <|> PrecG <$ keyword_ "g"
->                     <|> PrecT <$ keyword_ "t"
->                     <|> PrecP <$ keyword_ "p"
->     lobUnits = PrecCharacters <$ keyword_ "characters"
->                -- char and byte are the oracle spelling
->                -- todo: move these to oracle dialect
->                <|> PrecCharacters <$ keyword_ "char"
->                <|> PrecOctets <$ keyword_ "octets"
->                <|> PrecOctets <$ keyword_ "byte"
->     tz = True <$ keywords_ ["with", "time","zone"]
->          <|> False <$ keywords_ ["without", "time","zone"]
->     charSet = keywords_ ["character", "set"] *> names
->     tcollate = keyword_ "collate" *> names
->     ----------------------------
->     -- array/multiset suffixes may stack on any type name
->     tnSuffix = multiset <|> array
->     multiset = MultisetTypeName <$ keyword_ "multiset"
->     array = keyword_ "array" *>
->         (optionMaybe (brackets unsignedInteger) <$$> ArrayTypeName)
->     ----------------------------
->     -- this parser handles the fixed set of multi word
->     -- type names, plus all the type names which are
->     -- reserved words
->     reservedTypeNames = do
->         d <- getState
->         (:[]) . Name Nothing . unwords <$> makeKeywordTree (diSpecialTypeNames d)
->         
-
-= Scalar expressions
-
-== simple literals
-
-See the stringToken lexer below for notes on string literal syntax.
-
-> -- string literal, keeping the original start/end quote text
-> stringLit :: Parser ScalarExpr
-> stringLit = mk <$> stringTokExtend
->   where
->     mk (s,e,t) = StringLit s e t
-
-> -- number literal, kept as the raw source text
-> numberLit :: Parser ScalarExpr
-> numberLit = fmap NumLit (sqlNumberTok False)
-
-> -- a number or string literal
-> simpleLiteral :: Parser ScalarExpr
-> simpleLiteral = choice [numberLit, stringLit]
-
-== star, param, host param
-
-=== star
-
-used in select *, select x.*, and agg(*) variations, and some other
-places as well. The parser doesn't attempt to check that the star is
-in a valid context, it parses it OK in any scalar expression context.
-
-> -- the star in select *, x.*, agg(*); accepted in any scalar
-> -- expression context, the grammar does not restrict it here
-> star :: Parser ScalarExpr
-> star = symbol "*" >> pure Star
-
-== parameter
-
-unnamed parameter or named parameter
-use in e.g. select * from t where a = ?
-select x from t where x > :param
-
-> -- unnamed (?) or host (:name [indicator :name]) parameter
-> parameter :: Parser ScalarExpr
-> parameter =
->     Parameter <$ questionMark
->     <|> HostParameter
->         <$> hostParamTok
->         <*> optionMaybe (keyword "indicator" *> hostParamTok)
-
-== positional arg
-
-> -- positional argument, e.g. $1
-> positionalArg :: Parser ScalarExpr
-> positionalArg = fmap PositionalArg positionalArgTok
-
-== parens
-
-scalar expression parens, row ctor and scalar subquery
-
-> -- parenthesised expression, row constructor or scalar subquery
-> parensExpr :: Parser ScalarExpr
-> parensExpr = parens inner
->   where
->     inner = SubQueryExpr SqSq <$> queryExpr
->             <|> ctor <$> commaSep1 scalarExpr
->     -- one expression is plain parens, two or more is a row ctor
->     ctor [e] = Parens e
->     ctor es = SpecialOp [Name Nothing "rowctor"] es
-
-== case, cast, exists, unique, array/multiset constructor, interval
-
-All of these start with a fixed keyword which is reserved, so no other
-syntax can start with the same keyword.
-
-=== case expression
-
-> -- case [test] when ... then ... [else ...] end
-> caseExpr :: Parser ScalarExpr
-> caseExpr = Case
->     <$> (keyword_ "case" *> optionMaybe scalarExpr)
->     <*> many1 whenBranch
->     <*> optionMaybe (keyword_ "else" *> scalarExpr)
->     <*  keyword_ "end"
->   where
->     whenBranch =
->         (,) <$> (keyword_ "when" *> commaSep1 scalarExpr)
->             <*> (keyword_ "then" *> scalarExpr)
-
-=== cast
-
-cast: cast(expr as type)
-
-> -- cast(expr as type)
-> cast :: Parser ScalarExpr
-> cast = keyword_ "cast" *> parens body
->   where
->     body = Cast <$> scalarExpr <*> (keyword_ "as" *> typeName)
-
-=== convert
-
-convertSqlServer: SqlServer dialect CONVERT(data_type(length), expression, style)
-
-> -- sql server convert(data_type, expression [, style]); only parsed
-> -- in dialects with diConvertFunction enabled
-> convertSqlServer :: Parser ScalarExpr
-> convertSqlServer =
->     guardDialect diConvertFunction *> keyword_ "convert" *> parens args
->   where
->     args = Convert <$> typeName
->                    <*> (comma *> scalarExpr)
->                    <*> optionMaybe (comma *> unsignedInteger)
-
-=== exists, unique
-
-subquery expression:
-[exists|unique] (queryexpr)
-
-> -- [exists|unique] (queryexpr)
-> subquery :: Parser ScalarExpr
-> subquery = SubQueryExpr <$> kw <*> parens queryExpr
->   where
->     kw = SqExists <$ keyword_ "exists"
->          <|> SqUnique <$ keyword_ "unique"
-
-=== array/multiset constructor
-
-> -- array(queryexpr) or array[e1, e2, ...]
-> arrayCtor :: Parser ScalarExpr
-> arrayCtor = keyword_ "array" *>
->     (ArrayCtor <$> parens queryExpr
->      <|> Array (Iden [Name Nothing "array"]) <$> brackets (commaSep scalarExpr))
-
-As far as I can tell, table(query expr) is just syntax sugar for
-multiset(query expr). It must be there for compatibility or something.
-
-> -- multiset(queryexpr), multiset[e1, ...], or table(queryexpr)
-> -- (table(...) is treated as a synonym for multiset(...))
-> multisetCtor :: Parser ScalarExpr
-> multisetCtor =
->     (keyword_ "multiset" *>
->      (MultisetQueryCtor <$> parens queryExpr
->       <|> MultisetCtor <$> brackets (commaSep scalarExpr)))
->     <|> (keyword_ "table" *>
->          (MultisetQueryCtor <$> parens queryExpr))
-
-> -- next value for <sequence name>
-> nextValueFor :: Parser ScalarExpr
-> nextValueFor =
->     NextValueFor <$> (keywords_ ["next","value","for"] *> names)
-
-=== interval
-
-interval literals are a special case and we follow the grammar less
-permissively here
-
-parse SQL interval literals, something like
-interval '5' day (3)
-or
-interval '5' month
-
-if the literal looks like this:
-interval 'something'
-
-then it is parsed as a regular typed literal. It must have a
-interval-datetime-field suffix to parse as an intervallit
-
-It uses try because of a conflict with interval type names: todo, fix
-this. also fix the monad -> applicative
-
-> -- | Parse an interval literal such as: interval '5' day (3).
-> -- Without a datetime-field suffix (interval 'x') it is produced as
-> -- a plain typed literal instead; a sign is only accepted when the
-> -- qualifier is present.
-> intervalLit :: Parser ScalarExpr
-> intervalLit = try (keyword_ "interval" >> do
->     s <- optionMaybe $ choice [Plus <$ symbol_ "+"
->                               ,Minus <$ symbol_ "-"]
->     lit <- singleQuotesOnlyStringTok
->     q <- optionMaybe intervalQualifier
->     mkIt s lit q)
->   where
->     mkIt Nothing val Nothing = pure $ TypedLit (TypeName [Name Nothing "interval"]) val
->     mkIt s val (Just (a,b)) = pure $ IntervalLit s val a b
->     mkIt (Just {}) _val Nothing = fail "cannot use sign without interval qualifier"
-
-== typed literal, app, special, aggregate, window, iden
-
-All of these start with identifiers (some of the special functions
-start with reserved keywords).
-
-they are all variations on suffixes on the basic identifier parser
-
-The windows is a suffix on the app parser
-
-=== iden prefix term
-
-all the scalar expressions which start with an identifier
-
-(todo: really put all of them here instead of just some of them)
-
-> -- | Parse the scalar expressions which start with an identifier:
-> -- typed literals (typename 'string'), function application, plain
-> -- identifiers, plus dialect keywords usable as an iden or an app.
-> idenExpr :: Parser ScalarExpr
-> idenExpr =
->     -- todo: work out how to left factor this
->     try (TypedLit <$> typeName <*> singleQuotesOnlyStringTok)
->     <|> (names <**> option Iden app)
->     <|> keywordFunctionOrIden
->   where
->     -- special cases for keywords that can be parsed as an iden or app
->     -- (looked up in diIdentifierKeywords / diAppKeywords per dialect)
->     keywordFunctionOrIden = try $ do
->         x <- unquotedIdentifierTok [] Nothing
->         d <- getState
->         let i = map toLower x `elem` diIdentifierKeywords d
->             a = map toLower x `elem` diAppKeywords d
->         case () of
->             _  | i && a -> pure [Name Nothing x] <**> option Iden app
->                | i -> pure (Iden [Name Nothing x])
->                | a -> pure [Name Nothing x] <**> app
->                | otherwise -> fail ""
-
-
-=== special
-
-These are keyword operators which don't look like normal prefix,
-postfix or infix binary operators. They mostly look like function
-application but with keywords in the argument list instead of commas
-to separate the arguments.
-
-the special op keywords
-parse an operator which is
-operatorname(firstArg keyword0 arg0 keyword1 arg1 etc.)
-
-> -- | whether a special op has a leading argument with no keyword
-> data SpecialOpKFirstArg = SOKNone      -- no leading argument
->                         | SOKOptional  -- leading argument may be omitted
->                         | SOKMandatory -- leading argument is required
-
-> -- | Parse a special op of the shape
-> -- opname(firstArg keyword0 arg0 keyword1 arg1 ...),
-> -- e.g. extract, position, substring; see the callers below.
-> specialOpK :: String -- name of the operator
->            -> SpecialOpKFirstArg -- has a first arg without a keyword
->            -> [(String,Bool)] -- the other args with their keywords
->                               -- and whether they are optional
->            -> Parser ScalarExpr
-> specialOpK opName firstArg kws =
->     keyword_ opName >> do
->     void openParen
->     let pfa = do
->               e <- scalarExpr
->               -- check we haven't parsed the first
->               -- keyword as an identifier
->               case (e,kws) of
->                   (Iden [Name Nothing i], (k,_):_)
->                       | map toLower i == k ->
->                           fail $ "cannot use keyword here: " ++ i
->                   _ -> return ()
->               pure e
->     fa <- case firstArg of
->          SOKNone -> pure Nothing
->          SOKOptional -> optionMaybe (try pfa)
->          SOKMandatory -> Just <$> pfa
->     as <- mapM parseArg kws
->     void closeParen
->     pure $ SpecialOpK [Name Nothing opName] fa $ catMaybes as
->   where
->     -- parse "keyword expr"; optional args give Nothing when absent,
->     -- which catMaybes above then drops
->     parseArg (nm,mand) =
->         let p = keyword_ nm >> scalarExpr
->         in fmap (nm,) <$> if mand
->                           then Just <$> p
->                           else optionMaybe (try p)
-
-The actual operators:
-
-EXTRACT( date_part FROM expression )
-
-POSITION( string1 IN string2 )
-
-SUBSTRING(extraction_string FROM starting_position [FOR length]
-[COLLATE collation_name])
-
-CONVERT(char_value USING conversion_char_name)
-
-TRANSLATE(char_value USING translation_name)
-
-OVERLAY(string PLACING embedded_string FROM start
-[FOR length])
-
-TRIM( [ [{LEADING | TRAILING | BOTH}] [removal_char] FROM ]
-target_string
-[COLLATE collation_name] )
-
-> -- all the keyword-argument special operators; each is wrapped in
-> -- try since they share keyword prefixes with other syntax
-> specialOpKs :: Parser ScalarExpr
-> specialOpKs = choice
->     [try extract, try position, try substring
->     ,try convert, try translate, try overlay, try trim]
-
-> -- EXTRACT( date_part FROM expression )
-> extract :: Parser ScalarExpr
-> extract = specialOpK "extract" SOKMandatory [("from", True)]
-
-> -- POSITION( string1 IN string2 )
-> position :: Parser ScalarExpr
-> position = specialOpK "position" SOKMandatory [("in", True)]
-
-strictly speaking, the substring must have at least one of from and
-for, but the parser doesn't enforce this
-
-> -- SUBSTRING( str FROM start [FOR length] ); both keywords optional here
-> substring :: Parser ScalarExpr
-> substring = specialOpK "substring" SOKMandatory
->                 [("from", False),("for", False)]
-
-> -- CONVERT( char_value USING conversion_char_name )
-> convert :: Parser ScalarExpr
-> convert = specialOpK "convert" SOKMandatory [("using", True)]
-
-
-> -- TRANSLATE( char_value USING translation_name )
-> translate :: Parser ScalarExpr
-> translate = specialOpK "translate" SOKMandatory [("using", True)]
-
-> -- OVERLAY( string PLACING embedded_string FROM start [FOR length] )
-> overlay :: Parser ScalarExpr
-> overlay = specialOpK "overlay" SOKMandatory
->                 [("placing", True),("from", True),("for", False)]
-
-trim is too different because of the optional char, so it gets a
-custom parser; 'both' and ' ' are filled in as the defaults if either
-part is missing in the source
-
-> -- | TRIM( [{LEADING|TRAILING|BOTH}] [char] FROM target ).
-> -- 'both' and ' ' are substituted as defaults when missing in the
-> -- source. Uses keyword_ (result discarded) for consistency with the
-> -- rest of the file; both args are always present, so the previous
-> -- catMaybes over all-Just values was redundant.
-> trim :: Parser ScalarExpr
-> trim =
->     keyword_ "trim" >>
->     parens (mkTrim
->             <$> option "both" sides
->             <*> option " " singleQuotesOnlyStringTok
->             <*> (keyword_ "from" *> scalarExpr))
->   where
->     sides = choice ["leading" <$ keyword_ "leading"
->                    ,"trailing" <$ keyword_ "trailing"
->                    ,"both" <$ keyword_ "both"]
->     mkTrim fa ch fr =
->       SpecialOpK [Name Nothing "trim"] Nothing
->           [(fa, StringLit "'" "'" ch)
->           ,("from", fr)]
-
-=== app, aggregate, window
-
-This parses all these variations:
-normal function application with just a csv of scalar exprs
-aggregate variations (distinct, order by in parens, filter and where
-  suffixes)
-window apps (fn/agg followed by over)
-
-This code is also a little dense like the typename code because of
-left factoring, later they will even have to be partially combined
-together.
-
-> -- | Parse the suffix of function application after the name: plain
-> -- apps, aggregate variations (all/distinct, order by, filter) and
-> -- window/within group apps; heavily left factored, see the prose
-> -- notes above.
-> app :: Parser ([Name] -> ScalarExpr)
-> app =
->     openParen *> choice
->     -- leading all/distinct forces the aggregate form
->     [duplicates
->      <**> (commaSep1 scalarExpr
->            <**> (((option [] orderBy) <* closeParen)
->                  <**> (optionMaybe afilter <$$$$$> AggregateApp)))
->      -- separate cases with no all or distinct which must have at
->      -- least one scalar expr
->     ,commaSep1 scalarExpr
->      <**> choice
->           [closeParen *> choice
->                          [window
->                          ,withinGroup
->                          ,(Just <$> afilter) <$$$> aggAppWithoutDupeOrd
->                          ,pure (flip App)]
->           ,orderBy <* closeParen
->            <**> (optionMaybe afilter <$$$$> aggAppWithoutDupe)]
->      -- no scalarExprs: duplicates and order by not allowed
->     ,([] <$ closeParen) <**> option (flip App) (window <|> withinGroup)
->     ]
->   where
->     aggAppWithoutDupeOrd n es f = AggregateApp n SQDefault es [] f
->     aggAppWithoutDupe n = AggregateApp n SQDefault
-
-> -- the filter (where ...) aggregate suffix
-> afilter :: Parser ScalarExpr
-> afilter = do
->     keyword_ "filter"
->     parens (keyword_ "where" *> scalarExpr)
-
-> -- within group (order by ...) suffix for ordered-set aggregates
-> withinGroup :: Parser ([ScalarExpr] -> [Name] -> ScalarExpr)
-> withinGroup =
->     (keywords_ ["within", "group"] *> parens orderBy) <$$$> AggregateAppGroup
-
-==== window
-
-parse a window call as a suffix of a regular function call
-this looks like this:
-functionname(args) over ([partition by ids] [order by orderitems])
-
-No support for explicit frames yet.
-
-TODO: add window support for other aggregate variations, needs some
-changes to the syntax also
-
-> -- | Parse a window suffix on a function call:
-> -- over ([partition by ...] [order by ...] [frame clause]).
-> -- No support for explicit frames beyond rows/range between/from yet.
-> window :: Parser ([ScalarExpr] -> [Name] -> ScalarExpr)
-> window =
->   keyword_ "over" *> openParen *> option [] partitionBy
->   <**> (option [] orderBy
->         <**> (((optionMaybe frameClause) <* closeParen) <$$$$$> WindowApp))
->   where
->     partitionBy = keywords_ ["partition","by"] *> commaSep1 scalarExpr
->     frameClause =
->         frameRowsRange -- TODO: this 'and' could be an issue
->         <**> (choice [(keyword_ "between" *> frameLimit True)
->                       <**> ((keyword_ "and" *> frameLimit True)
->                             <$$$> FrameBetween)
->                       -- maybe this should still use a b expression
->                       -- for consistency
->                      ,frameLimit False <**> pure (flip FrameFrom)])
->     frameRowsRange = FrameRows <$ keyword_ "rows"
->                      <|> FrameRange <$ keyword_ "range"
->     -- useB: inside 'between ... and ...' the limit expression must
->     -- not parse a bare binary 'and', so scalarExprB is used
->     frameLimit useB =
->         choice
->         [Current <$ keywords_ ["current", "row"]
->          -- todo: create an automatic left factor for stuff like this
->         ,keyword_ "unbounded" *>
->          choice [UnboundedPreceding <$ keyword_ "preceding"
->                 ,UnboundedFollowing <$ keyword_ "following"]
->         ,(if useB then scalarExprB else scalarExpr)
->          <**> (Preceding <$ keyword_ "preceding"
->                <|> Following <$ keyword_ "following")
->         ]
-
-== suffixes
-
-These are all generic suffixes on any scalar expr
-
-=== in
-
-in: two variations:
-a in (expr0, expr1, ...)
-a in (queryexpr)
-
-> -- in suffix, either a in (e1, e2, ...) or a in (queryexpr)
-> inSuffix :: Parser (ScalarExpr -> ScalarExpr)
-> inSuffix = mk <$> isIn <*> parens inArg
->   where
->     isIn = True <$ keyword_ "in"
->            <|> False <$ keywords_ ["not","in"]
->     inArg = InQueryExpr <$> queryExpr
->             <|> InList <$> commaSep1 scalarExpr
->     mk b v e = In b e v
-
-=== between
-
-between:
-expr between expr and expr
-
-There is a complication when parsing between - when parsing the second
-expression it is ambiguous when you hit an 'and' whether it is a
-binary operator or part of the between. This code follows what
-postgres does, which might be standard across SQL implementations,
-which is that you can't have a binary and operator in the middle
-expression in a between unless it is wrapped in parens. The 'bExpr
-parsing' is used to create alternative scalar expression parser which
-is identical to the normal one except it doesn't recognise the binary
-and operator. This is the call to scalarExprB.
-
-> -- | Parse the between suffix: [not] between <expr> and <expr>.
-> -- scalarExprB is used so the 'and' separating the bounds cannot be
-> -- consumed as the binary boolean operator. The previous
-> -- "makeOp <$> Name Nothing <$> opName" double-fmapped through the
-> -- function functor; plain composition is the idiomatic equivalent.
-> betweenSuffix :: Parser (ScalarExpr -> ScalarExpr)
-> betweenSuffix =
->     makeOp . Name Nothing <$> opName
->            <*> scalarExprB
->            <*> (keyword_ "and" *> scalarExprB)
->   where
->     opName = choice
->              ["between" <$ keyword_ "between"
->              ,"not between" <$ try (keywords_ ["not","between"])]
->     makeOp n b c = \a -> SpecialOp [n] [a,b,c]
-
-=== quantified comparison
-
-a = any (select * from t)
-
-> -- quantified comparison suffix, e.g. a = any (select * from t)
-> quantifiedComparisonSuffix :: Parser (ScalarExpr -> ScalarExpr)
-> quantifiedComparisonSuffix =
->     mk <$> comp <*> compQuan <*> parens queryExpr
->   where
->     mk c cq q v = QuantifiedComparison v [c] cq q
->     comp = Name Nothing <$> choice (map symbol
->            ["=", "<>", "<=", "<", ">", ">="])
->     compQuan = choice
->                [CPAny <$ keyword_ "any"
->                ,CPSome <$ keyword_ "some"
->                ,CPAll <$ keyword_ "all"]
-
-=== match
-
-a match (select a from t)
-
-> -- match suffix, e.g. a match [unique] (select a from t)
-> matchPredicateSuffix :: Parser (ScalarExpr -> ScalarExpr)
-> matchPredicateSuffix =
->     mk <$> (keyword_ "match" *> option False (True <$ keyword_ "unique"))
->        <*> parens queryExpr
->   where
->     mk u q v = Match v u q
-
-=== array subscript
-
-> -- array subscript suffix, e.g. a[1]
-> arraySuffix :: Parser (ScalarExpr -> ScalarExpr)
-> arraySuffix = flip Array <$> brackets (commaSep scalarExpr)
-
-=== escape
-
-It is going to be really difficult to support an arbitrary character
-for the escape now there is a separate lexer ...
-
-TODO: this needs fixing. Escape is only part of other nodes, and not a
-separate suffix.
-
-> {-escapeSuffix :: Parser (ScalarExpr -> ScalarExpr)
-> escapeSuffix = do
->     ctor <- choice
->             [Escape <$ keyword_ "escape"
->             ,UEscape <$ keyword_ "uescape"]
->     c <- escapeChar
->     pure $ \v -> ctor v c
->   where
->     escapeChar :: Parser Char
->     escapeChar = (identifierTok [] Nothing <|> symbolTok Nothing) >>= oneOnly
->     oneOnly :: String -> Parser Char
->     oneOnly c = case c of
->                    [c'] -> return c'
->                    _ -> fail "escape char must be single char"
-> -}
-
-=== collate
-
-> -- collate suffix, e.g. a collate my_collation
-> collateSuffix :: Parser (ScalarExpr -> ScalarExpr)
-> collateSuffix = keyword_ "collate" *> (flip Collate <$> names)
-
-== odbc syntax
-
-the parser supports three kinds of odbc syntax, two of which are
-scalar expressions (the other is a variation on joins)
-
-
-> -- | odbc scalar expressions: time literals {d '...'}, {t '...'},
-> -- {ts '...'} and function escapes {fn expr}
-> odbcExpr :: Parser ScalarExpr
-> odbcExpr = between (symbol "{") (symbol "}")
->            (odbcTimeLit <|> odbcFunc)
->   where
->     odbcTimeLit =
->         OdbcLiteral <$> choice [OLDate <$ keyword "d"
->                                ,OLTime <$ keyword "t"
->                                ,OLTimestamp <$ keyword "ts"]
->                     <*> singleQuotesOnlyStringTok
->     -- todo: this parser is too general, the expr part
->     -- should be only a function call (from a whitelist of functions)
->     -- or the extract operator
->     odbcFunc = OdbcFunc <$> (keyword "fn" *> scalarExpr)
-
-==  operators
-
-The 'regular' operators in this parsing and in the abstract syntax are
-unary prefix, unary postfix and binary infix operators. The operators
-can be symbols (a + b), single keywords (a and b) or multiple keywords
-(a is similar to b).
-
-TODO: carefully review the precedences and associativities.
-
-TODO: to fix the parsing completely, I think will need to parse
-without precedence and associativity and fix up afterwards, since SQL
-syntax is way too messy. It might be possible to avoid this if we
-wanted to avoid extensibility and to not be concerned with parse error
-messages, but both of these are too important.
-
-> -- | The operator table for scalar expressions, strongest binding
-> -- first. When bExpr is True the binary 'and' level is omitted; that
-> -- variant is used inside between and window frame bounds where a
-> -- bare 'and' must terminate the expression.
-> opTable :: Bool -> [[E.Operator [Token] ParseState Identity ScalarExpr]]
-> opTable bExpr =
->         [-- parse match and quantified comparisons as postfix ops
->           -- todo: left factor the quantified comparison with regular
->           -- binary comparison, somehow
->          [E.Postfix $ try quantifiedComparisonSuffix
->          ,E.Postfix matchPredicateSuffix
->          ]
-
->         ,[binarySym "." E.AssocLeft]
-
->         ,[postfix' arraySuffix
->          ,postfix' collateSuffix]
-
->         ,[prefixSym "+", prefixSym "-"]
-
->         ,[binarySym "^" E.AssocLeft]
-
->         ,[binarySym "*" E.AssocLeft
->          ,binarySym "/" E.AssocLeft
->          ,binarySym "%" E.AssocLeft]
-
->         ,[binarySym "+" E.AssocLeft
->          ,binarySym "-" E.AssocLeft]
-
->         ,[binarySym "||" E.AssocRight
->          ,prefixSym "~"
->          ,binarySym "&" E.AssocRight
->          ,binarySym "|" E.AssocRight]
-
->         ,[binaryKeyword "overlaps" E.AssocNone]
-
->         ,[binaryKeyword "like" E.AssocNone
->          -- have to use try with inSuffix because of a conflict
->          -- with 'in' in position function, and not between
->          -- between also has a try in it to deal with 'not'
->          -- ambiguity
->          ,E.Postfix $ try inSuffix
->          ,E.Postfix betweenSuffix]
->          -- todo: figure out where to put the try?
->          ++ [binaryKeywords $ makeKeywordTree
->              ["not like"
->              ,"is similar to"
->              ,"is not similar to"]]
->          ++ [multisetBinOp]
-
->         ,[binarySym "<" E.AssocNone
->          ,binarySym ">" E.AssocNone
->          ,binarySym ">=" E.AssocNone
->          ,binarySym "<=" E.AssocNone
->          ,binarySym "!=" E.AssocRight
->          ,binarySym "<>" E.AssocRight
->          ,binarySym "=" E.AssocRight]
-
->         ,[postfixKeywords $ makeKeywordTree
->              ["is null"
->              ,"is not null"
->              ,"is true"
->              ,"is not true"
->              ,"is false"
->              ,"is not false"
->              ,"is unknown"
->              ,"is not unknown"]]
->          ++ [binaryKeywords $ makeKeywordTree
->              ["is distinct from"
->              ,"is not distinct from"]]
-
->         ,[prefixKeyword "not"]
-
->         ,if bExpr then [] else [binaryKeyword "and" E.AssocLeft]
-
->         ,[binaryKeyword "or" E.AssocLeft]
-
->        ]
->   where
->     binarySym nm assoc = binary (symbol_ nm) nm assoc
->     binaryKeyword nm assoc = binary (keyword_ nm) nm assoc
->     -- multi-word keyword operators parsed via a keyword tree
->     binaryKeywords p =
->         E.Infix (do
->                  o <- try p
->                  pure (\a b -> BinOp a [Name Nothing $ unwords o] b))
->             E.AssocNone
->     postfixKeywords p =
->       postfix' $ do
->           o <- try p
->           pure $ PostfixOp [Name Nothing $ unwords o]
->     binary p nm assoc =
->       E.Infix (p >> pure (\a b -> BinOp a [Name Nothing nm] b)) assoc
->     multisetBinOp = E.Infix (do
->         keyword_ "multiset"
->         o <- choice [Union <$ keyword_ "union"
->                     ,Intersect <$ keyword_ "intersect"
->                     ,Except <$ keyword_ "except"]
->         d <- option SQDefault duplicates
->         pure (\a b -> MultisetBinOp a o d b))
->           E.AssocLeft
->     prefixKeyword nm = prefix (keyword_ nm) nm
->     prefixSym nm = prefix (symbol_ nm) nm
->     prefix p nm = prefix' (p >> pure (PrefixOp [Name Nothing nm]))
->     -- hack from here
->     -- http://stackoverflow.com/questions/10475337/parsec-expr-repeated-prefix-postfix-operator-not-supported
->     -- not implemented properly yet
->     -- I don't think this will be enough for all cases
->     -- at least it works for 'not not a'
->     -- ok: "x is not true is not true"
->     -- no work: "x is not true is not null"
->     prefix'  p = E.Prefix  . chainl1 p $ pure       (.)
->     postfix' p = E.Postfix . chainl1 p $ pure (flip (.))
-
-== scalar expression top level
-
-This parses most of the scalar exprs. The order of the parsers and use
-of try is carefully done to make everything work. It is a little
-fragile and could at least do with some heavy explanation. Update: the
-'try's have migrated into the individual parsers, they still need
-documenting/fixing.
-
-> -- | parse a scalar expression with the full operator table
-> scalarExpr :: Parser ScalarExpr
-> scalarExpr = E.buildExpressionParser (opTable False) term
-
-> -- | all the scalar expression terms which the operator table builds
-> -- on; the order of alternatives is significant, see the prose above
-> term :: Parser ScalarExpr
-> term = choice [simpleLiteral
->               ,parameter
->               ,positionalArg
->               ,star
->               ,parensExpr
->               ,caseExpr
->               ,cast
->               ,convertSqlServer
->               ,arrayCtor
->               ,multisetCtor
->               ,nextValueFor
->               ,subquery
->               ,intervalLit
->               ,specialOpKs
->               ,idenExpr
->               ,odbcExpr]
->        <?> "scalar expression"
-
-expose the b expression for window frame clause range between
-
-> -- | scalar expression variant which doesn't recognise the binary
-> -- 'and' operator; used for between and window frame bounds
-> scalarExprB :: Parser ScalarExpr
-> scalarExprB = E.buildExpressionParser (opTable True) term
-
-== helper parsers
-
-This is used in interval literals and in interval type names.
-
-> -- interval qualifier, e.g. day (3) [to second (2)]; used in both
-> -- interval literals and interval type names
-> intervalQualifier :: Parser (IntervalTypeField,Maybe IntervalTypeField)
-> intervalQualifier =
->     (,) <$> field <*> optionMaybe (keyword_ "to" *> field)
->   where
->     field = Itf <$> datetimeField <*> optionMaybe prec
->     prec = parens ((,) <$> unsignedInteger
->                        <*> optionMaybe (comma *> unsignedInteger))
-
-TODO: use datetime field in extract also
-use a data type for the datetime field?
-
-> -- one of the fixed datetime field keywords
-> datetimeField :: Parser String
-> datetimeField =
->     choice (map keyword fields) <?> "datetime field"
->   where
->     fields = ["year","month","day","hour","minute","second"]
-
-This is used in multiset operations (scalar expr), selects (query expr)
-and set operations (query expr).
-
-> -- | parse all or distinct; used in aggregates, selects and set ops.
-> -- Uses keyword_ for both branches (the matched text is discarded),
-> -- consistent with the rest of the file.
-> duplicates :: Parser SetQuantifier
-> duplicates =
->     choice [All <$ keyword_ "all"
->            ,Distinct <$ keyword_ "distinct"]
-
--------------------------------------------------
-
-= query expressions
-
-== select lists
-
-> -- one select-list item: expression with optional [as] alias
-> selectItem :: Parser (ScalarExpr,Maybe Name)
-> selectItem = do
->     e <- scalarExpr
->     mn <- optionMaybe (optional (keyword_ "as") *> name)
->     pure (e,mn)
-
-> -- | comma separated list of select items
-> selectList :: Parser [(ScalarExpr,Maybe Name)]
-> selectList = commaSep1 selectItem
-
-== from
-
-Here is the rough grammar for joins
-
-tref
-(cross | [natural] ([inner] | (left | right | full) [outer])) join
-tref
-[on expr | using (...)]
-
-TODO: either use explicit 'operator precedence' parsers or build
-expression parser for the 'tref operators' such as joins, lateral,
-aliases.
-
-> -- | Parse the from clause: a comma separated list of table refs,
-> -- each one a parenthesised query/tref, lateral, name or function,
-> -- or odbc {oj ...}, with optional alias and join suffixes chained
-> -- via optionSuffix.
-> from :: Parser [TableRef]
-> from = keyword_ "from" *> commaSep1 tref
->   where
->     -- TODO: use P (a->) for the join tref suffix
->     -- chainl or buildexpressionparser
->     tref = nonJoinTref >>= optionSuffix joinTrefSuffix
->     nonJoinTref = choice
->         [parens $ choice
->              [TRQueryExpr <$> queryExpr
->              ,TRParens <$> tref]
->         ,TRLateral <$> (keyword_ "lateral"
->                         *> nonJoinTref)
->         ,do
->          n <- names
->          -- a name followed by parens is a table function call
->          choice [TRFunction n
->                  <$> parens (commaSep scalarExpr)
->                 ,pure $ TRSimple n]
->          -- todo: I think you can only have outer joins inside the oj,
->          -- not sure.
->         ,TROdbc <$> (symbol "{" *> keyword_ "oj" *> tref <* symbol "}")
->         ] <??> aliasSuffix
->     aliasSuffix = fromAlias <$$> TRAlias
->     -- greedily keep absorbing join clauses after the first tref
->     joinTrefSuffix t =
->         (TRJoin t <$> option False (True <$ keyword_ "natural")
->                   <*> joinType
->                   <*> nonJoinTref
->                   <*> optionMaybe joinCondition)
->         >>= optionSuffix joinTrefSuffix
-
-TODO: factor the join stuff to produce better error messages (and make
-it more readable)
-
-> joinType :: Parser JoinType
-> joinType = choice
->     [JCross <$ keyword_ "cross" <* keyword_ "join"
->     ,JInner <$ keyword_ "inner" <* keyword_ "join"
->     ,JLeft <$ keyword_ "left"
->            <* optional (keyword_ "outer")
->            <* keyword_ "join"
->     ,JRight <$ keyword_ "right"
->             <* optional (keyword_ "outer")
->             <* keyword_ "join"
->     ,JFull <$ keyword_ "full"
->            <* optional (keyword_ "outer")
->            <* keyword_ "join"
->     ,JInner <$ keyword_ "join"]
-
-> joinCondition :: Parser JoinCondition
-> joinCondition = choice
->     [keyword_ "on" >> JoinOn <$> scalarExpr
->     ,keyword_ "using" >> JoinUsing <$> parens (commaSep1 name)]
-
-> fromAlias :: Parser Alias
-> fromAlias = Alias <$> tableAlias <*> columnAliases
->   where
->     tableAlias = optional (keyword_ "as") *> name
->     columnAliases = optionMaybe $ parens $ commaSep1 name
-
-== simple other parts
-
-Parsers for where, group by, having, order by and limit, which are
-pretty trivial.
-
-> whereClause :: Parser ScalarExpr
-> whereClause = keyword_ "where" *> scalarExpr
-
-> groupByClause :: Parser [GroupingExpr]
-> groupByClause = keywords_ ["group","by"] *> commaSep1 groupingExpression
->   where
->     groupingExpression = choice
->       [keyword_ "cube" >>
->        Cube <$> parens (commaSep groupingExpression)
->       ,keyword_ "rollup" >>
->        Rollup <$> parens (commaSep groupingExpression)
->       ,GroupingParens <$> parens (commaSep groupingExpression)
->       ,keywords_ ["grouping", "sets"] >>
->        GroupingSets <$> parens (commaSep groupingExpression)
->       ,SimpleGroup <$> scalarExpr
->       ]
-
-> having :: Parser ScalarExpr
-> having = keyword_ "having" *> scalarExpr
-
-> orderBy :: Parser [SortSpec]
-> orderBy = keywords_ ["order","by"] *> commaSep1 ob
->   where
->     ob = SortSpec
->          <$> scalarExpr
->          <*> option DirDefault (choice [Asc <$ keyword_ "asc"
->                                        ,Desc <$ keyword_ "desc"])
->          <*> option NullsOrderDefault
->              -- todo: left factor better
->              (keyword_ "nulls" >>
->                     choice [NullsFirst <$ keyword "first"
->                            ,NullsLast <$ keyword "last"])
-
-allows offset and fetch in either order
-+ postgresql offset without row(s) and limit instead of fetch also
-
-> offsetFetch :: Parser (Maybe ScalarExpr, Maybe ScalarExpr)
-> offsetFetch = permute ((,) <$?> (Nothing, Just <$> offset)
->                            <|?> (Nothing, Just <$> fetch))
-
-> offset :: Parser ScalarExpr
-> offset = keyword_ "offset" *> scalarExpr
->          <* option () (choice [keyword_ "rows"
->                               ,keyword_ "row"])
-
-> fetch :: Parser ScalarExpr
-> fetch = fetchFirst <|> limit
->   where
->     fetchFirst = guardDialect diFetchFirst
->                  *> fs *> scalarExpr <* ro
->     fs = makeKeywordTree ["fetch first", "fetch next"]
->     ro = makeKeywordTree ["rows only", "row only"]
->     -- todo: not in ansi sql dialect
->     limit = guardDialect diLimit *>
->             keyword_ "limit" *> scalarExpr
-
-== common table expressions
-
-> with :: Parser QueryExpr
-> with = keyword_ "with" >>
->     With <$> option False (True <$ keyword_ "recursive")
->          <*> commaSep1 withQuery <*> queryExpr
->   where
->     withQuery = (,) <$> (withAlias <* keyword_ "as")
->                     <*> parens queryExpr
->     withAlias = Alias <$> name <*> columnAliases
->     columnAliases = optionMaybe $ parens $ commaSep1 name
-
-
-== query expression
-
-This parser parses any query expression variant: normal select, cte,
-and union, etc..
-
-> queryExpr :: Parser QueryExpr
-> queryExpr = choice
->     [with
->     ,chainr1 (choice [values,table, select]) setOp]
->   where
->     select = keyword_ "select" >>
->         mkSelect
->         <$> option SQDefault duplicates
->         <*> selectList
->         <*> optionMaybe tableExpression
->     mkSelect d sl Nothing =
->         makeSelect{qeSetQuantifier = d, qeSelectList = sl}
->     mkSelect d sl (Just (TableExpression f w g h od ofs fe)) =
->         Select d sl f w g h od ofs fe
->     values = keyword_ "values"
->              >> Values <$> commaSep (parens (commaSep scalarExpr))
->     table = keyword_ "table" >> Table <$> names
-
-local data type to help with parsing the bit after the select list,
-called 'table expression' in the ansi sql grammar. Maybe this should
-be in the public syntax?
-
-> data TableExpression
->     = TableExpression
->       {_teFrom :: [TableRef]
->       ,_teWhere :: Maybe ScalarExpr
->       ,_teGroupBy :: [GroupingExpr]
->       ,_teHaving :: Maybe ScalarExpr
->       ,_teOrderBy :: [SortSpec]
->       ,_teOffset :: Maybe ScalarExpr
->       ,_teFetchFirst :: Maybe ScalarExpr}
-
-> tableExpression :: Parser TableExpression
-> tableExpression = mkTe <$> from
->                        <*> optionMaybe whereClause
->                        <*> option [] groupByClause
->                        <*> optionMaybe having
->                        <*> option [] orderBy
->                        <*> offsetFetch
->  where
->     mkTe f w g h od (ofs,fe) =
->         TableExpression f w g h od ofs fe
-
-> setOp :: Parser (QueryExpr -> QueryExpr -> QueryExpr)
-> setOp = cq
->         <$> setOpK
->         <*> option SQDefault duplicates
->         <*> corr
->   where
->     cq o d c q0 q1 = QueryExprSetOp q0 o d c q1
->     setOpK = choice [Union <$ keyword_ "union"
->                     ,Intersect <$ keyword_ "intersect"
->                     ,Except <$ keyword_ "except"]
->             <?> "set operator"
->     corr = option Respectively (Corresponding <$ keyword_ "corresponding")
-
-
-wrapper for query expr which ignores optional trailing semicolon.
-
-TODO: change style
-
-> topLevelQueryExpr :: Parser QueryExpr
-> topLevelQueryExpr = queryExpr <??> (id <$ semi)
-
-> topLevelStatement :: Parser Statement
-> topLevelStatement = statement
-
--------------------------
-
-= Statements
-
-> statementWithoutSemicolon :: Parser Statement
-> statementWithoutSemicolon = choice
->     [keyword_ "create" *> choice [createSchema
->                                  ,createTable
->                                  ,createIndex
->                                  ,createView
->                                  ,createDomain
->                                  ,createSequence
->                                  ,createRole
->                                  ,createAssertion]
->     ,keyword_ "alter" *> choice [alterTable
->                                 ,alterDomain
->                                 ,alterSequence]
->     ,keyword_ "drop" *> choice [dropSchema
->                                ,dropTable
->                                ,dropView
->                                ,dropDomain
->                                ,dropSequence
->                                ,dropRole
->                                ,dropAssertion]
->     ,delete
->     ,truncateSt
->     ,insert
->     ,update
->     ,startTransaction
->     ,savepoint
->     ,releaseSavepoint
->     ,commit
->     ,rollback
->     ,grant
->     ,revoke
->     ,SelectStatement <$> queryExpr
->     ]
->
-> statement :: Parser Statement
-> statement = statementWithoutSemicolon <* optional semi <|> semi *> pure EmptyStatement
-
-> createSchema :: Parser Statement
-> createSchema = keyword_ "schema" >>
->     CreateSchema <$> names
-
-> createTable :: Parser Statement
-> createTable = do 
->   d <- getState
->   let 
->     parseColumnDef = TableColumnDef <$> columnDef 
->     parseConstraintDef = uncurry TableConstraintDef <$> tableConstraintDef
->     separator = if diNonCommaSeparatedConstraints d
->       then optional comma
->       else void comma
->     constraints = sepBy parseConstraintDef separator
->     entries = ((:) <$> parseColumnDef <*> ((comma >> entries) <|> pure [])) <|> constraints
->
->   keyword_ "table" >>
->     CreateTable
->     <$> names 
->     <*> parens entries
-
-> createIndex :: Parser Statement
-> createIndex =
->     CreateIndex
->     <$> ((keyword_ "index" >> pure False) <|>
->          (keywords_ ["unique", "index"] >> pure True))
->     <*> names
->     <*> (keyword_ "on" >> names)
->     <*> parens (commaSep1 name)
-
-> columnDef :: Parser ColumnDef
-> columnDef = ColumnDef <$> name <*> typeName
->             <*> optionMaybe defaultClause
->             <*> option [] (many1 colConstraintDef)
->   where
->     defaultClause = choice [
->         keyword_ "default" >>
->         DefaultClause <$> scalarExpr
->         -- todo: left factor
->        ,try (keywords_ ["generated","always","as"] >>
->              GenerationClause <$> parens scalarExpr)
->        ,keyword_ "generated" >>
->         IdentityColumnSpec
->         <$> (GeneratedAlways <$ keyword_ "always"
->              <|> GeneratedByDefault <$ keywords_ ["by", "default"])
->         <*> (keywords_ ["as", "identity"] *>
->              option [] (parens sequenceGeneratorOptions))
->        ]
-
-> tableConstraintDef :: Parser (Maybe [Name], TableConstraint)
-> tableConstraintDef =
->     (,)
->     <$> (optionMaybe (keyword_ "constraint" *> names))
->     <*> (unique <|> primaryKey <|> check <|> references)
->   where
->     unique = keyword_ "unique" >>
->         TableUniqueConstraint <$> parens (commaSep1 name)
->     primaryKey = keywords_ ["primary", "key"] >>
->         TablePrimaryKeyConstraint <$> parens (commaSep1 name)
->     check = keyword_ "check" >> TableCheckConstraint <$> parens scalarExpr
->     references = keywords_ ["foreign", "key"] >>
->         (\cs ft ftcs m (u,d) -> TableReferencesConstraint cs ft ftcs m u d)
->         <$> parens (commaSep1 name)
->         <*> (keyword_ "references" *> names)
->         <*> optionMaybe (parens $ commaSep1 name)
->         <*> refMatch
->         <*> refActions
-
-> refMatch :: Parser ReferenceMatch
-> refMatch = option DefaultReferenceMatch
->             (keyword_ "match" *>
->              choice [MatchFull <$ keyword_ "full"
->                     ,MatchPartial <$ keyword_ "partial"
->                     ,MatchSimple <$ keyword_ "simple"])
-> refActions :: Parser (ReferentialAction,ReferentialAction)
-> refActions = permute ((,) <$?> (DefaultReferentialAction, onUpdate)
->                           <|?> (DefaultReferentialAction, onDelete))
->   where
->     -- todo: left factor?
->     onUpdate = try (keywords_ ["on", "update"]) *> referentialAction
->     onDelete = try (keywords_ ["on", "delete"]) *> referentialAction
->     referentialAction = choice [
->          RefCascade <$ keyword_ "cascade"
->          -- todo: left factor?
->         ,RefSetNull <$ try (keywords_ ["set", "null"])
->         ,RefSetDefault <$ try (keywords_ ["set", "default"])
->         ,RefRestrict <$ keyword_ "restrict"
->         ,RefNoAction <$ keywords_ ["no", "action"]]
-
-> colConstraintDef :: Parser ColConstraintDef
-> colConstraintDef =
->     ColConstraintDef
->     <$> optionMaybe (keyword_ "constraint" *> names)
->     <*> (nullable <|> notNull <|> unique <|> primaryKey <|> check <|> references)
->   where
->     nullable = ColNullableConstraint <$ keyword "null"
->     notNull = ColNotNullConstraint <$ keywords_ ["not", "null"]
->     unique = ColUniqueConstraint <$ keyword_ "unique"
->     primaryKey = do
->       keywords_ ["primary", "key"] 
->       d <- getState
->       autoincrement <- if diAutoincrement d 
->         then optionMaybe (keyword_ "autoincrement")
->         else pure Nothing
->       pure $ ColPrimaryKeyConstraint $ isJust autoincrement
->     check = keyword_ "check" >> ColCheckConstraint <$> parens scalarExpr
->     references = keyword_ "references" >>
->         (\t c m (ou,od) -> ColReferencesConstraint t c m ou od)
->         <$> names
->         <*> optionMaybe (parens name)
->         <*> refMatch
->         <*> refActions
-
-slightly hacky parser for signed integers
-
-> signedInteger :: Parser Integer
-> signedInteger =
->     (*) <$> option 1 (1 <$ symbol "+" <|> (-1) <$ symbol "-")
->     <*> unsignedInteger
-
-> sequenceGeneratorOptions :: Parser [SequenceGeneratorOption]
-> sequenceGeneratorOptions =
->          -- todo: could try to combine exclusive options
->          -- such as cycle and nocycle
->          -- sort out options which are sometimes not allowed
->          -- as datatype, and restart with
->     permute ((\a b c d e f g h j k -> catMaybes [a,b,c,d,e,f,g,h,j,k])
->                   <$?> nj startWith
->                   <|?> nj dataType
->                   <|?> nj restart
->                   <|?> nj incrementBy
->                   <|?> nj maxValue
->                   <|?> nj noMaxValue
->                   <|?> nj minValue
->                   <|?> nj noMinValue
->                   <|?> nj scycle
->                   <|?> nj noCycle
->                  )
->   where
->     nj p = (Nothing,Just <$> p)
->     startWith = keywords_ ["start", "with"] >>
->                 SGOStartWith <$> signedInteger
->     dataType = keyword_ "as" >>
->                SGODataType <$> typeName
->     restart = keyword_ "restart" >>
->               SGORestart <$> optionMaybe (keyword_ "with" *> signedInteger)
->     incrementBy = keywords_ ["increment", "by"] >>
->                 SGOIncrementBy <$> signedInteger
->     maxValue = keyword_ "maxvalue" >>
->                 SGOMaxValue <$> signedInteger
->     noMaxValue = SGONoMaxValue <$ try (keywords_ ["no","maxvalue"])
->     minValue = keyword_ "minvalue" >>
->                 SGOMinValue <$> signedInteger
->     noMinValue = SGONoMinValue <$ try (keywords_ ["no","minvalue"])
->     scycle = SGOCycle <$ keyword_ "cycle"
->     noCycle = SGONoCycle <$ try (keywords_ ["no","cycle"])
-
-
-> alterTable :: Parser Statement
-> alterTable = keyword_ "table" >>
->     -- the choices have been ordered so that it works
->     AlterTable <$> names <*> choice [addConstraint
->                                     ,dropConstraint
->                                     ,addColumnDef
->                                     ,alterColumn
->                                     ,dropColumn
->                                     ]
->   where
->     addColumnDef = try (keyword_ "add"
->                         *> optional (keyword_ "column")) >>
->                    AddColumnDef <$> columnDef
->     alterColumn = keyword_ "alter" >> optional (keyword_ "column") >>
->                   name <**> choice [setDefault
->                                    ,dropDefault
->                                    ,setNotNull
->                                    ,dropNotNull
->                                    ,setDataType]
->     setDefault :: Parser (Name -> AlterTableAction)
->     -- todo: left factor
->     setDefault = try (keywords_ ["set","default"]) >>
->                  scalarExpr <$$> AlterColumnSetDefault
->     dropDefault = AlterColumnDropDefault <$ try (keywords_ ["drop","default"])
->     setNotNull = AlterColumnSetNotNull <$ try (keywords_ ["set","not","null"])
->     dropNotNull = AlterColumnDropNotNull <$ try (keywords_ ["drop","not","null"])
->     setDataType = try (keywords_ ["set","data","type"]) >>
->                   typeName <$$> AlterColumnSetDataType
->     dropColumn = try (keyword_ "drop" *> optional (keyword_ "column")) >>
->                  DropColumn <$> name <*> dropBehaviour
->     -- todo: left factor, this try is especially bad
->     addConstraint = try (keyword_ "add" >>
->         uncurry AddTableConstraintDef <$> tableConstraintDef)
->     dropConstraint = try (keywords_ ["drop","constraint"]) >>
->         DropTableConstraintDef <$> names <*> dropBehaviour
-
-
-> dropSchema :: Parser Statement
-> dropSchema = keyword_ "schema" >>
->     DropSchema <$> names <*> dropBehaviour
-
-> dropTable :: Parser Statement
-> dropTable = keyword_ "table" >>
->     DropTable <$> names <*> dropBehaviour
-
-> createView :: Parser Statement
-> createView =
->     CreateView
->     <$> (option False (True <$ keyword_ "recursive") <* keyword_ "view")
->     <*> names
->     <*> optionMaybe (parens (commaSep1 name))
->     <*> (keyword_ "as" *> queryExpr)
->     <*> optionMaybe (choice [
->             -- todo: left factor
->             DefaultCheckOption <$ try (keywords_ ["with", "check", "option"])
->            ,CascadedCheckOption <$ try (keywords_ ["with", "cascaded", "check", "option"])
->            ,LocalCheckOption <$ try (keywords_ ["with", "local", "check", "option"])
->             ])
-
-> dropView :: Parser Statement
-> dropView = keyword_ "view" >>
->     DropView <$> names <*> dropBehaviour
-
-> createDomain :: Parser Statement
-> createDomain = keyword_ "domain" >>
->     CreateDomain
->     <$> names
->     <*> (optional (keyword_ "as") *> typeName)
->     <*> optionMaybe (keyword_ "default" *> scalarExpr)
->     <*> many con
->   where
->     con = (,) <$> optionMaybe (keyword_ "constraint" *> names)
->           <*> (keyword_ "check" *> parens scalarExpr)
-
-> alterDomain :: Parser Statement
-> alterDomain = keyword_ "domain" >>
->     AlterDomain
->     <$> names
->     <*> (setDefault <|> constraint
->          <|> (keyword_ "drop" *> (dropDefault <|> dropConstraint)))
->   where
->     setDefault = keywords_ ["set", "default"] >> ADSetDefault <$> scalarExpr
->     constraint = keyword_ "add" >>
->        ADAddConstraint
->        <$> optionMaybe (keyword_ "constraint" *> names)
->        <*> (keyword_ "check" *> parens scalarExpr)
->     dropDefault = ADDropDefault <$ keyword_ "default"
->     dropConstraint = keyword_ "constraint" >> ADDropConstraint <$> names
-
-> dropDomain :: Parser Statement
-> dropDomain = keyword_ "domain" >>
->     DropDomain <$> names <*> dropBehaviour
-
-> createSequence :: Parser Statement
-> createSequence = keyword_ "sequence" >>
->     CreateSequence
->     <$> names
->     <*> sequenceGeneratorOptions
-
-> alterSequence :: Parser Statement
-> alterSequence = keyword_ "sequence" >>
->     AlterSequence
->     <$> names
->     <*> sequenceGeneratorOptions
-
-> dropSequence :: Parser Statement
-> dropSequence = keyword_ "sequence" >>
->     DropSequence <$> names <*> dropBehaviour
-
-> createAssertion :: Parser Statement
-> createAssertion = keyword_ "assertion" >>
->     CreateAssertion
->     <$> names
->     <*> (keyword_ "check" *> parens scalarExpr)
-
-
-> dropAssertion :: Parser Statement
-> dropAssertion = keyword_ "assertion" >>
->     DropAssertion <$> names <*> dropBehaviour
-
------------------
-
-= dml
-
-> delete :: Parser Statement
-> delete = keywords_ ["delete","from"] >>
->     Delete
->     <$> names
->     <*> optionMaybe (optional (keyword_ "as") *> name)
->     <*> optionMaybe (keyword_ "where" *> scalarExpr)
-
-> truncateSt :: Parser Statement
-> truncateSt = keywords_ ["truncate", "table"] >>
->     Truncate
->     <$> names
->     <*> option DefaultIdentityRestart
->         (ContinueIdentity <$ keywords_ ["continue","identity"]
->          <|> RestartIdentity <$ keywords_ ["restart","identity"])
-
-> insert :: Parser Statement
-> insert = keywords_ ["insert", "into"] >>
->     Insert
->     <$> names
->     <*> optionMaybe (parens $ commaSep1 name)
->     <*> (DefaultInsertValues <$ keywords_ ["default", "values"]
->          <|> InsertQuery <$> queryExpr)
-
-> update :: Parser Statement
-> update = keywords_ ["update"] >>
->     Update
->     <$> names
->     <*> optionMaybe (optional (keyword_ "as") *> name)
->     <*> (keyword_ "set" *> commaSep1 setClause)
->     <*> optionMaybe (keyword_ "where" *> scalarExpr)
->   where
->     setClause = multipleSet <|> singleSet
->     multipleSet = SetMultiple
->                   <$> parens (commaSep1 names)
->                   <*> (symbol "=" *> parens (commaSep1 scalarExpr))
->     singleSet = Set
->                 <$> names
->                 <*> (symbol "=" *> scalarExpr)
-
-> dropBehaviour :: Parser DropBehaviour
-> dropBehaviour =
->     option DefaultDropBehaviour
->     (Restrict <$ keyword_ "restrict"
->     <|> Cascade <$ keyword_ "cascade")
-
------------------------------
-
-= transaction management
-
-> startTransaction :: Parser Statement
-> startTransaction = StartTransaction <$ keywords_ ["start","transaction"]
-
-> savepoint :: Parser Statement
-> savepoint = keyword_ "savepoint" >>
->     Savepoint <$> name
-
-> releaseSavepoint :: Parser Statement
-> releaseSavepoint = keywords_ ["release","savepoint"] >>
->     ReleaseSavepoint <$> name
-
-> commit :: Parser Statement
-> commit = Commit <$ keyword_ "commit" <* optional (keyword_ "work")
-
-> rollback :: Parser Statement
-> rollback = keyword_ "rollback" >> optional (keyword_ "work") >>
->     Rollback <$> optionMaybe (keywords_ ["to", "savepoint"] *> name)
-
-
-------------------------------
-
-= Access control
-
-TODO: fix try at the 'on'
-
-> grant :: Parser Statement
-> grant = keyword_ "grant" >> (try priv <|> role)
->   where
->     priv = GrantPrivilege
->            <$> commaSep privilegeAction
->            <*> (keyword_ "on" *> privilegeObject)
->            <*> (keyword_ "to" *> commaSep name)
->            <*> option WithoutGrantOption
->                (WithGrantOption <$ keywords_ ["with","grant","option"])
->     role = GrantRole
->            <$> commaSep name
->            <*> (keyword_ "to" *> commaSep name)
->            <*> option WithoutAdminOption
->                (WithAdminOption <$ keywords_ ["with","admin","option"])
-
-> createRole :: Parser Statement
-> createRole = keyword_ "role" >>
->     CreateRole <$> name
-
-> dropRole :: Parser Statement
-> dropRole = keyword_ "role" >>
->     DropRole <$> name
-
-TODO: fix try at the 'on'
-
-> revoke :: Parser Statement
-> revoke = keyword_ "revoke" >> (try priv <|> role)
->   where
->     priv = RevokePrivilege
->            <$> option NoGrantOptionFor
->                (GrantOptionFor <$ keywords_ ["grant","option","for"])
->            <*> commaSep privilegeAction
->            <*> (keyword_ "on" *> privilegeObject)
->            <*> (keyword_ "from" *> commaSep name)
->            <*> dropBehaviour
->     role = RevokeRole
->            <$> option NoAdminOptionFor
->                (AdminOptionFor <$ keywords_ ["admin","option", "for"])
->            <*> commaSep name
->            <*> (keyword_ "from" *> commaSep name)
->            <*> dropBehaviour
-
-> privilegeAction :: Parser PrivilegeAction
-> privilegeAction = choice
->     [PrivAll <$ keywords_ ["all","privileges"]
->     ,keyword_ "select" >>
->      PrivSelect <$> option [] (parens $ commaSep name)
->     ,PrivDelete <$ keyword_ "delete"
->     ,PrivUsage <$ keyword_ "usage"
->     ,PrivTrigger <$ keyword_ "trigger"
->     ,PrivExecute <$ keyword_ "execute"
->     ,keyword_ "insert" >>
->      PrivInsert <$> option [] (parens $ commaSep name)
->     ,keyword_ "update" >>
->      PrivUpdate <$> option [] (parens $ commaSep name)
->     ,keyword_ "references" >>
->      PrivReferences <$> option [] (parens $ commaSep name)
->     ]
-
-> privilegeObject :: Parser PrivilegeObject
-> privilegeObject = choice
->     [keyword_ "domain" >> PrivDomain <$> names
->     ,keyword_ "type" >> PrivType <$> names
->     ,keyword_ "sequence" >> PrivSequence <$> names
->     ,keywords_ ["specific","function"] >> PrivFunction <$> names
->     ,optional (keyword_ "table") >> PrivTable <$> names
->     ]
-
-
-----------------------------
-
-wrapper to parse a series of statements. They must be separated by
-semicolon, but for the last statement, the trailing semicolon is
-optional.
-
-> statements :: Parser [Statement]
-> statements = many statement
-
-----------------------------------------------
-
-= multi keyword helper
-
-This helper is to help parsing multiple options of multiple keywords
-with similar prefixes, e.g. parsing 'is null' and 'is not null'.
-
-use to left factor/ improve:
-typed literal and general identifiers
-not like, not in, not between operators
-help with factoring keyword functions and other app-likes
-the join keyword sequences
-fetch first/next
-row/rows only
-
-There is probably a simpler way of doing this but I am a bit
-thick.
-
-> makeKeywordTree :: [String] -> Parser [String]
-> makeKeywordTree sets =
->     parseTrees (sort $ map words sets)
->   where
->     parseTrees :: [[String]] -> Parser [String]
->     parseTrees ws = do
->       let gs :: [[[String]]]
->           gs = groupBy ((==) `on` safeHead) ws
->       choice $ map parseGroup gs
->     parseGroup :: [[String]] -> Parser [String]
->     parseGroup l@((k:_):_) = do
->         keyword_ k
->         let tls = catMaybes $ map safeTail l
->             pr = (k:) <$> parseTrees tls
->         if (or $ map null tls)
->           then pr <|> pure [k]
->           else pr
->     parseGroup _ = guard False >> error "impossible"
->     safeHead (x:_) = Just x
->     safeHead [] = Nothing
->     safeTail (_:x) = Just x
->     safeTail [] = Nothing
-
-------------------------------------------------
-
-= lexing
-
-TODO: push checks into here:
-keyword blacklists
-unsigned integer match
-symbol matching
-keyword matching
-
-> stringTok :: Parser (String,String,String)
-> stringTok = mytoken (\tok ->
->     case tok of
->       L.SqlString s e t -> Just (s,e,t)
->       _ -> Nothing)
-
-> singleQuotesOnlyStringTok :: Parser String
-> singleQuotesOnlyStringTok = mytoken (\tok ->
->     case tok of
->       L.SqlString "'" "'" t -> Just t
->       _ -> Nothing)
-
-This is to support SQL strings where you can write
-'part of a string' ' another part'
-and it will parse as a single string
-
-It is only allowed when all the strings are quoted with ' atm.
-
-> stringTokExtend :: Parser (String,String,String)
-> stringTokExtend = do
->     (s,e,x) <- stringTok
->     choice [
->          do
->          guard (s == "'" && e == "'")
->          (s',e',y) <- stringTokExtend
->          guard (s' == "'" && e' == "'")
->          return $ (s,e,x ++ y)
->         ,return (s,e,x)
->         ]
-
-> hostParamTok :: Parser String
-> hostParamTok = mytoken (\tok ->
->     case tok of
->       L.PrefixedVariable c p -> Just (c:p)
->       _ -> Nothing)
-
-> positionalArgTok :: Parser Int
-> positionalArgTok = mytoken (\tok ->
->     case tok of
->       L.PositionalArg p -> Just p
->       _ -> Nothing)
-
-
-> sqlNumberTok :: Bool -> Parser String
-> sqlNumberTok intOnly = mytoken (\tok ->
->     case tok of
->       L.SqlNumber p | not intOnly || all isDigit p -> Just p
->       _ -> Nothing)
-
-
-> symbolTok :: Maybe String -> Parser String
-> symbolTok sym = mytoken (\tok ->
->     case (sym,tok) of
->       (Nothing, L.Symbol p) -> Just p
->       (Just s, L.Symbol p) | s == p -> Just p
->       _ -> Nothing)
-
-> identifierTok :: [String] -> Parser (Maybe (String,String), String)
-> identifierTok blackList = mytoken (\tok ->
->     case tok of
->       L.Identifier q@(Just {}) p -> Just (q,p)
->       L.Identifier q p | map toLower p `notElem` blackList -> Just (q,p)
->       _ -> Nothing)
-
-> unquotedIdentifierTok :: [String] -> Maybe String -> Parser String
-> unquotedIdentifierTok blackList kw = mytoken (\tok ->
->     case (kw,tok) of
->       (Nothing, L.Identifier Nothing p) | map toLower p `notElem` blackList -> Just p
->       (Just k, L.Identifier Nothing p) | k == map toLower p -> Just p
->       _ -> Nothing)
-
-> mytoken :: (L.Token -> Maybe a) -> Parser a
-> mytoken test = token showToken posToken testToken
->   where
->     showToken (_,tok)   = show tok
->     posToken  ((a,b,c),_)  = newPos a b c
->     testToken (_,tok)   = test tok
-
-> unsignedInteger :: Parser Integer
-> unsignedInteger = read <$> sqlNumberTok True <?> "natural number"
-
-todo: work out the symbol parsing better
-
-> symbol :: String -> Parser String
-> symbol s = symbolTok (Just s) <?> s
-
-> singleCharSymbol :: Char -> Parser Char
-> singleCharSymbol c = c <$ symbol [c]
-
-> questionMark :: Parser Char
-> questionMark = singleCharSymbol '?' <?> "question mark"
-
-> openParen :: Parser Char
-> openParen = singleCharSymbol '('
-
-> closeParen :: Parser Char
-> closeParen = singleCharSymbol ')'
-
-> openBracket :: Parser Char
-> openBracket = singleCharSymbol '['
-
-> closeBracket :: Parser Char
-> closeBracket = singleCharSymbol ']'
-
-
-> comma :: Parser Char
-> comma = singleCharSymbol ','
-
-> semi :: Parser Char
-> semi = singleCharSymbol ';'
-
-= helper functions
-
-> keyword :: String -> Parser String
-> keyword k = unquotedIdentifierTok [] (Just k) <?> k
-
-helper function to improve error messages
-
-> keywords_ :: [String] -> Parser ()
-> keywords_ ks = mapM_ keyword_ ks <?> intercalate " " ks
-
-
-> parens :: Parser a -> Parser a
-> parens = between openParen closeParen
-
-> brackets :: Parser a -> Parser a
-> brackets = between openBracket closeBracket
-
-> commaSep :: Parser a -> Parser [a]
-> commaSep = (`sepBy` comma)
-
-> keyword_ :: String -> Parser ()
-> keyword_ = void . keyword
-
-> symbol_ :: String -> Parser ()
-> symbol_ = void . symbol
-
-> commaSep1 :: Parser a -> Parser [a]
-> commaSep1 = (`sepBy1` comma)
-
-> blacklist :: Dialect -> [String]
-> blacklist d = diKeywords d
-
-These blacklisted names are mostly needed when we parse something with
-an optional alias, e.g. select a a from t. If we write select a from
-t, we have to make sure the from isn't parsed as an alias. I'm not
-sure what other places strictly need the blacklist, and in theory it
-could be tuned differently for each place the identifierString/
-identifier parsers are used to only blacklist the bare
-minimum. Something like this might be needed for dialect support, even
-if it is pretty silly to use a keyword as an unquoted identifier when
-there is a quoting syntax as well.
-
-The standard has a weird mix of reserved keywords and unreserved
-keywords (I'm not sure what exactly being an unreserved keyword
-means).
-
-The current approach tries to have everything which is a keyword only
-in the keyword list - so it can only be used in some other context if
-quoted. If something is a 'ansi keyword', but appears only as an
-identifier or function name for instance in the syntax (or something
-that looks identical to this), then it isn't treated as a keyword at
-all. When there is some overlap (e.g. 'set'), then there is either
-special case parsing code to handle this (in the case of set), or it
-is not treated as a keyword (not perfect, but if it more or less
-works, ok for now).
-
-An exception to this is the standard type names are considered as
-keywords at the moment, with a special case in the type parser to
-make this work. Maybe this isn't necessary or is a bad idea.
-
-It is possible to have a problem if you remove something which is a
-keyword from this list, and still want to parse statements using it
-as a keyword - for instance, removing things like 'from' or 'as',
-will likely mean many things don't parse anymore.
-
-
-
------------
-
-Used to make the dialect available during parsing so different parsers
-can be used for different dialects. Not sure if this is the best way
-to do it, but it's convenient
-
-> type ParseState = Dialect
-
-> type Token = ((String,Int,Int),L.Token)
-
-> type Parser = GenParser Token ParseState
-
-> guardDialect :: (Dialect -> Bool) -> Parser ()
-> guardDialect f = do
->     d <- getState
->     guard (f d)
-
-The dialect stuff could also be used for custom options: e.g. to only
-parse dml for instance.
diff --git a/Language/SQL/SimpleSQL/Pretty.hs b/Language/SQL/SimpleSQL/Pretty.hs
new file mode 100644
index 0000000..8726d58
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Pretty.hs
@@ -0,0 +1,848 @@
+
+-- | These are the pretty printing functions, which produce SQL
+-- source from ASTs. The code attempts to format the output in a
+-- readable way.
+module Language.SQL.SimpleSQL.Pretty
+    (prettyQueryExpr
+    ,prettyScalarExpr
+    ,prettyStatement
+    ,prettyStatements
+    ) where
+
+import Prelude hiding ((<>))
+
+{-
+TODO: there should be more comments in this file, especially the bits
+which have been changed to try to improve the layout of the output.
+Try to do this when this code is ported to a modern pretty printing lib.
+-}
+
+--import Language.SQL.SimpleSQL.Dialect
+import Text.PrettyPrint (render, vcat, text, (<>), (<+>), empty, parens,
+                         nest, Doc, punctuate, comma, sep, quotes,
+                         brackets,hcat)
+import Data.Maybe (maybeToList, catMaybes)
+import Data.List (intercalate)
+
+
+import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.Dialect
+
+
+-- | Convert a query expr ast to concrete syntax.
+prettyQueryExpr :: Dialect -> QueryExpr -> String
+prettyQueryExpr d = render . queryExpr d
+
+-- | Convert a scalar expr ast to concrete syntax.
+prettyScalarExpr :: Dialect -> ScalarExpr -> String
+prettyScalarExpr d = render . scalarExpr d
+
+-- | A terminating semicolon.
+terminator :: Doc
+terminator = text ";\n"
+
+-- | Convert a statement ast to concrete syntax.
+prettyStatement :: Dialect -> Statement -> String
+prettyStatement _ EmptyStatement = render terminator
+prettyStatement d s = render (statement d s)
+
+-- | Convert a list of statements to concrete syntax. A semicolon
+-- is inserted after each statement.
+prettyStatements :: Dialect -> [Statement] -> String
+prettyStatements d = render . vcat . map prettyStatementWithSemicolon
+  where
+    prettyStatementWithSemicolon :: Statement -> Doc
+    prettyStatementWithSemicolon s = statement d s <> terminator
+
+-- = scalar expressions
+
+scalarExpr :: Dialect -> ScalarExpr -> Doc
+scalarExpr _ (StringLit s e t) = text s <> text t <> text e
+
+scalarExpr _ (NumLit s) = text s
+scalarExpr _ (IntervalLit s v f t) =
+    text "interval"
+    <+> me (\x -> text $ case x of
+                             Plus -> "+"
+                             Minus -> "-") s
+    <+> quotes (text v)
+    <+> intervalTypeField f
+    <+> me (\x -> text "to" <+> intervalTypeField x) t
+scalarExpr _ (Iden i) = names i
+scalarExpr _ Star = text "*"
+scalarExpr _ Parameter = text "?"
+scalarExpr _ (PositionalArg n) = text $ "$" ++ show n
+scalarExpr _ (HostParameter p i) =
+    text p
+    <+> me (\i' -> text "indicator" <+> text i') i
+
+scalarExpr d (App f es) = names f <> parens (commaSep (map (scalarExpr d) es))
+
+scalarExpr dia (AggregateApp f d es od fil) =
+    names f
+    <> parens ((case d of
+                  Distinct -> text "distinct"
+                  All -> text "all"
+                  SQDefault -> empty)
+               <+> commaSep (map (scalarExpr dia) es)
+               <+> orderBy dia od)
+    <+> me (\x -> text "filter"
+                  <+> parens (text "where" <+> scalarExpr dia x)) fil
+
+scalarExpr d (AggregateAppGroup f es od) =
+    names f
+    <> parens (commaSep (map (scalarExpr d) es))
+    <+> if null od
+        then empty
+        else text "within group" <+> parens (orderBy d od)
+
+scalarExpr d (WindowApp f es pb od fr) =
+    names f <> parens (commaSep $ map (scalarExpr d) es)
+    <+> text "over"
+    <+> parens ((case pb of
+                    [] -> empty
+                    _ -> text "partition by"
+                          <+> nest 13 (commaSep $ map (scalarExpr d) pb))
+                <+> orderBy d od
+    <+> me frd fr)
+  where
+    frd (FrameFrom rs fp) = rsd rs <+> fpd fp
+    frd (FrameBetween rs fps fpe) =
+        rsd rs <+> text "between" <+> fpd fps
+        <+> text "and" <+> fpd fpe
+    rsd rs = case rs of
+                 FrameRows -> text "rows"
+                 FrameRange -> text "range"
+    fpd UnboundedPreceding = text "unbounded preceding"
+    fpd UnboundedFollowing = text "unbounded following"
+    fpd Current = text "current row"
+    fpd (Preceding e) = scalarExpr d e <+> text "preceding"
+    fpd (Following e) = scalarExpr d e <+> text "following"
+
+scalarExpr dia (SpecialOp nm [a,b,c]) | nm `elem` [[Name Nothing "between"]
+                                                 ,[Name Nothing "not between"]] =
+  sep [scalarExpr dia a
+      ,names nm <+> scalarExpr dia b
+      ,nest (length (unnames nm) + 1) $ text "and" <+> scalarExpr dia c]
+
+scalarExpr d (SpecialOp [Name Nothing "rowctor"] as) =
+    parens $ commaSep $ map (scalarExpr d) as
+
+scalarExpr d (SpecialOp nm es) =
+  names nm <+> parens (commaSep $ map (scalarExpr d) es)
+
+scalarExpr d (SpecialOpK nm fs as) =
+    names nm <> parens (sep $ catMaybes
+        (fmap (scalarExpr d) fs
+         : map (\(n,e) -> Just (text n <+> scalarExpr d e)) as))
+
+scalarExpr d (PrefixOp f e) = names f <+> scalarExpr d e
+scalarExpr d (PostfixOp f e) = scalarExpr d e <+> names f
+scalarExpr d e@(BinOp _ op _) | op `elem` [[Name Nothing "and"]
+                                         ,[Name Nothing "or"]] =
+    -- special case for and, or, get all the ands so we can vcat them
+    -- nicely
+    case ands e of
+      (e':es) -> vcat (scalarExpr d e'
+                       : map ((names op <+>) . scalarExpr d) es)
+      [] -> empty -- shouldn't be possible
+  where
+    ands (BinOp a op' b) | op == op' = ands a ++ ands b
+    ands x = [x]
+-- special case for the "." operator: don't surround it with whitespace
+scalarExpr d (BinOp e0 [Name Nothing "."] e1) =
+    scalarExpr d e0 <> text "." <> scalarExpr d e1
+scalarExpr d (BinOp e0 f e1) =
+    scalarExpr d e0 <+> names f <+> scalarExpr d e1
+
+scalarExpr dia (Case t ws els) =
+    sep $ [text "case" <+> me (scalarExpr dia) t]
+          ++ map w ws
+          ++ maybeToList (fmap e els)
+          ++ [text "end"]
+  where
+    w (t0,t1) =
+      text "when" <+> nest 5 (commaSep $ map (scalarExpr dia) t0)
+      <+> text "then" <+> nest 5 (scalarExpr dia t1)
+    e el = text "else" <+> nest 5 (scalarExpr dia el)
+scalarExpr d (Parens e) = parens $ scalarExpr d e
+scalarExpr d (Cast e tn) =
+    text "cast" <> parens (sep [scalarExpr d e
+                               ,text "as"
+                               ,typeName tn])
+
+scalarExpr _ (TypedLit tn s) =
+    typeName tn <+> quotes (text s)
+
+scalarExpr d (SubQueryExpr ty qe) =
+    (case ty of
+        SqSq -> empty
+        SqExists -> text "exists"
+        SqUnique -> text "unique"
+    ) <+> parens (queryExpr d qe)
+
+scalarExpr d (QuantifiedComparison v c cp sq) =
+    scalarExpr d v
+    <+> names c
+    <+> (text $ case cp of
+             CPAny -> "any"
+             CPSome -> "some"
+             CPAll -> "all")
+    <+> parens (queryExpr d sq)
+
+scalarExpr d (Match v u sq) =
+    scalarExpr d v
+    <+> text "match"
+    <+> (if u then text "unique" else empty)
+    <+> parens (queryExpr d sq)
+
+scalarExpr d (In b se x) =
+    scalarExpr d se <+>
+    (if b then empty else text "not")
+    <+> text "in"
+    <+> parens (nest (if b then 3 else 7) $
+                 case x of
+                     InList es -> commaSep $ map (scalarExpr d) es
+                     InQueryExpr qe -> queryExpr d qe)
+
+scalarExpr d (Array v es) =
+    scalarExpr d v <> brackets (commaSep $ map (scalarExpr d) es)
+
+scalarExpr d (ArrayCtor q) =
+    text "array" <> parens (queryExpr d q)
+
+scalarExpr d (MultisetCtor es) =
+    text "multiset" <> brackets (commaSep $ map (scalarExpr d) es)
+
+scalarExpr d (MultisetQueryCtor q) =
+    text "multiset" <> parens (queryExpr d q)
+
+scalarExpr d (MultisetBinOp a c q b) =
+    sep
+    [scalarExpr d a
+    ,text "multiset"
+    ,text $ case c of
+                Union -> "union"
+                Intersect -> "intersect"
+                Except -> "except"
+    ,case q of
+         SQDefault -> empty
+         All -> text "all"
+         Distinct -> text "distinct"
+    ,scalarExpr d b]
+
+{-scalarExpr d (Escape v e) =
+    scalarExpr d v <+> text "escape" <+> text [e]
+
+scalarExpr d (UEscape v e) =
+    scalarExpr d v <+> text "uescape" <+> text [e]-}
+
+scalarExpr d (Collate v c) =
+    scalarExpr d v <+> text "collate" <+> names c
+
+scalarExpr _ (NextValueFor ns) =
+    text "next value for" <+> names ns
+
+scalarExpr d (VEComment cmt v) =
+    vcat $ map comment cmt ++ [scalarExpr d v]
+
+scalarExpr _ (OdbcLiteral t s) =
+    text "{" <> lt t <+> quotes (text s) <> text "}"
+  where
+    lt OLDate = text "d"
+    lt OLTime = text "t"
+    lt OLTimestamp = text "ts"
+
+scalarExpr d (OdbcFunc e) =
+    text "{fn" <+> scalarExpr d e <> text "}"
+
+scalarExpr d (Convert t e Nothing) =
+    text "convert(" <> typeName t <> text "," <+> scalarExpr d e <> text ")"
+scalarExpr d (Convert t e (Just i)) =
+    text "convert(" <> typeName t <> text "," <+> scalarExpr d e <> text "," <+> text (show i) <> text ")"
+
+unname :: Name -> String
+unname (Name Nothing n) = n
+unname (Name (Just (s,e)) n) =
+    s ++ n ++ e
+
+unnames :: [Name] -> String
+unnames ns = intercalate "." $ map unname ns
+
+
+name :: Name -> Doc
+name (Name Nothing n) = text n
+name (Name (Just (s,e)) n) = text s <> text n <> text e
+
+names :: [Name] -> Doc
+names ns = hcat $ punctuate (text ".") $ map name ns
+
+typeName :: TypeName -> Doc
+typeName (TypeName t) = names t
+typeName (PrecTypeName t a) = names t <+> parens (text $ show a)
+typeName (PrecScaleTypeName t a b) =
+    names t <+> parens (text (show a) <+> comma <+> text (show b))
+typeName (PrecLengthTypeName t i m u) =
+    names t
+    <> parens (text (show i)
+               <> me (\x -> case x of
+                           PrecK -> text "K"
+                           PrecM -> text "M"
+                           PrecG -> text "G"
+                           PrecT -> text "T"
+                           PrecP -> text "P") m
+               <+> me (\x -> case x of
+                       PrecCharacters -> text "CHARACTERS"
+                       PrecOctets -> text "OCTETS") u)
+typeName (CharTypeName t i cs col) =
+    names t
+    <> me (\x -> parens (text $ show x)) i
+    <+> (if null cs
+         then empty
+         else text "character set" <+> names cs)
+    <+> (if null col
+         then empty
+         else text "collate" <+> names col)
+typeName (TimeTypeName t i tz) =
+    names t
+    <> me (\x -> parens (text $ show x)) i
+    <+> text (if tz
+              then "with time zone"
+              else "without time zone")
+typeName (RowTypeName cs) =
+    text "row" <> parens (commaSep $ map f cs)
+  where
+    f (n,t) = name n <+> typeName t
+typeName (IntervalTypeName f t) =
+    text "interval"
+    <+> intervalTypeField f
+    <+> me (\x -> text "to" <+> intervalTypeField x) t
+
+typeName (ArrayTypeName tn sz) =
+    typeName tn <+> text "array" <+> me (brackets . text . show) sz
+
+typeName (MultisetTypeName tn) =
+    typeName tn <+> text "multiset"
+
+intervalTypeField :: IntervalTypeField -> Doc
+intervalTypeField (Itf n p) =
+    text n
+    <+> me (\(x,x1) ->
+             parens (text (show x)
+                     <+> me (\y -> (sep [comma,text (show y)])) x1)) p
+
+
+-- = query expressions
+
+queryExpr :: Dialect -> QueryExpr -> Doc
+queryExpr dia (Select d sl fr wh gb hv od off fe) =
+  sep [text "select"
+      ,case d of
+          SQDefault -> empty
+          All -> text "all"
+          Distinct -> text "distinct"
+      ,nest 7 $ sep [selectList dia sl]
+      ,from dia fr
+      ,maybeScalarExpr dia "where" wh
+      ,grpBy dia gb
+      ,maybeScalarExpr dia "having" hv
+      ,orderBy dia od
+      ,me (\e -> text "offset" <+> scalarExpr dia e <+> text "rows") off
+      ,fetchFirst
+      ]
+  where
+    fetchFirst =
+      me (\e -> if diLimit dia
+                then text "limit" <+> scalarExpr dia e
+                else text "fetch first" <+> scalarExpr dia e
+                     <+> text "rows only") fe
+
+queryExpr dia (QueryExprSetOp q1 ct d c q2) =
+  sep [queryExpr dia q1
+      ,text (case ct of
+                Union -> "union"
+                Intersect -> "intersect"
+                Except -> "except")
+       <+> case d of
+               SQDefault -> empty
+               All -> text "all"
+               Distinct -> text "distinct"
+       <+> case c of
+               Corresponding -> text "corresponding"
+               Respectively -> empty
+      ,queryExpr dia q2]
+queryExpr d (With rc withs qe) =
+  text "with" <+> (if rc then text "recursive" else empty)
+  <+> vcat [nest 5
+            (vcat $ punctuate comma $ flip map withs $ \(n,q) ->
+             withAlias n <+> text "as" <+> parens (queryExpr d q))
+           ,queryExpr d qe]
+  where
+    withAlias (Alias nm cols) = name nm
+                                <+> me (parens . commaSep . map name) cols
+
+
+queryExpr d (Values vs) =
+    text "values"
+    <+> nest 7 (commaSep (map (parens . commaSep . map (scalarExpr d)) vs))
+queryExpr _ (Table t) = text "table" <+> names t
+queryExpr d (QEComment cmt v) =
+    vcat $ map comment cmt ++ [queryExpr d v]
+
+
+alias :: Alias -> Doc
+alias (Alias nm cols) =
+    text "as" <+> name nm
+    <+> me (parens . commaSep . map name) cols
+
+selectList :: Dialect -> [(ScalarExpr,Maybe Name)] -> Doc
+selectList d is = commaSep $ map si is
+  where
+    si (e,al) = scalarExpr d e <+> me als al
+    als al = text "as" <+> name al
+
+from :: Dialect -> [TableRef] -> Doc
+from _ [] = empty
+from d ts =
+    sep [text "from"
+        ,nest 5 $ vcat $ punctuate comma $ map tr ts]
+  where
+    tr (TRSimple t) = names t
+    tr (TRLateral t) = text "lateral" <+> tr t
+    tr (TRFunction f as) =
+        names f <> parens (commaSep $ map (scalarExpr d) as)
+    tr (TRAlias t a) = sep [tr t, alias a]
+    tr (TRParens t) = parens $ tr t
+    tr (TRQueryExpr q) = parens $ queryExpr d q
+    tr (TRJoin t0 b jt t1 jc) =
+       sep [tr t0
+           ,if b then text "natural" else empty
+           ,joinText jt <+> tr t1
+           ,joinCond jc]
+    tr (TROdbc t) = text "{oj" <+> tr t <+> text "}"
+    joinText jt =
+      sep [case jt of
+              JInner -> text "inner"
+              JLeft -> text "left"
+              JRight -> text "right"
+              JFull -> text "full"
+              JCross -> text "cross"
+          ,text "join"]
+    joinCond (Just (JoinOn e)) = text "on" <+> scalarExpr d e
+    joinCond (Just (JoinUsing es)) =
+        text "using" <+> parens (commaSep $ map name es)
+    joinCond Nothing = empty
+
+maybeScalarExpr :: Dialect -> String -> Maybe ScalarExpr -> Doc
+maybeScalarExpr d k = me
+      (\e -> sep [text k
+                 ,nest (length k + 1) $ scalarExpr d e])
+
+grpBy :: Dialect -> [GroupingExpr] -> Doc
+grpBy _ [] = empty
+grpBy d gs = sep [text "group by"
+               ,nest 9 $ commaSep $ map ge gs]
+  where
+    ge (SimpleGroup e) = scalarExpr d e
+    ge (GroupingParens g) = parens (commaSep $ map ge g)
+    ge (Cube es) = text "cube" <> parens (commaSep $ map ge es)
+    ge (Rollup es) = text "rollup" <> parens (commaSep $ map ge es)
+    ge (GroupingSets es) = text "grouping sets" <> parens (commaSep $ map ge es)
+
+orderBy :: Dialect -> [SortSpec] -> Doc
+orderBy _ [] = empty
+orderBy dia os = sep [text "order by"
+                 ,nest 9 $ commaSep $ map f os]
+  where
+    f (SortSpec e d n) =
+        scalarExpr dia e
+        <+> (case d of
+                  Asc -> text "asc"
+                  Desc -> text "desc"
+                  DirDefault -> empty)
+        <+> (case n of
+                NullsOrderDefault -> empty
+                NullsFirst -> text "nulls" <+> text "first"
+                NullsLast -> text "nulls" <+> text "last")
+
+-- = statements
+
+statement :: Dialect -> Statement -> Doc
+
+
+-- == ddl
+
+statement _ (CreateSchema nm) =
+    text "create" <+> text "schema" <+> names nm
+
+statement d (CreateTable nm cds) =
+    text "create" <+> text "table" <+> names nm
+    <+> parens (commaSep $ map cd cds)
+  where
+    cd (TableConstraintDef n con) =
+        maybe empty (\s -> text "constraint" <+> names s) n
+        <+> tableConstraint d con
+    cd (TableColumnDef cd') = columnDef d cd'
+
+statement d (AlterTable t act) =
+    texts ["alter","table"] <+> names t
+    <+> alterTableAction d act
+
+statement _ (DropSchema nm db) =
+    text "drop" <+> text "schema" <+> names nm <+> dropBehav db
+
+statement d (CreateDomain nm ty def cs) =
+    text "create" <+> text "domain" <+> names nm
+    <+> typeName ty
+    <+> maybe empty (\def' -> text "default" <+> scalarExpr d def') def
+    <+> sep (map con cs)
+  where
+    con (cn, e) =
+        maybe empty (\cn' -> text "constraint" <+> names cn') cn
+        <+> text "check" <> parens (scalarExpr d e)
+
+statement d (AlterDomain nm act) =
+    texts ["alter","domain"]
+    <+> names nm
+    <+> a act
+  where
+    a (ADSetDefault v) = texts ["set","default"] <+> scalarExpr d v
+    a (ADDropDefault) = texts ["drop","default"]
+    a (ADAddConstraint cnm e) =
+        text "add"
+        <+> maybe empty (\cnm' -> text "constraint" <+> names cnm') cnm
+        <+> text "check" <> parens (scalarExpr d e)
+    a (ADDropConstraint cnm) = texts ["drop", "constraint"]
+                               <+> names cnm
+
+
+statement _ (DropDomain nm db) =
+    text "drop" <+> text "domain" <+> names nm <+> dropBehav db
+
+statement _ (CreateSequence nm sgos) =
+  texts ["create","sequence"] <+> names nm
+  <+> sep (map sequenceGeneratorOption sgos)
+
+statement _ (AlterSequence nm sgos) =
+  texts ["alter","sequence"] <+> names nm
+  <+> sep (map sequenceGeneratorOption sgos)
+
+statement _ (DropSequence nm db) =
+    text "drop" <+> text "sequence" <+> names nm <+> dropBehav db
+
+
+statement d (CreateAssertion nm ex) =
+  texts ["create","assertion"] <+> names nm
+  <+> text "check" <+> parens (scalarExpr d ex)
+
+statement _ (DropAssertion nm db) =
+    text "drop" <+> text "assertion" <+> names nm <+> dropBehav db
+
+statement _ (CreateIndex un nm tbl cols) =
+  texts (if un
+         then ["create","unique","index"]
+         else  ["create","index"])
+  <+> names nm
+  <+> text "on"
+  <+> names tbl
+  <+> parens (commaSep $ map name cols)
+
+-- == dml
+
+statement d (SelectStatement q) = queryExpr d q
+
+statement d (Delete t a w) =
+    text "delete" <+> text "from"
+    <+> names t <+> maybe empty (\x -> text "as" <+> name x) a
+    <+> maybeScalarExpr d "where" w
+
+statement _ (Truncate t ir) =
+    text "truncate" <+> text "table" <+> names t
+    <+> case ir of
+            DefaultIdentityRestart -> empty
+            ContinueIdentity -> text "continue" <+> text "identity"
+            RestartIdentity -> text "restart" <+> text "identity"
+
+statement d (Insert t cs s) =
+    text "insert" <+> text "into" <+> names t
+    <+> maybe empty (\cs' -> parens (commaSep $ map name cs')) cs
+    <+> case s of
+            DefaultInsertValues -> text "default" <+> text "values"
+            InsertQuery q -> queryExpr d q
+
+statement d (Update t a sts whr) =
+    text "update" <+> names t
+    <+> maybe empty (\x -> text "as" <+> name x) a
+    <+> text "set" <+> commaSep (map sc sts)
+    <+> maybeScalarExpr d "where" whr
+  where
+    sc (Set tg v) = names tg <+> text "=" <+> scalarExpr d v
+    sc (SetMultiple ts vs) = parens (commaSep $ map names ts) <+> text "="
+                             <+> parens (commaSep $ map (scalarExpr d) vs)
+
+statement _ (DropTable n b) =
+    text "drop" <+> text "table" <+> names n <+> dropBehav b
+
+statement d (CreateView r nm al q co) =
+    text "create" <+> (if r then text "recursive" else empty)
+    <+> text "view" <+> names nm
+    <+> (maybe empty (\al' -> parens $ commaSep $ map name al')) al
+    <+> text "as"
+    <+> queryExpr d q
+    <+> case co of
+            Nothing -> empty
+            Just DefaultCheckOption -> texts ["with", "check", "option"]
+            Just CascadedCheckOption -> texts ["with", "cascaded", "check", "option"]
+            Just LocalCheckOption -> texts ["with", "local", "check", "option"]
+
+statement _ (DropView n b) =
+    text "drop" <+> text "view" <+> names n <+> dropBehav b
+
+
+-- == transactions
+
+statement _ StartTransaction =
+    texts ["start", "transaction"]
+
+statement _ (Savepoint nm) =
+    text "savepoint" <+> name nm
+
+statement _ (ReleaseSavepoint nm) =
+    texts ["release", "savepoint"] <+> name nm
+
+statement _ Commit =
+    text "commit"
+
+statement _ (Rollback mn) =
+    text "rollback"
+    <+> maybe empty (\n -> texts ["to","savepoint"] <+> name n) mn
+
+-- == access control
+
+statement _ (GrantPrivilege pas po rs go) =
+    text "grant" <+> commaSep (map privAct pas)
+    <+> text "on" <+> privObj po
+    <+> text "to" <+> commaSep (map name rs)
+    <+> grantOpt go
+  where
+    grantOpt WithGrantOption = texts ["with","grant","option"]
+    grantOpt WithoutGrantOption = empty
+
+statement _ (GrantRole rs trs ao) =
+    text "grant" <+> commaSep (map name rs)
+    <+> text "to" <+> commaSep (map name trs)
+    <+> adminOpt ao
+  where
+    adminOpt WithAdminOption = texts ["with","admin","option"]
+    adminOpt WithoutAdminOption = empty
+
+statement _ (CreateRole nm) =
+    texts ["create","role"] <+> name nm
+
+statement _ (DropRole nm) =
+    texts ["drop","role"] <+> name nm
+
+statement _ (RevokePrivilege go pas po rs db) =
+    text "revoke"
+    <+> grantOptFor go
+    <+> commaSep (map privAct pas)
+    <+> text "on" <+> privObj po
+    <+> text "from" <+> commaSep (map name rs)
+    <+> dropBehav db
+  where
+    grantOptFor GrantOptionFor = texts ["grant","option","for"]
+    grantOptFor NoGrantOptionFor = empty
+
+statement _ (RevokeRole ao rs trs db) =
+    text "revoke"
+    <+> adminOptFor ao
+    <+> commaSep (map name rs)
+    <+> text "from" <+> commaSep (map name trs)
+    <+> dropBehav db
+  where
+    adminOptFor AdminOptionFor = texts ["admin","option","for"]
+    adminOptFor NoAdminOptionFor = empty
+
+
+statement _ (StatementComment cs) = vcat $ map comment cs
+statement _ EmptyStatement = empty
+
+
+{-
+== sessions
+
+
+== extras
+-}
+
+dropBehav :: DropBehaviour -> Doc
+dropBehav DefaultDropBehaviour = empty
+dropBehav Cascade = text "cascade"
+dropBehav Restrict = text "restrict"
+
+
+columnDef :: Dialect -> ColumnDef -> Doc
+columnDef d (ColumnDef n t mdef cons) =
+      name n <+> typeName t
+      <+> case mdef of
+             Nothing -> empty
+             Just (DefaultClause def) ->
+                 text "default" <+> scalarExpr d def
+             Just (GenerationClause e) ->
+                 texts ["generated","always","as"] <+> parens (scalarExpr d e)
+             Just (IdentityColumnSpec w o) ->
+                 text "generated"
+                 <+> (case w of
+                         GeneratedAlways -> text "always"
+                         GeneratedByDefault -> text "by" <+> text "default")
+                 <+> text "as" <+> text "identity"
+                 <+> (case o of
+                         [] -> empty
+                         os -> parens (sep $ map sequenceGeneratorOption os))
+      <+> sep (map cdef cons)
+  where
+    cdef (ColConstraintDef cnm con) =
+        maybe empty (\s -> text "constraint" <+> names s) cnm
+        <+> pcon con
+    pcon ColNotNullConstraint = texts ["not","null"]
+    pcon ColUniqueConstraint = text "unique"
+    pcon (ColPrimaryKeyConstraint autoincrement) = 
+      texts $ ["primary","key"] ++ ["autoincrement"|autoincrement]
+    pcon (ColCheckConstraint v) = text "check" <+> parens (scalarExpr d v)
+    pcon (ColReferencesConstraint tb c m u del) =
+        text "references"
+        <+> names tb
+        <+> maybe empty (\c' -> parens (name c')) c
+        <+> refMatch m
+        <+> refAct "update" u
+        <+> refAct "delete" del
+
+sequenceGeneratorOption :: SequenceGeneratorOption -> Doc
+sequenceGeneratorOption (SGODataType t) =
+    text "as" <+> typeName t
+sequenceGeneratorOption (SGORestart mi) =
+    text "restart" <+> maybe empty (\mi' -> texts ["with", show mi']) mi
+sequenceGeneratorOption (SGOStartWith i) = texts ["start",  "with", show i]
+sequenceGeneratorOption (SGOIncrementBy i) = texts ["increment", "by", show i]
+sequenceGeneratorOption (SGOMaxValue i) = texts ["maxvalue", show i]
+sequenceGeneratorOption SGONoMaxValue = texts ["no", "maxvalue"]
+sequenceGeneratorOption (SGOMinValue i) = texts ["minvalue", show i]
+sequenceGeneratorOption SGONoMinValue = texts ["no", "minvalue"]
+sequenceGeneratorOption SGOCycle = text "cycle"
+sequenceGeneratorOption SGONoCycle = text "no cycle"
+
+refMatch :: ReferenceMatch -> Doc
+refMatch m = case m of
+                     DefaultReferenceMatch -> empty
+                     MatchFull -> texts ["match", "full"]
+                     MatchPartial -> texts ["match","partial"]
+                     MatchSimple -> texts ["match", "simple"]
+
+refAct :: String -> ReferentialAction -> Doc
+refAct t a = case a of
+                     DefaultReferentialAction -> empty
+                     RefCascade -> texts ["on", t, "cascade"]
+                     RefSetNull -> texts ["on", t, "set", "null"]
+                     RefSetDefault -> texts ["on", t, "set", "default"]
+                     RefRestrict -> texts ["on", t, "restrict"]
+                     RefNoAction -> texts ["on", t, "no", "action"]
+
+alterTableAction :: Dialect -> AlterTableAction -> Doc
+alterTableAction d (AddColumnDef cd) =
+    texts ["add", "column"] <+> columnDef d cd
+
+alterTableAction d (AlterColumnSetDefault n v) =
+    texts ["alter", "column"]
+    <+> name n
+    <+> texts ["set","default"] <+> scalarExpr d v
+alterTableAction _ (AlterColumnDropDefault n) =
+    texts ["alter", "column"]
+    <+> name n
+    <+> texts ["drop","default"]
+
+alterTableAction _ (AlterColumnSetNotNull n) =
+    texts ["alter", "column"]
+    <+> name n
+    <+> texts ["set","not","null"]
+
+alterTableAction _ (AlterColumnDropNotNull n) =
+    texts ["alter", "column"]
+    <+> name n
+    <+> texts ["drop","not","null"]
+
+alterTableAction _ (AlterColumnSetDataType n t) =
+    texts ["alter", "column"]
+    <+> name n
+    <+> texts ["set","data","type"]
+    <+> typeName t
+
+alterTableAction _ (DropColumn n b) =
+    texts ["drop", "column"]
+    <+> name n
+    <+> dropBehav b
+
+alterTableAction d (AddTableConstraintDef n con) =
+    text "add"
+    <+> maybe empty (\s -> text "constraint" <+> names s) n
+    <+> tableConstraint d con
+
+alterTableAction _ (DropTableConstraintDef n b) =
+    texts ["drop", "constraint"]
+    <+> names n
+    <+> dropBehav b
+
+
+tableConstraint :: Dialect -> TableConstraint -> Doc
+tableConstraint _ (TableUniqueConstraint ns) =
+         text "unique" <+> parens (commaSep $ map name ns)
+tableConstraint _ (TablePrimaryKeyConstraint ns) =
+        texts ["primary","key"] <+> parens (commaSep $ map name ns)
+tableConstraint _ (TableReferencesConstraint cs t tcs m u del) =
+        texts ["foreign", "key"]
+        <+> parens (commaSep $ map name cs)
+        <+> text "references"
+        <+> names t
+        <+> maybe empty (\c' -> parens (commaSep $ map name c')) tcs
+        <+> refMatch m
+        <+> refAct "update" u
+        <+> refAct "delete" del
+tableConstraint d (TableCheckConstraint v) = text "check" <+> parens (scalarExpr d v)
+
+
+privAct :: PrivilegeAction -> Doc
+privAct PrivAll = texts ["all","privileges"]
+privAct (PrivSelect cs) = text "select" <+> maybeColList cs
+privAct (PrivInsert cs) = text "insert" <+> maybeColList cs
+privAct (PrivUpdate cs) = text "update" <+> maybeColList cs
+privAct (PrivReferences cs) = text "references" <+> maybeColList cs
+privAct PrivDelete = text "delete"
+privAct PrivUsage = text "usage"
+privAct PrivTrigger = text "trigger"
+privAct PrivExecute = text "execute"
+
+maybeColList :: [Name] -> Doc
+maybeColList cs =
+    if null cs
+    then empty
+    else parens (commaSep $ map name cs)
+
+privObj :: PrivilegeObject -> Doc
+privObj (PrivTable nm) = names nm
+privObj (PrivDomain nm) = text "domain" <+> names nm
+privObj (PrivType nm) = text "type" <+> names nm
+privObj (PrivSequence nm) = text "sequence" <+> names nm
+privObj (PrivFunction nm) = texts ["specific", "function"] <+> names nm
+
+-- = utils
+
+commaSep :: [Doc] -> Doc
+commaSep ds = sep $ punctuate comma ds
+
+me :: (a -> Doc) -> Maybe a -> Doc
+me = maybe empty
+
+comment :: Comment -> Doc
+comment (BlockComment str) = text "/*" <+> text str <+> text "*/"
+
+texts :: [String] -> Doc
+texts ts = sep $ map text ts
diff --git a/Language/SQL/SimpleSQL/Pretty.lhs b/Language/SQL/SimpleSQL/Pretty.lhs
deleted file mode 100644
index 44ac911..0000000
--- a/Language/SQL/SimpleSQL/Pretty.lhs
+++ /dev/null
@@ -1,844 +0,0 @@
-
-> -- | These is the pretty printing functions, which produce SQL
-> -- source from ASTs. The code attempts to format the output in a
-> -- readable way.
-> module Language.SQL.SimpleSQL.Pretty
->     (prettyQueryExpr
->     ,prettyScalarExpr
->     ,prettyStatement
->     ,prettyStatements
->     ) where
-
-> import Prelude hiding ((<>))
-
-TODO: there should be more comments in this file, especially the bits
-which have been changed to try to improve the layout of the output.
-Try to do this when this code is ported to a modern pretty printing lib.
-
-> --import Language.SQL.SimpleSQL.Dialect
-> import Text.PrettyPrint (render, vcat, text, (<>), (<+>), empty, parens,
->                          nest, Doc, punctuate, comma, sep, quotes,
->                          brackets,hcat)
-> import Data.Maybe (maybeToList, catMaybes)
-> import Data.List (intercalate)
-
-
-> import Language.SQL.SimpleSQL.Syntax
-> import Language.SQL.SimpleSQL.Dialect
-
-
-> -- | Convert a query expr ast to concrete syntax.
-> prettyQueryExpr :: Dialect -> QueryExpr -> String
-> prettyQueryExpr d = render . queryExpr d
-
-> -- | Convert a value expr ast to concrete syntax.
-> prettyScalarExpr :: Dialect -> ScalarExpr -> String
-> prettyScalarExpr d = render . scalarExpr d
-
-> -- | A terminating semicolon.
-> terminator :: Doc
-> terminator = text ";\n"
-
-> -- | Convert a statement ast to concrete syntax.
-> prettyStatement :: Dialect -> Statement -> String
-> prettyStatement _ EmptyStatement = render terminator
-> prettyStatement d s = render (statement d s)
-
-> -- | Convert a list of statements to concrete syntax. A semicolon
-> -- is inserted after each statement.
-> prettyStatements :: Dialect -> [Statement] -> String
-> prettyStatements d = render . vcat . map prettyStatementWithSemicolon
->   where
->     prettyStatementWithSemicolon :: Statement -> Doc
->     prettyStatementWithSemicolon s = statement d s <> terminator
-
-= scalar expressions
-
-> scalarExpr :: Dialect -> ScalarExpr -> Doc
-> scalarExpr _ (StringLit s e t) = text s <> text t <> text e
-
-> scalarExpr _ (NumLit s) = text s
-> scalarExpr _ (IntervalLit s v f t) =
->     text "interval"
->     <+> me (\x -> text $ case x of
->                              Plus -> "+"
->                              Minus -> "-") s
->     <+> quotes (text v)
->     <+> intervalTypeField f
->     <+> me (\x -> text "to" <+> intervalTypeField x) t
-> scalarExpr _ (Iden i) = names i
-> scalarExpr _ Star = text "*"
-> scalarExpr _ Parameter = text "?"
-> scalarExpr _ (PositionalArg n) = text $ "$" ++ show n
-> scalarExpr _ (HostParameter p i) =
->     text p
->     <+> me (\i' -> text "indicator" <+> text i') i
-
-> scalarExpr d (App f es) = names f <> parens (commaSep (map (scalarExpr d) es))
-
-> scalarExpr dia (AggregateApp f d es od fil) =
->     names f
->     <> parens ((case d of
->                   Distinct -> text "distinct"
->                   All -> text "all"
->                   SQDefault -> empty)
->                <+> commaSep (map (scalarExpr dia) es)
->                <+> orderBy dia od)
->     <+> me (\x -> text "filter"
->                   <+> parens (text "where" <+> scalarExpr dia x)) fil
-
-> scalarExpr d (AggregateAppGroup f es od) =
->     names f
->     <> parens (commaSep (map (scalarExpr d) es))
->     <+> if null od
->         then empty
->         else text "within group" <+> parens (orderBy d od)
-
-> scalarExpr d (WindowApp f es pb od fr) =
->     names f <> parens (commaSep $ map (scalarExpr d) es)
->     <+> text "over"
->     <+> parens ((case pb of
->                     [] -> empty
->                     _ -> text "partition by"
->                           <+> nest 13 (commaSep $ map (scalarExpr d) pb))
->                 <+> orderBy d od
->     <+> me frd fr)
->   where
->     frd (FrameFrom rs fp) = rsd rs <+> fpd fp
->     frd (FrameBetween rs fps fpe) =
->         rsd rs <+> text "between" <+> fpd fps
->         <+> text "and" <+> fpd fpe
->     rsd rs = case rs of
->                  FrameRows -> text "rows"
->                  FrameRange -> text "range"
->     fpd UnboundedPreceding = text "unbounded preceding"
->     fpd UnboundedFollowing = text "unbounded following"
->     fpd Current = text "current row"
->     fpd (Preceding e) = scalarExpr d e <+> text "preceding"
->     fpd (Following e) = scalarExpr d e <+> text "following"
-
-> scalarExpr dia (SpecialOp nm [a,b,c]) | nm `elem` [[Name Nothing "between"]
->                                                  ,[Name Nothing "not between"]] =
->   sep [scalarExpr dia a
->       ,names nm <+> scalarExpr dia b
->       ,nest (length (unnames nm) + 1) $ text "and" <+> scalarExpr dia c]
-
-> scalarExpr d (SpecialOp [Name Nothing "rowctor"] as) =
->     parens $ commaSep $ map (scalarExpr d) as
-
-> scalarExpr d (SpecialOp nm es) =
->   names nm <+> parens (commaSep $ map (scalarExpr d) es)
-
-> scalarExpr d (SpecialOpK nm fs as) =
->     names nm <> parens (sep $ catMaybes
->         (fmap (scalarExpr d) fs
->          : map (\(n,e) -> Just (text n <+> scalarExpr d e)) as))
-
-> scalarExpr d (PrefixOp f e) = names f <+> scalarExpr d e
-> scalarExpr d (PostfixOp f e) = scalarExpr d e <+> names f
-> scalarExpr d e@(BinOp _ op _) | op `elem` [[Name Nothing "and"]
->                                          ,[Name Nothing "or"]] =
->     -- special case for and, or, get all the ands so we can vcat them
->     -- nicely
->     case ands e of
->       (e':es) -> vcat (scalarExpr d e'
->                        : map ((names op <+>) . scalarExpr d) es)
->       [] -> empty -- shouldn't be possible
->   where
->     ands (BinOp a op' b) | op == op' = ands a ++ ands b
->     ands x = [x]
-> -- special case for . we don't use whitespace
-> scalarExpr d (BinOp e0 [Name Nothing "."] e1) =
->     scalarExpr d e0 <> text "." <> scalarExpr d e1
-> scalarExpr d (BinOp e0 f e1) =
->     scalarExpr d e0 <+> names f <+> scalarExpr d e1
-
-> scalarExpr dia (Case t ws els) =
->     sep $ [text "case" <+> me (scalarExpr dia) t]
->           ++ map w ws
->           ++ maybeToList (fmap e els)
->           ++ [text "end"]
->   where
->     w (t0,t1) =
->       text "when" <+> nest 5 (commaSep $ map (scalarExpr dia) t0)
->       <+> text "then" <+> nest 5 (scalarExpr dia t1)
->     e el = text "else" <+> nest 5 (scalarExpr dia el)
-> scalarExpr d (Parens e) = parens $ scalarExpr d e
-> scalarExpr d (Cast e tn) =
->     text "cast" <> parens (sep [scalarExpr d e
->                                ,text "as"
->                                ,typeName tn])
-
-> scalarExpr _ (TypedLit tn s) =
->     typeName tn <+> quotes (text s)
-
-> scalarExpr d (SubQueryExpr ty qe) =
->     (case ty of
->         SqSq -> empty
->         SqExists -> text "exists"
->         SqUnique -> text "unique"
->     ) <+> parens (queryExpr d qe)
-
-> scalarExpr d (QuantifiedComparison v c cp sq) =
->     scalarExpr d v
->     <+> names c
->     <+> (text $ case cp of
->              CPAny -> "any"
->              CPSome -> "some"
->              CPAll -> "all")
->     <+> parens (queryExpr d sq)
-
-> scalarExpr d (Match v u sq) =
->     scalarExpr d v
->     <+> text "match"
->     <+> (if u then text "unique" else empty)
->     <+> parens (queryExpr d sq)
-
-> scalarExpr d (In b se x) =
->     scalarExpr d se <+>
->     (if b then empty else text "not")
->     <+> text "in"
->     <+> parens (nest (if b then 3 else 7) $
->                  case x of
->                      InList es -> commaSep $ map (scalarExpr d) es
->                      InQueryExpr qe -> queryExpr d qe)
-
-> scalarExpr d (Array v es) =
->     scalarExpr d v <> brackets (commaSep $ map (scalarExpr d) es)
-
-> scalarExpr d (ArrayCtor q) =
->     text "array" <> parens (queryExpr d q)
-
-> scalarExpr d (MultisetCtor es) =
->     text "multiset" <> brackets (commaSep $ map (scalarExpr d) es)
-
-> scalarExpr d (MultisetQueryCtor q) =
->     text "multiset" <> parens (queryExpr d q)
-
-> scalarExpr d (MultisetBinOp a c q b) =
->     sep
->     [scalarExpr d a
->     ,text "multiset"
->     ,text $ case c of
->                 Union -> "union"
->                 Intersect -> "intersect"
->                 Except -> "except"
->     ,case q of
->          SQDefault -> empty
->          All -> text "all"
->          Distinct -> text "distinct"
->     ,scalarExpr d b]
-
-> {-scalarExpr d (Escape v e) =
->     scalarExpr d v <+> text "escape" <+> text [e]
-
-> scalarExpr d (UEscape v e) =
->     scalarExpr d v <+> text "uescape" <+> text [e]-}
-
-> scalarExpr d (Collate v c) =
->     scalarExpr d v <+> text "collate" <+> names c
-
-> scalarExpr _ (NextValueFor ns) =
->     text "next value for" <+> names ns
-
-> scalarExpr d (VEComment cmt v) =
->     vcat $ map comment cmt ++ [scalarExpr d v]
-
-> scalarExpr _ (OdbcLiteral t s) =
->     text "{" <> lt t <+> quotes (text s) <> text "}"
->   where
->     lt OLDate = text "d"
->     lt OLTime = text "t"
->     lt OLTimestamp = text "ts"
-
-> scalarExpr d (OdbcFunc e) =
->     text "{fn" <+> scalarExpr d e <> text "}"
-
-> scalarExpr d (Convert t e Nothing) =
->     text "convert(" <> typeName t <> text "," <+> scalarExpr d e <> text ")"
-> scalarExpr d (Convert t e (Just i)) =
->     text "convert(" <> typeName t <> text "," <+> scalarExpr d e <> text "," <+> text (show i) <> text ")"
-
-> unname :: Name -> String
-> unname (Name Nothing n) = n
-> unname (Name (Just (s,e)) n) =
->     s ++ n ++ e
-
-> unnames :: [Name] -> String
-> unnames ns = intercalate "." $ map unname ns
-
-
-> name :: Name -> Doc
-> name (Name Nothing n) = text n
-> name (Name (Just (s,e)) n) = text s <> text n <> text e
-
-> names :: [Name] -> Doc
-> names ns = hcat $ punctuate (text ".") $ map name ns
-
-> typeName :: TypeName -> Doc
-> typeName (TypeName t) = names t
-> typeName (PrecTypeName t a) = names t <+> parens (text $ show a)
-> typeName (PrecScaleTypeName t a b) =
->     names t <+> parens (text (show a) <+> comma <+> text (show b))
-> typeName (PrecLengthTypeName t i m u) =
->     names t
->     <> parens (text (show i)
->                <> me (\x -> case x of
->                            PrecK -> text "K"
->                            PrecM -> text "M"
->                            PrecG -> text "G"
->                            PrecT -> text "T"
->                            PrecP -> text "P") m
->                <+> me (\x -> case x of
->                        PrecCharacters -> text "CHARACTERS"
->                        PrecOctets -> text "OCTETS") u)
-> typeName (CharTypeName t i cs col) =
->     names t
->     <> me (\x -> parens (text $ show x)) i
->     <+> (if null cs
->          then empty
->          else text "character set" <+> names cs)
->     <+> (if null col
->          then empty
->          else text "collate" <+> names col)
-> typeName (TimeTypeName t i tz) =
->     names t
->     <> me (\x -> parens (text $ show x)) i
->     <+> text (if tz
->               then "with time zone"
->               else "without time zone")
-> typeName (RowTypeName cs) =
->     text "row" <> parens (commaSep $ map f cs)
->   where
->     f (n,t) = name n <+> typeName t
-> typeName (IntervalTypeName f t) =
->     text "interval"
->     <+> intervalTypeField f
->     <+> me (\x -> text "to" <+> intervalTypeField x) t
-
-> typeName (ArrayTypeName tn sz) =
->     typeName tn <+> text "array" <+> me (brackets . text . show) sz
-
-> typeName (MultisetTypeName tn) =
->     typeName tn <+> text "multiset"
-
-> intervalTypeField :: IntervalTypeField -> Doc
-> intervalTypeField (Itf n p) =
->     text n
->     <+> me (\(x,x1) ->
->              parens (text (show x)
->                      <+> me (\y -> (sep [comma,text (show y)])) x1)) p
-
-
-= query expressions
-
-> queryExpr :: Dialect -> QueryExpr -> Doc
-> queryExpr dia (Select d sl fr wh gb hv od off fe) =
->   sep [text "select"
->       ,case d of
->           SQDefault -> empty
->           All -> text "all"
->           Distinct -> text "distinct"
->       ,nest 7 $ sep [selectList dia sl]
->       ,from dia fr
->       ,maybeScalarExpr dia "where" wh
->       ,grpBy dia gb
->       ,maybeScalarExpr dia "having" hv
->       ,orderBy dia od
->       ,me (\e -> text "offset" <+> scalarExpr dia e <+> text "rows") off
->       ,fetchFirst
->       ]
->   where
->     fetchFirst =
->       me (\e -> if diLimit dia
->                 then text "limit" <+> scalarExpr dia e
->                 else text "fetch first" <+> scalarExpr dia e
->                      <+> text "rows only") fe
-
-> queryExpr dia (QueryExprSetOp q1 ct d c q2) =
->   sep [queryExpr dia q1
->       ,text (case ct of
->                 Union -> "union"
->                 Intersect -> "intersect"
->                 Except -> "except")
->        <+> case d of
->                SQDefault -> empty
->                All -> text "all"
->                Distinct -> text "distinct"
->        <+> case c of
->                Corresponding -> text "corresponding"
->                Respectively -> empty
->       ,queryExpr dia q2]
-> queryExpr d (With rc withs qe) =
->   text "with" <+> (if rc then text "recursive" else empty)
->   <+> vcat [nest 5
->             (vcat $ punctuate comma $ flip map withs $ \(n,q) ->
->              withAlias n <+> text "as" <+> parens (queryExpr d q))
->            ,queryExpr d qe]
->   where
->     withAlias (Alias nm cols) = name nm
->                                 <+> me (parens . commaSep . map name) cols
-
-
-> queryExpr d (Values vs) =
->     text "values"
->     <+> nest 7 (commaSep (map (parens . commaSep . map (scalarExpr d)) vs))
-> queryExpr _ (Table t) = text "table" <+> names t
-> queryExpr d (QEComment cmt v) =
->     vcat $ map comment cmt ++ [queryExpr d v]
-
-
-> alias :: Alias -> Doc
-> alias (Alias nm cols) =
->     text "as" <+> name nm
->     <+> me (parens . commaSep . map name) cols
-
-> selectList :: Dialect -> [(ScalarExpr,Maybe Name)] -> Doc
-> selectList d is = commaSep $ map si is
->   where
->     si (e,al) = scalarExpr d e <+> me als al
->     als al = text "as" <+> name al
-
-> from :: Dialect -> [TableRef] -> Doc
-> from _ [] = empty
-> from d ts =
->     sep [text "from"
->         ,nest 5 $ vcat $ punctuate comma $ map tr ts]
->   where
->     tr (TRSimple t) = names t
->     tr (TRLateral t) = text "lateral" <+> tr t
->     tr (TRFunction f as) =
->         names f <> parens (commaSep $ map (scalarExpr d) as)
->     tr (TRAlias t a) = sep [tr t, alias a]
->     tr (TRParens t) = parens $ tr t
->     tr (TRQueryExpr q) = parens $ queryExpr d q
->     tr (TRJoin t0 b jt t1 jc) =
->        sep [tr t0
->            ,if b then text "natural" else empty
->            ,joinText jt <+> tr t1
->            ,joinCond jc]
->     tr (TROdbc t) = text "{oj" <+> tr t <+> text "}"
->     joinText jt =
->       sep [case jt of
->               JInner -> text "inner"
->               JLeft -> text "left"
->               JRight -> text "right"
->               JFull -> text "full"
->               JCross -> text "cross"
->           ,text "join"]
->     joinCond (Just (JoinOn e)) = text "on" <+> scalarExpr d e
->     joinCond (Just (JoinUsing es)) =
->         text "using" <+> parens (commaSep $ map name es)
->     joinCond Nothing = empty
-
-> maybeScalarExpr :: Dialect -> String -> Maybe ScalarExpr -> Doc
-> maybeScalarExpr d k = me
->       (\e -> sep [text k
->                  ,nest (length k + 1) $ scalarExpr d e])
-
-> grpBy :: Dialect -> [GroupingExpr] -> Doc
-> grpBy _ [] = empty
-> grpBy d gs = sep [text "group by"
->                ,nest 9 $ commaSep $ map ge gs]
->   where
->     ge (SimpleGroup e) = scalarExpr d e
->     ge (GroupingParens g) = parens (commaSep $ map ge g)
->     ge (Cube es) = text "cube" <> parens (commaSep $ map ge es)
->     ge (Rollup es) = text "rollup" <> parens (commaSep $ map ge es)
->     ge (GroupingSets es) = text "grouping sets" <> parens (commaSep $ map ge es)
-
-> orderBy :: Dialect -> [SortSpec] -> Doc
-> orderBy _ [] = empty
-> orderBy dia os = sep [text "order by"
->                  ,nest 9 $ commaSep $ map f os]
->   where
->     f (SortSpec e d n) =
->         scalarExpr dia e
->         <+> (case d of
->                   Asc -> text "asc"
->                   Desc -> text "desc"
->                   DirDefault -> empty)
->         <+> (case n of
->                 NullsOrderDefault -> empty
->                 NullsFirst -> text "nulls" <+> text "first"
->                 NullsLast -> text "nulls" <+> text "last")
-
-= statements
-
-> statement :: Dialect -> Statement -> Doc
-
-
-== ddl
-
-> statement _ (CreateSchema nm) =
->     text "create" <+> text "schema" <+> names nm
-
-> statement d (CreateTable nm cds) =
->     text "create" <+> text "table" <+> names nm
->     <+> parens (commaSep $ map cd cds)
->   where
->     cd (TableConstraintDef n con) =
->         maybe empty (\s -> text "constraint" <+> names s) n
->         <+> tableConstraint d con
->     cd (TableColumnDef cd') = columnDef d cd'
-
-> statement d (AlterTable t act) =
->     texts ["alter","table"] <+> names t
->     <+> alterTableAction d act
-
-> statement _ (DropSchema nm db) =
->     text "drop" <+> text "schema" <+> names nm <+> dropBehav db
-
-> statement d (CreateDomain nm ty def cs) =
->     text "create" <+> text "domain" <+> names nm
->     <+> typeName ty
->     <+> maybe empty (\def' -> text "default" <+> scalarExpr d def') def
->     <+> sep (map con cs)
->   where
->     con (cn, e) =
->         maybe empty (\cn' -> text "constraint" <+> names cn') cn
->         <+> text "check" <> parens (scalarExpr d e)
-
-> statement d (AlterDomain nm act) =
->     texts ["alter","domain"]
->     <+> names nm
->     <+> a act
->   where
->     a (ADSetDefault v) = texts ["set","default"] <+> scalarExpr d v
->     a (ADDropDefault) = texts ["drop","default"]
->     a (ADAddConstraint cnm e) =
->         text "add"
->         <+> maybe empty (\cnm' -> text "constraint" <+> names cnm') cnm
->         <+> text "check" <> parens (scalarExpr d e)
->     a (ADDropConstraint cnm) = texts ["drop", "constraint"]
->                                <+> names cnm
-
-
-> statement _ (DropDomain nm db) =
->     text "drop" <+> text "domain" <+> names nm <+> dropBehav db
-
-> statement _ (CreateSequence nm sgos) =
->   texts ["create","sequence"] <+> names nm
->   <+> sep (map sequenceGeneratorOption sgos)
-
-> statement _ (AlterSequence nm sgos) =
->   texts ["alter","sequence"] <+> names nm
->   <+> sep (map sequenceGeneratorOption sgos)
-
-> statement _ (DropSequence nm db) =
->     text "drop" <+> text "sequence" <+> names nm <+> dropBehav db
-
-
-> statement d (CreateAssertion nm ex) =
->   texts ["create","assertion"] <+> names nm
->   <+> text "check" <+> parens (scalarExpr d ex)
-
-> statement _ (DropAssertion nm db) =
->     text "drop" <+> text "assertion" <+> names nm <+> dropBehav db
-
-> statement _ (CreateIndex un nm tbl cols) =
->   texts (if un
->          then ["create","unique","index"]
->          else  ["create","index"])
->   <+> names nm
->   <+> text "on"
->   <+> names tbl
->   <+> parens (commaSep $ map name cols)
-
-== dml
-
-> statement d (SelectStatement q) = queryExpr d q
-
-> statement d (Delete t a w) =
->     text "delete" <+> text "from"
->     <+> names t <+> maybe empty (\x -> text "as" <+> name x) a
->     <+> maybeScalarExpr d "where" w
-
-> statement _ (Truncate t ir) =
->     text "truncate" <+> text "table" <+> names t
->     <+> case ir of
->             DefaultIdentityRestart -> empty
->             ContinueIdentity -> text "continue" <+> text "identity"
->             RestartIdentity -> text "restart" <+> text "identity"
-
-> statement d (Insert t cs s) =
->     text "insert" <+> text "into" <+> names t
->     <+> maybe empty (\cs' -> parens (commaSep $ map name cs')) cs
->     <+> case s of
->             DefaultInsertValues -> text "default" <+> text "values"
->             InsertQuery q -> queryExpr d q
-
-> statement d (Update t a sts whr) =
->     text "update" <+> names t
->     <+> maybe empty (\x -> text "as" <+> name x) a
->     <+> text "set" <+> commaSep (map sc sts)
->     <+> maybeScalarExpr d "where" whr
->   where
->     sc (Set tg v) = names tg <+> text "=" <+> scalarExpr d v
->     sc (SetMultiple ts vs) = parens (commaSep $ map names ts) <+> text "="
->                              <+> parens (commaSep $ map (scalarExpr d) vs)
-
-> statement _ (DropTable n b) =
->     text "drop" <+> text "table" <+> names n <+> dropBehav b
-
-> statement d (CreateView r nm al q co) =
->     text "create" <+> (if r then text "recursive" else empty)
->     <+> text "view" <+> names nm
->     <+> (maybe empty (\al' -> parens $ commaSep $ map name al')) al
->     <+> text "as"
->     <+> queryExpr d q
->     <+> case co of
->             Nothing -> empty
->             Just DefaultCheckOption -> texts ["with", "check", "option"]
->             Just CascadedCheckOption -> texts ["with", "cascaded", "check", "option"]
->             Just LocalCheckOption -> texts ["with", "local", "check", "option"]
-
-> statement _ (DropView n b) =
->     text "drop" <+> text "view" <+> names n <+> dropBehav b
-
-
-== transactions
-
-> statement _ StartTransaction =
->     texts ["start", "transaction"]
-
-> statement _ (Savepoint nm) =
->     text "savepoint" <+> name nm
-
-> statement _ (ReleaseSavepoint nm) =
->     texts ["release", "savepoint"] <+> name nm
-
-> statement _ Commit =
->     text "commit"
-
-> statement _ (Rollback mn) =
->     text "rollback"
->     <+> maybe empty (\n -> texts ["to","savepoint"] <+> name n) mn
-
-== access control
-
-> statement _ (GrantPrivilege pas po rs go) =
->     text "grant" <+> commaSep (map privAct pas)
->     <+> text "on" <+> privObj po
->     <+> text "to" <+> commaSep (map name rs)
->     <+> grantOpt go
->   where
->     grantOpt WithGrantOption = texts ["with","grant","option"]
->     grantOpt WithoutGrantOption = empty
-
-> statement _ (GrantRole rs trs ao) =
->     text "grant" <+> commaSep (map name rs)
->     <+> text "to" <+> commaSep (map name trs)
->     <+> adminOpt ao
->   where
->     adminOpt WithAdminOption = texts ["with","admin","option"]
->     adminOpt WithoutAdminOption = empty
-
-> statement _ (CreateRole nm) =
->     texts ["create","role"] <+> name nm
-
-> statement _ (DropRole nm) =
->     texts ["drop","role"] <+> name nm
-
-> statement _ (RevokePrivilege go pas po rs db) =
->     text "revoke"
->     <+> grantOptFor go
->     <+> commaSep (map privAct pas)
->     <+> text "on" <+> privObj po
->     <+> text "from" <+> commaSep (map name rs)
->     <+> dropBehav db
->   where
->     grantOptFor GrantOptionFor = texts ["grant","option","for"]
->     grantOptFor NoGrantOptionFor = empty
-
-> statement _ (RevokeRole ao rs trs db) =
->     text "revoke"
->     <+> adminOptFor ao
->     <+> commaSep (map name rs)
->     <+> text "from" <+> commaSep (map name trs)
->     <+> dropBehav db
->   where
->     adminOptFor AdminOptionFor = texts ["admin","option","for"]
->     adminOptFor NoAdminOptionFor = empty
-
-
-> statement _ (StatementComment cs) = vcat $ map comment cs
-> statement _ EmptyStatement = empty
-
-
-== sessions
-
-
-== extras
-
-> dropBehav :: DropBehaviour -> Doc
-> dropBehav DefaultDropBehaviour = empty
-> dropBehav Cascade = text "cascade"
-> dropBehav Restrict = text "restrict"
-
-
-> columnDef :: Dialect -> ColumnDef -> Doc
-> columnDef d (ColumnDef n t mdef cons) =
->       name n <+> typeName t
->       <+> case mdef of
->              Nothing -> empty
->              Just (DefaultClause def) ->
->                  text "default" <+> scalarExpr d def
->              Just (GenerationClause e) ->
->                  texts ["generated","always","as"] <+> parens (scalarExpr d e)
->              Just (IdentityColumnSpec w o) ->
->                  text "generated"
->                  <+> (case w of
->                          GeneratedAlways -> text "always"
->                          GeneratedByDefault -> text "by" <+> text "default")
->                  <+> text "as" <+> text "identity"
->                  <+> (case o of
->                          [] -> empty
->                          os -> parens (sep $ map sequenceGeneratorOption os))
->       <+> sep (map cdef cons)
->   where
->     cdef (ColConstraintDef cnm con) =
->         maybe empty (\s -> text "constraint" <+> names s) cnm
->         <+> pcon con
->     pcon ColNotNullConstraint = texts ["not","null"]
->     pcon ColUniqueConstraint = text "unique"
->     pcon (ColPrimaryKeyConstraint autoincrement) = 
->       texts $ ["primary","key"] ++ ["autoincrement"|autoincrement]
->     pcon (ColCheckConstraint v) = text "check" <+> parens (scalarExpr d v)
->     pcon (ColReferencesConstraint tb c m u del) =
->         text "references"
->         <+> names tb
->         <+> maybe empty (\c' -> parens (name c')) c
->         <+> refMatch m
->         <+> refAct "update" u
->         <+> refAct "delete" del
-
-> sequenceGeneratorOption :: SequenceGeneratorOption -> Doc
-> sequenceGeneratorOption (SGODataType t) =
->     text "as" <+> typeName t
-> sequenceGeneratorOption (SGORestart mi) =
->     text "restart" <+> maybe empty (\mi' -> texts ["with", show mi']) mi
-> sequenceGeneratorOption (SGOStartWith i) = texts ["start",  "with", show i]
-> sequenceGeneratorOption (SGOIncrementBy i) = texts ["increment", "by", show i]
-> sequenceGeneratorOption (SGOMaxValue i) = texts ["maxvalue", show i]
-> sequenceGeneratorOption SGONoMaxValue = texts ["no", "maxvalue"]
-> sequenceGeneratorOption (SGOMinValue i) = texts ["minvalue", show i]
-> sequenceGeneratorOption SGONoMinValue = texts ["no", "minvalue"]
-> sequenceGeneratorOption SGOCycle = text "cycle"
-> sequenceGeneratorOption SGONoCycle = text "no cycle"
-
-> refMatch :: ReferenceMatch -> Doc
-> refMatch m = case m of
->                      DefaultReferenceMatch -> empty
->                      MatchFull -> texts ["match", "full"]
->                      MatchPartial -> texts ["match","partial"]
->                      MatchSimple -> texts ["match", "simple"]
-
-> refAct :: String -> ReferentialAction -> Doc
-> refAct t a = case a of
->                      DefaultReferentialAction -> empty
->                      RefCascade -> texts ["on", t, "cascade"]
->                      RefSetNull -> texts ["on", t, "set", "null"]
->                      RefSetDefault -> texts ["on", t, "set", "default"]
->                      RefRestrict -> texts ["on", t, "restrict"]
->                      RefNoAction -> texts ["on", t, "no", "action"]
-
-> alterTableAction :: Dialect -> AlterTableAction -> Doc
-> alterTableAction d (AddColumnDef cd) =
->     texts ["add", "column"] <+> columnDef d cd
-
-> alterTableAction d (AlterColumnSetDefault n v) =
->     texts ["alter", "column"]
->     <+> name n
->     <+> texts ["set","default"] <+> scalarExpr d v
-> alterTableAction _ (AlterColumnDropDefault n) =
->     texts ["alter", "column"]
->     <+> name n
->     <+> texts ["drop","default"]
-
-> alterTableAction _ (AlterColumnSetNotNull n) =
->     texts ["alter", "column"]
->     <+> name n
->     <+> texts ["set","not","null"]
-
-> alterTableAction _ (AlterColumnDropNotNull n) =
->     texts ["alter", "column"]
->     <+> name n
->     <+> texts ["drop","not","null"]
-
-> alterTableAction _ (AlterColumnSetDataType n t) =
->     texts ["alter", "column"]
->     <+> name n
->     <+> texts ["set","data","Type"]
->     <+> typeName t
-
-> alterTableAction _ (DropColumn n b) =
->     texts ["drop", "column"]
->     <+> name n
->     <+> dropBehav b
-
-> alterTableAction d (AddTableConstraintDef n con) =
->     text "add"
->     <+> maybe empty (\s -> text "constraint" <+> names s) n
->     <+> tableConstraint d con
-
-> alterTableAction _ (DropTableConstraintDef n b) =
->     texts ["drop", "constraint"]
->     <+> names n
->     <+> dropBehav b
-
-
-> tableConstraint :: Dialect -> TableConstraint -> Doc
-> tableConstraint _ (TableUniqueConstraint ns) =
->          text "unique" <+> parens (commaSep $ map name ns)
-> tableConstraint _ (TablePrimaryKeyConstraint ns) =
->         texts ["primary","key"] <+> parens (commaSep $ map name ns)
-> tableConstraint _ (TableReferencesConstraint cs t tcs m u del) =
->         texts ["foreign", "key"]
->         <+> parens (commaSep $ map name cs)
->         <+> text "references"
->         <+> names t
->         <+> maybe empty (\c' -> parens (commaSep $ map name c')) tcs
->         <+> refMatch m
->         <+> refAct "update" u
->         <+> refAct "delete" del
-> tableConstraint d (TableCheckConstraint v) = text "check" <+> parens (scalarExpr d v)
-
-
-> privAct :: PrivilegeAction -> Doc
-> privAct PrivAll = texts ["all","privileges"]
-> privAct (PrivSelect cs) = text "select" <+> maybeColList cs
-> privAct (PrivInsert cs) = text "insert" <+> maybeColList cs
-> privAct (PrivUpdate cs) = text "update" <+> maybeColList cs
-> privAct (PrivReferences cs) = text "references" <+> maybeColList cs
-> privAct PrivDelete = text "delete"
-> privAct PrivUsage = text "usage"
-> privAct PrivTrigger = text "trigger"
-> privAct PrivExecute = text "execute"
-
-> maybeColList :: [Name] -> Doc
-> maybeColList cs =
->     if null cs
->     then empty
->     else parens (commaSep $ map name cs)
-
-> privObj :: PrivilegeObject -> Doc
-> privObj (PrivTable nm) = names nm
-> privObj (PrivDomain nm) = text "domain" <+> names nm
-> privObj (PrivType nm) = text "type" <+> names nm
-> privObj (PrivSequence nm) = text "sequence" <+> names nm
-> privObj (PrivFunction nm) = texts ["specific", "function"] <+> names nm
-
-= utils
-
-> commaSep :: [Doc] -> Doc
-> commaSep ds = sep $ punctuate comma ds
-
-> me :: (a -> Doc) -> Maybe a -> Doc
-> me = maybe empty
-
-> comment :: Comment -> Doc
-> comment (BlockComment str) = text "/*" <+> text str <+> text "*/"
-
-> texts :: [String] -> Doc
-> texts ts = sep $ map text ts
diff --git a/Language/SQL/SimpleSQL/Syntax.hs b/Language/SQL/SimpleSQL/Syntax.hs
new file mode 100644
index 0000000..d33bc31
--- /dev/null
+++ b/Language/SQL/SimpleSQL/Syntax.hs
@@ -0,0 +1,744 @@
+
+-- | The AST for SQL.
+{-# LANGUAGE DeriveDataTypeable #-}
+module Language.SQL.SimpleSQL.Syntax
+    (-- * Scalar expressions
+     ScalarExpr(..)
+    ,Name(..)
+    ,TypeName(..)
+    ,IntervalTypeField(..)
+    ,Sign(..)
+    ,PrecMultiplier(..)
+    ,PrecUnits(..)
+    ,SetQuantifier(..)
+    ,SortSpec(..)
+    ,Direction(..)
+    ,NullsOrder(..)
+    ,InPredValue(..)
+    ,SubQueryExprType(..)
+    ,CompPredQuantifier(..)
+    ,Frame(..)
+    ,FrameRows(..)
+    ,FramePos(..)
+    ,OdbcLiteralType(..)
+     -- * Query expressions
+    ,QueryExpr(..)
+    ,makeSelect
+    ,SetOperatorName(..)
+    ,Corresponding(..)
+    ,Alias(..)
+    ,GroupingExpr(..)
+     -- ** From
+    ,TableRef(..)
+    ,JoinType(..)
+    ,JoinCondition(..)
+     -- * Statements
+    ,Statement(..)
+    ,DropBehaviour(..)
+    ,IdentityRestart(..)
+    ,InsertSource(..)
+    ,SetClause(..)
+    ,TableElement(..)
+    ,ColumnDef(..)
+    ,DefaultClause(..)
+    ,IdentityWhen(..)
+    ,SequenceGeneratorOption(..)
+    ,ColConstraintDef(..)
+    ,AutoincrementClause
+    ,ColConstraint(..)
+    ,TableConstraint(..)
+    ,ReferenceMatch(..)
+    ,ReferentialAction(..)
+    ,AlterTableAction(..)
+    ,CheckOption(..)
+    ,AlterDomainAction(..)
+    ,AdminOption(..)
+    ,GrantOption(..)
+    ,PrivilegeObject(..)
+    ,PrivilegeAction(..)
+    ,AdminOptionFor(..)
+    ,GrantOptionFor(..)
+     -- * Comment
+    ,Comment(..)
+    ) where
+
+import Data.Data
+
+-- | Represents a value expression. This is used for the expressions
+-- in select lists. It is also used for expressions in where, group
+-- by, having, order by and so on.
+data ScalarExpr
+    = -- | a numeric literal, with optional decimal point and e+-
+      -- integral exponent, e.g
+      --
+      -- * 10
+      --
+      -- * 10.
+      --
+      -- * .1
+      --
+      -- * 10.1
+      --
+      -- * 1e5
+      --
+      -- * 12.34e-6
+      NumLit String
+      -- | string literal, with the start and end quote
+      -- e.g. 'test' -> StringLit "'" "'" "test"
+    | StringLit String String String
+      -- | text of interval literal, units of interval precision,
+      -- e.g. interval 3 days (3)
+    | IntervalLit
+      {ilSign :: Maybe Sign -- ^ if + or - used
+      ,ilLiteral :: String -- ^ literal text
+      ,ilFrom :: IntervalTypeField
+      ,ilTo :: Maybe IntervalTypeField
+      }
+
+      -- | prefix 'typed literal', e.g. int '42'
+    | TypedLit TypeName String
+
+      -- | identifier with parts separated by dots
+    | Iden [Name]
+      -- | star, as in select *, t.*, count(*)
+    | Star
+
+    | Parameter -- ^ Represents a ? in a parameterized query
+    | PositionalArg Int -- ^ Represents an e.g. $1 in a parameterized query
+    | HostParameter String (Maybe String) -- ^ represents a host
+                                          -- parameter, e.g. :a. The
+                                          -- Maybe String is for the
+                                          -- indicator, e.g. :var
+                                          -- indicator :nl
+
+
+      -- | Infix binary operators. This is used for symbol operators
+      -- (a + b), keyword operators (a and b) and multiple keyword
+      -- operators (a is similar to b)
+    | BinOp ScalarExpr [Name] ScalarExpr
+      -- | Prefix unary operators. This is used for symbol
+      -- operators, keyword operators and multiple keyword operators.
+    | PrefixOp [Name] ScalarExpr
+      -- | Postfix unary operators. This is used for symbol
+      -- operators, keyword operators and multiple keyword operators.
+    | PostfixOp [Name] ScalarExpr
+      -- | Used for ternary, mixfix and other non orthodox
+      -- operators. Currently used for row constructors, and for
+      -- between.
+    | SpecialOp [Name] [ScalarExpr]
+
+      -- | function application (anything that looks like c style
+      -- function application syntactically)
+    | App [Name] [ScalarExpr]
+
+
+      -- | aggregate application, which adds distinct or all, and
+      -- order by, to regular function application
+    | AggregateApp
+      {aggName :: [Name] -- ^ aggregate function name
+      ,aggDistinct :: SetQuantifier -- ^ distinct
+      ,aggArgs :: [ScalarExpr]-- ^ args
+      ,aggOrderBy :: [SortSpec] -- ^ order by
+      ,aggFilter :: Maybe ScalarExpr -- ^ filter
+      }
+      -- | aggregates with within group
+    | AggregateAppGroup
+      {aggName :: [Name] -- ^ aggregate function name
+      ,aggArgs :: [ScalarExpr] -- ^ args
+      ,aggGroup :: [SortSpec] -- ^ within group
+      }
+      -- | window application, which adds over (partition by a order
+      -- by b) to regular function application. Explicit frames are
+      -- not currently supported
+    | WindowApp
+      {wnName :: [Name] -- ^ window function name
+      ,wnArgs :: [ScalarExpr] -- ^ args
+      ,wnPartition :: [ScalarExpr] -- ^ partition by
+      ,wnOrderBy :: [SortSpec] -- ^ order by
+      ,wnFrame :: Maybe Frame -- ^ frame clause
+      }
+
+      -- | Used for the operators which look like functions
+      -- except the arguments are separated by keywords instead
+      -- of commas. The maybe is for the first unnamed argument
+      -- if it is present, and the list is for the keyword argument
+      -- pairs.
+    | SpecialOpK [Name] (Maybe ScalarExpr) [(String,ScalarExpr)]
+
+      -- | cast(a as typename)
+    | Cast ScalarExpr TypeName
+
+      -- | convert expression to given datatype @CONVERT(data_type(length), expression, style)@
+    | Convert TypeName ScalarExpr (Maybe Integer)
+
+      -- | case expression. both flavours supported
+    | Case
+      {caseTest :: Maybe ScalarExpr -- ^ test value
+      ,caseWhens :: [([ScalarExpr],ScalarExpr)] -- ^ when branches
+      ,caseElse :: Maybe ScalarExpr -- ^ else value
+      }
+
+    | Parens ScalarExpr
+
+      -- | in list literal and in subquery, if the bool is false it
+      -- means not in was used ('a not in (1,2)')
+    | In Bool ScalarExpr InPredValue
+
+      -- | exists, all, any, some subqueries
+    | SubQueryExpr SubQueryExprType QueryExpr
+
+    | QuantifiedComparison
+            ScalarExpr
+            [Name] -- operator
+            CompPredQuantifier
+            QueryExpr
+
+    | Match ScalarExpr Bool -- true if unique
+          QueryExpr
+    | Array ScalarExpr [ScalarExpr] -- ^ represents an array
+                                  -- access expression, or an array ctor
+                                  -- e.g. a[3]. The first
+                                  -- scalarExpr is the array, the
+                                  -- second is the subscripts/ctor args
+    | ArrayCtor QueryExpr -- ^ this is used for the query expression version of array constructors, e.g. array(select * from t)
+
+{-
+todo: special syntax for like, similar with escape - escape cannot go
+in other places
+-}
+
+    --  | Escape ScalarExpr Char
+    --  | UEscape ScalarExpr Char
+    | Collate ScalarExpr [Name]
+    | MultisetBinOp ScalarExpr SetOperatorName SetQuantifier ScalarExpr
+    | MultisetCtor [ScalarExpr]
+    | MultisetQueryCtor QueryExpr
+    | NextValueFor [Name]
+    | VEComment [Comment] ScalarExpr
+    | OdbcLiteral OdbcLiteralType String
+      -- ^ an odbc literal e.g. {d '2000-01-01'}
+    | OdbcFunc ScalarExpr
+      -- ^ an odbc function call e.g. {fn CHARACTER_LENGTH('test')}
+      deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents an identifier name, which can be quoted or unquoted.
+-- examples:
+--
+-- * test -> Name Nothing "test"
+-- * "test" -> Name (Just ("\"","\"")) "test"
+-- * `something` -> Name (Just ("`","`")) "something"
+-- * [ms] -> Name (Just ("[","]")) "ms"
+data Name = Name (Maybe (String,String)) String
+            deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents a type name, used in casts.
+data TypeName
+    = TypeName [Name]
+    | PrecTypeName [Name] Integer
+    | PrecScaleTypeName [Name] Integer Integer
+    | PrecLengthTypeName [Name] Integer (Maybe PrecMultiplier) (Maybe PrecUnits)
+      -- precision, characterset, collate
+    | CharTypeName [Name] (Maybe Integer) [Name] [Name]
+    | TimeTypeName [Name] (Maybe Integer) Bool -- true == with time zone
+    | RowTypeName [(Name,TypeName)]
+    | IntervalTypeName IntervalTypeField (Maybe IntervalTypeField)
+    | ArrayTypeName TypeName (Maybe Integer)
+    | MultisetTypeName TypeName
+      deriving (Eq,Show,Read,Data,Typeable)
+
+data IntervalTypeField = Itf String (Maybe (Integer, Maybe Integer))
+                         deriving (Eq,Show,Read,Data,Typeable)
+
+data Sign = Plus | Minus
+            deriving (Eq,Show,Read,Data,Typeable)
+
+data PrecMultiplier = PrecK | PrecM | PrecG | PrecT | PrecP
+                      deriving (Eq,Show,Read,Data,Typeable)
+data PrecUnits = PrecCharacters
+               | PrecOctets
+                deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Used for 'expr in (scalar expression list)', and 'expr in
+-- (subquery)' syntax.
+data InPredValue = InList [ScalarExpr]
+                 | InQueryExpr QueryExpr
+                   deriving (Eq,Show,Read,Data,Typeable)
+
+-- not sure if scalar subquery, exists and unique should be represented like this
+
+-- | A subquery in a scalar expression.
+data SubQueryExprType
+    = -- | exists (query expr)
+      SqExists
+      -- | unique (query expr)
+    | SqUnique
+      -- | a scalar subquery
+    | SqSq
+      deriving (Eq,Show,Read,Data,Typeable)
+
+data CompPredQuantifier
+    = CPAny
+    | CPSome
+    | CPAll
+      deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents one field in an order by list.
+data SortSpec = SortSpec ScalarExpr Direction NullsOrder
+                deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents 'nulls first' or 'nulls last' in an order by clause.
+data NullsOrder = NullsOrderDefault
+                | NullsFirst
+                | NullsLast
+                  deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents the frame clause of a window
+-- this can be [range | rows] frame_start
+-- or [range | rows] between frame_start and frame_end
+data Frame = FrameFrom FrameRows FramePos
+           | FrameBetween FrameRows FramePos FramePos
+             deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents whether a window frame clause is over rows or ranges.
+data FrameRows = FrameRows | FrameRange
+                 deriving (Eq,Show,Read,Data,Typeable)
+
+-- | represents the start or end of a frame
+data FramePos = UnboundedPreceding
+              | Preceding ScalarExpr
+              | Current
+              | Following ScalarExpr
+              | UnboundedFollowing
+                deriving (Eq,Show,Read,Data,Typeable)
+
+
+-- | the type of an odbc literal (e.g. {d '2000-01-01'}),
+-- corresponding to the letter after the opening {
+data OdbcLiteralType = OLDate
+                     | OLTime
+                     | OLTimestamp
+                       deriving (Eq,Show,Read,Data,Typeable)
+
+
+-- | Represents a query expression, which can be:
+--
+-- * a regular select;
+--
+-- * a set operator (union, except, intersect);
+--
+-- * a common table expression (with);
+--
+-- * a table value constructor (values (1,2),(3,4)); or
+--
+-- * an explicit table (table t).
+data QueryExpr
+    = Select
+      {qeSetQuantifier :: SetQuantifier
+      ,qeSelectList :: [(ScalarExpr,Maybe Name)]
+       -- ^ the expressions and the column aliases
+
+{-
+TODO: consider breaking this up. The SQL grammar has
+queryexpr = select <select list> [<table expression>]
+table expression = <from> [where] [groupby] [having] ...
+
+This would make some things a bit cleaner?
+-}
+
+      ,qeFrom :: [TableRef]
+      ,qeWhere :: Maybe ScalarExpr
+      ,qeGroupBy :: [GroupingExpr]
+      ,qeHaving :: Maybe ScalarExpr
+      ,qeOrderBy :: [SortSpec]
+      ,qeOffset :: Maybe ScalarExpr
+      ,qeFetchFirst :: Maybe ScalarExpr
+      }
+    | QueryExprSetOp
+      {qe0 :: QueryExpr
+      ,qeCombOp :: SetOperatorName
+      ,qeSetQuantifier :: SetQuantifier
+      ,qeCorresponding :: Corresponding
+      ,qe1 :: QueryExpr
+      }
+    | With
+      {qeWithRecursive :: Bool
+      ,qeViews :: [(Alias,QueryExpr)]
+      ,qeQueryExpression :: QueryExpr}
+    | Values [[ScalarExpr]]
+    | Table [Name]
+    | QEComment [Comment] QueryExpr
+      deriving (Eq,Show,Read,Data,Typeable)
+
+{-
+TODO: add queryexpr parens to deal with e.g.
+(select 1 union select 2) union select 3
+I'm not sure if this is valid syntax or not.
+-}
+
+-- | Helper/'default' value for query exprs to make creating query
+-- expr values a little easier. It is defined like this:
+--
+-- > makeSelect :: QueryExpr
+-- > makeSelect = Select {qeSetQuantifier = SQDefault
+-- >                     ,qeSelectList = []
+-- >                     ,qeFrom = []
+-- >                     ,qeWhere = Nothing
+-- >                     ,qeGroupBy = []
+-- >                     ,qeHaving = Nothing
+-- >                     ,qeOrderBy = []
+-- >                     ,qeOffset = Nothing
+-- >                     ,qeFetchFirst = Nothing}
+
+makeSelect :: QueryExpr
+makeSelect = Select {qeSetQuantifier = SQDefault
+                    ,qeSelectList = []
+                    ,qeFrom = []
+                    ,qeWhere = Nothing
+                    ,qeGroupBy = []
+                    ,qeHaving = Nothing
+                    ,qeOrderBy = []
+                    ,qeOffset = Nothing
+                    ,qeFetchFirst = Nothing}
+
+-- | Represents the Distinct or All keywords, which can be used
+-- before a select list, in an aggregate/window function
+-- application, or in a query expression set operator.
+data SetQuantifier = SQDefault | Distinct | All deriving (Eq,Show,Read,Data,Typeable)
+
+-- | The direction for a column in order by.
+data Direction = DirDefault | Asc | Desc deriving (Eq,Show,Read,Data,Typeable)
+-- | Query expression set operators.
+data SetOperatorName = Union | Except | Intersect deriving (Eq,Show,Read,Data,Typeable)
+-- | Corresponding, an option for the set operators.
+data Corresponding = Corresponding | Respectively deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents an item in a group by clause.
+data GroupingExpr
+    = GroupingParens [GroupingExpr]
+    | Cube [GroupingExpr]
+    | Rollup [GroupingExpr]
+    | GroupingSets [GroupingExpr]
+    | SimpleGroup ScalarExpr
+      deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents an entry in the csv of tables in the from clause.
+data TableRef = -- | from t / from s.t
+                TRSimple [Name]
+                -- | from a join b, the bool is true if natural was used
+              | TRJoin TableRef Bool JoinType TableRef (Maybe JoinCondition)
+                -- | from (a)
+              | TRParens TableRef
+                -- | from a as b(c,d)
+              | TRAlias TableRef Alias
+                -- | from (query expr)
+              | TRQueryExpr QueryExpr
+                -- | from function(args)
+              | TRFunction [Name] [ScalarExpr]
+                -- | from lateral t
+              | TRLateral TableRef
+                -- | ODBC {oj t1 left outer join t2 on expr} syntax
+              | TROdbc TableRef
+                deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Represents an alias for a table valued expression, used in with
+-- queries and in from alias, e.g. select a from t u, select a from t u(b),
+-- with a(c) as select 1, select * from a.
+data Alias = Alias Name (Maybe [Name])
+             deriving (Eq,Show,Read,Data,Typeable)
+
+-- | The type of a join.
+data JoinType = JInner | JLeft | JRight | JFull | JCross
+                deriving (Eq,Show,Read,Data,Typeable)
+
+-- | The join condition.
+data JoinCondition = JoinOn ScalarExpr -- ^ on expr
+                   | JoinUsing [Name] -- ^ using (column list)
+                     deriving (Eq,Show,Read,Data,Typeable)
+
+-- ---------------------------
+
+data Statement =
+    -- ddl
+    CreateSchema [Name]
+  | DropSchema [Name] DropBehaviour
+  | CreateTable [Name] [TableElement]
+  | AlterTable [Name] AlterTableAction
+  | DropTable [Name] DropBehaviour
+  | CreateIndex Bool [Name] [Name] [Name]
+  | CreateView Bool [Name] (Maybe [Name])
+        QueryExpr (Maybe CheckOption)
+  | DropView [Name]  DropBehaviour
+  | CreateDomain [Name] TypeName (Maybe ScalarExpr)
+       [(Maybe [Name], ScalarExpr)]
+  | AlterDomain [Name] AlterDomainAction
+  | DropDomain [Name] DropBehaviour
+
+    -- probably won't do character sets, collations
+    -- and translations because I think they are too far from
+    -- reality
+  {-  | CreateCharacterSet
+  | DropCharacterSet
+  | CreateCollation
+  | DropCollation
+  | CreateTranslation
+  | DropTranslation -}
+  | CreateAssertion [Name] ScalarExpr
+  | DropAssertion [Name] DropBehaviour
+  {-   | CreateTrigger
+  | DropTrigger
+  | CreateType
+  | AlterType
+  | DropType
+    -- routine stuff? TODO
+  | CreateCast
+  | DropCast
+  | CreateOrdering
+  | DropOrdering -}
+    -- transforms
+  | CreateSequence [Name] [SequenceGeneratorOption]
+  | AlterSequence [Name] [SequenceGeneratorOption]
+  | DropSequence [Name] DropBehaviour
+    -- dml
+  | SelectStatement QueryExpr
+  {-    | DeclareCursor
+  | OpenCursor
+  | FetchCursor
+  | CloseCursor
+  | SelectInto -}
+  --   | DeletePositioned
+  | Delete [Name] (Maybe Name) (Maybe ScalarExpr)
+  | Truncate [Name] IdentityRestart
+  | Insert [Name] (Maybe [Name]) InsertSource
+  --  | Merge
+  | Update [Name] (Maybe Name) [SetClause] (Maybe ScalarExpr)
+  {-  | TemporaryTable
+  | FreeLocator
+  | HoldLocator  -}
+    -- access control
+  | GrantPrivilege [PrivilegeAction] PrivilegeObject [Name] GrantOption
+  | GrantRole [Name] [Name] AdminOption
+  | CreateRole Name
+  | DropRole Name
+  | RevokePrivilege GrantOptionFor [PrivilegeAction] PrivilegeObject
+            [Name] DropBehaviour
+  | RevokeRole AdminOptionFor [Name] [Name] DropBehaviour
+    -- transaction management
+  | StartTransaction
+  --  | SetTransaction
+  --  | SetContraints
+  | Savepoint Name
+  | ReleaseSavepoint Name
+  | Commit
+  | Rollback (Maybe Name)
+    -- session
+  {-  | SetSessionCharacteristics
+  | SetSessionAuthorization
+  | SetRole
+  | SetTimeZone
+  | SetCatalog
+  | SetSchema
+  | SetNames
+  | SetTransform
+  | SetCollation -}
+  | StatementComment [Comment]
+  | EmptyStatement
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data DropBehaviour =
+    Restrict
+  | Cascade
+  | DefaultDropBehaviour
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data IdentityRestart =
+    ContinueIdentity
+  | RestartIdentity
+  | DefaultIdentityRestart
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data InsertSource =
+    InsertQuery QueryExpr
+  | DefaultInsertValues
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data SetClause =
+    Set [Name] ScalarExpr
+  | SetMultiple [[Name]] [ScalarExpr]
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data TableElement =
+    TableColumnDef ColumnDef
+  | TableConstraintDef (Maybe [Name]) TableConstraint
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data ColumnDef = ColumnDef Name TypeName
+       (Maybe DefaultClause)
+       [ColConstraintDef]
+       -- (Maybe CollateClause)
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data ColConstraintDef =
+    ColConstraintDef (Maybe [Name]) ColConstraint
+      -- (Maybe [ConstraintCharacteristics])
+    deriving (Eq,Show,Read,Data,Typeable)
+
+type AutoincrementClause = Bool
+
+data ColConstraint =
+    ColNullableConstraint
+  | ColNotNullConstraint
+  | ColUniqueConstraint
+  | ColPrimaryKeyConstraint AutoincrementClause
+  | ColReferencesConstraint [Name] (Maybe Name)
+       ReferenceMatch
+       ReferentialAction
+       ReferentialAction
+  | ColCheckConstraint ScalarExpr
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data TableConstraint =
+    TableUniqueConstraint [Name]
+  | TablePrimaryKeyConstraint [Name]
+  | TableReferencesConstraint [Name] [Name] (Maybe [Name])
+       ReferenceMatch
+       ReferentialAction
+       ReferentialAction
+  | TableCheckConstraint ScalarExpr
+    deriving (Eq,Show,Read,Data,Typeable)
+
+
+data ReferenceMatch =
+    DefaultReferenceMatch
+  | MatchFull
+  | MatchPartial
+  | MatchSimple
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data ReferentialAction =
+    DefaultReferentialAction
+  | RefCascade
+  | RefSetNull
+  | RefSetDefault
+  | RefRestrict
+  | RefNoAction
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data AlterTableAction =
+    AddColumnDef ColumnDef
+  | AlterColumnSetDefault Name ScalarExpr
+  | AlterColumnDropDefault Name
+  | AlterColumnSetNotNull Name
+  | AlterColumnDropNotNull Name
+  | AlterColumnSetDataType Name TypeName
+  {-  | AlterColumnAlterIdentity
+  | AlterColumnDropIdentity
+  | AlterColumnDropColumnGeneration-}
+  | DropColumn Name DropBehaviour
+  | AddTableConstraintDef (Maybe [Name]) TableConstraint
+  --  | AlterTableConstraintDef
+  | DropTableConstraintDef [Name] DropBehaviour
+    deriving (Eq,Show,Read,Data,Typeable)
+
+{-data ConstraintCharacteristics =
+    ConstraintCharacteristics
+        ConstraintCheckTime
+        Deferrable
+        ConstraintEnforcement
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data ConstraintCheckTime =
+    DefaultConstraintCheckTime
+  | InitiallyDeferred
+  | InitiallyImmediate
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data Deferrable =
+    DefaultDeferrable
+  | Deferrable
+  | NotDeferrable
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data ConstraintEnforcement =
+    DefaultConstraintEnforcement
+  | Enforced
+  | NotEnforced
+    deriving (Eq,Show,Read,Data,Typeable) -}
+
+{-data TableConstraintDef
+    deriving (Eq,Show,Read,Data,Typeable) -}
+
+data DefaultClause =
+     DefaultClause ScalarExpr
+   | IdentityColumnSpec IdentityWhen [SequenceGeneratorOption]
+   | GenerationClause ScalarExpr
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data IdentityWhen =
+    GeneratedAlways
+  | GeneratedByDefault
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data SequenceGeneratorOption =
+    SGODataType TypeName
+  | SGOStartWith Integer
+  | SGORestart (Maybe Integer)
+  | SGOIncrementBy Integer
+  | SGOMaxValue Integer
+  | SGONoMaxValue
+  | SGOMinValue Integer
+  | SGONoMinValue
+  | SGOCycle
+  | SGONoCycle
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data CheckOption =
+    DefaultCheckOption
+  | CascadedCheckOption
+  | LocalCheckOption
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data AlterDomainAction =
+    ADSetDefault ScalarExpr
+  | ADDropDefault
+  | ADAddConstraint (Maybe [Name]) ScalarExpr
+  | ADDropConstraint [Name]
+    deriving (Eq,Show,Read,Data,Typeable)
+
+
+data AdminOption = WithAdminOption | WithoutAdminOption
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data GrantOption = WithGrantOption | WithoutGrantOption
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data AdminOptionFor = AdminOptionFor | NoAdminOptionFor
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data GrantOptionFor = GrantOptionFor | NoGrantOptionFor
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data PrivilegeObject =
+      PrivTable [Name]
+    | PrivDomain [Name]
+    | PrivType [Name]
+    | PrivSequence [Name]
+    | PrivFunction [Name]
+    deriving (Eq,Show,Read,Data,Typeable)
+
+data PrivilegeAction =
+      PrivAll
+    | PrivSelect [Name]
+    | PrivDelete
+    | PrivInsert [Name]
+    | PrivUpdate [Name]
+    | PrivReferences [Name]
+    | PrivUsage
+    | PrivTrigger
+    | PrivExecute
+    deriving (Eq,Show,Read,Data,Typeable)
+
+-- | Comment. Useful when generating SQL code programmatically. The
+-- parser doesn't produce these.
+newtype Comment = BlockComment String
+               deriving (Eq,Show,Read,Data,Typeable)
+
diff --git a/Language/SQL/SimpleSQL/Syntax.lhs b/Language/SQL/SimpleSQL/Syntax.lhs
deleted file mode 100644
index 6e1753f..0000000
--- a/Language/SQL/SimpleSQL/Syntax.lhs
+++ /dev/null
@@ -1,738 +0,0 @@
-
-> -- | The AST for SQL.
-> {-# LANGUAGE DeriveDataTypeable #-}
-> module Language.SQL.SimpleSQL.Syntax
->     (-- * Scalar expressions
->      ScalarExpr(..)
->     ,Name(..)
->     ,TypeName(..)
->     ,IntervalTypeField(..)
->     ,Sign(..)
->     ,PrecMultiplier(..)
->     ,PrecUnits(..)
->     ,SetQuantifier(..)
->     ,SortSpec(..)
->     ,Direction(..)
->     ,NullsOrder(..)
->     ,InPredValue(..)
->     ,SubQueryExprType(..)
->     ,CompPredQuantifier(..)
->     ,Frame(..)
->     ,FrameRows(..)
->     ,FramePos(..)
->     ,OdbcLiteralType(..)
->      -- * Query expressions
->     ,QueryExpr(..)
->     ,makeSelect
->     ,SetOperatorName(..)
->     ,Corresponding(..)
->     ,Alias(..)
->     ,GroupingExpr(..)
->      -- ** From
->     ,TableRef(..)
->     ,JoinType(..)
->     ,JoinCondition(..)
->      -- * Statements
->     ,Statement(..)
->     ,DropBehaviour(..)
->     ,IdentityRestart(..)
->     ,InsertSource(..)
->     ,SetClause(..)
->     ,TableElement(..)
->     ,ColumnDef(..)
->     ,DefaultClause(..)
->     ,IdentityWhen(..)
->     ,SequenceGeneratorOption(..)
->     ,ColConstraintDef(..)
->     ,AutoincrementClause
->     ,ColConstraint(..)
->     ,TableConstraint(..)
->     ,ReferenceMatch(..)
->     ,ReferentialAction(..)
->     ,AlterTableAction(..)
->     ,CheckOption(..)
->     ,AlterDomainAction(..)
->     ,AdminOption(..)
->     ,GrantOption(..)
->     ,PrivilegeObject(..)
->     ,PrivilegeAction(..)
->     ,AdminOptionFor(..)
->     ,GrantOptionFor(..)
->      -- * Comment
->     ,Comment(..)
->     ) where
-
-> import Data.Data
-
-> -- | Represents a value expression. This is used for the expressions
-> -- in select lists. It is also used for expressions in where, group
-> -- by, having, order by and so on.
-> data ScalarExpr
->     = -- | a numeric literal optional decimal point, e+-
->       -- integral exponent, e.g
->       --
->       -- * 10
->       --
->       -- * 10.
->       --
->       -- * .1
->       --
->       -- * 10.1
->       --
->       -- * 1e5
->       --
->       -- * 12.34e-6
->       NumLit String
->       -- | string literal, with the start and end quote
->       -- e.g. 'test' -> StringLit "'" "'" "test"
->     | StringLit String String String
->       -- | text of interval literal, units of interval precision,
->       -- e.g. interval 3 days (3)
->     | IntervalLit
->       {ilSign :: Maybe Sign -- ^ if + or - used
->       ,ilLiteral :: String -- ^ literal text
->       ,ilFrom :: IntervalTypeField
->       ,ilTo :: Maybe IntervalTypeField
->       }
-
->       -- | prefix 'typed literal', e.g. int '42'
->     | TypedLit TypeName String
-
->       -- | identifier with parts separated by dots
->     | Iden [Name]
->       -- | star, as in select *, t.*, count(*)
->     | Star
-
->     | Parameter -- ^ Represents a ? in a parameterized query
->     | PositionalArg Int -- ^ Represents an e.g. $1 in a parameterized query
->     | HostParameter String (Maybe String) -- ^ represents a host
->                                           -- parameter, e.g. :a. The
->                                           -- Maybe String is for the
->                                           -- indicator, e.g. :var
->                                           -- indicator :nl
-
-
->       -- | Infix binary operators. This is used for symbol operators
->       -- (a + b), keyword operators (a and b) and multiple keyword
->       -- operators (a is similar to b)
->     | BinOp ScalarExpr [Name] ScalarExpr
->       -- | Prefix unary operators. This is used for symbol
->       -- operators, keyword operators and multiple keyword operators.
->     | PrefixOp [Name] ScalarExpr
->       -- | Postfix unary operators. This is used for symbol
->       -- operators, keyword operators and multiple keyword operators.
->     | PostfixOp [Name] ScalarExpr
->       -- | Used for ternary, mixfix and other non orthodox
->       -- operators. Currently used for row constructors, and for
->       -- between.
->     | SpecialOp [Name] [ScalarExpr]
-
->       -- | function application (anything that looks like c style
->       -- function application syntactically)
->     | App [Name] [ScalarExpr]
-
-
->       -- | aggregate application, which adds distinct or all, and
->       -- order by, to regular function application
->     | AggregateApp
->       {aggName :: [Name] -- ^ aggregate function name
->       ,aggDistinct :: SetQuantifier -- ^ distinct
->       ,aggArgs :: [ScalarExpr]-- ^ args
->       ,aggOrderBy :: [SortSpec] -- ^ order by
->       ,aggFilter :: Maybe ScalarExpr -- ^ filter
->       }
->       -- | aggregates with within group
->     | AggregateAppGroup
->       {aggName :: [Name] -- ^ aggregate function name
->       ,aggArgs :: [ScalarExpr] -- ^ args
->       ,aggGroup :: [SortSpec] -- ^ within group
->       }
->       -- | window application, which adds over (partition by a order
->       -- by b) to regular function application. Explicit frames are
->       -- not currently supported
->     | WindowApp
->       {wnName :: [Name] -- ^ window function name
->       ,wnArgs :: [ScalarExpr] -- ^ args
->       ,wnPartition :: [ScalarExpr] -- ^ partition by
->       ,wnOrderBy :: [SortSpec] -- ^ order by
->       ,wnFrame :: Maybe Frame -- ^ frame clause
->       }
-
->       -- | Used for the operators which look like functions
->       -- except the arguments are separated by keywords instead
->       -- of commas. The maybe is for the first unnamed argument
->       -- if it is present, and the list is for the keyword argument
->       -- pairs.
->     | SpecialOpK [Name] (Maybe ScalarExpr) [(String,ScalarExpr)]
-
->       -- | cast(a as typename)
->     | Cast ScalarExpr TypeName
-
->       -- | convert expression to given datatype @CONVERT(data_type(length), expression, style)@
->     | Convert TypeName ScalarExpr (Maybe Integer)
-
->       -- | case expression. both flavours supported
->     | Case
->       {caseTest :: Maybe ScalarExpr -- ^ test value
->       ,caseWhens :: [([ScalarExpr],ScalarExpr)] -- ^ when branches
->       ,caseElse :: Maybe ScalarExpr -- ^ else value
->       }
-
->     | Parens ScalarExpr
-
->       -- | in list literal and in subquery, if the bool is false it
->       -- means not in was used ('a not in (1,2)')
->     | In Bool ScalarExpr InPredValue
-
->       -- | exists, all, any, some subqueries
->     | SubQueryExpr SubQueryExprType QueryExpr
-
->     | QuantifiedComparison
->             ScalarExpr
->             [Name] -- operator
->             CompPredQuantifier
->             QueryExpr
-
->     | Match ScalarExpr Bool -- true if unique
->           QueryExpr
->     | Array ScalarExpr [ScalarExpr] -- ^ represents an array
->                                   -- access expression, or an array ctor
->                                   -- e.g. a[3]. The first
->                                   -- scalarExpr is the array, the
->                                   -- second is the subscripts/ctor args
->     | ArrayCtor QueryExpr -- ^ this is used for the query expression version of array constructors, e.g. array(select * from t)
-
-todo: special syntax for like, similar with escape - escape cannot go
-in other places
-
->     --  | Escape ScalarExpr Char
->     --  | UEscape ScalarExpr Char
->     | Collate ScalarExpr [Name]
->     | MultisetBinOp ScalarExpr SetOperatorName SetQuantifier ScalarExpr
->     | MultisetCtor [ScalarExpr]
->     | MultisetQueryCtor QueryExpr
->     | NextValueFor [Name]
->     | VEComment [Comment] ScalarExpr
->     | OdbcLiteral OdbcLiteralType String
->       -- ^ an odbc literal e.g. {d '2000-01-01'}
->     | OdbcFunc ScalarExpr
->       -- ^ an odbc function call e.g. {fn CHARACTER_LENGTH('test')}
->       deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents an identifier name, which can be quoted or unquoted.
-> -- examples:
-> --
-> -- * test -> Name Nothing "test"
-> -- * "test" -> Name (Just "\"","\"") "test"
-> -- * `something` -> Name (Just ("`","`") "something"
-> -- * [ms] -> Name (Just ("[","]") "ms"
-> data Name = Name (Maybe (String,String)) String
->             deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents a type name, used in casts.
-> data TypeName
->     = TypeName [Name]
->     | PrecTypeName [Name] Integer
->     | PrecScaleTypeName [Name] Integer Integer
->     | PrecLengthTypeName [Name] Integer (Maybe PrecMultiplier) (Maybe PrecUnits)
->       -- precision, characterset, collate
->     | CharTypeName [Name] (Maybe Integer) [Name] [Name]
->     | TimeTypeName [Name] (Maybe Integer) Bool -- true == with time zone
->     | RowTypeName [(Name,TypeName)]
->     | IntervalTypeName IntervalTypeField (Maybe IntervalTypeField)
->     | ArrayTypeName TypeName (Maybe Integer)
->     | MultisetTypeName TypeName
->       deriving (Eq,Show,Read,Data,Typeable)
-
-> data IntervalTypeField = Itf String (Maybe (Integer, Maybe Integer))
->                          deriving (Eq,Show,Read,Data,Typeable)
-
-> data Sign = Plus | Minus
->             deriving (Eq,Show,Read,Data,Typeable)
-
-> data PrecMultiplier = PrecK | PrecM | PrecG | PrecT | PrecP
->                       deriving (Eq,Show,Read,Data,Typeable)
-> data PrecUnits = PrecCharacters
->                | PrecOctets
->                 deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Used for 'expr in (scalar expression list)', and 'expr in
-> -- (subquery)' syntax.
-> data InPredValue = InList [ScalarExpr]
->                  | InQueryExpr QueryExpr
->                    deriving (Eq,Show,Read,Data,Typeable)
-
-not sure if scalar subquery, exists and unique should be represented like this
-
-> -- | A subquery in a scalar expression.
-> data SubQueryExprType
->     = -- | exists (query expr)
->       SqExists
->       -- | unique (query expr)
->     | SqUnique
->       -- | a scalar subquery
->     | SqSq
->       deriving (Eq,Show,Read,Data,Typeable)
-
-> data CompPredQuantifier
->     = CPAny
->     | CPSome
->     | CPAll
->       deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents one field in an order by list.
-> data SortSpec = SortSpec ScalarExpr Direction NullsOrder
->                 deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents 'nulls first' or 'nulls last' in an order by clause.
-> data NullsOrder = NullsOrderDefault
->                 | NullsFirst
->                 | NullsLast
->                   deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents the frame clause of a window
-> -- this can be [range | rows] frame_start
-> -- or [range | rows] between frame_start and frame_end
-> data Frame = FrameFrom FrameRows FramePos
->            | FrameBetween FrameRows FramePos FramePos
->              deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents whether a window frame clause is over rows or ranges.
-> data FrameRows = FrameRows | FrameRange
->                  deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | represents the start or end of a frame
-> data FramePos = UnboundedPreceding
->               | Preceding ScalarExpr
->               | Current
->               | Following ScalarExpr
->               | UnboundedFollowing
->                 deriving (Eq,Show,Read,Data,Typeable)
-
-
-> -- | the type of an odbc literal (e.g. {d '2000-01-01'}),
-> -- correpsonding to the letter after the opening {
-> data OdbcLiteralType = OLDate
->                      | OLTime
->                      | OLTimestamp
->                        deriving (Eq,Show,Read,Data,Typeable)
-
-
-> -- | Represents a query expression, which can be:
-> --
-> -- * a regular select;
-> --
-> -- * a set operator (union, except, intersect);
-> --
-> -- * a common table expression (with);
-> --
-> -- * a table value constructor (values (1,2),(3,4)); or
-> --
-> -- * an explicit table (table t).
-> data QueryExpr
->     = Select
->       {qeSetQuantifier :: SetQuantifier
->       ,qeSelectList :: [(ScalarExpr,Maybe Name)]
->        -- ^ the expressions and the column aliases
-
-TODO: consider breaking this up. The SQL grammar has
-queryexpr = select <select list> [<table expression>]
-table expression = <from> [where] [groupby] [having] ...
-
-This would make some things a bit cleaner?
-
->       ,qeFrom :: [TableRef]
->       ,qeWhere :: Maybe ScalarExpr
->       ,qeGroupBy :: [GroupingExpr]
->       ,qeHaving :: Maybe ScalarExpr
->       ,qeOrderBy :: [SortSpec]
->       ,qeOffset :: Maybe ScalarExpr
->       ,qeFetchFirst :: Maybe ScalarExpr
->       }
->     | QueryExprSetOp
->       {qe0 :: QueryExpr
->       ,qeCombOp :: SetOperatorName
->       ,qeSetQuantifier :: SetQuantifier
->       ,qeCorresponding :: Corresponding
->       ,qe1 :: QueryExpr
->       }
->     | With
->       {qeWithRecursive :: Bool
->       ,qeViews :: [(Alias,QueryExpr)]
->       ,qeQueryExpression :: QueryExpr}
->     | Values [[ScalarExpr]]
->     | Table [Name]
->     | QEComment [Comment] QueryExpr
->       deriving (Eq,Show,Read,Data,Typeable)
-
-TODO: add queryexpr parens to deal with e.g.
-(select 1 union select 2) union select 3
-I'm not sure if this is valid syntax or not.
-
-> -- | Helper/'default' value for query exprs to make creating query
-> -- expr values a little easier. It is defined like this:
-> --
-> -- > makeSelect :: QueryExpr
-> -- > makeSelect = Select {qeSetQuantifier = SQDefault
-> -- >                     ,qeSelectList = []
-> -- >                     ,qeFrom = []
-> -- >                     ,qeWhere = Nothing
-> -- >                     ,qeGroupBy = []
-> -- >                     ,qeHaving = Nothing
-> -- >                     ,qeOrderBy = []
-> -- >                     ,qeOffset = Nothing
-> -- >                     ,qeFetchFirst = Nothing}
-
-> makeSelect :: QueryExpr
-> makeSelect = Select {qeSetQuantifier = SQDefault
->                     ,qeSelectList = []
->                     ,qeFrom = []
->                     ,qeWhere = Nothing
->                     ,qeGroupBy = []
->                     ,qeHaving = Nothing
->                     ,qeOrderBy = []
->                     ,qeOffset = Nothing
->                     ,qeFetchFirst = Nothing}
-
-> -- | Represents the Distinct or All keywords, which can be used
-> -- before a select list, in an aggregate/window function
-> -- application, or in a query expression set operator.
-> data SetQuantifier = SQDefault | Distinct | All deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | The direction for a column in order by.
-> data Direction = DirDefault | Asc | Desc deriving (Eq,Show,Read,Data,Typeable)
-> -- | Query expression set operators.
-> data SetOperatorName = Union | Except | Intersect deriving (Eq,Show,Read,Data,Typeable)
-> -- | Corresponding, an option for the set operators.
-> data Corresponding = Corresponding | Respectively deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents an item in a group by clause.
-> data GroupingExpr
->     = GroupingParens [GroupingExpr]
->     | Cube [GroupingExpr]
->     | Rollup [GroupingExpr]
->     | GroupingSets [GroupingExpr]
->     | SimpleGroup ScalarExpr
->       deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents a entry in the csv of tables in the from clause.
-> data TableRef = -- | from t / from s.t
->                 TRSimple [Name]
->                 -- | from a join b, the bool is true if natural was used
->               | TRJoin TableRef Bool JoinType TableRef (Maybe JoinCondition)
->                 -- | from (a)
->               | TRParens TableRef
->                 -- | from a as b(c,d)
->               | TRAlias TableRef Alias
->                 -- | from (query expr)
->               | TRQueryExpr QueryExpr
->                 -- | from function(args)
->               | TRFunction [Name] [ScalarExpr]
->                 -- | from lateral t
->               | TRLateral TableRef
->                 -- | ODBC {oj t1 left outer join t2 on expr} syntax
->               | TROdbc TableRef
->                 deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Represents an alias for a table valued expression, used in with
-> -- queries and in from alias, e.g. select a from t u, select a from t u(b),
-> -- with a(c) as select 1, select * from a.
-> data Alias = Alias Name (Maybe [Name])
->              deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | The type of a join.
-> data JoinType = JInner | JLeft | JRight | JFull | JCross
->                 deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | The join condition.
-> data JoinCondition = JoinOn ScalarExpr -- ^ on expr
->                    | JoinUsing [Name] -- ^ using (column list)
->                      deriving (Eq,Show,Read,Data,Typeable)
-
----------------------------
-
-> data Statement =
->     -- ddl
->     CreateSchema [Name]
->   | DropSchema [Name] DropBehaviour
->   | CreateTable [Name] [TableElement]
->   | AlterTable [Name] AlterTableAction
->   | DropTable [Name] DropBehaviour
->   | CreateIndex Bool [Name] [Name] [Name]
->   | CreateView Bool [Name] (Maybe [Name])
->         QueryExpr (Maybe CheckOption)
->   | DropView [Name]  DropBehaviour
->   | CreateDomain [Name] TypeName (Maybe ScalarExpr)
->        [(Maybe [Name], ScalarExpr)]
->   | AlterDomain [Name] AlterDomainAction
->   | DropDomain [Name] DropBehaviour
-
->     -- probably won't do character sets, collations
->     -- and translations because I think they are too far from
->     -- reality
->   {-  | CreateCharacterSet
->   | DropCharacterSet
->   | CreateCollation
->   | DropCollation
->   | CreateTranslation
->   | DropTranslation -}
->   | CreateAssertion [Name] ScalarExpr
->   | DropAssertion [Name] DropBehaviour
->   {-   | CreateTrigger
->   | DropTrigger
->   | CreateType
->   | AlterType
->   | DropType
->     -- routine stuff? TODO
->   | CreateCast
->   | DropCast
->   | CreateOrdering
->   | DropOrdering -}
->     -- transforms
->   | CreateSequence [Name] [SequenceGeneratorOption]
->   | AlterSequence [Name] [SequenceGeneratorOption]
->   | DropSequence [Name] DropBehaviour
->     -- dml
->   | SelectStatement QueryExpr
->   {-    | DeclareCursor
->   | OpenCursor
->   | FetchCursor
->   | CloseCursor
->   | SelectInto -}
->   --   | DeletePositioned
->   | Delete [Name] (Maybe Name) (Maybe ScalarExpr)
->   | Truncate [Name] IdentityRestart
->   | Insert [Name] (Maybe [Name]) InsertSource
->   --  | Merge
->   | Update [Name] (Maybe Name) [SetClause] (Maybe ScalarExpr)
->   {-  | TemporaryTable
->   | FreeLocator
->   | HoldLocator  -}
->     -- access control
->   | GrantPrivilege [PrivilegeAction] PrivilegeObject [Name] GrantOption
->   | GrantRole [Name] [Name] AdminOption
->   | CreateRole Name
->   | DropRole Name
->   | RevokePrivilege GrantOptionFor [PrivilegeAction] PrivilegeObject
->             [Name] DropBehaviour
->   | RevokeRole AdminOptionFor [Name] [Name] DropBehaviour
->     -- transaction management
->   | StartTransaction
->   --  | SetTransaction
->   --  | SetContraints
->   | Savepoint Name
->   | ReleaseSavepoint Name
->   | Commit
->   | Rollback (Maybe Name)
->     -- session
->   {-  | SetSessionCharacteristics
->   | SetSessionAuthorization
->   | SetRole
->   | SetTimeZone
->   | SetCatalog
->   | SetSchema
->   | SetNames
->   | SetTransform
->   | SetCollation -}
->   | StatementComment [Comment]
->   | EmptyStatement
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data DropBehaviour =
->     Restrict
->   | Cascade
->   | DefaultDropBehaviour
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data IdentityRestart =
->     ContinueIdentity
->   | RestartIdentity
->   | DefaultIdentityRestart
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data InsertSource =
->     InsertQuery QueryExpr
->   | DefaultInsertValues
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data SetClause =
->     Set [Name] ScalarExpr
->   | SetMultiple [[Name]] [ScalarExpr]
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data TableElement =
->     TableColumnDef ColumnDef
->   | TableConstraintDef (Maybe [Name]) TableConstraint
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data ColumnDef = ColumnDef Name TypeName
->        (Maybe DefaultClause)
->        [ColConstraintDef]
->        -- (Maybe CollateClause)
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data ColConstraintDef =
->     ColConstraintDef (Maybe [Name]) ColConstraint
->       -- (Maybe [ConstraintCharacteristics])
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> type AutoincrementClause = Bool
->
-> data ColConstraint =
->     ColNullableConstraint
->   | ColNotNullConstraint
->   | ColUniqueConstraint
->   | ColPrimaryKeyConstraint AutoincrementClause
->   | ColReferencesConstraint [Name] (Maybe Name)
->        ReferenceMatch
->        ReferentialAction
->        ReferentialAction
->   | ColCheckConstraint ScalarExpr
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data TableConstraint =
->     TableUniqueConstraint [Name]
->   | TablePrimaryKeyConstraint [Name]
->   | TableReferencesConstraint [Name] [Name] (Maybe [Name])
->        ReferenceMatch
->        ReferentialAction
->        ReferentialAction
->   | TableCheckConstraint ScalarExpr
->     deriving (Eq,Show,Read,Data,Typeable)
-
-
-> data ReferenceMatch =
->     DefaultReferenceMatch
->   | MatchFull
->   | MatchPartial
->   | MatchSimple
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data ReferentialAction =
->     DefaultReferentialAction
->   | RefCascade
->   | RefSetNull
->   | RefSetDefault
->   | RefRestrict
->   | RefNoAction
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data AlterTableAction =
->     AddColumnDef ColumnDef
->   | AlterColumnSetDefault Name ScalarExpr
->   | AlterColumnDropDefault Name
->   | AlterColumnSetNotNull Name
->   | AlterColumnDropNotNull Name
->   | AlterColumnSetDataType Name TypeName
->   {-  | AlterColumnAlterIdentity
->   | AlterColumnDropIdentity
->   | AlterColumnDropColumnGeneration-}
->   | DropColumn Name DropBehaviour
->   | AddTableConstraintDef (Maybe [Name]) TableConstraint
->   --  | AlterTableConstraintDef
->   | DropTableConstraintDef [Name] DropBehaviour
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> {-data ConstraintCharacteristics =
->     ConstraintCharacteristics
->         ConstraintCheckTime
->         Deferrable
->         ConstraintEnforcement
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data ConstraintCheckTime =
->     DefaultConstraintCheckTime
->   | InitiallyDeferred
->   | InitiallyImmeditate
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data Deferrable =
->     DefaultDefferable
->   | Deferrable
->   | NotDeferrable
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data ConstraintEnforcement =
->     DefaultConstraintEnforcement
->   | Enforced
->   | NotEnforced
->     deriving (Eq,Show,Read,Data,Typeable) -}
-
-> {-data TableConstraintDef
->     deriving (Eq,Show,Read,Data,Typeable) -}
-
-> data DefaultClause =
->      DefaultClause ScalarExpr
->    | IdentityColumnSpec IdentityWhen [SequenceGeneratorOption]
->    | GenerationClause ScalarExpr
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data IdentityWhen =
->     GeneratedAlways
->   | GeneratedByDefault
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data SequenceGeneratorOption =
->     SGODataType TypeName
->   | SGOStartWith Integer
->   | SGORestart (Maybe Integer)
->   | SGOIncrementBy Integer
->   | SGOMaxValue Integer
->   | SGONoMaxValue
->   | SGOMinValue Integer
->   | SGONoMinValue
->   | SGOCycle
->   | SGONoCycle
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data CheckOption =
->     DefaultCheckOption
->   | CascadedCheckOption
->   | LocalCheckOption
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data AlterDomainAction =
->     ADSetDefault ScalarExpr
->   | ADDropDefault
->   | ADAddConstraint (Maybe [Name]) ScalarExpr
->   | ADDropConstraint [Name]
->     deriving (Eq,Show,Read,Data,Typeable)
-
-
-> data AdminOption = WithAdminOption | WithoutAdminOption
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data GrantOption = WithGrantOption | WithoutGrantOption
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data AdminOptionFor = AdminOptionFor | NoAdminOptionFor
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data GrantOptionFor = GrantOptionFor | NoGrantOptionFor
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data PrivilegeObject =
->       PrivTable [Name]
->     | PrivDomain [Name]
->     | PrivType [Name]
->     | PrivSequence [Name]
->     | PrivFunction [Name]
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> data PrivilegeAction =
->       PrivAll
->     | PrivSelect [Name]
->     | PrivDelete
->     | PrivInsert [Name]
->     | PrivUpdate [Name]
->     | PrivReferences [Name]
->     | PrivUsage
->     | PrivTrigger
->     | PrivExecute
->     deriving (Eq,Show,Read,Data,Typeable)
-
-> -- | Comment. Useful when generating SQL code programmatically. The
-> -- parser doesn't produce these.
-> newtype Comment = BlockComment String
->                deriving (Eq,Show,Read,Data,Typeable)
-
diff --git a/Makefile b/Makefile
index 9a1aa2a..83f3e84 100644
--- a/Makefile
+++ b/Makefile
@@ -50,14 +50,14 @@ build/ocean.css : website/ocean.css
 	mkdir -p build
 	cp website/ocean.css build
 
-build/index.html : website/index.asciidoc website/AddLinks.lhs
-	asciidoctor website/index.asciidoc -o - | cabal -v0 exec runhaskell website/AddLinks.lhs > build/index.html
+build/index.html : website/index.asciidoc website/AddLinks.hs
+	asciidoctor website/index.asciidoc -o - | cabal -v0 exec runhaskell website/AddLinks.hs > build/index.html
 
-build/supported_sql.html : website/supported_sql.asciidoc website/AddLinks.lhs
-	asciidoctor website/supported_sql.asciidoc -o - | cabal -v0 exec runhaskell website/AddLinks.lhs > build/supported_sql.html
+build/supported_sql.html : website/supported_sql.asciidoc website/AddLinks.hs
+	asciidoctor website/supported_sql.asciidoc -o - | cabal -v0 exec runhaskell website/AddLinks.hs > build/supported_sql.html
 
-build/test_cases.html : website/RenderTestCases.lhs
-	cabal -v0 exec runhaskell -- --ghc-arg=-package=pretty-show -itools website/RenderTestCases.lhs > build/test_cases.asciidoc
+build/test_cases.html : website/RenderTestCases.hs
+	cabal -v0 exec runhaskell -- --ghc-arg=-package=pretty-show -itools website/RenderTestCases.hs > build/test_cases.asciidoc
 	asciidoctor build/test_cases.asciidoc -o - | \
 	    sed -e "s/max-width:62\.5em//g" > build/test_cases.html
 	# TODO: reduce the text size on the test cases page
diff --git a/TODO b/TODO
index 9ac2e97..e085eb5 100644
--- a/TODO
+++ b/TODO
@@ -156,7 +156,7 @@ reconsider the names and structure of the constructors in the syntax
 refactor the typename parser - it's a real mess
 fix the lexing
 
-add documentation in Parser.lhs on the left factoring/error handling
+add documentation in Parser.hs on the left factoring/error handling
    approach
 
 fixes:
diff --git a/simple-sql-parser.cabal b/simple-sql-parser.cabal
index 21c44e5..dd2851a 100644
--- a/simple-sql-parser.cabal
+++ b/simple-sql-parser.cabal
@@ -59,7 +59,7 @@ library
 Test-Suite Tests
   import:              shared-properties
   type:                exitcode-stdio-1.0
-  main-is:             RunTests.lhs
+  main-is:             RunTests.hs
   hs-source-dirs:      tools
   Build-Depends:       simple-sql-parser,
                        tasty >= 1.1 && < 1.6,
@@ -93,7 +93,7 @@ Test-Suite Tests
 
 executable SimpleSqlParserTool
   import:              shared-properties
-  main-is:             SimpleSqlParserTool.lhs
+  main-is:             SimpleSqlParserTool.hs
   hs-source-dirs:      tools
   Build-Depends:       simple-sql-parser,
                        pretty-show >= 1.6 && < 1.10
@@ -104,7 +104,7 @@ executable SimpleSqlParserTool
 
 executable Fixity
   import:              shared-properties
-  main-is:             Fixity.lhs
+  main-is:             Fixity.hs
   hs-source-dirs:      tools
   Build-Depends:       simple-sql-parser,
                        pretty-show >= 1.6 && < 1.10,
diff --git a/tools/Filter.hs b/tools/Filter.hs
new file mode 100644
index 0000000..6ddc96d
--- /dev/null
+++ b/tools/Filter.hs
@@ -0,0 +1,35 @@
+
+import System.IO
+import System.Environment
+
+
+main :: IO ()
+main = do
+    [a] <- getArgs
+    r <- readFile a
+    let ls = lines r
+        a = noAdjacentBlankLines ls
+        b = concat $ combineGroups $ group [] a
+    putStrLn $ unlines b
+
+noAdjacentBlankLines [] = []
+noAdjacentBlankLines [a] = [a]
+noAdjacentBlankLines ("":xs@("":_)) = noAdjacentBlankLines xs
+noAdjacentBlankLines (x:xs) = x:noAdjacentBlankLines xs
+
+group :: [String] -> [String] -> [[String]]
+group acc [] = [acc]
+group acc ("":xs) = reverse ("":acc) : group [] xs
+group acc (x:xs) = group (x : acc) xs
+
+combineGroups :: [[String]] -> [[String]]
+combineGroups [] = []
+combineGroups (x@(('<':_):_):xs) | gs <- map trim x
+                                 , ns <- trim $ unwords gs
+                                 , length ns < 80 = [ns ++ "\n"] : combineGroups xs
+combineGroups (x:xs) = x:combineGroups xs
+
+trim :: String -> String
+trim = x . x
+   where
+     x = dropWhile (==' ') . reverse
diff --git a/tools/Filter.lhs b/tools/Filter.lhs
deleted file mode 100644
index a179c03..0000000
--- a/tools/Filter.lhs
+++ /dev/null
@@ -1,35 +0,0 @@
-
-> import System.IO
-> import System.Environment
-
-
-> main :: IO ()
-> main = do
->     [a] <- getArgs
->     r <- readFile a
->     let ls = lines r
->         a = noAdjacentBlankLines ls
->         b = concat $ combineGroups $ group [] a
->     putStrLn $ unlines b
-
-> noAdjacentBlankLines [] = []
-> noAdjacentBlankLines [a] = [a]
-> noAdjacentBlankLines ("":xs@("":_)) = noAdjacentBlankLines xs
-> noAdjacentBlankLines (x:xs) = x:noAdjacentBlankLines xs
-
-> group :: [String] -> [String] -> [[String]]
-> group acc [] = [acc]
-> group acc ("":xs) = reverse ("":acc) : group [] xs
-> group acc (x:xs) = group (x : acc) xs
-
-> combineGroups :: [[String]] -> [[String]]
-> combineGroups [] = []
-> combineGroups (x@(('<':_):_):xs) | gs <- map trim x
->                                  , ns <- trim $ unwords gs
->                                  , length ns < 80 = [ns ++ "\n"] : combineGroups xs
-> combineGroups (x:xs) = x:combineGroups xs
-
-> trim :: String -> String
-> trim = x . x
->    where
->      x = dropWhile (==' ') . reverse
diff --git a/tools/FilterSpaces.hs b/tools/FilterSpaces.hs
new file mode 100644
index 0000000..db1d0ca
--- /dev/null
+++ b/tools/FilterSpaces.hs
@@ -0,0 +1,24 @@
+
+--import System.IO
+import System.Environment
+
+main :: IO ()
+main = do
+    [a] <- getArgs
+    r <- readFile a
+    let ls = lines r
+    putStrLn $ unlines $ map dedupeSpaces ls
+
+
+dedupeSpaces :: String -> String
+dedupeSpaces [] = []
+-- don't start until after the leading spaces
+-- including literate haskell source lines
+dedupeSpaces xs@(x:_) | x `notElem` " >" = dedupeSpaces' xs
+dedupeSpaces (x:xs) = x : dedupeSpaces xs
+
+dedupeSpaces' :: String -> String
+dedupeSpaces' (' ':xs@(' ':_)) = dedupeSpaces' xs
+dedupeSpaces' (x:xs) = x : dedupeSpaces' xs
+dedupeSpaces' [] = []
+
diff --git a/tools/FilterSpaces.lhs b/tools/FilterSpaces.lhs
deleted file mode 100644
index d43b113..0000000
--- a/tools/FilterSpaces.lhs
+++ /dev/null
@@ -1,24 +0,0 @@
-
-> --import System.IO
-> import System.Environment
-
-> main :: IO ()
-> main = do
->     [a] <- getArgs
->     r <- readFile a
->     let ls = lines r
->     putStrLn $ unlines $ map dedupeSpaces ls
-
-
-> dedupeSpaces :: String -> String
-> dedupeSpaces [] = []
-> -- don't start until after the leading spaces
-> -- including literate haskell source lines
-> dedupeSpaces xs@(x:_) | x `notElem` " >" = dedupeSpaces' xs
-> dedupeSpaces (x:xs) = x : dedupeSpaces xs
-
-> dedupeSpaces' :: String -> String
-> dedupeSpaces' (' ':xs@(' ':_)) = dedupeSpaces' xs
-> dedupeSpaces' (x:xs) = x : dedupeSpaces' xs
-> dedupeSpaces' [] = []
-
diff --git a/tools/Fixity.hs b/tools/Fixity.hs
new file mode 100644
index 0000000..e1a0d53
--- /dev/null
+++ b/tools/Fixity.hs
@@ -0,0 +1,720 @@
+
+{-
+= Fixity fixups
+
+The point of this code is to be able to take a table of fixity
+information for unary and binary operators, then adjust an ast to
+match these fixities. The standard way of handling this is handling
+fixities at the parsing stage.
+
+For the SQL parser, this is difficult because there is lots of weird
+syntax for operators (such as prefix and postfix multiple keyword
+operators, between, etc.).
+
+An alterative idea which is used in some places is to parse the tree
+regarding all the operators to have the same precedence and left
+associativity, then correct the fixity in a pass over the ast after
+parsing. Would also like to use this to fix the fixity for the join
+trees, and set operations, after parsing them. TODO: anything else?
+
+
+Approach
+
+Really not sure how to get this correct. So: lots of testing
+
+Basic testing idea: create an expression, then write down manually how
+the expression should parse with correct fixity. Can write down the
+expression in concrete syntax, and the correct fixity version using
+parens.
+
+Then can parse the expression, fix it, parse the fixed expression,
+remove the parens and compare them to make sure they are equal.
+
+Second layer of testing. For each source expression parsed, run it
+through a generator which will generate every version of that tree by
+choosing all possibilities of fixities on a token by token basis. This
+will ensure the fixity fixer is robust. An alternative approach is to
+guarantee the parser will produce trees where all the fixities are
+known (e.g. unary operators always bind tighter than binary, binary
+are all left associative, prefix unary bind tighter than postfix. This
+way, the fix code can make some assumptions and have less code. We
+will stick with the full general version which is more robust.
+
+Another testing approach is to parse the tree with our non fixity
+respecting parser then fix it, and also parse it with a fixity
+respecting expression parser, and check the results are the same. This
+is difficult with the parsec build expression parser which doesn't
+handle nested unary operators, so have to find or write another build
+expression parser. We can test the fixer with simple operators (single
+symbol prefix, postfix and binary ops) and then use it on the complex
+sql ast trees.
+
+Can also try to generate trees ala quickcheck/smallcheck, then check
+them with the fixer and the build expression parser.
+
+generate a tree:
+
+start with a term
+then roll dice:
+  add a prefix
+  add a postfix
+  do nothing
+then roll dice
+  add a binary op
+  for the second arg, recurse the algo
+
+
+algorithm:
+
+consider possible cases:
+binop with two binops args
+binop with prefix on left
+binop with postfix on right
+postfix with prefix inside
+prefix with postfix inside
+postfix with binop inside
+prefix with binop inside
+
+write a function to deal with each case and try to compose
+
+Tasks:
+
+write unary op tests: on each other, and with binary ops
+figure out how to generate trees
+do the step one tests (write the fixity with parens)
+check out parsers expression parser
+see if can generate trees using smallcheck
+try to test these trees against expression parser
+  otherwise, generate tree, generate variations, check fixity always
+produces same result
+
+
+
+
+todo:
+
+1. more tests for unary operators with each other
+2. moving unary operators inside and outside binary operators:
+   have to think about how this will work in general case
+3. ways to generate lots of tests and check them
+   -> what about creating a parser which parses to a list of all possible
+      parses with different fixities for each operator it sees?
+4. ambiguous fixity cases - need position annotation to do these nicely
+5. real sql: how to work with a variety of ast nodes
+6. plug into simple-sql-parser
+7. refactor the simple-sql-parser parsing code
+8. simple-sql-parser todo for sqream: add other dml, dialects,
+   procedural?
+9. testing idea: write big expressions with explicit parens everywhere
+   parse this
+   remove the parens
+   pretty print, then parse and fixfixity to see if same
+   then generate all variations of tree as if the fixities are different
+     and then fixfixity to check it restores the original
+
+
+write fixity tests
+write code to do the fixing
+add error cases: put it in the either monad to report these
+
+check the descend
+then: move to real sql
+  different abstract representations of binops, etc.
+  what is the best way to deal with this? typeclass? conversion to and
+  from a generic tree?
+
+
+
+
+
+can the binops be fixed on their own (precedence and assocativity)
+and then the prefix and postfix ops in separate passes
+
+what about a pass which puts the tree into canonical form:
+all left associative, all unary ops tight as possible?
+then the fixer can be easier?
+-}
+
+
+
+
+
+{-# LANGUAGE DeriveDataTypeable,TupleSections #-}
+import Data.Data
+
+import Text.Parsec.String (Parser)
+import Text.Parsec (try)
+import Text.Parsec.Char
+import Text.Parsec.Combinator
+import Text.Parsec (parse,ParseError)
+import Control.Applicative ((<|>),many) -- ((<**>),(<$>),(<*), (*>),(<*>), (<$), (<|>), many)
+--import qualified Text.Parsec.String.Expr as E
+import Control.Monad
+--import Data.List (intercalate)
+import Data.Maybe ()
+--import qualified Test.HUnit as H
+--import FunctionsAndTypesForParsing
+import Debug.Trace
+import Text.Show.Pretty
+import Data.List
+import Control.Applicative
+
+import qualified Test.Tasty as T
+import qualified Test.Tasty.HUnit as H
+
+
+data Expr = BinOp Expr String Expr
+          | PrefOp String Expr
+          | PostOp String Expr
+          | Iden String
+          | Lit String
+          | App String [Expr]
+          | Parens Expr
+            deriving (Eq,Show,Data,Typeable)
+
+{-
+--------
+
+quick parser
+-}
+
+parensValue :: Parser Expr
+parensValue = Parens <$> parens valueExpr
+
+idenApp :: Parser Expr
+idenApp = try $ do
+    i <- identifier
+    guard (i `notElem` ["not", "and", "or", "is"])
+    choice [do
+            args <- parens (commaSep valueExpr)
+            return $ App i args
+           ,return $ Iden i
+           ]
+
+lit :: Parser Expr
+lit = stringLit <|> numLit
+    where
+      stringLit = Lit <$> lexeme (char '\'' *> manyTill anyChar (char '\''))
+      numLit = do
+        x <- lexeme (many1 digit)
+        let y :: Integer
+            y = read x
+        return $ Lit $ show y
+
+prefOp :: Parser Expr
+prefOp = sym <|> kw
+   where
+      sym = do
+            let prefOps = ["+", "-"]
+            s <- choice $ map symbol prefOps
+            v <- term
+            return $ PrefOp s v
+      kw = do
+           let prefOps = ["not"]
+           i <- identifier
+           guard (i `elem` prefOps)
+           v <- term
+           return $ PrefOp i v
+
+postOp :: Parser (Expr -> Expr)
+postOp = try $ do
+    let kws = ["is null"]
+        kwsp = map (\a -> try $ do
+                       let x :: [String]
+                           x = words a
+                       mapM_ keyword_ x
+                       return $ PostOp a
+                       ) kws
+    choice kwsp
+
+binOp :: Parser (Expr -> Expr -> Expr)
+binOp = symbolBinOp <|> kwBinOp
+  where
+    symbolBinOp = do
+                  let binOps = ["+", "-", "*", "/"]
+                  s <- choice $ map symbol binOps
+                  return $ \a b -> BinOp a s b
+    kwBinOp = do
+              let kwBinOps = ["and", "or"]
+              i <- identifier
+              guard (i `elem` kwBinOps)
+              return $ \a b -> BinOp a i b
+
+term :: Parser Expr
+term = (parensValue
+       <|> try prefOp
+       <|> idenApp
+       <|> lit)
+       <??*> postOp
+
+-- (<??>) :: Parser a -> Parser (a -> a) -> Parser a
+-- p <??> q = p <**> option id q
+
+(<??*>) :: Parser a -> Parser (a -> a) -> Parser a
+p <??*> q = foldr ($) <$> p <*> (reverse <$> many q)
+
+valueExpr :: Parser Expr
+valueExpr = chainl1 term binOp
+
+
+-- | Run a parser between lexed parentheses.
+parens :: Parser a -> Parser a
+parens = between openParen closeParen
+
+openParen :: Parser Char
+openParen = lexeme $ char '('
+closeParen :: Parser Char
+closeParen = lexeme $ char ')'
+
+-- | Parse exactly the given symbolic operator. Consumes a maximal run
+-- of operator characters and then checks it equals s, so e.g. "<"
+-- will not match a "<=" in the input.
+symbol :: String -> Parser String
+symbol s = try $ lexeme $ do
+    u <- many1 (oneOf "<>=+-^%/*!|")
+    guard (s == u)
+    return s
+
+-- | Parse an identifier: letter or underscore, then letters, digits
+-- or underscores. No keyword filtering here; callers guard as needed.
+identifier :: Parser String
+identifier = lexeme ((:) <$> firstChar <*> many nonFirstChar)
+  where
+    firstChar = letter <|> char '_'
+    nonFirstChar = digit <|> firstChar
+
+-- | Parse exactly the given keyword (a whole identifier equal to k),
+-- backtracking on failure.
+keyword :: String -> Parser String
+keyword k = try $ do
+    i <- identifier
+    guard (i == k)
+    return k
+
+-- | keyword, discarding the result.
+keyword_ :: String -> Parser ()
+keyword_ = void . keyword
+
+-- | Skip whitespace, line comments (-- to end of line or eof) and
+-- block comments (/* ... */), in any combination; always succeeds.
+whitespace :: Parser ()
+whitespace =
+    choice [simpleWhitespace *> whitespace
+           ,lineComment *> whitespace
+           ,blockComment *> whitespace
+           ,return ()]
+  where
+    lineComment = try (string "--")
+                  *> manyTill anyChar (void (char '\n') <|> eof)
+    blockComment = try (string "/*")
+                   *> manyTill anyChar (try $ string "*/")
+    simpleWhitespace = void $ many1 (oneOf " \t\n")
+-- | Run a parser and skip any trailing whitespace/comments.
+lexeme :: Parser a -> Parser a
+lexeme p = p <* whitespace
+
+-- | Parse a lexed comma.
+comma :: Parser Char
+comma = lexeme $ char ','
+
+-- | Zero or more occurrences of a parser, separated by commas.
+commaSep :: Parser a -> Parser [a]
+commaSep = (`sepBy` comma)
+
+-- | Parse a complete expression: leading whitespace allowed, the whole
+-- input must be consumed.
+parseExpr :: String -> Either ParseError Expr
+parseExpr = parse (whitespace *> valueExpr <* eof) ""
+
+-- --------------
+
+-- | Operator associativity.
+data Assoc = AssocLeft | AssocRight | AssocNone deriving (Eq,Show)
+
+-- | Fixity table: operator name to (precedence, associativity);
+-- higher precedence binds tighter.
+type Fixities = [(String, (Int, Assoc))]
+
+-- | Rewrite an expression tree (parsed as if all operators had equal
+-- precedence and were left associative) so it respects the given
+-- fixity table. Three bottom-up passes run right to left: first
+-- nested prefix/postfix precedence, then binary associativity, then
+-- binary precedence. Operators missing from the table are traced and
+-- left in place.
+fixFixity :: Fixities -> Expr -> Expr
+fixFixity fixities = fixBinOpPrecedence . fixBinOpAssociativity . fixNestedPrefPostPrec
+  where
+    -- rotate BinOp nodes so chains of same-associativity operators
+    -- nest in the direction the table asks for
+    fixBinOpAssociativity e = case e of
+        BinOp a op b ->
+            let a' = fixBinOpAssociativity a
+                b' = fixBinOpAssociativity b
+                def = BinOp a' op b'
+            in case (a',b') of
+                -- both
+                -- a1 op1 a2 op b1 op2 b2
+                (BinOp a1 op1 a2
+                 ,BinOp b1 op2 b2)
+                  | Just (_p,opa) <- lookupFixity op
+                  , Just (_p,op1a) <- lookupFixity op1
+                  , Just (_p,op2a) <- lookupFixity op2
+                  -> case (opa, op1a, op2a) of
+                       (AssocRight, AssocRight, AssocRight) ->
+                           BinOp a1 op1 (BinOp a2 op (BinOp b1 op2 b2))
+                       (AssocLeft, AssocLeft, AssocLeft) ->
+                           BinOp (BinOp (BinOp a1 op1 a2) op b1) op2 b2
+                       --todo: other cases
+                       _ -> def
+                -- just left side
+                (BinOp a1 op1 a2, _)
+                -- a1 op1 a2 op b'
+                  | Just (_p,opa) <- lookupFixity op
+                  , Just (_p,op1a) <- lookupFixity op1
+                  -> case (opa, op1a) of
+                       (AssocRight, AssocRight) ->
+                           BinOp a1 op1 (BinOp a2 op b')
+                       (AssocLeft, AssocLeft) ->
+                           BinOp (BinOp a1 op1 a2) op b'
+                       _ -> def
+
+                -- just right side
+                (_, BinOp b1 op2 b2)
+                -- e op b1 op2 b2
+                  | Just (_p,opa) <- lookupFixity op
+                  , Just (_p,op2a) <- lookupFixity op2
+                  -> case (opa, op2a) of
+                       (AssocRight, AssocRight) ->
+                           BinOp a' op (BinOp b1 op2 b2)
+                       (AssocLeft, AssocLeft) ->
+                           BinOp (BinOp a' op b1) op2 b2
+                       _ -> def
+                _ -> def
+        _ -> e
+
+    -- rotate BinOp nodes so higher-precedence operators end up deeper
+    -- in the tree; some mixed-precedence cases are still todo
+    fixBinOpPrecedence e = case e of
+        BinOp a op b ->
+            let a' = fixBinOpPrecedence a
+                b' = fixBinOpPrecedence b
+                def = BinOp a' op b'
+            in case (a',b') of
+                -- both
+                -- a1 op1 a2 op b1 op2 b2
+                -- all equal
+                -- p > or < p1 == p2
+                -- p == p1 < or > p2
+                (BinOp a1 op1 a2
+                 ,BinOp b1 op2 b2)
+                  | Just (p,_opa) <- lookupFixity op
+                  , Just (p1,_op1a) <- lookupFixity op1
+                  , Just (p2,_op2a) <- lookupFixity op2
+                  -> case () of
+                        -- _ | trace ("both prec " ++ show (p,p1,p2)) False -> undefined
+                        _ | p == p1 && p1 == p2 -> def
+                        _ | p > p1 && p1 == p2 -> BinOp a1 op1 b'
+                        _ | p < p1 && p1 == p2 -> BinOp (BinOp a1 op1 a2) op b'
+                        _ | p == p1 && p2 > p1 -> BinOp a' op (BinOp b1 op2 b2)
+                        _ | p == p1 && p2 < p1 -> def -- todo
+                        _ | otherwise -> def
+                -- just left side
+                (BinOp a1 op1 a2, _)
+                -- a1 op1 a2 op b'
+                  | Just (p,_opa) <- lookupFixity op
+                  , Just (p1,_op1a) <- lookupFixity op1
+                  -> case () of
+                        --  _ | trace ("left prec " ++ show (p,p1)) False -> undefined
+                        _ | p < p1 -> {-trace "b1" $ -}BinOp (BinOp a1 op1 a2) op b'
+                          | p > p1 -> {-trace "b2" $ -}BinOp a1 op1 (BinOp a2 op b')
+                          | otherwise -> def
+
+                -- just right side
+                (_, BinOp b1 op2 b2)
+                -- a' op b1 op2 b2
+                  | Just (p,_opa) <- lookupFixity op
+                  , Just (p2,_op1a) <- lookupFixity op2
+                  -> case () of
+                        --  _ | trace ("right prec " ++ show (p,p2)) False -> undefined
+                        _ | p > p2 -> {-trace "b1" $ -}BinOp (BinOp a' op b1) op2 b2
+                          | p < p2 -> {-trace "b2" $ -}BinOp a' op (BinOp b1 op2 b2)
+                          | otherwise -> {-trace "def" $ -} def
+                _ -> def
+        _ -> e
+
+    -- swap directly-nested prefix/postfix operators when the outer one
+    -- binds tighter than the inner one
+    fixNestedPrefPostPrec e = case e of
+        PrefOp op a ->
+            let a' = fixNestedPrefPostPrec a
+            in case a' of
+                 PostOp op1 b | Just (p,_) <- lookupFixity op
+                              , Just (p1,_) <- lookupFixity op1
+                              , p > p1 -> PostOp op1 (PrefOp op b)
+                 _ -> PrefOp op a'
+        PostOp op a ->
+            let a' = fixNestedPrefPostPrec a
+            in case a' of
+                 PrefOp op1 b | Just (p,_) <- lookupFixity op
+                              , Just (p1,_) <- lookupFixity op1
+                              , p > p1 -> PrefOp op1 (PostOp op b)
+                 _ -> PostOp op a'
+        _ -> e
+
+
+
+    -- look up an operator in the table, tracing a debug message when
+    -- it is absent (the caller then leaves that node unchanged)
+    lookupFixity :: String -> Maybe (Int,Assoc)
+    lookupFixity s = maybe (trace ("didn't find " ++ s ++ "\n" ++ ppShow fixities) Nothing)
+                     Just $ lookup s fixities
+
+
+-- | Fixity table approximating SQL operator precedence, highest
+-- binding first. Binary, prefix ("not") and postfix ("is null")
+-- operators share the one table.
+sqlFixity :: [(String, (Int, Assoc))]
+sqlFixity = [(".", (13, AssocLeft))
+            ,("[]", (12, AssocNone))
+
+{-
+unary + -
+todo: split the fixity table into prefix, binary and postfix
+
+todo: don't have explicit precedence numbers in the table??
+-}
+
+            ,("^", (10, AssocNone))]
+            ++ m ["*", "/", "%"] (9, AssocLeft)
+            ++ m ["+","-"] (8, AssocLeft)
+            ++ m ["<", ">", "=", "<=", ">=", "<>"] (4, AssocNone)
+            ++ [("is null", (3, AssocNone))
+               ,("not", (2, AssocRight))
+               ,("and", (1, AssocLeft))
+               ,("or", (0, AssocLeft))]
+
+  where
+    -- give every operator in l the same fixity a
+    m l a = map (,a) l
+
+{-
+-------
+
+some simple parser tests
+-}
+
+-- | Test tree: named groups, parser cases (input string plus expected
+-- ast) and fixity cases (fixity table, input ast, expected ast).
+data Test = Group String [Test]
+          | ParserTest String Expr
+          | FixityTest Fixities Expr Expr
+
+-- | Smoke tests for the expression parser: concrete syntax paired
+-- with the expected (fixity-naive) ast.
+parserTests :: Test
+parserTests = Group "parserTests" $ map (uncurry ParserTest) $
+    [("a", Iden "a")
+    ,("'test'", Lit "test")
+    ,("34", Lit "34")
+    ,("f()", App "f" [])
+    ,("f(3)", App "f" [Lit "3"])
+    ,("(7)", Parens (Lit "7"))
+    ,("a + 3", BinOp (Iden "a") "+" (Lit "3"))
+    ,("1 + 2 + 3", BinOp (BinOp (Lit "1") "+" (Lit "2")) "+" (Lit "3"))
+
+    ,("a or b", BinOp (Iden "a") "or" (Iden "b"))
+    ,("-1", PrefOp "-" (Lit "1"))
+    ,("not a", PrefOp "not" (Iden "a"))
+    ,("not not a", PrefOp "not" (PrefOp "not" (Iden "a")))
+    ,("a is null", PostOp "is null" (Iden "a"))
+    ,("a is null is null", PostOp "is null" (PostOp "is null" (Iden "a")))
+    ,("-a+3", BinOp (PrefOp "-" (Iden "a")) "+" (Lit "3"))
+    ,("a is null and b is null", BinOp (PostOp "is null" (Iden "a"))
+                                 "and"
+                                 (PostOp "is null" (Iden "b")))
+    ]
+
+-- | Build a tasty test case that parses the given string and checks
+-- the result equals the expected expression. Uses a real assertion so
+-- a bad parse fails the test run; the original only printed
+-- "OK"/"bad parse" and could never fail.
+makeParserTest :: String -> Expr -> T.TestTree
+makeParserTest s e = H.testCase s $ do
+    let a = parseExpr s
+    -- assertEqual takes the expected value before the actual one
+    H.assertEqual s (Right e) a
+
+{-
+------
+
+fixity checks
+
+test cases:
+-}
+
+
+-- | Fixity-fixer test data: (fixity table, input tree, expected
+-- fixed tree). The generated sections feed every tree shape produced
+-- by splitTree/generateTrees through the fixer and expect one
+-- canonical result.
+fixityTests :: Test
+fixityTests = Group "fixityTests" $
+     map (\(f,s,e) -> FixityTest f s e) $
+     [
+
+-- 2 bin ops wrong associativity left + null versions
+
+     (sqlFixity
+     ,i "a" `plus` (i "b" `plus` i "c")
+     ,(i "a" `plus` i "b") `plus` i "c")
+    ,(sqlFixity
+     ,(i "a" `plus` i "b") `plus` i "c"
+     ,(i "a" `plus` i "b") `plus` i "c")
+
+-- 2 bin ops wrong associativity right
+
+    ,(timesRight
+     ,i "a" `times` (i "b" `times` i "c")
+     ,i "a" `times` (i "b" `times` i "c"))
+    ,(timesRight
+     ,(i "a" `times` i "b") `times` i "c"
+     ,i "a" `times` (i "b" `times` i "c"))
+
+
+-- 2 bin ops wrong precedence left
+
+    ,(sqlFixity
+     ,i "a" `plus` (i "b" `times` i "c")
+     ,i "a" `plus` (i "b" `times` i "c"))
+
+    ,(sqlFixity
+     ,(i "a" `plus` i "b") `times` i "c"
+     ,i "a" `plus` (i "b" `times` i "c"))
+
+-- 2 bin ops wrong precedence right
+
+    ,(sqlFixity
+     ,(i "a" `times` i "b") `plus` i "c"
+     ,(i "a" `times` i "b") `plus` i "c")
+
+    ,(sqlFixity
+     ,i "a" `times` (i "b" `plus` i "c")
+     ,(i "a" `times` i "b") `plus` i "c")
+
+{-
+a + b * c + d
+a * b + c * d
+
+check all variations
+-}
+
+    ] ++
+      (let t = (i "a" `plus` i "b")
+               `times`
+               (i "c" `plus` i "d")
+           trs = generateTrees $ splitTree t
+       in [(sqlFixity, x
+           ,i "a" `plus` (i "b" `times` i "c")
+            `plus` i "d")
+          | x <- trs])
+      ++
+      (let t = (i "a" `times` i "b")
+               `plus`
+               (i "c" `times` i "d")
+           trs = generateTrees $ splitTree t
+       in [(sqlFixity, x
+           ,(i "a" `times` i "b")
+               `plus`
+               (i "c" `times` i "d"))
+          | x <- trs])
+
+
+    ++ [
+
+-- prefix then postfix wrong precedence
+
+     ([("+", (9, AssocNone))
+      ,("is null", (3, AssocNone))]
+     ,PrefOp "+" (PostOp "is null" (i "a"))
+     ,PostOp "is null" (PrefOp "+" (i "a")))
+
+    ,([("+", (9, AssocNone))
+      ,("is null", (3, AssocNone))]
+     ,PostOp "is null" (PrefOp "+" (i "a"))
+     ,PostOp "is null" (PrefOp "+" (i "a")))
+
+    ,([("+", (3, AssocNone))
+      ,("is null", (9, AssocNone))]
+     ,PrefOp "+" (PostOp "is null" (i "a"))
+     ,PrefOp "+" (PostOp "is null" (i "a")))
+
+    ,([("+", (3, AssocNone))
+      ,("is null", (9, AssocNone))]
+     ,PostOp "is null" (PrefOp "+" (i "a"))
+     ,PrefOp "+" (PostOp "is null" (i "a")))
+
+{-
+3-way unary operator movement:
+take a starting point and generate variations
+
+postfix on first arg of binop (cannot move) make sure precedence wants
+  it to move
+
+prefix on second arg of binop (cannot move)
+
+prefix on binop, precedence wrong
+postfix on binop precedence wrong
+prefix on first arg of binop, precedence wrong
+postfix on second arg of binop, precedence wrong
+
+ambiguous fixity tests
+
+sanity check: parens stops rearrangement
+
+check nesting 1 + f(expr)
+-}
+
+    ]
+    where
+      -- shorthand constructors for the test data above
+      plus a b = BinOp a "+" b
+      times a b = BinOp a "*" b
+      i a = Iden a
+      timesRight = [("*", (9, AssocRight))]
+
+-- testCase
+
+-- | Build a tasty test case checking that fixFixity rewrites the
+-- input expression into the expected tree.
+makeFixityTest :: Fixities -> Expr -> Expr -> T.TestTree
+makeFixityTest fs s e = H.testCase (show s) $ do
+    let s' = fixFixity fs s
+    -- assertEqual takes expected before actual; the original passed
+    -- them the wrong way round, mislabelling failure output
+    H.assertEqual "" e s'
+
+-- | All tests, parser and fixity, in one tree.
+tests :: Test
+tests = Group "Tests" [parserTests, fixityTests]
+
+-- | Convert our Test tree into a tasty TestTree.
+makeTest :: Test -> T.TestTree
+makeTest (Group n ts) = T.testGroup n $ map makeTest ts
+makeTest (ParserTest s e) = makeParserTest s e
+makeTest (FixityTest f s e) = makeFixityTest f s e
+
+{-
+--------
+
+ > tests :: T.TestTree
+ > tests = T.testGroup "Tests" $ map makeFixityTest fixityTests
+-}
+
+-- | Run the whole test tree with tasty's default runner.
+main :: IO ()
+main = T.defaultMain $ makeTest tests
+  {-do
+       mapM_ checkTest tests
+       mapM_ checkFixity fixityTests
+       let plus a b = BinOp a "+" b
+           times a b = BinOp a "*" b
+           i a = Iden a
+       let t = (i "a" `plus` i "b")
+               `times`
+               (i "c" `plus` i "d")
+           spl = splitTree t
+           trs = generateTrees spl
+       --putStrLn $ "\nSplit\n"
+       --putStrLn $ ppShow (fst spl, length $ snd spl)
+       --putStrLn $ show $ length trs
+       --putStrLn $ "\nTrees\n"
+       --putStrLn $ intercalate "\n" $ map show trs
+       return ()-}
+
+{-
+generating trees
+
+1. tree -> list
+val op val op val op ...
+(has to be two lists?)
+
+generate variations:
+pick numbers from 0 to n - 1 (n is the number of ops)
+choose the op at this position to be the root
+recurse on the two sides
+-}
+
+-- | Flatten a binary-operator tree, left to right, into its leaf
+-- expressions and the operator constructors between them (so the
+-- leaves list is one longer than the ops list for non-empty input).
+splitTree :: Expr -> ([Expr], [Expr->Expr->Expr])
+splitTree (BinOp a op b) = let (x,y) = splitTree a
+                               (z,w) = splitTree b
+                               -- fresh lambda names avoid shadowing
+                               -- the outer a and b bindings
+                           in (x++z, y++ [\l r -> BinOp l op r] ++ w)
+splitTree x = ([x],[])
+
+
+
+-- | Generate every binary tree shape combining the given leaves with
+-- the given operator constructors, preserving the left-to-right order
+-- of both lists. Requires length es == length ops + 1, except for the
+-- degenerate empty input which yields no trees.
+generateTrees :: ([Expr], [Expr->Expr->Expr]) -> [Expr]
+-- the empty case must precede the length guard: 0 /= 0 + 1 would
+-- otherwise raise the mismatch error for valid empty input (in the
+-- original this clause came last and was unreachable)
+generateTrees ([],[]) = []
+generateTrees (es,ops) | length es /= length ops + 1 =
+    error $ "mismatch in lengths " ++ show (length es, length ops)
+    ++"\n" ++ ppShow es ++ "\n"
+generateTrees ([a,b], [op]) = [op a b]
+generateTrees ([a], []) = [a]
+generateTrees (vs, ops) =
+    let n = length ops
+    in -- for each op position m, make it the root and recurse on the
+       -- two sides, combining every left subtree with every right one
+       concat $ flip map [0..n-1] $ \m ->
+         let (v1,v2) = splitAt (m + 1) vs
+             (ops1,op':ops2) = splitAt m ops
+         in [op' t u | t <- generateTrees (v1,ops1)
+                     , u <- generateTrees (v2,ops2)]
+
+
+
diff --git a/tools/Fixity.lhs b/tools/Fixity.lhs
deleted file mode 100644
index 886608d..0000000
--- a/tools/Fixity.lhs
+++ /dev/null
@@ -1,702 +0,0 @@
-
-= Fixity fixups
-
-The point of this code is to be able to take a table of fixity
-information for unary and binary operators, then adjust an ast to
-match these fixities. The standard way of handling this is handling
-fixities at the parsing stage.
-
-For the SQL parser, this is difficult because there is lots of weird
-syntax for operators (such as prefix and postfix multiple keyword
-operators, between, etc.).
-
-An alterative idea which is used in some places is to parse the tree
-regarding all the operators to have the same precedence and left
-associativity, then correct the fixity in a pass over the ast after
-parsing. Would also like to use this to fix the fixity for the join
-trees, and set operations, after parsing them. TODO: anything else?
-
-
-Approach
-
-Really not sure how to get this correct. So: lots of testing
-
-Basic testing idea: create an expression, then write down manually how
-the expression should parse with correct fixity. Can write down the
-expression in concrete syntax, and the correct fixity version using
-parens.
-
-Then can parse the expression, fix it, parse the fixed expression,
-remove the parens and compare them to make sure they are equal.
-
-Second layer of testing. For each source expression parsed, run it
-through a generator which will generate every version of that tree by
-choosing all possibilities of fixities on a token by token basis. This
-will ensure the fixity fixer is robust. An alternative approach is to
-guarantee the parser will produce trees where all the fixities are
-known (e.g. unary operators always bind tighter than binary, binary
-are all left associative, prefix unary bind tighter than postfix. This
-way, the fix code can make some assumptions and have less code. We
-will stick with the full general version which is more robust.
-
-Another testing approach is to parse the tree with our non fixity
-respecting parser then fix it, and also parse it with a fixity
-respecting expression parser, and check the results are the same. This
-is difficult with the parsec build expression parser which doesn't
-handle nested unary operators, so have to find or write another build
-expression parser. We can test the fixer with simple operators (single
-symbol prefix, postfix and binary ops) and then use it on the complex
-sql ast trees.
-
-Can also try to generate trees ala quickcheck/smallcheck, then check
-them with the fixer and the build expression parser.
-
-generate a tree:
-
-start with a term
-then roll dice:
-  add a prefix
-  add a postfix
-  do nothing
-then roll dice
-  add a binary op
-  for the second arg, recurse the algo
-
-
-algorithm:
-
-consider possible cases:
-binop with two binops args
-binop with prefix on left
-binop with postfix on right
-postfix with prefix inside
-prefix with postfix inside
-postfix with binop inside
-prefix with binop inside
-
-write a function to deal with each case and try to compose
-
-Tasks:
-
-write unary op tests: on each other, and with binary ops
-figure out how to generate trees
-do the step one tests (write the fixity with parens)
-check out parsers expression parser
-see if can generate trees using smallcheck
-try to test these trees against expression parser
-  otherwise, generate tree, generate variations, check fixity always
-produces same result
-
-
-
-
-todo:
-
-1. more tests for unary operators with each other
-2. moving unary operators inside and outside binary operators:
-   have to think about how this will work in general case
-3. ways to generate lots of tests and check them
-   -> what about creating a parser which parses to a list of all possible
-      parses with different fixities for each operator it sees?
-4. ambiguous fixity cases - need position annotation to do these nicely
-5. real sql: how to work with a variety of ast nodes
-6. plug into simple-sql-parser
-7. refactor the simple-sql-parser parsing code
-8. simple-sql-parser todo for sqream: add other dml, dialects,
-   procedural?
-9. testing idea: write big expressions with explicit parens everywhere
-   parse this
-   remove the parens
-   pretty print, then parse and fixfixity to see if same
-   then generate all variations of tree as if the fixities are different
-     and then fixfixity to check it restores the original
-
-
-write fixity tests
-write code to do the fixing
-add error cases: put it in the either monad to report these
-
-check the descend
-then: move to real sql
-  different abstract representations of binops, etc.
-  what is the best way to deal with this? typeclass? conversion to and
-  from a generic tree?
-
-
-
-
-
-can the binops be fixed on their own (precedence and assocativity)
-and then the prefix and postfix ops in separate passes
-
-what about a pass which puts the tree into canonical form:
-all left associative, all unary ops tight as possible?
-then the fixer can be easier?
-
-
-
-
-
-> {-# LANGUAGE DeriveDataTypeable,TupleSections #-}
-> import Data.Data
-
-> import Text.Parsec.String (Parser)
-> import Text.Parsec (try)
-> import Text.Parsec.Char
-> import Text.Parsec.Combinator
-> import Text.Parsec (parse,ParseError)
-> import Control.Applicative ((<|>),many) -- ((<**>),(<$>),(<*), (*>),(<*>), (<$), (<|>), many)
-> --import qualified Text.Parsec.String.Expr as E
-> import Control.Monad
-> --import Data.List (intercalate)
-> import Data.Maybe ()
-> --import qualified Test.HUnit as H
-> --import FunctionsAndTypesForParsing
-> import Debug.Trace
-> import Text.Show.Pretty
-> import Data.List
-> import Control.Applicative
-
-> import qualified Test.Tasty as T
-> import qualified Test.Tasty.HUnit as H
-
-
-> data Expr = BinOp Expr String Expr
->           | PrefOp String Expr
->           | PostOp String Expr
->           | Iden String
->           | Lit String
->           | App String [Expr]
->           | Parens Expr
->             deriving (Eq,Show,Data,Typeable)
-
---------
-
-quick parser
-
-> parensValue :: Parser Expr
-> parensValue = Parens <$> parens valueExpr
-
-> idenApp :: Parser Expr
-> idenApp = try $ do
->     i <- identifier
->     guard (i `notElem` ["not", "and", "or", "is"])
->     choice [do
->             args <- parens (commaSep valueExpr)
->             return $ App i args
->            ,return $ Iden i
->            ]
-
-> lit :: Parser Expr
-> lit = stringLit <|> numLit
->     where
->       stringLit = Lit <$> lexeme (char '\'' *> manyTill anyChar (char '\''))
->       numLit = do
->         x <- lexeme (many1 digit)
->         let y :: Integer
->             y = read x
->         return $ Lit $ show y
-
-> prefOp :: Parser Expr
-> prefOp = sym <|> kw
->    where
->       sym = do
->             let prefOps = ["+", "-"]
->             s <- choice $ map symbol prefOps
->             v <- term
->             return $ PrefOp s v
->       kw = do
->            let prefOps = ["not"]
->            i <- identifier
->            guard (i `elem` prefOps)
->            v <- term
->            return $ PrefOp i v
-
-> postOp :: Parser (Expr -> Expr)
-> postOp = try $ do
->     let kws = ["is null"]
->         kwsp = map (\a -> try $ do
->                        let x :: [String]
->                            x = words a
->                        mapM_ keyword_ x
->                        return $ PostOp a
->                        ) kws
->     choice kwsp
-
-> binOp :: Parser (Expr -> Expr -> Expr)
-> binOp = symbolBinOp <|> kwBinOp
->   where
->     symbolBinOp = do
->                   let binOps = ["+", "-", "*", "/"]
->                   s <- choice $ map symbol binOps
->                   return $ \a b -> BinOp a s b
->     kwBinOp = do
->               let kwBinOps = ["and", "or"]
->               i <- identifier
->               guard (i `elem` kwBinOps)
->               return $ \a b -> BinOp a i b
-
-> term :: Parser Expr
-> term = (parensValue
->        <|> try prefOp
->        <|> idenApp
->        <|> lit)
->        <??*> postOp
-
-> -- (<??>) :: Parser a -> Parser (a -> a) -> Parser a
-> -- p <??> q = p <**> option id q
-
-> (<??*>) :: Parser a -> Parser (a -> a) -> Parser a
-> p <??*> q = foldr ($) <$> p <*> (reverse <$> many q)
-
-> valueExpr :: Parser Expr
-> valueExpr = chainl1 term binOp
-
-
-> parens :: Parser a -> Parser a
-> parens = between openParen closeParen
-
-> openParen :: Parser Char
-> openParen = lexeme $ char '('
-> closeParen :: Parser Char
-> closeParen = lexeme $ char ')'
-
-> symbol :: String -> Parser String
-> symbol s = try $ lexeme $ do
->     u <- many1 (oneOf "<>=+-^%/*!|")
->     guard (s == u)
->     return s
-
-> identifier :: Parser String
-> identifier = lexeme ((:) <$> firstChar <*> many nonFirstChar)
->   where
->     firstChar = letter <|> char '_'
->     nonFirstChar = digit <|> firstChar
-
-> keyword :: String -> Parser String
-> keyword k = try $ do
->     i <- identifier
->     guard (i == k)
->     return k
-
-> keyword_ :: String -> Parser ()
-> keyword_ = void . keyword
-
-> whitespace :: Parser ()
-> whitespace =
->     choice [simpleWhitespace *> whitespace
->            ,lineComment *> whitespace
->            ,blockComment *> whitespace
->            ,return ()]
->   where
->     lineComment = try (string "--")
->                   *> manyTill anyChar (void (char '\n') <|> eof)
->     blockComment = try (string "/*")
->                    *> manyTill anyChar (try $ string "*/")
->     simpleWhitespace = void $ many1 (oneOf " \t\n")
-> lexeme :: Parser a -> Parser a
-> lexeme p = p <* whitespace
-> comma :: Parser Char
-> comma = lexeme $ char ','
-
-> commaSep :: Parser a -> Parser [a]
-> commaSep = (`sepBy` comma)
-
-> parseExpr :: String -> Either ParseError Expr
-> parseExpr = parse (whitespace *> valueExpr <* eof) ""
-
---------------
-
-> data Assoc = AssocLeft | AssocRight | AssocNone deriving (Eq,Show)
-
-> type Fixities = [(String, (Int, Assoc))]
-
-> fixFixity :: Fixities -> Expr -> Expr
-> fixFixity fixities = fixBinOpPrecedence . fixBinOpAssociativity . fixNestedPrefPostPrec
->   where
->     fixBinOpAssociativity e = case e of
->         BinOp a op b ->
->             let a' = fixBinOpAssociativity a
->                 b' = fixBinOpAssociativity b
->                 def = BinOp a' op b'
->             in case (a',b') of
->                 -- both
->                 -- a1 op1 a2 op b1 op2 b2
->                 (BinOp a1 op1 a2
->                  ,BinOp b1 op2 b2)
->                   | Just (_p,opa) <- lookupFixity op
->                   , Just (_p,op1a) <- lookupFixity op1
->                   , Just (_p,op2a) <- lookupFixity op2
->                   -> case (opa, op1a, op2a) of
->                        (AssocRight, AssocRight, AssocRight) ->
->                            BinOp a1 op1 (BinOp a2 op (BinOp b1 op2 b2))
->                        (AssocLeft, AssocLeft, AssocLeft) ->
->                            BinOp (BinOp (BinOp a1 op1 a2) op b1) op2 b2
->                        --todo: other cases
->                        _ -> def
->                 -- just left side
->                 (BinOp a1 op1 a2, _)
->                 -- a1 op1 a2 op b'
->                   | Just (_p,opa) <- lookupFixity op
->                   , Just (_p,op1a) <- lookupFixity op1
->                   -> case (opa, op1a) of
->                        (AssocRight, AssocRight) ->
->                            BinOp a1 op1 (BinOp a2 op b')
->                        (AssocLeft, AssocLeft) ->
->                            BinOp (BinOp a1 op1 a2) op b'
->                        _ -> def
-
->                 -- just right side
->                 (_, BinOp b1 op2 b2)
->                 -- e op b1 op2 b2
->                   | Just (_p,opa) <- lookupFixity op
->                   , Just (_p,op2a) <- lookupFixity op2
->                   -> case (opa, op2a) of
->                        (AssocRight, AssocRight) ->
->                            BinOp a' op (BinOp b1 op2 b2)
->                        (AssocLeft, AssocLeft) ->
->                            BinOp (BinOp a' op b1) op2 b2
->                        _ -> def
->                 _ -> def
->         _ -> e
-
->     fixBinOpPrecedence e = case e of
->         BinOp a op b ->
->             let a' = fixBinOpPrecedence a
->                 b' = fixBinOpPrecedence b
->                 def = BinOp a' op b'
->             in case (a',b') of
->                 -- both
->                 -- a1 op1 a2 op b1 op2 b2
->                 -- all equal
->                 -- p > or < p1 == p2
->                 -- p == p1 < or > p2
->                 (BinOp a1 op1 a2
->                  ,BinOp b1 op2 b2)
->                   | Just (p,_opa) <- lookupFixity op
->                   , Just (p1,_op1a) <- lookupFixity op1
->                   , Just (p2,_op2a) <- lookupFixity op2
->                   -> case () of
->                         -- _ | trace ("both prec " ++ show (p,p1,p2)) False -> undefined
->                         _ | p == p1 && p1 == p2 -> def
->                         _ | p > p1 && p1 == p2 -> BinOp a1 op1 b'
->                         _ | p < p1 && p1 == p2 -> BinOp (BinOp a1 op1 a2) op b'
->                         _ | p == p1 && p2 > p1 -> BinOp a' op (BinOp b1 op2 b2)
->                         _ | p == p1 && p2 < p1 -> def -- todo
->                         _ | otherwise -> def
->                 -- just left side
->                 (BinOp a1 op1 a2, _)
->                 -- a1 op1 a2 op b'
->                   | Just (p,_opa) <- lookupFixity op
->                   , Just (p1,_op1a) <- lookupFixity op1
->                   -> case () of
->                         --  _ | trace ("left prec " ++ show (p,p1)) False -> undefined
->                         _ | p < p1 -> {-trace "b1" $ -}BinOp (BinOp a1 op1 a2) op b'
->                           | p > p1 -> {-trace "b2" $ -}BinOp a1 op1 (BinOp a2 op b')
->                           | otherwise -> def
-
->                 -- just right side
->                 (_, BinOp b1 op2 b2)
->                 -- a' op b1 op2 b2
->                   | Just (p,_opa) <- lookupFixity op
->                   , Just (p2,_op1a) <- lookupFixity op2
->                   -> case () of
->                         --  _ | trace ("right prec " ++ show (p,p2)) False -> undefined
->                         _ | p > p2 -> {-trace "b1" $ -}BinOp (BinOp a' op b1) op2 b2
->                           | p < p2 -> {-trace "b2" $ -}BinOp a' op (BinOp b1 op2 b2)
->                           | otherwise -> {-trace "def" $ -} def
->                 _ -> def
->         _ -> e
-
->     fixNestedPrefPostPrec e = case e of
->         PrefOp op a ->
->             let a' = fixNestedPrefPostPrec a
->             in case a' of
->                  PostOp op1 b | Just (p,_) <- lookupFixity op
->                               , Just (p1,_) <- lookupFixity op1
->                               , p > p1 -> PostOp op1 (PrefOp op b)
->                  _ -> PrefOp op a'
->         PostOp op a ->
->             let a' = fixNestedPrefPostPrec a
->             in case a' of
->                  PrefOp op1 b | Just (p,_) <- lookupFixity op
->                               , Just (p1,_) <- lookupFixity op1
->                               , p > p1 -> PrefOp op1 (PostOp op b)
->                  _ -> PostOp op a'
->         _ -> e
-
-
-
->     lookupFixity :: String -> Maybe (Int,Assoc)
->     lookupFixity s = maybe (trace ("didn't find " ++ s ++ "\n" ++ ppShow fixities) Nothing)
->                      Just $ lookup s fixities
-
-
-> sqlFixity :: [(String, (Int, Assoc))]
-> sqlFixity = [(".", (13, AssocLeft))
->             ,("[]", (12, AssocNone))
-
-unary + -
-todo: split the fixity table into prefix, binary and postfix
-
-todo: don't have explicit precedence numbers in the table??
-
->             ,("^", (10, AssocNone))]
->             ++ m ["*", "/", "%"] (9, AssocLeft)
->             ++ m ["+","-"] (8, AssocLeft)
->             ++ m ["<", ">", "=", "<=", ">=", "<>"] (4, AssocNone)
->             ++ [("is null", (3, AssocNone))
->                ,("not", (2, AssocRight))
->                ,("and", (1, AssocLeft))
->                ,("or", (0, AssocLeft))]
-
->   where
->     m l a = map (,a) l
-
--------
-
-some simple parser tests
-
-> data Test = Group String [Test]
->           | ParserTest String Expr
->           | FixityTest Fixities Expr Expr
-
-> parserTests :: Test
-> parserTests = Group "parserTests" $ map (uncurry ParserTest) $
->     [("a", Iden "a")
->     ,("'test'", Lit "test")
->     ,("34", Lit "34")
->     ,("f()", App "f" [])
->     ,("f(3)", App "f" [Lit "3"])
->     ,("(7)", Parens (Lit "7"))
->     ,("a + 3", BinOp (Iden "a") "+" (Lit "3"))
->     ,("1 + 2 + 3", BinOp (BinOp (Lit "1") "+" (Lit "2")) "+" (Lit "3"))
-
->     ,("a or b", BinOp (Iden "a") "or" (Iden "b"))
->     ,("-1", PrefOp "-" (Lit "1"))
->     ,("not a", PrefOp "not" (Iden "a"))
->     ,("not not a", PrefOp "not" (PrefOp "not" (Iden "a")))
->     ,("a is null", PostOp "is null" (Iden "a"))
->     ,("a is null is null", PostOp "is null" (PostOp "is null" (Iden "a")))
->     ,("-a+3", BinOp (PrefOp "-" (Iden "a")) "+" (Lit "3"))
->     ,("a is null and b is null", BinOp (PostOp "is null" (Iden "a"))
->                                  "and"
->                                  (PostOp "is null" (Iden "b")))
->     ]
-
-> makeParserTest :: String -> Expr -> T.TestTree
-> makeParserTest s e = H.testCase s $ do
->     let a = parseExpr s
->     if (Right e == a)
->        then putStrLn $ s ++ " OK"
->        else putStrLn $ "bad parse " ++ s ++ " " ++ show a
-
-------
-
-fixity checks
-
-test cases:
-
-
-> fixityTests :: Test
-> fixityTests = Group "fixityTests" $
->      map (\(f,s,e) -> FixityTest f s e) $
->      [
-
-2 bin ops wrong associativity left + null versions
-
->      (sqlFixity
->      ,i "a" `plus` (i "b" `plus` i "c")
->      ,(i "a" `plus` i "b") `plus` i "c")
->     ,(sqlFixity
->      ,(i "a" `plus` i "b") `plus` i "c"
->      ,(i "a" `plus` i "b") `plus` i "c")
-
-2 bin ops wrong associativity right
-
->     ,(timesRight
->      ,i "a" `times` (i "b" `times` i "c")
->      ,i "a" `times` (i "b" `times` i "c"))
->     ,(timesRight
->      ,(i "a" `times` i "b") `times` i "c"
->      ,i "a" `times` (i "b" `times` i "c"))
-
-
-2 bin ops wrong precedence left
-
->     ,(sqlFixity
->      ,i "a" `plus` (i "b" `times` i "c")
->      ,i "a" `plus` (i "b" `times` i "c"))
-
->     ,(sqlFixity
->      ,(i "a" `plus` i "b") `times` i "c"
->      ,i "a" `plus` (i "b" `times` i "c"))
-
-2 bin ops wrong precedence right
-
->     ,(sqlFixity
->      ,(i "a" `times` i "b") `plus` i "c"
->      ,(i "a" `times` i "b") `plus` i "c")
-
->     ,(sqlFixity
->      ,i "a" `times` (i "b" `plus` i "c")
->      ,(i "a" `times` i "b") `plus` i "c")
-
-a + b * c + d
-a * b + c * d
-
-check all variations
-
->     ] ++
->       (let t = (i "a" `plus` i "b")
->                `times`
->                (i "c" `plus` i "d")
->            trs = generateTrees $ splitTree t
->        in [(sqlFixity, x
->            ,i "a" `plus` (i "b" `times` i "c")
->             `plus` i "d")
->           | x <- trs])
->       ++
->       (let t = (i "a" `times` i "b")
->                `plus`
->                (i "c" `times` i "d")
->            trs = generateTrees $ splitTree t
->        in [(sqlFixity, x
->            ,(i "a" `times` i "b")
->                `plus`
->                (i "c" `times` i "d"))
->           | x <- trs])
-
-
->     ++ [
-
-prefix then postfix wrong precedence
-
->      ([("+", (9, AssocNone))
->       ,("is null", (3, AssocNone))]
->      ,PrefOp "+" (PostOp "is null" (i "a"))
->      ,PostOp "is null" (PrefOp "+" (i "a")))
-
->     ,([("+", (9, AssocNone))
->       ,("is null", (3, AssocNone))]
->      ,PostOp "is null" (PrefOp "+" (i "a"))
->      ,PostOp "is null" (PrefOp "+" (i "a")))
-
->     ,([("+", (3, AssocNone))
->       ,("is null", (9, AssocNone))]
->      ,PrefOp "+" (PostOp "is null" (i "a"))
->      ,PrefOp "+" (PostOp "is null" (i "a")))
-
->     ,([("+", (3, AssocNone))
->       ,("is null", (9, AssocNone))]
->      ,PostOp "is null" (PrefOp "+" (i "a"))
->      ,PrefOp "+" (PostOp "is null" (i "a")))
-
-3-way unary operator movement:
-take a starting point and generate variations
-
-postfix on first arg of binop (cannot move) make sure precedence wants
-  it to move
-
-prefix on second arg of binop (cannot move)
-
-prefix on binop, precedence wrong
-postfix on binop precedence wrong
-prefix on first arg of binop, precedence wrong
-postfix on second arg of binop, precedence wrong
-
-ambiguous fixity tests
-
-sanity check: parens stops rearrangement
-
-check nesting 1 + f(expr)
-
->     ]
->     where
->       plus a b = BinOp a "+" b
->       times a b = BinOp a "*" b
->       i a = Iden a
->       timesRight = [("*", (9, AssocRight))]
-
-testCase
-
-> makeFixityTest :: Fixities -> Expr -> Expr -> T.TestTree
-> makeFixityTest fs s e = H.testCase (show s) $ do
->     let s' = fixFixity fs s
->     H.assertEqual "" s' e
->     {-if (s' == e)
->       then putStrLn $ show s ++ " OK"
->       else putStrLn $ "ERROR\nstart: " ++ show s ++ "\nfixed: " ++ show s' ++ "\nshould be: " ++ show e-}
-
-> tests :: Test
-> tests = Group "Tests" [parserTests, fixityTests]
-
-> makeTest :: Test -> T.TestTree
-> makeTest (Group n ts) = T.testGroup n $ map makeTest ts
-> makeTest (ParserTest s e) = makeParserTest s e
-> makeTest (FixityTest f s e) = makeFixityTest f s e
-
---------
-
- > tests :: T.TestTree
- > tests = T.testGroup "Tests" $ map makeFixityTest fixityTests
-
-> main :: IO ()
-> main = T.defaultMain $ makeTest tests
->   {-do
->        mapM_ checkTest tests
->        mapM_ checkFixity fixityTests
->        let plus a b = BinOp a "+" b
->            times a b = BinOp a "*" b
->            i a = Iden a
->        let t = (i "a" `plus` i "b")
->                `times`
->                (i "c" `plus` i "d")
->            spl = splitTree t
->            trs = generateTrees spl
->        --putStrLn $ "\nSplit\n"
->        --putStrLn $ ppShow (fst spl, length $ snd spl)
->        --putStrLn $ show $ length trs
->        --putStrLn $ "\nTrees\n"
->        --putStrLn $ intercalate "\n" $ map show trs
->        return ()-}
-
-generating trees
-
-1. tree -> list
-val op val op val op ...
-(has to be two lists?
-
-generate variations:
-pick numbers from 0 to n - 1 (n is the number of ops)
-choose the op at this position to be the root
-recurse on the two sides
-
-> splitTree :: Expr -> ([Expr], [Expr->Expr->Expr])
-> splitTree (BinOp a op b) = let (x,y) = splitTree a
->                                (z,w) = splitTree b
->                            in (x++z, y++ [\a b -> BinOp a op b] ++ w)
-> splitTree x = ([x],[])
-
-
-
-> generateTrees :: ([Expr], [Expr->Expr->Expr]) -> [Expr]
-> generateTrees (es,ops) | length es /= length ops + 1 =
->     error $ "mismatch in lengths " ++ show (length es, length ops)
->     ++"\n" ++ ppShow es ++ "\n"
-> generateTrees ([a,b], [op]) = [op a b]
-> generateTrees ([a], []) = [a]
-> generateTrees (vs, ops) =
->     let n = length ops
->     in --trace ("generating " ++ show (length vs, n) ++ "trees\n") $
->        concat $ flip map [0..n-1] $ \m ->
->          let (v1,v2) = splitAt (m + 1) vs
->              (ops1,op':ops2) = splitAt m ops
->              r = [op' t u | t <- generateTrees (v1,ops1)
->                      , u <- generateTrees (v2,ops2)]
->          in -- trace ("generated " ++ show (length r) ++ " trees")
->             r
-> generateTrees ([],[]) = []
-
-
-
diff --git a/tools/Language/SQL/SimpleSQL/CreateIndex.hs b/tools/Language/SQL/SimpleSQL/CreateIndex.hs
new file mode 100644
index 0000000..5928707
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/CreateIndex.hs
@@ -0,0 +1,17 @@
+
+module Language.SQL.SimpleSQL.CreateIndex where
+
+import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.TestTypes
+
+createIndexTests :: TestItem
+createIndexTests = Group "create index tests"
+    [TestStatement ansi2011 "create index a on tbl(c1)"
+      $ CreateIndex False [nm "a"] [nm "tbl"] [nm "c1"]
+    ,TestStatement ansi2011 "create index a.b on sc.tbl (c1, c2)"
+      $ CreateIndex False [nm "a", nm "b"] [nm "sc", nm "tbl"] [nm "c1", nm "c2"]
+    ,TestStatement ansi2011 "create unique index a on tbl(c1)"
+      $ CreateIndex True [nm "a"] [nm "tbl"] [nm "c1"]
+    ]
+  where
+    nm = Name Nothing
diff --git a/tools/Language/SQL/SimpleSQL/CreateIndex.lhs b/tools/Language/SQL/SimpleSQL/CreateIndex.lhs
deleted file mode 100644
index cfb38be..0000000
--- a/tools/Language/SQL/SimpleSQL/CreateIndex.lhs
+++ /dev/null
@@ -1,17 +0,0 @@
-
-> module Language.SQL.SimpleSQL.CreateIndex where
->
-> import Language.SQL.SimpleSQL.Syntax
-> import Language.SQL.SimpleSQL.TestTypes
->
-> createIndexTests :: TestItem
-> createIndexTests = Group "create index tests"
->     [TestStatement ansi2011 "create index a on tbl(c1)"
->       $ CreateIndex False [nm "a"] [nm "tbl"] [nm "c1"]
->     ,TestStatement ansi2011 "create index a.b on sc.tbl (c1, c2)"
->       $ CreateIndex False [nm "a", nm "b"] [nm "sc", nm "tbl"] [nm "c1", nm "c2"]
->     ,TestStatement ansi2011 "create unique index a on tbl(c1)"
->       $ CreateIndex True [nm "a"] [nm "tbl"] [nm "c1"]
->     ]
->   where
->     nm = Name Nothing
diff --git a/tools/Language/SQL/SimpleSQL/CustomDialect.hs b/tools/Language/SQL/SimpleSQL/CustomDialect.hs
new file mode 100644
index 0000000..8fe28b3
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/CustomDialect.hs
@@ -0,0 +1,27 @@
+
+module Language.SQL.SimpleSQL.CustomDialect (customDialectTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+
+customDialectTests :: TestItem
+customDialectTests = Group "custom dialect tests" (map (uncurry ParseQueryExpr) passTests
+    ++ map (uncurry ParseScalarExprFails) failTests )
+  where
+    failTests = [(ansi2011,"SELECT DATE('2000-01-01')")
+                ,(ansi2011,"SELECT DATE")
+                ,(dateApp,"SELECT DATE")
+                ,(dateIden,"SELECT DATE('2000-01-01')")
+                -- show this never being allowed as an alias
+                ,(ansi2011,"SELECT a date")
+                ,(dateApp,"SELECT a date")
+                ,(dateIden,"SELECT a date")
+                ]
+    passTests = [(ansi2011,"SELECT a b")
+                ,(noDateKeyword,"SELECT DATE('2000-01-01')")
+                ,(noDateKeyword,"SELECT DATE")
+                ,(dateApp,"SELECT DATE('2000-01-01')")
+                ,(dateIden,"SELECT DATE")
+                ]
+    noDateKeyword = ansi2011 {diKeywords = filter (/="date") (diKeywords ansi2011)}
+    dateIden = ansi2011 {diIdentifierKeywords = "date" : diIdentifierKeywords ansi2011}
+    dateApp = ansi2011 {diAppKeywords = "date" : diAppKeywords ansi2011}
diff --git a/tools/Language/SQL/SimpleSQL/CustomDialect.lhs b/tools/Language/SQL/SimpleSQL/CustomDialect.lhs
deleted file mode 100644
index daa22a9..0000000
--- a/tools/Language/SQL/SimpleSQL/CustomDialect.lhs
+++ /dev/null
@@ -1,27 +0,0 @@
-
-> module Language.SQL.SimpleSQL.CustomDialect (customDialectTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-
-> customDialectTests :: TestItem
-> customDialectTests = Group "custom dialect tests" (map (uncurry ParseQueryExpr) passTests
->     ++ map (uncurry ParseScalarExprFails) failTests )
->   where
->     failTests = [(ansi2011,"SELECT DATE('2000-01-01')")
->                 ,(ansi2011,"SELECT DATE")
->                 ,(dateApp,"SELECT DATE")
->                 ,(dateIden,"SELECT DATE('2000-01-01')")
->                 -- show this never being allowed as an alias
->                 ,(ansi2011,"SELECT a date")
->                 ,(dateApp,"SELECT a date")
->                 ,(dateIden,"SELECT a date")
->                 ]
->     passTests = [(ansi2011,"SELECT a b")
->                 ,(noDateKeyword,"SELECT DATE('2000-01-01')")
->                 ,(noDateKeyword,"SELECT DATE")
->                 ,(dateApp,"SELECT DATE('2000-01-01')")
->                 ,(dateIden,"SELECT DATE")
->                 ]
->     noDateKeyword = ansi2011 {diKeywords = filter (/="date") (diKeywords ansi2011)}
->     dateIden = ansi2011 {diIdentifierKeywords = "date" : diIdentifierKeywords ansi2011}
->     dateApp = ansi2011 {diAppKeywords = "date" : diAppKeywords ansi2011}
diff --git a/tools/Language/SQL/SimpleSQL/EmptyStatement.hs b/tools/Language/SQL/SimpleSQL/EmptyStatement.hs
new file mode 100644
index 0000000..60a722d
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/EmptyStatement.hs
@@ -0,0 +1,20 @@
+module Language.SQL.SimpleSQL.EmptyStatement where
+
+import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.TestTypes
+
+emptyStatementTests :: TestItem
+emptyStatementTests = Group "empty statement"
+  [ TestStatement ansi2011 ";" EmptyStatement
+  , TestStatements ansi2011 ";" [EmptyStatement]
+  , TestStatements ansi2011 ";;" [EmptyStatement, EmptyStatement]
+  , TestStatements ansi2011 ";;;" [EmptyStatement, EmptyStatement, EmptyStatement]
+  , TestStatement ansi2011 "/* comment */ ;" EmptyStatement
+  , TestStatements ansi2011 "" []
+  , TestStatements ansi2011 "/* comment */" []
+  , TestStatements ansi2011 "/* comment */ ;" [EmptyStatement]
+  , TestStatements ansi2011 "/* comment */ ; /* comment */ ;"
+      [EmptyStatement, EmptyStatement]
+  , TestStatements ansi2011 "/* comment */ ; /* comment */ ; /* comment */ ;"
+      [EmptyStatement, EmptyStatement, EmptyStatement]
+  ]
diff --git a/tools/Language/SQL/SimpleSQL/EmptyStatement.lhs b/tools/Language/SQL/SimpleSQL/EmptyStatement.lhs
deleted file mode 100644
index 616174d..0000000
--- a/tools/Language/SQL/SimpleSQL/EmptyStatement.lhs
+++ /dev/null
@@ -1,20 +0,0 @@
-> module Language.SQL.SimpleSQL.EmptyStatement where
->
-> import Language.SQL.SimpleSQL.Syntax
-> import Language.SQL.SimpleSQL.TestTypes
->
-> emptyStatementTests :: TestItem
-> emptyStatementTests = Group "empty statement"
->   [ TestStatement ansi2011 ";" EmptyStatement
->   , TestStatements ansi2011 ";" [EmptyStatement]
->   , TestStatements ansi2011 ";;" [EmptyStatement, EmptyStatement]
->   , TestStatements ansi2011 ";;;" [EmptyStatement, EmptyStatement, EmptyStatement]
->   , TestStatement ansi2011 "/* comment */ ;" EmptyStatement
->   , TestStatements ansi2011 "" []
->   , TestStatements ansi2011 "/* comment */" []
->   , TestStatements ansi2011 "/* comment */ ;" [EmptyStatement]
->   , TestStatements ansi2011 "/* comment */ ; /* comment */ ;"
->       [EmptyStatement, EmptyStatement]
->   , TestStatements ansi2011 "/* comment */ ; /* comment */ ; /* comment */ ;"
->       [EmptyStatement, EmptyStatement, EmptyStatement]
->   ]
diff --git a/tools/Language/SQL/SimpleSQL/ErrorMessages.lhs b/tools/Language/SQL/SimpleSQL/ErrorMessages.hs
similarity index 55%
rename from tools/Language/SQL/SimpleSQL/ErrorMessages.lhs
rename to tools/Language/SQL/SimpleSQL/ErrorMessages.hs
index b8567ab..031c576 100644
--- a/tools/Language/SQL/SimpleSQL/ErrorMessages.lhs
+++ b/tools/Language/SQL/SimpleSQL/ErrorMessages.hs
@@ -1,4 +1,5 @@
 
+{-
 Want to work on the error messages. Ultimately, parsec won't give the
 best error message for a parser combinator library in haskell. Should
 check out the alternatives such as polyparse and uu-parsing.
@@ -51,100 +52,105 @@ review the error messages.
 
 Then, create some query expressions to focus on the non value
 expression parts.
+-}
 
 
-> module Language.SQL.SimpleSQL.ErrorMessages where
+module Language.SQL.SimpleSQL.ErrorMessages where
 
-> {-import Language.SQL.SimpleSQL.Parser
-> import Data.List
-> import Text.Groom
+{-import Language.SQL.SimpleSQL.Parser
+import Data.List
+import Text.Groom
 
-> valueExpressions :: [String]
-> valueExpressions =
->     ["10.."
->     ,"..10"
->     ,"10e1e2"
->     ,"10e--3"
->     ,"1a"
->     ,"1%"
+valueExpressions :: [String]
+valueExpressions =
+    ["10.."
+    ,"..10"
+    ,"10e1e2"
+    ,"10e--3"
+    ,"1a"
+    ,"1%"
 
->     ,"'b'ad'"
->     ,"'bad"
->     ,"bad'"
+    ,"'b'ad'"
+    ,"'bad"
+    ,"bad'"
 
->     ,"interval '5' ay"
->     ,"interval '5' day (4.4)"
->     ,"interval '5' day (a)"
->     ,"intervala '5' day"
->     ,"interval 'x' day (3"
->     ,"interval 'x' day 3)"
+    ,"interval '5' ay"
+    ,"interval '5' day (4.4)"
+    ,"interval '5' day (a)"
+    ,"intervala '5' day"
+    ,"interval 'x' day (3"
+    ,"interval 'x' day 3)"
 
->     ,"1badiden"
->     ,"$"
->     ,"!"
->     ,"*.a"
+    ,"1badiden"
+    ,"$"
+    ,"!"
+    ,"*.a"
 
->     ,"??"
->     ,"3?"
->     ,"?a"
+    ,"??"
+    ,"3?"
+    ,"?a"
 
->     ,"row"
->     ,"row 1,2"
->     ,"row(1,2"
->     ,"row 1,2)"
->     ,"row(1 2)"
+    ,"row"
+    ,"row 1,2"
+    ,"row(1,2"
+    ,"row 1,2)"
+    ,"row(1 2)"
 
->     ,"f("
->     ,"f)"
+    ,"f("
+    ,"f)"
 
->     ,"f(a"
->     ,"f a)"
->     ,"f(a b)"
+    ,"f(a"
+    ,"f a)"
+    ,"f(a b)"
 
+{-
 TODO:
 case
 operators
+-}
 
->    ,"a + (b + c"
+   ,"a + (b + c"
 
+{-
 casts
 subqueries: + whole set of parentheses use
 in list
 'keyword' functions
 aggregates
 window functions
+-}
 
 
->    ]
+   ]
 
-> queryExpressions :: [String]
-> queryExpressions =
->     map sl1 valueExpressions
->     ++ map sl2 valueExpressions
->     ++ map sl3 valueExpressions
->     ++
->     ["select a from t inner jin u"]
->   where
->     sl1 x = "select " ++ x ++ " from t"
->     sl2 x = "select " ++ x ++ ", y from t"
->     sl3 x = "select " ++ x ++ " fom t"
+queryExpressions :: [String]
+queryExpressions =
+    map sl1 valueExpressions
+    ++ map sl2 valueExpressions
+    ++ map sl3 valueExpressions
+    ++
+    ["select a from t inner jin u"]
+  where
+    sl1 x = "select " ++ x ++ " from t"
+    sl2 x = "select " ++ x ++ ", y from t"
+    sl3 x = "select " ++ x ++ " fom t"
 
-> valExprs :: [String] -> [(String,String)]
-> valExprs = map parseOne
->   where
->     parseOne x = let p = parseValueExpr "" Nothing x
->                  in (x,either peFormattedError (\x -> "ERROR: parsed ok " ++ groom x) p)
+valExprs :: [String] -> [(String,String)]
+valExprs = map parseOne
+  where
+    parseOne x = let p = parseValueExpr "" Nothing x
+                 in (x,either peFormattedError (\x -> "ERROR: parsed ok " ++ groom x) p)
 
 
-> queryExprs :: [String] -> [(String,String)]
-> queryExprs = map parseOne
->   where
->     parseOne x = let p = parseQueryExpr "" Nothing x
->                  in (x,either peFormattedError (\x -> "ERROR: parsed ok " ++ groom x) p)
+queryExprs :: [String] -> [(String,String)]
+queryExprs = map parseOne
+  where
+    parseOne x = let p = parseQueryExpr "" Nothing x
+                 in (x,either peFormattedError (\x -> "ERROR: parsed ok " ++ groom x) p)
 
 
-> pExprs :: [String] -> [String] -> String
-> pExprs x y =
->     let l = valExprs x ++ queryExprs y
->     in intercalate "\n\n\n\n" $ map (\(a,b) -> a ++ "\n" ++ b) l
-> -}
+pExprs :: [String] -> [String] -> String
+pExprs x y =
+    let l = valExprs x ++ queryExprs y
+    in intercalate "\n\n\n\n" $ map (\(a,b) -> a ++ "\n" ++ b) l
+-}
diff --git a/tools/Language/SQL/SimpleSQL/FullQueries.hs b/tools/Language/SQL/SimpleSQL/FullQueries.hs
new file mode 100644
index 0000000..9a590ee
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/FullQueries.hs
@@ -0,0 +1,39 @@
+
+-- Some tests for parsing full queries.
+
+module Language.SQL.SimpleSQL.FullQueries (fullQueriesTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+
+fullQueriesTests :: TestItem
+fullQueriesTests = Group "queries" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select count(*) from t"
+     ,makeSelect
+      {qeSelectList = [(App [Name Nothing "count"] [Star], Nothing)]
+      ,qeFrom = [TRSimple [Name Nothing "t"]]
+      }
+     )
+
+    ,("select a, sum(c+d) as s\n\
+      \  from t,u\n\
+      \  where a > 5\n\
+      \  group by a\n\
+      \  having count(1) > 5\n\
+      \  order by s"
+     ,makeSelect
+      {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
+                      ,(App [Name Nothing "sum"]
+                        [BinOp (Iden [Name Nothing "c"])
+                               [Name Nothing "+"] (Iden [Name Nothing "d"])]
+                       ,Just $ Name Nothing "s")]
+      ,qeFrom = [TRSimple [Name Nothing "t"], TRSimple [Name Nothing "u"]]
+      ,qeWhere = Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (NumLit "5")
+      ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
+      ,qeHaving = Just $ BinOp (App [Name Nothing "count"] [NumLit "1"])
+                               [Name Nothing ">"] (NumLit "5")
+      ,qeOrderBy = [SortSpec (Iden [Name Nothing "s"]) DirDefault NullsOrderDefault]
+      }
+     )
+    ]
diff --git a/tools/Language/SQL/SimpleSQL/FullQueries.lhs b/tools/Language/SQL/SimpleSQL/FullQueries.lhs
deleted file mode 100644
index a7a3052..0000000
--- a/tools/Language/SQL/SimpleSQL/FullQueries.lhs
+++ /dev/null
@@ -1,39 +0,0 @@
-
-Some tests for parsing full queries.
-
-> module Language.SQL.SimpleSQL.FullQueries (fullQueriesTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-
-> fullQueriesTests :: TestItem
-> fullQueriesTests = Group "queries" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select count(*) from t"
->      ,makeSelect
->       {qeSelectList = [(App [Name Nothing "count"] [Star], Nothing)]
->       ,qeFrom = [TRSimple [Name Nothing "t"]]
->       }
->      )
-
->     ,("select a, sum(c+d) as s\n\
->       \  from t,u\n\
->       \  where a > 5\n\
->       \  group by a\n\
->       \  having count(1) > 5\n\
->       \  order by s"
->      ,makeSelect
->       {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
->                       ,(App [Name Nothing "sum"]
->                         [BinOp (Iden [Name Nothing "c"])
->                                [Name Nothing "+"] (Iden [Name Nothing "d"])]
->                        ,Just $ Name Nothing "s")]
->       ,qeFrom = [TRSimple [Name Nothing "t"], TRSimple [Name Nothing "u"]]
->       ,qeWhere = Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (NumLit "5")
->       ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
->       ,qeHaving = Just $ BinOp (App [Name Nothing "count"] [NumLit "1"])
->                                [Name Nothing ">"] (NumLit "5")
->       ,qeOrderBy = [SortSpec (Iden [Name Nothing "s"]) DirDefault NullsOrderDefault]
->       }
->      )
->     ]
diff --git a/tools/Language/SQL/SimpleSQL/GroupBy.hs b/tools/Language/SQL/SimpleSQL/GroupBy.hs
new file mode 100644
index 0000000..72d897c
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/GroupBy.hs
@@ -0,0 +1,237 @@
+
+-- Here are the tests for the group by component of query exprs
+
+module Language.SQL.SimpleSQL.GroupBy (groupByTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+
+groupByTests :: TestItem
+groupByTests = Group "groupByTests"
+    [simpleGroupBy
+    ,newGroupBy
+    ,randomGroupBy
+    ]
+
+simpleGroupBy :: TestItem
+simpleGroupBy = Group "simpleGroupBy" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a,sum(b) from t group by a"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
+                                 ,(App [Name Nothing "sum"] [Iden [Name Nothing "b"]],Nothing)]
+                 ,qeFrom = [TRSimple [Name Nothing "t"]]
+                 ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
+                 })
+
+    ,("select a,b,sum(c) from t group by a,b"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
+                                 ,(Iden [Name Nothing "b"],Nothing)
+                                 ,(App [Name Nothing "sum"] [Iden [Name Nothing "c"]],Nothing)]
+                 ,qeFrom = [TRSimple [Name Nothing "t"]]
+                 ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]
+                              ,SimpleGroup $ Iden [Name Nothing "b"]]
+                 })
+    ]
+
+{-
+test the new group by (), grouping sets, cube and rollup syntax (not
+sure which sql version they were introduced, 1999 or 2003 I think).
+-}
+
+newGroupBy :: TestItem
+newGroupBy = Group "newGroupBy" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select * from t group by ()", ms [GroupingParens []])
+    ,("select * from t group by grouping sets ((), (a))"
+     ,ms [GroupingSets [GroupingParens []
+                       ,GroupingParens [SimpleGroup $ Iden [Name Nothing "a"]]]])
+    ,("select * from t group by cube(a,b)"
+     ,ms [Cube [SimpleGroup $ Iden [Name Nothing "a"], SimpleGroup $ Iden [Name Nothing "b"]]])
+    ,("select * from t group by rollup(a,b)"
+     ,ms [Rollup [SimpleGroup $ Iden [Name Nothing "a"], SimpleGroup $ Iden [Name Nothing "b"]]])
+    ]
+  where
+    ms g = makeSelect {qeSelectList = [(Star,Nothing)]
+                      ,qeFrom = [TRSimple [Name Nothing "t"]]
+                      ,qeGroupBy = g}
+
+randomGroupBy :: TestItem
+randomGroupBy = Group "randomGroupBy" $ map (ParseQueryExpr ansi2011)
+    ["select * from t GROUP BY a"
+    ,"select * from t GROUP BY GROUPING SETS((a))"
+    ,"select * from t GROUP BY a,b,c"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c))"
+    ,"select * from t GROUP BY ROLLUP(a,b)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b),\n\
+     \(a),\n\
+     \() )"
+    ,"select * from t GROUP BY ROLLUP(b,a)"
+    ,"select * from t GROUP BY GROUPING SETS((b,a),\n\
+     \(b),\n\
+     \() )"
+    ,"select * from t GROUP BY CUBE(a,b,c)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
+     \(a,b),\n\
+     \(a,c),\n\
+     \(b,c),\n\
+     \(a),\n\
+     \(b),\n\
+     \(c),\n\
+     \() )"
+    ,"select * from t GROUP BY ROLLUP(Province, County, City)"
+    ,"select * from t GROUP BY ROLLUP(Province, (County, City))"
+    ,"select * from t GROUP BY ROLLUP(Province, (County, City))"
+    ,"select * from t GROUP BY GROUPING SETS((Province, County, City),\n\
+     \(Province),\n\
+     \() )"
+    ,"select * from t GROUP BY GROUPING SETS((Province, County, City),\n\
+     \(Province, County),\n\
+     \(Province),\n\
+     \() )"
+    ,"select * from t GROUP BY a, ROLLUP(b,c)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
+     \(a,b),\n\
+     \(a) )"
+    ,"select * from t GROUP BY a, b, ROLLUP(c,d)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c,d),\n\
+     \(a,b,c),\n\
+     \(a,b) )"
+    ,"select * from t GROUP BY ROLLUP(a), ROLLUP(b,c)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
+     \(a,b),\n\
+     \(a),\n\
+     \(b,c),\n\
+     \(b),\n\
+     \() )"
+    ,"select * from t GROUP BY ROLLUP(a), CUBE(b,c)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
+     \(a,b),\n\
+     \(a,c),\n\
+     \(a),\n\
+     \(b,c),\n\
+     \(b),\n\
+     \(c),\n\
+     \() )"
+    ,"select * from t GROUP BY CUBE(a,b), ROLLUP(c,d)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b,c,d),\n\
+     \(a,b,c),\n\
+     \(a,b),\n\
+     \(a,c,d),\n\
+     \(a,c),\n\
+     \(a),\n\
+     \(b,c,d),\n\
+     \(b,c),\n\
+     \(b),\n\
+     \(c,d),\n\
+     \(c),\n\
+     \() )"
+    ,"select * from t GROUP BY a, ROLLUP(a,b)"
+    ,"select * from t GROUP BY GROUPING SETS((a,b),\n\
+     \(a) )"
+    ,"select * from t GROUP BY Region,\n\
+     \ROLLUP(Sales_Person, WEEK(Sales_Date)),\n\
+     \CUBE(YEAR(Sales_Date), MONTH (Sales_Date))"
+    ,"select * from t GROUP BY ROLLUP (Region, Sales_Person, WEEK(Sales_Date),\n\
+     \YEAR(Sales_Date), MONTH(Sales_Date) )"
+
+    ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \WHERE WEEK(SALES_DATE) = 13\n\
+     \GROUP BY WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE), SALES_PERSON\n\
+     \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
+
+    ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \WHERE WEEK(SALES_DATE) = 13\n\
+     \GROUP BY GROUPING SETS ( (WEEK(SALES_DATE), SALES_PERSON),\n\
+     \(DAYOFWEEK(SALES_DATE), SALES_PERSON))\n\
+     \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
+
+    ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \WHERE WEEK(SALES_DATE) = 13\n\
+     \GROUP BY ROLLUP ( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE), SALES_PERSON )\n\
+     \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
+
+    ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \WHERE WEEK(SALES_DATE) = 13\n\
+     \GROUP BY CUBE ( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE), SALES_PERSON )\n\
+     \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
+
+    ,"SELECT SALES_PERSON,\n\
+     \MONTH(SALES_DATE) AS MONTH,\n\
+     \SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \GROUP BY GROUPING SETS ( (SALES_PERSON, MONTH(SALES_DATE)),\n\
+     \()\n\
+     \)\n\
+     \ORDER BY SALES_PERSON, MONTH"
+
+    ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \GROUP BY ROLLUP ( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE) )\n\
+     \ORDER BY WEEK, DAY_WEEK"
+
+    ,"SELECT MONTH(SALES_DATE) AS MONTH,\n\
+     \REGION,\n\
+     \SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \GROUP BY ROLLUP ( MONTH(SALES_DATE), REGION )\n\
+     \ORDER BY MONTH, REGION"
+
+    ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \MONTH(SALES_DATE) AS MONTH,\n\
+     \REGION,\n\
+     \SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES\n\
+     \GROUP BY GROUPING SETS ( ROLLUP( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE) ),\n\
+     \ROLLUP( MONTH(SALES_DATE), REGION ) )\n\
+     \ORDER BY WEEK, DAY_WEEK, MONTH, REGION"
+
+    ,"SELECT R1, R2,\n\
+     \WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \MONTH(SALES_DATE) AS MONTH,\n\
+     \REGION, SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES,(VALUES('GROUP 1','GROUP 2')) AS X(R1,R2)\n\
+     \GROUP BY GROUPING SETS ((R1, ROLLUP(WEEK(SALES_DATE),\n\
+     \DAYOFWEEK(SALES_DATE))),\n\
+     \(R2,ROLLUP( MONTH(SALES_DATE), REGION ) ))\n\
+     \ORDER BY WEEK, DAY_WEEK, MONTH, REGION"
+
+    {-,"SELECT COALESCE(R1,R2) AS GROUP,\n\
+     \WEEK(SALES_DATE) AS WEEK,\n\
+     \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
+     \MONTH(SALES_DATE) AS MONTH,\n\
+     \REGION, SUM(SALES) AS UNITS_SOLD\n\
+     \FROM SALES,(VALUES('GROUP 1','GROUP 2')) AS X(R1,R2)\n\
+     \GROUP BY GROUPING SETS ((R1, ROLLUP(WEEK(SALES_DATE),\n\
+     \DAYOFWEEK(SALES_DATE))),\n\
+     \(R2,ROLLUP( MONTH(SALES_DATE), REGION ) ))\n\
+     \ORDER BY GROUP, WEEK, DAY_WEEK, MONTH, REGION"-}
+     -- as group - needs more subtle keyword blacklisting
+
+    -- decimal as a function not allowed due to the reserved keyword
+    -- handling: todo, review if this is ansi standard function or
+    -- if there are places where reserved keywords can still be used
+    ,"SELECT MONTH(SALES_DATE) AS MONTH,\n\
+     \REGION,\n\
+     \SUM(SALES) AS UNITS_SOLD,\n\
+     \MAX(SALES) AS BEST_SALE,\n\
+     \CAST(ROUND(AVG(DECIMALx(SALES)),2) AS DECIMAL(5,2)) AS AVG_UNITS_SOLD\n\
+     \FROM SALES\n\
+     \GROUP BY CUBE(MONTH(SALES_DATE),REGION)\n\
+     \ORDER BY MONTH, REGION"
+
+    ]
diff --git a/tools/Language/SQL/SimpleSQL/GroupBy.lhs b/tools/Language/SQL/SimpleSQL/GroupBy.lhs
deleted file mode 100644
index 6edc6c7..0000000
--- a/tools/Language/SQL/SimpleSQL/GroupBy.lhs
+++ /dev/null
@@ -1,235 +0,0 @@
-
-Here are the tests for the group by component of query exprs
-
-> module Language.SQL.SimpleSQL.GroupBy (groupByTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-
-> groupByTests :: TestItem
-> groupByTests = Group "groupByTests"
->     [simpleGroupBy
->     ,newGroupBy
->     ,randomGroupBy
->     ]
-
-> simpleGroupBy :: TestItem
-> simpleGroupBy = Group "simpleGroupBy" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a,sum(b) from t group by a"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
->                                  ,(App [Name Nothing "sum"] [Iden [Name Nothing "b"]],Nothing)]
->                  ,qeFrom = [TRSimple [Name Nothing "t"]]
->                  ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
->                  })
-
->     ,("select a,b,sum(c) from t group by a,b"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
->                                  ,(Iden [Name Nothing "b"],Nothing)
->                                  ,(App [Name Nothing "sum"] [Iden [Name Nothing "c"]],Nothing)]
->                  ,qeFrom = [TRSimple [Name Nothing "t"]]
->                  ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]
->                               ,SimpleGroup $ Iden [Name Nothing "b"]]
->                  })
->     ]
-
-test the new group by (), grouping sets, cube and rollup syntax (not
-sure which sql version they were introduced, 1999 or 2003 I think).
-
-> newGroupBy :: TestItem
-> newGroupBy = Group "newGroupBy" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select * from t group by ()", ms [GroupingParens []])
->     ,("select * from t group by grouping sets ((), (a))"
->      ,ms [GroupingSets [GroupingParens []
->                        ,GroupingParens [SimpleGroup $ Iden [Name Nothing "a"]]]])
->     ,("select * from t group by cube(a,b)"
->      ,ms [Cube [SimpleGroup $ Iden [Name Nothing "a"], SimpleGroup $ Iden [Name Nothing "b"]]])
->     ,("select * from t group by rollup(a,b)"
->      ,ms [Rollup [SimpleGroup $ Iden [Name Nothing "a"], SimpleGroup $ Iden [Name Nothing "b"]]])
->     ]
->   where
->     ms g = makeSelect {qeSelectList = [(Star,Nothing)]
->                       ,qeFrom = [TRSimple [Name Nothing "t"]]
->                       ,qeGroupBy = g}
-
-> randomGroupBy :: TestItem
-> randomGroupBy = Group "randomGroupBy" $ map (ParseQueryExpr ansi2011)
->     ["select * from t GROUP BY a"
->     ,"select * from t GROUP BY GROUPING SETS((a))"
->     ,"select * from t GROUP BY a,b,c"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c))"
->     ,"select * from t GROUP BY ROLLUP(a,b)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b),\n\
->      \(a),\n\
->      \() )"
->     ,"select * from t GROUP BY ROLLUP(b,a)"
->     ,"select * from t GROUP BY GROUPING SETS((b,a),\n\
->      \(b),\n\
->      \() )"
->     ,"select * from t GROUP BY CUBE(a,b,c)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
->      \(a,b),\n\
->      \(a,c),\n\
->      \(b,c),\n\
->      \(a),\n\
->      \(b),\n\
->      \(c),\n\
->      \() )"
->     ,"select * from t GROUP BY ROLLUP(Province, County, City)"
->     ,"select * from t GROUP BY ROLLUP(Province, (County, City))"
->     ,"select * from t GROUP BY ROLLUP(Province, (County, City))"
->     ,"select * from t GROUP BY GROUPING SETS((Province, County, City),\n\
->      \(Province),\n\
->      \() )"
->     ,"select * from t GROUP BY GROUPING SETS((Province, County, City),\n\
->      \(Province, County),\n\
->      \(Province),\n\
->      \() )"
->     ,"select * from t GROUP BY a, ROLLUP(b,c)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
->      \(a,b),\n\
->      \(a) )"
->     ,"select * from t GROUP BY a, b, ROLLUP(c,d)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c,d),\n\
->      \(a,b,c),\n\
->      \(a,b) )"
->     ,"select * from t GROUP BY ROLLUP(a), ROLLUP(b,c)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
->      \(a,b),\n\
->      \(a),\n\
->      \(b,c),\n\
->      \(b),\n\
->      \() )"
->     ,"select * from t GROUP BY ROLLUP(a), CUBE(b,c)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c),\n\
->      \(a,b),\n\
->      \(a,c),\n\
->      \(a),\n\
->      \(b,c),\n\
->      \(b),\n\
->      \(c),\n\
->      \() )"
->     ,"select * from t GROUP BY CUBE(a,b), ROLLUP(c,d)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b,c,d),\n\
->      \(a,b,c),\n\
->      \(a,b),\n\
->      \(a,c,d),\n\
->      \(a,c),\n\
->      \(a),\n\
->      \(b,c,d),\n\
->      \(b,c),\n\
->      \(b),\n\
->      \(c,d),\n\
->      \(c),\n\
->      \() )"
->     ,"select * from t GROUP BY a, ROLLUP(a,b)"
->     ,"select * from t GROUP BY GROUPING SETS((a,b),\n\
->      \(a) )"
->     ,"select * from t GROUP BY Region,\n\
->      \ROLLUP(Sales_Person, WEEK(Sales_Date)),\n\
->      \CUBE(YEAR(Sales_Date), MONTH (Sales_Date))"
->     ,"select * from t GROUP BY ROLLUP (Region, Sales_Person, WEEK(Sales_Date),\n\
->      \YEAR(Sales_Date), MONTH(Sales_Date) )"
-
->     ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \WHERE WEEK(SALES_DATE) = 13\n\
->      \GROUP BY WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE), SALES_PERSON\n\
->      \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
-
->     ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \WHERE WEEK(SALES_DATE) = 13\n\
->      \GROUP BY GROUPING SETS ( (WEEK(SALES_DATE), SALES_PERSON),\n\
->      \(DAYOFWEEK(SALES_DATE), SALES_PERSON))\n\
->      \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
-
->     ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \WHERE WEEK(SALES_DATE) = 13\n\
->      \GROUP BY ROLLUP ( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE), SALES_PERSON )\n\
->      \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
-
->     ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \SALES_PERSON, SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \WHERE WEEK(SALES_DATE) = 13\n\
->      \GROUP BY CUBE ( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE), SALES_PERSON )\n\
->      \ORDER BY WEEK, DAY_WEEK, SALES_PERSON"
-
->     ,"SELECT SALES_PERSON,\n\
->      \MONTH(SALES_DATE) AS MONTH,\n\
->      \SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \GROUP BY GROUPING SETS ( (SALES_PERSON, MONTH(SALES_DATE)),\n\
->      \()\n\
->      \)\n\
->      \ORDER BY SALES_PERSON, MONTH"
-
->     ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \GROUP BY ROLLUP ( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE) )\n\
->      \ORDER BY WEEK, DAY_WEEK"
-
->     ,"SELECT MONTH(SALES_DATE) AS MONTH,\n\
->      \REGION,\n\
->      \SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \GROUP BY ROLLUP ( MONTH(SALES_DATE), REGION )\n\
->      \ORDER BY MONTH, REGION"
-
->     ,"SELECT WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \MONTH(SALES_DATE) AS MONTH,\n\
->      \REGION,\n\
->      \SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES\n\
->      \GROUP BY GROUPING SETS ( ROLLUP( WEEK(SALES_DATE), DAYOFWEEK(SALES_DATE) ),\n\
->      \ROLLUP( MONTH(SALES_DATE), REGION ) )\n\
->      \ORDER BY WEEK, DAY_WEEK, MONTH, REGION"
-
->     ,"SELECT R1, R2,\n\
->      \WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \MONTH(SALES_DATE) AS MONTH,\n\
->      \REGION, SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES,(VALUES('GROUP 1','GROUP 2')) AS X(R1,R2)\n\
->      \GROUP BY GROUPING SETS ((R1, ROLLUP(WEEK(SALES_DATE),\n\
->      \DAYOFWEEK(SALES_DATE))),\n\
->      \(R2,ROLLUP( MONTH(SALES_DATE), REGION ) ))\n\
->      \ORDER BY WEEK, DAY_WEEK, MONTH, REGION"
-
->     {-,"SELECT COALESCE(R1,R2) AS GROUP,\n\
->      \WEEK(SALES_DATE) AS WEEK,\n\
->      \DAYOFWEEK(SALES_DATE) AS DAY_WEEK,\n\
->      \MONTH(SALES_DATE) AS MONTH,\n\
->      \REGION, SUM(SALES) AS UNITS_SOLD\n\
->      \FROM SALES,(VALUES('GROUP 1','GROUP 2')) AS X(R1,R2)\n\
->      \GROUP BY GROUPING SETS ((R1, ROLLUP(WEEK(SALES_DATE),\n\
->      \DAYOFWEEK(SALES_DATE))),\n\
->      \(R2,ROLLUP( MONTH(SALES_DATE), REGION ) ))\n\
->      \ORDER BY GROUP, WEEK, DAY_WEEK, MONTH, REGION"-}
->      -- as group - needs more subtle keyword blacklisting
-
->     -- decimal as a function not allowed due to the reserved keyword
->     -- handling: todo, review if this is ansi standard function or
->     -- if there are places where reserved keywords can still be used
->     ,"SELECT MONTH(SALES_DATE) AS MONTH,\n\
->      \REGION,\n\
->      \SUM(SALES) AS UNITS_SOLD,\n\
->      \MAX(SALES) AS BEST_SALE,\n\
->      \CAST(ROUND(AVG(DECIMALx(SALES)),2) AS DECIMAL(5,2)) AS AVG_UNITS_SOLD\n\
->      \FROM SALES\n\
->      \GROUP BY CUBE(MONTH(SALES_DATE),REGION)\n\
->      \ORDER BY MONTH, REGION"
-
->     ]
diff --git a/tools/Language/SQL/SimpleSQL/LexerTests.hs b/tools/Language/SQL/SimpleSQL/LexerTests.hs
new file mode 100644
index 0000000..4d7fb49
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/LexerTests.hs
@@ -0,0 +1,343 @@
+
+
+-- Test for the lexer
+
+module Language.SQL.SimpleSQL.LexerTests (lexerTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Lex (Token(..),tokenListWillPrintAndLex)
+--import Debug.Trace
+--import Data.Char (isAlpha)
+import Data.List
+
+lexerTests :: TestItem
+lexerTests = Group "lexerTests" $
+    [Group "lexer token tests" [ansiLexerTests
+                               ,postgresLexerTests
+                               ,sqlServerLexerTests
+                               ,oracleLexerTests
+                               ,mySqlLexerTests
+                               ,odbcLexerTests]]
+
+ansiLexerTable :: [(String,[Token])]
+ansiLexerTable =
+    -- single char symbols
+    map (\s -> ([s],[Symbol [s]])) "+-^*/%~&|?<>[]=,;()"
+    -- multi char symbols
+    ++ map (\s -> (s,[Symbol s])) [">=","<=","!=","<>","||"]
+    ++ (let idens = ["a", "_a", "test", "table", "Stuff", "STUFF"]
+        -- simple identifiers
+        in map (\i -> (i, [Identifier Nothing i])) idens
+           ++ map (\i -> ("\"" ++ i ++ "\"", [Identifier (Just ("\"","\"")) i])) idens
+           -- todo: in order to make lex . pretty id, need to
+           -- preserve the case of the u
+           ++ map (\i -> ("u&\"" ++ i ++ "\"", [Identifier (Just ("u&\"","\"")) i])) idens
+           -- host param
+           ++ map (\i -> (':':i, [PrefixedVariable ':' i])) idens
+       )
+    -- quoted identifiers with embedded double quotes
+    -- the lexer doesn't unescape the quotes
+    ++ [("\"normal \"\" iden\"", [Identifier (Just ("\"","\"")) "normal \"\" iden"])]
+    -- strings
+    -- the lexer doesn't apply escapes at all
+    ++ [("'string'", [SqlString "'" "'" "string"])
+       ,("'normal '' quote'", [SqlString "'" "'" "normal '' quote"])
+       ,("'normalendquote '''", [SqlString "'" "'" "normalendquote ''"])
+       ,("'\n'", [SqlString "'" "'" "\n"])]
+    -- csstrings
+    ++ map (\c -> (c ++ "'test'", [SqlString (c ++ "'") "'" "test"]))
+       ["n", "N","b", "B","x", "X", "u&"]
+    -- numbers
+    ++ [("10", [SqlNumber "10"])
+       ,(".1", [SqlNumber ".1"])
+       ,("5e3", [SqlNumber "5e3"])
+       ,("5e+3", [SqlNumber "5e+3"])
+       ,("5e-3", [SqlNumber "5e-3"])
+       ,("10.2", [SqlNumber "10.2"])
+       ,("10.2e7", [SqlNumber "10.2e7"])]
+    -- whitespace
+    ++ concat [[([a],[Whitespace [a]])
+               ,([a,b], [Whitespace [a,b]])]
+              | a <- " \n\t", b <- " \n\t"]
+    -- line comment
+    ++ map (\c -> (c, [LineComment c]))
+       ["--", "-- ", "-- this is a comment", "-- line com\n"]
+    -- block comment
+    ++ map (\c -> (c, [BlockComment c]))
+       ["/**/", "/* */","/* this is a comment */"
+       ,"/* this *is/ a comment */"
+       ]
+
+ansiLexerTests :: TestItem
+ansiLexerTests = Group "ansiLexerTests" $
+    [Group "ansi lexer token tests" $ [LexTest ansi2011 s t |  (s,t) <- ansiLexerTable]
+    ,Group "ansi generated combination lexer tests" $
+    [ LexTest ansi2011 (s ++ s1) (t ++ t1)
+    | (s,t) <- ansiLexerTable
+    , (s1,t1) <- ansiLexerTable
+    , tokenListWillPrintAndLex ansi2011 $ t ++ t1
+
+    ]
+    ,Group "ansiadhoclexertests" $
+       map (uncurry $ LexTest ansi2011)
+       [("", [])
+       ,("-- line com\nstuff", [LineComment "-- line com\n",Identifier Nothing "stuff"])
+       ] ++
+       [-- want to make sure this gives a parse error
+        LexFails ansi2011 "*/"
+        -- combinations of pipes: make sure they fail because they could be
+        -- ambiguous and it is really unclear when they are or not, and
+        -- what the result is even when they are not ambiguous
+       ,LexFails ansi2011 "|||"
+       ,LexFails ansi2011 "||||"
+       ,LexFails ansi2011 "|||||"
+       -- another user experience thing: make sure extra trailing
+       -- number chars are rejected rather than attempting to parse
+       -- if the user means to write something that is rejected by this code,
+       -- then they can use whitespace to make it clear and then it will parse
+       ,LexFails ansi2011 "12e3e4"
+       ,LexFails ansi2011 "12e3e4"
+       ,LexFails ansi2011 "12e3e4"
+       ,LexFails ansi2011 "12e3.4"
+       ,LexFails ansi2011 "12.4.5"
+       ,LexFails ansi2011 "12.4e5.6"
+       ,LexFails ansi2011 "12.4e5e7"]
+     ]
+
+{-
+todo: lexing tests
+do quickcheck testing:
+can try to generate valid tokens then check they parse
+
+same as above: can also try to pair tokens, create an accurate
+  function to say which ones can appear adjacent, and test
+
+I think this plus the explicit lists of tokens like above which do
+basic sanity + explicit edge casts will provide a high level of
+assurance.
+-}
+
+
+
+postgresLexerTable :: [(String,[Token])]
+postgresLexerTable =
+    -- single char symbols
+    map (\s -> ([s],[Symbol [s]])) "+-^*/%~&|?<>[]=,;():"
+    -- multi char symbols
+    ++ map (\s -> (s,[Symbol s])) [">=","<=","!=","<>","||", "::","..",":="]
+    -- generic symbols
+
+    ++ (let idens = ["a", "_a", "test", "table", "Stuff", "STUFF"]
+        -- simple identifiers
+        in map (\i -> (i, [Identifier Nothing i])) idens
+           ++ map (\i -> ("\"" ++ i ++ "\"", [Identifier (Just ("\"","\"")) i])) idens
+           -- todo: in order to make lex . pretty id, need to
+           -- preserve the case of the u
+           ++ map (\i -> ("u&\"" ++ i ++ "\"", [Identifier (Just ("u&\"","\"")) i])) idens
+           -- host param
+           ++ map (\i -> (':':i, [PrefixedVariable ':' i])) idens
+       )
+    -- positional var
+    ++ [("$1", [PositionalArg 1])]
+    -- quoted identifiers with embedded double quotes
+    ++ [("\"normal \"\" iden\"", [Identifier (Just ("\"","\"")) "normal \"\" iden"])]
+    -- strings
+    ++ [("'string'", [SqlString "'" "'" "string"])
+       ,("'normal '' quote'", [SqlString "'" "'" "normal '' quote"])
+       ,("'normalendquote '''", [SqlString "'" "'" "normalendquote ''"])
+       ,("'\n'", [SqlString "'" "'" "\n"])
+       ,("E'\n'", [SqlString "E'" "'" "\n"])
+       ,("e'this '' quote'", [SqlString "e'" "'" "this '' quote"])
+       ,("e'this \\' quote'", [SqlString "e'" "'" "this \\' quote"])
+       ,("'not this \\' quote", [SqlString "'" "'" "not this \\"
+                                ,Whitespace " "
+                                ,Identifier Nothing "quote"])
+       ,("$$ string 1 $$", [SqlString "$$" "$$" " string 1 "])
+       ,("$$ string $ 2 $$", [SqlString "$$" "$$" " string $ 2 "])
+       ,("$a$ $$string 3$$ $a$", [SqlString "$a$" "$a$" " $$string 3$$ "])
+       ]
+    -- csstrings
+    ++ map (\c -> (c ++ "'test'", [SqlString (c ++ "'") "'" "test"]))
+       ["n", "N","b", "B","x", "X", "u&", "e", "E"]
+    -- numbers
+    ++ [("10", [SqlNumber "10"])
+       ,(".1", [SqlNumber ".1"])
+       ,("5e3", [SqlNumber "5e3"])
+       ,("5e+3", [SqlNumber "5e+3"])
+       ,("5e-3", [SqlNumber "5e-3"])
+       ,("10.2", [SqlNumber "10.2"])
+       ,("10.2e7", [SqlNumber "10.2e7"])]
+    -- whitespace
+    ++ concat [[([a],[Whitespace [a]])
+               ,([a,b], [Whitespace [a,b]])]
+              | a <- " \n\t", b <- " \n\t"]
+    -- line comment
+    ++ map (\c -> (c, [LineComment c]))
+       ["--", "-- ", "-- this is a comment", "-- line com\n"]
+    -- block comment
+    ++ map (\c -> (c, [BlockComment c]))
+       ["/**/", "/* */","/* this is a comment */"
+       ,"/* this *is/ a comment */"
+       ]
+
+{-
+An operator name is a sequence of up to NAMEDATALEN-1 (63 by default) characters from the following list:
+
++ - * / < > = ~ ! @ # % ^ & | ` ?
+
+There are a few restrictions on operator names, however:
+-- and /* cannot appear anywhere in an operator name, since they will be taken as the start of a comment.
+
+A multiple-character operator name cannot end in + or -, unless the name also contains at least one of these characters:
+
+~ ! @ # % ^ & | ` ?
+
+todo: 'negative' tests
+symbol then --
+symbol then /*
+operators without one of the exception chars
+  followed by + or - without whitespace
+
+also: do the testing for the ansi compatibility special cases
+-}
+
+postgresShortOperatorTable :: [(String,[Token])]
+postgresShortOperatorTable =
+    [ (x, [Symbol x]) | x <- someValidPostgresOperators 2]
+
+
+postgresExtraOperatorTable :: [(String,[Token])]
+postgresExtraOperatorTable =
+    [ (x, [Symbol x]) | x <- someValidPostgresOperators 4]
+
+
+someValidPostgresOperators :: Int -> [String]
+someValidPostgresOperators l =
+       [ x
+       | n <- [1..l]
+       , x <- combos "+-*/<>=~!@#%^&|`?" n
+       , not ("--" `isInfixOf` x || "/*" `isInfixOf` x || "*/" `isInfixOf` x)
+       , not (last x `elem` "+-")
+         || or (map (`elem` x) "~!@#%^&|`?")
+       ]
+
+{-
+These are postgres operators, which if followed immediately by a + or
+-, will lex as separate operators rather than one operator including
+the + or -.
+-}
+
+somePostgresOpsWhichWontAddTrailingPlusMinus :: Int -> [String]
+somePostgresOpsWhichWontAddTrailingPlusMinus l =
+       [ x
+       | n <- [1..l]
+       , x <- combos "+-*/<>=" n
+       , not ("--" `isInfixOf` x || "/*" `isInfixOf` x || "*/" `isInfixOf` x)
+       , not (last x `elem` "+-")
+       ]
+
+
+postgresLexerTests :: TestItem
+postgresLexerTests = Group "postgresLexerTests" $
+    [Group "postgres lexer token tests" $
+     [LexTest postgres s t | (s,t) <- postgresLexerTable]
+    ,Group "postgres generated lexer token tests" $
+     [LexTest postgres s t | (s,t) <- postgresShortOperatorTable ++ postgresExtraOperatorTable]
+    ,Group "postgres generated combination lexer tests" $
+    [ LexTest postgres (s ++ s1) (t ++ t1)
+    | (s,t) <- postgresLexerTable ++ postgresShortOperatorTable
+    , (s1,t1) <- postgresLexerTable ++ postgresShortOperatorTable
+    , tokenListWillPrintAndLex postgres $ t ++ t1
+
+    ]
+    ,Group "generated postgres edgecase lexertests" $
+     [LexTest postgres s t
+     | (s,t) <- edgeCaseCommentOps
+                ++ edgeCasePlusMinusOps
+                ++ edgeCasePlusMinusComments]
+
+    ,Group "adhoc postgres lexertests" $
+      -- need more tests for */ to make sure it is caught if it is in the middle of a
+      -- sequence of symbol letters
+        [LexFails postgres "*/"
+        ,LexFails postgres ":::"
+        ,LexFails postgres "::::"
+        ,LexFails postgres ":::::"
+        ,LexFails postgres "@*/"
+        ,LexFails postgres "-*/"
+        ,LexFails postgres "12e3e4"
+        ,LexFails postgres "12e3e4"
+        ,LexFails postgres "12e3e4"
+        ,LexFails postgres "12e3.4"
+        ,LexFails postgres "12.4.5"
+        ,LexFails postgres "12.4e5.6"
+        ,LexFails postgres "12.4e5e7"
+         -- special case allow this to lex to 1 .. 2
+         -- this is for 'for loops' in plpgsql
+        ,LexTest postgres "1..2" [SqlNumber "1", Symbol "..", SqlNumber "2"]]
+    ]
+ where
+   edgeCaseCommentOps =
+     [ (x ++ "/*<test*/", [Symbol x, BlockComment "/*<test*/"])
+     | x <- eccops
+     , not (last x == '*')
+     ] ++
+     [ (x ++ "--<test", [Symbol x, LineComment "--<test"])
+     | x <- eccops
+     , not (last x == '-')
+     ]
+   eccops = someValidPostgresOperators 2
+   edgeCasePlusMinusOps = concat
+     [ [ (x ++ "+", [Symbol x, Symbol "+"])
+       , (x ++ "-", [Symbol x, Symbol "-"]) ]
+     | x <- somePostgresOpsWhichWontAddTrailingPlusMinus 2
+     ]
+   edgeCasePlusMinusComments =
+     [("---", [LineComment "---"])
+     ,("+--", [Symbol "+", LineComment "--"])
+     ,("-/**/", [Symbol "-", BlockComment "/**/"])
+     ,("+/**/", [Symbol "+", BlockComment "/**/"])
+     ]
+
+
+sqlServerLexerTests :: TestItem
+sqlServerLexerTests = Group "sqlServerLexTests" $
+    [ LexTest sqlserver s t | (s,t) <-
+    [("@variable", [(PrefixedVariable '@' "variable")])
+    ,("#variable", [(PrefixedVariable '#' "variable")])
+    ,("[quoted identifier]", [(Identifier (Just ("[", "]")) "quoted identifier")])
+    ]]
+
+oracleLexerTests :: TestItem
+oracleLexerTests = Group "oracleLexTests" $
+    [] -- nothing oracle specific atm
+
+mySqlLexerTests :: TestItem
+mySqlLexerTests = Group "mySqlLexerTests" $
+    [ LexTest mysql s t | (s,t) <-
+    [("`quoted identifier`", [(Identifier (Just ("`", "`")) "quoted identifier")])
+    ]
+    ]
+
+odbcLexerTests :: TestItem
+odbcLexerTests = Group "odbcLexTests" $
+    [ LexTest sqlserver {diOdbc = True} s t | (s,t) <-
+    [("{}", [Symbol "{", Symbol "}"])
+    ]]
+    ++ [LexFails sqlserver {diOdbc = False} "{"
+       ,LexFails sqlserver {diOdbc = False} "}"]
+
+combos :: [a] -> Int -> [[a]]
+combos _ 0 = [[]]
+combos l n = [ x:tl | x <- l, tl <- combos l (n - 1) ]
+
+{-
+figure out a way to do quickcheck testing:
+1. generate valid tokens and check they parse
+
+2. combine two generated tokens together for the combo testing
+
+this especially will work much better for the postgresql extensible
+operator tests which doing exhaustively takes ages and doesn't bring
+much benefit over testing a few using quickcheck.
+-}
diff --git a/tools/Language/SQL/SimpleSQL/LexerTests.lhs b/tools/Language/SQL/SimpleSQL/LexerTests.lhs
deleted file mode 100644
index 14246f5..0000000
--- a/tools/Language/SQL/SimpleSQL/LexerTests.lhs
+++ /dev/null
@@ -1,335 +0,0 @@
-
-
-Test for the lexer
-
-> module Language.SQL.SimpleSQL.LexerTests (lexerTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Lex (Token(..),tokenListWillPrintAndLex)
-> --import Debug.Trace
-> --import Data.Char (isAlpha)
-> import Data.List
-
-> lexerTests :: TestItem
-> lexerTests = Group "lexerTests" $
->     [Group "lexer token tests" [ansiLexerTests
->                                ,postgresLexerTests
->                                ,sqlServerLexerTests
->                                ,oracleLexerTests
->                                ,mySqlLexerTests
->                                ,odbcLexerTests]]
-
-> ansiLexerTable :: [(String,[Token])]
-> ansiLexerTable =
->     -- single char symbols
->     map (\s -> ([s],[Symbol [s]])) "+-^*/%~&|?<>[]=,;()"
->     -- multi char symbols
->     ++ map (\s -> (s,[Symbol s])) [">=","<=","!=","<>","||"]
->     ++ (let idens = ["a", "_a", "test", "table", "Stuff", "STUFF"]
->         -- simple identifiers
->         in map (\i -> (i, [Identifier Nothing i])) idens
->            ++ map (\i -> ("\"" ++ i ++ "\"", [Identifier (Just ("\"","\"")) i])) idens
->            -- todo: in order to make lex . pretty id, need to
->            -- preserve the case of the u
->            ++ map (\i -> ("u&\"" ++ i ++ "\"", [Identifier (Just ("u&\"","\"")) i])) idens
->            -- host param
->            ++ map (\i -> (':':i, [PrefixedVariable ':' i])) idens
->        )
->     -- quoted identifiers with embedded double quotes
->     -- the lexer doesn't unescape the quotes
->     ++ [("\"normal \"\" iden\"", [Identifier (Just ("\"","\"")) "normal \"\" iden"])]
->     -- strings
->     -- the lexer doesn't apply escapes at all
->     ++ [("'string'", [SqlString "'" "'" "string"])
->        ,("'normal '' quote'", [SqlString "'" "'" "normal '' quote"])
->        ,("'normalendquote '''", [SqlString "'" "'" "normalendquote ''"])
->        ,("'\n'", [SqlString "'" "'" "\n"])]
->     -- csstrings
->     ++ map (\c -> (c ++ "'test'", [SqlString (c ++ "'") "'" "test"]))
->        ["n", "N","b", "B","x", "X", "u&"]
->     -- numbers
->     ++ [("10", [SqlNumber "10"])
->        ,(".1", [SqlNumber ".1"])
->        ,("5e3", [SqlNumber "5e3"])
->        ,("5e+3", [SqlNumber "5e+3"])
->        ,("5e-3", [SqlNumber "5e-3"])
->        ,("10.2", [SqlNumber "10.2"])
->        ,("10.2e7", [SqlNumber "10.2e7"])]
->     -- whitespace
->     ++ concat [[([a],[Whitespace [a]])
->                ,([a,b], [Whitespace [a,b]])]
->               | a <- " \n\t", b <- " \n\t"]
->     -- line comment
->     ++ map (\c -> (c, [LineComment c]))
->        ["--", "-- ", "-- this is a comment", "-- line com\n"]
->     -- block comment
->     ++ map (\c -> (c, [BlockComment c]))
->        ["/**/", "/* */","/* this is a comment */"
->        ,"/* this *is/ a comment */"
->        ]
-
-> ansiLexerTests :: TestItem
-> ansiLexerTests = Group "ansiLexerTests" $
->     [Group "ansi lexer token tests" $ [LexTest ansi2011 s t |  (s,t) <- ansiLexerTable]
->     ,Group "ansi generated combination lexer tests" $
->     [ LexTest ansi2011 (s ++ s1) (t ++ t1)
->     | (s,t) <- ansiLexerTable
->     , (s1,t1) <- ansiLexerTable
->     , tokenListWillPrintAndLex ansi2011 $ t ++ t1
-
->     ]
->     ,Group "ansiadhoclexertests" $
->        map (uncurry $ LexTest ansi2011)
->        [("", [])
->        ,("-- line com\nstuff", [LineComment "-- line com\n",Identifier Nothing "stuff"])
->        ] ++
->        [-- want to make sure this gives a parse error
->         LexFails ansi2011 "*/"
->         -- combinations of pipes: make sure they fail because they could be
->         -- ambiguous and it is really unclear when they are or not, and
->         -- what the result is even when they are not ambiguous
->        ,LexFails ansi2011 "|||"
->        ,LexFails ansi2011 "||||"
->        ,LexFails ansi2011 "|||||"
->        -- another user experience thing: make sure extra trailing
->        -- number chars are rejected rather than attempting to parse
->        -- if the user means to write something that is rejected by this code,
->        -- then they can use whitespace to make it clear and then it will parse
->        ,LexFails ansi2011 "12e3e4"
->        ,LexFails ansi2011 "12e3e4"
->        ,LexFails ansi2011 "12e3e4"
->        ,LexFails ansi2011 "12e3.4"
->        ,LexFails ansi2011 "12.4.5"
->        ,LexFails ansi2011 "12.4e5.6"
->        ,LexFails ansi2011 "12.4e5e7"]
->      ]
-
-todo: lexing tests
-do quickcheck testing:
-can try to generate valid tokens then check they parse
-
-same as above: can also try to pair tokens, create an accurate
-  function to say which ones can appear adjacent, and test
-
-I think this plus the explicit lists of tokens like above which do
-basic sanity + explicit edge casts will provide a high level of
-assurance.
-
-
-
-> postgresLexerTable :: [(String,[Token])]
-> postgresLexerTable =
->     -- single char symbols
->     map (\s -> ([s],[Symbol [s]])) "+-^*/%~&|?<>[]=,;():"
->     -- multi char symbols
->     ++ map (\s -> (s,[Symbol s])) [">=","<=","!=","<>","||", "::","..",":="]
->     -- generic symbols
-
->     ++ (let idens = ["a", "_a", "test", "table", "Stuff", "STUFF"]
->         -- simple identifiers
->         in map (\i -> (i, [Identifier Nothing i])) idens
->            ++ map (\i -> ("\"" ++ i ++ "\"", [Identifier (Just ("\"","\"")) i])) idens
->            -- todo: in order to make lex . pretty id, need to
->            -- preserve the case of the u
->            ++ map (\i -> ("u&\"" ++ i ++ "\"", [Identifier (Just ("u&\"","\"")) i])) idens
->            -- host param
->            ++ map (\i -> (':':i, [PrefixedVariable ':' i])) idens
->        )
->     -- positional var
->     ++ [("$1", [PositionalArg 1])]
->     -- quoted identifiers with embedded double quotes
->     ++ [("\"normal \"\" iden\"", [Identifier (Just ("\"","\"")) "normal \"\" iden"])]
->     -- strings
->     ++ [("'string'", [SqlString "'" "'" "string"])
->        ,("'normal '' quote'", [SqlString "'" "'" "normal '' quote"])
->        ,("'normalendquote '''", [SqlString "'" "'" "normalendquote ''"])
->        ,("'\n'", [SqlString "'" "'" "\n"])
->        ,("E'\n'", [SqlString "E'" "'" "\n"])
->        ,("e'this '' quote'", [SqlString "e'" "'" "this '' quote"])
->        ,("e'this \\' quote'", [SqlString "e'" "'" "this \\' quote"])
->        ,("'not this \\' quote", [SqlString "'" "'" "not this \\"
->                                 ,Whitespace " "
->                                 ,Identifier Nothing "quote"])
->        ,("$$ string 1 $$", [SqlString "$$" "$$" " string 1 "])
->        ,("$$ string $ 2 $$", [SqlString "$$" "$$" " string $ 2 "])
->        ,("$a$ $$string 3$$ $a$", [SqlString "$a$" "$a$" " $$string 3$$ "])
->        ]
->     -- csstrings
->     ++ map (\c -> (c ++ "'test'", [SqlString (c ++ "'") "'" "test"]))
->        ["n", "N","b", "B","x", "X", "u&", "e", "E"]
->     -- numbers
->     ++ [("10", [SqlNumber "10"])
->        ,(".1", [SqlNumber ".1"])
->        ,("5e3", [SqlNumber "5e3"])
->        ,("5e+3", [SqlNumber "5e+3"])
->        ,("5e-3", [SqlNumber "5e-3"])
->        ,("10.2", [SqlNumber "10.2"])
->        ,("10.2e7", [SqlNumber "10.2e7"])]
->     -- whitespace
->     ++ concat [[([a],[Whitespace [a]])
->                ,([a,b], [Whitespace [a,b]])]
->               | a <- " \n\t", b <- " \n\t"]
->     -- line comment
->     ++ map (\c -> (c, [LineComment c]))
->        ["--", "-- ", "-- this is a comment", "-- line com\n"]
->     -- block comment
->     ++ map (\c -> (c, [BlockComment c]))
->        ["/**/", "/* */","/* this is a comment */"
->        ,"/* this *is/ a comment */"
->        ]
-
-An operator name is a sequence of up to NAMEDATALEN-1 (63 by default) characters from the following list:
-
-+ - * / < > = ~ ! @ # % ^ & | ` ?
-
-There are a few restrictions on operator names, however:
--- and /* cannot appear anywhere in an operator name, since they will be taken as the start of a comment.
-
-A multiple-character operator name cannot end in + or -, unless the name also contains at least one of these characters:
-
-~ ! @ # % ^ & | ` ?
-
-todo: 'negative' tests
-symbol then --
-symbol then /*
-operators without one of the exception chars
-  followed by + or - without whitespace
-
-also: do the testing for the ansi compatibility special cases
-
-> postgresShortOperatorTable :: [(String,[Token])]
-> postgresShortOperatorTable =
->     [ (x, [Symbol x]) | x <- someValidPostgresOperators 2]
-
-
-> postgresExtraOperatorTable :: [(String,[Token])]
-> postgresExtraOperatorTable =
->     [ (x, [Symbol x]) | x <- someValidPostgresOperators 4]
-
-
-> someValidPostgresOperators :: Int -> [String]
-> someValidPostgresOperators l =
->        [ x
->        | n <- [1..l]
->        , x <- combos "+-*/<>=~!@#%^&|`?" n
->        , not ("--" `isInfixOf` x || "/*" `isInfixOf` x || "*/" `isInfixOf` x)
->        , not (last x `elem` "+-")
->          || or (map (`elem` x) "~!@#%^&|`?")
->        ]
-
-These are postgres operators, which if followed immediately by a + or
--, will lex as separate operators rather than one operator including
-the + or -.
-
-> somePostgresOpsWhichWontAddTrailingPlusMinus :: Int -> [String]
-> somePostgresOpsWhichWontAddTrailingPlusMinus l =
->        [ x
->        | n <- [1..l]
->        , x <- combos "+-*/<>=" n
->        , not ("--" `isInfixOf` x || "/*" `isInfixOf` x || "*/" `isInfixOf` x)
->        , not (last x `elem` "+-")
->        ]
-
-
-> postgresLexerTests :: TestItem
-> postgresLexerTests = Group "postgresLexerTests" $
->     [Group "postgres lexer token tests" $
->      [LexTest postgres s t | (s,t) <- postgresLexerTable]
->     ,Group "postgres generated lexer token tests" $
->      [LexTest postgres s t | (s,t) <- postgresShortOperatorTable ++ postgresExtraOperatorTable]
->     ,Group "postgres generated combination lexer tests" $
->     [ LexTest postgres (s ++ s1) (t ++ t1)
->     | (s,t) <- postgresLexerTable ++ postgresShortOperatorTable
->     , (s1,t1) <- postgresLexerTable ++ postgresShortOperatorTable
->     , tokenListWillPrintAndLex postgres $ t ++ t1
-
->     ]
->     ,Group "generated postgres edgecase lexertests" $
->      [LexTest postgres s t
->      | (s,t) <- edgeCaseCommentOps
->                 ++ edgeCasePlusMinusOps
->                 ++ edgeCasePlusMinusComments]
-
->     ,Group "adhoc postgres lexertests" $
->       -- need more tests for */ to make sure it is caught if it is in the middle of a
->       -- sequence of symbol letters
->         [LexFails postgres "*/"
->         ,LexFails postgres ":::"
->         ,LexFails postgres "::::"
->         ,LexFails postgres ":::::"
->         ,LexFails postgres "@*/"
->         ,LexFails postgres "-*/"
->         ,LexFails postgres "12e3e4"
->         ,LexFails postgres "12e3e4"
->         ,LexFails postgres "12e3e4"
->         ,LexFails postgres "12e3.4"
->         ,LexFails postgres "12.4.5"
->         ,LexFails postgres "12.4e5.6"
->         ,LexFails postgres "12.4e5e7"
->          -- special case allow this to lex to 1 .. 2
->          -- this is for 'for loops' in plpgsql
->         ,LexTest postgres "1..2" [SqlNumber "1", Symbol "..", SqlNumber "2"]]
->     ]
->  where
->    edgeCaseCommentOps =
->      [ (x ++ "/*<test*/", [Symbol x, BlockComment "/*<test*/"])
->      | x <- eccops
->      , not (last x == '*')
->      ] ++
->      [ (x ++ "--<test", [Symbol x, LineComment "--<test"])
->      | x <- eccops
->      , not (last x == '-')
->      ]
->    eccops = someValidPostgresOperators 2
->    edgeCasePlusMinusOps = concat
->      [ [ (x ++ "+", [Symbol x, Symbol "+"])
->        , (x ++ "-", [Symbol x, Symbol "-"]) ]
->      | x <- somePostgresOpsWhichWontAddTrailingPlusMinus 2
->      ]
->    edgeCasePlusMinusComments =
->      [("---", [LineComment "---"])
->      ,("+--", [Symbol "+", LineComment "--"])
->      ,("-/**/", [Symbol "-", BlockComment "/**/"])
->      ,("+/**/", [Symbol "+", BlockComment "/**/"])
->      ]
-
-
-> sqlServerLexerTests :: TestItem
-> sqlServerLexerTests = Group "sqlServerLexTests" $
->     [ LexTest sqlserver s t | (s,t) <-
->     [("@variable", [(PrefixedVariable '@' "variable")])
->     ,("#variable", [(PrefixedVariable '#' "variable")])
->     ,("[quoted identifier]", [(Identifier (Just ("[", "]")) "quoted identifier")])
->     ]]
-
-> oracleLexerTests :: TestItem
-> oracleLexerTests = Group "oracleLexTests" $
->     [] -- nothing oracle specific atm
-
-> mySqlLexerTests :: TestItem
-> mySqlLexerTests = Group "mySqlLexerTests" $
->     [ LexTest mysql s t | (s,t) <-
->     [("`quoted identifier`", [(Identifier (Just ("`", "`")) "quoted identifier")])
->     ]
->     ]
-
-> odbcLexerTests :: TestItem
-> odbcLexerTests = Group "odbcLexTests" $
->     [ LexTest sqlserver {diOdbc = True} s t | (s,t) <-
->     [("{}", [Symbol "{", Symbol "}"])
->     ]]
->     ++ [LexFails sqlserver {diOdbc = False} "{"
->        ,LexFails sqlserver {diOdbc = False} "}"]
-
-> combos :: [a] -> Int -> [[a]]
-> combos _ 0 = [[]]
-> combos l n = [ x:tl | x <- l, tl <- combos l (n - 1) ]
-
-figure out a way to do quickcheck testing:
-1. generate valid tokens and check they parse
-
-2. combine two generated tokens together for the combo testing
-
-this especially will work much better for the postgresql extensible
-operator tests which doing exhaustively takes ages and doesn't bring
-much benefit over testing a few using quickcheck.
diff --git a/tools/Language/SQL/SimpleSQL/MySQL.hs b/tools/Language/SQL/SimpleSQL/MySQL.hs
new file mode 100644
index 0000000..ffaf4eb
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/MySQL.hs
@@ -0,0 +1,42 @@
+
+-- Tests for mysql dialect parsing
+
+module Language.SQL.SimpleSQL.MySQL (mySQLTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+mySQLTests :: TestItem
+mySQLTests = Group "mysql dialect"
+    [backtickQuotes
+    ,limit]
+
+{-
+backtick quotes
+
+limit syntax
+
+[LIMIT {[offset,] row_count | row_count OFFSET offset}]
+-}
+
+backtickQuotes :: TestItem
+backtickQuotes = Group "backtickQuotes" (map (uncurry (TestScalarExpr mysql))
+    [("`test`", Iden [Name (Just ("`","`")) "test"])
+    ]
+    ++ [ParseScalarExprFails ansi2011 "`test`"]
+    )
+
+limit :: TestItem
+limit = Group "queries" ( map (uncurry (TestQueryExpr mysql))
+    [("select * from t limit 5"
+     ,sel {qeFetchFirst = Just (NumLit "5")}
+     )
+    ]
+    ++ [ParseQueryExprFails mysql "select a from t fetch next 10 rows only;"
+       ,ParseQueryExprFails ansi2011 "select * from t limit 5"]
+    )
+  where
+    sel = makeSelect
+          {qeSelectList = [(Star, Nothing)]
+          ,qeFrom = [TRSimple [Name Nothing "t"]]
+          }
diff --git a/tools/Language/SQL/SimpleSQL/MySQL.lhs b/tools/Language/SQL/SimpleSQL/MySQL.lhs
deleted file mode 100644
index 6bac55d..0000000
--- a/tools/Language/SQL/SimpleSQL/MySQL.lhs
+++ /dev/null
@@ -1,40 +0,0 @@
-
-Tests for mysql dialect parsing
-
-> module Language.SQL.SimpleSQL.MySQL (mySQLTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-> mySQLTests :: TestItem
-> mySQLTests = Group "mysql dialect"
->     [backtickQuotes
->     ,limit]
-
-backtick quotes
-
-limit syntax
-
-[LIMIT {[offset,] row_count | row_count OFFSET offset}]
-
-> backtickQuotes :: TestItem
-> backtickQuotes = Group "backtickQuotes" (map (uncurry (TestScalarExpr mysql))
->     [("`test`", Iden [Name (Just ("`","`")) "test"])
->     ]
->     ++ [ParseScalarExprFails ansi2011 "`test`"]
->     )
-
-> limit :: TestItem
-> limit = Group "queries" ( map (uncurry (TestQueryExpr mysql))
->     [("select * from t limit 5"
->      ,sel {qeFetchFirst = Just (NumLit "5")}
->      )
->     ]
->     ++ [ParseQueryExprFails mysql "select a from t fetch next 10 rows only;"
->        ,ParseQueryExprFails ansi2011 "select * from t limit 5"]
->     )
->   where
->     sel = makeSelect
->           {qeSelectList = [(Star, Nothing)]
->           ,qeFrom = [TRSimple [Name Nothing "t"]]
->           }
diff --git a/tools/Language/SQL/SimpleSQL/Odbc.hs b/tools/Language/SQL/SimpleSQL/Odbc.hs
new file mode 100644
index 0000000..b791084
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/Odbc.hs
@@ -0,0 +1,52 @@
+
+module Language.SQL.SimpleSQL.Odbc (odbcTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+odbcTests :: TestItem
+odbcTests = Group "odbc" [
+       Group "datetime" [
+           e "{d '2000-01-01'}" (OdbcLiteral OLDate "2000-01-01")
+          ,e "{t '12:00:01.1'}" (OdbcLiteral OLTime "12:00:01.1")
+          ,e "{ts '2000-01-01 12:00:01.1'}"
+               (OdbcLiteral OLTimestamp "2000-01-01 12:00:01.1")
+       ]
+       ,Group "functions" [
+             e "{fn CHARACTER_LENGTH(string_exp)}"
+             $ OdbcFunc (ap "CHARACTER_LENGTH" [iden "string_exp"])
+            ,e "{fn EXTRACT(day from t)}"
+            $ OdbcFunc (SpecialOpK [Name Nothing "extract"] (Just $ Iden [Name Nothing "day"]) [("from", Iden [Name Nothing "t"])])
+            ,e "{fn now()}"
+             $ OdbcFunc (ap "now" [])
+            ,e "{fn CONVERT('2000-01-01', SQL_DATE)}"
+             $ OdbcFunc (ap "CONVERT"
+              [StringLit "'" "'" "2000-01-01"
+              ,iden "SQL_DATE"])
+            ,e "{fn CONVERT({fn CURDATE()}, SQL_DATE)}"
+             $ OdbcFunc (ap "CONVERT"
+              [OdbcFunc (ap "CURDATE" [])
+              ,iden "SQL_DATE"])
+            ]
+       ,Group "outer join" [
+             TestQueryExpr ansi2011 {diOdbc=True}
+             "select * from {oj t1 left outer join t2 on expr}"
+             $ makeSelect
+                   {qeSelectList = [(Star,Nothing)]
+                   ,qeFrom = [TROdbc $ TRJoin (TRSimple [Name Nothing "t1"]) False JLeft (TRSimple [Name Nothing "t2"])
+                                         (Just $ JoinOn $ Iden [Name Nothing "expr"])]}]
+       ,Group "check parsing bugs" [
+             TestQueryExpr ansi2011 {diOdbc=True}
+             "select {fn CONVERT(cint,SQL_BIGINT)} from t;"
+             $ makeSelect
+                   {qeSelectList = [(OdbcFunc (ap "CONVERT"
+                                                      [iden "cint"
+                                                      ,iden "SQL_BIGINT"]), Nothing)]
+                   ,qeFrom = [TRSimple [Name Nothing "t"]]}]
+       ]
+  where
+    e = TestScalarExpr ansi2011 {diOdbc = True}
+    --tsql = ParseProcSql defaultParseFlags {pfDialect=sqlServerDialect}
+    ap n = App [Name Nothing n]
+    iden n = Iden [Name Nothing n]
+
diff --git a/tools/Language/SQL/SimpleSQL/Odbc.lhs b/tools/Language/SQL/SimpleSQL/Odbc.lhs
deleted file mode 100644
index 2da3cf4..0000000
--- a/tools/Language/SQL/SimpleSQL/Odbc.lhs
+++ /dev/null
@@ -1,52 +0,0 @@
-
-> module Language.SQL.SimpleSQL.Odbc (odbcTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-> odbcTests :: TestItem
-> odbcTests = Group "odbc" [
->        Group "datetime" [
->            e "{d '2000-01-01'}" (OdbcLiteral OLDate "2000-01-01")
->           ,e "{t '12:00:01.1'}" (OdbcLiteral OLTime "12:00:01.1")
->           ,e "{ts '2000-01-01 12:00:01.1'}"
->                (OdbcLiteral OLTimestamp "2000-01-01 12:00:01.1")
->        ]
->        ,Group "functions" [
->              e "{fn CHARACTER_LENGTH(string_exp)}"
->              $ OdbcFunc (ap "CHARACTER_LENGTH" [iden "string_exp"])
->             ,e "{fn EXTRACT(day from t)}"
->             $ OdbcFunc (SpecialOpK [Name Nothing "extract"] (Just $ Iden [Name Nothing "day"]) [("from", Iden [Name Nothing "t"])])
->             ,e "{fn now()}"
->              $ OdbcFunc (ap "now" [])
->             ,e "{fn CONVERT('2000-01-01', SQL_DATE)}"
->              $ OdbcFunc (ap "CONVERT"
->               [StringLit "'" "'" "2000-01-01"
->               ,iden "SQL_DATE"])
->             ,e "{fn CONVERT({fn CURDATE()}, SQL_DATE)}"
->              $ OdbcFunc (ap "CONVERT"
->               [OdbcFunc (ap "CURDATE" [])
->               ,iden "SQL_DATE"])
->             ]
->        ,Group "outer join" [
->              TestQueryExpr ansi2011 {diOdbc=True}
->              "select * from {oj t1 left outer join t2 on expr}"
->              $ makeSelect
->                    {qeSelectList = [(Star,Nothing)]
->                    ,qeFrom = [TROdbc $ TRJoin (TRSimple [Name Nothing "t1"]) False JLeft (TRSimple [Name Nothing "t2"])
->                                          (Just $ JoinOn $ Iden [Name Nothing "expr"])]}]
->        ,Group "check parsing bugs" [
->              TestQueryExpr ansi2011 {diOdbc=True}
->              "select {fn CONVERT(cint,SQL_BIGINT)} from t;"
->              $ makeSelect
->                    {qeSelectList = [(OdbcFunc (ap "CONVERT"
->                                                       [iden "cint"
->                                                       ,iden "SQL_BIGINT"]), Nothing)]
->                    ,qeFrom = [TRSimple [Name Nothing "t"]]}]
->        ]
->   where
->     e = TestScalarExpr ansi2011 {diOdbc = True}
->     --tsql = ParseProcSql defaultParseFlags {pfDialect=sqlServerDialect}
->     ap n = App [Name Nothing n]
->     iden n = Iden [Name Nothing n]
-
diff --git a/tools/Language/SQL/SimpleSQL/Oracle.hs b/tools/Language/SQL/SimpleSQL/Oracle.hs
new file mode 100644
index 0000000..36dbb38
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/Oracle.hs
@@ -0,0 +1,29 @@
+
+-- Tests for oracle dialect parsing
+
+module Language.SQL.SimpleSQL.Oracle (oracleTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+oracleTests :: TestItem
+oracleTests = Group "oracle dialect"
+    [oracleLobUnits]
+
+
+oracleLobUnits :: TestItem
+oracleLobUnits = Group "oracleLobUnits" (map (uncurry (TestScalarExpr oracle))
+    [("cast (a as varchar2(3 char))"
+     ,Cast (Iden [Name Nothing "a"]) (
+         PrecLengthTypeName [Name Nothing "varchar2"] 3 Nothing (Just PrecCharacters)))
+     ,("cast (a as varchar2(3 byte))"
+     ,Cast (Iden [Name Nothing "a"]) (
+         PrecLengthTypeName [Name Nothing "varchar2"] 3 Nothing (Just PrecOctets)))
+     ]
+    ++ [TestStatement oracle
+      "create table t (a varchar2(55 BYTE));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a")
+        (PrecLengthTypeName [Name Nothing "varchar2"] 55 Nothing (Just PrecOctets))
+        Nothing []]]
+    )
diff --git a/tools/Language/SQL/SimpleSQL/Oracle.lhs b/tools/Language/SQL/SimpleSQL/Oracle.lhs
deleted file mode 100644
index e9318c7..0000000
--- a/tools/Language/SQL/SimpleSQL/Oracle.lhs
+++ /dev/null
@@ -1,29 +0,0 @@
-
-Tests for oracle dialect parsing
-
-> module Language.SQL.SimpleSQL.Oracle (oracleTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-> oracleTests :: TestItem
-> oracleTests = Group "oracle dialect"
->     [oracleLobUnits]
-
-
-> oracleLobUnits :: TestItem
-> oracleLobUnits = Group "oracleLobUnits" (map (uncurry (TestScalarExpr oracle))
->     [("cast (a as varchar2(3 char))"
->      ,Cast (Iden [Name Nothing "a"]) (
->          PrecLengthTypeName [Name Nothing "varchar2"] 3 Nothing (Just PrecCharacters)))
->      ,("cast (a as varchar2(3 byte))"
->      ,Cast (Iden [Name Nothing "a"]) (
->          PrecLengthTypeName [Name Nothing "varchar2"] 3 Nothing (Just PrecOctets)))
->      ]
->     ++ [TestStatement oracle
->       "create table t (a varchar2(55 BYTE));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a")
->         (PrecLengthTypeName [Name Nothing "varchar2"] 55 Nothing (Just PrecOctets))
->         Nothing []]]
->     )
diff --git a/tools/Language/SQL/SimpleSQL/Postgres.hs b/tools/Language/SQL/SimpleSQL/Postgres.hs
new file mode 100644
index 0000000..509993c
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/Postgres.hs
@@ -0,0 +1,278 @@
+
+{-
+Here are some tests taken from the SQL in the postgres manual. Almost
+all of the postgres specific syntax has been skipped, this can be
+revisited when the dialect support is added.
+-}
+
+module Language.SQL.SimpleSQL.Postgres (postgresTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+
+postgresTests :: TestItem
+postgresTests = Group "postgresTests" $ map (ParseQueryExpr ansi2011)
+
+{-
+lexical syntax section
+
+TODO: get all the commented out tests working
+-}
+
+    [-- "SELECT 'foo'\n\
+    -- \'bar';" -- this should parse as select 'foobar'
+    -- ,
+     "SELECT name, (SELECT max(pop) FROM cities\n\
+     \ WHERE cities.state = states.name)\n\
+     \    FROM states;"
+    ,"SELECT ROW(1,2.5,'this is a test');"
+
+    ,"SELECT ROW(t.*, 42) FROM t;"
+    ,"SELECT ROW(t.f1, t.f2, 42) FROM t;"
+    ,"SELECT getf1(CAST(ROW(11,'this is a test',2.5) AS myrowtype));"
+
+    ,"SELECT ROW(1,2.5,'this is a test') = ROW(1, 3, 'not the same');"
+
+    -- table is a reserved keyword?
+    --,"SELECT ROW(table.*) IS NULL FROM table;"
+    ,"SELECT ROW(tablex.*) IS NULL FROM tablex;"
+
+    ,"SELECT true OR somefunc();"
+
+    ,"SELECT somefunc() OR true;"
+
+-- queries section
+
+    ,"SELECT * FROM t1 CROSS JOIN t2;"
+    ,"SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num;"
+    ,"SELECT * FROM t1 INNER JOIN t2 USING (num);"
+    ,"SELECT * FROM t1 NATURAL INNER JOIN t2;"
+    ,"SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num;"
+    ,"SELECT * FROM t1 LEFT JOIN t2 USING (num);"
+    ,"SELECT * FROM t1 RIGHT JOIN t2 ON t1.num = t2.num;"
+    ,"SELECT * FROM t1 FULL JOIN t2 ON t1.num = t2.num;"
+    ,"SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num AND t2.value = 'xxx';"
+    ,"SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num WHERE t2.value = 'xxx';"
+
+    ,"SELECT * FROM some_very_long_table_name s\n\
+     \JOIN another_fairly_long_name a ON s.id = a.num;"
+    ,"SELECT * FROM people AS mother JOIN people AS child\n\
+     \ ON mother.id = child.mother_id;"
+    ,"SELECT * FROM my_table AS a CROSS JOIN my_table AS b;"
+    ,"SELECT * FROM (my_table AS a CROSS JOIN my_table) AS b;"
+    ,"SELECT * FROM getfoo(1) AS t1;"
+    ,"SELECT * FROM foo\n\
+     \    WHERE foosubid IN (\n\
+     \                        SELECT foosubid\n\
+     \                        FROM getfoo(foo.fooid) z\n\
+     \                        WHERE z.fooid = foo.fooid\n\
+     \                      );"
+    {-,"SELECT *\n\
+     \    FROM dblink('dbname=mydb', 'SELECT proname, prosrc FROM pg_proc')\n\
+     \      AS t1(proname name, prosrc text)\n\
+     \    WHERE proname LIKE 'bytea%';"-} -- types in the alias??
+
+    ,"SELECT * FROM foo, LATERAL (SELECT * FROM bar WHERE bar.id = foo.bar_id) ss;"
+    ,"SELECT * FROM foo, bar WHERE bar.id = foo.bar_id;"
+
+    {-,"SELECT p1.id, p2.id, v1, v2\n\
+     \FROM polygons p1, polygons p2,\n\
+     \     LATERAL vertices(p1.poly) v1,\n\
+     \     LATERAL vertices(p2.poly) v2\n\
+     \WHERE (v1 <-> v2) < 10 AND p1.id != p2.id;"-} -- <-> operator?
+
+    {-,"SELECT p1.id, p2.id, v1, v2\n\
+     \FROM polygons p1 CROSS JOIN LATERAL vertices(p1.poly) v1,\n\
+     \     polygons p2 CROSS JOIN LATERAL vertices(p2.poly) v2\n\
+     \WHERE (v1 <-> v2) < 10 AND p1.id != p2.id;"-}
+
+    ,"SELECT m.name\n\
+     \FROM manufacturers m LEFT JOIN LATERAL get_product_names(m.id) pname ON true\n\
+     \WHERE pname IS NULL;"
+
+
+    ,"SELECT * FROM fdt WHERE c1 > 5"
+
+    ,"SELECT * FROM fdt WHERE c1 IN (1, 2, 3)"
+
+    ,"SELECT * FROM fdt WHERE c1 IN (SELECT c1 FROM t2)"
+
+    ,"SELECT * FROM fdt WHERE c1 IN (SELECT c3 FROM t2 WHERE c2 = fdt.c1 + 10)"
+
+    ,"SELECT * FROM fdt WHERE c1 BETWEEN \n\
+     \    (SELECT c3 FROM t2 WHERE c2 = fdt.c1 + 10) AND 100"
+
+    ,"SELECT * FROM fdt WHERE EXISTS (SELECT c1 FROM t2 WHERE c2 > fdt.c1)"
+
+    ,"SELECT * FROM test1;"
+
+    ,"SELECT x FROM test1 GROUP BY x;"
+    ,"SELECT x, sum(y) FROM test1 GROUP BY x;"
+    -- s.date changed to s.datex because of reserved keyword
+    -- handling, not sure if this is correct or not for ansi sql
+    ,"SELECT product_id, p.name, (sum(s.units) * p.price) AS sales\n\
+     \    FROM products p LEFT JOIN sales s USING (product_id)\n\
+     \    GROUP BY product_id, p.name, p.price;"
+
+    ,"SELECT x, sum(y) FROM test1 GROUP BY x HAVING sum(y) > 3;"
+    ,"SELECT x, sum(y) FROM test1 GROUP BY x HAVING x < 'c';"
+    ,"SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit\n\
+     \    FROM products p LEFT JOIN sales s USING (product_id)\n\
+     \    WHERE s.datex > CURRENT_DATE - INTERVAL '4 weeks'\n\
+     \    GROUP BY product_id, p.name, p.price, p.cost\n\
+     \    HAVING sum(p.price * s.units) > 5000;"
+
+    ,"SELECT a, b, c FROM t"
+
+    ,"SELECT tbl1.a, tbl2.a, tbl1.b FROM t"
+
+    ,"SELECT tbl1.*, tbl2.a FROM t"
+
+    ,"SELECT a AS value, b + c AS sum FROM t"
+
+    ,"SELECT a \"value\", b + c AS sum FROM t"
+
+    ,"SELECT DISTINCT select_list t"
+
+    ,"VALUES (1, 'one'), (2, 'two'), (3, 'three');"
+
+    ,"SELECT 1 AS column1, 'one' AS column2\n\
+     \UNION ALL\n\
+     \SELECT 2, 'two'\n\
+     \UNION ALL\n\
+     \SELECT 3, 'three';"
+
+    ,"SELECT * FROM (VALUES (1, 'one'), (2, 'two'), (3, 'three')) AS t (num,letter);"
+
+    ,"WITH regional_sales AS (\n\
+     \        SELECT region, SUM(amount) AS total_sales\n\
+     \        FROM orders\n\
+     \        GROUP BY region\n\
+     \     ), top_regions AS (\n\
+     \        SELECT region\n\
+     \        FROM regional_sales\n\
+     \        WHERE total_sales > (SELECT SUM(total_sales)/10 FROM regional_sales)\n\
+     \     )\n\
+     \SELECT region,\n\
+     \       product,\n\
+     \       SUM(quantity) AS product_units,\n\
+     \       SUM(amount) AS product_sales\n\
+     \FROM orders\n\
+     \WHERE region IN (SELECT region FROM top_regions)\n\
+     \GROUP BY region, product;"
+
+    ,"WITH RECURSIVE t(n) AS (\n\
+     \    VALUES (1)\n\
+     \  UNION ALL\n\
+     \    SELECT n+1 FROM t WHERE n < 100\n\
+     \)\n\
+     \SELECT sum(n) FROM t"
+
+    ,"WITH RECURSIVE included_parts(sub_part, part, quantity) AS (\n\
+     \    SELECT sub_part, part, quantity FROM parts WHERE part = 'our_product'\n\
+     \  UNION ALL\n\
+     \    SELECT p.sub_part, p.part, p.quantity\n\
+     \    FROM included_parts pr, parts p\n\
+     \    WHERE p.part = pr.sub_part\n\
+     \  )\n\
+     \SELECT sub_part, SUM(quantity) as total_quantity\n\
+     \FROM included_parts\n\
+     \GROUP BY sub_part"
+
+    ,"WITH RECURSIVE search_graph(id, link, data, depth) AS (\n\
+     \        SELECT g.id, g.link, g.data, 1\n\
+     \        FROM graph g\n\
+     \      UNION ALL\n\
+     \        SELECT g.id, g.link, g.data, sg.depth + 1\n\
+     \        FROM graph g, search_graph sg\n\
+     \        WHERE g.id = sg.link\n\
+     \)\n\
+     \SELECT * FROM search_graph;"
+
+    {-,"WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS (\n\
+     \        SELECT g.id, g.link, g.data, 1,\n\
+     \          ARRAY[g.id],\n\
+     \          false\n\
+     \        FROM graph g\n\
+     \      UNION ALL\n\
+     \        SELECT g.id, g.link, g.data, sg.depth + 1,\n\
+     \          path || g.id,\n\
+     \          g.id = ANY(path)\n\
+     \        FROM graph g, search_graph sg\n\
+     \        WHERE g.id = sg.link AND NOT cycle\n\
+     \)\n\
+     \SELECT * FROM search_graph;"-} -- ARRAY
+
+    {-,"WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS (\n\
+     \        SELECT g.id, g.link, g.data, 1,\n\
+     \          ARRAY[ROW(g.f1, g.f2)],\n\
+     \          false\n\
+     \        FROM graph g\n\
+     \      UNION ALL\n\
+     \        SELECT g.id, g.link, g.data, sg.depth + 1,\n\
+     \          path || ROW(g.f1, g.f2),\n\
+     \          ROW(g.f1, g.f2) = ANY(path)\n\
+     \        FROM graph g, search_graph sg\n\
+     \        WHERE g.id = sg.link AND NOT cycle\n\
+     \)\n\
+     \SELECT * FROM search_graph;"-} -- ARRAY
+
+    ,"WITH RECURSIVE t(n) AS (\n\
+     \    SELECT 1\n\
+     \  UNION ALL\n\
+     \    SELECT n+1 FROM t\n\
+     \)\n\
+     \SELECT n FROM t --LIMIT 100;" -- limit is not standard
+
+-- select page reference
+
+    ,"SELECT f.title, f.did, d.name, f.date_prod, f.kind\n\
+     \    FROM distributors d, films f\n\
+     \    WHERE f.did = d.did"
+
+    ,"SELECT kind, sum(len) AS total\n\
+     \    FROM films\n\
+     \    GROUP BY kind\n\
+     \    HAVING sum(len) < interval '5 hours';"
+
+    ,"SELECT * FROM distributors ORDER BY name;"
+    ,"SELECT * FROM distributors ORDER BY 2;"
+
+    ,"SELECT distributors.name\n\
+     \    FROM distributors\n\
+     \    WHERE distributors.name LIKE 'W%'\n\
+     \UNION\n\
+     \SELECT actors.name\n\
+     \    FROM actors\n\
+     \    WHERE actors.name LIKE 'W%';"
+
+    ,"WITH t AS (\n\
+     \    SELECT random() as x FROM generate_series(1, 3)\n\
+     \  )\n\
+     \SELECT * FROM t\n\
+     \UNION ALL\n\
+     \SELECT * FROM t"
+
+    ,"WITH RECURSIVE employee_recursive(distance, employee_name, manager_name) AS (\n\
+     \    SELECT 1, employee_name, manager_name\n\
+     \    FROM employee\n\
+     \    WHERE manager_name = 'Mary'\n\
+     \  UNION ALL\n\
+     \    SELECT er.distance + 1, e.employee_name, e.manager_name\n\
+     \    FROM employee_recursive er, employee e\n\
+     \    WHERE er.employee_name = e.manager_name\n\
+     \  )\n\
+     \SELECT distance, employee_name FROM employee_recursive;"
+
+    ,"SELECT m.name AS mname, pname\n\
+     \FROM manufacturers m, LATERAL get_product_names(m.id) pname;"
+
+    ,"SELECT m.name AS mname, pname\n\
+     \FROM manufacturers m LEFT JOIN LATERAL get_product_names(m.id) pname ON true;"
+
+    ,"SELECT 2+2;"
+
+    -- simple-sql-parser doesn't support where without from
+    -- this can be added for the postgres dialect when it is written
+    --,"SELECT distributors.* WHERE distributors.name = 'Westward';"
+
+    ]
diff --git a/tools/Language/SQL/SimpleSQL/Postgres.lhs b/tools/Language/SQL/SimpleSQL/Postgres.lhs
deleted file mode 100644
index 5a596be..0000000
--- a/tools/Language/SQL/SimpleSQL/Postgres.lhs
+++ /dev/null
@@ -1,274 +0,0 @@
-
-Here are some tests taken from the SQL in the postgres manual. Almost
-all of the postgres specific syntax has been skipped, this can be
-revisited when the dialect support is added.
-
-> module Language.SQL.SimpleSQL.Postgres (postgresTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-
-> postgresTests :: TestItem
-> postgresTests = Group "postgresTests" $ map (ParseQueryExpr ansi2011)
-
-lexical syntax section
-
-TODO: get all the commented out tests working
-
->     [-- "SELECT 'foo'\n\
->     -- \'bar';" -- this should parse as select 'foobar'
->     -- ,
->      "SELECT name, (SELECT max(pop) FROM cities\n\
->      \ WHERE cities.state = states.name)\n\
->      \    FROM states;"
->     ,"SELECT ROW(1,2.5,'this is a test');"
-
->     ,"SELECT ROW(t.*, 42) FROM t;"
->     ,"SELECT ROW(t.f1, t.f2, 42) FROM t;"
->     ,"SELECT getf1(CAST(ROW(11,'this is a test',2.5) AS myrowtype));"
-
->     ,"SELECT ROW(1,2.5,'this is a test') = ROW(1, 3, 'not the same');"
-
->     -- table is a reservered keyword?
->     --,"SELECT ROW(table.*) IS NULL FROM table;"
->     ,"SELECT ROW(tablex.*) IS NULL FROM tablex;"
-
->     ,"SELECT true OR somefunc();"
-
->     ,"SELECT somefunc() OR true;"
-
-queries section
-
->     ,"SELECT * FROM t1 CROSS JOIN t2;"
->     ,"SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num;"
->     ,"SELECT * FROM t1 INNER JOIN t2 USING (num);"
->     ,"SELECT * FROM t1 NATURAL INNER JOIN t2;"
->     ,"SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num;"
->     ,"SELECT * FROM t1 LEFT JOIN t2 USING (num);"
->     ,"SELECT * FROM t1 RIGHT JOIN t2 ON t1.num = t2.num;"
->     ,"SELECT * FROM t1 FULL JOIN t2 ON t1.num = t2.num;"
->     ,"SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num AND t2.value = 'xxx';"
->     ,"SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num WHERE t2.value = 'xxx';"
-
->     ,"SELECT * FROM some_very_long_table_name s\n\
->      \JOIN another_fairly_long_name a ON s.id = a.num;"
->     ,"SELECT * FROM people AS mother JOIN people AS child\n\
->      \ ON mother.id = child.mother_id;"
->     ,"SELECT * FROM my_table AS a CROSS JOIN my_table AS b;"
->     ,"SELECT * FROM (my_table AS a CROSS JOIN my_table) AS b;"
->     ,"SELECT * FROM getfoo(1) AS t1;"
->     ,"SELECT * FROM foo\n\
->      \    WHERE foosubid IN (\n\
->      \                        SELECT foosubid\n\
->      \                        FROM getfoo(foo.fooid) z\n\
->      \                        WHERE z.fooid = foo.fooid\n\
->      \                      );"
->     {-,"SELECT *\n\
->      \    FROM dblink('dbname=mydb', 'SELECT proname, prosrc FROM pg_proc')\n\
->      \      AS t1(proname name, prosrc text)\n\
->      \    WHERE proname LIKE 'bytea%';"-} -- types in the alias??
-
->     ,"SELECT * FROM foo, LATERAL (SELECT * FROM bar WHERE bar.id = foo.bar_id) ss;"
->     ,"SELECT * FROM foo, bar WHERE bar.id = foo.bar_id;"
-
->     {-,"SELECT p1.id, p2.id, v1, v2\n\
->      \FROM polygons p1, polygons p2,\n\
->      \     LATERAL vertices(p1.poly) v1,\n\
->      \     LATERAL vertices(p2.poly) v2\n\
->      \WHERE (v1 <-> v2) < 10 AND p1.id != p2.id;"-} -- <-> operator?
-
->     {-,"SELECT p1.id, p2.id, v1, v2\n\
->      \FROM polygons p1 CROSS JOIN LATERAL vertices(p1.poly) v1,\n\
->      \     polygons p2 CROSS JOIN LATERAL vertices(p2.poly) v2\n\
->      \WHERE (v1 <-> v2) < 10 AND p1.id != p2.id;"-}
-
->     ,"SELECT m.name\n\
->      \FROM manufacturers m LEFT JOIN LATERAL get_product_names(m.id) pname ON true\n\
->      \WHERE pname IS NULL;"
-
-
->     ,"SELECT * FROM fdt WHERE c1 > 5"
-
->     ,"SELECT * FROM fdt WHERE c1 IN (1, 2, 3)"
-
->     ,"SELECT * FROM fdt WHERE c1 IN (SELECT c1 FROM t2)"
-
->     ,"SELECT * FROM fdt WHERE c1 IN (SELECT c3 FROM t2 WHERE c2 = fdt.c1 + 10)"
-
->     ,"SELECT * FROM fdt WHERE c1 BETWEEN \n\
->      \    (SELECT c3 FROM t2 WHERE c2 = fdt.c1 + 10) AND 100"
-
->     ,"SELECT * FROM fdt WHERE EXISTS (SELECT c1 FROM t2 WHERE c2 > fdt.c1)"
-
->     ,"SELECT * FROM test1;"
-
->     ,"SELECT x FROM test1 GROUP BY x;"
->     ,"SELECT x, sum(y) FROM test1 GROUP BY x;"
->     -- s.date changed to s.datex because of reserved keyword
->     -- handling, not sure if this is correct or not for ansi sql
->     ,"SELECT product_id, p.name, (sum(s.units) * p.price) AS sales\n\
->      \    FROM products p LEFT JOIN sales s USING (product_id)\n\
->      \    GROUP BY product_id, p.name, p.price;"
-
->     ,"SELECT x, sum(y) FROM test1 GROUP BY x HAVING sum(y) > 3;"
->     ,"SELECT x, sum(y) FROM test1 GROUP BY x HAVING x < 'c';"
->     ,"SELECT product_id, p.name, (sum(s.units) * (p.price - p.cost)) AS profit\n\
->      \    FROM products p LEFT JOIN sales s USING (product_id)\n\
->      \    WHERE s.datex > CURRENT_DATE - INTERVAL '4 weeks'\n\
->      \    GROUP BY product_id, p.name, p.price, p.cost\n\
->      \    HAVING sum(p.price * s.units) > 5000;"
-
->     ,"SELECT a, b, c FROM t"
-
->     ,"SELECT tbl1.a, tbl2.a, tbl1.b FROM t"
-
->     ,"SELECT tbl1.*, tbl2.a FROM t"
-
->     ,"SELECT a AS value, b + c AS sum FROM t"
-
->     ,"SELECT a \"value\", b + c AS sum FROM t"
-
->     ,"SELECT DISTINCT select_list t"
-
->     ,"VALUES (1, 'one'), (2, 'two'), (3, 'three');"
-
->     ,"SELECT 1 AS column1, 'one' AS column2\n\
->      \UNION ALL\n\
->      \SELECT 2, 'two'\n\
->      \UNION ALL\n\
->      \SELECT 3, 'three';"
-
->     ,"SELECT * FROM (VALUES (1, 'one'), (2, 'two'), (3, 'three')) AS t (num,letter);"
-
->     ,"WITH regional_sales AS (\n\
->      \        SELECT region, SUM(amount) AS total_sales\n\
->      \        FROM orders\n\
->      \        GROUP BY region\n\
->      \     ), top_regions AS (\n\
->      \        SELECT region\n\
->      \        FROM regional_sales\n\
->      \        WHERE total_sales > (SELECT SUM(total_sales)/10 FROM regional_sales)\n\
->      \     )\n\
->      \SELECT region,\n\
->      \       product,\n\
->      \       SUM(quantity) AS product_units,\n\
->      \       SUM(amount) AS product_sales\n\
->      \FROM orders\n\
->      \WHERE region IN (SELECT region FROM top_regions)\n\
->      \GROUP BY region, product;"
-
->     ,"WITH RECURSIVE t(n) AS (\n\
->      \    VALUES (1)\n\
->      \  UNION ALL\n\
->      \    SELECT n+1 FROM t WHERE n < 100\n\
->      \)\n\
->      \SELECT sum(n) FROM t"
-
->     ,"WITH RECURSIVE included_parts(sub_part, part, quantity) AS (\n\
->      \    SELECT sub_part, part, quantity FROM parts WHERE part = 'our_product'\n\
->      \  UNION ALL\n\
->      \    SELECT p.sub_part, p.part, p.quantity\n\
->      \    FROM included_parts pr, parts p\n\
->      \    WHERE p.part = pr.sub_part\n\
->      \  )\n\
->      \SELECT sub_part, SUM(quantity) as total_quantity\n\
->      \FROM included_parts\n\
->      \GROUP BY sub_part"
-
->     ,"WITH RECURSIVE search_graph(id, link, data, depth) AS (\n\
->      \        SELECT g.id, g.link, g.data, 1\n\
->      \        FROM graph g\n\
->      \      UNION ALL\n\
->      \        SELECT g.id, g.link, g.data, sg.depth + 1\n\
->      \        FROM graph g, search_graph sg\n\
->      \        WHERE g.id = sg.link\n\
->      \)\n\
->      \SELECT * FROM search_graph;"
-
->     {-,"WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS (\n\
->      \        SELECT g.id, g.link, g.data, 1,\n\
->      \          ARRAY[g.id],\n\
->      \          false\n\
->      \        FROM graph g\n\
->      \      UNION ALL\n\
->      \        SELECT g.id, g.link, g.data, sg.depth + 1,\n\
->      \          path || g.id,\n\
->      \          g.id = ANY(path)\n\
->      \        FROM graph g, search_graph sg\n\
->      \        WHERE g.id = sg.link AND NOT cycle\n\
->      \)\n\
->      \SELECT * FROM search_graph;"-} -- ARRAY
-
->     {-,"WITH RECURSIVE search_graph(id, link, data, depth, path, cycle) AS (\n\
->      \        SELECT g.id, g.link, g.data, 1,\n\
->      \          ARRAY[ROW(g.f1, g.f2)],\n\
->      \          false\n\
->      \        FROM graph g\n\
->      \      UNION ALL\n\
->      \        SELECT g.id, g.link, g.data, sg.depth + 1,\n\
->      \          path || ROW(g.f1, g.f2),\n\
->      \          ROW(g.f1, g.f2) = ANY(path)\n\
->      \        FROM graph g, search_graph sg\n\
->      \        WHERE g.id = sg.link AND NOT cycle\n\
->      \)\n\
->      \SELECT * FROM search_graph;"-} -- ARRAY
-
->     ,"WITH RECURSIVE t(n) AS (\n\
->      \    SELECT 1\n\
->      \  UNION ALL\n\
->      \    SELECT n+1 FROM t\n\
->      \)\n\
->      \SELECT n FROM t --LIMIT 100;" -- limit is not standard
-
-select page reference
-
->     ,"SELECT f.title, f.did, d.name, f.date_prod, f.kind\n\
->      \    FROM distributors d, films f\n\
->      \    WHERE f.did = d.did"
-
->     ,"SELECT kind, sum(len) AS total\n\
->      \    FROM films\n\
->      \    GROUP BY kind\n\
->      \    HAVING sum(len) < interval '5 hours';"
-
->     ,"SELECT * FROM distributors ORDER BY name;"
->     ,"SELECT * FROM distributors ORDER BY 2;"
-
->     ,"SELECT distributors.name\n\
->      \    FROM distributors\n\
->      \    WHERE distributors.name LIKE 'W%'\n\
->      \UNION\n\
->      \SELECT actors.name\n\
->      \    FROM actors\n\
->      \    WHERE actors.name LIKE 'W%';"
-
->     ,"WITH t AS (\n\
->      \    SELECT random() as x FROM generate_series(1, 3)\n\
->      \  )\n\
->      \SELECT * FROM t\n\
->      \UNION ALL\n\
->      \SELECT * FROM t"
-
->     ,"WITH RECURSIVE employee_recursive(distance, employee_name, manager_name) AS (\n\
->      \    SELECT 1, employee_name, manager_name\n\
->      \    FROM employee\n\
->      \    WHERE manager_name = 'Mary'\n\
->      \  UNION ALL\n\
->      \    SELECT er.distance + 1, e.employee_name, e.manager_name\n\
->      \    FROM employee_recursive er, employee e\n\
->      \    WHERE er.employee_name = e.manager_name\n\
->      \  )\n\
->      \SELECT distance, employee_name FROM employee_recursive;"
-
->     ,"SELECT m.name AS mname, pname\n\
->      \FROM manufacturers m, LATERAL get_product_names(m.id) pname;"
-
->     ,"SELECT m.name AS mname, pname\n\
->      \FROM manufacturers m LEFT JOIN LATERAL get_product_names(m.id) pname ON true;"
-
->     ,"SELECT 2+2;"
-
->     -- simple-sql-parser doesn't support where without from
->     -- this can be added for the postgres dialect when it is written
->     --,"SELECT distributors.* WHERE distributors.name = 'Westward';"
-
->     ]
diff --git a/tools/Language/SQL/SimpleSQL/QueryExprComponents.hs b/tools/Language/SQL/SimpleSQL/QueryExprComponents.hs
new file mode 100644
index 0000000..a9d0c1f
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/QueryExprComponents.hs
@@ -0,0 +1,211 @@
+
+{-
+These are the tests for the query expression components apart from the
+table refs which are in a separate file.
+
+
+These are a few misc tests which don't fit anywhere else.
+-}
+
+module Language.SQL.SimpleSQL.QueryExprComponents (queryExprComponentTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+
+queryExprComponentTests :: TestItem
+queryExprComponentTests = Group "queryExprComponentTests"
+    [duplicates
+    ,selectLists
+    ,whereClause
+    ,having
+    ,orderBy
+    ,offsetFetch
+    ,combos
+    ,withQueries
+    ,values
+    ,tables
+    ]
+
+
+
+duplicates :: TestItem
+duplicates = Group "duplicates" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a from t" ,ms SQDefault)
+    ,("select all a from t" ,ms All)
+    ,("select distinct a from t", ms Distinct)
+    ]
+ where
+   ms d = makeSelect
+          {qeSetQuantifier = d
+          ,qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+          ,qeFrom = [TRSimple [Name Nothing "t"]]}
+
+selectLists :: TestItem
+selectLists = Group "selectLists" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select 1",
+      makeSelect {qeSelectList = [(NumLit "1",Nothing)]})
+
+    ,("select a"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]})
+
+    ,("select a,b"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
+                                 ,(Iden [Name Nothing "b"],Nothing)]})
+
+    ,("select 1+2,3+4"
+     ,makeSelect {qeSelectList =
+                     [(BinOp (NumLit "1") [Name Nothing "+"] (NumLit "2"),Nothing)
+                     ,(BinOp (NumLit "3") [Name Nothing "+"] (NumLit "4"),Nothing)]})
+
+    ,("select a as a, /*comment*/ b as b"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "a")
+                                 ,(Iden [Name Nothing "b"], Just $ Name Nothing "b")]})
+
+    ,("select a a, b b"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "a")
+                                 ,(Iden [Name Nothing "b"], Just $ Name Nothing "b")]})
+
+    ,("select a + b * c"
+     ,makeSelect {qeSelectList =
+      [(BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"]
+        (BinOp (Iden [Name Nothing "b"]) [Name Nothing "*"] (Iden [Name Nothing "c"]))
+       ,Nothing)]})
+
+    ]
+
+whereClause :: TestItem
+whereClause = Group "whereClause" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a from t where a = 5"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+                 ,qeFrom = [TRSimple [Name Nothing "t"]]
+                 ,qeWhere = Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "5")})
+    ]
+
+having :: TestItem
+having = Group "having" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a,sum(b) from t group by a having sum(b) > 5"
+     ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
+                                 ,(App [Name Nothing "sum"] [Iden [Name Nothing "b"]],Nothing)]
+                 ,qeFrom = [TRSimple [Name Nothing "t"]]
+                 ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
+                 ,qeHaving = Just $ BinOp (App [Name Nothing "sum"] [Iden [Name Nothing "b"]])
+                                          [Name Nothing ">"] (NumLit "5")
+                 })
+    ]
+
+orderBy :: TestItem
+orderBy = Group "orderBy" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a from t order by a"
+     ,ms [SortSpec (Iden [Name Nothing "a"]) DirDefault NullsOrderDefault])
+
+    ,("select a from t order by a, b"
+     ,ms [SortSpec (Iden [Name Nothing "a"]) DirDefault NullsOrderDefault
+         ,SortSpec (Iden [Name Nothing "b"]) DirDefault NullsOrderDefault])
+
+    ,("select a from t order by a asc"
+     ,ms [SortSpec (Iden [Name Nothing "a"]) Asc NullsOrderDefault])
+
+    ,("select a from t order by a desc, b desc"
+     ,ms [SortSpec (Iden [Name Nothing "a"]) Desc NullsOrderDefault
+         ,SortSpec (Iden [Name Nothing "b"]) Desc NullsOrderDefault])
+
+    ,("select a from t order by a desc nulls first, b desc nulls last"
+     ,ms [SortSpec (Iden [Name Nothing "a"]) Desc NullsFirst
+         ,SortSpec (Iden [Name Nothing "b"]) Desc NullsLast])
+
+    ]
+  where
+    ms o = makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+                      ,qeFrom = [TRSimple [Name Nothing "t"]]
+                      ,qeOrderBy = o}
+
+offsetFetch :: TestItem
+offsetFetch = Group "offsetFetch" $ map (uncurry (TestQueryExpr ansi2011))
+    [-- ansi standard
+     ("select a from t offset 5 rows fetch next 10 rows only"
+     ,ms (Just $ NumLit "5") (Just $ NumLit "10"))
+    ,("select a from t offset 5 rows;"
+     ,ms (Just $ NumLit "5") Nothing)
+    ,("select a from t fetch next 10 row only;"
+     ,ms Nothing (Just $ NumLit "10"))
+    ,("select a from t offset 5 row fetch first 10 row only"
+     ,ms (Just $ NumLit "5") (Just $ NumLit "10"))
+     -- postgres: disabled, will add back when postgres
+     -- dialect is added
+    --,("select a from t limit 10 offset 5"
+    -- ,ms (Just $ NumLit "5") (Just $ NumLit "10"))
+    ]
+  where
+    ms o l = makeSelect
+             {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+             ,qeFrom = [TRSimple [Name Nothing "t"]]
+             ,qeOffset = o
+             ,qeFetchFirst = l}
+
+combos :: TestItem
+combos = Group "combos" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a from t union select b from u"
+     ,QueryExprSetOp ms1 Union SQDefault Respectively ms2)
+
+    ,("select a from t intersect select b from u"
+     ,QueryExprSetOp ms1 Intersect SQDefault Respectively ms2)
+
+    ,("select a from t except all select b from u"
+     ,QueryExprSetOp ms1 Except All Respectively ms2)
+
+    ,("select a from t union distinct corresponding \
+      \select b from u"
+     ,QueryExprSetOp ms1 Union Distinct Corresponding ms2)
+
+    ,("select a from t union select a from t union select a from t"
+     -- TODO: union should be left associative. I think the others also
+     -- so this needs to be fixed (new optionSuffix variation which
+     -- handles this)
+     ,QueryExprSetOp ms1 Union SQDefault Respectively
+       (QueryExprSetOp ms1 Union SQDefault Respectively ms1))
+    ]
+  where
+    ms1 = makeSelect
+          {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+          ,qeFrom = [TRSimple [Name Nothing "t"]]}
+    ms2 = makeSelect
+          {qeSelectList = [(Iden [Name Nothing "b"],Nothing)]
+          ,qeFrom = [TRSimple [Name Nothing "u"]]}
+
+
+withQueries :: TestItem
+withQueries = Group "with queries" $ map (uncurry (TestQueryExpr ansi2011))
+    [("with u as (select a from t) select a from u"
+     ,With False [(Alias (Name Nothing "u") Nothing, ms1)] ms2)
+
+    ,("with u(b) as (select a from t) select a from u"
+     ,With False [(Alias (Name Nothing "u") (Just [Name Nothing "b"]), ms1)] ms2)
+
+    ,("with x as (select a from t),\n\
+      \     u as (select a from x)\n\
+      \select a from u"
+     ,With False [(Alias (Name Nothing "x") Nothing, ms1), (Alias (Name Nothing "u") Nothing,ms3)] ms2)
+
+    ,("with recursive u as (select a from t) select a from u"
+     ,With True [(Alias (Name Nothing "u") Nothing, ms1)] ms2)
+    ]
+ where
+   ms c t = makeSelect
+            {qeSelectList = [(Iden [Name Nothing c],Nothing)]
+            ,qeFrom = [TRSimple [Name Nothing t]]}
+   ms1 = ms "a" "t"
+   ms2 = ms "a" "u"
+   ms3 = ms "a" "x"
+
+values :: TestItem
+values = Group "values" $ map (uncurry (TestQueryExpr ansi2011))
+    [("values (1,2),(3,4)"
+      ,Values [[NumLit "1", NumLit "2"]
+              ,[NumLit "3", NumLit "4"]])
+    ]
+
+tables :: TestItem
+tables = Group "tables" $ map (uncurry (TestQueryExpr ansi2011))
+    [("table tbl", Table [Name Nothing "tbl"])
+    ]
diff --git a/tools/Language/SQL/SimpleSQL/QueryExprComponents.lhs b/tools/Language/SQL/SimpleSQL/QueryExprComponents.lhs
deleted file mode 100644
index 2f947d9..0000000
--- a/tools/Language/SQL/SimpleSQL/QueryExprComponents.lhs
+++ /dev/null
@@ -1,209 +0,0 @@
-
-These are the tests for the query expression components apart from the
-table refs which are in a separate file.
-
-
-These are a few misc tests which don't fit anywhere else.
-
-> module Language.SQL.SimpleSQL.QueryExprComponents (queryExprComponentTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-
-> queryExprComponentTests :: TestItem
-> queryExprComponentTests = Group "queryExprComponentTests"
->     [duplicates
->     ,selectLists
->     ,whereClause
->     ,having
->     ,orderBy
->     ,offsetFetch
->     ,combos
->     ,withQueries
->     ,values
->     ,tables
->     ]
-
-
-
-> duplicates :: TestItem
-> duplicates = Group "duplicates" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a from t" ,ms SQDefault)
->     ,("select all a from t" ,ms All)
->     ,("select distinct a from t", ms Distinct)
->     ]
->  where
->    ms d = makeSelect
->           {qeSetQuantifier = d
->           ,qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->           ,qeFrom = [TRSimple [Name Nothing "t"]]}
-
-> selectLists :: TestItem
-> selectLists = Group "selectLists" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select 1",
->       makeSelect {qeSelectList = [(NumLit "1",Nothing)]})
-
->     ,("select a"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]})
-
->     ,("select a,b"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
->                                  ,(Iden [Name Nothing "b"],Nothing)]})
-
->     ,("select 1+2,3+4"
->      ,makeSelect {qeSelectList =
->                      [(BinOp (NumLit "1") [Name Nothing "+"] (NumLit "2"),Nothing)
->                      ,(BinOp (NumLit "3") [Name Nothing "+"] (NumLit "4"),Nothing)]})
-
->     ,("select a as a, /*comment*/ b as b"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "a")
->                                  ,(Iden [Name Nothing "b"], Just $ Name Nothing "b")]})
-
->     ,("select a a, b b"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "a")
->                                  ,(Iden [Name Nothing "b"], Just $ Name Nothing "b")]})
-
->     ,("select a + b * c"
->      ,makeSelect {qeSelectList =
->       [(BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"]
->         (BinOp (Iden [Name Nothing "b"]) [Name Nothing "*"] (Iden [Name Nothing "c"]))
->        ,Nothing)]})
-
->     ]
-
-> whereClause :: TestItem
-> whereClause = Group "whereClause" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a from t where a = 5"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->                  ,qeFrom = [TRSimple [Name Nothing "t"]]
->                  ,qeWhere = Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "5")})
->     ]
-
-> having :: TestItem
-> having = Group "having" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a,sum(b) from t group by a having sum(b) > 5"
->      ,makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)
->                                  ,(App [Name Nothing "sum"] [Iden [Name Nothing "b"]],Nothing)]
->                  ,qeFrom = [TRSimple [Name Nothing "t"]]
->                  ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
->                  ,qeHaving = Just $ BinOp (App [Name Nothing "sum"] [Iden [Name Nothing "b"]])
->                                           [Name Nothing ">"] (NumLit "5")
->                  })
->     ]
-
-> orderBy :: TestItem
-> orderBy = Group "orderBy" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a from t order by a"
->      ,ms [SortSpec (Iden [Name Nothing "a"]) DirDefault NullsOrderDefault])
-
->     ,("select a from t order by a, b"
->      ,ms [SortSpec (Iden [Name Nothing "a"]) DirDefault NullsOrderDefault
->          ,SortSpec (Iden [Name Nothing "b"]) DirDefault NullsOrderDefault])
-
->     ,("select a from t order by a asc"
->      ,ms [SortSpec (Iden [Name Nothing "a"]) Asc NullsOrderDefault])
-
->     ,("select a from t order by a desc, b desc"
->      ,ms [SortSpec (Iden [Name Nothing "a"]) Desc NullsOrderDefault
->          ,SortSpec (Iden [Name Nothing "b"]) Desc NullsOrderDefault])
-
->     ,("select a from t order by a desc nulls first, b desc nulls last"
->      ,ms [SortSpec (Iden [Name Nothing "a"]) Desc NullsFirst
->          ,SortSpec (Iden [Name Nothing "b"]) Desc NullsLast])
-
->     ]
->   where
->     ms o = makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->                       ,qeFrom = [TRSimple [Name Nothing "t"]]
->                       ,qeOrderBy = o}
-
-> offsetFetch :: TestItem
-> offsetFetch = Group "offsetFetch" $ map (uncurry (TestQueryExpr ansi2011))
->     [-- ansi standard
->      ("select a from t offset 5 rows fetch next 10 rows only"
->      ,ms (Just $ NumLit "5") (Just $ NumLit "10"))
->     ,("select a from t offset 5 rows;"
->      ,ms (Just $ NumLit "5") Nothing)
->     ,("select a from t fetch next 10 row only;"
->      ,ms Nothing (Just $ NumLit "10"))
->     ,("select a from t offset 5 row fetch first 10 row only"
->      ,ms (Just $ NumLit "5") (Just $ NumLit "10"))
->      -- postgres: disabled, will add back when postgres
->      -- dialect is added
->     --,("select a from t limit 10 offset 5"
->     -- ,ms (Just $ NumLit "5") (Just $ NumLit "10"))
->     ]
->   where
->     ms o l = makeSelect
->              {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->              ,qeFrom = [TRSimple [Name Nothing "t"]]
->              ,qeOffset = o
->              ,qeFetchFirst = l}
-
-> combos :: TestItem
-> combos = Group "combos" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a from t union select b from u"
->      ,QueryExprSetOp ms1 Union SQDefault Respectively ms2)
-
->     ,("select a from t intersect select b from u"
->      ,QueryExprSetOp ms1 Intersect SQDefault Respectively ms2)
-
->     ,("select a from t except all select b from u"
->      ,QueryExprSetOp ms1 Except All Respectively ms2)
-
->     ,("select a from t union distinct corresponding \
->       \select b from u"
->      ,QueryExprSetOp ms1 Union Distinct Corresponding ms2)
-
->     ,("select a from t union select a from t union select a from t"
->      -- TODO: union should be left associative. I think the others also
->      -- so this needs to be fixed (new optionSuffix variation which
->      -- handles this)
->      ,QueryExprSetOp ms1 Union SQDefault Respectively
->        (QueryExprSetOp ms1 Union SQDefault Respectively ms1))
->     ]
->   where
->     ms1 = makeSelect
->           {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->           ,qeFrom = [TRSimple [Name Nothing "t"]]}
->     ms2 = makeSelect
->           {qeSelectList = [(Iden [Name Nothing "b"],Nothing)]
->           ,qeFrom = [TRSimple [Name Nothing "u"]]}
-
-
-> withQueries :: TestItem
-> withQueries = Group "with queries" $ map (uncurry (TestQueryExpr ansi2011))
->     [("with u as (select a from t) select a from u"
->      ,With False [(Alias (Name Nothing "u") Nothing, ms1)] ms2)
-
->     ,("with u(b) as (select a from t) select a from u"
->      ,With False [(Alias (Name Nothing "u") (Just [Name Nothing "b"]), ms1)] ms2)
-
->     ,("with x as (select a from t),\n\
->       \     u as (select a from x)\n\
->       \select a from u"
->      ,With False [(Alias (Name Nothing "x") Nothing, ms1), (Alias (Name Nothing "u") Nothing,ms3)] ms2)
-
->     ,("with recursive u as (select a from t) select a from u"
->      ,With True [(Alias (Name Nothing "u") Nothing, ms1)] ms2)
->     ]
->  where
->    ms c t = makeSelect
->             {qeSelectList = [(Iden [Name Nothing c],Nothing)]
->             ,qeFrom = [TRSimple [Name Nothing t]]}
->    ms1 = ms "a" "t"
->    ms2 = ms "a" "u"
->    ms3 = ms "a" "x"
-
-> values :: TestItem
-> values = Group "values" $ map (uncurry (TestQueryExpr ansi2011))
->     [("values (1,2),(3,4)"
->       ,Values [[NumLit "1", NumLit "2"]
->               ,[NumLit "3", NumLit "4"]])
->     ]
-
-> tables :: TestItem
-> tables = Group "tables" $ map (uncurry (TestQueryExpr ansi2011))
->     [("table tbl", Table [Name Nothing "tbl"])
->     ]
diff --git a/tools/Language/SQL/SimpleSQL/QueryExprs.hs b/tools/Language/SQL/SimpleSQL/QueryExprs.hs
new file mode 100644
index 0000000..b2e23f1
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/QueryExprs.hs
@@ -0,0 +1,26 @@
+
+{-
+These are the tests for the queryExprs parsing which parses multiple
+query expressions from one string.
+-}
+
+module Language.SQL.SimpleSQL.QueryExprs (queryExprsTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+queryExprsTests :: TestItem
+queryExprsTests = Group "query exprs" $ map (uncurry (TestStatements ansi2011))
+    [("select 1",[ms])
+    ,("select 1;",[ms])
+    ,("select 1;select 1",[ms,ms])
+    ,(" select 1;select 1; ",[ms,ms])
+    ,("SELECT CURRENT_TIMESTAMP;"
+     ,[SelectStatement $ makeSelect
+      {qeSelectList = [(Iden [Name Nothing "CURRENT_TIMESTAMP"],Nothing)]}])
+    ,("SELECT \"CURRENT_TIMESTAMP\";"
+     ,[SelectStatement $ makeSelect
+      {qeSelectList = [(Iden [Name (Just ("\"","\"")) "CURRENT_TIMESTAMP"],Nothing)]}])
+    ]
+  where
+    ms = SelectStatement $ makeSelect {qeSelectList = [(NumLit "1",Nothing)]}
diff --git a/tools/Language/SQL/SimpleSQL/QueryExprs.lhs b/tools/Language/SQL/SimpleSQL/QueryExprs.lhs
deleted file mode 100644
index deb25cd..0000000
--- a/tools/Language/SQL/SimpleSQL/QueryExprs.lhs
+++ /dev/null
@@ -1,24 +0,0 @@
-
-These are the tests for the queryExprs parsing which parses multiple
-query expressions from one string.
-
-> module Language.SQL.SimpleSQL.QueryExprs (queryExprsTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-> queryExprsTests :: TestItem
-> queryExprsTests = Group "query exprs" $ map (uncurry (TestStatements ansi2011))
->     [("select 1",[ms])
->     ,("select 1;",[ms])
->     ,("select 1;select 1",[ms,ms])
->     ,(" select 1;select 1; ",[ms,ms])
->     ,("SELECT CURRENT_TIMESTAMP;"
->      ,[SelectStatement $ makeSelect
->       {qeSelectList = [(Iden [Name Nothing "CURRENT_TIMESTAMP"],Nothing)]}])
->     ,("SELECT \"CURRENT_TIMESTAMP\";"
->      ,[SelectStatement $ makeSelect
->       {qeSelectList = [(Iden [Name (Just ("\"","\"")) "CURRENT_TIMESTAMP"],Nothing)]}])
->     ]
->   where
->     ms = SelectStatement $ makeSelect {qeSelectList = [(NumLit "1",Nothing)]}
diff --git a/tools/Language/SQL/SimpleSQL/SQL2011AccessControl.hs b/tools/Language/SQL/SimpleSQL/SQL2011AccessControl.hs
new file mode 100644
index 0000000..5ebb8c0
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/SQL2011AccessControl.hs
@@ -0,0 +1,329 @@
+
+{-
+Section 12 in Foundation
+
+grant, etc
+-}
+
+
+module Language.SQL.SimpleSQL.SQL2011AccessControl (sql2011AccessControlTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+sql2011AccessControlTests :: TestItem
+sql2011AccessControlTests = Group "sql 2011 access control tests" [
+
+{-
+12 Access control
+
+12.1 <grant statement>
+
+<grant statement> ::=
+    <grant privilege statement>
+  | <grant role statement>
+
+12.2 <grant privilege statement>
+
+<grant privilege statement> ::=
+  GRANT <privileges> TO <grantee> [ { <comma> <grantee> }... ]
+      [ WITH HIERARCHY OPTION ]
+      [ WITH GRANT OPTION ]
+      [ GRANTED BY <grantor> ]
+
+12.3 <privileges>
+<privileges> ::=
+  <object privileges> ON <object name>
+
+<object name> ::=
+      [ TABLE ] <table name>
+  |   DOMAIN <domain name>
+  |   COLLATION <collation name>
+  |   CHARACTER SET <character set name>
+  |   TRANSLATION <transliteration name>
+  |   TYPE <schema-resolved user-defined type name>
+  |   SEQUENCE <sequence generator name>
+  |   <specific routine designator>
+
+<object privileges> ::=
+    ALL PRIVILEGES
+  | <action> [ { <comma> <action> }... ]
+
+<action> ::=
+    SELECT
+  | SELECT <left paren> <privilege column list> <right paren>
+  | SELECT <left paren> <privilege method list> <right paren>
+  | DELETE
+  | INSERT [ <left paren> <privilege column list> <right paren> ]
+  | UPDATE [ <left paren> <privilege column list> <right paren> ]
+  | REFERENCES [ <left paren> <privilege column list> <right paren> ]
+  | USAGE
+  | TRIGGER
+  | UNDER
+  | EXECUTE
+
+<privilege method list> ::=
+  <specific routine designator> [ { <comma> <specific routine designator> }... ]
+
+<privilege column list> ::=
+  <column name list>
+
+<grantee> ::=
+    PUBLIC
+  | <authorization identifier>
+
+<grantor> ::=
+    CURRENT_USER
+  | CURRENT_ROLE
+-}
+
+     (TestStatement ansi2011
+      "grant all privileges on tbl1 to role1"
+     $ GrantPrivilege [PrivAll]
+       (PrivTable [Name Nothing "tbl1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+
+    ,(TestStatement ansi2011
+      "grant all privileges on tbl1 to role1,role2"
+     $ GrantPrivilege [PrivAll]
+       (PrivTable [Name Nothing "tbl1"])
+       [Name Nothing "role1",Name Nothing "role2"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant all privileges on tbl1 to role1 with grant option"
+     $ GrantPrivilege [PrivAll]
+       (PrivTable [Name Nothing "tbl1"])
+       [Name Nothing "role1"] WithGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant all privileges on table tbl1 to role1"
+     $ GrantPrivilege [PrivAll]
+       (PrivTable [Name Nothing "tbl1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant all privileges on domain mydom to role1"
+     $ GrantPrivilege [PrivAll]
+       (PrivDomain [Name Nothing "mydom"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant all privileges on type t1 to role1"
+     $ GrantPrivilege [PrivAll]
+       (PrivType [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant all privileges on sequence s1 to role1"
+     $ GrantPrivilege [PrivAll]
+       (PrivSequence [Name Nothing "s1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+
+    ,(TestStatement ansi2011
+      "grant select on table t1 to role1"
+     $ GrantPrivilege [PrivSelect []]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant select(a,b) on table t1 to role1"
+     $ GrantPrivilege [PrivSelect [Name Nothing "a", Name Nothing "b"]]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant delete on table t1 to role1"
+     $ GrantPrivilege [PrivDelete]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant insert on table t1 to role1"
+     $ GrantPrivilege [PrivInsert []]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant insert(a,b) on table t1 to role1"
+     $ GrantPrivilege [PrivInsert [Name Nothing "a", Name Nothing "b"]]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant update on table t1 to role1"
+     $ GrantPrivilege [PrivUpdate []]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant update(a,b) on table t1 to role1"
+     $ GrantPrivilege [PrivUpdate [Name Nothing "a", Name Nothing "b"]]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant references on table t1 to role1"
+     $ GrantPrivilege [PrivReferences []]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant references(a,b) on table t1 to role1"
+     $ GrantPrivilege [PrivReferences [Name Nothing "a", Name Nothing "b"]]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant usage on table t1 to role1"
+     $ GrantPrivilege [PrivUsage]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant trigger on table t1 to role1"
+     $ GrantPrivilege [PrivTrigger]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+
+    ,(TestStatement ansi2011
+      "grant execute on specific function f to role1"
+     $ GrantPrivilege [PrivExecute]
+       (PrivFunction [Name Nothing "f"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+    ,(TestStatement ansi2011
+      "grant select,delete on table t1 to role1"
+     $ GrantPrivilege [PrivSelect [], PrivDelete]
+       (PrivTable [Name Nothing "t1"])
+       [Name Nothing "role1"] WithoutGrantOption)
+
+{-
+skipping for now:
+
+what is 'under' action?
+
+collation, character set, translation, member thing, methods
+
+for review
+
+some pretty big things missing in the standard:
+
+schema, database
+
+functions, etc., by argument types since they can be overloaded
+
+
+
+12.4 <role definition>
+
+<role definition> ::=
+  CREATE ROLE <role name> [ WITH ADMIN <grantor> ]
+-}
+
+    ,(TestStatement ansi2011
+      "create role rolee"
+     $ CreateRole (Name Nothing "rolee"))
+
+
+{-
+12.5 <grant role statement>
+
+<grant role statement> ::=
+  GRANT <role granted> [ { <comma> <role granted> }... ]
+      TO <grantee> [ { <comma> <grantee> }... ]
+      [ WITH ADMIN OPTION ]
+      [ GRANTED BY <grantor> ]
+
+<role granted> ::=
+  <role name>
+-}
+
+    ,(TestStatement ansi2011
+      "grant role1 to public"
+     $ GrantRole [Name Nothing "role1"] [Name Nothing "public"] WithoutAdminOption)
+
+    ,(TestStatement ansi2011
+      "grant role1,role2 to role3,role4"
+     $ GrantRole [Name Nothing "role1",Name Nothing "role2"]
+                 [Name Nothing "role3", Name Nothing "role4"] WithoutAdminOption)
+
+    ,(TestStatement ansi2011
+      "grant role1 to role3 with admin option"
+     $ GrantRole [Name Nothing "role1"] [Name Nothing "role3"] WithAdminOption)
+
+
+{-
+12.6 <drop role statement>
+
+<drop role statement> ::=
+  DROP ROLE <role name>
+-}
+
+    ,(TestStatement ansi2011
+      "drop role rolee"
+     $ DropRole (Name Nothing "rolee"))
+
+
+{-
+12.7 <revoke statement>
+
+<revoke statement> ::=
+    <revoke privilege statement>
+  | <revoke role statement>
+
+<revoke privilege statement> ::=
+  REVOKE [ <revoke option extension> ] <privileges>
+      FROM <grantee> [ { <comma> <grantee> }... ]
+      [ GRANTED BY <grantor> ]
+      <drop behavior>
+
+<revoke option extension> ::=
+    GRANT OPTION FOR
+  | HIERARCHY OPTION FOR
+-}
+
+
+    ,(TestStatement ansi2011
+      "revoke select on t1 from role1"
+     $ RevokePrivilege NoGrantOptionFor [PrivSelect []]
+              (PrivTable [Name Nothing "t1"])
+              [Name Nothing "role1"] DefaultDropBehaviour)
+
+    ,(TestStatement ansi2011
+      "revoke grant option for select on t1 from role1,role2 cascade"
+     $ RevokePrivilege GrantOptionFor [PrivSelect []]
+                       (PrivTable [Name Nothing "t1"])
+              [Name Nothing "role1",Name Nothing "role2"] Cascade)
+
+
+{-
+<revoke role statement> ::=
+  REVOKE [ ADMIN OPTION FOR ] <role revoked> [ { <comma> <role revoked> }... ]
+      FROM <grantee> [ { <comma> <grantee> }... ]
+      [ GRANTED BY <grantor> ]
+      <drop behavior>
+
+<role revoked> ::=
+  <role name>
+-}
+
+    ,(TestStatement ansi2011
+      "revoke role1 from role2"
+     $ RevokeRole NoAdminOptionFor [Name Nothing "role1"]
+                  [Name Nothing "role2"] DefaultDropBehaviour)
+
+    ,(TestStatement ansi2011
+      "revoke role1,role2 from role3,role4"
+     $ RevokeRole NoAdminOptionFor [Name Nothing "role1",Name Nothing "role2"]
+                  [Name Nothing "role3",Name Nothing "role4"] DefaultDropBehaviour)
+
+
+    ,(TestStatement ansi2011
+      "revoke admin option for role1 from role2 cascade"
+     $ RevokeRole AdminOptionFor [Name Nothing "role1"] [Name Nothing "role2"] Cascade)
+
+
+   ]
diff --git a/tools/Language/SQL/SimpleSQL/SQL2011AccessControl.lhs b/tools/Language/SQL/SimpleSQL/SQL2011AccessControl.lhs
deleted file mode 100644
index cc52215..0000000
--- a/tools/Language/SQL/SimpleSQL/SQL2011AccessControl.lhs
+++ /dev/null
@@ -1,315 +0,0 @@
-
-Section 12 in Foundation
-
-grant, etc
-
-
-> module Language.SQL.SimpleSQL.SQL2011AccessControl (sql2011AccessControlTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-> sql2011AccessControlTests :: TestItem
-> sql2011AccessControlTests = Group "sql 2011 access control tests" [
-
-12 Access control
-
-12.1 <grant statement>
-
-<grant statement> ::=
-    <grant privilege statement>
-  | <grant role statement>
-
-12.2 <grant privilege statement>
-
-<grant privilege statement> ::=
-  GRANT <privileges> TO <grantee> [ { <comma> <grantee> }... ]
-      [ WITH HIERARCHY OPTION ]
-      [ WITH GRANT OPTION ]
-      [ GRANTED BY <grantor> ]
-
-12.3 <privileges>
-<privileges> ::=
-  <object privileges> ON <object name>
-
-<object name> ::=
-      [ TABLE ] <table name>
-  |   DOMAIN <domain name>
-  |   COLLATION <collation name>
-  |   CHARACTER SET <character set name>
-  |   TRANSLATION <transliteration name>
-  |   TYPE <schema-resolved user-defined type name>
-  |   SEQUENCE <sequence generator name>
-  |   <specific routine designator>
-
-<object privileges> ::=
-    ALL PRIVILEGES
-  | <action> [ { <comma> <action> }... ]
-
-<action> ::=
-    SELECT
-  | SELECT <left paren> <privilege column list> <right paren>
-  | SELECT <left paren> <privilege method list> <right paren>
-  | DELETE
-  | INSERT [ <left paren> <privilege column list> <right paren> ]
-  | UPDATE [ <left paren> <privilege column list> <right paren> ]
-  | REFERENCES [ <left paren> <privilege column list> <right paren> ]
-  | USAGE
-  | TRIGGER
-  | UNDER
-  | EXECUTE
-
-<privilege method list> ::=
-  <specific routine designator> [ { <comma> <specific routine designator> }... ]
-
-<privilege column list> ::=
-  <column name list>
-
-<grantee> ::=
-    PUBLIC
-  | <authorization identifier>
-
-<grantor> ::=
-    CURRENT_USER
-  | CURRENT_ROLE
-
->      (TestStatement ansi2011
->       "grant all privileges on tbl1 to role1"
->      $ GrantPrivilege [PrivAll]
->        (PrivTable [Name Nothing "tbl1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
-
->     ,(TestStatement ansi2011
->       "grant all privileges on tbl1 to role1,role2"
->      $ GrantPrivilege [PrivAll]
->        (PrivTable [Name Nothing "tbl1"])
->        [Name Nothing "role1",Name Nothing "role2"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant all privileges on tbl1 to role1 with grant option"
->      $ GrantPrivilege [PrivAll]
->        (PrivTable [Name Nothing "tbl1"])
->        [Name Nothing "role1"] WithGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant all privileges on table tbl1 to role1"
->      $ GrantPrivilege [PrivAll]
->        (PrivTable [Name Nothing "tbl1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant all privileges on domain mydom to role1"
->      $ GrantPrivilege [PrivAll]
->        (PrivDomain [Name Nothing "mydom"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant all privileges on type t1 to role1"
->      $ GrantPrivilege [PrivAll]
->        (PrivType [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant all privileges on sequence s1 to role1"
->      $ GrantPrivilege [PrivAll]
->        (PrivSequence [Name Nothing "s1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
-
->     ,(TestStatement ansi2011
->       "grant select on table t1 to role1"
->      $ GrantPrivilege [PrivSelect []]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant select(a,b) on table t1 to role1"
->      $ GrantPrivilege [PrivSelect [Name Nothing "a", Name Nothing "b"]]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant delete on table t1 to role1"
->      $ GrantPrivilege [PrivDelete]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant insert on table t1 to role1"
->      $ GrantPrivilege [PrivInsert []]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant insert(a,b) on table t1 to role1"
->      $ GrantPrivilege [PrivInsert [Name Nothing "a", Name Nothing "b"]]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant update on table t1 to role1"
->      $ GrantPrivilege [PrivUpdate []]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant update(a,b) on table t1 to role1"
->      $ GrantPrivilege [PrivUpdate [Name Nothing "a", Name Nothing "b"]]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant references on table t1 to role1"
->      $ GrantPrivilege [PrivReferences []]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant references(a,b) on table t1 to role1"
->      $ GrantPrivilege [PrivReferences [Name Nothing "a", Name Nothing "b"]]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant usage on table t1 to role1"
->      $ GrantPrivilege [PrivUsage]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant trigger on table t1 to role1"
->      $ GrantPrivilege [PrivTrigger]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
-
->     ,(TestStatement ansi2011
->       "grant execute on specific function f to role1"
->      $ GrantPrivilege [PrivExecute]
->        (PrivFunction [Name Nothing "f"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
->     ,(TestStatement ansi2011
->       "grant select,delete on table t1 to role1"
->      $ GrantPrivilege [PrivSelect [], PrivDelete]
->        (PrivTable [Name Nothing "t1"])
->        [Name Nothing "role1"] WithoutGrantOption)
-
-skipping for now:
-
-what is 'under' action?
-
-collation, character set, translation, member thing, methods
-
-for review
-
-some pretty big things missing in the standard:
-
-schema, database
-
-functions, etc., by argument types since they can be overloaded
-
-
-
-12.4 <role definition>
-
-<role definition> ::=
-  CREATE ROLE <role name> [ WITH ADMIN <grantor> ]
-
->     ,(TestStatement ansi2011
->       "create role rolee"
->      $ CreateRole (Name Nothing "rolee"))
-
-
-12.5 <grant role statement>
-
-<grant role statement> ::=
-  GRANT <role granted> [ { <comma> <role granted> }... ]
-      TO <grantee> [ { <comma> <grantee> }... ]
-      [ WITH ADMIN OPTION ]
-      [ GRANTED BY <grantor> ]
-
-<role granted> ::=
-  <role name>
-
->     ,(TestStatement ansi2011
->       "grant role1 to public"
->      $ GrantRole [Name Nothing "role1"] [Name Nothing "public"] WithoutAdminOption)
-
->     ,(TestStatement ansi2011
->       "grant role1,role2 to role3,role4"
->      $ GrantRole [Name Nothing "role1",Name Nothing "role2"]
->                  [Name Nothing "role3", Name Nothing "role4"] WithoutAdminOption)
-
->     ,(TestStatement ansi2011
->       "grant role1 to role3 with admin option"
->      $ GrantRole [Name Nothing "role1"] [Name Nothing "role3"] WithAdminOption)
-
-
-12.6 <drop role statement>
-
-<drop role statement> ::=
-  DROP ROLE <role name>
-
->     ,(TestStatement ansi2011
->       "drop role rolee"
->      $ DropRole (Name Nothing "rolee"))
-
-
-12.7 <revoke statement>
-
-<revoke statement> ::=
-    <revoke privilege statement>
-  | <revoke role statement>
-
-<revoke privilege statement> ::=
-  REVOKE [ <revoke option extension> ] <privileges>
-      FROM <grantee> [ { <comma> <grantee> }... ]
-      [ GRANTED BY <grantor> ]
-      <drop behavior>
-
-<revoke option extension> ::=
-    GRANT OPTION FOR
-  | HIERARCHY OPTION FOR
-
-
->     ,(TestStatement ansi2011
->       "revoke select on t1 from role1"
->      $ RevokePrivilege NoGrantOptionFor [PrivSelect []]
->               (PrivTable [Name Nothing "t1"])
->               [Name Nothing "role1"] DefaultDropBehaviour)
-
->     ,(TestStatement ansi2011
->       "revoke grant option for select on t1 from role1,role2 cascade"
->      $ RevokePrivilege GrantOptionFor [PrivSelect []]
->                        (PrivTable [Name Nothing "t1"])
->               [Name Nothing "role1",Name Nothing "role2"] Cascade)
-
-
-<revoke role statement> ::=
-  REVOKE [ ADMIN OPTION FOR ] <role revoked> [ { <comma> <role revoked> }... ]
-      FROM <grantee> [ { <comma> <grantee> }... ]
-      [ GRANTED BY <grantor> ]
-      <drop behavior>
-
-<role revoked> ::=
-  <role name>
-
->     ,(TestStatement ansi2011
->       "revoke role1 from role2"
->      $ RevokeRole NoAdminOptionFor [Name Nothing "role1"]
->                   [Name Nothing "role2"] DefaultDropBehaviour)
-
->     ,(TestStatement ansi2011
->       "revoke role1,role2 from role3,role4"
->      $ RevokeRole NoAdminOptionFor [Name Nothing "role1",Name Nothing "role2"]
->                   [Name Nothing "role3",Name Nothing "role4"] DefaultDropBehaviour)
-
-
->     ,(TestStatement ansi2011
->       "revoke admin option for role1 from role2 cascade"
->      $ RevokeRole AdminOptionFor [Name Nothing "role1"] [Name Nothing "role2"] Cascade)
-
-
->    ]
diff --git a/tools/Language/SQL/SimpleSQL/SQL2011Bits.lhs b/tools/Language/SQL/SimpleSQL/SQL2011Bits.hs
similarity index 79%
rename from tools/Language/SQL/SimpleSQL/SQL2011Bits.lhs
rename to tools/Language/SQL/SimpleSQL/SQL2011Bits.hs
index 3e13a51..443f232 100644
--- a/tools/Language/SQL/SimpleSQL/SQL2011Bits.lhs
+++ b/tools/Language/SQL/SimpleSQL/SQL2011Bits.hs
@@ -1,18 +1,21 @@
 
+{-
 Sections 17 and 19 in Foundation
 
 This module covers the tests for transaction management (begin,
 commit, savepoint, etc.), and session management (set).
+-}
 
 
-> module Language.SQL.SimpleSQL.SQL2011Bits (sql2011BitsTests) where
+module Language.SQL.SimpleSQL.SQL2011Bits (sql2011BitsTests) where
 
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
 
-> sql2011BitsTests :: TestItem
-> sql2011BitsTests = Group "sql 2011 bits tests" [
+sql2011BitsTests :: TestItem
+sql2011BitsTests = Group "sql 2011 bits tests" [
 
+{-
 17 Transaction management
 
 17.1 <start transaction statement>
@@ -21,11 +24,13 @@ commit, savepoint, etc.), and session management (set).
   START TRANSACTION [ <transaction characteristics> ]
 
 BEGIN is not in the standard!
+-}
 
->      (TestStatement ansi2011
->       "start transaction"
->      $ StartTransaction)
+     (TestStatement ansi2011
+      "start transaction"
+     $ StartTransaction)
 
+{-
 17.2 <set transaction statement>
 
 <set transaction statement> ::=
@@ -76,36 +81,42 @@ BEGIN is not in the standard!
 
 <savepoint specifier> ::=
   <savepoint name>
+-}
 
->     ,(TestStatement ansi2011
->       "savepoint difficult_bit"
->      $ Savepoint $ Name Nothing "difficult_bit")
+    ,(TestStatement ansi2011
+      "savepoint difficult_bit"
+     $ Savepoint $ Name Nothing "difficult_bit")
 
 
+{-
 17.6 <release savepoint statement>
 
 <release savepoint statement> ::=
   RELEASE SAVEPOINT <savepoint specifier>
+-}
 
->     ,(TestStatement ansi2011
->       "release savepoint difficult_bit"
->      $ ReleaseSavepoint $ Name Nothing "difficult_bit")
+    ,(TestStatement ansi2011
+      "release savepoint difficult_bit"
+     $ ReleaseSavepoint $ Name Nothing "difficult_bit")
 
 
+{-
 17.7 <commit statement>
 
 <commit statement> ::=
   COMMIT [ WORK ] [ AND [ NO ] CHAIN ]
+-}
 
->     ,(TestStatement ansi2011
->       "commit"
->      $ Commit)
+    ,(TestStatement ansi2011
+      "commit"
+     $ Commit)
 
->     ,(TestStatement ansi2011
->       "commit work"
->      $ Commit)
+    ,(TestStatement ansi2011
+      "commit work"
+     $ Commit)
 
 
+{-
 17.8 <rollback statement>
 
 <rollback statement> ::=
@@ -113,20 +124,22 @@ BEGIN is not in the standard!
 
 <savepoint clause> ::=
   TO SAVEPOINT <savepoint specifier>
+-}
 
->     ,(TestStatement ansi2011
->       "rollback"
->      $ Rollback Nothing)
+    ,(TestStatement ansi2011
+      "rollback"
+     $ Rollback Nothing)
 
->     ,(TestStatement ansi2011
->       "rollback work"
->      $ Rollback Nothing)
+    ,(TestStatement ansi2011
+      "rollback work"
+     $ Rollback Nothing)
 
->     ,(TestStatement ansi2011
->       "rollback to savepoint difficult_bit"
->      $ Rollback $ Just $ Name Nothing "difficult_bit")
+    ,(TestStatement ansi2011
+      "rollback to savepoint difficult_bit"
+     $ Rollback $ Just $ Name Nothing "difficult_bit")
 
 
+{-
 19 Session management
 
 19.1 <set session characteristics statement>
@@ -215,5 +228,6 @@ BEGIN is not in the standard!
 
 <collation specification> ::=
   <value specification>
+-}
 
->    ]
+   ]
diff --git a/tools/Language/SQL/SimpleSQL/SQL2011DataManipulation.lhs b/tools/Language/SQL/SimpleSQL/SQL2011DataManipulation.hs
similarity index 74%
rename from tools/Language/SQL/SimpleSQL/SQL2011DataManipulation.lhs
rename to tools/Language/SQL/SimpleSQL/SQL2011DataManipulation.hs
index 2f6ce48..d4e973a 100644
--- a/tools/Language/SQL/SimpleSQL/SQL2011DataManipulation.lhs
+++ b/tools/Language/SQL/SimpleSQL/SQL2011DataManipulation.hs
@@ -1,17 +1,18 @@
 
-Section 14 in Foundation
+-- Section 14 in Foundation
 
 
-> module Language.SQL.SimpleSQL.SQL2011DataManipulation (sql2011DataManipulationTests) where
+module Language.SQL.SimpleSQL.SQL2011DataManipulation (sql2011DataManipulationTests) where
 
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
 
-> sql2011DataManipulationTests :: TestItem
-> sql2011DataManipulationTests = Group "sql 2011 data manipulation tests"
->     [
+sql2011DataManipulationTests :: TestItem
+sql2011DataManipulationTests = Group "sql 2011 data manipulation tests"
+    [
 
 
+{-
 14 Data manipulation
 
 
@@ -107,22 +108,24 @@ Section 14 in Foundation
         FROM <point in time 1> TO <point in time 2> ]
       [ [ AS ] <correlation name> ]
       [ WHERE <search condition> ]
+-}
 
->      (TestStatement ansi2011 "delete from t"
->      $ Delete [Name Nothing "t"] Nothing Nothing)
+     (TestStatement ansi2011 "delete from t"
+     $ Delete [Name Nothing "t"] Nothing Nothing)
 
->     ,(TestStatement ansi2011 "delete from t as u"
->      $ Delete [Name Nothing "t"] (Just (Name Nothing "u")) Nothing)
+    ,(TestStatement ansi2011 "delete from t as u"
+     $ Delete [Name Nothing "t"] (Just (Name Nothing "u")) Nothing)
 
->     ,(TestStatement ansi2011 "delete from t where x = 5"
->      $ Delete [Name Nothing "t"] Nothing
->        (Just $ BinOp (Iden [Name Nothing "x"]) [Name Nothing "="] (NumLit "5")))
+    ,(TestStatement ansi2011 "delete from t where x = 5"
+     $ Delete [Name Nothing "t"] Nothing
+       (Just $ BinOp (Iden [Name Nothing "x"]) [Name Nothing "="] (NumLit "5")))
 
 
->     ,(TestStatement ansi2011 "delete from t as u where u.x = 5"
->      $ Delete [Name Nothing "t"] (Just (Name Nothing "u"))
->        (Just $ BinOp (Iden [Name Nothing "u", Name Nothing "x"]) [Name Nothing "="] (NumLit "5")))
+    ,(TestStatement ansi2011 "delete from t as u where u.x = 5"
+     $ Delete [Name Nothing "t"] (Just (Name Nothing "u"))
+       (Just $ BinOp (Iden [Name Nothing "u", Name Nothing "x"]) [Name Nothing "="] (NumLit "5")))
 
+{-
 14.10 <truncate table statement>
 
 <truncate table statement> ::=
@@ -131,17 +134,19 @@ Section 14 in Foundation
 <identity column restart option> ::=
     CONTINUE IDENTITY
   | RESTART IDENTITY
+-}
 
->     ,(TestStatement ansi2011 "truncate table t"
->      $ Truncate [Name Nothing "t"] DefaultIdentityRestart)
+    ,(TestStatement ansi2011 "truncate table t"
+     $ Truncate [Name Nothing "t"] DefaultIdentityRestart)
 
->     ,(TestStatement ansi2011 "truncate table t continue identity"
->      $ Truncate [Name Nothing "t"] ContinueIdentity)
+    ,(TestStatement ansi2011 "truncate table t continue identity"
+     $ Truncate [Name Nothing "t"] ContinueIdentity)
 
->     ,(TestStatement ansi2011 "truncate table t restart identity"
->      $ Truncate [Name Nothing "t"] RestartIdentity)
+    ,(TestStatement ansi2011 "truncate table t restart identity"
+     $ Truncate [Name Nothing "t"] RestartIdentity)
 
 
+{-
 14.11 <insert statement>
 
 <insert statement> ::=
@@ -174,40 +179,42 @@ Section 14 in Foundation
 
 <insert column list> ::=
   <column name list>
+-}
 
->     ,(TestStatement ansi2011 "insert into t select * from u"
->      $ Insert [Name Nothing "t"] Nothing
->        $ InsertQuery makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "u"]]})
+    ,(TestStatement ansi2011 "insert into t select * from u"
+     $ Insert [Name Nothing "t"] Nothing
+       $ InsertQuery makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "u"]]})
 
->     ,(TestStatement ansi2011 "insert into t(a,b,c) select * from u"
->      $ Insert [Name Nothing "t"] (Just [Name Nothing "a", Name Nothing "b", Name Nothing "c"])
->        $ InsertQuery makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "u"]]})
+    ,(TestStatement ansi2011 "insert into t(a,b,c) select * from u"
+     $ Insert [Name Nothing "t"] (Just [Name Nothing "a", Name Nothing "b", Name Nothing "c"])
+       $ InsertQuery makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "u"]]})
 
->     ,(TestStatement ansi2011 "insert into t default values"
->      $ Insert [Name Nothing "t"] Nothing DefaultInsertValues)
+    ,(TestStatement ansi2011 "insert into t default values"
+     $ Insert [Name Nothing "t"] Nothing DefaultInsertValues)
 
->     ,(TestStatement ansi2011 "insert into t values(1,2)"
->      $ Insert [Name Nothing "t"] Nothing
->        $ InsertQuery $ Values [[NumLit "1", NumLit "2"]])
+    ,(TestStatement ansi2011 "insert into t values(1,2)"
+     $ Insert [Name Nothing "t"] Nothing
+       $ InsertQuery $ Values [[NumLit "1", NumLit "2"]])
 
->     ,(TestStatement ansi2011 "insert into t values (1,2),(3,4)"
->      $ Insert [Name Nothing "t"] Nothing
->        $ InsertQuery $ Values [[NumLit "1", NumLit "2"]
->                               ,[NumLit "3", NumLit "4"]])
+    ,(TestStatement ansi2011 "insert into t values (1,2),(3,4)"
+     $ Insert [Name Nothing "t"] Nothing
+       $ InsertQuery $ Values [[NumLit "1", NumLit "2"]
+                              ,[NumLit "3", NumLit "4"]])
 
->     ,(TestStatement ansi2011
->       "insert into t values (default,null,array[],multiset[])"
->      $ Insert [Name Nothing "t"] Nothing
->        $ InsertQuery $ Values [[Iden [Name Nothing "default"]
->                                ,Iden [Name Nothing "null"]
->                                ,Array (Iden [Name Nothing "array"]) []
->                                ,MultisetCtor []]])
+    ,(TestStatement ansi2011
+      "insert into t values (default,null,array[],multiset[])"
+     $ Insert [Name Nothing "t"] Nothing
+       $ InsertQuery $ Values [[Iden [Name Nothing "default"]
+                               ,Iden [Name Nothing "null"]
+                               ,Array (Iden [Name Nothing "array"]) []
+                               ,MultisetCtor []]])
 
 
+{-
 14.12 <merge statement>
 
 <merge statement> ::=
@@ -445,37 +452,39 @@ FROM CentralOfficeAccounts;
       [ [ AS ] <correlation name> ]
       SET <set clause list>
       [ WHERE <search condition> ]
+-}
 
 
->     ,(TestStatement ansi2011 "update t set a=b"
->      $ Update [Name Nothing "t"] Nothing
->        [Set [Name Nothing "a"] (Iden [Name Nothing "b"])] Nothing)
+    ,(TestStatement ansi2011 "update t set a=b"
+     $ Update [Name Nothing "t"] Nothing
+       [Set [Name Nothing "a"] (Iden [Name Nothing "b"])] Nothing)
 
->     ,(TestStatement ansi2011 "update t set a=b, c=5"
->      $ Update [Name Nothing "t"] Nothing
->        [Set [Name Nothing "a"] (Iden [Name Nothing "b"])
->        ,Set [Name Nothing "c"] (NumLit "5")] Nothing)
+    ,(TestStatement ansi2011 "update t set a=b, c=5"
+     $ Update [Name Nothing "t"] Nothing
+       [Set [Name Nothing "a"] (Iden [Name Nothing "b"])
+       ,Set [Name Nothing "c"] (NumLit "5")] Nothing)
 
 
->     ,(TestStatement ansi2011 "update t set a=b where a>5"
->      $ Update [Name Nothing "t"] Nothing
->        [Set [Name Nothing "a"] (Iden [Name Nothing "b"])]
->        $ Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (NumLit "5"))
+    ,(TestStatement ansi2011 "update t set a=b where a>5"
+     $ Update [Name Nothing "t"] Nothing
+       [Set [Name Nothing "a"] (Iden [Name Nothing "b"])]
+       $ Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (NumLit "5"))
 
 
->     ,(TestStatement ansi2011 "update t as u set a=b where u.a>5"
->      $ Update [Name Nothing "t"] (Just $ Name Nothing "u")
->        [Set [Name Nothing "a"] (Iden [Name Nothing "b"])]
->        $ Just $ BinOp (Iden [Name Nothing "u",Name Nothing "a"])
->                       [Name Nothing ">"] (NumLit "5"))
+    ,(TestStatement ansi2011 "update t as u set a=b where u.a>5"
+     $ Update [Name Nothing "t"] (Just $ Name Nothing "u")
+       [Set [Name Nothing "a"] (Iden [Name Nothing "b"])]
+       $ Just $ BinOp (Iden [Name Nothing "u",Name Nothing "a"])
+                      [Name Nothing ">"] (NumLit "5"))
 
->     ,(TestStatement ansi2011 "update t set (a,b)=(3,5)"
->      $ Update [Name Nothing "t"] Nothing
->        [SetMultiple [[Name Nothing "a"],[Name Nothing "b"]]
->                     [NumLit "3", NumLit "5"]] Nothing)
+    ,(TestStatement ansi2011 "update t set (a,b)=(3,5)"
+     $ Update [Name Nothing "t"] Nothing
+       [SetMultiple [[Name Nothing "a"],[Name Nothing "b"]]
+                    [NumLit "3", NumLit "5"]] Nothing)
 
 
 
+{-
 14.15 <set clause list>
 
 <set clause list> ::=
@@ -539,6 +548,7 @@ declare local temporary table t (a int) [on commit {preserve | delete} rows]
 
 <hold locator statement> ::=
   HOLD LOCATOR <locator reference> [ { <comma> <locator reference> }... ]
+-}
 
 
->    ]
+   ]
diff --git a/tools/Language/SQL/SimpleSQL/SQL2011Queries.lhs b/tools/Language/SQL/SimpleSQL/SQL2011Queries.hs
similarity index 64%
rename from tools/Language/SQL/SimpleSQL/SQL2011Queries.lhs
rename to tools/Language/SQL/SimpleSQL/SQL2011Queries.hs
index ca57523..03db5cb 100644
--- a/tools/Language/SQL/SimpleSQL/SQL2011Queries.lhs
+++ b/tools/Language/SQL/SimpleSQL/SQL2011Queries.hs
@@ -1,4 +1,5 @@
 
+{-
 This file goes through the grammar for SQL 2011 queries (using the
 draft standard).
 
@@ -28,27 +29,29 @@ todo: finish this list
 The goal is to create some example tests for each bit of grammar, with
 some areas getting more comprehensive coverage tests, and also to note
 which parts aren't currently supported.
+-}
 
-> module Language.SQL.SimpleSQL.SQL2011Queries (sql2011QueryTests) where
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
+module Language.SQL.SimpleSQL.SQL2011Queries (sql2011QueryTests) where
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
 
-> sql2011QueryTests :: TestItem
-> sql2011QueryTests = Group "sql 2011 query tests"
->     [literals
->     ,identifiers
->     ,typeNameTests
->     ,fieldDefinition
->     ,valueExpressions
->     ,queryExpressions
->     ,scalarSubquery
->     ,predicates
->     ,intervalQualifier
->     ,collateClause
->     ,aggregateFunction
->     ,sortSpecificationList
->     ]
+sql2011QueryTests :: TestItem
+sql2011QueryTests = Group "sql 2011 query tests"
+    [literals
+    ,identifiers
+    ,typeNameTests
+    ,fieldDefinition
+    ,valueExpressions
+    ,queryExpressions
+    ,scalarSubquery
+    ,predicates
+    ,intervalQualifier
+    ,collateClause
+    ,aggregateFunction
+    ,sortSpecificationList
+    ]
 
+{-
 = 5 Lexical elements
 
 The tests don't make direct use of these definitions.
@@ -461,11 +464,13 @@ language.
 
 Function
 Specify a non-null value.
+-}
 
-> literals :: TestItem
-> literals = Group "literals"
->     [numericLiterals,generalLiterals]
+literals :: TestItem
+literals = Group "literals"
+    [numericLiterals,generalLiterals]
 
+{-
 <literal> ::= <signed numeric literal> | <general literal>
 
 <unsigned literal> ::= <unsigned numeric literal> | <general literal>
@@ -478,17 +483,19 @@ Specify a non-null value.
   | <datetime literal>
   | <interval literal>
   | <boolean literal>
+-}
 
-> generalLiterals :: TestItem
-> generalLiterals = Group "general literals"
->     [characterStringLiterals
->     ,nationalCharacterStringLiterals
->     ,unicodeCharacterStringLiterals
->     ,binaryStringLiterals
->     ,dateTimeLiterals
->     ,intervalLiterals
->     ,booleanLiterals]
+generalLiterals :: TestItem
+generalLiterals = Group "general literals"
+    [characterStringLiterals
+    ,nationalCharacterStringLiterals
+    ,unicodeCharacterStringLiterals
+    ,binaryStringLiterals
+    ,dateTimeLiterals
+    ,intervalLiterals
+    ,booleanLiterals]
 
+{-
 <character string literal> ::=
   [ <introducer> <character set specification> ]
       <quote> [ <character representation>... ] <quote>
@@ -501,42 +508,48 @@ Specify a non-null value.
 <nonquote character> ::= !! See the Syntax Rules.
 
 <quote symbol> ::= <quote> <quote>
+-}
 
-> characterStringLiterals :: TestItem
-> characterStringLiterals = Group "character string literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("'a regular string literal'"
->      ,StringLit "'" "'" "a regular string literal")
->     ,("'something' ' some more' 'and more'"
->      ,StringLit "'" "'" "something some moreand more")
->     ,("'something' \n ' some more' \t 'and more'"
->      ,StringLit "'" "'" "something some moreand more")
->     ,("'something' -- a comment\n ' some more' /*another comment*/ 'and more'"
->      ,StringLit "'" "'" "something some moreand more")
->     ,("'a quote: '', stuff'"
->      ,StringLit "'" "'" "a quote: '', stuff")
->     ,("''"
->      ,StringLit "'" "'" "")
+characterStringLiterals :: TestItem
+characterStringLiterals = Group "character string literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("'a regular string literal'"
+     ,StringLit "'" "'" "a regular string literal")
+    ,("'something' ' some more' 'and more'"
+     ,StringLit "'" "'" "something some moreand more")
+    ,("'something' \n ' some more' \t 'and more'"
+     ,StringLit "'" "'" "something some moreand more")
+    ,("'something' -- a comment\n ' some more' /*another comment*/ 'and more'"
+     ,StringLit "'" "'" "something some moreand more")
+    ,("'a quote: '', stuff'"
+     ,StringLit "'" "'" "a quote: '', stuff")
+    ,("''"
+     ,StringLit "'" "'" "")
 
+{-
 I'm not sure how this should work. Maybe the parser should reject non
 ascii characters in strings and identifiers unless the current SQL
 character set allows them.
+-}
 
->     ,("_francais 'français'"
->      ,TypedLit (TypeName [Name Nothing "_francais"]) "français")
->     ]
+    ,("_francais 'français'"
+     ,TypedLit (TypeName [Name Nothing "_francais"]) "français")
+    ]
 
+{-
 <national character string literal> ::=
   N <quote> [ <character representation>... ]
       <quote> [ { <separator> <quote> [ <character representation>... ] <quote> }... ]
+-}
 
-> nationalCharacterStringLiterals :: TestItem
-> nationalCharacterStringLiterals = Group "national character string literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("N'something'", StringLit "N'" "'" "something")
->     ,("n'something'", StringLit "n'" "'" "something")
->     ]
+nationalCharacterStringLiterals :: TestItem
+nationalCharacterStringLiterals = Group "national character string literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("N'something'", StringLit "N'" "'" "something")
+    ,("n'something'", StringLit "n'" "'" "something")
+    ]
 
+{-
 <Unicode character string literal> ::=
   [ <introducer> <character set specification> ]
       U <ampersand> <quote> [ <Unicode representation>... ] <quote>
@@ -546,17 +559,19 @@ character set allows them.
 <Unicode representation> ::=
     <character representation>
   | <Unicode escape value>
+-}
 
-> unicodeCharacterStringLiterals :: TestItem
-> unicodeCharacterStringLiterals = Group "unicode character string literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("U&'something'", StringLit "U&'" "'" "something")
->     {-,("u&'something' escape ="
->      ,Escape (StringLit "u&'" "'" "something") '=')
->     ,("u&'something' uescape ="
->      ,UEscape (StringLit "u&'" "'" "something") '=')-}
->     ]
+unicodeCharacterStringLiterals :: TestItem
+unicodeCharacterStringLiterals = Group "unicode character string literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("U&'something'", StringLit "U&'" "'" "something")
+    {-,("u&'something' escape ="
+     ,Escape (StringLit "u&'" "'" "something") '=')
+    ,("u&'something' uescape ="
+     ,UEscape (StringLit "u&'" "'" "something") '=')-}
+    ]
 
+{-
 TODO: unicode escape
 
 <binary string literal> ::=
@@ -565,15 +580,17 @@ TODO: unicode escape
       <hexit> [ <space>... ] }... ] <quote> }... ]
 
 <hexit> ::= <digit> | A | B | C | D | E | F | a | b | c | d | e | f
+-}
 
-> binaryStringLiterals :: TestItem
-> binaryStringLiterals = Group "binary string literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [--("B'101010'", CSStringLit "B" "101010")
->      ("X'7f7f7f'", StringLit "X'" "'" "7f7f7f")
->     --,("X'7f7f7f' escape z", Escape (StringLit "X'" "'" "7f7f7f") 'z')
->     ]
+binaryStringLiterals :: TestItem
+binaryStringLiterals = Group "binary string literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [--("B'101010'", CSStringLit "B" "101010")
+     ("X'7f7f7f'", StringLit "X'" "'" "7f7f7f")
+    --,("X'7f7f7f' escape z", Escape (StringLit "X'" "'" "7f7f7f") 'z')
+    ]
 
+{-
 <signed numeric literal> ::= [ <sign> ] <unsigned numeric literal>
 
 <unsigned numeric literal> ::=
@@ -595,39 +612,41 @@ TODO: unicode escape
 <signed integer> ::= [ <sign> ] <unsigned integer>
 
 <unsigned integer> ::= <digit>...
+-}
 
-> numericLiterals :: TestItem
-> numericLiterals = Group "numeric literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("11", NumLit "11")
->     ,("11.11", NumLit "11.11")
+numericLiterals :: TestItem
+numericLiterals = Group "numeric literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("11", NumLit "11")
+    ,("11.11", NumLit "11.11")
 
->     ,("11E23", NumLit "11E23")
->     ,("11E+23", NumLit "11E+23")
->     ,("11E-23", NumLit "11E-23")
+    ,("11E23", NumLit "11E23")
+    ,("11E+23", NumLit "11E+23")
+    ,("11E-23", NumLit "11E-23")
 
->     ,("11.11E23", NumLit "11.11E23")
->     ,("11.11E+23", NumLit "11.11E+23")
->     ,("11.11E-23", NumLit "11.11E-23")
+    ,("11.11E23", NumLit "11.11E23")
+    ,("11.11E+23", NumLit "11.11E+23")
+    ,("11.11E-23", NumLit "11.11E-23")
 
->     ,("+11E23", PrefixOp [Name Nothing "+"] $ NumLit "11E23")
->     ,("+11E+23", PrefixOp [Name Nothing "+"] $ NumLit "11E+23")
->     ,("+11E-23", PrefixOp [Name Nothing "+"] $ NumLit "11E-23")
->     ,("+11.11E23", PrefixOp [Name Nothing "+"] $ NumLit "11.11E23")
->     ,("+11.11E+23", PrefixOp [Name Nothing "+"] $ NumLit "11.11E+23")
->     ,("+11.11E-23", PrefixOp [Name Nothing "+"] $ NumLit "11.11E-23")
+    ,("+11E23", PrefixOp [Name Nothing "+"] $ NumLit "11E23")
+    ,("+11E+23", PrefixOp [Name Nothing "+"] $ NumLit "11E+23")
+    ,("+11E-23", PrefixOp [Name Nothing "+"] $ NumLit "11E-23")
+    ,("+11.11E23", PrefixOp [Name Nothing "+"] $ NumLit "11.11E23")
+    ,("+11.11E+23", PrefixOp [Name Nothing "+"] $ NumLit "11.11E+23")
+    ,("+11.11E-23", PrefixOp [Name Nothing "+"] $ NumLit "11.11E-23")
 
->     ,("-11E23", PrefixOp [Name Nothing "-"] $ NumLit "11E23")
->     ,("-11E+23", PrefixOp [Name Nothing "-"] $ NumLit "11E+23")
->     ,("-11E-23", PrefixOp [Name Nothing "-"] $ NumLit "11E-23")
->     ,("-11.11E23", PrefixOp [Name Nothing "-"] $ NumLit "11.11E23")
->     ,("-11.11E+23", PrefixOp [Name Nothing "-"] $ NumLit "11.11E+23")
->     ,("-11.11E-23", PrefixOp [Name Nothing "-"] $ NumLit "11.11E-23")
+    ,("-11E23", PrefixOp [Name Nothing "-"] $ NumLit "11E23")
+    ,("-11E+23", PrefixOp [Name Nothing "-"] $ NumLit "11E+23")
+    ,("-11E-23", PrefixOp [Name Nothing "-"] $ NumLit "11E-23")
+    ,("-11.11E23", PrefixOp [Name Nothing "-"] $ NumLit "11.11E23")
+    ,("-11.11E+23", PrefixOp [Name Nothing "-"] $ NumLit "11.11E+23")
+    ,("-11.11E-23", PrefixOp [Name Nothing "-"] $ NumLit "11.11E-23")
 
->     ,("11.11e23", NumLit "11.11e23")
+    ,("11.11e23", NumLit "11.11e23")
 
->     ]
+    ]
 
+{-
 <datetime literal> ::= <date literal> | <time literal> | <timestamp literal>
 
 <date literal> ::= DATE <date string>
@@ -648,12 +667,14 @@ TODO: unicode escape
   <years value> <minus sign> <months value> <minus sign> <days value>
 
 <time value> ::= <hours value> <colon> <minutes value> <colon> <seconds value>
+-}
 
-> dateTimeLiterals :: TestItem
-> dateTimeLiterals = Group "datetime literals"
->     [-- TODO: datetime literals
->     ]
+dateTimeLiterals :: TestItem
+dateTimeLiterals = Group "datetime literals"
+    [-- TODO: datetime literals
+    ]
 
+{-
 <interval literal> ::=
   INTERVAL [ <sign> ] <interval string> <interval qualifier>
 
@@ -701,38 +722,40 @@ TODO: unicode escape
 <seconds fraction> ::= <unsigned integer>
 
 <datetime value> ::= <unsigned integer>
+-}
 
-> intervalLiterals :: TestItem
-> intervalLiterals = Group "intervalLiterals literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("interval '1'", TypedLit (TypeName [Name Nothing "interval"]) "1")
->     ,("interval '1' day"
->      ,IntervalLit Nothing "1" (Itf "day" Nothing) Nothing)
->     ,("interval '1' day(3)"
->      ,IntervalLit Nothing "1" (Itf "day" $ Just (3,Nothing)) Nothing)
->     ,("interval + '1' day(3)"
->      ,IntervalLit (Just Plus) "1" (Itf "day" $ Just (3,Nothing)) Nothing)
->     ,("interval - '1' second(2,2)"
->      ,IntervalLit (Just Minus) "1" (Itf "second" $ Just (2,Just 2)) Nothing)
->     ,("interval '1' year to month"
->      ,IntervalLit Nothing "1" (Itf "year" Nothing)
->                                   (Just $ Itf "month" Nothing))
+intervalLiterals :: TestItem
+intervalLiterals = Group "intervalLiterals literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("interval '1'", TypedLit (TypeName [Name Nothing "interval"]) "1")
+    ,("interval '1' day"
+     ,IntervalLit Nothing "1" (Itf "day" Nothing) Nothing)
+    ,("interval '1' day(3)"
+     ,IntervalLit Nothing "1" (Itf "day" $ Just (3,Nothing)) Nothing)
+    ,("interval + '1' day(3)"
+     ,IntervalLit (Just Plus) "1" (Itf "day" $ Just (3,Nothing)) Nothing)
+    ,("interval - '1' second(2,2)"
+     ,IntervalLit (Just Minus) "1" (Itf "second" $ Just (2,Just 2)) Nothing)
+    ,("interval '1' year to month"
+     ,IntervalLit Nothing "1" (Itf "year" Nothing)
+                                  (Just $ Itf "month" Nothing))
 
->     ,("interval '1' year(4) to second(2,3) "
->      ,IntervalLit Nothing "1" (Itf "year" $ Just (4,Nothing))
->                             (Just $ Itf "second" $ Just (2, Just 3)))
->     ]
+    ,("interval '1' year(4) to second(2,3) "
+     ,IntervalLit Nothing "1" (Itf "year" $ Just (4,Nothing))
+                            (Just $ Itf "second" $ Just (2, Just 3)))
+    ]
 
-<boolean literal> ::= TRUE | FALSE | UNKNOWN
+-- <boolean literal> ::= TRUE | FALSE | UNKNOWN
 
-> booleanLiterals :: TestItem
-> booleanLiterals = Group "boolean literals"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("true", Iden [Name Nothing "true"])
->     ,("false", Iden [Name Nothing "false"])
->     ,("unknown", Iden [Name Nothing "unknown"])
->     ]
+booleanLiterals :: TestItem
+booleanLiterals = Group "boolean literals"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("true", Iden [Name Nothing "true"])
+    ,("false", Iden [Name Nothing "false"])
+    ,("unknown", Iden [Name Nothing "unknown"])
+    ]
 
+{-
 == 5.4 Names and identifiers
 
 Function
@@ -744,21 +767,23 @@ Specify names.
     <regular identifier>
   | <delimited identifier>
   | <Unicode delimited identifier>
+-}
 
-> identifiers :: TestItem
-> identifiers = Group "identifiers"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("test",Iden [Name Nothing "test"])
->     ,("_test",Iden [Name Nothing "_test"])
->     ,("t1",Iden [Name Nothing "t1"])
->     ,("a.b",Iden [Name Nothing "a", Name Nothing "b"])
->     ,("a.b.c",Iden [Name Nothing "a", Name Nothing "b", Name Nothing "c"])
->     ,("\"quoted iden\"", Iden [Name (Just ("\"","\"")) "quoted iden"])
->     ,("\"quoted \"\" iden\"", Iden [Name (Just ("\"","\"")) "quoted \"\" iden"])
->     ,("U&\"quoted iden\"", Iden [Name (Just ("U&\"","\"")) "quoted iden"])
->     ,("U&\"quoted \"\" iden\"", Iden [Name (Just ("U&\"","\"")) "quoted \"\" iden"])
->     ]
+identifiers :: TestItem
+identifiers = Group "identifiers"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("test",Iden [Name Nothing "test"])
+    ,("_test",Iden [Name Nothing "_test"])
+    ,("t1",Iden [Name Nothing "t1"])
+    ,("a.b",Iden [Name Nothing "a", Name Nothing "b"])
+    ,("a.b.c",Iden [Name Nothing "a", Name Nothing "b", Name Nothing "c"])
+    ,("\"quoted iden\"", Iden [Name (Just ("\"","\"")) "quoted iden"])
+    ,("\"quoted \"\" iden\"", Iden [Name (Just ("\"","\"")) "quoted \"\" iden"])
+    ,("U&\"quoted iden\"", Iden [Name (Just ("U&\"","\"")) "quoted iden"])
+    ,("U&\"quoted \"\" iden\"", Iden [Name (Just ("U&\"","\"")) "quoted \"\" iden"])
+    ]
 
+{-
 TODO: more identifiers, e.g. unicode escapes?, mixed quoted/unquoted
 chains
 
@@ -1023,202 +1048,208 @@ binary, binary varying/varbinary
 new multipliers
 
 create a list of type name variations:
+-}
 
-> typeNames :: ([(String,TypeName)],[(String,TypeName)])
-> typeNames =
->     (basicTypes, concatMap makeArray basicTypes
->                  ++ map makeMultiset basicTypes)
->   where
->     makeArray (s,t) = [(s ++ " array", ArrayTypeName t Nothing)
->                       ,(s ++ " array[5]", ArrayTypeName t (Just 5))]
->     makeMultiset (s,t) = (s ++ " multiset", MultisetTypeName t)
->     basicTypes =
->         -- example of every standard type name
->         map (\t -> (t,TypeName [Name Nothing t]))
->         ["binary"
->         ,"binary varying"
->         ,"character"
->         ,"char"
->         ,"character varying"
->         ,"char varying"
->         ,"varbinary"
->         ,"varchar"
->         ,"character large object"
->         ,"char large object"
->         ,"clob"
->         ,"national character"
->         ,"national char"
->         ,"nchar"
->         ,"national character varying"
->         ,"national char varying"
->         ,"nchar varying"
->         ,"national character large object"
->         ,"nchar large object"
->         ,"nclob"
->         ,"binary large object"
->         ,"blob"
->         ,"numeric"
->         ,"decimal"
->         ,"dec"
->         ,"smallint"
->         ,"integer"
->         ,"int"
->         ,"bigint"
->         ,"float"
->         ,"real"
->         ,"double precision"
->         ,"boolean"
->         ,"date"
->         ,"time"
->         ,"timestamp"]
->         --interval -- not allowed without interval qualifier
->         --row -- not allowed without row type body
->         -- array -- not allowed on own
->         -- multiset -- not allowed on own
+typeNames :: ([(String,TypeName)],[(String,TypeName)])
+typeNames =
+    (basicTypes, concatMap makeArray basicTypes
+                 ++ map makeMultiset basicTypes)
+  where
+    makeArray (s,t) = [(s ++ " array", ArrayTypeName t Nothing)
+                      ,(s ++ " array[5]", ArrayTypeName t (Just 5))]
+    makeMultiset (s,t) = (s ++ " multiset", MultisetTypeName t)
+    basicTypes =
+        -- example of every standard type name
+        map (\t -> (t,TypeName [Name Nothing t]))
+        ["binary"
+        ,"binary varying"
+        ,"character"
+        ,"char"
+        ,"character varying"
+        ,"char varying"
+        ,"varbinary"
+        ,"varchar"
+        ,"character large object"
+        ,"char large object"
+        ,"clob"
+        ,"national character"
+        ,"national char"
+        ,"nchar"
+        ,"national character varying"
+        ,"national char varying"
+        ,"nchar varying"
+        ,"national character large object"
+        ,"nchar large object"
+        ,"nclob"
+        ,"binary large object"
+        ,"blob"
+        ,"numeric"
+        ,"decimal"
+        ,"dec"
+        ,"smallint"
+        ,"integer"
+        ,"int"
+        ,"bigint"
+        ,"float"
+        ,"real"
+        ,"double precision"
+        ,"boolean"
+        ,"date"
+        ,"time"
+        ,"timestamp"]
+        --interval -- not allowed without interval qualifier
+        --row -- not allowed without row type body
+        -- array -- not allowed on own
+        -- multiset -- not allowed on own
 
->          ++
->          [-- 1 single prec + 1 with multiname
->           ("char(5)", PrecTypeName [Name Nothing "char"] 5)
->          ,("char varying(5)", PrecTypeName [Name Nothing "char varying"] 5)
->          -- 1 scale
->          ,("decimal(15,2)", PrecScaleTypeName [Name Nothing "decimal"] 15 2)
->          ,("char(3 octets)"
->           ,PrecLengthTypeName [Name Nothing "char"] 3 Nothing (Just PrecOctets))
->          ,("varchar(50 characters)"
->           ,PrecLengthTypeName [Name Nothing "varchar"] 50 Nothing (Just PrecCharacters))
->          -- lob prec + with multiname
->          ,("blob(3M)", PrecLengthTypeName [Name Nothing "blob"] 3 (Just PrecM) Nothing)
->          ,("blob(3T)", PrecLengthTypeName [Name Nothing "blob"] 3 (Just PrecT) Nothing)
->          ,("blob(3P)", PrecLengthTypeName [Name Nothing "blob"] 3 (Just PrecP) Nothing)
->          ,("blob(4M characters) "
->           ,PrecLengthTypeName [Name Nothing "blob"] 4 (Just PrecM) (Just PrecCharacters))
->          ,("blob(6G octets) "
->           ,PrecLengthTypeName [Name Nothing "blob"] 6 (Just PrecG) (Just PrecOctets))
->          ,("national character large object(7K) "
->           ,PrecLengthTypeName [Name Nothing "national character large object"]
->                7 (Just PrecK) Nothing)
->          -- 1 with and without tz
->          ,("time with time zone"
->           ,TimeTypeName [Name Nothing "time"] Nothing True)
->          ,("datetime(3) without time zone"
->           ,TimeTypeName [Name Nothing "datetime"] (Just 3) False)
->          -- chars: (single/multiname) x prec x charset x collate
->          -- 1111
->          ,("char varying(5) character set something collate something_insensitive"
->           ,CharTypeName [Name Nothing "char varying"] (Just 5)
->            [Name Nothing "something"] [Name Nothing "something_insensitive"])
->           -- 0111
->          ,("char(5) character set something collate something_insensitive"
->           ,CharTypeName [Name Nothing "char"] (Just 5)
->            [Name Nothing "something"] [Name Nothing "something_insensitive"])
+         ++
+         [-- 1 single prec + 1 with multiname
+          ("char(5)", PrecTypeName [Name Nothing "char"] 5)
+         ,("char varying(5)", PrecTypeName [Name Nothing "char varying"] 5)
+         -- 1 scale
+         ,("decimal(15,2)", PrecScaleTypeName [Name Nothing "decimal"] 15 2)
+         ,("char(3 octets)"
+          ,PrecLengthTypeName [Name Nothing "char"] 3 Nothing (Just PrecOctets))
+         ,("varchar(50 characters)"
+          ,PrecLengthTypeName [Name Nothing "varchar"] 50 Nothing (Just PrecCharacters))
+         -- lob prec + with multiname
+         ,("blob(3M)", PrecLengthTypeName [Name Nothing "blob"] 3 (Just PrecM) Nothing)
+         ,("blob(3T)", PrecLengthTypeName [Name Nothing "blob"] 3 (Just PrecT) Nothing)
+         ,("blob(3P)", PrecLengthTypeName [Name Nothing "blob"] 3 (Just PrecP) Nothing)
+         ,("blob(4M characters) "
+          ,PrecLengthTypeName [Name Nothing "blob"] 4 (Just PrecM) (Just PrecCharacters))
+         ,("blob(6G octets) "
+          ,PrecLengthTypeName [Name Nothing "blob"] 6 (Just PrecG) (Just PrecOctets))
+         ,("national character large object(7K) "
+          ,PrecLengthTypeName [Name Nothing "national character large object"]
+               7 (Just PrecK) Nothing)
+         -- 1 with and without tz
+         ,("time with time zone"
+          ,TimeTypeName [Name Nothing "time"] Nothing True)
+         ,("datetime(3) without time zone"
+          ,TimeTypeName [Name Nothing "datetime"] (Just 3) False)
+         -- chars: (single/multiname) x prec x charset x collate
+         -- 1111
+         ,("char varying(5) character set something collate something_insensitive"
+          ,CharTypeName [Name Nothing "char varying"] (Just 5)
+           [Name Nothing "something"] [Name Nothing "something_insensitive"])
+          -- 0111
+         ,("char(5) character set something collate something_insensitive"
+          ,CharTypeName [Name Nothing "char"] (Just 5)
+           [Name Nothing "something"] [Name Nothing "something_insensitive"])
 
->           -- 1011
->          ,("char varying character set something collate something_insensitive"
->           ,CharTypeName [Name Nothing "char varying"] Nothing
->            [Name Nothing "something"] [Name Nothing "something_insensitive"])
->           -- 0011
->          ,("char character set something collate something_insensitive"
->           ,CharTypeName [Name Nothing "char"] Nothing
->            [Name Nothing "something"] [Name Nothing "something_insensitive"])
+          -- 1011
+         ,("char varying character set something collate something_insensitive"
+          ,CharTypeName [Name Nothing "char varying"] Nothing
+           [Name Nothing "something"] [Name Nothing "something_insensitive"])
+          -- 0011
+         ,("char character set something collate something_insensitive"
+          ,CharTypeName [Name Nothing "char"] Nothing
+           [Name Nothing "something"] [Name Nothing "something_insensitive"])
 
->           -- 1101
->          ,("char varying(5) collate something_insensitive"
->           ,CharTypeName [Name Nothing "char varying"] (Just 5)
->            [] [Name Nothing "something_insensitive"])
->           -- 0101
->          ,("char(5) collate something_insensitive"
->           ,CharTypeName [Name Nothing "char"] (Just 5)
->            [] [Name Nothing "something_insensitive"])
->           -- 1001
->          ,("char varying collate something_insensitive"
->           ,CharTypeName [Name Nothing "char varying"] Nothing
->            [] [Name Nothing "something_insensitive"])
->           -- 0001
->          ,("char collate something_insensitive"
->           ,CharTypeName [Name Nothing "char"] Nothing
->            [] [Name Nothing "something_insensitive"])
+          -- 1101
+         ,("char varying(5) collate something_insensitive"
+          ,CharTypeName [Name Nothing "char varying"] (Just 5)
+           [] [Name Nothing "something_insensitive"])
+          -- 0101
+         ,("char(5) collate something_insensitive"
+          ,CharTypeName [Name Nothing "char"] (Just 5)
+           [] [Name Nothing "something_insensitive"])
+          -- 1001
+         ,("char varying collate something_insensitive"
+          ,CharTypeName [Name Nothing "char varying"] Nothing
+           [] [Name Nothing "something_insensitive"])
+          -- 0001
+         ,("char collate something_insensitive"
+          ,CharTypeName [Name Nothing "char"] Nothing
+           [] [Name Nothing "something_insensitive"])
 
->          -- 1110
->          ,("char varying(5) character set something"
->           ,CharTypeName [Name Nothing "char varying"] (Just 5)
->            [Name Nothing "something"] [])
->           -- 0110
->          ,("char(5) character set something"
->           ,CharTypeName [Name Nothing "char"] (Just 5)
->            [Name Nothing "something"] [])
->           -- 1010
->          ,("char varying character set something"
->           ,CharTypeName [Name Nothing "char varying"] Nothing
->            [Name Nothing "something"] [])
->           -- 0010
->          ,("char character set something"
->           ,CharTypeName [Name Nothing "char"] Nothing
->            [Name Nothing "something"] [])
->           -- 1100
->          ,("char varying character set something"
->           ,CharTypeName [Name Nothing "char varying"] Nothing
->            [Name Nothing "something"] [])
+         -- 1110
+         ,("char varying(5) character set something"
+          ,CharTypeName [Name Nothing "char varying"] (Just 5)
+           [Name Nothing "something"] [])
+          -- 0110
+         ,("char(5) character set something"
+          ,CharTypeName [Name Nothing "char"] (Just 5)
+           [Name Nothing "something"] [])
+          -- 1010
+         ,("char varying character set something"
+          ,CharTypeName [Name Nothing "char varying"] Nothing
+           [Name Nothing "something"] [])
+          -- 0010
+         ,("char character set something"
+          ,CharTypeName [Name Nothing "char"] Nothing
+           [Name Nothing "something"] [])
+          -- 1100
+         ,("char varying character set something"
+          ,CharTypeName [Name Nothing "char varying"] Nothing
+           [Name Nothing "something"] [])
 
->          -- single row field, two row field
->          ,("row(a int)", RowTypeName [(Name Nothing "a", TypeName [Name Nothing "int"])])
->          ,("row(a int,b char)"
->           ,RowTypeName [(Name Nothing "a", TypeName [Name Nothing "int"])
->                        ,(Name Nothing "b", TypeName [Name Nothing "char"])])
->          -- interval each type raw
->          ,("interval year"
->           ,IntervalTypeName (Itf "year" Nothing) Nothing)
->          -- one type with single suffix
->          -- one type with double suffix
->          ,("interval year(2)"
->           ,IntervalTypeName (Itf "year" $ Just (2,Nothing)) Nothing)
->          ,("interval second(2,5)"
->           ,IntervalTypeName (Itf "second" $ Just (2,Just 5)) Nothing)
->          -- a to b with raw
->          -- a to b with single suffix
->          ,("interval year to month"
->           ,IntervalTypeName (Itf "year" Nothing)
->                             (Just $ Itf "month" Nothing))
->          ,("interval year(4) to second(2,3)"
->           ,IntervalTypeName (Itf "year" $ Just (4,Nothing))
->                             (Just $ Itf "second" $ Just (2, Just 3)))
->          ]
+         -- single row field, two row field
+         ,("row(a int)", RowTypeName [(Name Nothing "a", TypeName [Name Nothing "int"])])
+         ,("row(a int,b char)"
+          ,RowTypeName [(Name Nothing "a", TypeName [Name Nothing "int"])
+                       ,(Name Nothing "b", TypeName [Name Nothing "char"])])
+         -- interval each type raw
+         ,("interval year"
+          ,IntervalTypeName (Itf "year" Nothing) Nothing)
+         -- one type with single suffix
+         -- one type with double suffix
+         ,("interval year(2)"
+          ,IntervalTypeName (Itf "year" $ Just (2,Nothing)) Nothing)
+         ,("interval second(2,5)"
+          ,IntervalTypeName (Itf "second" $ Just (2,Just 5)) Nothing)
+         -- a to b with raw
+         -- a to b with single suffix
+         ,("interval year to month"
+          ,IntervalTypeName (Itf "year" Nothing)
+                            (Just $ Itf "month" Nothing))
+         ,("interval year(4) to second(2,3)"
+          ,IntervalTypeName (Itf "year" $ Just (4,Nothing))
+                            (Just $ Itf "second" $ Just (2, Just 3)))
+         ]
 
+{-
 Now test each variation in both cast expression and typed literal
 expression
+-}
 
-> typeNameTests :: TestItem
-> typeNameTests = Group "type names"
->     [Group "type names" $ map (uncurry (TestScalarExpr ansi2011))
->                           $ concatMap makeSimpleTests $ fst typeNames
->     ,Group "generated casts" $ map (uncurry (TestScalarExpr ansi2011))
->                           $ concatMap makeCastTests $ fst typeNames
->     ,Group "generated typename" $ map (uncurry (TestScalarExpr ansi2011))
->                           $ concatMap makeTests $ snd typeNames]
->   where
->     makeSimpleTests (ctn, stn) =
->         [(ctn ++ " 'test'", TypedLit stn "test")
->         ]
->     makeCastTests (ctn, stn) =
->         [("cast('test' as " ++ ctn ++ ")", Cast (StringLit "'" "'" "test") stn)
->         ]
->     makeTests a = makeSimpleTests a ++ makeCastTests a
+typeNameTests :: TestItem
+typeNameTests = Group "type names"
+    [Group "type names" $ map (uncurry (TestScalarExpr ansi2011))
+                          $ concatMap makeSimpleTests $ fst typeNames
+    ,Group "generated casts" $ map (uncurry (TestScalarExpr ansi2011))
+                          $ concatMap makeCastTests $ fst typeNames
+    ,Group "generated typename" $ map (uncurry (TestScalarExpr ansi2011))
+                          $ concatMap makeTests $ snd typeNames]
+  where
+    makeSimpleTests (ctn, stn) =
+        [(ctn ++ " 'test'", TypedLit stn "test")
+        ]
+    makeCastTests (ctn, stn) =
+        [("cast('test' as " ++ ctn ++ ")", Cast (StringLit "'" "'" "test") stn)
+        ]
+    makeTests a = makeSimpleTests a ++ makeCastTests a
 
 
+{-
 == 6.2 <field definition>
 
 Function
 Define a field of a row type.
 
 <field definition> ::= <field name> <data type>
+-}
 
-> fieldDefinition :: TestItem
-> fieldDefinition = Group "field definition"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("cast('(1,2)' as row(a int,b char))"
->      ,Cast (StringLit "'" "'" "(1,2)")
->      $ RowTypeName [(Name Nothing "a", TypeName [Name Nothing "int"])
->                    ,(Name Nothing "b", TypeName [Name Nothing "char"])])]
+fieldDefinition :: TestItem
+fieldDefinition = Group "field definition"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("cast('(1,2)' as row(a int,b char))"
+     ,Cast (StringLit "'" "'" "(1,2)")
+     $ RowTypeName [(Name Nothing "a", TypeName [Name Nothing "int"])
+                   ,(Name Nothing "b", TypeName [Name Nothing "char"])])]
 
+{-
 == 6.3 <value expression primary>
 
 Function
@@ -1256,48 +1287,50 @@ Specify a value that is syntactically self-delimited.
 <collection value constructor> ::=
     <array value constructor>
   | <multiset value constructor>
+-}
 
-> valueExpressions :: TestItem
-> valueExpressions = Group "value expressions"
->     [generalValueSpecification
->     ,parameterSpecification
->     ,contextuallyTypedValueSpecification
->     ,identifierChain
->     ,columnReference
->     ,setFunctionSpecification
->     ,windowFunction
->     ,nestedWindowFunction
->     ,caseExpression
->     ,castSpecification
->     ,nextScalarExpression
->     ,fieldReference
->     ,arrayElementReference
->     ,multisetElementReference
->     ,numericScalarExpression
->     ,numericValueFunction
->     ,stringScalarExpression
->     ,stringValueFunction
->     ,datetimeScalarExpression
->     ,datetimeValueFunction
->     ,intervalScalarExpression
->     ,intervalValueFunction
->     ,booleanScalarExpression
->     ,arrayScalarExpression
->     ,arrayValueFunction
->     ,arrayValueConstructor
->     ,multisetScalarExpression
->     ,multisetValueFunction
->     ,multisetValueConstructor
->     ,parenthesizedScalarExpression
->     ]
+valueExpressions :: TestItem
+valueExpressions = Group "value expressions"
+    [generalValueSpecification
+    ,parameterSpecification
+    ,contextuallyTypedValueSpecification
+    ,identifierChain
+    ,columnReference
+    ,setFunctionSpecification
+    ,windowFunction
+    ,nestedWindowFunction
+    ,caseExpression
+    ,castSpecification
+    ,nextScalarExpression
+    ,fieldReference
+    ,arrayElementReference
+    ,multisetElementReference
+    ,numericScalarExpression
+    ,numericValueFunction
+    ,stringScalarExpression
+    ,stringValueFunction
+    ,datetimeScalarExpression
+    ,datetimeValueFunction
+    ,intervalScalarExpression
+    ,intervalValueFunction
+    ,booleanScalarExpression
+    ,arrayScalarExpression
+    ,arrayValueFunction
+    ,arrayValueConstructor
+    ,multisetScalarExpression
+    ,multisetValueFunction
+    ,multisetValueConstructor
+    ,parenthesizedScalarExpression
+    ]
 
-> parenthesizedScalarExpression :: TestItem
-> parenthesizedScalarExpression = Group "parenthesized value expression"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("(3)", Parens (NumLit "3"))
->     ,("((3))", Parens $ Parens (NumLit "3"))
->     ]
+parenthesizedScalarExpression :: TestItem
+parenthesizedScalarExpression = Group "parenthesized value expression"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("(3)", Parens (NumLit "3"))
+    ,("((3))", Parens $ Parens (NumLit "3"))
+    ]
 
+{-
 == 6.4 <value specification> and <target specification>
 
 Function
@@ -1326,21 +1359,23 @@ Specify one or more values, host parameters, SQL parameters, dynamic parameters,
   | SYSTEM_USER
   | USER
   | VALUE
+-}
 
-> generalValueSpecification :: TestItem
-> generalValueSpecification = Group "general value specification"
->     $ map (uncurry (TestScalarExpr ansi2011)) $
->     map mkIden ["CURRENT_DEFAULT_TRANSFORM_GROUP"
->                ,"CURRENT_PATH"
->                ,"CURRENT_ROLE"
->                ,"CURRENT_USER"
->                ,"SESSION_USER"
->                ,"SYSTEM_USER"
->                ,"USER"
->                ,"VALUE"]
->   where
->     mkIden nm = (nm,Iden [Name Nothing nm])
+generalValueSpecification :: TestItem
+generalValueSpecification = Group "general value specification"
+    $ map (uncurry (TestScalarExpr ansi2011)) $
+    map mkIden ["CURRENT_DEFAULT_TRANSFORM_GROUP"
+               ,"CURRENT_PATH"
+               ,"CURRENT_ROLE"
+               ,"CURRENT_USER"
+               ,"SESSION_USER"
+               ,"SYSTEM_USER"
+               ,"USER"
+               ,"VALUE"]
+  where
+    mkIden nm = (nm,Iden [Name Nothing nm])
 
+{-
 TODO: add the missing bits
 
 <simple value specification> ::=
@@ -1380,17 +1415,19 @@ TODO: add the missing bits
       <left bracket or trigraph> <simple value specification> <right bracket or trigraph>
 
 <target array reference> ::= <SQL parameter reference> | <column reference>
+-}
 
-> parameterSpecification :: TestItem
-> parameterSpecification = Group "parameter specification"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [(":hostparam", HostParameter ":hostparam" Nothing)
->     ,(":hostparam indicator :another_host_param"
->      ,HostParameter ":hostparam" $ Just ":another_host_param")
->     ,("?", Parameter)
->     ,(":h[3]", Array (HostParameter ":h" Nothing) [NumLit "3"])
->     ]
+parameterSpecification :: TestItem
+parameterSpecification = Group "parameter specification"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [(":hostparam", HostParameter ":hostparam" Nothing)
+    ,(":hostparam indicator :another_host_param"
+     ,HostParameter ":hostparam" $ Just ":another_host_param")
+    ,("?", Parameter)
+    ,(":h[3]", Array (HostParameter ":h" Nothing) [NumLit "3"])
+    ]
 
+{-
 <current collation specification> ::=
   COLLATION FOR <left paren> <string value expression> <right paren>
 
@@ -1416,17 +1453,19 @@ Specify a value whose data type is to be inferred from its context.
   | MULTISET <left bracket or trigraph> <right bracket or trigraph>
 
 <default specification> ::= DEFAULT
+-}
 
-> contextuallyTypedValueSpecification :: TestItem
-> contextuallyTypedValueSpecification =
->     Group "contextually typed value specification"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("null", Iden [Name Nothing "null"])
->     ,("array[]", Array (Iden [Name Nothing "array"]) [])
->     ,("multiset[]", MultisetCtor [])
->     ,("default", Iden [Name Nothing "default"])
->     ]
+contextuallyTypedValueSpecification :: TestItem
+contextuallyTypedValueSpecification =
+    Group "contextually typed value specification"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("null", Iden [Name Nothing "null"])
+    ,("array[]", Array (Iden [Name Nothing "array"]) [])
+    ,("multiset[]", MultisetCtor [])
+    ,("default", Iden [Name Nothing "default"])
+    ]
 
+{-
 == 6.6 <identifier chain>
 
 Function
@@ -1435,12 +1474,14 @@ Disambiguate a <period>-separated chain of identifiers.
 <identifier chain> ::= <identifier> [ { <period> <identifier> }... ]
 
 <basic identifier chain> ::= <identifier chain>
+-}
 
-> identifierChain :: TestItem
-> identifierChain = Group "identifier chain"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("a.b", Iden [Name Nothing "a",Name Nothing "b"])]
+identifierChain :: TestItem
+identifierChain = Group "identifier chain"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("a.b", Iden [Name Nothing "a",Name Nothing "b"])]
 
+{-
 == 6.7 <column reference>
 
 Function
@@ -1449,12 +1490,14 @@ Reference a column.
 <column reference> ::=
     <basic identifier chain>
   | MODULE <period> <qualified identifier> <period> <column name>
+-}
 
-> columnReference :: TestItem
-> columnReference = Group "column reference"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("module.a.b", Iden [Name Nothing "module",Name Nothing "a",Name Nothing "b"])]
+columnReference :: TestItem
+columnReference = Group "column reference"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("module.a.b", Iden [Name Nothing "module",Name Nothing "a",Name Nothing "b"])]
 
+{-
 == 6.8 <SQL parameter reference>
 
 Function
@@ -1472,24 +1515,26 @@ Specify a value derived by the application of a function to an argument.
 <grouping operation> ::=
   GROUPING <left paren> <column reference>
       [ { <comma> <column reference> }... ] <right paren>
+-}
 
-> setFunctionSpecification :: TestItem
-> setFunctionSpecification = Group "set function specification"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("SELECT SalesQuota, SUM(SalesYTD) TotalSalesYTD,\n\
->       \   GROUPING(SalesQuota) AS Grouping\n\
->       \FROM Sales.SalesPerson\n\
->       \GROUP BY ROLLUP(SalesQuota);"
->      ,makeSelect
->       {qeSelectList = [(Iden [Name Nothing "SalesQuota"],Nothing)
->                       ,(App [Name Nothing "SUM"] [Iden [Name Nothing "SalesYTD"]]
->                        ,Just (Name Nothing "TotalSalesYTD"))
->                       ,(App [Name Nothing "GROUPING"] [Iden [Name Nothing "SalesQuota"]]
->                        ,Just (Name Nothing "Grouping"))]
->       ,qeFrom = [TRSimple [Name Nothing "Sales",Name Nothing "SalesPerson"]]
->       ,qeGroupBy = [Rollup [SimpleGroup (Iden [Name Nothing "SalesQuota"])]]})
->     ]
+setFunctionSpecification :: TestItem
+setFunctionSpecification = Group "set function specification"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("SELECT SalesQuota, SUM(SalesYTD) TotalSalesYTD,\n\
+      \   GROUPING(SalesQuota) AS Grouping\n\
+      \FROM Sales.SalesPerson\n\
+      \GROUP BY ROLLUP(SalesQuota);"
+     ,makeSelect
+      {qeSelectList = [(Iden [Name Nothing "SalesQuota"],Nothing)
+                      ,(App [Name Nothing "SUM"] [Iden [Name Nothing "SalesYTD"]]
+                       ,Just (Name Nothing "TotalSalesYTD"))
+                      ,(App [Name Nothing "GROUPING"] [Iden [Name Nothing "SalesQuota"]]
+                       ,Just (Name Nothing "Grouping"))]
+      ,qeFrom = [TRSimple [Name Nothing "Sales",Name Nothing "SalesPerson"]]
+      ,qeGroupBy = [Rollup [SimpleGroup (Iden [Name Nothing "SalesQuota"])]]})
+    ]
 
+{-
 == 6.10 <window function>
 
 Function
@@ -1549,12 +1594,14 @@ Specify a window function.
   | <in-line window specification>
 
 <in-line window specification> ::= <window specification>
+-}
 
-> windowFunction :: TestItem
-> windowFunction = Group "window function"
->     [-- todo: window function
->     ]
+windowFunction :: TestItem
+windowFunction = Group "window function"
+    [-- todo: window function
+    ]
 
+{-
 == 6.11 <nested window function>
 
 Function
@@ -1592,13 +1639,15 @@ Specify a function nested in an aggregated argument of an
   | <dynamic parameter specification>
 
 <value_of default value> ::= <value expression>
+-}
 
-> nestedWindowFunction :: TestItem
-> nestedWindowFunction = Group "nested window function"
->     [-- todo: nested window function
->     ]
+nestedWindowFunction :: TestItem
+nestedWindowFunction = Group "nested window function"
+    [-- todo: nested window function
+    ]
 
 
+{-
 == 6.12 <case expression>
 
 Function
@@ -1656,12 +1705,14 @@ binary ops as unary prefix ops.
 <result> ::= <result expression> | NULL
 
 <result expression> ::= <value expression>
+-}
 
-> caseExpression :: TestItem
-> caseExpression = Group "case expression"
->     [-- todo: case expression
->     ]
+caseExpression :: TestItem
+caseExpression = Group "case expression"
+    [-- todo: case expression
+    ]
 
+{-
 == 6.13 <cast specification>
 
 Function
@@ -1673,43 +1724,49 @@ Specify a data conversion.
 <cast operand> ::= <value expression> | <implicitly typed value specification>
 
 <cast target> ::= <domain name> | <data type>
+-}
 
-> castSpecification :: TestItem
-> castSpecification = Group "cast specification"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("cast(a as int)"
->      ,Cast (Iden [Name Nothing "a"]) (TypeName [Name Nothing "int"]))
->     ]
+castSpecification :: TestItem
+castSpecification = Group "cast specification"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("cast(a as int)"
+     ,Cast (Iden [Name Nothing "a"]) (TypeName [Name Nothing "int"]))
+    ]
 
+{-
 == 6.14 <next value expression>
 
 Function
 Return the next value of a sequence generator.
 
 <next value expression> ::= NEXT VALUE FOR <sequence generator name>
+-}
 
-> nextScalarExpression :: TestItem
-> nextScalarExpression = Group "next value expression"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("next value for a.b", NextValueFor [Name Nothing "a", Name Nothing "b"])
->     ]
+nextScalarExpression :: TestItem
+nextScalarExpression = Group "next value expression"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("next value for a.b", NextValueFor [Name Nothing "a", Name Nothing "b"])
+    ]
 
+{-
 == 6.15 <field reference>
 
 Function
 Reference a field of a row value.
 
 <field reference> ::= <value expression primary> <period> <field name>
+-}
 
-> fieldReference :: TestItem
-> fieldReference = Group "field reference"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("f(something).a"
->       ,BinOp (App [Name Nothing "f"] [Iden [Name Nothing "something"]])
->        [Name Nothing "."]
->        (Iden [Name Nothing "a"]))
->     ]
+fieldReference :: TestItem
+fieldReference = Group "field reference"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("f(something).a"
+      ,BinOp (App [Name Nothing "f"] [Iden [Name Nothing "something"]])
+       [Name Nothing "."]
+       (Iden [Name Nothing "a"]))
+    ]
 
+{-
 TODO: try all possible value expression syntax variations followed by
 field reference
 
@@ -1824,22 +1881,24 @@ Return an element of an array.
 <array element reference> ::=
   <array value expression>
       <left bracket or trigraph> <numeric value expression> <right bracket or trigraph>
+-}
 
-> arrayElementReference :: TestItem
-> arrayElementReference = Group "array element reference"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("something[3]"
->      ,Array (Iden [Name Nothing "something"]) [NumLit "3"])
->     ,("(something(a))[x]"
->       ,Array (Parens (App [Name Nothing "something"] [Iden [Name Nothing "a"]]))
->         [Iden [Name Nothing "x"]])
->     ,("(something(a))[x][y] "
->       ,Array (
->         Array (Parens (App [Name Nothing "something"] [Iden [Name Nothing "a"]]))
->         [Iden [Name Nothing "x"]])
->         [Iden [Name Nothing "y"]])
->     ]
+arrayElementReference :: TestItem
+arrayElementReference = Group "array element reference"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("something[3]"
+     ,Array (Iden [Name Nothing "something"]) [NumLit "3"])
+    ,("(something(a))[x]"
+      ,Array (Parens (App [Name Nothing "something"] [Iden [Name Nothing "a"]]))
+        [Iden [Name Nothing "x"]])
+    ,("(something(a))[x][y] "
+      ,Array (
+        Array (Parens (App [Name Nothing "something"] [Iden [Name Nothing "a"]]))
+        [Iden [Name Nothing "x"]])
+        [Iden [Name Nothing "y"]])
+    ]
 
+{-
 == 6.25 <multiset element reference>
 
 Function
@@ -1847,14 +1906,16 @@ Return the sole element of a multiset of one element.
 
 <multiset element reference> ::=
   ELEMENT <left paren> <multiset value expression> <right paren>
+-}
 
-> multisetElementReference :: TestItem
-> multisetElementReference = Group "multisetElementReference"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("element(something)"
->      ,App [Name Nothing "element"] [Iden [Name Nothing "something"]])
->     ]
+multisetElementReference :: TestItem
+multisetElementReference = Group "multisetElementReference"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("element(something)"
+     ,App [Name Nothing "element"] [Iden [Name Nothing "something"]])
+    ]
 
+{-
 == 6.26 <value expression>
 
 Function
@@ -1897,21 +1958,23 @@ Specify a numeric value.
 <factor> ::= [ <sign> ] <numeric primary>
 
 <numeric primary> ::= <value expression primary> | <numeric value function>
+-}
 
-> numericScalarExpression :: TestItem
-> numericScalarExpression = Group "numeric value expression"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("a + b", binOp "+")
->     ,("a - b", binOp "-")
->     ,("a * b", binOp "*")
->     ,("a / b", binOp "/")
->     ,("+a", prefOp "+")
->     ,("-a", prefOp "-")
->     ]
->   where
->     binOp o = BinOp (Iden [Name Nothing "a"]) [Name Nothing o] (Iden [Name Nothing "b"])
->     prefOp o = PrefixOp [Name Nothing o] (Iden [Name Nothing "a"])
+numericScalarExpression :: TestItem
+numericScalarExpression = Group "numeric value expression"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("a + b", binOp "+")
+    ,("a - b", binOp "-")
+    ,("a * b", binOp "*")
+    ,("a / b", binOp "/")
+    ,("+a", prefOp "+")
+    ,("-a", prefOp "-")
+    ]
+  where
+    binOp o = BinOp (Iden [Name Nothing "a"]) [Name Nothing o] (Iden [Name Nothing "b"])
+    prefOp o = PrefixOp [Name Nothing o] (Iden [Name Nothing "a"])
 
+{-
 TODO: precedence and associativity tests (need to review all operators
 for what precendence and associativity tests to write)
 
@@ -1937,13 +2000,15 @@ Specify a function yielding a value of type numeric.
   | <floor function>
   | <ceiling function>
   | <width bucket function>
+-}
 
 
-> numericValueFunction :: TestItem
-> numericValueFunction = Group "numeric value function"
->     [-- todo: numeric value function
->     ]
+numericValueFunction :: TestItem
+numericValueFunction = Group "numeric value function"
+    [-- todo: numeric value function
+    ]
 
+{-
 <position expression> ::=
     <character position expression>
   | <binary position expression>
@@ -2085,12 +2150,14 @@ Specify a character string value or a binary string value.
 
 <binary concatenation> ::=
   <binary value expression> <concatenation operator> <binary factor>
+-}
 
-> stringScalarExpression :: TestItem
-> stringScalarExpression = Group "string value expression"
->     [-- todo: string value expression
->     ]
+stringScalarExpression :: TestItem
+stringScalarExpression = Group "string value expression"
+    [-- todo: string value expression
+    ]
 
+{-
 == 6.30 <string value function>
 
 Function
@@ -2112,12 +2179,14 @@ Specify a function yielding a value of type character string or binary string.
   | <character overlay function>
   | <normalize function>
   | <specific type method>
+-}
 
-> stringValueFunction :: TestItem
-> stringValueFunction = Group "string value function"
->     [-- todo: string value function
->     ]
+stringValueFunction :: TestItem
+stringValueFunction = Group "string value function"
+    [-- todo: string value function
+    ]
 
+{-
 <character substring function> ::=
   SUBSTRING <left paren> <character value expression> FROM <start position>
       [ FOR <string length> ] [ USING <char length units> ] <right paren>
@@ -2228,13 +2297,15 @@ Specify a datetime value.
   | <interval value expression> <plus sign> <datetime term>
   | <datetime value expression> <plus sign> <interval term>
   | <datetime value expression> <minus sign> <interval term>
+-}
 
-> datetimeScalarExpression :: TestItem
-> datetimeScalarExpression = Group "datetime value expression"
->     [-- todo: datetime value expression
->      datetimeValueFunction 
->     ]
+datetimeScalarExpression :: TestItem
+datetimeScalarExpression = Group "datetime value expression"
+    [-- todo: datetime value expression
+     datetimeValueFunction 
+    ]
 
+{-
 <datetime term> ::= <datetime factor>
 
 <datetime factor> ::= <datetime primary> [ <time zone> ]
@@ -2256,12 +2327,14 @@ Specify a function yielding a value of type datetime.
   | <current timestamp value function>
   | <current local time value function>
   | <current local timestamp value function>
+-}
 
-> datetimeValueFunction :: TestItem
-> datetimeValueFunction = Group "datetime value function"
->     [-- todo: datetime value function
->     ]
+datetimeValueFunction :: TestItem
+datetimeValueFunction = Group "datetime value function"
+    [-- todo: datetime value function
+    ]
 
+{-
 <current date value function> ::= CURRENT_DATE
 
 <current time value function> ::=
@@ -2287,13 +2360,15 @@ Specify an interval value.
   | <interval value expression 1> <minus sign> <interval term 1>
   | <left paren> <datetime value expression> <minus sign> <datetime term> <right paren>
       <interval qualifier>
+-}
 
-> intervalScalarExpression :: TestItem
-> intervalScalarExpression = Group "interval value expression"
->     [-- todo: interval value expression
->     ]
+intervalScalarExpression :: TestItem
+intervalScalarExpression = Group "interval value expression"
+    [-- todo: interval value expression
+    ]
 
 
+{-
 <interval term> ::=
     <interval factor>
   | <interval term 2> <asterisk> <factor>
@@ -2321,13 +2396,15 @@ Specify a function yielding a value of type interval.
 
 <interval absolute value function> ::=
   ABS <left paren> <interval value expression> <right paren>
+-}
 
-> intervalValueFunction :: TestItem
-> intervalValueFunction = Group "interval value function"
->     [-- todo: interval value function
->     ]
+intervalValueFunction :: TestItem
+intervalValueFunction = Group "interval value function"
+    [-- todo: interval value function
+    ]
 
 
+{-
 == 6.35 <boolean value expression>
 
 Function
@@ -2353,27 +2430,29 @@ Specify a boolean value.
 
 <parenthesized boolean value expression> ::=
   <left paren> <boolean value expression> <right paren>
+-}
 
 
-> booleanScalarExpression :: TestItem
-> booleanScalarExpression = Group "booleab value expression"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("a or b", BinOp a [Name Nothing "or"] b)
->     ,("a and b", BinOp a [Name Nothing "and"] b)
->     ,("not a", PrefixOp [Name Nothing "not"] a)
->     ,("a is true", postfixOp "is true")
->     ,("a is false", postfixOp "is false")
->     ,("a is unknown", postfixOp "is unknown")
->     ,("a is not true", postfixOp "is not true")
->     ,("a is not false", postfixOp "is not false")
->     ,("a is not unknown", postfixOp "is not unknown")
->     ,("(a or b)", Parens $ BinOp a [Name Nothing "or"] b)
->     ]
->   where
->     a = Iden [Name Nothing "a"]
->     b = Iden [Name Nothing "b"]
->     postfixOp nm = PostfixOp [Name Nothing nm] a
+booleanScalarExpression :: TestItem
+booleanScalarExpression = Group "boolean value expression"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("a or b", BinOp a [Name Nothing "or"] b)
+    ,("a and b", BinOp a [Name Nothing "and"] b)
+    ,("not a", PrefixOp [Name Nothing "not"] a)
+    ,("a is true", postfixOp "is true")
+    ,("a is false", postfixOp "is false")
+    ,("a is unknown", postfixOp "is unknown")
+    ,("a is not true", postfixOp "is not true")
+    ,("a is not false", postfixOp "is not false")
+    ,("a is not unknown", postfixOp "is not unknown")
+    ,("(a or b)", Parens $ BinOp a [Name Nothing "or"] b)
+    ]
+  where
+    a = Iden [Name Nothing "a"]
+    b = Iden [Name Nothing "b"]
+    postfixOp nm = PostfixOp [Name Nothing nm] a
 
+{-
 TODO: review if more tests are needed. Should at least have
 precendence tests for mixed and, or and not without parens.
 
@@ -2390,12 +2469,14 @@ Specify an array value.
 <array value expression 1> ::= <array value expression>
 
 <array primary> ::= <array value function> | <value expression primary>
+-}
 
-> arrayScalarExpression :: TestItem
-> arrayScalarExpression = Group "array value expression"
->     [-- todo: array value expression
->     ]
+arrayScalarExpression :: TestItem
+arrayScalarExpression = Group "array value expression"
+    [-- todo: array value expression
+    ]
 
+{-
 == 6.37 <array value function>
 
 Function
@@ -2406,12 +2487,14 @@ Specify a function yielding a value of an array type.
 <trim array function> ::=
   TRIM_ARRAY <left paren> <array value expression> <comma> <numeric value expression>
     <right paren>
+-}
 
-> arrayValueFunction :: TestItem
-> arrayValueFunction = Group "array value function"
->     [-- todo: array value function
->     ]
+arrayValueFunction :: TestItem
+arrayValueFunction = Group "array value function"
+    [-- todo: array value function
+    ]
 
+{-
 == 6.38 <array value constructor>
 
 Function
@@ -2429,29 +2512,31 @@ Specify construction of an array.
 <array element> ::= <value expression>
 
 <array value constructor by query> ::= ARRAY <table subquery>
+-}
 
-> arrayValueConstructor :: TestItem
-> arrayValueConstructor = Group "array value constructor"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("array[1,2,3]"
->      ,Array (Iden [Name Nothing "array"])
->       [NumLit "1", NumLit "2", NumLit "3"])
->     ,("array[a,b,c]"
->      ,Array (Iden [Name Nothing "array"])
->       [Iden [Name Nothing "a"], Iden [Name Nothing "b"], Iden [Name Nothing "c"]])
->     ,("array(select * from t)"
->       ,ArrayCtor (makeSelect
->                   {qeSelectList = [(Star,Nothing)]
->                   ,qeFrom = [TRSimple [Name Nothing "t"]]}))
->     ,("array(select * from t order by a)"
->       ,ArrayCtor (makeSelect
->                   {qeSelectList = [(Star,Nothing)]
->                   ,qeFrom = [TRSimple [Name Nothing "t"]]
->                   ,qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
->                                     DirDefault NullsOrderDefault]}))
->     ]
+arrayValueConstructor :: TestItem
+arrayValueConstructor = Group "array value constructor"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("array[1,2,3]"
+     ,Array (Iden [Name Nothing "array"])
+      [NumLit "1", NumLit "2", NumLit "3"])
+    ,("array[a,b,c]"
+     ,Array (Iden [Name Nothing "array"])
+      [Iden [Name Nothing "a"], Iden [Name Nothing "b"], Iden [Name Nothing "c"]])
+    ,("array(select * from t)"
+      ,ArrayCtor (makeSelect
+                  {qeSelectList = [(Star,Nothing)]
+                  ,qeFrom = [TRSimple [Name Nothing "t"]]}))
+    ,("array(select * from t order by a)"
+      ,ArrayCtor (makeSelect
+                  {qeSelectList = [(Star,Nothing)]
+                  ,qeFrom = [TRSimple [Name Nothing "t"]]
+                  ,qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
+                                    DirDefault NullsOrderDefault]}))
+    ]
 
 
+{-
 == 6.39 <multiset value expression>
 
 Function
@@ -2467,22 +2552,24 @@ Specify a multiset value.
   | <multiset term> MULTISET INTERSECT [ ALL | DISTINCT ] <multiset primary>
 
 <multiset primary> ::= <multiset value function> | <value expression primary>
+-}
 
-> multisetScalarExpression :: TestItem
-> multisetScalarExpression = Group "multiset value expression"
->    $ map (uncurry (TestScalarExpr ansi2011))
->    [("a multiset union b"
->     ,MultisetBinOp (Iden [Name Nothing "a"]) Union SQDefault (Iden [Name Nothing "b"]))
->    ,("a multiset union all b"
->     ,MultisetBinOp (Iden [Name Nothing "a"]) Union All (Iden [Name Nothing "b"]))
->    ,("a multiset union distinct b"
->     ,MultisetBinOp (Iden [Name Nothing "a"]) Union Distinct (Iden [Name Nothing "b"]))
->    ,("a multiset except b"
->     ,MultisetBinOp (Iden [Name Nothing "a"]) Except SQDefault (Iden [Name Nothing "b"]))
->    ,("a multiset intersect b"
->     ,MultisetBinOp (Iden [Name Nothing "a"]) Intersect SQDefault (Iden [Name Nothing "b"]))
->    ]
+multisetScalarExpression :: TestItem
+multisetScalarExpression = Group "multiset value expression"
+   $ map (uncurry (TestScalarExpr ansi2011))
+   [("a multiset union b"
+    ,MultisetBinOp (Iden [Name Nothing "a"]) Union SQDefault (Iden [Name Nothing "b"]))
+   ,("a multiset union all b"
+    ,MultisetBinOp (Iden [Name Nothing "a"]) Union All (Iden [Name Nothing "b"]))
+   ,("a multiset union distinct b"
+    ,MultisetBinOp (Iden [Name Nothing "a"]) Union Distinct (Iden [Name Nothing "b"]))
+   ,("a multiset except b"
+    ,MultisetBinOp (Iden [Name Nothing "a"]) Except SQDefault (Iden [Name Nothing "b"]))
+   ,("a multiset intersect b"
+    ,MultisetBinOp (Iden [Name Nothing "a"]) Intersect SQDefault (Iden [Name Nothing "b"]))
+   ]
 
+{-
 TODO: check precedence and associativity
 
 == 6.40 <multiset value function>
@@ -2497,13 +2584,15 @@ Specify a function yielding a value of a multiset type.
 
 TODO: set is now a reserved keyword. Fix the set parsing with a
 special case term.
+-}
 
-> multisetValueFunction :: TestItem
-> multisetValueFunction = Group "multiset value function"
->    $ map (uncurry (TestScalarExpr ansi2011))
->    [("set(a)", App [Name Nothing "set"] [Iden [Name Nothing "a"]])
->    ]
+multisetValueFunction :: TestItem
+multisetValueFunction = Group "multiset value function"
+   $ map (uncurry (TestScalarExpr ansi2011))
+   [("set(a)", App [Name Nothing "set"] [Iden [Name Nothing "a"]])
+   ]
 
+{-
 == 6.41 <multiset value constructor>
 
 Function
@@ -2525,42 +2614,44 @@ Specify construction of a multiset.
 <multiset value constructor by query> ::= MULTISET <table subquery>
 
 <table value constructor by query> ::= TABLE <table subquery>
+-}
 
-> multisetValueConstructor :: TestItem
-> multisetValueConstructor = Group "multiset value constructor"
->    $ map (uncurry (TestScalarExpr ansi2011))
->    [("multiset[a,b,c]", MultisetCtor[Iden [Name Nothing "a"]
->                                     ,Iden [Name Nothing "b"], Iden [Name Nothing "c"]])
->    ,("multiset(select * from t)", MultisetQueryCtor qe)
->    ,("table(select * from t)", MultisetQueryCtor qe)
->    ]
->   where
->     qe = makeSelect {qeSelectList = [(Star,Nothing)]
->                     ,qeFrom = [TRSimple [Name Nothing "t"]]}
+multisetValueConstructor :: TestItem
+multisetValueConstructor = Group "multiset value constructor"
+   $ map (uncurry (TestScalarExpr ansi2011))
+   [("multiset[a,b,c]", MultisetCtor[Iden [Name Nothing "a"]
+                                    ,Iden [Name Nothing "b"], Iden [Name Nothing "c"]])
+   ,("multiset(select * from t)", MultisetQueryCtor qe)
+   ,("table(select * from t)", MultisetQueryCtor qe)
+   ]
+  where
+    qe = makeSelect {qeSelectList = [(Star,Nothing)]
+                    ,qeFrom = [TRSimple [Name Nothing "t"]]}
 
 
-= 7 Query expressions
+-- = 7 Query expressions
 
-> queryExpressions :: TestItem
-> queryExpressions = Group "query expressions"
->     [rowValueConstructor
->     ,tableValueConstructor
->     ,fromClause
->     ,tableReference
->     ,joinedTable
->     ,whereClause
->     ,groupByClause
->     ,havingClause
->     ,windowClause
->     ,querySpecification
->     ,withQueryExpression
->     ,setOpQueryExpression
->     ,explicitTableQueryExpression
->     ,orderOffsetFetchQueryExpression
->     ,searchOrCycleClause
->     ]
+queryExpressions :: TestItem
+queryExpressions = Group "query expressions"
+    [rowValueConstructor
+    ,tableValueConstructor
+    ,fromClause
+    ,tableReference
+    ,joinedTable
+    ,whereClause
+    ,groupByClause
+    ,havingClause
+    ,windowClause
+    ,querySpecification
+    ,withQueryExpression
+    ,setOpQueryExpression
+    ,explicitTableQueryExpression
+    ,orderOffsetFetchQueryExpression
+    ,searchOrCycleClause
+    ]
 
 
+{-
 == 7.1 <row value constructor>
 
 Function
@@ -2603,16 +2694,18 @@ Specify a value or list of values to be constructed into a row.
     <common value expression>
   | <boolean predicand>
   | <explicit row value constructor>
+-}
 
-> rowValueConstructor :: TestItem
-> rowValueConstructor = Group "row value constructor"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("(a,b)"
->      ,SpecialOp [Name Nothing "rowctor"] [Iden [Name Nothing "a"], Iden [Name Nothing "b"]])
->     ,("row(1)",App [Name Nothing "row"] [NumLit "1"])
->     ,("row(1,2)",App [Name Nothing "row"] [NumLit "1",NumLit "2"])
->     ]
+rowValueConstructor :: TestItem
+rowValueConstructor = Group "row value constructor"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("(a,b)"
+     ,SpecialOp [Name Nothing "rowctor"] [Iden [Name Nothing "a"], Iden [Name Nothing "b"]])
+    ,("row(1)",App [Name Nothing "row"] [NumLit "1"])
+    ,("row(1,2)",App [Name Nothing "row"] [NumLit "1",NumLit "2"])
+    ]
 
+{-
 == 7.2 <row value expression>
 
 Function
@@ -2654,20 +2747,22 @@ Specify a set of <row value expression>s to be constructed into a table.
 <contextually typed row value expression list> ::=
   <contextually typed row value expression>
       [ { <comma> <contextually typed row value expression> }... ]
+-}
 
-> tableValueConstructor :: TestItem
-> tableValueConstructor = Group "table value constructor"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("values (1,2), (a+b,(select count(*) from t));"
->      ,Values [[NumLit "1", NumLit "2"]
->              ,[BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"]
->                      (Iden [Name Nothing "b"])
->               ,SubQueryExpr SqSq
->                (makeSelect
->                 {qeSelectList = [(App [Name Nothing "count"] [Star],Nothing)]
->                 ,qeFrom = [TRSimple [Name Nothing "t"]]})]])
->     ]
+tableValueConstructor :: TestItem
+tableValueConstructor = Group "table value constructor"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("values (1,2), (a+b,(select count(*) from t));"
+     ,Values [[NumLit "1", NumLit "2"]
+             ,[BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"]
+                     (Iden [Name Nothing "b"])
+              ,SubQueryExpr SqSq
+               (makeSelect
+                {qeSelectList = [(App [Name Nothing "count"] [Star],Nothing)]
+                ,qeFrom = [TRSimple [Name Nothing "t"]]})]])
+    ]
 
+{-
 == 7.4 <table expression>
 
 Function
@@ -2689,27 +2784,31 @@ Specify a table derived from one or more tables.
 
 <table reference list> ::=
   <table reference> [ { <comma> <table reference> }... ]
+-}
 
-> fromClause :: TestItem
-> fromClause = Group "fromClause"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select * from tbl1,tbl2"
->      ,makeSelect
->       {qeSelectList = [(Star, Nothing)]
->       ,qeFrom = [TRSimple [Name Nothing "tbl1"], TRSimple [Name Nothing "tbl2"]]
->       })]
+fromClause :: TestItem
+fromClause = Group "fromClause"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select * from tbl1,tbl2"
+     ,makeSelect
+      {qeSelectList = [(Star, Nothing)]
+      ,qeFrom = [TRSimple [Name Nothing "tbl1"], TRSimple [Name Nothing "tbl2"]]
+      })]
 
 
+{-
 == 7.6 <table reference>
 
 Function
 Reference a table.
+-}
 
-> tableReference :: TestItem
-> tableReference = Group "table reference"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select * from t", sel)
+tableReference :: TestItem
+tableReference = Group "table reference"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select * from t", sel)
 
+{-
 <table reference> ::= <table factor> | <joined table>
 
 <table factor> ::= <table primary> [ <sample clause> ]
@@ -2794,55 +2893,57 @@ TODO: only
 <parenthesized joined table> ::=
     <left paren> <parenthesized joined table> <right paren>
   | <left paren> <joined table> <right paren>
+-}
 
 
->     -- table or query name
->     ,("select * from t u", a sel)
->     ,("select * from t as u", a sel)
->     ,("select * from t u(a,b)", sel1 )
->     ,("select * from t as u(a,b)", sel1)
->     -- derived table TODO: realistic example
->     ,("select * from (select * from t) u"
->      ,a $ sel {qeFrom = [TRQueryExpr sel]})
->     -- lateral TODO: realistic example
->     ,("select * from lateral t"
->      ,af TRLateral sel)
->     -- TODO: bug, lateral should bind more tightly than the alias
->     --,("select * from lateral t u"
->     -- ,a $ af sel TRLateral)
->     -- collection TODO: realistic example
->      -- TODO: make it work
->     --,("select * from unnest(a)", undefined)
->     --,("select * from unnest(a,b)", undefined)
->     --,("select * from unnest(a,b) with ordinality", undefined)
->     --,("select * from unnest(a,b) with ordinality u", undefined)
->     --,("select * from unnest(a,b) with ordinality as u", undefined)
->     -- table fn TODO: realistic example
->      -- TODO: make it work
->     --,("select * from table(a)", undefined)
->     -- parens
->     ,("select * from (a join b)", jsel)
->     ,("select * from (a join b) u", a jsel)
->     ,("select * from ((a join b)) u", a $ af TRParens jsel)
->     ,("select * from ((a join b) u) u", a $ af TRParens $ a jsel)
->     ]
->   where
->     sel = makeSelect
->           {qeSelectList = [(Star, Nothing)]
->           ,qeFrom = [TRSimple [Name Nothing "t"]]}
->     af f s = s {qeFrom = map f (qeFrom s)}
->     a s = af (\x -> TRAlias x $ Alias (Name Nothing "u") Nothing) s
->     sel1 = makeSelect
->           {qeSelectList = [(Star, Nothing)]
->           ,qeFrom = [TRAlias (TRSimple [Name Nothing "t"])
->                      $ Alias (Name Nothing "u") $ Just [Name Nothing "a", Name Nothing "b"]]}
->     jsel = sel {qeFrom =
->                 [TRParens $ TRJoin (TRSimple [Name Nothing "a"])
->                                    False
->                                    JInner
->                                    (TRSimple [Name Nothing "b"])
->                                    Nothing]}
+    -- table or query name
+    ,("select * from t u", a sel)
+    ,("select * from t as u", a sel)
+    ,("select * from t u(a,b)", sel1 )
+    ,("select * from t as u(a,b)", sel1)
+    -- derived table TODO: realistic example
+    ,("select * from (select * from t) u"
+     ,a $ sel {qeFrom = [TRQueryExpr sel]})
+    -- lateral TODO: realistic example
+    ,("select * from lateral t"
+     ,af TRLateral sel)
+    -- TODO: bug, lateral should bind more tightly than the alias
+    --,("select * from lateral t u"
+    -- ,a $ af sel TRLateral)
+    -- collection TODO: realistic example
+     -- TODO: make it work
+    --,("select * from unnest(a)", undefined)
+    --,("select * from unnest(a,b)", undefined)
+    --,("select * from unnest(a,b) with ordinality", undefined)
+    --,("select * from unnest(a,b) with ordinality u", undefined)
+    --,("select * from unnest(a,b) with ordinality as u", undefined)
+    -- table fn TODO: realistic example
+     -- TODO: make it work
+    --,("select * from table(a)", undefined)
+    -- parens
+    ,("select * from (a join b)", jsel)
+    ,("select * from (a join b) u", a jsel)
+    ,("select * from ((a join b)) u", a $ af TRParens jsel)
+    ,("select * from ((a join b) u) u", a $ af TRParens $ a jsel)
+    ]
+  where
+    sel = makeSelect
+          {qeSelectList = [(Star, Nothing)]
+          ,qeFrom = [TRSimple [Name Nothing "t"]]}
+    af f s = s {qeFrom = map f (qeFrom s)}
+    a s = af (\x -> TRAlias x $ Alias (Name Nothing "u") Nothing) s
+    sel1 = makeSelect
+          {qeSelectList = [(Star, Nothing)]
+          ,qeFrom = [TRAlias (TRSimple [Name Nothing "t"])
+                     $ Alias (Name Nothing "u") $ Just [Name Nothing "a", Name Nothing "b"]]}
+    jsel = sel {qeFrom =
+                [TRParens $ TRJoin (TRSimple [Name Nothing "a"])
+                                   False
+                                   JInner
+                                   (TRSimple [Name Nothing "b"])
+                                   Nothing]}
 
+{-
 == 7.7 <joined table>
 
 Function
@@ -2885,53 +2986,55 @@ Specify a table derived from a Cartesian product, inner join, or outer join.
 <outer join type> ::= LEFT | RIGHT | FULL
 
 <join column list> ::= <column name list>
+-}
 
-> joinedTable :: TestItem
-> joinedTable = Group "joined table"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select * from a cross join b"
->      ,sel $ TRJoin a False JCross b Nothing)
->     ,("select * from a join b on true"
->      ,sel $ TRJoin a False JInner b
->       (Just $ JoinOn $ Iden [Name Nothing "true"]))
->     ,("select * from a join b using (c)"
->      ,sel $ TRJoin a False JInner b
->       (Just $ JoinUsing [Name Nothing "c"]))
->     ,("select * from a inner join b on true"
->      ,sel $ TRJoin a False JInner b
->       (Just $ JoinOn $ Iden [Name Nothing "true"]))
->     ,("select * from a left join b on true"
->      ,sel $ TRJoin a False JLeft b
->       (Just $ JoinOn $ Iden [Name Nothing "true"]))
->     ,("select * from a left outer join b on true"
->      ,sel $ TRJoin a False JLeft b
->       (Just $ JoinOn $ Iden [Name Nothing "true"]))
->     ,("select * from a right join b on true"
->      ,sel $ TRJoin a False JRight b
->       (Just $ JoinOn $ Iden [Name Nothing "true"]))
->     ,("select * from a full join b on true"
->      ,sel $ TRJoin a False JFull b
->       (Just $ JoinOn $ Iden [Name Nothing "true"]))
->     ,("select * from a natural join b"
->      ,sel $ TRJoin a True JInner b Nothing)
->     ,("select * from a natural inner join b"
->      ,sel $ TRJoin a True JInner b Nothing)
->     ,("select * from a natural left join b"
->      ,sel $ TRJoin a True JLeft b Nothing)
->     ,("select * from a natural left outer join b"
->      ,sel $ TRJoin a True JLeft b Nothing)
->     ,("select * from a natural right join b"
->      ,sel $ TRJoin a True JRight b Nothing)
->     ,("select * from a natural full join b"
->      ,sel $ TRJoin a True JFull b Nothing)
->     ]
->   where
->     sel t = makeSelect
->             {qeSelectList = [(Star, Nothing)]
->             ,qeFrom = [t]}
->     a = TRSimple [Name Nothing "a"]
->     b = TRSimple [Name Nothing "b"]
+joinedTable :: TestItem
+joinedTable = Group "joined table"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select * from a cross join b"
+     ,sel $ TRJoin a False JCross b Nothing)
+    ,("select * from a join b on true"
+     ,sel $ TRJoin a False JInner b
+      (Just $ JoinOn $ Iden [Name Nothing "true"]))
+    ,("select * from a join b using (c)"
+     ,sel $ TRJoin a False JInner b
+      (Just $ JoinUsing [Name Nothing "c"]))
+    ,("select * from a inner join b on true"
+     ,sel $ TRJoin a False JInner b
+      (Just $ JoinOn $ Iden [Name Nothing "true"]))
+    ,("select * from a left join b on true"
+     ,sel $ TRJoin a False JLeft b
+      (Just $ JoinOn $ Iden [Name Nothing "true"]))
+    ,("select * from a left outer join b on true"
+     ,sel $ TRJoin a False JLeft b
+      (Just $ JoinOn $ Iden [Name Nothing "true"]))
+    ,("select * from a right join b on true"
+     ,sel $ TRJoin a False JRight b
+      (Just $ JoinOn $ Iden [Name Nothing "true"]))
+    ,("select * from a full join b on true"
+     ,sel $ TRJoin a False JFull b
+      (Just $ JoinOn $ Iden [Name Nothing "true"]))
+    ,("select * from a natural join b"
+     ,sel $ TRJoin a True JInner b Nothing)
+    ,("select * from a natural inner join b"
+     ,sel $ TRJoin a True JInner b Nothing)
+    ,("select * from a natural left join b"
+     ,sel $ TRJoin a True JLeft b Nothing)
+    ,("select * from a natural left outer join b"
+     ,sel $ TRJoin a True JLeft b Nothing)
+    ,("select * from a natural right join b"
+     ,sel $ TRJoin a True JRight b Nothing)
+    ,("select * from a natural full join b"
+     ,sel $ TRJoin a True JFull b Nothing)
+    ]
+  where
+    sel t = makeSelect
+            {qeSelectList = [(Star, Nothing)]
+            ,qeFrom = [t]}
+    a = TRSimple [Name Nothing "a"]
+    b = TRSimple [Name Nothing "b"]
 
+{-
 TODO: partitioned joins
 
 == 7.8 <where clause>
@@ -2942,16 +3045,18 @@ Specify a table derived by the application of a <search condition> to
 the result of the preceding <from clause>.
 
 <where clause> ::= WHERE <search condition>
+-}
 
-> whereClause :: TestItem
-> whereClause = Group "where clause"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select * from t where a = 5"
->     ,makeSelect
->      {qeSelectList = [(Star,Nothing)]
->      ,qeFrom = [TRSimple [Name Nothing "t"]]
->      ,qeWhere = Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "5")})]
+whereClause :: TestItem
+whereClause = Group "where clause"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select * from t where a = 5"
+    ,makeSelect
+     {qeSelectList = [(Star,Nothing)]
+     ,qeFrom = [TRSimple [Name Nothing "t"]]
+     ,qeWhere = Just $ BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "5")})]
 
+{-
 == 7.9 <group by clause>
 
 Function
@@ -3001,47 +3106,49 @@ clause> to the result of the previously specified clause.
   | <empty grouping set>
 
 <empty grouping set> ::= <left paren> <right paren>
+-}
 
 
-> groupByClause :: TestItem
-> groupByClause = Group "group by clause"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a,sum(x) from t group by a"
->      ,qe [SimpleGroup $ Iden [Name Nothing "a"]])
->      ,("select a,sum(x) from t group by a collate c"
->      ,qe [SimpleGroup $ Collate (Iden [Name Nothing "a"]) [Name Nothing "c"]])
->     ,("select a,b,sum(x) from t group by a,b"
->      ,qex [SimpleGroup $ Iden [Name Nothing "a"]
->           ,SimpleGroup $ Iden [Name Nothing "b"]])
->     -- todo: group by set quantifier
->     --,("select a,sum(x) from t group by distinct a"
->     --,undefined)
->     --,("select a,sum(x) from t group by all a"
->     -- ,undefined)
->     ,("select a,b,sum(x) from t group by rollup(a,b)"
->      ,qex [Rollup [SimpleGroup $ Iden [Name Nothing "a"]
->                   ,SimpleGroup $ Iden [Name Nothing "b"]]])
->     ,("select a,b,sum(x) from t group by cube(a,b)"
->      ,qex [Cube [SimpleGroup $ Iden [Name Nothing "a"]
->                 ,SimpleGroup $ Iden [Name Nothing "b"]]])
->     ,("select a,b,sum(x) from t group by grouping sets((),(a,b))"
->      ,qex [GroupingSets [GroupingParens []
->                         ,GroupingParens [SimpleGroup $ Iden [Name Nothing "a"]
->                                         ,SimpleGroup $ Iden [Name Nothing "b"]]]])
->     ,("select sum(x) from t group by ()"
->      ,let x = qe [GroupingParens []]
->       in x {qeSelectList = tail $ qeSelectList x})
->     ]
->   where
->     qe g = makeSelect
->            {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
->                            ,(App [Name Nothing "sum"] [Iden [Name Nothing "x"]], Nothing)]
->            ,qeFrom = [TRSimple [Name Nothing "t"]]
->            ,qeGroupBy = g}
->     qex g = let x = qe g
->             in x {qeSelectList = let [a,b] = qeSelectList x
->                                  in [a,(Iden [Name Nothing "b"],Nothing),b]}
+groupByClause :: TestItem
+groupByClause = Group "group by clause"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a,sum(x) from t group by a"
+     ,qe [SimpleGroup $ Iden [Name Nothing "a"]])
+     ,("select a,sum(x) from t group by a collate c"
+     ,qe [SimpleGroup $ Collate (Iden [Name Nothing "a"]) [Name Nothing "c"]])
+    ,("select a,b,sum(x) from t group by a,b"
+     ,qex [SimpleGroup $ Iden [Name Nothing "a"]
+          ,SimpleGroup $ Iden [Name Nothing "b"]])
+    -- todo: group by set quantifier
+    --,("select a,sum(x) from t group by distinct a"
+    --,undefined)
+    --,("select a,sum(x) from t group by all a"
+    -- ,undefined)
+    ,("select a,b,sum(x) from t group by rollup(a,b)"
+     ,qex [Rollup [SimpleGroup $ Iden [Name Nothing "a"]
+                  ,SimpleGroup $ Iden [Name Nothing "b"]]])
+    ,("select a,b,sum(x) from t group by cube(a,b)"
+     ,qex [Cube [SimpleGroup $ Iden [Name Nothing "a"]
+                ,SimpleGroup $ Iden [Name Nothing "b"]]])
+    ,("select a,b,sum(x) from t group by grouping sets((),(a,b))"
+     ,qex [GroupingSets [GroupingParens []
+                        ,GroupingParens [SimpleGroup $ Iden [Name Nothing "a"]
+                                        ,SimpleGroup $ Iden [Name Nothing "b"]]]])
+    ,("select sum(x) from t group by ()"
+     ,let x = qe [GroupingParens []]
+      in x {qeSelectList = tail $ qeSelectList x})
+    ]
+  where
+    qe g = makeSelect
+           {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
+                           ,(App [Name Nothing "sum"] [Iden [Name Nothing "x"]], Nothing)]
+           ,qeFrom = [TRSimple [Name Nothing "t"]]
+           ,qeGroupBy = g}
+    qex g = let x = qe g
+            in x {qeSelectList = let [a,b] = qeSelectList x
+                                 in [a,(Iden [Name Nothing "b"],Nothing),b]}
 
+{-
 == 7.10 <having clause>
 
 Function
@@ -3050,21 +3157,23 @@ Specify a grouped table derived by the elimination of groups that do
 not satisfy a <search condition>.
 
 <having clause> ::= HAVING <search condition>
+-}
 
-> havingClause :: TestItem
-> havingClause = Group "having clause"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a,sum(x) from t group by a having sum(x) > 1000"
->      ,makeSelect
->       {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
->                       ,(App [Name Nothing "sum"] [Iden [Name Nothing "x"]], Nothing)]
->       ,qeFrom = [TRSimple [Name Nothing "t"]]
->       ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
->       ,qeHaving = Just $ BinOp (App [Name Nothing "sum"] [Iden [Name Nothing "x"]])
->                                [Name Nothing ">"]
->                                (NumLit "1000")})
->     ]
+havingClause :: TestItem
+havingClause = Group "having clause"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a,sum(x) from t group by a having sum(x) > 1000"
+     ,makeSelect
+      {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
+                      ,(App [Name Nothing "sum"] [Iden [Name Nothing "x"]], Nothing)]
+      ,qeFrom = [TRSimple [Name Nothing "t"]]
+      ,qeGroupBy = [SimpleGroup $ Iden [Name Nothing "a"]]
+      ,qeHaving = Just $ BinOp (App [Name Nothing "sum"] [Iden [Name Nothing "x"]])
+                               [Name Nothing ">"]
+                               (NumLit "1000")})
+    ]
 
+{-
 == 7.11 <window clause>
 
 Function
@@ -3135,12 +3244,14 @@ Specify one or more window definitions.
   | EXCLUDE GROUP
   | EXCLUDE TIES
   | EXCLUDE NO OTHERS
+-}
 
-> windowClause :: TestItem
-> windowClause = Group "window clause"
->     [-- todo: window clause
->     ]
+windowClause :: TestItem
+windowClause = Group "window clause"
+    [-- todo: window clause
+    ]
 
+{-
 == 7.12 <query specification>
 
 Function
@@ -3173,33 +3284,35 @@ Specify a table derived from the result of a <table expression>.
       [ AS <left paren> <all fields column name list> <right paren> ]
 
 <all fields column name list> ::= <column name list>
+-}
 
-> querySpecification :: TestItem
-> querySpecification = Group "query specification"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a from t",qe)
->     ,("select all a from t",qe {qeSetQuantifier = All})
->     ,("select distinct a from t",qe {qeSetQuantifier = Distinct})
->     ,("select * from t", qe {qeSelectList = [(Star,Nothing)]})
->     ,("select a.* from t"
->      ,qe {qeSelectList = [(BinOp (Iden [Name Nothing "a"]) [Name Nothing "."] Star
->                            ,Nothing)]})
->     ,("select a b from t"
->      ,qe {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "b")]})
->     ,("select a as b from t"
->      ,qe {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "b")]})
->     ,("select a,b from t"
->      ,qe {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
->                          ,(Iden [Name Nothing "b"], Nothing)]})
->     -- todo: all field reference alias
->     --,("select * as (a,b) from t",undefined)
->     ]
->   where
->     qe = makeSelect
->          {qeSelectList = [(Iden [Name Nothing "a"], Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }
+querySpecification :: TestItem
+querySpecification = Group "query specification"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a from t",qe)
+    ,("select all a from t",qe {qeSetQuantifier = All})
+    ,("select distinct a from t",qe {qeSetQuantifier = Distinct})
+    ,("select * from t", qe {qeSelectList = [(Star,Nothing)]})
+    ,("select a.* from t"
+     ,qe {qeSelectList = [(BinOp (Iden [Name Nothing "a"]) [Name Nothing "."] Star
+                           ,Nothing)]})
+    ,("select a b from t"
+     ,qe {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "b")]})
+    ,("select a as b from t"
+     ,qe {qeSelectList = [(Iden [Name Nothing "a"], Just $ Name Nothing "b")]})
+    ,("select a,b from t"
+     ,qe {qeSelectList = [(Iden [Name Nothing "a"], Nothing)
+                         ,(Iden [Name Nothing "b"], Nothing)]})
+    -- todo: all field reference alias
+    --,("select * as (a,b) from t",undefined)
+    ]
+  where
+    qe = makeSelect
+         {qeSelectList = [(Iden [Name Nothing "a"], Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }
 
+{-
 == 7.13 <query expression>
 
 Function
@@ -3218,12 +3331,14 @@ Specify a table.
       AS <table subquery> [ <search or cycle clause> ]
 
 <with column list> ::= <column name list>
+-}
 
-> withQueryExpression :: TestItem
-> withQueryExpression= Group "with query expression"
->     [-- todo: with query expression
->     ]
+withQueryExpression :: TestItem
+withQueryExpression= Group "with query expression"
+    [-- todo: with query expression
+    ]
 
+{-
 <query expression body> ::=
     <query term>
   | <query expression body> UNION [ ALL | DISTINCT ]
@@ -3241,27 +3356,29 @@ Specify a table.
   | <left paren> <query expression body>
       [ <order by clause> ] [ <result offset clause> ] [ <fetch first clause> ]
       <right paren>
+-}
 
-> setOpQueryExpression :: TestItem
-> setOpQueryExpression= Group "set operation query expression"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     -- todo: complete setop query expression tests
->     [{-("select * from t union select * from t"
->      ,undefined)
->     ,("select * from t union all select * from t"
->      ,undefined)
->     ,("select * from t union distinct select * from t"
->      ,undefined)
->     ,("select * from t union corresponding select * from t"
->      ,undefined)
->     ,("select * from t union corresponding by (a,b) select * from t"
->      ,undefined)
->     ,("select * from t except select * from t"
->      ,undefined)
->     ,("select * from t in intersect select * from t"
->      ,undefined)-}
->     ]
+setOpQueryExpression :: TestItem
+setOpQueryExpression= Group "set operation query expression"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    -- todo: complete setop query expression tests
+    [{-("select * from t union select * from t"
+     ,undefined)
+    ,("select * from t union all select * from t"
+     ,undefined)
+    ,("select * from t union distinct select * from t"
+     ,undefined)
+    ,("select * from t union corresponding select * from t"
+     ,undefined)
+    ,("select * from t union corresponding by (a,b) select * from t"
+     ,undefined)
+    ,("select * from t except select * from t"
+     ,undefined)
+    ,("select * from t in intersect select * from t"
+     ,undefined)-}
+    ]
 
+{-
 TODO: tests for the associativity and precendence
 
 TODO: not sure exactly where parens are allowed, we will allow them
@@ -3278,14 +3395,16 @@ everywhere
   CORRESPONDING [ BY <left paren> <corresponding column list> <right paren> ]
 
 <corresponding column list> ::= <column name list>
+-}
 
-> explicitTableQueryExpression :: TestItem
-> explicitTableQueryExpression= Group "explicit table query expression"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("table t", Table [Name Nothing "t"])
->     ]
+explicitTableQueryExpression :: TestItem
+explicitTableQueryExpression= Group "explicit table query expression"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("table t", Table [Name Nothing "t"])
+    ]
 
 
+{-
 <order by clause> ::= ORDER BY <sort specification list>
 
 <result offset clause> ::= OFFSET <offset row count> { ROW | ROWS }
@@ -3300,31 +3419,33 @@ everywhere
 <fetch first row count> ::= <simple value specification>
 
 <fetch first percentage> ::= <simple value specification> PERCENT
+-}
 
-> orderOffsetFetchQueryExpression :: TestItem
-> orderOffsetFetchQueryExpression = Group "order, offset, fetch query expression"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [-- todo: finish tests for order offset and fetch
->      ("select a from t order by a"
->      ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
->                            DirDefault NullsOrderDefault]})
->     ,("select a from t offset 5 row"
->      ,qe {qeOffset = Just $ NumLit "5"})
->     ,("select a from t offset 5 rows"
->      ,qe {qeOffset = Just $ NumLit "5"})
->     ,("select a from t fetch first 5 row only"
->      ,qe {qeFetchFirst = Just $ NumLit "5"})
->     -- todo: support with ties and percent in fetch
->     --,("select a from t fetch next 5 rows with ties"
->     --,("select a from t fetch first 5 percent rows only"
->     ]
->  where
->     qe = makeSelect
->          {qeSelectList = [(Iden [Name Nothing "a"], Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }
+orderOffsetFetchQueryExpression :: TestItem
+orderOffsetFetchQueryExpression = Group "order, offset, fetch query expression"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [-- todo: finish tests for order offset and fetch
+     ("select a from t order by a"
+     ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
+                           DirDefault NullsOrderDefault]})
+    ,("select a from t offset 5 row"
+     ,qe {qeOffset = Just $ NumLit "5"})
+    ,("select a from t offset 5 rows"
+     ,qe {qeOffset = Just $ NumLit "5"})
+    ,("select a from t fetch first 5 row only"
+     ,qe {qeFetchFirst = Just $ NumLit "5"})
+    -- todo: support with ties and percent in fetch
+    --,("select a from t fetch next 5 rows with ties"
+    --,("select a from t fetch first 5 percent rows only"
+    ]
+ where
+    qe = makeSelect
+         {qeSelectList = [(Iden [Name Nothing "a"], Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }
 
 
+{-
 == 7.14 <search or cycle clause>
 
 Function
@@ -3360,12 +3481,14 @@ the result of recursive query expressions.
 <cycle mark value> ::= <value expression>
 
 <non-cycle mark value> ::= <value expression>
+-}
 
-> searchOrCycleClause :: TestItem
-> searchOrCycleClause = Group "search or cycle clause"
->     [-- todo: search or cycle clause
->     ]
+searchOrCycleClause :: TestItem
+searchOrCycleClause = Group "search or cycle clause"
+    [-- todo: search or cycle clause
+    ]
 
+{-
 == 7.15 <subquery>
 
 Function
@@ -3380,12 +3503,14 @@ expression>.
 <table subquery> ::= <subquery>
 
 <subquery> ::= <left paren> <query expression> <right paren>
+-}
 
-> scalarSubquery :: TestItem
-> scalarSubquery = Group "scalar subquery"
->     [-- todo: scalar subquery
->     ]
+scalarSubquery :: TestItem
+scalarSubquery = Group "scalar subquery"
+    [-- todo: scalar subquery
+    ]
 
+{-
 = 8 Predicates
 
 == 8.1 <predicate>
@@ -3413,30 +3538,32 @@ Specify a condition that can be evaluated to give a boolean value.
   | <set predicate>
   | <type predicate>
   | <period predicate>
+-}
 
-> predicates :: TestItem
-> predicates = Group "predicates"
->     [comparisonPredicates
->     ,betweenPredicate
->     ,inPredicate
->     ,likePredicate
->     ,similarPredicate
->     ,regexLikePredicate
->     ,nullPredicate
->     ,quantifiedComparisonPredicate
->     ,existsPredicate
->     ,uniquePredicate
->     ,normalizedPredicate
->     ,matchPredicate
->     ,overlapsPredicate
->     ,distinctPredicate
->     ,memberPredicate
->     ,submultisetPredicate
->     ,setPredicate
->     ,periodPredicate
->     ]
+predicates :: TestItem
+predicates = Group "predicates"
+    [comparisonPredicates
+    ,betweenPredicate
+    ,inPredicate
+    ,likePredicate
+    ,similarPredicate
+    ,regexLikePredicate
+    ,nullPredicate
+    ,quantifiedComparisonPredicate
+    ,existsPredicate
+    ,uniquePredicate
+    ,normalizedPredicate
+    ,matchPredicate
+    ,overlapsPredicate
+    ,distinctPredicate
+    ,memberPredicate
+    ,submultisetPredicate
+    ,setPredicate
+    ,periodPredicate
+    ]
 
 
+{-
 == 8.1 <predicate>
 
 No grammar
@@ -3457,26 +3584,28 @@ Specify a comparison of two row values.
   | <greater than operator>
   | <less than or equals operator>
   | <greater than or equals operator>
+-}
 
-> comparisonPredicates :: TestItem
-> comparisonPredicates = Group "comparison predicates"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     $ map mkOp ["=", "<>", "<", ">", "<=", ">="]
->     ++ [("ROW(a) = ROW(b)"
->         ,BinOp (App [Name Nothing "ROW"] [a])
->                [Name Nothing "="]
->                (App [Name Nothing "ROW"] [b]))
->        ,("(a,b) = (c,d)"
->         ,BinOp (SpecialOp [Name Nothing "rowctor"] [a,b])
->            [Name Nothing "="]
->            (SpecialOp [Name Nothing "rowctor"] [Iden [Name Nothing "c"], Iden [Name Nothing "d"]]))
->     ]
->   where
->     mkOp nm = ("a " ++ nm ++ " b"
->               ,BinOp a [Name Nothing nm] b)
->     a = Iden [Name Nothing "a"]
->     b = Iden [Name Nothing "b"]
+comparisonPredicates :: TestItem
+comparisonPredicates = Group "comparison predicates"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    $ map mkOp ["=", "<>", "<", ">", "<=", ">="]
+    ++ [("ROW(a) = ROW(b)"
+        ,BinOp (App [Name Nothing "ROW"] [a])
+               [Name Nothing "="]
+               (App [Name Nothing "ROW"] [b]))
+       ,("(a,b) = (c,d)"
+        ,BinOp (SpecialOp [Name Nothing "rowctor"] [a,b])
+           [Name Nothing "="]
+           (SpecialOp [Name Nothing "rowctor"] [Iden [Name Nothing "c"], Iden [Name Nothing "d"]]))
+    ]
+  where
+    mkOp nm = ("a " ++ nm ++ " b"
+              ,BinOp a [Name Nothing nm] b)
+    a = Iden [Name Nothing "a"]
+    b = Iden [Name Nothing "b"]
 
+{-
 TODO: what other tests, more complex expressions with comparisons?
 
 == 8.3 <between predicate>
@@ -3489,12 +3618,14 @@ Specify a range comparison.
 <between predicate part 2> ::=
   [ NOT ] BETWEEN [ ASYMMETRIC | SYMMETRIC ]
       <row value predicand> AND <row value predicand>
+-}
 
-> betweenPredicate :: TestItem
-> betweenPredicate = Group "between predicate"
->     [-- todo: between predicate
->     ]
+betweenPredicate :: TestItem
+betweenPredicate = Group "between predicate"
+    [-- todo: between predicate
+    ]
 
+{-
 == 8.4 <in predicate>
 
 Function
@@ -3510,12 +3641,14 @@ Specify a quantified comparison.
 
 <in value list> ::=
   <row value expression> [ { <comma> <row value expression> }... ]
+-}
 
-> inPredicate :: TestItem
-> inPredicate = Group "in predicate"
->     [-- todo: in predicate
->     ]
+inPredicate :: TestItem
+inPredicate = Group "in predicate"
+    [-- todo: in predicate
+    ]
 
+{-
 == 8.5 <like predicate>
 
 Function
@@ -3541,12 +3674,14 @@ Specify a pattern-match comparison.
 <octet pattern> ::= <binary value expression>
 
 <escape octet> ::= <binary value expression>
+-}
 
-> likePredicate :: TestItem
-> likePredicate = Group "like predicate"
->     [-- todo: like predicate
->     ]
+likePredicate :: TestItem
+likePredicate = Group "like predicate"
+    [-- todo: like predicate
+    ]
 
+{-
 == 8.6 <similar predicate>
 
 Function
@@ -3609,13 +3744,15 @@ Specify a character string similarity by means of a regular expression.
   | <left bracket> <colon> <regular character set identifier> <colon> <right bracket>
 
 <regular character set identifier> ::= <identifier>
+-}
 
-> similarPredicate :: TestItem
-> similarPredicate = Group "similar predicate"
->     [-- todo: similar predicate
->     ]
+similarPredicate :: TestItem
+similarPredicate = Group "similar predicate"
+    [-- todo: similar predicate
+    ]
 
 
+{-
 == 8.7 <regex like predicate>
 
 Function
@@ -3625,12 +3762,14 @@ Specify a pattern-match comparison using an XQuery regular expression.
 
 <regex like predicate part 2> ::=
   [ NOT ] LIKE_REGEX <XQuery pattern> [ FLAG <XQuery option flag> ]
+-}
 
-> regexLikePredicate :: TestItem
-> regexLikePredicate = Group "regex like predicate"
->     [-- todo: regex like predicate
->     ]
+regexLikePredicate :: TestItem
+regexLikePredicate = Group "regex like predicate"
+    [-- todo: regex like predicate
+    ]
 
+{-
 == 8.8 <null predicate>
 
 Function
@@ -3639,12 +3778,14 @@ Specify a test for a null value.
 <null predicate> ::= <row value predicand> <null predicate part 2>
 
 <null predicate part 2> ::= IS [ NOT ] NULL
+-}
 
-> nullPredicate :: TestItem
-> nullPredicate = Group "null predicate"
->     [-- todo: null predicate
->     ]
+nullPredicate :: TestItem
+nullPredicate = Group "null predicate"
+    [-- todo: null predicate
+    ]
 
+{-
 == 8.9 <quantified comparison predicate>
 
 Function
@@ -3661,65 +3802,71 @@ Specify a quantified comparison.
 <all> ::= ALL
 
 <some> ::= SOME | ANY
+-}
 
-> quantifiedComparisonPredicate :: TestItem
-> quantifiedComparisonPredicate = Group "quantified comparison predicate"
->     $ map (uncurry (TestScalarExpr ansi2011))
+quantifiedComparisonPredicate :: TestItem
+quantifiedComparisonPredicate = Group "quantified comparison predicate"
+    $ map (uncurry (TestScalarExpr ansi2011))
 
->     [("a = any (select * from t)"
->      ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "="] CPAny qe)
->     ,("a <= some (select * from t)"
->      ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "<="] CPSome qe)
->     ,("a > all (select * from t)"
->      ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing ">"] CPAll qe)
->     ,("(a,b) <> all (select * from t)"
->      ,QuantifiedComparison
->       (SpecialOp [Name Nothing "rowctor"] [Iden [Name Nothing "a"]
->                                   ,Iden [Name Nothing "b"]]) [Name Nothing "<>"] CPAll qe)
->     ]
->   where
->     qe = makeSelect
->          {qeSelectList = [(Star,Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]}
+    [("a = any (select * from t)"
+     ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "="] CPAny qe)
+    ,("a <= some (select * from t)"
+     ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "<="] CPSome qe)
+    ,("a > all (select * from t)"
+     ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing ">"] CPAll qe)
+    ,("(a,b) <> all (select * from t)"
+     ,QuantifiedComparison
+      (SpecialOp [Name Nothing "rowctor"] [Iden [Name Nothing "a"]
+                                  ,Iden [Name Nothing "b"]]) [Name Nothing "<>"] CPAll qe)
+    ]
+  where
+    qe = makeSelect
+         {qeSelectList = [(Star,Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]}
 
+{-
 == 8.10 <exists predicate>
 
 Function
 Specify a test for a non-empty set.
 
 <exists predicate> ::= EXISTS <table subquery>
+-}
 
-> existsPredicate :: TestItem
-> existsPredicate = Group "exists predicate"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("exists(select * from t where a = 4)"
->      ,SubQueryExpr SqExists
->       $ makeSelect
->         {qeSelectList = [(Star,Nothing)]
->         ,qeFrom = [TRSimple [Name Nothing "t"]]
->         ,qeWhere = Just (BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "4"))
->         }
->      )]
+existsPredicate :: TestItem
+existsPredicate = Group "exists predicate"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("exists(select * from t where a = 4)"
+     ,SubQueryExpr SqExists
+      $ makeSelect
+        {qeSelectList = [(Star,Nothing)]
+        ,qeFrom = [TRSimple [Name Nothing "t"]]
+        ,qeWhere = Just (BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "4"))
+        }
+     )]
 
+{-
 == 8.11 <unique predicate>
 
 Function
 Specify a test for the absence of duplicate rows.
 
 <unique predicate> ::= UNIQUE <table subquery>
+-}
 
-> uniquePredicate :: TestItem
-> uniquePredicate = Group "unique predicate"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("unique(select * from t where a = 4)"
->      ,SubQueryExpr SqUnique
->       $ makeSelect
->         {qeSelectList = [(Star,Nothing)]
->         ,qeFrom = [TRSimple [Name Nothing "t"]]
->         ,qeWhere = Just (BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "4"))
->         }
->      )]
+uniquePredicate :: TestItem
+uniquePredicate = Group "unique predicate"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("unique(select * from t where a = 4)"
+     ,SubQueryExpr SqUnique
+      $ makeSelect
+        {qeSelectList = [(Star,Nothing)]
+        ,qeFrom = [TRSimple [Name Nothing "t"]]
+        ,qeWhere = Just (BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "4"))
+        }
+     )]
 
+{-
 == 8.12 <normalized predicate>
 
 Function
@@ -3728,12 +3875,14 @@ Determine whether a character string value is normalized.
 <normalized predicate> ::= <row value predicand> <normalized predicate part 2>
 
 <normalized predicate part 2> ::= IS [ NOT ] [ <normal form> ] NORMALIZED
+-}
 
-> normalizedPredicate :: TestItem
-> normalizedPredicate = Group "normalized predicate"
->     [-- todo: normalized predicate
->     ]
+normalizedPredicate :: TestItem
+normalizedPredicate = Group "normalized predicate"
+    [-- todo: normalized predicate
+    ]
 
+{-
 == 8.13 <match predicate>
 
 Function
@@ -3743,26 +3892,28 @@ Specify a test for matching rows.
 
 <match predicate part 2> ::=
   MATCH [ UNIQUE ] [ SIMPLE | PARTIAL | FULL ] <table subquery>
+-}
 
-> matchPredicate :: TestItem
-> matchPredicate = Group "match predicate"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("a match (select a from t)"
->      ,Match (Iden [Name Nothing "a"]) False qe)
->     ,("(a,b) match (select a,b from t)"
->      ,Match (SpecialOp [Name Nothing "rowctor"]
->              [Iden [Name Nothing "a"], Iden [Name Nothing "b"]]) False qea)
->     ,("(a,b) match unique (select a,b from t)"
->      ,Match (SpecialOp [Name Nothing "rowctor"]
->              [Iden [Name Nothing "a"], Iden [Name Nothing "b"]]) True qea)
->     ]
->   where
->     qe = makeSelect
->          {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]}
->     qea = qe {qeSelectList = qeSelectList qe
->                              ++ [(Iden [Name Nothing "b"],Nothing)]}
+matchPredicate :: TestItem
+matchPredicate = Group "match predicate"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("a match (select a from t)"
+     ,Match (Iden [Name Nothing "a"]) False qe)
+    ,("(a,b) match (select a,b from t)"
+     ,Match (SpecialOp [Name Nothing "rowctor"]
+             [Iden [Name Nothing "a"], Iden [Name Nothing "b"]]) False qea)
+    ,("(a,b) match unique (select a,b from t)"
+     ,Match (SpecialOp [Name Nothing "rowctor"]
+             [Iden [Name Nothing "a"], Iden [Name Nothing "b"]]) True qea)
+    ]
+  where
+    qe = makeSelect
+         {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]}
+    qea = qe {qeSelectList = qeSelectList qe
+                             ++ [(Iden [Name Nothing "b"],Nothing)]}
 
+{-
 TODO: simple, partial and full
 
 == 8.14 <overlaps predicate>
@@ -3780,12 +3931,14 @@ Specify a test for an overlap between two datetime periods.
 <row value predicand 1> ::= <row value predicand>
 
 <row value predicand 2> ::= <row value predicand>
+-}
 
-> overlapsPredicate :: TestItem
-> overlapsPredicate = Group "overlaps predicate"
->     [-- todo: overlaps predicate
->     ]
+overlapsPredicate :: TestItem
+overlapsPredicate = Group "overlaps predicate"
+    [-- todo: overlaps predicate
+    ]
 
+{-
 == 8.15 <distinct predicate>
 
 Function
@@ -3799,12 +3952,14 @@ Specify a test of whether two row values are distinct
 <row value predicand 3> ::= <row value predicand>
 
 <row value predicand 4> ::= <row value predicand>
+-}
 
-> distinctPredicate :: TestItem
-> distinctPredicate = Group "distinct predicate"
->     [-- todo: distinct predicate
->     ]
+distinctPredicate :: TestItem
+distinctPredicate = Group "distinct predicate"
+    [-- todo: distinct predicate
+    ]
 
+{-
 == 8.16 <member predicate>
 
 Function
@@ -3813,12 +3968,14 @@ Specify a test of whether a value is a member of a multiset.
 <member predicate> ::= <row value predicand> <member predicate part 2>
 
 <member predicate part 2> ::= [ NOT ] MEMBER [ OF ] <multiset value expression>
+-}
 
-> memberPredicate :: TestItem
-> memberPredicate = Group "member predicate"
->     [-- todo: member predicate
->     ]
+memberPredicate :: TestItem
+memberPredicate = Group "member predicate"
+    [-- todo: member predicate
+    ]
 
+{-
 == 8.17 <submultiset predicate>
 
 Function
@@ -3829,12 +3986,14 @@ Specify a test of whether a multiset is a submultiset of another multiset.
 
 <submultiset predicate part 2> ::=
   [ NOT ] SUBMULTISET [ OF ] <multiset value expression>
+-}
 
-> submultisetPredicate :: TestItem
-> submultisetPredicate = Group "submultiset predicate"
->     [-- todo: submultiset predicate
->     ]
+submultisetPredicate :: TestItem
+submultisetPredicate = Group "submultiset predicate"
+    [-- todo: submultiset predicate
+    ]
 
+{-
 == 8.18 <set predicate>
 
 Function
@@ -3845,12 +4004,14 @@ contain any duplicates).
 <set predicate> ::= <row value predicand> <set predicate part 2>
 
 <set predicate part 2> ::= IS [ NOT ] A SET
+-}
 
-> setPredicate :: TestItem
-> setPredicate = Group "set predicate"
->     [-- todo: set predicate
->     ]
+setPredicate :: TestItem
+setPredicate = Group "set predicate"
+    [-- todo: set predicate
+    ]
 
+{-
 == 8.19 <type predicate>
 
 Function
@@ -3946,12 +4107,14 @@ Specify a test to determine the relationship between periods.
 
 <period immediately succeeds predicate part 2> ::=
   IMMEDIATELY SUCCEEDS <period predicand 2>
+-}
 
-> periodPredicate :: TestItem
-> periodPredicate = Group "period predicate"
->     [-- todo: period predicate
->     ]
+periodPredicate :: TestItem
+periodPredicate = Group "period predicate"
+    [-- todo: period predicate
+    ]
 
+{-
 == 8.21 <search condition>
 
 Function
@@ -3991,12 +4154,14 @@ Specify the precision of an interval data type.
 <interval fractional seconds precision> ::= <unsigned integer>
 
 <interval leading field precision> ::= <unsigned integer>
+-}
 
-> intervalQualifier :: TestItem
-> intervalQualifier = Group "interval qualifier"
->     [-- todo: interval qualifier
->     ]
+intervalQualifier :: TestItem
+intervalQualifier = Group "interval qualifier"
+    [-- todo: interval qualifier
+    ]
 
+{-
 todo: also test all of these in the typenames and in the interval
 literal tests
 
@@ -4095,13 +4260,15 @@ Function
 Specify a default collation.
 
 <collate clause> ::= COLLATE <collation name>
+-}
 
-> collateClause :: TestItem
-> collateClause = Group "collate clause"
->     $ map (uncurry (TestScalarExpr ansi2011))
->     [("a collate my_collation"
->      ,Collate (Iden [Name Nothing "a"]) [Name Nothing "my_collation"])]
+collateClause :: TestItem
+collateClause = Group "collate clause"
+    $ map (uncurry (TestScalarExpr ansi2011))
+    [("a collate my_collation"
+     ,Collate (Iden [Name Nothing "a"]) [Name Nothing "my_collation"])]
 
+{-
 == 10.8 <constraint name definition> and <constraint characteristics>
 
 Function
@@ -4206,83 +4373,85 @@ Specify a value computed from a collection of rows.
 <array aggregate function> ::=
   ARRAY_AGG
       <left paren> <value expression> [ ORDER BY <sort specification list> ] <right paren>
+-}
 
-> aggregateFunction :: TestItem
-> aggregateFunction = Group "aggregate function"
->     $ map (uncurry (TestScalarExpr ansi2011)) $
->     [("count(*)",App [Name Nothing "count"] [Star])
->     ,("count(*) filter (where something > 5)"
->      ,AggregateApp [Name Nothing "count"] SQDefault [Star] [] fil)
+aggregateFunction :: TestItem
+aggregateFunction = Group "aggregate function"
+    $ map (uncurry (TestScalarExpr ansi2011)) $
+    [("count(*)",App [Name Nothing "count"] [Star])
+    ,("count(*) filter (where something > 5)"
+     ,AggregateApp [Name Nothing "count"] SQDefault [Star] [] fil)
 
-gsf
+-- gsf
 
->     ,("count(a)",App [Name Nothing "count"] [Iden [Name Nothing "a"]])
->     ,("count(distinct a)"
->      ,AggregateApp [Name Nothing "count"]
->                    Distinct
->                    [Iden [Name Nothing "a"]] [] Nothing)
->     ,("count(all a)"
->      ,AggregateApp [Name Nothing "count"]
->                    All
->                    [Iden [Name Nothing "a"]] [] Nothing)
->     ,("count(all a) filter (where something > 5)"
->      ,AggregateApp [Name Nothing "count"]
->                    All
->                    [Iden [Name Nothing "a"]] [] fil)
->     ] ++ concatMap mkSimpleAgg
->          ["avg","max","min","sum"
->          ,"every", "any", "some"
->          ,"stddev_pop","stddev_samp","var_samp","var_pop"
->          ,"collect","fusion","intersection"]
+    ,("count(a)",App [Name Nothing "count"] [Iden [Name Nothing "a"]])
+    ,("count(distinct a)"
+     ,AggregateApp [Name Nothing "count"]
+                   Distinct
+                   [Iden [Name Nothing "a"]] [] Nothing)
+    ,("count(all a)"
+     ,AggregateApp [Name Nothing "count"]
+                   All
+                   [Iden [Name Nothing "a"]] [] Nothing)
+    ,("count(all a) filter (where something > 5)"
+     ,AggregateApp [Name Nothing "count"]
+                   All
+                   [Iden [Name Nothing "a"]] [] fil)
+    ] ++ concatMap mkSimpleAgg
+         ["avg","max","min","sum"
+         ,"every", "any", "some"
+         ,"stddev_pop","stddev_samp","var_samp","var_pop"
+         ,"collect","fusion","intersection"]
 
-bsf
+-- bsf
 
->     ++ concatMap mkBsf
->          ["COVAR_POP","COVAR_SAMP","CORR","REGR_SLOPE"
->           ,"REGR_INTERCEPT","REGR_COUNT","REGR_R2"
->           ,"REGR_AVGX","REGR_AVGY"
->           ,"REGR_SXX","REGR_SYY","REGR_SXY"]
+    ++ concatMap mkBsf
+         ["COVAR_POP","COVAR_SAMP","CORR","REGR_SLOPE"
+          ,"REGR_INTERCEPT","REGR_COUNT","REGR_R2"
+          ,"REGR_AVGX","REGR_AVGY"
+          ,"REGR_SXX","REGR_SYY","REGR_SXY"]
 
-osf
+-- osf
 
->     ++
->     [("rank(a,c) within group (order by b)"
->      ,AggregateAppGroup [Name Nothing "rank"]
->           [Iden [Name Nothing "a"], Iden [Name Nothing "c"]]
->           ob)]
->     ++ map mkGp ["dense_rank","percent_rank"
->                 ,"cume_dist", "percentile_cont"
->                 ,"percentile_disc"]
->     ++ [("array_agg(a)", App [Name Nothing "array_agg"] [Iden [Name Nothing "a"]])
->        ,("array_agg(a order by z)"
->         ,AggregateApp [Name Nothing "array_agg"]
->                        SQDefault
->                        [Iden [Name Nothing "a"]]
->                        [SortSpec (Iden [Name Nothing "z"])
->                             DirDefault NullsOrderDefault]
->                        Nothing)]
+    ++
+    [("rank(a,c) within group (order by b)"
+     ,AggregateAppGroup [Name Nothing "rank"]
+          [Iden [Name Nothing "a"], Iden [Name Nothing "c"]]
+          ob)]
+    ++ map mkGp ["dense_rank","percent_rank"
+                ,"cume_dist", "percentile_cont"
+                ,"percentile_disc"]
+    ++ [("array_agg(a)", App [Name Nothing "array_agg"] [Iden [Name Nothing "a"]])
+       ,("array_agg(a order by z)"
+        ,AggregateApp [Name Nothing "array_agg"]
+                       SQDefault
+                       [Iden [Name Nothing "a"]]
+                       [SortSpec (Iden [Name Nothing "z"])
+                            DirDefault NullsOrderDefault]
+                       Nothing)]
 
->   where
->     fil = Just $ BinOp (Iden [Name Nothing "something"]) [Name Nothing ">"] (NumLit "5")
->     ob = [SortSpec (Iden [Name Nothing "b"]) DirDefault NullsOrderDefault]
->     mkGp nm = (nm ++ "(a) within group (order by b)"
->               ,AggregateAppGroup [Name Nothing nm]
->                [Iden [Name Nothing "a"]]
->                ob)
+  where
+    fil = Just $ BinOp (Iden [Name Nothing "something"]) [Name Nothing ">"] (NumLit "5")
+    ob = [SortSpec (Iden [Name Nothing "b"]) DirDefault NullsOrderDefault]
+    mkGp nm = (nm ++ "(a) within group (order by b)"
+              ,AggregateAppGroup [Name Nothing nm]
+               [Iden [Name Nothing "a"]]
+               ob)
 
->     mkSimpleAgg nm =
->         [(nm ++ "(a)",App [Name Nothing nm] [Iden [Name Nothing "a"]])
->         ,(nm ++ "(distinct a)"
->          ,AggregateApp [Name Nothing nm]
->                        Distinct
->                        [Iden [Name Nothing "a"]] [] Nothing)]
->     mkBsf nm =
->         [(nm ++ "(a,b)",App [Name Nothing nm] [Iden [Name Nothing "a"],Iden [Name Nothing "b"]])
->         ,(nm ++"(a,b) filter (where something > 5)"
->           ,AggregateApp [Name Nothing nm]
->                         SQDefault
->                         [Iden [Name Nothing "a"],Iden [Name Nothing "b"]] [] fil)]
+    mkSimpleAgg nm =
+        [(nm ++ "(a)",App [Name Nothing nm] [Iden [Name Nothing "a"]])
+        ,(nm ++ "(distinct a)"
+         ,AggregateApp [Name Nothing nm]
+                       Distinct
+                       [Iden [Name Nothing "a"]] [] Nothing)]
+    mkBsf nm =
+        [(nm ++ "(a,b)",App [Name Nothing nm] [Iden [Name Nothing "a"],Iden [Name Nothing "b"]])
+        ,(nm ++"(a,b) filter (where something > 5)"
+          ,AggregateApp [Name Nothing nm]
+                        SQDefault
+                        [Iden [Name Nothing "a"],Iden [Name Nothing "b"]] [] fil)]
 
+{-
 == 10.10 <sort specification list>
 
 Function
@@ -4301,41 +4470,42 @@ Specify a sort order.
 <null ordering> ::=
   | NULLS LAST
     NULLS FIRST
+-}
 
-> sortSpecificationList :: TestItem
-> sortSpecificationList = Group "sort specification list"
->     $ map (uncurry (TestQueryExpr ansi2011))
->     [("select * from t order by a"
->      ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
->                            DirDefault NullsOrderDefault]})
->     ,("select * from t order by a,b"
->      ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
->                            DirDefault NullsOrderDefault
->                       ,SortSpec (Iden [Name Nothing "b"])
->                            DirDefault NullsOrderDefault]})
->     ,("select * from t order by a asc,b"
->      ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
->                            Asc NullsOrderDefault
->                       ,SortSpec (Iden [Name Nothing "b"])
->                            DirDefault NullsOrderDefault]})
->     ,("select * from t order by a desc,b"
->      ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
->                            Desc NullsOrderDefault
->                       ,SortSpec (Iden [Name Nothing "b"])
->                            DirDefault NullsOrderDefault]})
->     ,("select * from t order by a collate x desc,b"
->      ,qe {qeOrderBy = [SortSpec
->                            (Collate (Iden [Name Nothing "a"]) [Name Nothing "x"])
->                            Desc NullsOrderDefault
->                       ,SortSpec (Iden [Name Nothing "b"])
->                            DirDefault NullsOrderDefault]})
->     ,("select * from t order by 1,2"
->      ,qe {qeOrderBy = [SortSpec (NumLit "1")
->                            DirDefault NullsOrderDefault
->                       ,SortSpec (NumLit "2")
->                            DirDefault NullsOrderDefault]})
->     ]
->   where
->     qe = makeSelect
->          {qeSelectList = [(Star,Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]}
+sortSpecificationList :: TestItem
+sortSpecificationList = Group "sort specification list"
+    $ map (uncurry (TestQueryExpr ansi2011))
+    [("select * from t order by a"
+     ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
+                           DirDefault NullsOrderDefault]})
+    ,("select * from t order by a,b"
+     ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
+                           DirDefault NullsOrderDefault
+                      ,SortSpec (Iden [Name Nothing "b"])
+                           DirDefault NullsOrderDefault]})
+    ,("select * from t order by a asc,b"
+     ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
+                           Asc NullsOrderDefault
+                      ,SortSpec (Iden [Name Nothing "b"])
+                           DirDefault NullsOrderDefault]})
+    ,("select * from t order by a desc,b"
+     ,qe {qeOrderBy = [SortSpec (Iden [Name Nothing "a"])
+                           Desc NullsOrderDefault
+                      ,SortSpec (Iden [Name Nothing "b"])
+                           DirDefault NullsOrderDefault]})
+    ,("select * from t order by a collate x desc,b"
+     ,qe {qeOrderBy = [SortSpec
+                           (Collate (Iden [Name Nothing "a"]) [Name Nothing "x"])
+                           Desc NullsOrderDefault
+                      ,SortSpec (Iden [Name Nothing "b"])
+                           DirDefault NullsOrderDefault]})
+    ,("select * from t order by 1,2"
+     ,qe {qeOrderBy = [SortSpec (NumLit "1")
+                           DirDefault NullsOrderDefault
+                      ,SortSpec (NumLit "2")
+                           DirDefault NullsOrderDefault]})
+    ]
+  where
+    qe = makeSelect
+         {qeSelectList = [(Star,Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]}
diff --git a/tools/Language/SQL/SimpleSQL/SQL2011Schema.lhs b/tools/Language/SQL/SimpleSQL/SQL2011Schema.hs
similarity index 59%
rename from tools/Language/SQL/SimpleSQL/SQL2011Schema.lhs
rename to tools/Language/SQL/SimpleSQL/SQL2011Schema.hs
index f10f479..f860fa2 100644
--- a/tools/Language/SQL/SimpleSQL/SQL2011Schema.lhs
+++ b/tools/Language/SQL/SimpleSQL/SQL2011Schema.hs
@@ -1,28 +1,33 @@
 
+{-
 Section 11 in Foundation
 
 This module covers the tests for parsing schema and DDL statements.
+-}
 
-> module Language.SQL.SimpleSQL.SQL2011Schema (sql2011SchemaTests) where
+module Language.SQL.SimpleSQL.SQL2011Schema (sql2011SchemaTests) where
 
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
 
-> sql2011SchemaTests :: TestItem
-> sql2011SchemaTests = Group "sql 2011 schema tests"
->     [
+sql2011SchemaTests :: TestItem
+sql2011SchemaTests = Group "sql 2011 schema tests"
+    [
 
 
+{-
 11.1 <schema definition>
 
 <schema definition> ::=
   CREATE SCHEMA <schema name clause>
       [ <schema character set or path> ]
       [ <schema element>... ]
+-}
 
->      (TestStatement ansi2011 "create schema my_schema"
->      $ CreateSchema [Name Nothing "my_schema"])
+     (TestStatement ansi2011 "create schema my_schema"
+     $ CreateSchema [Name Nothing "my_schema"])
 
+{-
 todo: schema name can have .
 schema name can be quoted iden or unicode quoted iden
 add schema element support:
@@ -77,15 +82,17 @@ add schema element support:
 <drop behavior> ::=
     CASCADE
   | RESTRICT
+-}
 
 
->     ,(TestStatement ansi2011 "drop schema my_schema"
->      $ DropSchema [Name Nothing "my_schema"] DefaultDropBehaviour)
->     ,(TestStatement ansi2011 "drop schema my_schema cascade"
->      $ DropSchema [Name Nothing "my_schema"] Cascade)
->     ,(TestStatement ansi2011 "drop schema my_schema restrict"
->      $ DropSchema [Name Nothing "my_schema"] Restrict)
+    ,(TestStatement ansi2011 "drop schema my_schema"
+     $ DropSchema [Name Nothing "my_schema"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011 "drop schema my_schema cascade"
+     $ DropSchema [Name Nothing "my_schema"] Cascade)
+    ,(TestStatement ansi2011 "drop schema my_schema restrict"
+     $ DropSchema [Name Nothing "my_schema"] Restrict)
 
+{-
 11.3 <table definition>
 
 
@@ -93,13 +100,15 @@ add schema element support:
   CREATE [ <table scope> ] TABLE <table name> <table contents source>
       [ WITH <system versioning clause> ]
       [ ON COMMIT <table commit action> ROWS ]
+-}
 
->     ,(TestStatement ansi2011 "create table t (a int, b int);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []])
+    ,(TestStatement ansi2011 "create table t (a int, b int);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []])
 
 
+{-
 <table contents source> ::=
     <table element list>
   | <typed table clause>
@@ -308,193 +317,201 @@ one constratint:
 optional name: constraint [Name]
 not null | unique | references | check
 todo: constraint characteristics
+-}
 
 
->     ,(TestStatement ansi2011
->       "create table t (a int not null);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing ColNotNullConstraint]])
+    ,(TestStatement ansi2011
+      "create table t (a int not null);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing ColNotNullConstraint]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int constraint a_not_null not null);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef (Just [Name Nothing "a_not_null"]) ColNotNullConstraint]])
+    ,(TestStatement ansi2011
+      "create table t (a int constraint a_not_null not null);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef (Just [Name Nothing "a_not_null"]) ColNotNullConstraint]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int unique);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing ColUniqueConstraint]])
+    ,(TestStatement ansi2011
+      "create table t (a int unique);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing ColUniqueConstraint]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int primary key);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing (ColPrimaryKeyConstraint False)]])
+    ,(TestStatement ansi2011
+      "create table t (a int primary key);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing (ColPrimaryKeyConstraint False)]])
 
->     ,(TestStatement ansi2011 { diAutoincrement = True }
->       "create table t (a int primary key autoincrement);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing (ColPrimaryKeyConstraint True)]])
+    ,(TestStatement ansi2011 { diAutoincrement = True }
+      "create table t (a int primary key autoincrement);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing (ColPrimaryKeyConstraint True)]])
 
+{-
 references t(a,b)
   [ Full |partial| simepl]
   [perm: on update [cascade | set null | set default | restrict | no action]
          on delete ""
+-}
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          DefaultReferentialAction DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         DefaultReferentialAction DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u(a));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] (Just $ Name Nothing "a") DefaultReferenceMatch
->          DefaultReferentialAction DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u(a));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] (Just $ Name Nothing "a") DefaultReferenceMatch
+         DefaultReferentialAction DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u match full);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing MatchFull
->          DefaultReferentialAction DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u match full);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing MatchFull
+         DefaultReferentialAction DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u match partial);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing MatchPartial
->          DefaultReferentialAction DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u match partial);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing MatchPartial
+         DefaultReferentialAction DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u match simple);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing MatchSimple
->          DefaultReferentialAction DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u match simple);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing MatchSimple
+         DefaultReferentialAction DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on update cascade );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          RefCascade DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on update cascade );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         RefCascade DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on update set null );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          RefSetNull DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on update set null );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         RefSetNull DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on update set default );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          RefSetDefault DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on update set default );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         RefSetDefault DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on update no action );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          RefNoAction DefaultReferentialAction]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on update no action );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         RefNoAction DefaultReferentialAction]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on delete cascade );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          DefaultReferentialAction RefCascade]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on delete cascade );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         DefaultReferentialAction RefCascade]])
 
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on update cascade on delete restrict );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          RefCascade RefRestrict]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on update cascade on delete restrict );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         RefCascade RefRestrict]])
 
->     ,(TestStatement ansi2011
->       "create table t (a int references u on delete restrict on update cascade );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing $ ColReferencesConstraint
->          [Name Nothing "u"] Nothing DefaultReferenceMatch
->          RefCascade RefRestrict]])
+    ,(TestStatement ansi2011
+      "create table t (a int references u on delete restrict on update cascade );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing $ ColReferencesConstraint
+         [Name Nothing "u"] Nothing DefaultReferenceMatch
+         RefCascade RefRestrict]])
 
+{-
 TODO: try combinations and permutations of column constraints and
 options
+-}
 
 
->     ,(TestStatement ansi2011
->       "create table t (a int check (a>5));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
->         [ColConstraintDef Nothing
->          (ColCheckConstraint $ BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (NumLit "5"))]])
+    ,(TestStatement ansi2011
+      "create table t (a int check (a>5));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing
+        [ColConstraintDef Nothing
+         (ColCheckConstraint $ BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (NumLit "5"))]])
 
 
 
 
 
+{-
 <identity column specification> ::=
   GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY
       [ <left paren> <common sequence generator options> <right paren> ]
+-}
 
->     ,(TestStatement ansi2011 "create table t (a int generated always as identity);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
->         (Just $ IdentityColumnSpec GeneratedAlways []) []])
+    ,(TestStatement ansi2011 "create table t (a int generated always as identity);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
+        (Just $ IdentityColumnSpec GeneratedAlways []) []])
 
->     ,(TestStatement ansi2011 "create table t (a int generated by default as identity);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
->         (Just $ IdentityColumnSpec GeneratedByDefault []) []])
+    ,(TestStatement ansi2011 "create table t (a int generated by default as identity);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
+        (Just $ IdentityColumnSpec GeneratedByDefault []) []])
 
 
->     ,(TestStatement ansi2011
->       "create table t (a int generated always as identity\n\
->       \  ( start with 5 increment by 5 maxvalue 500 minvalue 5 cycle ));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
->         (Just $ IdentityColumnSpec GeneratedAlways
->          [SGOStartWith 5
->          ,SGOIncrementBy 5
->          ,SGOMaxValue 500
->          ,SGOMinValue 5
->          ,SGOCycle]) []])
+    ,(TestStatement ansi2011
+      "create table t (a int generated always as identity\n\
+      \  ( start with 5 increment by 5 maxvalue 500 minvalue 5 cycle ));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
+        (Just $ IdentityColumnSpec GeneratedAlways
+         [SGOStartWith 5
+         ,SGOIncrementBy 5
+         ,SGOMaxValue 500
+         ,SGOMinValue 5
+         ,SGOCycle]) []])
 
->     ,(TestStatement ansi2011
->       "create table t (a int generated always as identity\n\
->       \  ( start with -4 no maxvalue no minvalue no cycle ));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
->         (Just $ IdentityColumnSpec GeneratedAlways
->          [SGOStartWith (-4)
->          ,SGONoMaxValue
->          ,SGONoMinValue
->          ,SGONoCycle]) []])
+    ,(TestStatement ansi2011
+      "create table t (a int generated always as identity\n\
+      \  ( start with -4 no maxvalue no minvalue no cycle ));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
+        (Just $ IdentityColumnSpec GeneratedAlways
+         [SGOStartWith (-4)
+         ,SGONoMaxValue
+         ,SGONoMinValue
+         ,SGONoCycle]) []])
 
+{-
 I think <common sequence generator options> is supposed to just
 whitespace separated. In db2 it seems to be csv, but the grammar here
 just seems to be whitespace separated, and it is just whitespace
@@ -511,18 +528,20 @@ generated always (valueexpr)
 
 <generation expression> ::=
   <left paren> <value expression> <right paren>
+-}
 
->     ,(TestStatement ansi2011
->       "create table t (a int, \n\
->       \                a2 int generated always as (a * 2));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "a2") (TypeName [Name Nothing "int"])
->         (Just $ GenerationClause
->          (BinOp (Iden [Name Nothing "a"]) [Name Nothing "*"] (NumLit "2"))) []])
+    ,(TestStatement ansi2011
+      "create table t (a int, \n\
+      \                a2 int generated always as (a * 2));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "a2") (TypeName [Name Nothing "int"])
+        (Just $ GenerationClause
+         (BinOp (Iden [Name Nothing "a"]) [Name Nothing "*"] (NumLit "2"))) []])
 
 
 
+{-
 11.5 <default clause>
 
 <default clause> ::=
@@ -540,15 +559,17 @@ generated always (valueexpr)
   | CURRENT_SCHEMA
   | CURRENT_PATH
   | <implicitly typed value specification>
+-}
 
 
->     ,(TestStatement ansi2011 "create table t (a int default 0);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
->         (Just $ DefaultClause $ NumLit "0") []])
+    ,(TestStatement ansi2011 "create table t (a int default 0);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"])
+        (Just $ DefaultClause $ NumLit "0") []])
 
 
 
+{-
 11.6 <table constraint definition>
 
 <table constraint definition> ::=
@@ -573,43 +594,45 @@ generated always (valueexpr)
 
 <unique column list> ::=
   <column name list>
+-}
 
->     ,(TestStatement ansi2011
->       "create table t (a int, unique (a));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef Nothing $ TableUniqueConstraint [Name Nothing "a"]
->         ])
+    ,(TestStatement ansi2011
+      "create table t (a int, unique (a));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef Nothing $ TableUniqueConstraint [Name Nothing "a"]
+        ])
 
->     ,(TestStatement ansi2011
->       "create table t (a int, constraint a_unique unique (a));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef (Just [Name Nothing "a_unique"]) $
->             TableUniqueConstraint [Name Nothing "a"]
->         ])
+    ,(TestStatement ansi2011
+      "create table t (a int, constraint a_unique unique (a));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef (Just [Name Nothing "a_unique"]) $
+            TableUniqueConstraint [Name Nothing "a"]
+        ])
 
-todo: test permutations of column defs and table constraints
+-- todo: test permutations of column defs and table constraints
 
->     ,(TestStatement ansi2011
->       "create table t (a int, b int, unique (a,b));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef Nothing $
->             TableUniqueConstraint [Name Nothing "a", Name Nothing "b"]
->         ])
+    ,(TestStatement ansi2011
+      "create table t (a int, b int, unique (a,b));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef Nothing $
+            TableUniqueConstraint [Name Nothing "a", Name Nothing "b"]
+        ])
 
->     ,(TestStatement ansi2011
->       "create table t (a int, b int, primary key (a,b));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef Nothing $
->             TablePrimaryKeyConstraint [Name Nothing "a", Name Nothing "b"]
->         ])
+    ,(TestStatement ansi2011
+      "create table t (a int, b int, primary key (a,b));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef Nothing $
+            TablePrimaryKeyConstraint [Name Nothing "a", Name Nothing "b"]
+        ])
 
 
+{-
 <without overlap specification> ::=
   <application time period name> WITHOUT OVERLAPS
 
@@ -622,59 +645,61 @@ definitely skip
   FOREIGN KEY <left paren> <referencing column list>
       [ <comma> <referencing period specification> ] <right paren>
       <references specification>
+-}
 
 
->     ,(TestStatement ansi2011
->       "create table t (a int, b int,\n\
->       \                foreign key (a,b) references u(c,d) match full on update cascade on delete restrict );"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef Nothing $
->             TableReferencesConstraint
->                 [Name Nothing "a", Name Nothing "b"]
->                 [Name Nothing "u"]
->                 (Just [Name Nothing "c", Name Nothing "d"])
->                 MatchFull RefCascade RefRestrict
->        ])
+    ,(TestStatement ansi2011
+      "create table t (a int, b int,\n\
+      \                foreign key (a,b) references u(c,d) match full on update cascade on delete restrict );"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef Nothing $
+            TableReferencesConstraint
+                [Name Nothing "a", Name Nothing "b"]
+                [Name Nothing "u"]
+                (Just [Name Nothing "c", Name Nothing "d"])
+                MatchFull RefCascade RefRestrict
+       ])
 
->     ,(TestStatement ansi2011
->       "create table t (a int,\n\
->       \                constraint tfku1 foreign key (a) references u);"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef (Just [Name Nothing "tfku1"]) $
->             TableReferencesConstraint
->                 [Name Nothing "a"]
->                 [Name Nothing "u"]
->                 Nothing DefaultReferenceMatch
->                 DefaultReferentialAction DefaultReferentialAction
->        ])
+    ,(TestStatement ansi2011
+      "create table t (a int,\n\
+      \                constraint tfku1 foreign key (a) references u);"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef (Just [Name Nothing "tfku1"]) $
+            TableReferencesConstraint
+                [Name Nothing "a"]
+                [Name Nothing "u"]
+                Nothing DefaultReferenceMatch
+                DefaultReferentialAction DefaultReferentialAction
+       ])
 
->     ,(TestStatement ansi2011 { diNonCommaSeparatedConstraints = True }
->       "create table t (a int, b int,\n\
->       \                foreign key (a) references u(c)\n\
->       \                foreign key (b) references v(d));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef Nothing $
->             TableReferencesConstraint
->                 [Name Nothing "a"]
->                 [Name Nothing "u"]
->                 (Just [Name Nothing "c"])
->                 DefaultReferenceMatch
->                 DefaultReferentialAction DefaultReferentialAction
->        ,TableConstraintDef Nothing $
->             TableReferencesConstraint
->                 [Name Nothing "b"]
->                 [Name Nothing "v"]
->                 (Just [Name Nothing "d"])
->                 DefaultReferenceMatch
->                 DefaultReferentialAction DefaultReferentialAction
->        ])
+    ,(TestStatement ansi2011 { diNonCommaSeparatedConstraints = True }
+      "create table t (a int, b int,\n\
+      \                foreign key (a) references u(c)\n\
+      \                foreign key (b) references v(d));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef Nothing $
+            TableReferencesConstraint
+                [Name Nothing "a"]
+                [Name Nothing "u"]
+                (Just [Name Nothing "c"])
+                DefaultReferenceMatch
+                DefaultReferentialAction DefaultReferentialAction
+       ,TableConstraintDef Nothing $
+            TableReferencesConstraint
+                [Name Nothing "b"]
+                [Name Nothing "v"]
+                (Just [Name Nothing "d"])
+                DefaultReferenceMatch
+                DefaultReferentialAction DefaultReferentialAction
+       ])
 
 
+{-
 <references specification> ::=
   REFERENCES <referenced table and columns>
       [ MATCH <match type> ] [ <referential triggered action> ]
@@ -727,31 +752,33 @@ definitely skip
 
 <check  constraint definition> ::=
      CHECK <left paren> <search condition> <right paren>
+-}
 
->     ,(TestStatement ansi2011
->       "create table t (a int, b int, \n\
->       \                check (a > b));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef Nothing $
->             TableCheckConstraint
->             (BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (Iden [Name Nothing "b"]))
->        ])
+    ,(TestStatement ansi2011
+      "create table t (a int, b int, \n\
+      \                check (a > b));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef Nothing $
+            TableCheckConstraint
+            (BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (Iden [Name Nothing "b"]))
+       ])
 
 
->     ,(TestStatement ansi2011
->       "create table t (a int, b int, \n\
->       \                constraint agtb check (a > b));"
->      $ CreateTable [Name Nothing "t"]
->        [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
->        ,TableConstraintDef (Just [Name Nothing "agtb"]) $
->             TableCheckConstraint
->             (BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (Iden [Name Nothing "b"]))
->        ])
+    ,(TestStatement ansi2011
+      "create table t (a int, b int, \n\
+      \                constraint agtb check (a > b));"
+     $ CreateTable [Name Nothing "t"]
+       [TableColumnDef $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableColumnDef $ ColumnDef (Name Nothing "b") (TypeName [Name Nothing "int"]) Nothing []
+       ,TableConstraintDef (Just [Name Nothing "agtb"]) $
+            TableCheckConstraint
+            (BinOp (Iden [Name Nothing "a"]) [Name Nothing ">"] (Iden [Name Nothing "b"]))
+       ])
 
 
+{-
 TODO: lots more combos of table elements
 and types and the other bits in a column def
 
@@ -780,13 +807,15 @@ and types and the other bits in a column def
 alter table t add column a int
 alter table t add a int
 alter table t add a int unique not null check (a>0)
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t add column a int"
->      $ AlterTable [Name Nothing "t"] $ AddColumnDef
->        $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
->        )
+    ,(TestStatement ansi2011
+      "alter table t add column a int"
+     $ AlterTable [Name Nothing "t"] $ AddColumnDef
+       $ ColumnDef (Name Nothing "a") (TypeName [Name Nothing "int"]) Nothing []
+       )
 
+{-
 todo: more add column
 
 11.12 <alter column definition>
@@ -811,41 +840,49 @@ todo: more add column
 
 <set column default clause> ::=
   SET <default clause>
+-}
 
 
->     ,(TestStatement ansi2011
->       "alter table t alter column c set default 0"
->      $ AlterTable [Name Nothing "t"] $ AlterColumnSetDefault (Name Nothing "c")
->        $ NumLit "0")
+    ,(TestStatement ansi2011
+      "alter table t alter column c set default 0"
+     $ AlterTable [Name Nothing "t"] $ AlterColumnSetDefault (Name Nothing "c")
+       $ NumLit "0")
 
+{-
 11.14 <drop column default clause>
 
 <drop column default clause> ::=
   DROP DEFAULT
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t alter column c drop default"
->      $ AlterTable [Name Nothing "t"] $ AlterColumnDropDefault (Name Nothing "c"))
+    ,(TestStatement ansi2011
+      "alter table t alter column c drop default"
+     $ AlterTable [Name Nothing "t"] $ AlterColumnDropDefault (Name Nothing "c"))
 
 
+{-
 11.15 <set column not null clause>
 
 <set column not null clause> ::=
   SET NOT NULL
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t alter column c set not null"
->      $ AlterTable [Name Nothing "t"] $ AlterColumnSetNotNull (Name Nothing "c"))
+    ,(TestStatement ansi2011
+      "alter table t alter column c set not null"
+     $ AlterTable [Name Nothing "t"] $ AlterColumnSetNotNull (Name Nothing "c"))
 
+{-
 11.16 <drop column not null clause>
 
 <drop column not null clause> ::=
   DROP NOT NULL
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t alter column c drop not null"
->      $ AlterTable [Name Nothing "t"] $ AlterColumnDropNotNull (Name Nothing "c"))
+    ,(TestStatement ansi2011
+      "alter table t alter column c drop not null"
+     $ AlterTable [Name Nothing "t"] $ AlterColumnDropNotNull (Name Nothing "c"))
 
+{-
 11.17 <add column scope clause>
 
 <add column scope clause> ::=
@@ -860,14 +897,16 @@ todo: more add column
 
 <alter column data type clause> ::=
   SET DATA TYPE <data type>
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t alter column c set data type int;"
->      $ AlterTable [Name Nothing "t"] $
->        AlterColumnSetDataType (Name Nothing "c") (TypeName [Name Nothing "int"]))
+    ,(TestStatement ansi2011
+      "alter table t alter column c set data type int;"
+     $ AlterTable [Name Nothing "t"] $
+       AlterColumnSetDataType (Name Nothing "c") (TypeName [Name Nothing "int"]))
 
 
 
+{-
 11.20 <alter identity column specification>
 
 <alter identity column specification> ::=
@@ -959,42 +998,46 @@ included in the generated plan above
 
 <drop column definition> ::=
   DROP [ COLUMN ] <column name> <drop behavior>
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t drop column c"
->      $ AlterTable [Name Nothing "t"] $
->        DropColumn (Name Nothing "c") DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "alter table t drop column c"
+     $ AlterTable [Name Nothing "t"] $
+       DropColumn (Name Nothing "c") DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "alter table t drop c cascade"
->      $ AlterTable [Name Nothing "t"] $
->        DropColumn (Name Nothing "c") Cascade)
+    ,(TestStatement ansi2011
+      "alter table t drop c cascade"
+     $ AlterTable [Name Nothing "t"] $
+       DropColumn (Name Nothing "c") Cascade)
 
->     ,(TestStatement ansi2011
->       "alter table t drop c restrict"
->      $ AlterTable [Name Nothing "t"] $
->        DropColumn (Name Nothing "c") Restrict)
+    ,(TestStatement ansi2011
+      "alter table t drop c restrict"
+     $ AlterTable [Name Nothing "t"] $
+       DropColumn (Name Nothing "c") Restrict)
 
 
 
+{-
 11.24 <add table constraint definition>
 
 <add table constraint definition> ::=
   ADD <table constraint definition>
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t add constraint c unique (a,b)"
->      $ AlterTable [Name Nothing "t"] $
->        AddTableConstraintDef (Just [Name Nothing "c"])
->             $ TableUniqueConstraint [Name Nothing "a", Name Nothing "b"])
+    ,(TestStatement ansi2011
+      "alter table t add constraint c unique (a,b)"
+     $ AlterTable [Name Nothing "t"] $
+       AddTableConstraintDef (Just [Name Nothing "c"])
+            $ TableUniqueConstraint [Name Nothing "a", Name Nothing "b"])
 
->     ,(TestStatement ansi2011
->       "alter table t add unique (a,b)"
->      $ AlterTable [Name Nothing "t"] $
->        AddTableConstraintDef Nothing
->             $ TableUniqueConstraint [Name Nothing "a", Name Nothing "b"])
+    ,(TestStatement ansi2011
+      "alter table t add unique (a,b)"
+     $ AlterTable [Name Nothing "t"] $
+       AddTableConstraintDef Nothing
+            $ TableUniqueConstraint [Name Nothing "a", Name Nothing "b"])
 
 
+{-
 11.25 <alter table constraint definition>
 <alter table constraint definition> ::=
   ALTER CONSTRAINT <constraint name> <constraint enforcement>
@@ -1005,17 +1048,19 @@ todo
 
 <drop table constraint definition> ::=
   DROP CONSTRAINT <constraint name> <drop behavior>
+-}
 
->     ,(TestStatement ansi2011
->       "alter table t drop constraint c"
->      $ AlterTable [Name Nothing "t"] $
->        DropTableConstraintDef [Name Nothing "c"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "alter table t drop constraint c"
+     $ AlterTable [Name Nothing "t"] $
+       DropTableConstraintDef [Name Nothing "c"] DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "alter table t drop constraint c restrict"
->      $ AlterTable [Name Nothing "t"] $
->        DropTableConstraintDef [Name Nothing "c"] Restrict)
+    ,(TestStatement ansi2011
+      "alter table t drop constraint c restrict"
+     $ AlterTable [Name Nothing "t"] $
+       DropTableConstraintDef [Name Nothing "c"] Restrict)
 
+{-
 11.27 <add table period definition>
 
 <add table period definition> ::=
@@ -1063,16 +1108,18 @@ definitely skip
 
 <drop table statement> ::=
   DROP TABLE <table name> <drop behavior>
+-}
 
->     ,(TestStatement ansi2011
->       "drop table t"
->      $ DropTable [Name Nothing "t"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "drop table t"
+     $ DropTable [Name Nothing "t"] DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "drop table t restrict"
->      $ DropTable [Name Nothing "t"] Restrict)
+    ,(TestStatement ansi2011
+      "drop table t restrict"
+     $ DropTable [Name Nothing "t"] Restrict)
 
 
+{-
 11.32 <view definition>
 
 <view definition> ::=
@@ -1109,69 +1156,73 @@ definitely skip
 
 <view column list> ::=
   <column name list>
+-}
 
->     ,(TestStatement ansi2011
->       "create view v as select * from t"
->      $ CreateView False [Name Nothing "v"] Nothing (makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }) Nothing)
+    ,(TestStatement ansi2011
+      "create view v as select * from t"
+     $ CreateView False [Name Nothing "v"] Nothing (makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }) Nothing)
 
 
->     ,(TestStatement ansi2011
->       "create recursive view v as select * from t"
->      $ CreateView True [Name Nothing "v"] Nothing (makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }) Nothing)
+    ,(TestStatement ansi2011
+      "create recursive view v as select * from t"
+     $ CreateView True [Name Nothing "v"] Nothing (makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }) Nothing)
 
->     ,(TestStatement ansi2011
->       "create view v(a,b) as select * from t"
->      $ CreateView False [Name Nothing "v"] (Just [Name Nothing "a", Name Nothing "b"])
->          (makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }) Nothing)
+    ,(TestStatement ansi2011
+      "create view v(a,b) as select * from t"
+     $ CreateView False [Name Nothing "v"] (Just [Name Nothing "a", Name Nothing "b"])
+         (makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }) Nothing)
 
 
->     ,(TestStatement ansi2011
->       "create view v as select * from t with check option"
->      $ CreateView False [Name Nothing "v"] Nothing (makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }) (Just DefaultCheckOption))
+    ,(TestStatement ansi2011
+      "create view v as select * from t with check option"
+     $ CreateView False [Name Nothing "v"] Nothing (makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }) (Just DefaultCheckOption))
 
->     ,(TestStatement ansi2011
->       "create view v as select * from t with cascaded check option"
->      $ CreateView False [Name Nothing "v"] Nothing (makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }) (Just CascadedCheckOption))
+    ,(TestStatement ansi2011
+      "create view v as select * from t with cascaded check option"
+     $ CreateView False [Name Nothing "v"] Nothing (makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }) (Just CascadedCheckOption))
 
->     ,(TestStatement ansi2011
->       "create view v as select * from t with local check option"
->      $ CreateView False [Name Nothing "v"] Nothing
->          (makeSelect
->          {qeSelectList = [(Star, Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }) (Just LocalCheckOption))
+    ,(TestStatement ansi2011
+      "create view v as select * from t with local check option"
+     $ CreateView False [Name Nothing "v"] Nothing
+         (makeSelect
+         {qeSelectList = [(Star, Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }) (Just LocalCheckOption))
 
 
+{-
 11.33 <drop view statement>
 
 <drop view statement> ::=
   DROP VIEW <table name> <drop behavior>
+-}
 
 
->     ,(TestStatement ansi2011
->       "drop view v"
->      $ DropView [Name Nothing "v"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "drop view v"
+     $ DropView [Name Nothing "v"] DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "drop view v cascade"
->      $ DropView [Name Nothing "v"] Cascade)
+    ,(TestStatement ansi2011
+      "drop view v cascade"
+     $ DropView [Name Nothing "v"] Cascade)
 
 
+{-
 11.34 <domain definition>
 
 <domain definition> ::=
@@ -1183,41 +1234,43 @@ definitely skip
 <domain constraint> ::=
   [ <constraint name definition> ] <check constraint definition> [
       <constraint characteristics> ]
+-}
 
->     ,(TestStatement ansi2011
->       "create domain my_int int"
->      $ CreateDomain [Name Nothing "my_int"]
->           (TypeName [Name Nothing "int"])
->           Nothing [])
+    ,(TestStatement ansi2011
+      "create domain my_int int"
+     $ CreateDomain [Name Nothing "my_int"]
+          (TypeName [Name Nothing "int"])
+          Nothing [])
 
->     ,(TestStatement ansi2011
->       "create domain my_int as int"
->      $ CreateDomain [Name Nothing "my_int"]
->           (TypeName [Name Nothing "int"])
->           Nothing [])
+    ,(TestStatement ansi2011
+      "create domain my_int as int"
+     $ CreateDomain [Name Nothing "my_int"]
+          (TypeName [Name Nothing "int"])
+          Nothing [])
 
->     ,(TestStatement ansi2011
->       "create domain my_int int default 0"
->      $ CreateDomain [Name Nothing "my_int"]
->           (TypeName [Name Nothing "int"])
->           (Just (NumLit "0")) [])
+    ,(TestStatement ansi2011
+      "create domain my_int int default 0"
+     $ CreateDomain [Name Nothing "my_int"]
+          (TypeName [Name Nothing "int"])
+          (Just (NumLit "0")) [])
 
->     ,(TestStatement ansi2011
->       "create domain my_int int check (value > 5)"
->      $ CreateDomain [Name Nothing "my_int"]
->           (TypeName [Name Nothing "int"])
->           Nothing [(Nothing
->                    ,BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "5"))])
+    ,(TestStatement ansi2011
+      "create domain my_int int check (value > 5)"
+     $ CreateDomain [Name Nothing "my_int"]
+          (TypeName [Name Nothing "int"])
+          Nothing [(Nothing
+                   ,BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "5"))])
 
->     ,(TestStatement ansi2011
->       "create domain my_int int constraint gt5 check (value > 5)"
->      $ CreateDomain [Name Nothing "my_int"]
->           (TypeName [Name Nothing "int"])
->           Nothing [(Just [Name Nothing "gt5"]
->                    ,BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "5"))])
+    ,(TestStatement ansi2011
+      "create domain my_int int constraint gt5 check (value > 5)"
+     $ CreateDomain [Name Nothing "my_int"]
+          (TypeName [Name Nothing "int"])
+          Nothing [(Just [Name Nothing "gt5"]
+                   ,BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "5"))])
 
 
 
+{-
 11.35 <alter domain statement>
 
 <alter domain statement> ::=
@@ -1233,67 +1286,77 @@ definitely skip
 
 <set domain default clause> ::=
   SET <default clause>
+-}
 
->     ,(TestStatement ansi2011
->       "alter domain my_int set default 0"
->      $ AlterDomain [Name Nothing "my_int"]
->        $ ADSetDefault $ NumLit "0")
+    ,(TestStatement ansi2011
+      "alter domain my_int set default 0"
+     $ AlterDomain [Name Nothing "my_int"]
+       $ ADSetDefault $ NumLit "0")
 
 
+{-
 11.37 <drop domain default clause>
 
 <drop domain default clause> ::=
   DROP DEFAULT
+-}
 
->     ,(TestStatement ansi2011
->       "alter domain my_int drop default"
->      $ AlterDomain [Name Nothing "my_int"]
->        $ ADDropDefault)
+    ,(TestStatement ansi2011
+      "alter domain my_int drop default"
+     $ AlterDomain [Name Nothing "my_int"]
+       $ ADDropDefault)
 
 
+{-
 11.38 <add domain constraint definition>
 
 <add domain constraint definition> ::=
   ADD <domain constraint>
+-}
 
->     ,(TestStatement ansi2011
->       "alter domain my_int add check (value > 6)"
->      $ AlterDomain [Name Nothing "my_int"]
->        $ ADAddConstraint Nothing
->          $ BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "6"))
+    ,(TestStatement ansi2011
+      "alter domain my_int add check (value > 6)"
+     $ AlterDomain [Name Nothing "my_int"]
+       $ ADAddConstraint Nothing
+         $ BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "6"))
 
->     ,(TestStatement ansi2011
->       "alter domain my_int add constraint gt6 check (value > 6)"
->      $ AlterDomain [Name Nothing "my_int"]
->        $ ADAddConstraint (Just [Name Nothing "gt6"])
->          $ BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "6"))
+    ,(TestStatement ansi2011
+      "alter domain my_int add constraint gt6 check (value > 6)"
+     $ AlterDomain [Name Nothing "my_int"]
+       $ ADAddConstraint (Just [Name Nothing "gt6"])
+         $ BinOp (Iden [Name Nothing "value"]) [Name Nothing ">"] (NumLit "6"))
 
 
+{-
 11.39 <drop domain constraint definition>
 
 <drop domain constraint definition> ::=
   DROP CONSTRAINT <constraint name>
+-}
 
->     ,(TestStatement ansi2011
->       "alter domain my_int drop constraint gt6"
->      $ AlterDomain [Name Nothing "my_int"]
->        $ ADDropConstraint [Name Nothing "gt6"])
+    ,(TestStatement ansi2011
+      "alter domain my_int drop constraint gt6"
+     $ AlterDomain [Name Nothing "my_int"]
+       $ ADDropConstraint [Name Nothing "gt6"])
 
+{-
 11.40 <drop domain statement>
 
 <drop domain statement> ::=
   DROP DOMAIN <domain name> <drop behavior>
+-}
 
->     ,(TestStatement ansi2011
->       "drop domain my_int"
->      $ DropDomain [Name Nothing "my_int"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "drop domain my_int"
+     $ DropDomain [Name Nothing "my_int"] DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "drop domain my_int cascade"
->      $ DropDomain [Name Nothing "my_int"] Cascade)
+    ,(TestStatement ansi2011
+      "drop domain my_int cascade"
+     $ DropDomain [Name Nothing "my_int"] Cascade)
 
 
 
+{-
 11.41 <character set definition>
 
 <character set definition> ::=
@@ -1359,31 +1422,35 @@ definitely skip
      CREATE ASSERTION <constraint name>
          CHECK <left paren> <search condition> <right paren>
          [ <constraint characteristics> ]
+-}
 
->     ,(TestStatement ansi2011
->       "create assertion t1_not_empty CHECK ((select count(*) from t1) > 0);"
->      $ CreateAssertion [Name Nothing "t1_not_empty"]
->         $ BinOp (SubQueryExpr SqSq $
->                  makeSelect
->                  {qeSelectList = [(App [Name Nothing "count"] [Star],Nothing)]
->                  ,qeFrom = [TRSimple [Name Nothing "t1"]]
->                  })
->                 [Name Nothing ">"] (NumLit "0"))
+    ,(TestStatement ansi2011
+      "create assertion t1_not_empty CHECK ((select count(*) from t1) > 0);"
+     $ CreateAssertion [Name Nothing "t1_not_empty"]
+        $ BinOp (SubQueryExpr SqSq $
+                 makeSelect
+                 {qeSelectList = [(App [Name Nothing "count"] [Star],Nothing)]
+                 ,qeFrom = [TRSimple [Name Nothing "t1"]]
+                 })
+                [Name Nothing ">"] (NumLit "0"))
 
+{-
 11.48 <drop assertion statement>
 
 <drop assertion statement> ::=
   DROP ASSERTION <constraint name> [ <drop behavior> ]
+-}
 
->     ,(TestStatement ansi2011
->       "drop assertion t1_not_empty;"
->      $ DropAssertion [Name Nothing "t1_not_empty"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "drop assertion t1_not_empty;"
+     $ DropAssertion [Name Nothing "t1_not_empty"] DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "drop assertion t1_not_empty cascade;"
->      $ DropAssertion [Name Nothing "t1_not_empty"] Cascade)
+    ,(TestStatement ansi2011
+      "drop assertion t1_not_empty cascade;"
+     $ DropAssertion [Name Nothing "t1_not_empty"] Cascade)
 
 
+{-
 11.49 <trigger definition>
 
 <trigger definition> ::=
@@ -2015,24 +2082,26 @@ definitely skip
 <sequence generator cycle option> ::=
     CYCLE
   | NO CYCLE
+-}
 
->     ,(TestStatement ansi2011
->       "create sequence seq"
->      $ CreateSequence [Name Nothing "seq"] [])
+    ,(TestStatement ansi2011
+      "create sequence seq"
+     $ CreateSequence [Name Nothing "seq"] [])
 
->     ,(TestStatement ansi2011
->       "create sequence seq as bigint"
->      $ CreateSequence [Name Nothing "seq"]
->         [SGODataType $ TypeName [Name Nothing "bigint"]])
+    ,(TestStatement ansi2011
+      "create sequence seq as bigint"
+     $ CreateSequence [Name Nothing "seq"]
+        [SGODataType $ TypeName [Name Nothing "bigint"]])
 
->     ,(TestStatement ansi2011
->       "create sequence seq as bigint start with 5"
->      $ CreateSequence [Name Nothing "seq"]
->         [SGOStartWith 5
->         ,SGODataType $ TypeName [Name Nothing "bigint"]
->         ])
+    ,(TestStatement ansi2011
+      "create sequence seq as bigint start with 5"
+     $ CreateSequence [Name Nothing "seq"]
+        [SGOStartWith 5
+        ,SGODataType $ TypeName [Name Nothing "bigint"]
+        ])
 
 
+{-
 11.73 <alter sequence generator statement>
 
 <alter sequence generator statement> ::=
@@ -2050,36 +2119,39 @@ defintely skip
 
 <sequence generator restart value> ::=
   <signed numeric literal>
+-}
 
->     ,(TestStatement ansi2011
->       "alter sequence seq restart"
->      $ AlterSequence [Name Nothing "seq"]
->         [SGORestart Nothing])
+    ,(TestStatement ansi2011
+      "alter sequence seq restart"
+     $ AlterSequence [Name Nothing "seq"]
+        [SGORestart Nothing])
 
->     ,(TestStatement ansi2011
->       "alter sequence seq restart with 5"
->      $ AlterSequence [Name Nothing "seq"]
->         [SGORestart $ Just 5])
+    ,(TestStatement ansi2011
+      "alter sequence seq restart with 5"
+     $ AlterSequence [Name Nothing "seq"]
+        [SGORestart $ Just 5])
 
->     ,(TestStatement ansi2011
->       "alter sequence seq restart with 5 increment by 5"
->      $ AlterSequence [Name Nothing "seq"]
->         [SGORestart $ Just 5
->         ,SGOIncrementBy 5])
+    ,(TestStatement ansi2011
+      "alter sequence seq restart with 5 increment by 5"
+     $ AlterSequence [Name Nothing "seq"]
+        [SGORestart $ Just 5
+        ,SGOIncrementBy 5])
 
 
+{-
 11.74 <drop sequence generator statement>
 
 <drop sequence generator statement> ::=
   DROP SEQUENCE <sequence generator name> <drop behavior>
+-}
 
->     ,(TestStatement ansi2011
->       "drop sequence seq"
->      $ DropSequence [Name Nothing "seq"] DefaultDropBehaviour)
+    ,(TestStatement ansi2011
+      "drop sequence seq"
+     $ DropSequence [Name Nothing "seq"] DefaultDropBehaviour)
 
->     ,(TestStatement ansi2011
->       "drop sequence seq restrict"
->      $ DropSequence [Name Nothing "seq"] Restrict)
+    ,(TestStatement ansi2011
+      "drop sequence seq restrict"
+     $ DropSequence [Name Nothing "seq"] Restrict)
 
 
->     ]
+    ]
diff --git a/tools/Language/SQL/SimpleSQL/ScalarExprs.hs b/tools/Language/SQL/SimpleSQL/ScalarExprs.hs
new file mode 100644
index 0000000..4c371f7
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/ScalarExprs.hs
@@ -0,0 +1,432 @@
+
+-- Tests for parsing scalar expressions
+
+module Language.SQL.SimpleSQL.ScalarExprs (scalarExprTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+scalarExprTests :: TestItem
+scalarExprTests = Group "scalarExprTests"
+    [literals
+    ,identifiers
+    ,star
+    ,parameter
+    ,dots
+    ,app
+    ,caseexp
+    ,convertfun     
+    ,operators
+    ,parens
+    ,subqueries
+    ,aggregates
+    ,windowFunctions
+    ,functionsWithReservedNames
+    ]
+
+literals :: TestItem
+literals = Group "literals" $ map (uncurry (TestScalarExpr ansi2011))
+    [("3", NumLit "3")
+     ,("3.", NumLit "3.")
+     ,("3.3", NumLit "3.3")
+     ,(".3", NumLit ".3")
+     ,("3.e3", NumLit "3.e3")
+     ,("3.3e3", NumLit "3.3e3")
+     ,(".3e3", NumLit ".3e3")
+     ,("3e3", NumLit "3e3")
+     ,("3e+3", NumLit "3e+3")
+     ,("3e-3", NumLit "3e-3")
+     ,("'string'", StringLit "'" "'" "string")
+     ,("'string with a '' quote'", StringLit "'" "'" "string with a '' quote")
+     ,("'1'", StringLit "'" "'" "1")
+     ,("interval '3' day"
+      ,IntervalLit Nothing "3" (Itf "day" Nothing) Nothing)
+     ,("interval '3' day (3)"
+      ,IntervalLit Nothing "3" (Itf "day" $ Just (3,Nothing)) Nothing)
+     ,("interval '3 weeks'", TypedLit (TypeName [Name Nothing "interval"]) "3 weeks")
+    ]
+
+identifiers :: TestItem
+identifiers = Group "identifiers" $ map (uncurry (TestScalarExpr ansi2011))
+    [("iden1", Iden [Name Nothing "iden1"])
+    --,("t.a", Iden2 "t" "a")
+    ,("\"quoted identifier\"", Iden [Name (Just ("\"","\"")) "quoted identifier"])
+    ,("\"from\"", Iden [Name (Just ("\"","\"")) "from"])
+    ]
+
+star :: TestItem
+star = Group "star" $ map (uncurry (TestScalarExpr ansi2011))
+    [("*", Star)
+    --,("t.*", Star2 "t")
+    --,("ROW(t.*,42)", App "ROW" [Star2 "t", NumLit "42"])
+    ]
+
+parameter :: TestItem
+parameter = Group "parameter"
+    [TestScalarExpr ansi2011 "?" Parameter
+    ,TestScalarExpr postgres "$13" $ PositionalArg 13]
+
+
+dots :: TestItem
+dots = Group "dot" $ map (uncurry (TestScalarExpr ansi2011))
+    [("t.a", Iden [Name Nothing "t",Name Nothing "a"])
+    ,("t.*", BinOp (Iden [Name Nothing "t"]) [Name Nothing "."] Star)
+    ,("a.b.c", Iden [Name Nothing "a",Name Nothing "b",Name Nothing "c"])
+    ,("ROW(t.*,42)", App [Name Nothing "ROW"] [BinOp (Iden [Name Nothing "t"]) [Name Nothing "."] Star, NumLit "42"])
+    ]
+
+app :: TestItem
+app = Group "app" $ map (uncurry (TestScalarExpr ansi2011))
+    [("f()", App [Name Nothing "f"] [])
+    ,("f(a)", App [Name Nothing "f"] [Iden [Name Nothing "a"]])
+    ,("f(a,b)", App [Name Nothing "f"] [Iden [Name Nothing "a"], Iden [Name Nothing "b"]])
+    ]
+
+caseexp :: TestItem
+caseexp = Group "caseexp" $ map (uncurry (TestScalarExpr ansi2011))
+    [("case a when 1 then 2 end"
+     ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1"]
+                              ,NumLit "2")] Nothing)
+
+    ,("case a when 1 then 2 when 3 then 4 end"
+     ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1"], NumLit "2")
+                             ,([NumLit "3"], NumLit "4")] Nothing)
+
+    ,("case a when 1 then 2 when 3 then 4 else 5 end"
+     ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1"], NumLit "2")
+                             ,([NumLit "3"], NumLit "4")]
+                             (Just $ NumLit "5"))
+
+    ,("case when a=1 then 2 when a=3 then 4 else 5 end"
+     ,Case Nothing [([BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "1")], NumLit "2")
+                   ,([BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "3")], NumLit "4")]
+                   (Just $ NumLit "5"))
+
+    ,("case a when 1,2 then 10 when 3,4 then 20 end"
+     ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1",NumLit "2"]
+                               ,NumLit "10")
+                             ,([NumLit "3",NumLit "4"]
+                               ,NumLit "20")]
+                             Nothing)
+
+    ]
+
+convertfun :: TestItem 
+convertfun = Group "convert" $ map (uncurry (TestScalarExpr sqlserver))
+    [("CONVERT(varchar, 25.65)"
+     ,Convert (TypeName [Name Nothing "varchar"]) (NumLit "25.65") Nothing)
+    ,("CONVERT(datetime, '2017-08-25')"
+     ,Convert (TypeName [Name Nothing "datetime"]) (StringLit "'" "'" "2017-08-25") Nothing)
+    ,("CONVERT(varchar, '2017-08-25', 101)"
+     ,Convert (TypeName [Name Nothing "varchar"]) (StringLit "'" "'" "2017-08-25") (Just 101))
+    ]
+
+operators :: TestItem
+operators = Group "operators"
+    [binaryOperators
+    ,unaryOperators
+    ,casts
+    ,miscOps]
+
+binaryOperators :: TestItem
+binaryOperators = Group "binaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
+    [("a + b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"] (Iden [Name Nothing "b"]))
+     -- sanity check fixities
+     -- todo: add more fixity checking
+
+    ,("a + b * c"
+     ,BinOp  (Iden [Name Nothing "a"]) [Name Nothing "+"]
+             (BinOp (Iden [Name Nothing "b"]) [Name Nothing "*"] (Iden [Name Nothing "c"])))
+
+    ,("a * b + c"
+     ,BinOp (BinOp (Iden [Name Nothing "a"]) [Name Nothing "*"] (Iden [Name Nothing "b"]))
+            [Name Nothing "+"] (Iden [Name Nothing "c"]))
+    ]
+
+unaryOperators :: TestItem
+unaryOperators = Group "unaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
+    [("not a", PrefixOp [Name Nothing "not"] $ Iden [Name Nothing "a"])
+    ,("not not a", PrefixOp [Name Nothing "not"] $ PrefixOp [Name Nothing "not"] $ Iden [Name Nothing "a"])
+    ,("+a", PrefixOp [Name Nothing "+"] $ Iden [Name Nothing "a"])
+    ,("-a", PrefixOp [Name Nothing "-"] $ Iden [Name Nothing "a"])
+    ]
+
+
+casts :: TestItem
+casts = Group "operators" $ map (uncurry (TestScalarExpr ansi2011))
+    [("cast('1' as int)"
+     ,Cast (StringLit "'" "'" "1") $ TypeName [Name Nothing "int"])
+
+    ,("int '3'"
+     ,TypedLit (TypeName [Name Nothing "int"]) "3")
+
+    ,("cast('1' as double precision)"
+     ,Cast (StringLit "'" "'" "1") $ TypeName [Name Nothing "double precision"])
+
+    ,("cast('1' as float(8))"
+     ,Cast (StringLit "'" "'" "1") $ PrecTypeName [Name Nothing "float"] 8)
+
+    ,("cast('1' as decimal(15,2))"
+     ,Cast (StringLit "'" "'" "1") $ PrecScaleTypeName [Name Nothing "decimal"] 15 2)
+
+
+    ,("double precision '3'"
+     ,TypedLit (TypeName [Name Nothing "double precision"]) "3")
+    ]
+
+subqueries :: TestItem
+subqueries = Group "unaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
+    [("exists (select a from t)", SubQueryExpr SqExists ms)
+    ,("(select a from t)", SubQueryExpr SqSq ms)
+
+    ,("a in (select a from t)"
+     ,In True (Iden [Name Nothing "a"]) (InQueryExpr ms))
+
+    ,("a not in (select a from t)"
+     ,In False (Iden [Name Nothing "a"]) (InQueryExpr ms))
+
+    ,("a > all (select a from t)"
+     ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing ">"] CPAll ms)
+
+    ,("a = some (select a from t)"
+     ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "="] CPSome ms)
+
+    ,("a <= any (select a from t)"
+     ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "<="] CPAny ms)
+    ]
+  where
+    ms = makeSelect
+         {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+         ,qeFrom = [TRSimple [Name Nothing "t"]]
+         }
+
+miscOps :: TestItem
+miscOps = Group "unaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
+    [("a in (1,2,3)"
+     ,In True (Iden [Name Nothing "a"]) $ InList $ map NumLit ["1","2","3"])
+
+    ,("a is null", PostfixOp [Name Nothing "is null"] (Iden [Name Nothing "a"]))
+    ,("a is not null", PostfixOp [Name Nothing "is not null"] (Iden [Name Nothing "a"]))
+    ,("a is true", PostfixOp [Name Nothing "is true"] (Iden [Name Nothing "a"]))
+    ,("a is not true", PostfixOp [Name Nothing "is not true"] (Iden [Name Nothing "a"]))
+    ,("a is false", PostfixOp [Name Nothing "is false"] (Iden [Name Nothing "a"]))
+    ,("a is not false", PostfixOp [Name Nothing "is not false"] (Iden [Name Nothing "a"]))
+    ,("a is unknown", PostfixOp [Name Nothing "is unknown"] (Iden [Name Nothing "a"]))
+    ,("a is not unknown", PostfixOp [Name Nothing "is not unknown"] (Iden [Name Nothing "a"]))
+    ,("a is distinct from b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "is distinct from"] (Iden [Name Nothing "b"]))
+
+    ,("a is not distinct from b"
+     ,BinOp (Iden [Name Nothing "a"]) [Name Nothing "is not distinct from"] (Iden [Name Nothing "b"]))
+
+    ,("a like b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "like"] (Iden [Name Nothing "b"]))
+    ,("a not like b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "not like"] (Iden [Name Nothing "b"]))
+    ,("a is similar to b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "is similar to"] (Iden [Name Nothing "b"]))
+
+    ,("a is not similar to b"
+     ,BinOp (Iden [Name Nothing "a"]) [Name Nothing "is not similar to"] (Iden [Name Nothing "b"]))
+
+    ,("a overlaps b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "overlaps"] (Iden [Name Nothing "b"]))
+
+
+-- special operators
+
+    ,("a between b and c", SpecialOp [Name Nothing "between"] [Iden [Name Nothing "a"]
+                                               ,Iden [Name Nothing "b"]
+                                               ,Iden [Name Nothing "c"]])
+
+    ,("a not between b and c", SpecialOp [Name Nothing "not between"] [Iden [Name Nothing "a"]
+                                                       ,Iden [Name Nothing "b"]
+                                                       ,Iden [Name Nothing "c"]])
+    ,("(1,2)"
+     ,SpecialOp [Name Nothing "rowctor"] [NumLit "1", NumLit "2"])
+
+
+-- keyword special operators
+
+    ,("extract(day from t)"
+     , SpecialOpK [Name Nothing "extract"] (Just $ Iden [Name Nothing "day"]) [("from", Iden [Name Nothing "t"])])
+
+    ,("substring(x from 1 for 2)"
+     ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"]) [("from", NumLit "1")
+                                               ,("for", NumLit "2")])
+
+    ,("substring(x from 1)"
+     ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"]) [("from", NumLit "1")])
+
+    ,("substring(x for 2)"
+     ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"]) [("for", NumLit "2")])
+
+    ,("substring(x from 1 for 2 collate C)"
+     ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"])
+          [("from", NumLit "1")
+          ,("for", Collate (NumLit "2") [Name Nothing "C"])])
+
+-- this doesn't work because of a overlap in the 'in' parser
+
+    ,("POSITION( string1 IN string2 )"
+     ,SpecialOpK [Name Nothing "position"] (Just $ Iden [Name Nothing "string1"]) [("in", Iden [Name Nothing "string2"])])
+
+    ,("CONVERT(char_value USING conversion_char_name)"
+     ,SpecialOpK [Name Nothing "convert"] (Just $ Iden [Name Nothing "char_value"])
+          [("using", Iden [Name Nothing "conversion_char_name"])])
+
+    ,("TRANSLATE(char_value USING translation_name)"
+     ,SpecialOpK [Name Nothing "translate"] (Just $ Iden [Name Nothing "char_value"])
+          [("using", Iden [Name Nothing "translation_name"])])
+
+{-
+OVERLAY(string PLACING embedded_string FROM start
+[FOR length])
+-}
+
+    ,("OVERLAY(string PLACING embedded_string FROM start)"
+     ,SpecialOpK [Name Nothing "overlay"] (Just $ Iden [Name Nothing "string"])
+          [("placing", Iden [Name Nothing "embedded_string"])
+          ,("from", Iden [Name Nothing "start"])])
+
+    ,("OVERLAY(string PLACING embedded_string FROM start FOR length)"
+     ,SpecialOpK [Name Nothing "overlay"] (Just $ Iden [Name Nothing "string"])
+          [("placing", Iden [Name Nothing "embedded_string"])
+          ,("from", Iden [Name Nothing "start"])
+          ,("for", Iden [Name Nothing "length"])])
+
+{-
+TRIM( [ [{LEADING | TRAILING | BOTH}] [removal_char] FROM ]
+target_string
+[COLLATE collation_name] )
+-}
+
+
+
+    ,("trim(from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("both", StringLit "'" "'" " ")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+    ,("trim(leading from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("leading", StringLit "'" "'" " ")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+    ,("trim(trailing from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("trailing", StringLit "'" "'" " ")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+    ,("trim(both from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("both", StringLit "'" "'" " ")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+
+    ,("trim(leading 'x' from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("leading", StringLit "'" "'" "x")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+    ,("trim(trailing 'y' from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("trailing", StringLit "'" "'" "y")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+    ,("trim(both 'z' from target_string collate C)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("both", StringLit "'" "'" "z")
+          ,("from", Collate (Iden [Name Nothing "target_string"]) [Name Nothing "C"])])
+
+    ,("trim(leading from target_string)"
+     ,SpecialOpK [Name Nothing "trim"] Nothing
+          [("leading", StringLit "'" "'" " ")
+          ,("from", Iden [Name Nothing "target_string"])])
+
+
+    ]
+
+aggregates :: TestItem
+aggregates = Group "aggregates" $ map (uncurry (TestScalarExpr ansi2011))
+    [("count(*)",App [Name Nothing "count"] [Star])
+
+    ,("sum(a order by a)"
+    ,AggregateApp [Name Nothing "sum"] SQDefault [Iden [Name Nothing "a"]]
+                  [SortSpec (Iden [Name Nothing "a"]) DirDefault NullsOrderDefault] Nothing)
+
+    ,("sum(all a)"
+    ,AggregateApp [Name Nothing "sum"] All [Iden [Name Nothing "a"]] [] Nothing)
+
+    ,("count(distinct a)"
+    ,AggregateApp [Name Nothing "count"] Distinct [Iden [Name Nothing "a"]] [] Nothing)
+    ]
+
+windowFunctions :: TestItem
+windowFunctions = Group "windowFunctions" $ map (uncurry (TestScalarExpr ansi2011))
+    [("max(a) over ()", WindowApp [Name Nothing "max"] [Iden [Name Nothing "a"]] [] [] Nothing)
+    ,("count(*) over ()", WindowApp [Name Nothing "count"] [Star] [] [] Nothing)
+
+    ,("max(a) over (partition by b)"
+     ,WindowApp [Name Nothing "max"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]] [] Nothing)
+
+    ,("max(a) over (partition by b,c)"
+     ,WindowApp [Name Nothing "max"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"],Iden [Name Nothing "c"]] [] Nothing)
+
+    ,("sum(a) over (order by b)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] []
+          [SortSpec (Iden [Name Nothing "b"]) DirDefault NullsOrderDefault] Nothing)
+
+    ,("sum(a) over (order by b desc,c)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] []
+          [SortSpec (Iden [Name Nothing "b"]) Desc NullsOrderDefault
+          ,SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault] Nothing)
+
+    ,("sum(a) over (partition by b order by c)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+          [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault] Nothing)
+
+    ,("sum(a) over (partition by b order by c range unbounded preceding)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+      [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
+      $ Just $ FrameFrom FrameRange UnboundedPreceding)
+
+    ,("sum(a) over (partition by b order by c range 5 preceding)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+      [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
+      $ Just $ FrameFrom FrameRange $ Preceding (NumLit "5"))
+
+    ,("sum(a) over (partition by b order by c range current row)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+      [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
+      $ Just $ FrameFrom FrameRange Current)
+
+    ,("sum(a) over (partition by b order by c rows 5 following)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+      [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
+      $ Just $ FrameFrom FrameRows $ Following (NumLit "5"))
+
+    ,("sum(a) over (partition by b order by c range unbounded following)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+      [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
+      $ Just $ FrameFrom FrameRange UnboundedFollowing)
+
+    ,("sum(a) over (partition by b order by c \n\
+      \range between 5 preceding and 5 following)"
+     ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
+      [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
+      $ Just $ FrameBetween FrameRange
+                            (Preceding (NumLit "5"))
+                            (Following (NumLit "5")))
+
+    ]
+
+parens :: TestItem
+parens = Group "parens" $ map (uncurry (TestScalarExpr ansi2011))
+    [("(a)", Parens (Iden [Name Nothing "a"]))
+    ,("(a + b)", Parens (BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"] (Iden [Name Nothing "b"])))
+    ]
+
+functionsWithReservedNames :: TestItem
+functionsWithReservedNames = Group "functionsWithReservedNames" $ map t
+    ["abs"
+    ,"char_length"
+    ]
+  where
+    t fn = TestScalarExpr ansi2011 (fn ++ "(a)") $ App [Name Nothing fn] [Iden [Name Nothing "a"]]
+
diff --git a/tools/Language/SQL/SimpleSQL/ScalarExprs.lhs b/tools/Language/SQL/SimpleSQL/ScalarExprs.lhs
deleted file mode 100644
index f587e1c..0000000
--- a/tools/Language/SQL/SimpleSQL/ScalarExprs.lhs
+++ /dev/null
@@ -1,428 +0,0 @@
-
-Tests for parsing scalar expressions
-
-> module Language.SQL.SimpleSQL.ScalarExprs (scalarExprTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-> scalarExprTests :: TestItem
-> scalarExprTests = Group "scalarExprTests"
->     [literals
->     ,identifiers
->     ,star
->     ,parameter
->     ,dots
->     ,app
->     ,caseexp
->     ,convertfun     
->     ,operators
->     ,parens
->     ,subqueries
->     ,aggregates
->     ,windowFunctions
->     ,functionsWithReservedNames
->     ]
-
-> literals :: TestItem
-> literals = Group "literals" $ map (uncurry (TestScalarExpr ansi2011))
->     [("3", NumLit "3")
->      ,("3.", NumLit "3.")
->      ,("3.3", NumLit "3.3")
->      ,(".3", NumLit ".3")
->      ,("3.e3", NumLit "3.e3")
->      ,("3.3e3", NumLit "3.3e3")
->      ,(".3e3", NumLit ".3e3")
->      ,("3e3", NumLit "3e3")
->      ,("3e+3", NumLit "3e+3")
->      ,("3e-3", NumLit "3e-3")
->      ,("'string'", StringLit "'" "'" "string")
->      ,("'string with a '' quote'", StringLit "'" "'" "string with a '' quote")
->      ,("'1'", StringLit "'" "'" "1")
->      ,("interval '3' day"
->       ,IntervalLit Nothing "3" (Itf "day" Nothing) Nothing)
->      ,("interval '3' day (3)"
->       ,IntervalLit Nothing "3" (Itf "day" $ Just (3,Nothing)) Nothing)
->      ,("interval '3 weeks'", TypedLit (TypeName [Name Nothing "interval"]) "3 weeks")
->     ]
-
-> identifiers :: TestItem
-> identifiers = Group "identifiers" $ map (uncurry (TestScalarExpr ansi2011))
->     [("iden1", Iden [Name Nothing "iden1"])
->     --,("t.a", Iden2 "t" "a")
->     ,("\"quoted identifier\"", Iden [Name (Just ("\"","\"")) "quoted identifier"])
->     ,("\"from\"", Iden [Name (Just ("\"","\"")) "from"])
->     ]
-
-> star :: TestItem
-> star = Group "star" $ map (uncurry (TestScalarExpr ansi2011))
->     [("*", Star)
->     --,("t.*", Star2 "t")
->     --,("ROW(t.*,42)", App "ROW" [Star2 "t", NumLit "42"])
->     ]
-
-> parameter :: TestItem
-> parameter = Group "parameter"
->     [TestScalarExpr ansi2011 "?" Parameter
->     ,TestScalarExpr postgres "$13" $ PositionalArg 13]
-
-
-> dots :: TestItem
-> dots = Group "dot" $ map (uncurry (TestScalarExpr ansi2011))
->     [("t.a", Iden [Name Nothing "t",Name Nothing "a"])
->     ,("t.*", BinOp (Iden [Name Nothing "t"]) [Name Nothing "."] Star)
->     ,("a.b.c", Iden [Name Nothing "a",Name Nothing "b",Name Nothing "c"])
->     ,("ROW(t.*,42)", App [Name Nothing "ROW"] [BinOp (Iden [Name Nothing "t"]) [Name Nothing "."] Star, NumLit "42"])
->     ]
-
-> app :: TestItem
-> app = Group "app" $ map (uncurry (TestScalarExpr ansi2011))
->     [("f()", App [Name Nothing "f"] [])
->     ,("f(a)", App [Name Nothing "f"] [Iden [Name Nothing "a"]])
->     ,("f(a,b)", App [Name Nothing "f"] [Iden [Name Nothing "a"], Iden [Name Nothing "b"]])
->     ]
-
-> caseexp :: TestItem
-> caseexp = Group "caseexp" $ map (uncurry (TestScalarExpr ansi2011))
->     [("case a when 1 then 2 end"
->      ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1"]
->                               ,NumLit "2")] Nothing)
-
->     ,("case a when 1 then 2 when 3 then 4 end"
->      ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1"], NumLit "2")
->                              ,([NumLit "3"], NumLit "4")] Nothing)
-
->     ,("case a when 1 then 2 when 3 then 4 else 5 end"
->      ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1"], NumLit "2")
->                              ,([NumLit "3"], NumLit "4")]
->                              (Just $ NumLit "5"))
-
->     ,("case when a=1 then 2 when a=3 then 4 else 5 end"
->      ,Case Nothing [([BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "1")], NumLit "2")
->                    ,([BinOp (Iden [Name Nothing "a"]) [Name Nothing "="] (NumLit "3")], NumLit "4")]
->                    (Just $ NumLit "5"))
-
->     ,("case a when 1,2 then 10 when 3,4 then 20 end"
->      ,Case (Just $ Iden [Name Nothing "a"]) [([NumLit "1",NumLit "2"]
->                                ,NumLit "10")
->                              ,([NumLit "3",NumLit "4"]
->                                ,NumLit "20")]
->                              Nothing)
-
->     ]
-
-> convertfun :: TestItem 
-> convertfun = Group "convert" $ map (uncurry (TestScalarExpr sqlserver))
->     [("CONVERT(varchar, 25.65)"
->      ,Convert (TypeName [Name Nothing "varchar"]) (NumLit "25.65") Nothing)
->     ,("CONVERT(datetime, '2017-08-25')"
->      ,Convert (TypeName [Name Nothing "datetime"]) (StringLit "'" "'" "2017-08-25") Nothing)
->     ,("CONVERT(varchar, '2017-08-25', 101)"
->      ,Convert (TypeName [Name Nothing "varchar"]) (StringLit "'" "'" "2017-08-25") (Just 101))
->     ]
-
-> operators :: TestItem
-> operators = Group "operators"
->     [binaryOperators
->     ,unaryOperators
->     ,casts
->     ,miscOps]
-
-> binaryOperators :: TestItem
-> binaryOperators = Group "binaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
->     [("a + b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"] (Iden [Name Nothing "b"]))
->      -- sanity check fixities
->      -- todo: add more fixity checking
-
->     ,("a + b * c"
->      ,BinOp  (Iden [Name Nothing "a"]) [Name Nothing "+"]
->              (BinOp (Iden [Name Nothing "b"]) [Name Nothing "*"] (Iden [Name Nothing "c"])))
-
->     ,("a * b + c"
->      ,BinOp (BinOp (Iden [Name Nothing "a"]) [Name Nothing "*"] (Iden [Name Nothing "b"]))
->             [Name Nothing "+"] (Iden [Name Nothing "c"]))
->     ]
-
-> unaryOperators :: TestItem
-> unaryOperators = Group "unaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
->     [("not a", PrefixOp [Name Nothing "not"] $ Iden [Name Nothing "a"])
->     ,("not not a", PrefixOp [Name Nothing "not"] $ PrefixOp [Name Nothing "not"] $ Iden [Name Nothing "a"])
->     ,("+a", PrefixOp [Name Nothing "+"] $ Iden [Name Nothing "a"])
->     ,("-a", PrefixOp [Name Nothing "-"] $ Iden [Name Nothing "a"])
->     ]
-
-
-> casts :: TestItem
-> casts = Group "operators" $ map (uncurry (TestScalarExpr ansi2011))
->     [("cast('1' as int)"
->      ,Cast (StringLit "'" "'" "1") $ TypeName [Name Nothing "int"])
-
->     ,("int '3'"
->      ,TypedLit (TypeName [Name Nothing "int"]) "3")
-
->     ,("cast('1' as double precision)"
->      ,Cast (StringLit "'" "'" "1") $ TypeName [Name Nothing "double precision"])
-
->     ,("cast('1' as float(8))"
->      ,Cast (StringLit "'" "'" "1") $ PrecTypeName [Name Nothing "float"] 8)
-
->     ,("cast('1' as decimal(15,2))"
->      ,Cast (StringLit "'" "'" "1") $ PrecScaleTypeName [Name Nothing "decimal"] 15 2)
-
-
->     ,("double precision '3'"
->      ,TypedLit (TypeName [Name Nothing "double precision"]) "3")
->     ]
-
-> subqueries :: TestItem
-> subqueries = Group "unaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
->     [("exists (select a from t)", SubQueryExpr SqExists ms)
->     ,("(select a from t)", SubQueryExpr SqSq ms)
-
->     ,("a in (select a from t)"
->      ,In True (Iden [Name Nothing "a"]) (InQueryExpr ms))
-
->     ,("a not in (select a from t)"
->      ,In False (Iden [Name Nothing "a"]) (InQueryExpr ms))
-
->     ,("a > all (select a from t)"
->      ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing ">"] CPAll ms)
-
->     ,("a = some (select a from t)"
->      ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "="] CPSome ms)
-
->     ,("a <= any (select a from t)"
->      ,QuantifiedComparison (Iden [Name Nothing "a"]) [Name Nothing "<="] CPAny ms)
->     ]
->   where
->     ms = makeSelect
->          {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->          ,qeFrom = [TRSimple [Name Nothing "t"]]
->          }
-
-> miscOps :: TestItem
-> miscOps = Group "unaryOperators" $ map (uncurry (TestScalarExpr ansi2011))
->     [("a in (1,2,3)"
->      ,In True (Iden [Name Nothing "a"]) $ InList $ map NumLit ["1","2","3"])
-
->     ,("a is null", PostfixOp [Name Nothing "is null"] (Iden [Name Nothing "a"]))
->     ,("a is not null", PostfixOp [Name Nothing "is not null"] (Iden [Name Nothing "a"]))
->     ,("a is true", PostfixOp [Name Nothing "is true"] (Iden [Name Nothing "a"]))
->     ,("a is not true", PostfixOp [Name Nothing "is not true"] (Iden [Name Nothing "a"]))
->     ,("a is false", PostfixOp [Name Nothing "is false"] (Iden [Name Nothing "a"]))
->     ,("a is not false", PostfixOp [Name Nothing "is not false"] (Iden [Name Nothing "a"]))
->     ,("a is unknown", PostfixOp [Name Nothing "is unknown"] (Iden [Name Nothing "a"]))
->     ,("a is not unknown", PostfixOp [Name Nothing "is not unknown"] (Iden [Name Nothing "a"]))
->     ,("a is distinct from b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "is distinct from"] (Iden [Name Nothing "b"]))
-
->     ,("a is not distinct from b"
->      ,BinOp (Iden [Name Nothing "a"]) [Name Nothing "is not distinct from"] (Iden [Name Nothing "b"]))
-
->     ,("a like b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "like"] (Iden [Name Nothing "b"]))
->     ,("a not like b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "not like"] (Iden [Name Nothing "b"]))
->     ,("a is similar to b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "is similar to"] (Iden [Name Nothing "b"]))
-
->     ,("a is not similar to b"
->      ,BinOp (Iden [Name Nothing "a"]) [Name Nothing "is not similar to"] (Iden [Name Nothing "b"]))
-
->     ,("a overlaps b", BinOp (Iden [Name Nothing "a"]) [Name Nothing "overlaps"] (Iden [Name Nothing "b"]))
-
-
-special operators
-
->     ,("a between b and c", SpecialOp [Name Nothing "between"] [Iden [Name Nothing "a"]
->                                                ,Iden [Name Nothing "b"]
->                                                ,Iden [Name Nothing "c"]])
-
->     ,("a not between b and c", SpecialOp [Name Nothing "not between"] [Iden [Name Nothing "a"]
->                                                        ,Iden [Name Nothing "b"]
->                                                        ,Iden [Name Nothing "c"]])
->     ,("(1,2)"
->      ,SpecialOp [Name Nothing "rowctor"] [NumLit "1", NumLit "2"])
-
-
-keyword special operators
-
->     ,("extract(day from t)"
->      , SpecialOpK [Name Nothing "extract"] (Just $ Iden [Name Nothing "day"]) [("from", Iden [Name Nothing "t"])])
-
->     ,("substring(x from 1 for 2)"
->      ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"]) [("from", NumLit "1")
->                                                ,("for", NumLit "2")])
-
->     ,("substring(x from 1)"
->      ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"]) [("from", NumLit "1")])
-
->     ,("substring(x for 2)"
->      ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"]) [("for", NumLit "2")])
-
->     ,("substring(x from 1 for 2 collate C)"
->      ,SpecialOpK [Name Nothing "substring"] (Just $ Iden [Name Nothing "x"])
->           [("from", NumLit "1")
->           ,("for", Collate (NumLit "2") [Name Nothing "C"])])
-
-this doesn't work because of a overlap in the 'in' parser
-
->     ,("POSITION( string1 IN string2 )"
->      ,SpecialOpK [Name Nothing "position"] (Just $ Iden [Name Nothing "string1"]) [("in", Iden [Name Nothing "string2"])])
-
->     ,("CONVERT(char_value USING conversion_char_name)"
->      ,SpecialOpK [Name Nothing "convert"] (Just $ Iden [Name Nothing "char_value"])
->           [("using", Iden [Name Nothing "conversion_char_name"])])
-
->     ,("TRANSLATE(char_value USING translation_name)"
->      ,SpecialOpK [Name Nothing "translate"] (Just $ Iden [Name Nothing "char_value"])
->           [("using", Iden [Name Nothing "translation_name"])])
-
-OVERLAY(string PLACING embedded_string FROM start
-[FOR length])
-
->     ,("OVERLAY(string PLACING embedded_string FROM start)"
->      ,SpecialOpK [Name Nothing "overlay"] (Just $ Iden [Name Nothing "string"])
->           [("placing", Iden [Name Nothing "embedded_string"])
->           ,("from", Iden [Name Nothing "start"])])
-
->     ,("OVERLAY(string PLACING embedded_string FROM start FOR length)"
->      ,SpecialOpK [Name Nothing "overlay"] (Just $ Iden [Name Nothing "string"])
->           [("placing", Iden [Name Nothing "embedded_string"])
->           ,("from", Iden [Name Nothing "start"])
->           ,("for", Iden [Name Nothing "length"])])
-
-TRIM( [ [{LEADING | TRAILING | BOTH}] [removal_char] FROM ]
-target_string
-[COLLATE collation_name] )
-
-
-
->     ,("trim(from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("both", StringLit "'" "'" " ")
->           ,("from", Iden [Name Nothing "target_string"])])
-
->     ,("trim(leading from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("leading", StringLit "'" "'" " ")
->           ,("from", Iden [Name Nothing "target_string"])])
-
->     ,("trim(trailing from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("trailing", StringLit "'" "'" " ")
->           ,("from", Iden [Name Nothing "target_string"])])
-
->     ,("trim(both from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("both", StringLit "'" "'" " ")
->           ,("from", Iden [Name Nothing "target_string"])])
-
-
->     ,("trim(leading 'x' from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("leading", StringLit "'" "'" "x")
->           ,("from", Iden [Name Nothing "target_string"])])
-
->     ,("trim(trailing 'y' from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("trailing", StringLit "'" "'" "y")
->           ,("from", Iden [Name Nothing "target_string"])])
-
->     ,("trim(both 'z' from target_string collate C)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("both", StringLit "'" "'" "z")
->           ,("from", Collate (Iden [Name Nothing "target_string"]) [Name Nothing "C"])])
-
->     ,("trim(leading from target_string)"
->      ,SpecialOpK [Name Nothing "trim"] Nothing
->           [("leading", StringLit "'" "'" " ")
->           ,("from", Iden [Name Nothing "target_string"])])
-
-
->     ]
-
-> aggregates :: TestItem
-> aggregates = Group "aggregates" $ map (uncurry (TestScalarExpr ansi2011))
->     [("count(*)",App [Name Nothing "count"] [Star])
-
->     ,("sum(a order by a)"
->     ,AggregateApp [Name Nothing "sum"] SQDefault [Iden [Name Nothing "a"]]
->                   [SortSpec (Iden [Name Nothing "a"]) DirDefault NullsOrderDefault] Nothing)
-
->     ,("sum(all a)"
->     ,AggregateApp [Name Nothing "sum"] All [Iden [Name Nothing "a"]] [] Nothing)
-
->     ,("count(distinct a)"
->     ,AggregateApp [Name Nothing "count"] Distinct [Iden [Name Nothing "a"]] [] Nothing)
->     ]
-
-> windowFunctions :: TestItem
-> windowFunctions = Group "windowFunctions" $ map (uncurry (TestScalarExpr ansi2011))
->     [("max(a) over ()", WindowApp [Name Nothing "max"] [Iden [Name Nothing "a"]] [] [] Nothing)
->     ,("count(*) over ()", WindowApp [Name Nothing "count"] [Star] [] [] Nothing)
-
->     ,("max(a) over (partition by b)"
->      ,WindowApp [Name Nothing "max"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]] [] Nothing)
-
->     ,("max(a) over (partition by b,c)"
->      ,WindowApp [Name Nothing "max"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"],Iden [Name Nothing "c"]] [] Nothing)
-
->     ,("sum(a) over (order by b)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] []
->           [SortSpec (Iden [Name Nothing "b"]) DirDefault NullsOrderDefault] Nothing)
-
->     ,("sum(a) over (order by b desc,c)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] []
->           [SortSpec (Iden [Name Nothing "b"]) Desc NullsOrderDefault
->           ,SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault] Nothing)
-
->     ,("sum(a) over (partition by b order by c)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->           [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault] Nothing)
-
->     ,("sum(a) over (partition by b order by c range unbounded preceding)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->       [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
->       $ Just $ FrameFrom FrameRange UnboundedPreceding)
-
->     ,("sum(a) over (partition by b order by c range 5 preceding)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->       [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
->       $ Just $ FrameFrom FrameRange $ Preceding (NumLit "5"))
-
->     ,("sum(a) over (partition by b order by c range current row)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->       [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
->       $ Just $ FrameFrom FrameRange Current)
-
->     ,("sum(a) over (partition by b order by c rows 5 following)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->       [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
->       $ Just $ FrameFrom FrameRows $ Following (NumLit "5"))
-
->     ,("sum(a) over (partition by b order by c range unbounded following)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->       [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
->       $ Just $ FrameFrom FrameRange UnboundedFollowing)
-
->     ,("sum(a) over (partition by b order by c \n\
->       \range between 5 preceding and 5 following)"
->      ,WindowApp [Name Nothing "sum"] [Iden [Name Nothing "a"]] [Iden [Name Nothing "b"]]
->       [SortSpec (Iden [Name Nothing "c"]) DirDefault NullsOrderDefault]
->       $ Just $ FrameBetween FrameRange
->                             (Preceding (NumLit "5"))
->                             (Following (NumLit "5")))
-
->     ]
-
-> parens :: TestItem
-> parens = Group "parens" $ map (uncurry (TestScalarExpr ansi2011))
->     [("(a)", Parens (Iden [Name Nothing "a"]))
->     ,("(a + b)", Parens (BinOp (Iden [Name Nothing "a"]) [Name Nothing "+"] (Iden [Name Nothing "b"])))
->     ]
-
-> functionsWithReservedNames :: TestItem
-> functionsWithReservedNames = Group "functionsWithReservedNames" $ map t
->     ["abs"
->     ,"char_length"
->     ]
->   where
->     t fn = TestScalarExpr ansi2011 (fn ++ "(a)") $ App [Name Nothing fn] [Iden [Name Nothing "a"]]
-
diff --git a/tools/Language/SQL/SimpleSQL/TableRefs.hs b/tools/Language/SQL/SimpleSQL/TableRefs.hs
new file mode 100644
index 0000000..8aad6f6
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/TableRefs.hs
@@ -0,0 +1,107 @@
+
+{-
+These are the tests for parsing focusing on the from part of query
+expression
+-}
+
+module Language.SQL.SimpleSQL.TableRefs (tableRefTests) where
+
+import Language.SQL.SimpleSQL.TestTypes
+import Language.SQL.SimpleSQL.Syntax
+
+
+tableRefTests :: TestItem
+tableRefTests = Group "tableRefTests" $ map (uncurry (TestQueryExpr ansi2011))
+    [("select a from t"
+     ,ms [TRSimple [Name Nothing "t"]])
+
+     ,("select a from f(a)"
+      ,ms [TRFunction [Name Nothing "f"] [Iden [Name Nothing "a"]]])
+
+    ,("select a from t,u"
+     ,ms [TRSimple [Name Nothing "t"], TRSimple [Name Nothing "u"]])
+
+    ,("select a from s.t"
+     ,ms [TRSimple [Name Nothing "s", Name Nothing "t"]])
+
+-- these lateral queries make no sense but the syntax is valid
+
+    ,("select a from lateral a"
+     ,ms [TRLateral $ TRSimple [Name Nothing "a"]])
+
+    ,("select a from lateral a,b"
+     ,ms [TRLateral $ TRSimple [Name Nothing "a"], TRSimple [Name Nothing "b"]])
+
+    ,("select a from a, lateral b"
+     ,ms [TRSimple [Name Nothing "a"], TRLateral $ TRSimple [Name Nothing "b"]])
+
+    ,("select a from a natural join lateral b"
+     ,ms [TRJoin (TRSimple [Name Nothing "a"]) True JInner
+                 (TRLateral $ TRSimple [Name Nothing "b"])
+                 Nothing])
+
+    ,("select a from lateral a natural join lateral b"
+     ,ms [TRJoin (TRLateral $ TRSimple [Name Nothing "a"]) True JInner
+                 (TRLateral $ TRSimple [Name Nothing "b"])
+                 Nothing])
+
+
+    ,("select a from t inner join u on expr"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JInner (TRSimple [Name Nothing "u"])
+                       (Just $ JoinOn $ Iden [Name Nothing "expr"])])
+
+    ,("select a from t join u on expr"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JInner (TRSimple [Name Nothing "u"])
+                       (Just $ JoinOn $ Iden [Name Nothing "expr"])])
+
+    ,("select a from t left join u on expr"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JLeft (TRSimple [Name Nothing "u"])
+                       (Just $ JoinOn $ Iden [Name Nothing "expr"])])
+
+    ,("select a from t right join u on expr"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JRight (TRSimple [Name Nothing "u"])
+                       (Just $ JoinOn $ Iden [Name Nothing "expr"])])
+
+    ,("select a from t full join u on expr"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JFull (TRSimple [Name Nothing "u"])
+                       (Just $ JoinOn $ Iden [Name Nothing "expr"])])
+
+    ,("select a from t cross join u"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False
+                       JCross (TRSimple [Name Nothing "u"]) Nothing])
+
+    ,("select a from t natural inner join u"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) True JInner (TRSimple [Name Nothing "u"])
+                       Nothing])
+
+    ,("select a from t inner join u using(a,b)"
+     ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JInner (TRSimple [Name Nothing "u"])
+                       (Just $ JoinUsing [Name Nothing "a", Name Nothing "b"])])
+
+    ,("select a from (select a from t)"
+     ,ms [TRQueryExpr $ ms [TRSimple [Name Nothing "t"]]])
+
+    ,("select a from t as u"
+     ,ms [TRAlias (TRSimple [Name Nothing "t"]) (Alias (Name Nothing "u") Nothing)])
+
+    ,("select a from t u"
+     ,ms [TRAlias (TRSimple [Name Nothing "t"]) (Alias (Name Nothing "u") Nothing)])
+
+    ,("select a from t u(b)"
+     ,ms [TRAlias (TRSimple [Name Nothing "t"]) (Alias (Name Nothing "u") $ Just [Name Nothing "b"])])
+
+    ,("select a from (t cross join u) as u"
+     ,ms [TRAlias (TRParens $
+                   TRJoin (TRSimple [Name Nothing "t"]) False JCross (TRSimple [Name Nothing "u"]) Nothing)
+                          (Alias (Name Nothing "u") Nothing)])
+     -- todo: not sure if the associativity is correct
+
+    ,("select a from t cross join u cross join v",
+       ms [TRJoin
+           (TRJoin (TRSimple [Name Nothing "t"]) False
+                   JCross (TRSimple [Name Nothing "u"]) Nothing)
+           False JCross (TRSimple [Name Nothing "v"]) Nothing])
+    ]
+  where
+    ms f = makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
+                      ,qeFrom = f}
diff --git a/tools/Language/SQL/SimpleSQL/TableRefs.lhs b/tools/Language/SQL/SimpleSQL/TableRefs.lhs
deleted file mode 100644
index 84a69fe..0000000
--- a/tools/Language/SQL/SimpleSQL/TableRefs.lhs
+++ /dev/null
@@ -1,105 +0,0 @@
-
-These are the tests for parsing focusing on the from part of query
-expression
-
-> module Language.SQL.SimpleSQL.TableRefs (tableRefTests) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-> import Language.SQL.SimpleSQL.Syntax
-
-
-> tableRefTests :: TestItem
-> tableRefTests = Group "tableRefTests" $ map (uncurry (TestQueryExpr ansi2011))
->     [("select a from t"
->      ,ms [TRSimple [Name Nothing "t"]])
-
->      ,("select a from f(a)"
->       ,ms [TRFunction [Name Nothing "f"] [Iden [Name Nothing "a"]]])
-
->     ,("select a from t,u"
->      ,ms [TRSimple [Name Nothing "t"], TRSimple [Name Nothing "u"]])
-
->     ,("select a from s.t"
->      ,ms [TRSimple [Name Nothing "s", Name Nothing "t"]])
-
-these lateral queries make no sense but the syntax is valid
-
->     ,("select a from lateral a"
->      ,ms [TRLateral $ TRSimple [Name Nothing "a"]])
-
->     ,("select a from lateral a,b"
->      ,ms [TRLateral $ TRSimple [Name Nothing "a"], TRSimple [Name Nothing "b"]])
-
->     ,("select a from a, lateral b"
->      ,ms [TRSimple [Name Nothing "a"], TRLateral $ TRSimple [Name Nothing "b"]])
-
->     ,("select a from a natural join lateral b"
->      ,ms [TRJoin (TRSimple [Name Nothing "a"]) True JInner
->                  (TRLateral $ TRSimple [Name Nothing "b"])
->                  Nothing])
-
->     ,("select a from lateral a natural join lateral b"
->      ,ms [TRJoin (TRLateral $ TRSimple [Name Nothing "a"]) True JInner
->                  (TRLateral $ TRSimple [Name Nothing "b"])
->                  Nothing])
-
-
->     ,("select a from t inner join u on expr"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JInner (TRSimple [Name Nothing "u"])
->                        (Just $ JoinOn $ Iden [Name Nothing "expr"])])
-
->     ,("select a from t join u on expr"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JInner (TRSimple [Name Nothing "u"])
->                        (Just $ JoinOn $ Iden [Name Nothing "expr"])])
-
->     ,("select a from t left join u on expr"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JLeft (TRSimple [Name Nothing "u"])
->                        (Just $ JoinOn $ Iden [Name Nothing "expr"])])
-
->     ,("select a from t right join u on expr"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JRight (TRSimple [Name Nothing "u"])
->                        (Just $ JoinOn $ Iden [Name Nothing "expr"])])
-
->     ,("select a from t full join u on expr"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JFull (TRSimple [Name Nothing "u"])
->                        (Just $ JoinOn $ Iden [Name Nothing "expr"])])
-
->     ,("select a from t cross join u"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False
->                        JCross (TRSimple [Name Nothing "u"]) Nothing])
-
->     ,("select a from t natural inner join u"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) True JInner (TRSimple [Name Nothing "u"])
->                        Nothing])
-
->     ,("select a from t inner join u using(a,b)"
->      ,ms [TRJoin (TRSimple [Name Nothing "t"]) False JInner (TRSimple [Name Nothing "u"])
->                        (Just $ JoinUsing [Name Nothing "a", Name Nothing "b"])])
-
->     ,("select a from (select a from t)"
->      ,ms [TRQueryExpr $ ms [TRSimple [Name Nothing "t"]]])
-
->     ,("select a from t as u"
->      ,ms [TRAlias (TRSimple [Name Nothing "t"]) (Alias (Name Nothing "u") Nothing)])
-
->     ,("select a from t u"
->      ,ms [TRAlias (TRSimple [Name Nothing "t"]) (Alias (Name Nothing "u") Nothing)])
-
->     ,("select a from t u(b)"
->      ,ms [TRAlias (TRSimple [Name Nothing "t"]) (Alias (Name Nothing "u") $ Just [Name Nothing "b"])])
-
->     ,("select a from (t cross join u) as u"
->      ,ms [TRAlias (TRParens $
->                    TRJoin (TRSimple [Name Nothing "t"]) False JCross (TRSimple [Name Nothing "u"]) Nothing)
->                           (Alias (Name Nothing "u") Nothing)])
->      -- todo: not sure if the associativity is correct
-
->     ,("select a from t cross join u cross join v",
->        ms [TRJoin
->            (TRJoin (TRSimple [Name Nothing "t"]) False
->                    JCross (TRSimple [Name Nothing "u"]) Nothing)
->            False JCross (TRSimple [Name Nothing "v"]) Nothing])
->     ]
->   where
->     ms f = makeSelect {qeSelectList = [(Iden [Name Nothing "a"],Nothing)]
->                       ,qeFrom = f}
diff --git a/tools/Language/SQL/SimpleSQL/TestTypes.hs b/tools/Language/SQL/SimpleSQL/TestTypes.hs
new file mode 100644
index 0000000..87e0f7b
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/TestTypes.hs
@@ -0,0 +1,43 @@
+
+{-
+These are the types used to define the tests as pure data. See the
+Tests.hs module for the 'interpreter'.
+-}
+
+module Language.SQL.SimpleSQL.TestTypes
+    (TestItem(..)
+    ,module Language.SQL.SimpleSQL.Dialect
+    ) where
+
+import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.Lex (Token)
+import Language.SQL.SimpleSQL.Dialect
+
+{-
+TODO: maybe make the dialect args into [dialect], then each test
+checks all the dialects mentioned work, and all the dialects not
+mentioned give a parse error. Not sure if this will be too awkward due
+to lots of tricky exceptions/variations.
+-}
+
+data TestItem = Group String [TestItem]
+              | TestScalarExpr Dialect String ScalarExpr
+              | TestQueryExpr Dialect String QueryExpr
+              | TestStatement Dialect String Statement
+              | TestStatements Dialect String [Statement]
+
+{-
+this just checks the sql parses without error, mostly just an
+intermediate when I'm too lazy to write out the parsed AST. These
+should all be TODO to convert to a testqueryexpr test.
+-}
+
+              | ParseQueryExpr Dialect String
+
+-- check that the string given fails to parse
+
+              | ParseQueryExprFails Dialect String
+              | ParseScalarExprFails Dialect String
+              | LexTest Dialect String [Token]
+              | LexFails Dialect String
+                deriving (Eq,Show)
diff --git a/tools/Language/SQL/SimpleSQL/TestTypes.lhs b/tools/Language/SQL/SimpleSQL/TestTypes.lhs
deleted file mode 100644
index dbdf140..0000000
--- a/tools/Language/SQL/SimpleSQL/TestTypes.lhs
+++ /dev/null
@@ -1,37 +0,0 @@
-
-This is the types used to define the tests as pure data. See the
-Tests.lhs module for the 'interpreter'.
-
-> module Language.SQL.SimpleSQL.TestTypes
->     (TestItem(..)
->     ,module Language.SQL.SimpleSQL.Dialect
->     ) where
-
-> import Language.SQL.SimpleSQL.Syntax
-> import Language.SQL.SimpleSQL.Lex (Token)
-> import Language.SQL.SimpleSQL.Dialect
-
-TODO: maybe make the dialect args into [dialect], then each test
-checks all the dialects mentioned work, and all the dialects not
-mentioned give a parse error. Not sure if this will be too awkward due
-to lots of tricky exceptions/variationsx.
-
-> data TestItem = Group String [TestItem]
->               | TestScalarExpr Dialect String ScalarExpr
->               | TestQueryExpr Dialect String QueryExpr
->               | TestStatement Dialect String Statement
->               | TestStatements Dialect String [Statement]
-
-this just checks the sql parses without error, mostly just a
-intermediate when I'm too lazy to write out the parsed AST. These
-should all be TODO to convert to a testqueryexpr test.
-
->               | ParseQueryExpr Dialect String
-
-check that the string given fails to parse
-
->               | ParseQueryExprFails Dialect String
->               | ParseScalarExprFails Dialect String
->               | LexTest Dialect String [Token]
->               | LexFails Dialect String
->                 deriving (Eq,Show)
diff --git a/tools/Language/SQL/SimpleSQL/Tests.hs b/tools/Language/SQL/SimpleSQL/Tests.hs
new file mode 100644
index 0000000..909a5d9
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/Tests.hs
@@ -0,0 +1,175 @@
+
+{-
+This is the main tests module which exposes the test data plus the
+Test.Framework tests. It also contains the code which converts the
+test data to the Test.Framework tests.
+-}
+
+module Language.SQL.SimpleSQL.Tests
+    (testData
+    ,tests
+    ,TestItem(..)
+    ) where
+
+import qualified Test.Tasty as T
+import qualified Test.Tasty.HUnit as H
+
+--import Language.SQL.SimpleSQL.Syntax
+import Language.SQL.SimpleSQL.Pretty
+import Language.SQL.SimpleSQL.Parse
+import Language.SQL.SimpleSQL.Lex
+
+import Language.SQL.SimpleSQL.TestTypes
+
+import Language.SQL.SimpleSQL.FullQueries
+import Language.SQL.SimpleSQL.GroupBy
+import Language.SQL.SimpleSQL.Postgres
+import Language.SQL.SimpleSQL.QueryExprComponents
+import Language.SQL.SimpleSQL.QueryExprs
+import Language.SQL.SimpleSQL.TableRefs
+import Language.SQL.SimpleSQL.ScalarExprs
+import Language.SQL.SimpleSQL.Odbc
+import Language.SQL.SimpleSQL.Tpch
+import Language.SQL.SimpleSQL.LexerTests
+import Language.SQL.SimpleSQL.EmptyStatement
+import Language.SQL.SimpleSQL.CreateIndex
+
+import Language.SQL.SimpleSQL.SQL2011Queries
+import Language.SQL.SimpleSQL.SQL2011AccessControl
+import Language.SQL.SimpleSQL.SQL2011Bits
+import Language.SQL.SimpleSQL.SQL2011DataManipulation
+import Language.SQL.SimpleSQL.SQL2011Schema
+
+import Language.SQL.SimpleSQL.MySQL
+import Language.SQL.SimpleSQL.Oracle
+import Language.SQL.SimpleSQL.CustomDialect
+
+
+{-
+Order the tests to start from the simplest first. This is also the
+order on the generated documentation.
+-}
+
+testData :: TestItem
+testData =
+    Group "parserTest"
+    [lexerTests
+    ,scalarExprTests
+    ,odbcTests
+    ,queryExprComponentTests
+    ,queryExprsTests
+    ,tableRefTests
+    ,groupByTests
+    ,fullQueriesTests
+    ,postgresTests
+    ,tpchTests
+    ,sql2011QueryTests
+    ,sql2011DataManipulationTests
+    ,sql2011SchemaTests
+    ,sql2011AccessControlTests
+    ,sql2011BitsTests
+    ,mySQLTests
+    ,oracleTests
+    ,customDialectTests
+    ,emptyStatementTests
+    ,createIndexTests
+    ]
+
+tests :: T.TestTree
+tests = itemToTest testData
+
+--runTests :: IO ()
+--runTests = void $ H.runTestTT $ itemToTest testData
+
+itemToTest :: TestItem -> T.TestTree
+itemToTest (Group nm ts) =
+    T.testGroup nm $ map itemToTest ts
+itemToTest (TestScalarExpr d str expected) =
+    toTest parseScalarExpr prettyScalarExpr d str expected
+itemToTest (TestQueryExpr d str expected) =
+    toTest parseQueryExpr prettyQueryExpr d str expected
+itemToTest (TestStatement d str expected) =
+    toTest parseStatement prettyStatement d str expected
+itemToTest (TestStatements d str expected) =
+    toTest parseStatements prettyStatements d str expected
+itemToTest (ParseQueryExpr d str) =
+    toPTest parseQueryExpr prettyQueryExpr d str
+
+itemToTest (ParseQueryExprFails d str) =
+    toFTest parseQueryExpr prettyQueryExpr d str
+
+itemToTest (ParseScalarExprFails d str) =
+    toFTest parseScalarExpr prettyScalarExpr d str
+
+itemToTest (LexTest d s ts) = makeLexerTest d s ts
+itemToTest (LexFails d s) = makeLexingFailsTest d s
+
+makeLexerTest :: Dialect -> String -> [Token] -> T.TestTree
+makeLexerTest d s ts = H.testCase s $ do
+    let lx = either (error . show) id $ lexSQL d "" Nothing s
+    H.assertEqual "" ts $ map snd lx
+    let s' = prettyTokens d $ map snd lx
+    H.assertEqual "pretty print" s s'
+
+makeLexingFailsTest :: Dialect -> String -> T.TestTree
+makeLexingFailsTest d s = H.testCase s $ do
+    case lexSQL d "" Nothing s of
+         Right x -> H.assertFailure $ "lexing should have failed: " ++ s ++ "\ngot: " ++ show x
+         Left _ -> return ()
+
+
+toTest :: (Eq a, Show a) =>
+          (Dialect -> String -> Maybe (Int,Int) -> String -> Either ParseError a)
+       -> (Dialect -> a -> String)
+       -> Dialect
+       -> String
+       -> a
+       -> T.TestTree
+toTest parser pp d str expected = H.testCase str $ do
+        let egot = parser d "" Nothing str
+        case egot of
+            Left e -> H.assertFailure $ peFormattedError e
+            Right got -> do
+                H.assertEqual "" expected got
+                let str' = pp d got
+                let egot' = parser d "" Nothing str'
+                case egot' of
+                    Left e' -> H.assertFailure $ "pp roundtrip"
+                                                 ++ "\n" ++ str'
+                                                 ++ peFormattedError e'
+                    Right got' -> H.assertEqual
+                                  ("pp roundtrip" ++ "\n" ++ str')
+                                   expected got'
+
+toPTest :: (Eq a, Show a) =>
+          (Dialect -> String -> Maybe (Int,Int) -> String -> Either ParseError a)
+       -> (Dialect -> a -> String)
+       -> Dialect
+       -> String
+       -> T.TestTree
+toPTest parser pp d str = H.testCase str $ do
+        let egot = parser d "" Nothing str
+        case egot of
+            Left e -> H.assertFailure $ peFormattedError e
+            Right got -> do
+                let str' = pp d got
+                let egot' = parser d "" Nothing str'
+                case egot' of
+                    Left e' -> H.assertFailure $ "pp roundtrip "
+                                                 ++ "\n" ++ str' ++ "\n"
+                                                 ++ peFormattedError e'
+                    Right _got' -> return ()
+
+
+toFTest :: (Eq a, Show a) =>
+          (Dialect -> String -> Maybe (Int,Int) -> String -> Either ParseError a)
+       -> (Dialect -> a -> String)
+       -> Dialect
+       -> String
+       -> T.TestTree
+toFTest parser _pp d str = H.testCase str $ do
+        let egot = parser d "" Nothing str
+        case egot of
+            Left _e -> return ()
+            Right _got ->
+              H.assertFailure $ "parse didn't fail: " ++ show d ++ "\n" ++ str
diff --git a/tools/Language/SQL/SimpleSQL/Tests.lhs b/tools/Language/SQL/SimpleSQL/Tests.lhs
deleted file mode 100644
index 1cc5c2e..0000000
--- a/tools/Language/SQL/SimpleSQL/Tests.lhs
+++ /dev/null
@@ -1,171 +0,0 @@
-
-This is the main tests module which exposes the test data plus the
-Test.Framework tests. It also contains the code which converts the
-test data to the Test.Framework tests.
-
-> module Language.SQL.SimpleSQL.Tests
->     (testData
->     ,tests
->     ,TestItem(..)
->     ) where
-
-> import qualified Test.Tasty as T
-> import qualified Test.Tasty.HUnit as H
-
-> --import Language.SQL.SimpleSQL.Syntax
-> import Language.SQL.SimpleSQL.Pretty
-> import Language.SQL.SimpleSQL.Parse
-> import Language.SQL.SimpleSQL.Lex
-
-> import Language.SQL.SimpleSQL.TestTypes
-
-> import Language.SQL.SimpleSQL.FullQueries
-> import Language.SQL.SimpleSQL.GroupBy
-> import Language.SQL.SimpleSQL.Postgres
-> import Language.SQL.SimpleSQL.QueryExprComponents
-> import Language.SQL.SimpleSQL.QueryExprs
-> import Language.SQL.SimpleSQL.TableRefs
-> import Language.SQL.SimpleSQL.ScalarExprs
-> import Language.SQL.SimpleSQL.Odbc
-> import Language.SQL.SimpleSQL.Tpch
-> import Language.SQL.SimpleSQL.LexerTests
-> import Language.SQL.SimpleSQL.EmptyStatement
-> import Language.SQL.SimpleSQL.CreateIndex
-
-> import Language.SQL.SimpleSQL.SQL2011Queries
-> import Language.SQL.SimpleSQL.SQL2011AccessControl
-> import Language.SQL.SimpleSQL.SQL2011Bits
-> import Language.SQL.SimpleSQL.SQL2011DataManipulation
-> import Language.SQL.SimpleSQL.SQL2011Schema
-
-> import Language.SQL.SimpleSQL.MySQL
-> import Language.SQL.SimpleSQL.Oracle
-> import Language.SQL.SimpleSQL.CustomDialect
-
-
-Order the tests to start from the simplest first. This is also the
-order on the generated documentation.
-
-> testData :: TestItem
-> testData =
->     Group "parserTest"
->     [lexerTests
->     ,scalarExprTests
->     ,odbcTests
->     ,queryExprComponentTests
->     ,queryExprsTests
->     ,tableRefTests
->     ,groupByTests
->     ,fullQueriesTests
->     ,postgresTests
->     ,tpchTests
->     ,sql2011QueryTests
->     ,sql2011DataManipulationTests
->     ,sql2011SchemaTests
->     ,sql2011AccessControlTests
->     ,sql2011BitsTests
->     ,mySQLTests
->     ,oracleTests
->     ,customDialectTests
->     ,emptyStatementTests
->     ,createIndexTests
->     ]
-
-> tests :: T.TestTree
-> tests = itemToTest testData
-
-> --runTests :: IO ()
-> --runTests = void $ H.runTestTT $ itemToTest testData
-
-> itemToTest :: TestItem -> T.TestTree
-> itemToTest (Group nm ts) =
->     T.testGroup nm $ map itemToTest ts
-> itemToTest (TestScalarExpr d str expected) =
->     toTest parseScalarExpr prettyScalarExpr d str expected
-> itemToTest (TestQueryExpr d str expected) =
->     toTest parseQueryExpr prettyQueryExpr d str expected
-> itemToTest (TestStatement d str expected) =
->     toTest parseStatement prettyStatement d str expected
-> itemToTest (TestStatements d str expected) =
->     toTest parseStatements prettyStatements d str expected
-> itemToTest (ParseQueryExpr d str) =
->     toPTest parseQueryExpr prettyQueryExpr d str
-
-> itemToTest (ParseQueryExprFails d str) =
->     toFTest parseQueryExpr prettyQueryExpr d str
-
-> itemToTest (ParseScalarExprFails d str) =
->     toFTest parseScalarExpr prettyScalarExpr d str
-
-> itemToTest (LexTest d s ts) = makeLexerTest d s ts
-> itemToTest (LexFails d s) = makeLexingFailsTest d s
-
-> makeLexerTest :: Dialect -> String -> [Token] -> T.TestTree
-> makeLexerTest d s ts = H.testCase s $ do
->     let lx = either (error . show) id $ lexSQL d "" Nothing s
->     H.assertEqual "" ts $ map snd lx
->     let s' = prettyTokens d $ map snd lx
->     H.assertEqual "pretty print" s s'
-
-> makeLexingFailsTest :: Dialect -> String -> T.TestTree
-> makeLexingFailsTest d s = H.testCase s $ do
->     case lexSQL d "" Nothing s of
->          Right x -> H.assertFailure $ "lexing should have failed: " ++ s ++ "\ngot: " ++ show x
->          Left _ -> return ()
-
-
-> toTest :: (Eq a, Show a) =>
->           (Dialect -> String -> Maybe (Int,Int) -> String -> Either ParseError a)
->        -> (Dialect -> a -> String)
->        -> Dialect
->        -> String
->        -> a
->        -> T.TestTree
-> toTest parser pp d str expected = H.testCase str $ do
->         let egot = parser d "" Nothing str
->         case egot of
->             Left e -> H.assertFailure $ peFormattedError e
->             Right got -> do
->                 H.assertEqual "" expected got
->                 let str' = pp d got
->                 let egot' = parser d "" Nothing str'
->                 case egot' of
->                     Left e' -> H.assertFailure $ "pp roundtrip"
->                                                  ++ "\n" ++ str'
->                                                  ++ peFormattedError e'
->                     Right got' -> H.assertEqual
->                                   ("pp roundtrip" ++ "\n" ++ str')
->                                    expected got'
-
-> toPTest :: (Eq a, Show a) =>
->           (Dialect -> String -> Maybe (Int,Int) -> String -> Either ParseError a)
->        -> (Dialect -> a -> String)
->        -> Dialect
->        -> String
->        -> T.TestTree
-> toPTest parser pp d str = H.testCase str $ do
->         let egot = parser d "" Nothing str
->         case egot of
->             Left e -> H.assertFailure $ peFormattedError e
->             Right got -> do
->                 let str' = pp d got
->                 let egot' = parser d "" Nothing str'
->                 case egot' of
->                     Left e' -> H.assertFailure $ "pp roundtrip "
->                                                  ++ "\n" ++ str' ++ "\n"
->                                                  ++ peFormattedError e'
->                     Right _got' -> return ()
-
-
-> toFTest :: (Eq a, Show a) =>
->           (Dialect -> String -> Maybe (Int,Int) -> String -> Either ParseError a)
->        -> (Dialect -> a -> String)
->        -> Dialect
->        -> String
->        -> T.TestTree
-> toFTest parser _pp d str = H.testCase str $ do
->         let egot = parser d "" Nothing str
->         case egot of
->             Left _e -> return ()
->             Right _got ->
->               H.assertFailure $ "parse didn't fail: " ++ show d ++ "\n" ++ str
diff --git a/tools/Language/SQL/SimpleSQL/Tpch.hs b/tools/Language/SQL/SimpleSQL/Tpch.hs
new file mode 100644
index 0000000..c00a6be
--- /dev/null
+++ b/tools/Language/SQL/SimpleSQL/Tpch.hs
@@ -0,0 +1,685 @@
+
+{-
+Some tests for parsing the tpch queries
+
+The changes made to the official syntax are:
+1. replace the set rowcount with ansi standard fetch first n rows only
+2. replace the create view, query, drop view sequence with a query
+   using a common table expression
+-}
+
+module Language.SQL.SimpleSQL.Tpch (tpchTests,tpchQueries) where
+
+import Language.SQL.SimpleSQL.TestTypes
+
+tpchTests :: TestItem
+tpchTests =
+    Group "parse tpch"
+    $ map (ParseQueryExpr ansi2011 . snd) tpchQueries
+
+tpchQueries :: [(String,String)]
+tpchQueries =
+  [("Q1","\n\
+         \select\n\
+         \        l_returnflag,\n\
+         \        l_linestatus,\n\
+         \        sum(l_quantity) as sum_qty,\n\
+         \        sum(l_extendedprice) as sum_base_price,\n\
+         \        sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,\n\
+         \        sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,\n\
+         \        avg(l_quantity) as avg_qty,\n\
+         \        avg(l_extendedprice) as avg_price,\n\
+         \        avg(l_discount) as avg_disc,\n\
+         \        count(*) as count_order\n\
+         \from\n\
+         \        lineitem\n\
+         \where\n\
+         \        l_shipdate <= date '1998-12-01' - interval '63' day (3)\n\
+         \group by\n\
+         \        l_returnflag,\n\
+         \        l_linestatus\n\
+         \order by\n\
+         \        l_returnflag,\n\
+         \        l_linestatus")
+  ,("Q2","\n\
+         \select\n\
+         \        s_acctbal,\n\
+         \        s_name,\n\
+         \        n_name,\n\
+         \        p_partkey,\n\
+         \        p_mfgr,\n\
+         \        s_address,\n\
+         \        s_phone,\n\
+         \        s_comment\n\
+         \from\n\
+         \        part,\n\
+         \        supplier,\n\
+         \        partsupp,\n\
+         \        nation,\n\
+         \        region\n\
+         \where\n\
+         \        p_partkey = ps_partkey\n\
+         \        and s_suppkey = ps_suppkey\n\
+         \        and p_size = 15\n\
+         \        and p_type like '%BRASS'\n\
+         \        and s_nationkey = n_nationkey\n\
+         \        and n_regionkey = r_regionkey\n\
+         \        and r_name = 'EUROPE'\n\
+         \        and ps_supplycost = (\n\
+         \                select\n\
+         \                        min(ps_supplycost)\n\
+         \                from\n\
+         \                        partsupp,\n\
+         \                        supplier,\n\
+         \                        nation,\n\
+         \                        region\n\
+         \                where\n\
+         \                        p_partkey = ps_partkey\n\
+         \                        and s_suppkey = ps_suppkey\n\
+         \                        and s_nationkey = n_nationkey\n\
+         \                        and n_regionkey = r_regionkey\n\
+         \                        and r_name = 'EUROPE'\n\
+         \        )\n\
+         \order by\n\
+         \        s_acctbal desc,\n\
+         \        n_name,\n\
+         \        s_name,\n\
+         \        p_partkey\n\
+         \fetch first 100 rows only")
+  ,("Q3","\n\
+         \ select\n\
+         \         l_orderkey,\n\
+         \         sum(l_extendedprice * (1 - l_discount)) as revenue,\n\
+         \         o_orderdate,\n\
+         \         o_shippriority\n\
+         \ from\n\
+         \         customer,\n\
+         \         orders,\n\
+         \         lineitem\n\
+         \ where\n\
+         \         c_mktsegment = 'MACHINERY'\n\
+         \         and c_custkey = o_custkey\n\
+         \         and l_orderkey = o_orderkey\n\
+         \         and o_orderdate < date '1995-03-21'\n\
+         \         and l_shipdate > date '1995-03-21'\n\
+         \ group by\n\
+         \         l_orderkey,\n\
+         \         o_orderdate,\n\
+         \         o_shippriority\n\
+         \ order by\n\
+         \         revenue desc,\n\
+         \         o_orderdate\n\
+         \ fetch first 10 rows only")
+  ,("Q4","\n\
+         \ select\n\
+         \         o_orderpriority,\n\
+         \         count(*) as order_count\n\
+         \ from\n\
+         \         orders\n\
+         \ where\n\
+         \         o_orderdate >= date '1996-03-01'\n\
+         \         and o_orderdate < date '1996-03-01' + interval '3' month\n\
+         \         and exists (\n\
+         \                 select\n\
+         \                         *\n\
+         \                 from\n\
+         \                         lineitem\n\
+         \                 where\n\
+         \                         l_orderkey = o_orderkey\n\
+         \                         and l_commitdate < l_receiptdate\n\
+         \         )\n\
+         \ group by\n\
+         \         o_orderpriority\n\
+         \ order by\n\
+         \         o_orderpriority")
+  ,("Q5","\n\
+         \ select\n\
+         \         n_name,\n\
+         \         sum(l_extendedprice * (1 - l_discount)) as revenue\n\
+         \ from\n\
+         \         customer,\n\
+         \         orders,\n\
+         \         lineitem,\n\
+         \         supplier,\n\
+         \         nation,\n\
+         \         region\n\
+         \ where\n\
+         \         c_custkey = o_custkey\n\
+         \         and l_orderkey = o_orderkey\n\
+         \         and l_suppkey = s_suppkey\n\
+         \         and c_nationkey = s_nationkey\n\
+         \         and s_nationkey = n_nationkey\n\
+         \         and n_regionkey = r_regionkey\n\
+         \         and r_name = 'EUROPE'\n\
+         \         and o_orderdate >= date '1997-01-01'\n\
+         \         and o_orderdate < date '1997-01-01' + interval '1' year\n\
+         \ group by\n\
+         \         n_name\n\
+         \ order by\n\
+         \         revenue desc")
+  ,("Q6","\n\
+         \ select\n\
+         \         sum(l_extendedprice * l_discount) as revenue\n\
+         \ from\n\
+         \         lineitem\n\
+         \ where\n\
+         \         l_shipdate >= date '1997-01-01'\n\
+         \         and l_shipdate < date '1997-01-01' + interval '1' year\n\
+         \         and l_discount between 0.07 - 0.01 and 0.07 + 0.01\n\
+         \         and l_quantity < 24")
+  ,("Q7","\n\
+         \ select\n\
+         \         supp_nation,\n\
+         \         cust_nation,\n\
+         \         l_year,\n\
+         \         sum(volume) as revenue\n\
+         \ from\n\
+         \         (\n\
+         \                 select\n\
+         \                         n1.n_name as supp_nation,\n\
+         \                         n2.n_name as cust_nation,\n\
+         \                         extract(year from l_shipdate) as l_year,\n\
+         \                         l_extendedprice * (1 - l_discount) as volume\n\
+         \                 from\n\
+         \                         supplier,\n\
+         \                         lineitem,\n\
+         \                         orders,\n\
+         \                         customer,\n\
+         \                         nation n1,\n\
+         \                         nation n2\n\
+         \                 where\n\
+         \                         s_suppkey = l_suppkey\n\
+         \                         and o_orderkey = l_orderkey\n\
+         \                         and c_custkey = o_custkey\n\
+         \                         and s_nationkey = n1.n_nationkey\n\
+         \                         and c_nationkey = n2.n_nationkey\n\
+         \                         and (\n\
+         \                                 (n1.n_name = 'PERU' and n2.n_name = 'IRAQ')\n\
+         \                                 or (n1.n_name = 'IRAQ' and n2.n_name = 'PERU')\n\
+         \                         )\n\
+         \                         and l_shipdate between date '1995-01-01' and date '1996-12-31'\n\
+         \         ) as shipping\n\
+         \ group by\n\
+         \         supp_nation,\n\
+         \         cust_nation,\n\
+         \         l_year\n\
+         \ order by\n\
+         \         supp_nation,\n\
+         \         cust_nation,\n\
+         \         l_year")
+  ,("Q8","\n\
+         \ select\n\
+         \         o_year,\n\
+         \         sum(case\n\
+         \                 when nation = 'IRAQ' then volume\n\
+         \                 else 0\n\
+         \         end) / sum(volume) as mkt_share\n\
+         \ from\n\
+         \         (\n\
+         \                 select\n\
+         \                         extract(year from o_orderdate) as o_year,\n\
+         \                         l_extendedprice * (1 - l_discount) as volume,\n\
+         \                         n2.n_name as nation\n\
+         \                 from\n\
+         \                         part,\n\
+         \                         supplier,\n\
+         \                         lineitem,\n\
+         \                         orders,\n\
+         \                         customer,\n\
+         \                         nation n1,\n\
+         \                         nation n2,\n\
+         \                         region\n\
+         \                 where\n\
+         \                         p_partkey = l_partkey\n\
+         \                         and s_suppkey = l_suppkey\n\
+         \                         and l_orderkey = o_orderkey\n\
+         \                         and o_custkey = c_custkey\n\
+         \                         and c_nationkey = n1.n_nationkey\n\
+         \                         and n1.n_regionkey = r_regionkey\n\
+         \                         and r_name = 'MIDDLE EAST'\n\
+         \                         and s_nationkey = n2.n_nationkey\n\
+         \                         and o_orderdate between date '1995-01-01' and date '1996-12-31'\n\
+         \                         and p_type = 'STANDARD ANODIZED BRASS'\n\
+         \         ) as all_nations\n\
+         \ group by\n\
+         \         o_year\n\
+         \ order by\n\
+         \         o_year")
+   ,("Q9","\n\
+         \ select\n\
+         \         nation,\n\
+         \         o_year,\n\
+         \         sum(amount) as sum_profit\n\
+         \ from\n\
+         \         (\n\
+         \                 select\n\
+         \                         n_name as nation,\n\
+         \                         extract(year from o_orderdate) as o_year,\n\
+         \                         l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount\n\
+         \                 from\n\
+         \                         part,\n\
+         \                         supplier,\n\
+         \                         lineitem,\n\
+         \                         partsupp,\n\
+         \                         orders,\n\
+         \                         nation\n\
+         \                 where\n\
+         \                         s_suppkey = l_suppkey\n\
+         \                         and ps_suppkey = l_suppkey\n\
+         \                         and ps_partkey = l_partkey\n\
+         \                         and p_partkey = l_partkey\n\
+         \                         and o_orderkey = l_orderkey\n\
+         \                         and s_nationkey = n_nationkey\n\
+         \                         and p_name like '%antique%'\n\
+         \         ) as profit\n\
+         \ group by\n\
+         \         nation,\n\
+         \         o_year\n\
+         \ order by\n\
+         \         nation,\n\
+         \         o_year desc")
+   ,("Q10","\n\
+         \ select\n\
+         \         c_custkey,\n\
+         \         c_name,\n\
+         \         sum(l_extendedprice * (1 - l_discount)) as revenue,\n\
+         \         c_acctbal,\n\
+         \         n_name,\n\
+         \         c_address,\n\
+         \         c_phone,\n\
+         \         c_comment\n\
+         \ from\n\
+         \         customer,\n\
+         \         orders,\n\
+         \         lineitem,\n\
+         \         nation\n\
+         \ where\n\
+         \         c_custkey = o_custkey\n\
+         \         and l_orderkey = o_orderkey\n\
+         \         and o_orderdate >= date '1993-12-01'\n\
+         \         and o_orderdate < date '1993-12-01' + interval '3' month\n\
+         \         and l_returnflag = 'R'\n\
+         \         and c_nationkey = n_nationkey\n\
+         \ group by\n\
+         \         c_custkey,\n\
+         \         c_name,\n\
+         \         c_acctbal,\n\
+         \         c_phone,\n\
+         \         n_name,\n\
+         \         c_address,\n\
+         \         c_comment\n\
+         \ order by\n\
+         \         revenue desc\n\
+         \ fetch first 20 rows only")
+   ,("Q11","\n\
+         \ select\n\
+         \         ps_partkey,\n\
+         \         sum(ps_supplycost * ps_availqty) as value\n\
+         \ from\n\
+         \         partsupp,\n\
+         \         supplier,\n\
+         \         nation\n\
+         \ where\n\
+         \         ps_suppkey = s_suppkey\n\
+         \         and s_nationkey = n_nationkey\n\
+         \         and n_name = 'CHINA'\n\
+         \ group by\n\
+         \         ps_partkey having\n\
+         \                 sum(ps_supplycost * ps_availqty) > (\n\
+         \                         select\n\
+         \                                 sum(ps_supplycost * ps_availqty) * 0.0001000000\n\
+         \                         from\n\
+         \                                 partsupp,\n\
+         \                                 supplier,\n\
+         \                                 nation\n\
+         \                         where\n\
+         \                                 ps_suppkey = s_suppkey\n\
+         \                                 and s_nationkey = n_nationkey\n\
+         \                                 and n_name = 'CHINA'\n\
+         \                 )\n\
+         \ order by\n\
+         \         value desc")
+   ,("Q12","\n\
+         \ select\n\
+         \         l_shipmode,\n\
+         \         sum(case\n\
+         \                 when o_orderpriority = '1-URGENT'\n\
+         \                         or o_orderpriority = '2-HIGH'\n\
+         \                         then 1\n\
+         \                 else 0\n\
+         \         end) as high_line_count,\n\
+         \         sum(case\n\
+         \                 when o_orderpriority <> '1-URGENT'\n\
+         \                         and o_orderpriority <> '2-HIGH'\n\
+         \                         then 1\n\
+         \                 else 0\n\
+         \         end) as low_line_count\n\
+         \ from\n\
+         \         orders,\n\
+         \         lineitem\n\
+         \ where\n\
+         \         o_orderkey = l_orderkey\n\
+         \         and l_shipmode in ('AIR', 'RAIL')\n\
+         \         and l_commitdate < l_receiptdate\n\
+         \         and l_shipdate < l_commitdate\n\
+         \         and l_receiptdate >= date '1994-01-01'\n\
+         \         and l_receiptdate < date '1994-01-01' + interval '1' year\n\
+         \ group by\n\
+         \         l_shipmode\n\
+         \ order by\n\
+         \         l_shipmode")
+   ,("Q13","\n\
+         \ select\n\
+         \         c_count,\n\
+         \         count(*) as custdist\n\
+         \ from\n\
+         \         (\n\
+         \                 select\n\
+         \                         c_custkey,\n\
+         \                         count(o_orderkey)\n\
+         \                 from\n\
+         \                         customer left outer join orders on\n\
+         \                                 c_custkey = o_custkey\n\
+         \                                 and o_comment not like '%pending%requests%'\n\
+         \                 group by\n\
+         \                         c_custkey\n\
+         \         ) as c_orders (c_custkey, c_count)\n\
+         \ group by\n\
+         \         c_count\n\
+         \ order by\n\
+         \         custdist desc,\n\
+         \         c_count desc")
+   ,("Q14","\n\
+         \ select\n\
+         \         100.00 * sum(case\n\
+         \                 when p_type like 'PROMO%'\n\
+         \                         then l_extendedprice * (1 - l_discount)\n\
+         \                 else 0\n\
+         \         end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue\n\
+         \ from\n\
+         \         lineitem,\n\
+         \         part\n\
+         \ where\n\
+         \         l_partkey = p_partkey\n\
+         \         and l_shipdate >= date '1994-12-01'\n\
+         \         and l_shipdate < date '1994-12-01' + interval '1' month")
+   ,("Q15","\n\
+         \ /*create view revenue0 (supplier_no, total_revenue) as\n\
+         \         select\n\
+         \                 l_suppkey,\n\
+         \                 sum(l_extendedprice * (1 - l_discount))\n\
+         \         from\n\
+         \                 lineitem\n\
+         \         where\n\
+         \                 l_shipdate >= date '1995-06-01'\n\
+         \                 and l_shipdate < date '1995-06-01' + interval '3' month\n\
+         \         group by\n\
+         \                 l_suppkey;*/\n\
+         \ with\n\
+         \ revenue0 as\n\
+         \         (select\n\
+         \                 l_suppkey as supplier_no,\n\
+         \                 sum(l_extendedprice * (1 - l_discount)) as total_revenue\n\
+         \         from\n\
+         \                 lineitem\n\
+         \         where\n\
+         \                 l_shipdate >= date '1995-06-01'\n\
+         \                 and l_shipdate < date '1995-06-01' + interval '3' month\n\
+         \         group by\n\
+         \                 l_suppkey)\n\
+         \ select\n\
+         \         s_suppkey,\n\
+         \         s_name,\n\
+         \         s_address,\n\
+         \         s_phone,\n\
+         \         total_revenue\n\
+         \ from\n\
+         \         supplier,\n\
+         \         revenue0\n\
+         \ where\n\
+         \         s_suppkey = supplier_no\n\
+         \         and total_revenue = (\n\
+         \                 select\n\
+         \                         max(total_revenue)\n\
+         \                 from\n\
+         \                         revenue0\n\
+         \         )\n\
+         \ order by\n\
+         \         s_suppkey")
+   ,("Q16","\n\
+         \ select\n\
+         \         p_brand,\n\
+         \         p_type,\n\
+         \         p_size,\n\
+         \         count(distinct ps_suppkey) as supplier_cnt\n\
+         \ from\n\
+         \         partsupp,\n\
+         \         part\n\
+         \ where\n\
+         \         p_partkey = ps_partkey\n\
+         \         and p_brand <> 'Brand#15'\n\
+         \         and p_type not like 'MEDIUM BURNISHED%'\n\
+         \         and p_size in (39, 26, 18, 45, 19, 1, 3, 9)\n\
+         \         and ps_suppkey not in (\n\
+         \                 select\n\
+         \                         s_suppkey\n\
+         \                 from\n\
+         \                         supplier\n\
+         \                 where\n\
+         \                         s_comment like '%Customer%Complaints%'\n\
+         \         )\n\
+         \ group by\n\
+         \         p_brand,\n\
+         \         p_type,\n\
+         \         p_size\n\
+         \ order by\n\
+         \         supplier_cnt desc,\n\
+         \         p_brand,\n\
+         \         p_type,\n\
+         \         p_size")
+   ,("Q17","\n\
+         \ select\n\
+         \         sum(l_extendedprice) / 7.0 as avg_yearly\n\
+         \ from\n\
+         \         lineitem,\n\
+         \         part\n\
+         \ where\n\
+         \         p_partkey = l_partkey\n\
+         \         and p_brand = 'Brand#52'\n\
+         \         and p_container = 'JUMBO CAN'\n\
+         \         and l_quantity < (\n\
+         \                 select\n\
+         \                         0.2 * avg(l_quantity)\n\
+         \                 from\n\
+         \                         lineitem\n\
+         \                 where\n\
+         \                         l_partkey = p_partkey\n\
+         \         )")
+   ,("Q18","\n\
+         \ select\n\
+         \         c_name,\n\
+         \         c_custkey,\n\
+         \         o_orderkey,\n\
+         \         o_orderdate,\n\
+         \         o_totalprice,\n\
+         \         sum(l_quantity)\n\
+         \ from\n\
+         \         customer,\n\
+         \         orders,\n\
+         \         lineitem\n\
+         \ where\n\
+         \         o_orderkey in (\n\
+         \                 select\n\
+         \                         l_orderkey\n\
+         \                 from\n\
+         \                         lineitem\n\
+         \                 group by\n\
+         \                         l_orderkey having\n\
+         \                                 sum(l_quantity) > 313\n\
+         \         )\n\
+         \         and c_custkey = o_custkey\n\
+         \         and o_orderkey = l_orderkey\n\
+         \ group by\n\
+         \         c_name,\n\
+         \         c_custkey,\n\
+         \         o_orderkey,\n\
+         \         o_orderdate,\n\
+         \         o_totalprice\n\
+         \ order by\n\
+         \         o_totalprice desc,\n\
+         \         o_orderdate\n\
+         \ fetch first 100 rows only")
+   ,("Q19","\n\
+         \ select\n\
+         \         sum(l_extendedprice* (1 - l_discount)) as revenue\n\
+         \ from\n\
+         \         lineitem,\n\
+         \         part\n\
+         \ where\n\
+         \         (\n\
+         \                 p_partkey = l_partkey\n\
+         \                 and p_brand = 'Brand#43'\n\
+         \                 and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')\n\
+         \                 and l_quantity >= 3 and l_quantity <= 3 + 10\n\
+         \                 and p_size between 1 and 5\n\
+         \                 and l_shipmode in ('AIR', 'AIR REG')\n\
+         \                 and l_shipinstruct = 'DELIVER IN PERSON'\n\
+         \         )\n\
+         \         or\n\
+         \         (\n\
+         \                 p_partkey = l_partkey\n\
+         \                 and p_brand = 'Brand#25'\n\
+         \                 and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')\n\
+         \                 and l_quantity >= 10 and l_quantity <= 10 + 10\n\
+         \                 and p_size between 1 and 10\n\
+         \                 and l_shipmode in ('AIR', 'AIR REG')\n\
+         \                 and l_shipinstruct = 'DELIVER IN PERSON'\n\
+         \         )\n\
+         \         or\n\
+         \         (\n\
+         \                 p_partkey = l_partkey\n\
+         \                 and p_brand = 'Brand#24'\n\
+         \                 and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')\n\
+         \                 and l_quantity >= 22 and l_quantity <= 22 + 10\n\
+         \                 and p_size between 1 and 15\n\
+         \                 and l_shipmode in ('AIR', 'AIR REG')\n\
+         \                 and l_shipinstruct = 'DELIVER IN PERSON'\n\
+         \         )")
+   ,("Q20","\n\
+         \ select\n\
+         \         s_name,\n\
+         \         s_address\n\
+         \ from\n\
+         \         supplier,\n\
+         \         nation\n\
+         \ where\n\
+         \         s_suppkey in (\n\
+         \                 select\n\
+         \                         ps_suppkey\n\
+         \                 from\n\
+         \                         partsupp\n\
+         \                 where\n\
+         \                         ps_partkey in (\n\
+         \                                 select\n\
+         \                                         p_partkey\n\
+         \                                 from\n\
+         \                                         part\n\
+         \                                 where\n\
+         \                                         p_name like 'lime%'\n\
+         \                         )\n\
+         \                         and ps_availqty > (\n\
+         \                                 select\n\
+         \                                         0.5 * sum(l_quantity)\n\
+         \                                 from\n\
+         \                                         lineitem\n\
+         \                                 where\n\
+         \                                         l_partkey = ps_partkey\n\
+         \                                         and l_suppkey = ps_suppkey\n\
+         \                                         and l_shipdate >= date '1994-01-01'\n\
+         \                                         and l_shipdate < date '1994-01-01' + interval '1' year\n\
+         \                         )\n\
+         \         )\n\
+         \         and s_nationkey = n_nationkey\n\
+         \         and n_name = 'VIETNAM'\n\
+         \ order by\n\
+         \         s_name")
+   ,("Q21","\n\
+         \ select\n\
+         \         s_name,\n\
+         \         count(*) as numwait\n\
+         \ from\n\
+         \         supplier,\n\
+         \         lineitem l1,\n\
+         \         orders,\n\
+         \         nation\n\
+         \ where\n\
+         \         s_suppkey = l1.l_suppkey\n\
+         \         and o_orderkey = l1.l_orderkey\n\
+         \         and o_orderstatus = 'F'\n\
+         \         and l1.l_receiptdate > l1.l_commitdate\n\
+         \         and exists (\n\
+         \                 select\n\
+         \                         *\n\
+         \                 from\n\
+         \                         lineitem l2\n\
+         \                 where\n\
+         \                         l2.l_orderkey = l1.l_orderkey\n\
+         \                         and l2.l_suppkey <> l1.l_suppkey\n\
+         \         )\n\
+         \         and not exists (\n\
+         \                 select\n\
+         \                         *\n\
+         \                 from\n\
+         \                         lineitem l3\n\
+         \                 where\n\
+         \                         l3.l_orderkey = l1.l_orderkey\n\
+         \                         and l3.l_suppkey <> l1.l_suppkey\n\
+         \                         and l3.l_receiptdate > l3.l_commitdate\n\
+         \         )\n\
+         \         and s_nationkey = n_nationkey\n\
+         \         and n_name = 'INDIA'\n\
+         \ group by\n\
+         \         s_name\n\
+         \ order by\n\
+         \         numwait desc,\n\
+         \         s_name\n\
+         \ fetch first 100 rows only")
+   ,("Q22","\n\
+         \ select\n\
+         \         cntrycode,\n\
+         \         count(*) as numcust,\n\
+         \         sum(c_acctbal) as totacctbal\n\
+         \ from\n\
+         \         (\n\
+         \                 select\n\
+         \                         substring(c_phone from 1 for 2) as cntrycode,\n\
+         \                         c_acctbal\n\
+         \                 from\n\
+         \                         customer\n\
+         \                 where\n\
+         \                         substring(c_phone from 1 for 2) in\n\
+         \                                 ('41', '28', '39', '21', '24', '29', '44')\n\
+         \                         and c_acctbal > (\n\
+         \                                 select\n\
+         \                                         avg(c_acctbal)\n\
+         \                                 from\n\
+         \                                         customer\n\
+         \                                 where\n\
+         \                                         c_acctbal > 0.00\n\
+         \                                         and substring(c_phone from 1 for 2) in\n\
+         \                                                 ('41', '28', '39', '21', '24', '29', '44')\n\
+         \                         )\n\
+         \                         and not exists (\n\
+         \                                 select\n\
+         \                                         *\n\
+         \                                 from\n\
+         \                                         orders\n\
+         \                                 where\n\
+         \                                         o_custkey = c_custkey\n\
+         \                         )\n\
+         \         ) as custsale\n\
+         \ group by\n\
+         \         cntrycode\n\
+         \ order by\n\
+         \         cntrycode")
+  ]
diff --git a/tools/Language/SQL/SimpleSQL/Tpch.lhs b/tools/Language/SQL/SimpleSQL/Tpch.lhs
deleted file mode 100644
index 45e8d9a..0000000
--- a/tools/Language/SQL/SimpleSQL/Tpch.lhs
+++ /dev/null
@@ -1,683 +0,0 @@
-
-Some tests for parsing the tpch queries
-
-The changes made to the official syntax are:
-1. replace the set rowcount with ansi standard fetch first n rows only
-2. replace the create view, query, drop view sequence with a query
-   using a common table expression
-
-> module Language.SQL.SimpleSQL.Tpch (tpchTests,tpchQueries) where
-
-> import Language.SQL.SimpleSQL.TestTypes
-
-> tpchTests :: TestItem
-> tpchTests =
->     Group "parse tpch"
->     $ map (ParseQueryExpr ansi2011 . snd) tpchQueries
-
-> tpchQueries :: [(String,String)]
-> tpchQueries =
->   [("Q1","\n\
->          \select\n\
->          \        l_returnflag,\n\
->          \        l_linestatus,\n\
->          \        sum(l_quantity) as sum_qty,\n\
->          \        sum(l_extendedprice) as sum_base_price,\n\
->          \        sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,\n\
->          \        sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,\n\
->          \        avg(l_quantity) as avg_qty,\n\
->          \        avg(l_extendedprice) as avg_price,\n\
->          \        avg(l_discount) as avg_disc,\n\
->          \        count(*) as count_order\n\
->          \from\n\
->          \        lineitem\n\
->          \where\n\
->          \        l_shipdate <= date '1998-12-01' - interval '63' day (3)\n\
->          \group by\n\
->          \        l_returnflag,\n\
->          \        l_linestatus\n\
->          \order by\n\
->          \        l_returnflag,\n\
->          \        l_linestatus")
->   ,("Q2","\n\
->          \select\n\
->          \        s_acctbal,\n\
->          \        s_name,\n\
->          \        n_name,\n\
->          \        p_partkey,\n\
->          \        p_mfgr,\n\
->          \        s_address,\n\
->          \        s_phone,\n\
->          \        s_comment\n\
->          \from\n\
->          \        part,\n\
->          \        supplier,\n\
->          \        partsupp,\n\
->          \        nation,\n\
->          \        region\n\
->          \where\n\
->          \        p_partkey = ps_partkey\n\
->          \        and s_suppkey = ps_suppkey\n\
->          \        and p_size = 15\n\
->          \        and p_type like '%BRASS'\n\
->          \        and s_nationkey = n_nationkey\n\
->          \        and n_regionkey = r_regionkey\n\
->          \        and r_name = 'EUROPE'\n\
->          \        and ps_supplycost = (\n\
->          \                select\n\
->          \                        min(ps_supplycost)\n\
->          \                from\n\
->          \                        partsupp,\n\
->          \                        supplier,\n\
->          \                        nation,\n\
->          \                        region\n\
->          \                where\n\
->          \                        p_partkey = ps_partkey\n\
->          \                        and s_suppkey = ps_suppkey\n\
->          \                        and s_nationkey = n_nationkey\n\
->          \                        and n_regionkey = r_regionkey\n\
->          \                        and r_name = 'EUROPE'\n\
->          \        )\n\
->          \order by\n\
->          \        s_acctbal desc,\n\
->          \        n_name,\n\
->          \        s_name,\n\
->          \        p_partkey\n\
->          \fetch first 100 rows only")
->   ,("Q3","\n\
->          \ select\n\
->          \         l_orderkey,\n\
->          \         sum(l_extendedprice * (1 - l_discount)) as revenue,\n\
->          \         o_orderdate,\n\
->          \         o_shippriority\n\
->          \ from\n\
->          \         customer,\n\
->          \         orders,\n\
->          \         lineitem\n\
->          \ where\n\
->          \         c_mktsegment = 'MACHINERY'\n\
->          \         and c_custkey = o_custkey\n\
->          \         and l_orderkey = o_orderkey\n\
->          \         and o_orderdate < date '1995-03-21'\n\
->          \         and l_shipdate > date '1995-03-21'\n\
->          \ group by\n\
->          \         l_orderkey,\n\
->          \         o_orderdate,\n\
->          \         o_shippriority\n\
->          \ order by\n\
->          \         revenue desc,\n\
->          \         o_orderdate\n\
->          \ fetch first 10 rows only")
->   ,("Q4","\n\
->          \ select\n\
->          \         o_orderpriority,\n\
->          \         count(*) as order_count\n\
->          \ from\n\
->          \         orders\n\
->          \ where\n\
->          \         o_orderdate >= date '1996-03-01'\n\
->          \         and o_orderdate < date '1996-03-01' + interval '3' month\n\
->          \         and exists (\n\
->          \                 select\n\
->          \                         *\n\
->          \                 from\n\
->          \                         lineitem\n\
->          \                 where\n\
->          \                         l_orderkey = o_orderkey\n\
->          \                         and l_commitdate < l_receiptdate\n\
->          \         )\n\
->          \ group by\n\
->          \         o_orderpriority\n\
->          \ order by\n\
->          \         o_orderpriority")
->   ,("Q5","\n\
->          \ select\n\
->          \         n_name,\n\
->          \         sum(l_extendedprice * (1 - l_discount)) as revenue\n\
->          \ from\n\
->          \         customer,\n\
->          \         orders,\n\
->          \         lineitem,\n\
->          \         supplier,\n\
->          \         nation,\n\
->          \         region\n\
->          \ where\n\
->          \         c_custkey = o_custkey\n\
->          \         and l_orderkey = o_orderkey\n\
->          \         and l_suppkey = s_suppkey\n\
->          \         and c_nationkey = s_nationkey\n\
->          \         and s_nationkey = n_nationkey\n\
->          \         and n_regionkey = r_regionkey\n\
->          \         and r_name = 'EUROPE'\n\
->          \         and o_orderdate >= date '1997-01-01'\n\
->          \         and o_orderdate < date '1997-01-01' + interval '1' year\n\
->          \ group by\n\
->          \         n_name\n\
->          \ order by\n\
->          \         revenue desc")
->   ,("Q6","\n\
->          \ select\n\
->          \         sum(l_extendedprice * l_discount) as revenue\n\
->          \ from\n\
->          \         lineitem\n\
->          \ where\n\
->          \         l_shipdate >= date '1997-01-01'\n\
->          \         and l_shipdate < date '1997-01-01' + interval '1' year\n\
->          \         and l_discount between 0.07 - 0.01 and 0.07 + 0.01\n\
->          \         and l_quantity < 24")
->   ,("Q7","\n\
->          \ select\n\
->          \         supp_nation,\n\
->          \         cust_nation,\n\
->          \         l_year,\n\
->          \         sum(volume) as revenue\n\
->          \ from\n\
->          \         (\n\
->          \                 select\n\
->          \                         n1.n_name as supp_nation,\n\
->          \                         n2.n_name as cust_nation,\n\
->          \                         extract(year from l_shipdate) as l_year,\n\
->          \                         l_extendedprice * (1 - l_discount) as volume\n\
->          \                 from\n\
->          \                         supplier,\n\
->          \                         lineitem,\n\
->          \                         orders,\n\
->          \                         customer,\n\
->          \                         nation n1,\n\
->          \                         nation n2\n\
->          \                 where\n\
->          \                         s_suppkey = l_suppkey\n\
->          \                         and o_orderkey = l_orderkey\n\
->          \                         and c_custkey = o_custkey\n\
->          \                         and s_nationkey = n1.n_nationkey\n\
->          \                         and c_nationkey = n2.n_nationkey\n\
->          \                         and (\n\
->          \                                 (n1.n_name = 'PERU' and n2.n_name = 'IRAQ')\n\
->          \                                 or (n1.n_name = 'IRAQ' and n2.n_name = 'PERU')\n\
->          \                         )\n\
->          \                         and l_shipdate between date '1995-01-01' and date '1996-12-31'\n\
->          \         ) as shipping\n\
->          \ group by\n\
->          \         supp_nation,\n\
->          \         cust_nation,\n\
->          \         l_year\n\
->          \ order by\n\
->          \         supp_nation,\n\
->          \         cust_nation,\n\
->          \         l_year")
->   ,("Q8","\n\
->          \ select\n\
->          \         o_year,\n\
->          \         sum(case\n\
->          \                 when nation = 'IRAQ' then volume\n\
->          \                 else 0\n\
->          \         end) / sum(volume) as mkt_share\n\
->          \ from\n\
->          \         (\n\
->          \                 select\n\
->          \                         extract(year from o_orderdate) as o_year,\n\
->          \                         l_extendedprice * (1 - l_discount) as volume,\n\
->          \                         n2.n_name as nation\n\
->          \                 from\n\
->          \                         part,\n\
->          \                         supplier,\n\
->          \                         lineitem,\n\
->          \                         orders,\n\
->          \                         customer,\n\
->          \                         nation n1,\n\
->          \                         nation n2,\n\
->          \                         region\n\
->          \                 where\n\
->          \                         p_partkey = l_partkey\n\
->          \                         and s_suppkey = l_suppkey\n\
->          \                         and l_orderkey = o_orderkey\n\
->          \                         and o_custkey = c_custkey\n\
->          \                         and c_nationkey = n1.n_nationkey\n\
->          \                         and n1.n_regionkey = r_regionkey\n\
->          \                         and r_name = 'MIDDLE EAST'\n\
->          \                         and s_nationkey = n2.n_nationkey\n\
->          \                         and o_orderdate between date '1995-01-01' and date '1996-12-31'\n\
->          \                         and p_type = 'STANDARD ANODIZED BRASS'\n\
->          \         ) as all_nations\n\
->          \ group by\n\
->          \         o_year\n\
->          \ order by\n\
->          \         o_year")
->    ,("Q9","\n\
->          \ select\n\
->          \         nation,\n\
->          \         o_year,\n\
->          \         sum(amount) as sum_profit\n\
->          \ from\n\
->          \         (\n\
->          \                 select\n\
->          \                         n_name as nation,\n\
->          \                         extract(year from o_orderdate) as o_year,\n\
->          \                         l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount\n\
->          \                 from\n\
->          \                         part,\n\
->          \                         supplier,\n\
->          \                         lineitem,\n\
->          \                         partsupp,\n\
->          \                         orders,\n\
->          \                         nation\n\
->          \                 where\n\
->          \                         s_suppkey = l_suppkey\n\
->          \                         and ps_suppkey = l_suppkey\n\
->          \                         and ps_partkey = l_partkey\n\
->          \                         and p_partkey = l_partkey\n\
->          \                         and o_orderkey = l_orderkey\n\
->          \                         and s_nationkey = n_nationkey\n\
->          \                         and p_name like '%antique%'\n\
->          \         ) as profit\n\
->          \ group by\n\
->          \         nation,\n\
->          \         o_year\n\
->          \ order by\n\
->          \         nation,\n\
->          \         o_year desc")
->    ,("Q10","\n\
->          \ select\n\
->          \         c_custkey,\n\
->          \         c_name,\n\
->          \         sum(l_extendedprice * (1 - l_discount)) as revenue,\n\
->          \         c_acctbal,\n\
->          \         n_name,\n\
->          \         c_address,\n\
->          \         c_phone,\n\
->          \         c_comment\n\
->          \ from\n\
->          \         customer,\n\
->          \         orders,\n\
->          \         lineitem,\n\
->          \         nation\n\
->          \ where\n\
->          \         c_custkey = o_custkey\n\
->          \         and l_orderkey = o_orderkey\n\
->          \         and o_orderdate >= date '1993-12-01'\n\
->          \         and o_orderdate < date '1993-12-01' + interval '3' month\n\
->          \         and l_returnflag = 'R'\n\
->          \         and c_nationkey = n_nationkey\n\
->          \ group by\n\
->          \         c_custkey,\n\
->          \         c_name,\n\
->          \         c_acctbal,\n\
->          \         c_phone,\n\
->          \         n_name,\n\
->          \         c_address,\n\
->          \         c_comment\n\
->          \ order by\n\
->          \         revenue desc\n\
->          \ fetch first 20 rows only")
->    ,("Q11","\n\
->          \ select\n\
->          \         ps_partkey,\n\
->          \         sum(ps_supplycost * ps_availqty) as value\n\
->          \ from\n\
->          \         partsupp,\n\
->          \         supplier,\n\
->          \         nation\n\
->          \ where\n\
->          \         ps_suppkey = s_suppkey\n\
->          \         and s_nationkey = n_nationkey\n\
->          \         and n_name = 'CHINA'\n\
->          \ group by\n\
->          \         ps_partkey having\n\
->          \                 sum(ps_supplycost * ps_availqty) > (\n\
->          \                         select\n\
->          \                                 sum(ps_supplycost * ps_availqty) * 0.0001000000\n\
->          \                         from\n\
->          \                                 partsupp,\n\
->          \                                 supplier,\n\
->          \                                 nation\n\
->          \                         where\n\
->          \                                 ps_suppkey = s_suppkey\n\
->          \                                 and s_nationkey = n_nationkey\n\
->          \                                 and n_name = 'CHINA'\n\
->          \                 )\n\
->          \ order by\n\
->          \         value desc")
->    ,("Q12","\n\
->          \ select\n\
->          \         l_shipmode,\n\
->          \         sum(case\n\
->          \                 when o_orderpriority = '1-URGENT'\n\
->          \                         or o_orderpriority = '2-HIGH'\n\
->          \                         then 1\n\
->          \                 else 0\n\
->          \         end) as high_line_count,\n\
->          \         sum(case\n\
->          \                 when o_orderpriority <> '1-URGENT'\n\
->          \                         and o_orderpriority <> '2-HIGH'\n\
->          \                         then 1\n\
->          \                 else 0\n\
->          \         end) as low_line_count\n\
->          \ from\n\
->          \         orders,\n\
->          \         lineitem\n\
->          \ where\n\
->          \         o_orderkey = l_orderkey\n\
->          \         and l_shipmode in ('AIR', 'RAIL')\n\
->          \         and l_commitdate < l_receiptdate\n\
->          \         and l_shipdate < l_commitdate\n\
->          \         and l_receiptdate >= date '1994-01-01'\n\
->          \         and l_receiptdate < date '1994-01-01' + interval '1' year\n\
->          \ group by\n\
->          \         l_shipmode\n\
->          \ order by\n\
->          \         l_shipmode")
->    ,("Q13","\n\
->          \ select\n\
->          \         c_count,\n\
->          \         count(*) as custdist\n\
->          \ from\n\
->          \         (\n\
->          \                 select\n\
->          \                         c_custkey,\n\
->          \                         count(o_orderkey)\n\
->          \                 from\n\
->          \                         customer left outer join orders on\n\
->          \                                 c_custkey = o_custkey\n\
->          \                                 and o_comment not like '%pending%requests%'\n\
->          \                 group by\n\
->          \                         c_custkey\n\
->          \         ) as c_orders (c_custkey, c_count)\n\
->          \ group by\n\
->          \         c_count\n\
->          \ order by\n\
->          \         custdist desc,\n\
->          \         c_count desc")
->    ,("Q14","\n\
->          \ select\n\
->          \         100.00 * sum(case\n\
->          \                 when p_type like 'PROMO%'\n\
->          \                         then l_extendedprice * (1 - l_discount)\n\
->          \                 else 0\n\
->          \         end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue\n\
->          \ from\n\
->          \         lineitem,\n\
->          \         part\n\
->          \ where\n\
->          \         l_partkey = p_partkey\n\
->          \         and l_shipdate >= date '1994-12-01'\n\
->          \         and l_shipdate < date '1994-12-01' + interval '1' month")
->    ,("Q15","\n\
->          \ /*create view revenue0 (supplier_no, total_revenue) as\n\
->          \         select\n\
->          \                 l_suppkey,\n\
->          \                 sum(l_extendedprice * (1 - l_discount))\n\
->          \         from\n\
->          \                 lineitem\n\
->          \         where\n\
->          \                 l_shipdate >= date '1995-06-01'\n\
->          \                 and l_shipdate < date '1995-06-01' + interval '3' month\n\
->          \         group by\n\
->          \                 l_suppkey;*/\n\
->          \ with\n\
->          \ revenue0 as\n\
->          \         (select\n\
->          \                 l_suppkey as supplier_no,\n\
->          \                 sum(l_extendedprice * (1 - l_discount)) as total_revenue\n\
->          \         from\n\
->          \                 lineitem\n\
->          \         where\n\
->          \                 l_shipdate >= date '1995-06-01'\n\
->          \                 and l_shipdate < date '1995-06-01' + interval '3' month\n\
->          \         group by\n\
->          \                 l_suppkey)\n\
->          \ select\n\
->          \         s_suppkey,\n\
->          \         s_name,\n\
->          \         s_address,\n\
->          \         s_phone,\n\
->          \         total_revenue\n\
->          \ from\n\
->          \         supplier,\n\
->          \         revenue0\n\
->          \ where\n\
->          \         s_suppkey = supplier_no\n\
->          \         and total_revenue = (\n\
->          \                 select\n\
->          \                         max(total_revenue)\n\
->          \                 from\n\
->          \                         revenue0\n\
->          \         )\n\
->          \ order by\n\
->          \         s_suppkey")
->    ,("Q16","\n\
->          \ select\n\
->          \         p_brand,\n\
->          \         p_type,\n\
->          \         p_size,\n\
->          \         count(distinct ps_suppkey) as supplier_cnt\n\
->          \ from\n\
->          \         partsupp,\n\
->          \         part\n\
->          \ where\n\
->          \         p_partkey = ps_partkey\n\
->          \         and p_brand <> 'Brand#15'\n\
->          \         and p_type not like 'MEDIUM BURNISHED%'\n\
->          \         and p_size in (39, 26, 18, 45, 19, 1, 3, 9)\n\
->          \         and ps_suppkey not in (\n\
->          \                 select\n\
->          \                         s_suppkey\n\
->          \                 from\n\
->          \                         supplier\n\
->          \                 where\n\
->          \                         s_comment like '%Customer%Complaints%'\n\
->          \         )\n\
->          \ group by\n\
->          \         p_brand,\n\
->          \         p_type,\n\
->          \         p_size\n\
->          \ order by\n\
->          \         supplier_cnt desc,\n\
->          \         p_brand,\n\
->          \         p_type,\n\
->          \         p_size")
->    ,("Q17","\n\
->          \ select\n\
->          \         sum(l_extendedprice) / 7.0 as avg_yearly\n\
->          \ from\n\
->          \         lineitem,\n\
->          \         part\n\
->          \ where\n\
->          \         p_partkey = l_partkey\n\
->          \         and p_brand = 'Brand#52'\n\
->          \         and p_container = 'JUMBO CAN'\n\
->          \         and l_quantity < (\n\
->          \                 select\n\
->          \                         0.2 * avg(l_quantity)\n\
->          \                 from\n\
->          \                         lineitem\n\
->          \                 where\n\
->          \                         l_partkey = p_partkey\n\
->          \         )")
->    ,("Q18","\n\
->          \ select\n\
->          \         c_name,\n\
->          \         c_custkey,\n\
->          \         o_orderkey,\n\
->          \         o_orderdate,\n\
->          \         o_totalprice,\n\
->          \         sum(l_quantity)\n\
->          \ from\n\
->          \         customer,\n\
->          \         orders,\n\
->          \         lineitem\n\
->          \ where\n\
->          \         o_orderkey in (\n\
->          \                 select\n\
->          \                         l_orderkey\n\
->          \                 from\n\
->          \                         lineitem\n\
->          \                 group by\n\
->          \                         l_orderkey having\n\
->          \                                 sum(l_quantity) > 313\n\
->          \         )\n\
->          \         and c_custkey = o_custkey\n\
->          \         and o_orderkey = l_orderkey\n\
->          \ group by\n\
->          \         c_name,\n\
->          \         c_custkey,\n\
->          \         o_orderkey,\n\
->          \         o_orderdate,\n\
->          \         o_totalprice\n\
->          \ order by\n\
->          \         o_totalprice desc,\n\
->          \         o_orderdate\n\
->          \ fetch first 100 rows only")
->    ,("Q19","\n\
->          \ select\n\
->          \         sum(l_extendedprice* (1 - l_discount)) as revenue\n\
->          \ from\n\
->          \         lineitem,\n\
->          \         part\n\
->          \ where\n\
->          \         (\n\
->          \                 p_partkey = l_partkey\n\
->          \                 and p_brand = 'Brand#43'\n\
->          \                 and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')\n\
->          \                 and l_quantity >= 3 and l_quantity <= 3 + 10\n\
->          \                 and p_size between 1 and 5\n\
->          \                 and l_shipmode in ('AIR', 'AIR REG')\n\
->          \                 and l_shipinstruct = 'DELIVER IN PERSON'\n\
->          \         )\n\
->          \         or\n\
->          \         (\n\
->          \                 p_partkey = l_partkey\n\
->          \                 and p_brand = 'Brand#25'\n\
->          \                 and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')\n\
->          \                 and l_quantity >= 10 and l_quantity <= 10 + 10\n\
->          \                 and p_size between 1 and 10\n\
->          \                 and l_shipmode in ('AIR', 'AIR REG')\n\
->          \                 and l_shipinstruct = 'DELIVER IN PERSON'\n\
->          \         )\n\
->          \         or\n\
->          \         (\n\
->          \                 p_partkey = l_partkey\n\
->          \                 and p_brand = 'Brand#24'\n\
->          \                 and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')\n\
->          \                 and l_quantity >= 22 and l_quantity <= 22 + 10\n\
->          \                 and p_size between 1 and 15\n\
->          \                 and l_shipmode in ('AIR', 'AIR REG')\n\
->          \                 and l_shipinstruct = 'DELIVER IN PERSON'\n\
->          \         )")
->    ,("Q20","\n\
->          \ select\n\
->          \         s_name,\n\
->          \         s_address\n\
->          \ from\n\
->          \         supplier,\n\
->          \         nation\n\
->          \ where\n\
->          \         s_suppkey in (\n\
->          \                 select\n\
->          \                         ps_suppkey\n\
->          \                 from\n\
->          \                         partsupp\n\
->          \                 where\n\
->          \                         ps_partkey in (\n\
->          \                                 select\n\
->          \                                         p_partkey\n\
->          \                                 from\n\
->          \                                         part\n\
->          \                                 where\n\
->          \                                         p_name like 'lime%'\n\
->          \                         )\n\
->          \                         and ps_availqty > (\n\
->          \                                 select\n\
->          \                                         0.5 * sum(l_quantity)\n\
->          \                                 from\n\
->          \                                         lineitem\n\
->          \                                 where\n\
->          \                                         l_partkey = ps_partkey\n\
->          \                                         and l_suppkey = ps_suppkey\n\
->          \                                         and l_shipdate >= date '1994-01-01'\n\
->          \                                         and l_shipdate < date '1994-01-01' + interval '1' year\n\
->          \                         )\n\
->          \         )\n\
->          \         and s_nationkey = n_nationkey\n\
->          \         and n_name = 'VIETNAM'\n\
->          \ order by\n\
->          \         s_name")
->    ,("Q21","\n\
->          \ select\n\
->          \         s_name,\n\
->          \         count(*) as numwait\n\
->          \ from\n\
->          \         supplier,\n\
->          \         lineitem l1,\n\
->          \         orders,\n\
->          \         nation\n\
->          \ where\n\
->          \         s_suppkey = l1.l_suppkey\n\
->          \         and o_orderkey = l1.l_orderkey\n\
->          \         and o_orderstatus = 'F'\n\
->          \         and l1.l_receiptdate > l1.l_commitdate\n\
->          \         and exists (\n\
->          \                 select\n\
->          \                         *\n\
->          \                 from\n\
->          \                         lineitem l2\n\
->          \                 where\n\
->          \                         l2.l_orderkey = l1.l_orderkey\n\
->          \                         and l2.l_suppkey <> l1.l_suppkey\n\
->          \         )\n\
->          \         and not exists (\n\
->          \                 select\n\
->          \                         *\n\
->          \                 from\n\
->          \                         lineitem l3\n\
->          \                 where\n\
->          \                         l3.l_orderkey = l1.l_orderkey\n\
->          \                         and l3.l_suppkey <> l1.l_suppkey\n\
->          \                         and l3.l_receiptdate > l3.l_commitdate\n\
->          \         )\n\
->          \         and s_nationkey = n_nationkey\n\
->          \         and n_name = 'INDIA'\n\
->          \ group by\n\
->          \         s_name\n\
->          \ order by\n\
->          \         numwait desc,\n\
->          \         s_name\n\
->          \ fetch first 100 rows only")
->    ,("Q22","\n\
->          \ select\n\
->          \         cntrycode,\n\
->          \         count(*) as numcust,\n\
->          \         sum(c_acctbal) as totacctbal\n\
->          \ from\n\
->          \         (\n\
->          \                 select\n\
->          \                         substring(c_phone from 1 for 2) as cntrycode,\n\
->          \                         c_acctbal\n\
->          \                 from\n\
->          \                         customer\n\
->          \                 where\n\
->          \                         substring(c_phone from 1 for 2) in\n\
->          \                                 ('41', '28', '39', '21', '24', '29', '44')\n\
->          \                         and c_acctbal > (\n\
->          \                                 select\n\
->          \                                         avg(c_acctbal)\n\
->          \                                 from\n\
->          \                                         customer\n\
->          \                                 where\n\
->          \                                         c_acctbal > 0.00\n\
->          \                                         and substring(c_phone from 1 for 2) in\n\
->          \                                                 ('41', '28', '39', '21', '24', '29', '44')\n\
->          \                         )\n\
->          \                         and not exists (\n\
->          \                                 select\n\
->          \                                         *\n\
->          \                                 from\n\
->          \                                         orders\n\
->          \                                 where\n\
->          \                                         o_custkey = c_custkey\n\
->          \                         )\n\
->          \         ) as custsale\n\
->          \ group by\n\
->          \         cntrycode\n\
->          \ order by\n\
->          \         cntrycode")
->   ]
diff --git a/tools/RunTests.hs b/tools/RunTests.hs
new file mode 100644
index 0000000..168c564
--- /dev/null
+++ b/tools/RunTests.hs
@@ -0,0 +1,8 @@
+
+
+import Test.Tasty
+
+import Language.SQL.SimpleSQL.Tests
+
+main :: IO ()
+main = defaultMain tests
diff --git a/tools/RunTests.lhs b/tools/RunTests.lhs
deleted file mode 100644
index 6322c10..0000000
--- a/tools/RunTests.lhs
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-> import Test.Tasty
-
-> import Language.SQL.SimpleSQL.Tests
-
-> main :: IO ()
-> main = defaultMain tests
diff --git a/tools/ShowErrors.hs b/tools/ShowErrors.hs
new file mode 100644
index 0000000..a7b1153
--- /dev/null
+++ b/tools/ShowErrors.hs
@@ -0,0 +1,7 @@
+
+
+import Language.SQL.SimpleSQL.ErrorMessages
+
+
+main :: IO ()
+main = putStrLn $ pExprs valueExpressions queryExpressions
diff --git a/tools/ShowErrors.lhs b/tools/ShowErrors.lhs
deleted file mode 100644
index 6a0edf0..0000000
--- a/tools/ShowErrors.lhs
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-> import Language.SQL.SimpleSQL.ErrorMessages
-
-
-> main :: IO ()
-> main = putStrLn $ pExprs valueExpressions queryExpressions
diff --git a/tools/SimpleSqlParserTool.hs b/tools/SimpleSqlParserTool.hs
new file mode 100644
index 0000000..15e521c
--- /dev/null
+++ b/tools/SimpleSqlParserTool.hs
@@ -0,0 +1,95 @@
+
+{-
+Simple command line tool to experiment with simple-sql-parser
+
+Commands:
+
+parse: parse sql from file, stdin or from command line
+lex: lex sql same
+indent: parse then pretty print sql
+-}
+
+{-# LANGUAGE TupleSections #-}
+import System.Environment
+import Control.Monad
+import Data.Maybe
+import System.Exit
+import Data.List
+import Text.Show.Pretty
+--import Control.Applicative
+
+import Language.SQL.SimpleSQL.Pretty
+import Language.SQL.SimpleSQL.Parse
+import Language.SQL.SimpleSQL.Lex
+
+
+main :: IO ()
+main = do
+    args <- getArgs
+    case args of
+        [] -> do
+              showHelp $ Just "no command given"
+        (c:as) -> do
+             let cmd = lookup c commands
+             maybe (showHelp (Just "command not recognised"))
+                   (\(_,cmd') -> cmd' as)
+                   cmd
+
+commands :: [(String, (String,[String] -> IO ()))]
+commands =
+    [("help", helpCommand)
+    ,("parse", parseCommand)
+    ,("lex", lexCommand)
+    ,("indent", indentCommand)]
+
+showHelp :: Maybe String -> IO ()
+showHelp msg = do
+          maybe (return ()) (\e -> putStrLn $ "Error: " ++ e) msg
+          putStrLn "Usage:\n SimpleSqlParserTool command args"
+          forM_ commands $ \(c, (h,_)) -> do
+               putStrLn $ c ++ "\t" ++ h
+          when (isJust msg) $ exitFailure
+
+helpCommand :: (String,[String] -> IO ())
+helpCommand =
+    ("show help for this program", \_ -> showHelp Nothing)
+
+getInput :: [String] -> IO (FilePath,String)
+getInput as =
+    case as of
+      ["-"] -> ("",) <$> getContents
+      ("-c":as') -> return ("", unwords as')
+      [filename] -> (filename,) <$> readFile filename
+      _ -> showHelp (Just "arguments not recognised") >> error ""
+
+parseCommand :: (String,[String] -> IO ())
+parseCommand =
+  ("parse SQL from file/stdin/command line (use -c to parse from command line)"
+  ,\args -> do
+      (f,src) <- getInput args
+      either (error . peFormattedError)
+          (putStrLn . ppShow)
+          $ parseStatements ansi2011 f Nothing src
+  )
+
+lexCommand :: (String,[String] -> IO ())
+lexCommand =
+  ("lex SQL from file/stdin/command line (use -c to parse from command line)"
+  ,\args -> do
+      (f,src) <- getInput args
+      either (error . peFormattedError)
+             (putStrLn . intercalate ",\n" . map show)
+             $ lexSQL ansi2011 f Nothing src
+  )
+
+
+indentCommand :: (String,[String] -> IO ())
+indentCommand =
+  ("parse then pretty print SQL from file/stdin/command line (use -c to parse from command line)"
+  ,\args -> do
+      (f,src) <- getInput args
+      either (error . peFormattedError)
+          (putStrLn . prettyStatements ansi2011)
+          $ parseStatements ansi2011 f Nothing src
+
+  )
diff --git a/tools/SimpleSqlParserTool.lhs b/tools/SimpleSqlParserTool.lhs
deleted file mode 100644
index 4e81c69..0000000
--- a/tools/SimpleSqlParserTool.lhs
+++ /dev/null
@@ -1,93 +0,0 @@
-
-Simple command line tool to experiment with simple-sql-parser
-
-Commands:
-
-parse: parse sql from file, stdin or from command line
-lex: lex sql same
-indent: parse then pretty print sql
-
-> {-# LANGUAGE TupleSections #-}
-> import System.Environment
-> import Control.Monad
-> import Data.Maybe
-> import System.Exit
-> import Data.List
-> import Text.Show.Pretty
-> --import Control.Applicative
-
-> import Language.SQL.SimpleSQL.Pretty
-> import Language.SQL.SimpleSQL.Parse
-> import Language.SQL.SimpleSQL.Lex
-
-
-> main :: IO ()
-> main = do
->     args <- getArgs
->     case args of
->         [] -> do
->               showHelp $ Just "no command given"
->         (c:as) -> do
->              let cmd = lookup c commands
->              maybe (showHelp (Just "command not recognised"))
->                    (\(_,cmd') -> cmd' as)
->                    cmd
-
-> commands :: [(String, (String,[String] -> IO ()))]
-> commands =
->     [("help", helpCommand)
->     ,("parse", parseCommand)
->     ,("lex", lexCommand)
->     ,("indent", indentCommand)]
-
-> showHelp :: Maybe String -> IO ()
-> showHelp msg = do
->           maybe (return ()) (\e -> putStrLn $ "Error: " ++ e) msg
->           putStrLn "Usage:\n SimpleSqlParserTool command args"
->           forM_ commands $ \(c, (h,_)) -> do
->                putStrLn $ c ++ "\t" ++ h
->           when (isJust msg) $ exitFailure
-
-> helpCommand :: (String,[String] -> IO ())
-> helpCommand =
->     ("show help for this progam", \_ -> showHelp Nothing)
-
-> getInput :: [String] -> IO (FilePath,String)
-> getInput as =
->     case as of
->       ["-"] -> ("",) <$> getContents
->       ("-c":as') -> return ("", unwords as')
->       [filename] -> (filename,) <$> readFile filename
->       _ -> showHelp (Just "arguments not recognised") >> error ""
-
-> parseCommand :: (String,[String] -> IO ())
-> parseCommand =
->   ("parse SQL from file/stdin/command line (use -c to parse from command line)"
->   ,\args -> do
->       (f,src) <- getInput args
->       either (error . peFormattedError)
->           (putStrLn . ppShow)
->           $ parseStatements ansi2011 f Nothing src
->   )
-
-> lexCommand :: (String,[String] -> IO ())
-> lexCommand =
->   ("lex SQL from file/stdin/command line (use -c to parse from command line)"
->   ,\args -> do
->       (f,src) <- getInput args
->       either (error . peFormattedError)
->              (putStrLn . intercalate ",\n" . map show)
->              $ lexSQL ansi2011 f Nothing src
->   )
-
-
-> indentCommand :: (String,[String] -> IO ())
-> indentCommand =
->   ("parse then pretty print SQL from file/stdin/command line (use -c to parse from command line)"
->   ,\args -> do
->       (f,src) <- getInput args
->       either (error . peFormattedError)
->           (putStrLn . prettyStatements ansi2011)
->           $ parseStatements ansi2011 f Nothing src
-
->   )
diff --git a/website/AddLinks.hs b/website/AddLinks.hs
new file mode 100644
index 0000000..f7ba98e
--- /dev/null
+++ b/website/AddLinks.hs
@@ -0,0 +1,34 @@
+
+-- Little hack to add links to the navigation bars
+
+main :: IO ()
+main = interact addLinks
+
+
+addLinks :: String -> String
+addLinks [] = error "not found"
+addLinks ('<':'/':'u':'l':'>':'\n':'<':'/':'d':'i':'v':'>':xs) =
+    "</ul>" ++ linkSection ++ "\n</div>" ++ xs
+addLinks (x:xs) = x : addLinks xs
+
+linkSection :: String
+linkSection =
+  "<hr />\n\
+  \<ul class=\"sectlevel1\">\n\
+  \<div id=\"toctitle\">Links</div>\n\
+  \<li><a href=\"index.html\">Index</a></li>\n\
+  \<li><a href='haddock/index.html'>Haddock</a></li>\n\
+  \<li><a href=\"supported_sql.html\" class=\"bare\">Supported SQL</a></li>\n\
+  \<li><a href=\"test_cases.html\">Test cases</a></li>\n\
+  \</ul>\n\
+  \<br />\n\
+  \<ul class=\"sectlevel1\">\n\
+  \<li><a href=\"http://jakewheat.github.io/simple-sql-parser/latest\" class=\"bare\">Homepage</a></li>\n\
+  \<li><a href=\"http://hackage.haskell.org/package/simple-sql-parser\" class=\"bare\">Hackage</a></li>\n\
+  \<li><a href=\"https://github.com/JakeWheat/simple-sql-parser\" class=\"bare\">Repository</a></li>\n\
+  \<li><a href=\"https://github.com/JakeWheat/simple-sql-parser/issues\" class=\"bare\">Bug tracker</a></li>\n\
+  \<li><a href=\"https://github.com/JakeWheat/simple-sql-parser/blob/master/changelog\" class=\"bare\">Changes</a></li>\n\
+  \<li><a href=\"http://jakewheat.github.io/simple-sql-parser/\" class=\"bare\">Other versions</a></li>\n\
+  \<li><a href=\"http://jakewheat.github.io/\" class=\"bare\">Parent project</a>\n\
+  \</li><li>jakewheatmail@gmail.com</li>\n\
+  \</ul>\n"
diff --git a/website/AddLinks.lhs b/website/AddLinks.lhs
deleted file mode 100644
index 2337494..0000000
--- a/website/AddLinks.lhs
+++ /dev/null
@@ -1,34 +0,0 @@
-
-Little hack to add links to the navigation bars
-
-> main :: IO ()
-> main = interact addLinks
-
-
-> addLinks :: String -> String
-> addLinks [] = error "not found"
-> addLinks ('<':'/':'u':'l':'>':'\n':'<':'/':'d':'i':'v':'>':xs) =
->     "</ul>" ++ linkSection ++ "\n</div>" ++ xs
-> addLinks (x:xs) = x : addLinks xs
-
-> linkSection :: String
-> linkSection =
->   "<hr />\n\
->   \<ul class=\"sectlevel1\">\n\
->   \<div id=\"toctitle\">Links</div>\n\
->   \<li><a href=\"index.html\">Index</a></li>\n\
->   \<li><a href='haddock/index.html'>Haddock</li>\n\
->   \<li><a href=\"supported_sql.html\" class=\"bare\">Supported SQL</a></li>\n\
->   \<li><a href=\"test_cases.html\">Test cases</a></li>\n\
->   \</ul>\n\
->   \<br />\n\
->   \<ul class=\"sectlevel1\">\n\
->   \<li><a href=\"http://jakewheat.github.io/simple-sql-parser/latest\" class=\"bare\">Homepage</a></li>\n\
->   \<li><a href=\"http://hackage.haskell.org/package/simple-sql-parser\" class=\"bare\">Hackage</a></li>\n\
->   \<li><a href=\"https://github.com/JakeWheat/simple-sql-parser\" class=\"bare\">Repository</a></li>\n\
->   \<li><a href=\"https://github.com/JakeWheat/simple-sql-parser/issues\" class=\"bare\">Bug tracker</a></li>\n\
->   \<li><a href=\"https://github.com/JakeWheat/simple-sql-parser/blob/master/changelog\" class=\"bare\">Changes</a></li>\n\
->   \<li><a href=\"http://jakewheat.github.io/simple-sql-parser/\" class=\"bare\">Other versions</a></li>\n\
->   \<li><a href=\"http://jakewheat.github.io/\" class=\"bare\">Parent project</a>\n\
->   \</li><li>jakewheatmail@gmail.com</li>\n\
->   \</ul>\n"
diff --git a/website/RenderTestCases.hs b/website/RenderTestCases.hs
new file mode 100644
index 0000000..559c391
--- /dev/null
+++ b/website/RenderTestCases.hs
@@ -0,0 +1,77 @@
+
+-- Converts the test data to asciidoc
+
+import Language.SQL.SimpleSQL.Tests
+import Text.Show.Pretty
+import Control.Monad.State
+import Language.SQL.SimpleSQL.Parse
+import Language.SQL.SimpleSQL.Lex
+import Data.List
+import Control.Monad (when, unless)
+
+data TableItem = Heading Int String
+               | Row String String
+
+doc :: Int -> TestItem -> [TableItem]
+-- filter out some groups of tests
+doc n (Group nm _) | "generated" `isInfixOf` nm = []
+doc n (Group nm is) =
+    Heading n nm
+    : concatMap (doc (n + 1)) is
+doc _ (TestScalarExpr _ str e) =
+    [Row str (ppShow e)]
+doc _ (TestQueryExpr _ str e) =
+    [Row str (ppShow e)]
+doc _ (TestStatement _ str e) =
+    [Row str (ppShow e)]
+doc _ (TestStatements _ str e) =
+    [Row str (ppShow e)]
+doc _ (ParseQueryExpr d str) =
+    [Row str (ppShow $ parseQueryExpr d "" Nothing str)]
+doc _ (ParseQueryExprFails d str) =
+    [Row str (ppShow $ parseQueryExpr d "" Nothing str)]
+doc _ (ParseScalarExprFails d str) =
+    [Row str (ppShow $ parseScalarExpr d "" Nothing str)]
+
+doc _ (LexTest d str t) =
+    [Row str (ppShow $ lexSQL d "" Nothing str)]
+
+doc _ (LexFails d str) =
+    [Row str (ppShow $ lexSQL d "" Nothing str)]
+
+-- TODO: should put the dialect in the html output
+
+
+render :: [TableItem] -> IO ()
+render = go False
+  where
+    go t (Heading level title : is) = do
+        when t $ putStrLn "|==="
+        -- slight hack
+        when (level > 1) $
+            putStrLn $ "\n" ++ replicate level '=' ++ " " ++ title
+        go False is
+    go t (Row sql hask : is) = do
+        unless t $ putStrLn "[cols=\"2\"]\n|==="
+        let sql' = "\n[source,sql]\n----\n" ++ sql ++ "\n----\n"
+            hask' = "\n[source,haskell]\n----\n" ++ hask ++ "\n----\n"
+        putStrLn $ "a| " ++ escapePipe sql'
+                   ++ "a| " ++ escapePipe hask' ++ " "
+        go True is
+    go t [] = when t $ putStrLn "|==="
+    escapePipe [] = []
+    escapePipe ('\\':'|':xs) = '\\' : '\\' : '\\' : '|' : escapePipe xs
+    escapePipe ('|':xs) = '\\' : '|' : escapePipe xs
+    escapePipe (x:xs) = x : escapePipe xs
+
+main :: IO ()
+main = do
+       putStrLn "\n:toc:\n\
+                \:toc-placement: macro\n\
+                \:sectnums:\n\
+                \:toclevels: 10\n\
+                \:sectnumlevels: 10\n\
+                \:source-highlighter: pygments\n\n\
+                \= simple-sql-parser examples/test cases\n\n\
+                \toc::[]\n"
+       render $ doc 1 testData
diff --git a/website/RenderTestCases.lhs b/website/RenderTestCases.lhs
deleted file mode 100644
index 4afcc7e..0000000
--- a/website/RenderTestCases.lhs
+++ /dev/null
@@ -1,77 +0,0 @@
-
-Converts the test data to asciidoc
-
-> import Language.SQL.SimpleSQL.Tests
-> import Text.Show.Pretty
-> import Control.Monad.State
-> import Language.SQL.SimpleSQL.Parse
-> import Language.SQL.SimpleSQL.Lex
-> import Data.List
-> import Control.Monad (when, unless)
-
-> data TableItem = Heading Int String
->                | Row String String
-
-> doc :: Int -> TestItem -> [TableItem]
-> -- filter out some groups of tests
-> doc n (Group nm _) | "generated" `isInfixOf` nm = []
-> doc n (Group nm is) =
->     Heading n nm
->     : concatMap (doc (n + 1)) is
-> doc _ (TestScalarExpr _ str e) =
->     [Row str (ppShow e)]
-> doc _ (TestQueryExpr _ str e) =
->     [Row str (ppShow e)]
-> doc _ (TestStatement _ str e) =
->     [Row str (ppShow e)]
-> doc _ (TestStatements _ str e) =
->     [Row str (ppShow e)]
-> doc _ (ParseQueryExpr d str) =
->     [Row str (ppShow $ parseQueryExpr d "" Nothing str)]
-> doc _ (ParseQueryExprFails d str) =
->     [Row str (ppShow $ parseQueryExpr d "" Nothing str)]
-> doc _ (ParseScalarExprFails d str) =
->     [Row str (ppShow $ parseScalarExpr d "" Nothing str)]
-
-> doc _ (LexTest d str t) =
->     [Row str (ppShow $ lexSQL d "" Nothing str)]
-
-> doc _ (LexFails d str) =
->     [Row str (ppShow $ lexSQL d "" Nothing str)]
-
-TODO: should put the dialect in the html output
-
-
-> render :: [TableItem] -> IO ()
-> render = go False
->   where
->     go t (Heading level title : is) = do
->         when t $ putStrLn "|==="
->         -- slight hack
->         when (level > 1) $
->             putStrLn $ "\n" ++ replicate level '=' ++ " " ++ title
->         go False is
->     go t (Row sql hask : is) = do
->         unless t $ putStrLn "[cols=\"2\"]\n|==="
->         let sql' = "\n[source,sql]\n----\n" ++ sql ++ "\n----\n"
->             hask' = "\n[source,haskell]\n----\n" ++ hask ++ "\n----\n"
->         putStrLn $ "a| " ++ escapePipe sql'
->                    ++ "a| " ++ escapePipe hask' ++ " "
->         go True is
->     go t [] = when t $ putStrLn "|==="
->     escapePipe [] = []
->     escapePipe ('\\':'|':xs) = '\\' : '\\' : '\\' : '|' : escapePipe xs
->     escapePipe ('|':xs) = '\\' : '|' : escapePipe xs
->     escapePipe (x:xs) = x : escapePipe xs
-
-> main :: IO ()
-> main = do
->        putStrLn "\n:toc:\n\
->                 \:toc-placement: macro\n\
->                 \:sectnums:\n\
->                 \:toclevels: 10\n\
->                 \:sectnumlevels: 10\n\
->                 \:source-highlighter: pygments\n\n\
->                 \= simple-sql-parser examples/test cases\n\n\
->                 \toc::[]\n"
->        render $ doc 1 testData