Merge branch 'master' into finish_syntax_colon

db4
Slava Pestov 2009-03-21 18:13:47 -05:00
commit d1436cea15
33 changed files with 301 additions and 174 deletions

View File

@ -25,8 +25,7 @@ M: tuple-class component-tag ( tag class -- )
[ compile-component-attrs ] 2bi
[ render ] [code] ;
: COMPONENT:
SYNTAX: COMPONENT:
scan-word
[ name>> ] [ '[ _ component-tag ] ] bi
define-chloe-tag ;
parsing
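
This hunk shows the pattern applied throughout the commit: parsing words written as ordinary colon definitions with a trailing parsing declaration become SYNTAX: definitions. Disentangling the interleaved old and new lines above gives roughly this before/after (a sketch, not the exact file contents):

! Before: an ordinary colon definition, marked as a parsing word after the fact
: COMPONENT:
    scan-word
    [ name>> ] [ '[ _ component-tag ] ] bi
    define-chloe-tag ;
parsing

! After: SYNTAX: declares the parsing word directly
SYNTAX: COMPONENT:
    scan-word
    [ name>> ] [ '[ _ component-tag ] ] bi
    define-chloe-tag ;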

View File

@ -7,30 +7,30 @@ IN: io.encodings.iso2022
[ "hello" ] [ "hello" >byte-array iso2022 decode ] unit-test
[ "hello" ] [ "hello" iso2022 encode >string ] unit-test
[ "hi" ] [ B{ CHAR: h $ ESC CHAR: ( CHAR: B CHAR: i } iso2022 decode ] unit-test
[ "hi" ] [ B{ CHAR: h CHAR: i $ ESC CHAR: ( CHAR: B } iso2022 decode ] unit-test
[ "hi\u00fffd" ] [ B{ CHAR: h CHAR: i $ ESC CHAR: ( } iso2022 decode ] unit-test
[ "hi\u00fffd" ] [ B{ CHAR: h CHAR: i $ ESC } iso2022 decode ] unit-test
[ "hi" ] [ B{ CHAR: h ESC CHAR: ( CHAR: B CHAR: i } iso2022 decode ] unit-test
[ "hi" ] [ B{ CHAR: h CHAR: i ESC CHAR: ( CHAR: B } iso2022 decode ] unit-test
[ "hi\u00fffd" ] [ B{ CHAR: h CHAR: i ESC CHAR: ( } iso2022 decode ] unit-test
[ "hi\u00fffd" ] [ B{ CHAR: h CHAR: i ESC } iso2022 decode ] unit-test
[ B{ CHAR: h $ ESC CHAR: ( CHAR: J HEX: D8 } ] [ "h\u00ff98" iso2022 encode ] unit-test
[ "h\u00ff98" ] [ B{ CHAR: h $ ESC CHAR: ( CHAR: J HEX: D8 } iso2022 decode ] unit-test
[ "hi" ] [ B{ CHAR: h $ ESC CHAR: ( CHAR: J CHAR: i } iso2022 decode ] unit-test
[ "h" ] [ B{ CHAR: h $ ESC CHAR: ( CHAR: J } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: ( CHAR: J HEX: 80 } iso2022 decode ] unit-test
[ B{ CHAR: h ESC CHAR: ( CHAR: J HEX: D8 } ] [ "h\u00ff98" iso2022 encode ] unit-test
[ "h\u00ff98" ] [ B{ CHAR: h ESC CHAR: ( CHAR: J HEX: D8 } iso2022 decode ] unit-test
[ "hi" ] [ B{ CHAR: h ESC CHAR: ( CHAR: J CHAR: i } iso2022 decode ] unit-test
[ "h" ] [ B{ CHAR: h ESC CHAR: ( CHAR: J } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: ( CHAR: J HEX: 80 } iso2022 decode ] unit-test
[ B{ CHAR: h $ ESC CHAR: $ CHAR: B HEX: 3E HEX: 47 } ] [ "h\u007126" iso2022 encode ] unit-test
[ "h\u007126" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: B HEX: 3E HEX: 47 } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: B HEX: 3E } iso2022 decode ] unit-test
[ "h" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: B } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: $ } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: B HEX: 80 HEX: 80 } iso2022 decode ] unit-test
[ B{ CHAR: h ESC CHAR: $ CHAR: B HEX: 3E HEX: 47 } ] [ "h\u007126" iso2022 encode ] unit-test
[ "h\u007126" ] [ B{ CHAR: h ESC CHAR: $ CHAR: B HEX: 3E HEX: 47 } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: $ CHAR: B HEX: 3E } iso2022 decode ] unit-test
[ "h" ] [ B{ CHAR: h ESC CHAR: $ CHAR: B } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: $ } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: $ CHAR: B HEX: 80 HEX: 80 } iso2022 decode ] unit-test
[ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D HEX: 38 HEX: 54 } ] [ "h\u0058ce" iso2022 encode ] unit-test
[ "h\u0058ce" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D HEX: 38 HEX: 54 } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D HEX: 38 } iso2022 decode ] unit-test
[ "h" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D HEX: 70 HEX: 70 } iso2022 decode ] unit-test
[ B{ CHAR: h ESC CHAR: $ CHAR: ( CHAR: D HEX: 38 HEX: 54 } ] [ "h\u0058ce" iso2022 encode ] unit-test
[ "h\u0058ce" ] [ B{ CHAR: h ESC CHAR: $ CHAR: ( CHAR: D HEX: 38 HEX: 54 } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: $ CHAR: ( CHAR: D HEX: 38 } iso2022 decode ] unit-test
[ "h" ] [ B{ CHAR: h ESC CHAR: $ CHAR: ( CHAR: D } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: $ CHAR: ( } iso2022 decode ] unit-test
[ "h\u00fffd" ] [ B{ CHAR: h ESC CHAR: $ CHAR: ( CHAR: D HEX: 70 HEX: 70 } iso2022 decode ] unit-test
[ "\u{syriac-music}" iso2022 encode ] must-fail

View File

@ -2,7 +2,7 @@
! See http://factorcode.org/license.txt for BSD license.
USING: io.encodings kernel sequences io simple-flat-file sets math
combinators.short-circuit io.binary values arrays assocs
locals accessors combinators literals biassocs byte-arrays ;
locals accessors combinators biassocs byte-arrays parser ;
IN: io.encodings.iso2022
SINGLETON: iso2022
@ -31,12 +31,12 @@ M: iso2022 <encoder>
M: iso2022 <decoder>
make-iso-coder <decoder> ;
CONSTANT: ESC HEX: 16
<< SYNTAX: ESC HEX: 16 parsed ; >>
CONSTANT: switch-ascii B{ $ ESC CHAR: ( CHAR: B }
CONSTANT: switch-jis201 B{ $ ESC CHAR: ( CHAR: J }
CONSTANT: switch-jis208 B{ $ ESC CHAR: $ CHAR: B }
CONSTANT: switch-jis212 B{ $ ESC CHAR: $ CHAR: ( CHAR: D }
CONSTANT: switch-ascii B{ ESC CHAR: ( CHAR: B }
CONSTANT: switch-jis201 B{ ESC CHAR: ( CHAR: J }
CONSTANT: switch-jis208 B{ ESC CHAR: $ CHAR: B }
CONSTANT: switch-jis212 B{ ESC CHAR: $ CHAR: ( CHAR: D }
: find-type ( char -- code type )
{
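
The change above turns the ESC constant, which byte-array literals had to reference through the literals vocabulary's $ escape, into a parsing word that expands to the byte value at parse time, so B{ } literals can name it directly. A before/after sketch reconstructed from the interleaved lines:

! Before: a constant, spliced into byte-array literals via $ from the literals vocab
CONSTANT: ESC HEX: 16
CONSTANT: switch-ascii B{ $ ESC CHAR: ( CHAR: B }

! After: a parsing word that emits the value itself, so B{ } can use it directly
<< SYNTAX: ESC HEX: 16 parsed ; >>
CONSTANT: switch-ascii B{ ESC CHAR: ( CHAR: B }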

View File

@ -1 +1,2 @@
Doug Coleman
Daniel Ehrenberg

View File

@ -230,7 +230,10 @@ TUPLE: class-partition integers not-integers simples not-simples and or other ;
dup or-class flatten partition-classes
dup not-integers>> length {
{ 0 [ nip make-or-class ] }
{ 1 [ not-integers>> first [ class>> '[ _ swap class-member? ] any? ] keep or ] }
{ 1 [
not-integers>> first
[ class>> '[ _ swap class-member? ] any? ] keep or
] }
[ 3drop t ]
} case ;
@ -251,6 +254,12 @@ M: or-class <not-class>
M: t <not-class> drop f ;
M: f <not-class> drop t ;
: <minus-class> ( a b -- a-b )
<not-class> 2array <and-class> ;
: <sym-diff-class> ( a b -- a~b )
2array [ <or-class> ] [ <and-class> ] bi <minus-class> ;
M: primitive-class class-member?
class>> class-member? ;
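
The two new combinators encode the usual set identities: a difference is an intersection with a complement, and a symmetric difference is the union minus the intersection. In regexp character-class syntax (added in the parser, documentation and tests below) this works out to, for example:

! [\p{script=latin}--\p{lower}]  matches "A" but not "a" or "π"
! [\p{script=latin}~~\p{lower}]  matches "A" and "π" but not "a"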

View File

@ -11,7 +11,7 @@ IN: regexp.parser.tests
"a|b" "a.b" "a|b|c" "abc|b" "a|bcd" "a|(b)" "(?-i:a)" "||"
"(a)|b" "(a|b)" "((a)|(b))" "(?:a)" "(?i:a)" "|b" "b|"
"[abc]" "[a-c]" "[^a-c]" "[^]]" "[]a]" "[[]" "[]-a]" "[a-]" "[-]"
"[--a]" "foo*" "(foo)*" "(a|b)|c" "(foo){2,3}" "(foo){2,}"
"foo*" "(foo)*" "(a|b)|c" "(foo){2,3}" "(foo){2,}"
"(foo){2}" "{2,3}" "{," "{,}" "}" "foo}" "[^]-a]" "[^-]a]"
"[a-]" "[^a-]" "[^a-]" "a{,2}" "(?#foobar)"
"\\p{Space}" "\\t" "\\[" "[\\]]" "\\P{Space}"

View File

@ -148,19 +148,29 @@ Character = EscapeSequence
| "^" => [[ ^ <tagged-epsilon> ]]
| . ?[ allowed-char? ]?
AnyRangeCharacter = EscapeSequence | .
AnyRangeCharacter = !("&&"|"||"|"--"|"~~") (EscapeSequence | .)
RangeCharacter = !("]") AnyRangeCharacter
Range = RangeCharacter:a "-" RangeCharacter:b => [[ a b <range-class> ]]
Range = RangeCharacter:a "-" !("-") RangeCharacter:b => [[ a b <range-class> ]]
| RangeCharacter
StartRange = AnyRangeCharacter:a "-" RangeCharacter:b => [[ a b <range-class> ]]
StartRange = AnyRangeCharacter:a "-" !("-") RangeCharacter:b => [[ a b <range-class> ]]
| AnyRangeCharacter
Ranges = StartRange:s Range*:r => [[ r s prefix ]]
CharClass = "^"?:n Ranges:e => [[ e n char-class ]]
BasicCharClass = "^"?:n Ranges:e => [[ e n char-class ]]
CharClass = BasicCharClass:b "&&" CharClass:c
=> [[ b c 2array <and-class> ]]
| BasicCharClass:b "||" CharClass:c
=> [[ b c 2array <or-class> ]]
| BasicCharClass:b "~~" CharClass:c
=> [[ b c <sym-diff-class> ]]
| BasicCharClass:b "--" CharClass:c
=> [[ b c <minus-class> ]]
| BasicCharClass
Options = [idmsux]*

View File

@ -45,11 +45,11 @@ ARTICLE: { "regexp" "construction" } "Constructing regular expressions"
ARTICLE: { "regexp" "syntax" } "Regular expression syntax"
"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely. Below, the syntax is documented."
{ $heading "Characters" }
"At its core, regular expressions consist of character literals. For example, " { $snippet "R/ f/" } " is a regular expression matching just the string 'f'. In addition, the normal escape codes are provided, like " { $snippet "\\t" } " for the tab character and " { $snippet "\\uxxxxxx" } "for an arbitrary Unicode code point, by its hex value. In addition, any character can be preceded by a backslash to escape it, unless this has special meaning. For example, to match a literal opening parenthesis, use " { $snippet "\\(" } "."
"At its core, regular expressions consist of character literals. For example, " { $snippet "R/ f/" } " is a regular expression matching just the string 'f'. In addition, the normal escape codes are provided, like " { $snippet "\\t" } " for the tab character and " { $snippet "\\uxxxxxx" } " for an arbitrary Unicode code point, by its hex value. In addition, any character can be preceded by a backslash to escape it, unless this has special meaning. For example, to match a literal opening parenthesis, use " { $snippet "\\(" } "."
{ $heading "Concatenation, alternation and grouping" }
"Regular expressions can be built out of multiple characters by concatenation. For example, " { $snippet "R/ ab/" } " matches a followed by b. The " { $snippet "|" } " (alternation) operator can construct a regexp which matches one of two alternatives. Parentheses can be used for gropuing. So " { $snippet "R/ f(oo|ar)/" } " would match either 'foo' or 'far'."
"Regular expressions can be built out of multiple characters by concatenation. For example, " { $snippet "R/ ab/" } " matches a followed by b. The " { $snippet "|" } " (alternation) operator can construct a regexp which matches one of two alternatives. Parentheses can be used for grouping. So " { $snippet "R/ f(oo|ar)/" } " would match either 'foo' or 'far'."
{ $heading "Character classes" }
"Square brackets define a convenient way to refer to a set of characters. For example, " { $snippet "[ab]" } " refers to either a or b. And " { $snippet "[a-z]" } " refers to all of the characters between a and z, in code point order. You can use these together, as in " { $snippet "[ac-fz]" } " which matches all of the characters between c and f, in addition to a and z. Character classes can be negated using a carat, as in " { $snippet "[^a]" } " which matches all characters which are not a."
"Square brackets define a convenient way to refer to a set of characters. For example, " { $snippet "[ab]" } " refers to either a or b. And " { $snippet "[a-z]" } " refers to all of the characters between a and z, in code point order. You can use these together, as in " { $snippet "[ac-fz]" } " which matches all of the characters between c and f, in addition to a and z. Character classes can be negated using a caret, as in " { $snippet "[^a]" } " which matches all characters which are not a."
{ $heading "Predefined character classes" }
"Several character classes are predefined, both for convenience and because they are too large to represent directly. In Factor regular expressions, all character classes are Unicode-aware."
{ $table
@ -72,10 +72,12 @@ ARTICLE: { "regexp" "syntax" } "Regular expression syntax"
{ { $snippet "\\p{blank}" } "Non-newline whitespace" }
{ { $snippet "\\p{cntrl}" } "Control character" }
{ { $snippet "\\p{space}" } "Whitespace" }
{ { $snippet "\\p{xdigit}" } "Hexidecimal digit" }
{ { $snippet "\\p{xdigit}" } "Hexadecimal digit" }
{ { $snippet "\\p{Nd}" } "Character in Unicode category Nd" }
{ { $snippet "\\p{Z}" } "Character in Unicode category beginning with Z" }
{ { $snippet "\\p{script=Cham}" } "Character in the Cham writing system" } }
{ $heading "Character class operations" }
"Character classes can be composed using four binary operations: " { $snippet "|| && ~~ --" } ". These do the operations union, intersection, symmetric difference and difference, respectively. For example, characters which are lower-case but not Latin script could be matched as " { $snippet "[\\p{lower}--\\p{script=latin}]" } ". These operations are right-associative, and " { $snippet "^" } " binds tighter than them. There is no syntax for grouping."
{ $heading "Boundaries" }
"Special operators exist to match certain points in the string. These are called 'zero-width' because they do not consume any characters."
{ $table
@ -107,9 +109,18 @@ ARTICLE: { "regexp" "syntax" } "Regular expression syntax"
{ $heading "Quotation" }
"To make it convenient to have a long string which uses regexp operators, a special syntax is provided. If a substring begins with " { $snippet "\\Q" } " then everything until " { $snippet "\\E" } " is quoted (escaped). For example, " { $snippet "R/ \\Qfoo\\bar|baz()\\E/" } " matches exactly the string " { $snippet "\"foo\\bar|baz()\"" } "."
{ $heading "Unsupported features" }
"One missing feature is backreferences. This is because of a design decision to allow only regular expressions following the formal theory of regular languages. For more information, see " { $link { "regexp" "theory" } } ". You can create a new regular expression to match a particular string using " { $vocab-link "regexp.combinators" } " and group capture is available to extract parts of a regular expression match." $nl
"Another feature is Perl's " { $snippet "\\G" } " syntax, which references the previous match, is not included. This is because that sequence is inherently stateful, and Factor regexps don't hold state." $nl
"None of the operations which embed code into a regexp are supported, as this would require the inclusion of the Factor parser and compiler in any application which wants to expose regexps to the user. None of the casing operations are included of Perl like \\L, for simplicity." ; ! Also describe syntax, from the beginning
{ $subheading "Group capture" }
{ $subheading "Reluctant and posessive quantifiers" }
{ $subheading "Backreferences" }
"Backreferences were omitted because of a design decision to allow only regular expressions following the formal theory of regular languages. For more information, see " { $link { "regexp" "theory" } } "."
$nl
"To work around the lack of backreferences, consider using group capture and then creating a new regular expression to match the captured string using " { $vocab-link "regexp.combinators" } "."
{ $subheading "Previous match" }
"Another feature that is not included is Perl's " { $snippet "\\G" } " syntax, which references the previous match. This is because that sequence is inherently stateful, and Factor regexps don't hold state."
{ $subheading "Embedding code" }
"Operations which embed code into a regexp are not supported. This would require the inclusion of the Factor parser and compiler in any deployed application which wants to expose regexps to the user, leading to an undesirable increase in the code size."
{ $heading "Casing operations" }
"No special casing operations are included, for example Perl's " { $snippet "\\L" } "." ;
ARTICLE: { "regexp" "options" } "Regular expression options"
"When " { $link { "regexp" "construction" } } ", various options can be provided. Options have single-character names. A string of options has one of the following two forms:"
@ -152,7 +163,7 @@ ARTICLE: { "regexp" "theory" } "The theory of regular expressions"
"This implies, by DeMorgan's law, that, if you have two regular languages, their intersection is also regular. That is, for any two regular expressions, there exists a regular expression which matches strings that match both inputs." $nl
"Traditionally, regular expressions on computer support an additional operation: backreferences. For example, the Perl regexp " { $snippet "/(.*)$1/" } " matches a string repated twice. If a backreference refers to a string with a predetermined maximum length, then the resulting language is still regular." $nl
"But, if not, the language is not regular. There is strong evidence that there is no efficient way to parse with backreferences in the general case. Perl uses a naive backtracking algorithm which has pathological behavior in some cases, taking exponential time to match even if backreferences aren't used. Additionally, expressions with backreferences don't have the properties with negation and intersection described above." $nl
"The Factor regular expression engine was built with the design decision to support negation and intersection at the expense of backreferences. This lets us have a guaranteed linear-time matching algorithm. Systems like Ragel and Lex also use this algorithm, but in the Factor regular expression engine, all other features of regexps are still present." ;
"The Factor regular expression engine was built with the design decision to support negation and intersection at the expense of backreferences. This lets us have a guaranteed linear-time matching algorithm. Systems like Ragel and Lex use the same algorithm." ;
ARTICLE: { "regexp" "operations" } "Matching operations with regular expressions"
"Testing if a string matches a regular expression:"

View File

@ -508,3 +508,29 @@ IN: regexp-tests
[ t ] [ " " R/ \P{LL}/ matches? ] unit-test
[ f ] [ "a" R/ \P{sCriPt = latin}/ matches? ] unit-test
[ t ] [ " " R/ \P{SCRIPT = laTIn}/ matches? ] unit-test
! Logical operators
[ t ] [ "a" R/ [\p{script=latin}\p{lower}]/ matches? ] unit-test
[ t ] [ "π" R/ [\p{script=latin}\p{lower}]/ matches? ] unit-test
[ t ] [ "A" R/ [\p{script=latin}\p{lower}]/ matches? ] unit-test
[ f ] [ "3" R/ [\p{script=latin}\p{lower}]/ matches? ] unit-test
[ t ] [ "a" R/ [\p{script=latin}||\p{lower}]/ matches? ] unit-test
[ t ] [ "π" R/ [\p{script=latin}||\p{lower}]/ matches? ] unit-test
[ t ] [ "A" R/ [\p{script=latin}||\p{lower}]/ matches? ] unit-test
[ f ] [ "3" R/ [\p{script=latin}||\p{lower}]/ matches? ] unit-test
[ t ] [ "a" R/ [\p{script=latin}&&\p{lower}]/ matches? ] unit-test
[ f ] [ "π" R/ [\p{script=latin}&&\p{lower}]/ matches? ] unit-test
[ f ] [ "A" R/ [\p{script=latin}&&\p{lower}]/ matches? ] unit-test
[ f ] [ "3" R/ [\p{script=latin}&&\p{lower}]/ matches? ] unit-test
[ f ] [ "a" R/ [\p{script=latin}~~\p{lower}]/ matches? ] unit-test
[ t ] [ "π" R/ [\p{script=latin}~~\p{lower}]/ matches? ] unit-test
[ t ] [ "A" R/ [\p{script=latin}~~\p{lower}]/ matches? ] unit-test
[ f ] [ "3" R/ [\p{script=latin}~~\p{lower}]/ matches? ] unit-test
[ f ] [ "a" R/ [\p{script=latin}--\p{lower}]/ matches? ] unit-test
[ f ] [ "π" R/ [\p{script=latin}--\p{lower}]/ matches? ] unit-test
[ t ] [ "A" R/ [\p{script=latin}--\p{lower}]/ matches? ] unit-test
[ f ] [ "3" R/ [\p{script=latin}--\p{lower}]/ matches? ] unit-test

View File

@ -1,8 +1,24 @@
USING: help.syntax help.markup strings ;
! Copyright (C) 2009 Daniel Ehrenberg
! See http://factorcode.org/license.txt for BSD license.
USING: help.syntax help.markup strings biassocs arrays ;
IN: simple-flat-file
ABOUT: "simple-flat-file"
ARTICLE: "simple-flat-file" "Parsing simple flat files"
"The " { $vocab-link "simple-flat-file" } " vocabulary provides words for loading and parsing simple flat files in a particular format which is common for encoding tasks."
{ $subsection flat-file>biassoc } ;
"The " { $vocab-link "simple-flat-file" } " vocabulary provides words for loading and parsing simple flat files in a particular format which is common for encoding and Unicode tasks."
{ $subsection flat-file>biassoc }
{ $subsection load-interval-file }
{ $subsection data } ;
HELP: load-interval-file
{ $values { "filename" string } { "table" "an interval map" } }
{ $description "This loads a file that looks like Script.txt in the Unicode Character Database and converts it into an efficient interval map, where the keys are characters and the values are strings for the properties." } ;
HELP: data
{ $values { "filename" string } { "data" array } }
{ $description "This loads a file that's delineated by semicolons and lines, returning an array of lines, where each line is an array split by the semicolons, with whitespace trimmed off." } ;
HELP: flat-file>biassoc
{ $values { "filename" string } { "biassoc" biassoc } }
{ $description "This loads a flat file, in the form that many encoding resource files are in, with two columns of numeric data in hex, and returns a biassoc associating them." } ;

View File

@ -1,7 +1,7 @@
! Copyright (C) 2009 Daniel Ehrenberg
! See http://factorcode.org/license.txt for BSD license.
USING: sequences splitting kernel math.parser io.files io.encodings.utf8
biassocs ascii ;
biassocs ascii namespaces arrays make assocs interval-maps sets ;
IN: simple-flat-file
: drop-comments ( seq -- newseq )
@ -30,3 +30,25 @@ IN: simple-flat-file
: data ( filename -- data )
utf8 file-lines drop-comments [ split-; ] map ;
SYMBOL: interned
: range, ( value key -- )
swap interned get
[ = ] with find nip 2array , ;
: expand-ranges ( assoc -- interval-map )
[
[
swap CHAR: . over member? [
".." split1 [ hex> ] bi@ 2array
] [ hex> ] if range,
] assoc-each
] { } make <interval-map> ;
: process-interval-file ( ranges -- table )
dup values prune interned
[ expand-ranges ] with-variable ;
: load-interval-file ( filename -- table )
data process-interval-file ;

View File

@ -4,8 +4,9 @@ USING: combinators.short-circuit unicode.categories kernel math
combinators splitting sequences math.parser io.files io assocs
arrays namespaces make math.ranges unicode.normalize
unicode.normalize.private values io.encodings.ascii
unicode.syntax unicode.data compiler.units fry
alien.syntax sets accessors interval-maps memoize locals words ;
unicode.data compiler.units fry unicode.categories.syntax
alien.syntax sets accessors interval-maps memoize locals words
simple-flat-file ;
IN: unicode.breaks
<PRIVATE
@ -31,9 +32,9 @@ CATEGORY: grapheme-control Zl Zp Cc Cf ;
[ drop Control ]
} case ;
CATEGORY: (extend) Me Mn ;
: extend? ( ch -- ? )
{ [ (extend)? ] [ "Other_Grapheme_Extend" property? ] } 1|| ;
CATEGORY: extend
Me Mn |
"Other_Grapheme_Extend" property? ;
: loe? ( ch -- ? )
"Logical_Order_Exception" property? ;
@ -127,7 +128,7 @@ to: grapheme-table
VALUE: word-break-table
"vocab:unicode/data/WordBreakProperty.txt" load-key-value
"vocab:unicode/data/WordBreakProperty.txt" load-interval-file
to: word-break-table
C-ENUM: wOther wCR wLF wNewline wExtend wFormat wKatakana wALetter wMidLetter

View File

@ -1,8 +1,8 @@
! Copyright (C) 2008, 2009 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: unicode.data sequences namespaces
sbufs make unicode.syntax unicode.normalize math hints
unicode.categories combinators unicode.syntax assocs combinators.short-circuit
sbufs make unicode.normalize math hints
unicode.categories combinators assocs combinators.short-circuit
strings splitting kernel accessors unicode.breaks fry locals ;
QUALIFIED: ascii
IN: unicode.case

View File

@ -12,6 +12,9 @@ HELP: Letter
HELP: alpha
{ $class-description "The class of alphanumeric characters." } ;
HELP: math
{ $class-description "The class of Unicode math characters." } ;
HELP: blank
{ $class-description "The class of whitespace characters." } ;
@ -54,6 +57,8 @@ ARTICLE: "unicode.categories" "Character classes"
{ $subsection uncased }
{ $subsection uncased? }
{ $subsection character }
{ $subsection character? } ;
{ $subsection character? }
{ $subsection math }
{ $subsection math? } ;
ABOUT: "unicode.categories"

View File

@ -1,15 +1,16 @@
! Copyright (C) 2008 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: unicode.syntax ;
USING: unicode.categories.syntax sequences unicode.data ;
IN: unicode.categories
CATEGORY: blank Zs Zl Zp \r\n ;
CATEGORY: letter Ll ;
CATEGORY: LETTER Lu ;
CATEGORY: Letter Lu Ll Lt Lm Lo ;
CATEGORY: blank Zs Zl Zp | "\r\n" member? ;
CATEGORY: letter Ll | "Other_Lowercase" property? ;
CATEGORY: LETTER Lu | "Other_Uppercase" property? ;
CATEGORY: Letter Lu Ll Lt Lm Lo Nl ;
CATEGORY: digit Nd Nl No ;
CATEGORY-NOT: printable Cc Cf Cs Co Cn ;
CATEGORY: alpha Lu Ll Lt Lm Lo Nd Nl No ;
CATEGORY: alpha Lu Ll Lt Lm Lo Nd Nl No | "Other_Alphabetic" property? ;
CATEGORY: control Cc ;
CATEGORY-NOT: uncased Lu Ll Lt Lm Mn Me ;
CATEGORY-NOT: character Cn ;
CATEGORY: math Sm | "Other_Math" property? ;

View File

@ -0,0 +1,19 @@
! Copyright (C) 2008 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: help.syntax help.markup ;
IN: unicode.categories.syntax
ABOUT: "unicode.categories.syntax"
ARTICLE: "unicode.categories.syntax" "Unicode category syntax"
"There is special syntax sugar for making predicate classes which are unions of Unicode general categories, plus some other code."
{ $subsection POSTPONE: CATEGORY: }
{ $subsection POSTPONE: CATEGORY-NOT: } ;
HELP: CATEGORY:
{ $syntax "CATEGORY: foo Nl Pd Lu | \"Diacritic\" property? ;" }
{ $description "This defines a predicate class which is a subset of code points. In this example, " { $snippet "foo" } " is the class of characters which are in the general category Nl or Pd or Lu, or which have the Diacritic property." } ;
HELP: CATEGORY-NOT:
{ $syntax "CATEGORY-NOT: foo Nl Pd Lu | \"Diacritic\" property? ;" }
{ $description "This defines a predicate class which is a subset of code points, the complement of what " { $link POSTPONE: CATEGORY: } " would define. In this example, " { $snippet "foo" } " is the class of characters which are neither in the general category Nl or Pd or Lu, nor have the Diacritic property." } ;

View File

@ -0,0 +1,3 @@
! Copyright (C) 2009 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.

View File

@ -0,0 +1,34 @@
! Copyright (C) 2008, 2009 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: unicode.data kernel math sequences parser unicode.data.private
bit-arrays namespaces sequences.private arrays classes.parser
assocs classes.predicate sets fry splitting accessors ;
IN: unicode.categories.syntax
! For use in CATEGORY:
SYMBOLS: Cn Lu Ll Lt Lm Lo Mn Mc Me Nd Nl No Pc Pd Ps Pe Pi Pf Po Sm Sc Sk So Zs Zl Zp Cc Cf Cs Co | ;
<PRIVATE
: [category] ( categories code -- quot )
'[ dup category# _ member? [ drop t ] _ if ] ;
: integer-predicate-class ( word predicate -- )
integer swap define-predicate-class ;
: define-category ( word categories code -- )
[category] integer-predicate-class ;
: define-not-category ( word categories code -- )
[category] [ not ] compose integer-predicate-class ;
: parse-category ( -- word tokens quot )
CREATE-CLASS \ ; parse-until { | } split1
[ [ name>> categories-map at ] map ]
[ [ [ ] like ] [ [ drop f ] ] if* ] bi* ;
PRIVATE>
SYNTAX: CATEGORY: parse-category define-category ;
SYNTAX: CATEGORY-NOT: parse-category define-not-category ;
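
Roughly, a definition such as CATEGORY: math Sm | "Other_Math" property? ; (added in unicode.categories below) expands through [category] into an integer predicate class. A hand-written equivalent sketch, with 25 standing in as an illustrative placeholder for Sm's index in categories-map:

USING: kernel math sequences unicode.data ;
! Hypothetical expansion sketch; the real index comes from categories-map
PREDICATE: math < integer
    dup category# { 25 } member?
    [ drop t ] [ "Other_Math" property? ] if ;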

View File

@ -4,7 +4,7 @@ USING: combinators.short-circuit sequences io.files
io.encodings.ascii kernel values splitting accessors math.parser
ascii io assocs strings math namespaces make sorting combinators
math.order arrays unicode.normalize unicode.data locals
unicode.syntax macros sequences.deep words unicode.breaks
macros sequences.deep words unicode.breaks
quotations combinators.short-circuit simple-flat-file ;
IN: unicode.collation

View File

@ -6,7 +6,7 @@ IN: unicode.data
ABOUT: "unicode.data"
ARTICLE: "unicode.data" "Unicode data tables"
"The " { $vocab-link "unicode.data" "unicode.data" } " vocabulary contains core Unicode data tables and code for parsing this from files."
"The " { $vocab-link "unicode.data" "unicode.data" } " vocabulary contains core Unicode data tables and code for parsing this from files. The following words access these data tables."
{ $subsection canonical-entry }
{ $subsection combine-chars }
{ $subsection combining-class }
@ -14,7 +14,11 @@ ARTICLE: "unicode.data" "Unicode data tables"
{ $subsection name>char }
{ $subsection char>name }
{ $subsection property? }
{ $subsection load-key-value } ;
{ $subsection category }
{ $subsection ch>upper }
{ $subsection ch>lower }
{ $subsection ch>title }
{ $subsection special-case } ;
HELP: canonical-entry
{ $values { "char" "a code point" } { "seq" string } }
@ -48,6 +52,22 @@ HELP: property?
{ $values { "char" "a code point" } { "property" string } { "?" "a boolean" } }
{ $description "Tests whether the code point is listed under the given property in PropList.txt in the Unicode Character Database." } ;
HELP: load-key-value
{ $values { "filename" string } { "table" "an interval map" } }
{ $description "This loads a file that looks like Script.txt in the Unicode Character Database and converts it into an efficient interval map, where the keys are characters and the values are strings for the properties." } ;
HELP: category
{ $values { "char" "a code point" } { "category" string } }
{ $description "Returns the general category of a code point, in the form of a string. This will always be a string within the ASCII range of length two. If the code point is unassigned, then it returns " { $snippet "Cn" } "." } ;
HELP: ch>upper
{ $values { "ch" "a code point" } { "upper" "a code point" } }
{ $description "Returns the simple upper-cased version of the code point, if it exists. This does not handle context-sensitive or locale-dependent properties of linguistically accurate case conversion, and does not correctly handle characters which become multiple characters on conversion to this case." } ;
HELP: ch>lower
{ $values { "ch" "a code point" } { "lower" "a code point" } }
{ $description "Returns the simple lower-cased version of the code point, if it exists. This does not handle context-sensitive or locale-dependent properties of linguistically accurate case conversion, and does not correctly handle characters which become multiple characters on conversion to this case." } ;
HELP: ch>title
{ $values { "ch" "a code point" } { "title" "a code point" } }
{ $description "Returns the simple title-cased version of the code point, if it exists. This does not handle context-sensitive or locale-dependent properties of linguistically accurate case conversion, and does not correctly handle characters which become multiple characters on conversion to this case." } ;
HELP: special-case
{ $values { "ch" "a code point" } { "casing-tuple" { "a tuple, or " { $link f } } } }
{ $description "If a code point has special casing behavior, returns a tuple which represents that information." } ;

View File

@ -58,7 +58,7 @@ CONSTANT: num-chars HEX: 2FA1E
PRIVATE>
: category# ( char -- category )
: category# ( char -- n )
! There are a few characters that should be Cn
! that this gives Cf or Mn
! Cf = 26; Mn = 5; Cn = 29
@ -219,27 +219,3 @@ load-properties to: properties
[ name>char [ "Invalid character" throw ] unless* ]
name>char-hook set-global
SYMBOL: interned
: range, ( value key -- )
swap interned get
[ = ] with find nip 2array , ;
: expand-ranges ( assoc -- interval-map )
[
[
swap CHAR: . over member? [
".." split1 [ hex> ] bi@ 2array
] [ hex> ] if range,
] assoc-each
] { } make <interval-map> ;
: process-key-value ( ranges -- table )
dup values prune interned
[ expand-ranges ] with-variable ;
PRIVATE>
: load-key-value ( filename -- table )
data process-key-value ;

View File

@ -2,7 +2,7 @@
! See http://factorcode.org/license.txt for BSD license.
USING: ascii sequences namespaces make unicode.data kernel math arrays
locals sorting.insertion accessors assocs math.order combinators
unicode.syntax strings sbufs hints combinators.short-circuit vectors ;
strings sbufs hints combinators.short-circuit vectors ;
IN: unicode.normalize
<PRIVATE

View File

@ -1,17 +1,13 @@
! Copyright (C) 2008 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: accessors values kernel sequences assocs io.files
io.encodings ascii math.ranges io splitting math.parser
namespaces make byte-arrays locals math sets io.encodings.ascii
words words.symbol compiler.units arrays interval-maps
unicode.data ;
USING: values interval-maps simple-flat-file ;
IN: unicode.script
<PRIVATE
VALUE: script-table
"vocab:unicode/script/Scripts.txt" load-key-value
"vocab:unicode/script/Scripts.txt" load-interval-file
to: script-table
PRIVATE>

View File

@ -1,38 +0,0 @@
! Copyright (C) 2008 Daniel Ehrenberg.
! See http://factorcode.org/license.txt for BSD license.
USING: unicode.data kernel math sequences parser lexer
bit-arrays namespaces make sequences.private arrays quotations
assocs classes.predicate math.order strings.parser ;
IN: unicode.syntax
<PRIVATE
: >category-array ( categories -- bitarray )
categories [ swap member? ] with map >bit-array ;
: as-string ( strings -- bit-array )
concat unescape-string ;
: [category] ( categories -- quot )
[
[ [ categories member? not ] filter as-string ] keep
[ categories member? ] filter >category-array
[ dup category# ] % , [ nth-unsafe [ drop t ] ] %
\ member? 2array >quotation ,
\ if ,
] [ ] make ;
: define-category ( word categories -- )
[category] integer swap define-predicate-class ;
PRIVATE>
SYNTAX: CATEGORY:
CREATE ";" parse-tokens define-category ;
: seq-minus ( seq1 seq2 -- diff )
[ member? not ] curry filter ;
SYNTAX: CATEGORY-NOT:
CREATE ";" parse-tokens
categories swap seq-minus define-category ;

View File

@ -15,7 +15,7 @@ $nl
{ $vocab-subsection "Word and grapheme breaks" "unicode.breaks" }
{ $vocab-subsection "Unicode normalization" "unicode.normalize" }
"The following are mostly for internal use:"
{ $vocab-subsection "Unicode syntax" "unicode.syntax" }
{ $vocab-subsection "Unicode category syntax" "unicode.categories.syntax" }
{ $vocab-subsection "Unicode data tables" "unicode.data" }
{ $see-also "ascii" "io.encodings" } ;

View File

@ -1,19 +1,26 @@
! Copyright (C) 2005, 2009 Daniel Ehrenberg
! See http://factorcode.org/license.txt for BSD license.
USING: kernel sequences unicode.syntax math math.order combinators
hints ;
USING: kernel sequences unicode.categories.syntax math math.order
combinators hints combinators.short-circuit ;
IN: xml.char-classes
CATEGORY: 1.0name-start* Ll Lu Lo Lt Nl \u000559\u0006E5\u0006E6_: ;
: 1.0name-start? ( char -- ? )
dup 1.0name-start*? [ drop t ]
[ HEX: 2BB HEX: 2C1 between? ] if ;
CATEGORY: 1.0name-start
Ll Lu Lo Lt Nl | {
[ HEX: 2BB HEX: 2C1 between? ]
[ "\u000559\u0006E5\u0006E6_:" member? ]
} 1|| ;
CATEGORY: 1.0name-char Ll Lu Lo Lt Nl Mc Me Mn Lm Nd _-.\u000387: ;
CATEGORY: 1.0name-char
Ll Lu Lo Lt Nl Mc Me Mn Lm Nd |
"_-.\u000387:" member? ;
CATEGORY: 1.1name-start Ll Lu Lo Lm Ln Nl _: ;
CATEGORY: 1.1name-start
Ll Lu Lo Lm Nl |
"_:" member? ;
CATEGORY: 1.1name-char Ll Lu Lo Lm Ln Nl Mc Mn Nd Pc Cf _-.\u0000b7: ;
CATEGORY: 1.1name-char
Ll Lu Lo Lm Nl Mc Mn Nd Pc Cf |
"_-.\u0000b7:" member? ;
: name-start? ( 1.0? char -- ? )
swap [ 1.0name-start? ] [ 1.1name-start? ] if ;

View File

@ -67,7 +67,7 @@ IN: mason.child
try-process
] with-directory ;
: return-with ( obj -- ) return-continuation get continue-with ;
: return-with ( obj -- * ) return-continuation get continue-with ;
: build-clean? ( -- ? )
{

View File

@ -16,7 +16,7 @@ IN: mason.report
"git id: " write "git-id" eval-file print nl ;
: with-report ( quot -- )
[ "report" utf8 ] dip '[ common-report @ ] with-file-writer ;
[ "report" utf8 ] dip '[ common-report @ ] with-file-writer ; inline
: compile-failed-report ( error -- )
[

View File

@ -224,8 +224,7 @@ M: no-method error.
] if ;
! Syntax
: GENERIC:
CREATE define-generic ; parsing
SYNTAX: GENERIC: CREATE define-generic ;
: parse-method ( -- quot classes generic )
parse-definition [ 2 tail ] [ second ] [ first ] tri ;
@ -238,13 +237,13 @@ M: no-method error.
: (METHOD:) ( -- method def ) CREATE-METHOD parse-definition ;
: METHOD: (METHOD:) define ; parsing
SYNTAX: METHOD: (METHOD:) define ;
! For compatibility
: M:
SYNTAX: M:
scan-word 1array scan-word create-method-in
parse-definition
define ; parsing
define ;
! Definition protocol. We qualify core generics here
QUALIFIED: syntax

View File

@ -9,36 +9,46 @@ CONSULT: assoc-protocol lex-hash hash>> ;
: pos-or-0 ( neg? -- pos/0 ) dup 0 < [ drop 0 ] when ;
:: prepare-pos ( v i -- c l )
[let | n [ i v head-slice ] |
v CHAR: \n n last-index -1 or 1+ -
n [ CHAR: \n = ] count 1+ ] ;
[let | n [ i v head-slice ] |
v CHAR: \n n last-index -1 or 1+ -
n [ CHAR: \n = ] count 1+
] ;
: store-pos ( v a -- ) input swap at prepare-pos
lexer get [ (>>line) ] keep (>>column) ;
: store-pos ( v a -- )
input swap at prepare-pos
lexer get [ (>>line) ] keep (>>column) ;
M: lex-hash set-at swap {
{ pos [ store-pos ] }
[ swap hash>> set-at ] } case ;
M: lex-hash set-at
swap {
{ pos [ store-pos ] }
[ swap hash>> set-at ]
} case ;
:: at-pos ( t l c -- p ) t l head-slice [ length ] map sum l 1- + c + ;
M: lex-hash at* swap {
M: lex-hash at*
swap {
{ input [ drop lexer get text>> "\n" join t ] }
{ pos [ drop lexer get [ text>> ] [ line>> 1- ] [ column>> 1+ ] tri at-pos t ] }
[ swap hash>> at* ] } case ;
[ swap hash>> at* ]
} case ;
: with-global-lexer ( quot -- result )
[ f lrstack set
V{ } clone error-stack set H{ } clone \ heads set
H{ } clone \ packrat set ] f make-assoc <lex-hash>
[
f lrstack set
V{ } clone error-stack set H{ } clone \ heads set
H{ } clone \ packrat set
] f make-assoc <lex-hash>
swap bind ; inline
: parse* ( parser -- ast ) compile
[ execute [ error-stack get first throw ] unless* ] with-global-lexer
ast>> ;
: parse* ( parser -- ast )
compile
[ execute [ error-stack get first throw ] unless* ] with-global-lexer
ast>> ;
: create-bnf ( name parser -- ) reset-tokenizer [ lexer get skip-blank parse* parsed ] curry
define word make-parsing ;
: create-bnf ( name parser -- )
reset-tokenizer [ lexer get skip-blank parse* parsed ] curry
define-syntax ;
SYNTAX: ON-BNF:
CREATE-WORD reset-tokenizer ";ON-BNF" parse-multiline-string parse-ebnf