factor: Add more character escapes.

modern-harvey2
Doug Coleman 2017-08-26 02:01:26 -05:00
parent 7cf91e005d
commit eb173e2caa
30 changed files with 89 additions and 89 deletions
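This commit moves Factor source toward the backslash-escaped CHAR: spellings: CHAR: is a parsing word that reads the next token as a character (code point) literal, and delimiter-like tokens such as ( ) [ ] { } ! " and \ now appear as CHAR: \( , CHAR: \] , CHAR: \\ , and so on, presumably so that stricter tokenizers (this branch appears to carry the "modern" lexer work) can scan source without special-casing them. Both spellings name the same code point; a minimal listener sketch (illustration only, not part of the commit):

    CHAR: ] .     ! 93  unescaped spelling being replaced
    CHAR: \] .    ! 93  escaped spelling used after this commit
    CHAR: \\ .    ! 92  a literal backslash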

@@ -21,7 +21,7 @@ ERROR: bad-array-type ;
 : (parse-c-type) ( string -- type )
     {
         { [ "*" ?tail ] [ (parse-c-type) <pointer> ] }
-        { [ CHAR: ] over member? ] [ parse-array-type ] }
+        { [ CHAR: \] over member? ] [ parse-array-type ] }
         { [ dup search ] [ parse-word ] }
         [ parse-word ]
     } cond ;

@@ -17,7 +17,7 @@ IN: help.lint.spaces
     dup utf8 file-lines [ 1 + 2array ] map-index
     [
         first [
-            { [ CHAR: space = ] [ CHAR: " = ] } 1||
+            { [ CHAR: space = ] [ CHAR: \" = ] } 1||
         ] trim-head
         " " swap subseq?
     ] filter

@@ -7,16 +7,16 @@ IN: io.encodings.iso2022
 { "hello" } [ "hello" >byte-array iso2022 decode ] unit-test
 { "hello" } [ "hello" iso2022 encode >string ] unit-test

-{ "hi" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: B CHAR: i } iso2022 decode ] unit-test
-{ "hi" } [ B{ CHAR: h CHAR: i $ ESC CHAR: ( CHAR: B } iso2022 decode ] unit-test
-{ "hi\u00fffd" } [ B{ CHAR: h CHAR: i $ ESC CHAR: ( } iso2022 decode ] unit-test
+{ "hi" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: B CHAR: i } iso2022 decode ] unit-test
+{ "hi" } [ B{ CHAR: h CHAR: i $ ESC CHAR: \( CHAR: B } iso2022 decode ] unit-test
+{ "hi\u00fffd" } [ B{ CHAR: h CHAR: i $ ESC CHAR: \( } iso2022 decode ] unit-test
 { "hi\u00fffd" } [ B{ CHAR: h CHAR: i $ ESC } iso2022 decode ] unit-test

-{ B{ CHAR: h $ ESC CHAR: ( CHAR: J 0xD8 } } [ "h\u00ff98" iso2022 encode ] unit-test
-{ "h\u00ff98" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J 0xD8 } iso2022 decode ] unit-test
-{ "hi" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J CHAR: i } iso2022 decode ] unit-test
-{ "h" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J 0x80 } iso2022 decode ] unit-test
+{ B{ CHAR: h $ ESC CHAR: \( CHAR: J 0xD8 } } [ "h\u00ff98" iso2022 encode ] unit-test
+{ "h\u00ff98" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J 0xD8 } iso2022 decode ] unit-test
+{ "hi" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J CHAR: i } iso2022 decode ] unit-test
+{ "h" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J 0x80 } iso2022 decode ] unit-test

 { B{ CHAR: h $ ESC CHAR: $ CHAR: B 0x3E 0x47 } } [ "h\u007126" iso2022 encode ] unit-test
 { "h\u007126" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: B 0x3E 0x47 } iso2022 decode ] unit-test
@@ -26,11 +26,11 @@ IN: io.encodings.iso2022
 { "h\u00fffd" } [ B{ CHAR: h $ ESC } iso2022 decode ] unit-test
 { "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: B 0x80 0x80 } iso2022 decode ] unit-test

-{ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x38 0x54 } } [ "h\u0058ce" iso2022 encode ] unit-test
-{ "h\u0058ce" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x38 0x54 } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x38 } iso2022 decode ] unit-test
-{ "h" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x70 0x70 } iso2022 decode ] unit-test
+{ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x38 0x54 } } [ "h\u0058ce" iso2022 encode ] unit-test
+{ "h\u0058ce" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x38 0x54 } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x38 } iso2022 decode ] unit-test
+{ "h" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x70 0x70 } iso2022 decode ] unit-test

 [ "\u{syriac-music}" iso2022 encode ] must-fail

@@ -33,10 +33,10 @@ M: iso2022 <decoder>
 CONSTANT: ESC 0x16

-CONSTANT: switch-ascii B{ $ ESC CHAR: ( CHAR: B }
-CONSTANT: switch-jis201 B{ $ ESC CHAR: ( CHAR: J }
+CONSTANT: switch-ascii B{ $ ESC CHAR: \( CHAR: B }
+CONSTANT: switch-jis201 B{ $ ESC CHAR: \( CHAR: J }
 CONSTANT: switch-jis208 B{ $ ESC CHAR: $ CHAR: B }
-CONSTANT: switch-jis212 B{ $ ESC CHAR: $ CHAR: ( CHAR: D }
+CONSTANT: switch-jis212 B{ $ ESC CHAR: $ CHAR: \( CHAR: D }

 : find-type ( char -- code type )
     {
@@ -62,7 +62,7 @@ M:: iso2022-state encode-char ( char stream encoding -- )
 : read-escape ( stream -- type/f )
     dup stream-read1 {
-        { CHAR: ( [
+        { CHAR: \( [
             stream-read1 {
                 { CHAR: B [ ascii get-global ] }
                 { CHAR: J [ jis201 get-global ] }
@@ -73,7 +73,7 @@ M:: iso2022-state encode-char ( char stream encoding -- )
             dup stream-read1 {
                 { CHAR: @ [ drop jis208 get-global ] } ! want: JIS X 0208-1978
                 { CHAR: B [ drop jis208 get-global ] }
-                { CHAR: ( [
+                { CHAR: \( [
                     stream-read1 CHAR: D = jis212 get-global f ?
                 ] }
                 [ 2drop f ]

@@ -62,7 +62,7 @@ TUPLE: CreateProcess-args
 ! Find groups of \, groups of \ followed by ", or naked "
 : escape-double-quote ( str -- newstr )
     [
-        { [ drop CHAR: \ = ] [ nip "\\\"" member? ] } 2&&
+        { [ drop CHAR: \\ = ] [ nip "\\\"" member? ] } 2&&
     ] monotonic-split [
         dup last CHAR: \" = [
             dup length 1 > [

@@ -111,10 +111,10 @@ DEFER: (read-json-string)
         { CHAR: \" [ over read-json-string suffix! ] }
         { CHAR: \[ [ json-open-array ] }
         { CHAR: , [ v-over-push ] }
-        { CHAR: ] [ json-close-array ] }
+        { CHAR: \] [ json-close-array ] }
         { CHAR: \{ [ json-open-hash ] }
         { CHAR: \: [ v-pick-push ] }
-        { CHAR: } [ json-close-hash ] }
+        { CHAR: \} [ json-close-hash ] }
         { CHAR: \s [ ] }
         { CHAR: \t [ ] }
         { CHAR: \r [ ] }

@@ -114,7 +114,7 @@ M: sequence stream-json-print
     CHAR: \[ over stream-write1 swap
     over '[ CHAR: , _ stream-write1 ]
     pick '[ _ stream-json-print ] interleave
-    CHAR: ] swap stream-write1 ;
+    CHAR: \] swap stream-write1 ;

 <PRIVATE
@@ -144,7 +144,7 @@ M: real json-coerce >float number>string ;
             stream stream-json-print
         ] bi*
     ] interleave
-    CHAR: } stream stream-write1 ;
+    CHAR: \} stream stream-write1 ;

 PRIVATE>

@@ -112,7 +112,7 @@ C: <ebnf> ebnf
 ! between the quotes.
 [
     [
-        [ CHAR: \ = ] satisfy
+        [ CHAR: \\ = ] satisfy
         [ "\"\\" member? ] satisfy 2seq ,
         [ CHAR: \" = not ] satisfy ,
     ] choice* repeat1 "\"" "\"" surrounded-by ,
@@ -161,7 +161,7 @@ C: <ebnf> ebnf
 ! Match the syntax for declaring character ranges
 [
     [ "[" syntax , "[" token ensure-not , ] seq* hide ,
-    [ CHAR: ] = not ] satisfy repeat1 ,
+    [ CHAR: \] = not ] satisfy repeat1 ,
     "]" syntax ,
 ] seq* [ first >string unescape-string <ebnf-range> ] action ;

@@ -53,7 +53,7 @@ IN: xml.autoencoding
     get-next {
         { 0 [ next next start-utf16le ] }
         { CHAR: ? [ go-utf8 instruct dup instruct-encoding ] }
-        { CHAR: ! [ go-utf8 direct ] }
+        { CHAR: \! [ go-utf8 direct ] }
         [ check start<name ]
     } case ;

@@ -109,7 +109,7 @@ DEFER: make-tag ! Is this unavoidable?
 : dtd-loop ( -- )
     pass-blank get-char {
-        { CHAR: ] [ next ] }
+        { CHAR: \] [ next ] }
         { CHAR: % [ expand-pe ] }
         { CHAR: < [
             next make-tag dup dtd-acceptable?
@@ -166,7 +166,7 @@ DEFER: make-tag ! Is this unavoidable?
 : make-tag ( -- tag )
     get-char {
-        { CHAR: ! [ next direct ] }
+        { CHAR: \! [ next direct ] }
         { CHAR: ? [ next instruct ] }
         { CHAR: - [ next interpolate-tag ] }
         [ drop normal-tag ]

@@ -44,7 +44,7 @@ name>char-hook [
 : unicode-escape ( str -- ch str' )
     "{" ?head-slice [
-        CHAR: } over index cut-slice [
+        CHAR: \} over index cut-slice [
             dup hex> [
                 nip
             ] [
@@ -122,7 +122,7 @@ DEFER: (parse-string)
 : parse-found-token ( accum lexer i elt -- )
     { sbuf lexer fixnum fixnum } declare
     [ over lexer-subseq pick push-all ] dip
-    CHAR: \ = [
+    CHAR: \\ = [
         dup dup [ next-char ] bi@
         [ [ pick push ] bi@ ]
         [ drop 2dup next-line% ] if*

@@ -51,32 +51,32 @@ IN: c.lexer.tests
 { f }
 [
     "\"abc\" asdf" <sequence-parser>
-    [ CHAR: \ CHAR: \" take-quoted-string drop ] [ "asdf" take-sequence ] bi
+    [ CHAR: \\ CHAR: \" take-quoted-string drop ] [ "asdf" take-sequence ] bi
 ] unit-test

 { "abc\\\"def" }
 [
     "\"abc\\\"def\" asdf" <sequence-parser>
-    CHAR: \ CHAR: \" take-quoted-string
+    CHAR: \\ CHAR: \" take-quoted-string
 ] unit-test

 { "asdf" }
 [
     "\"abc\" asdf" <sequence-parser>
-    [ CHAR: \ CHAR: \" take-quoted-string drop ]
+    [ CHAR: \\ CHAR: \" take-quoted-string drop ]
     [ skip-whitespace "asdf" take-sequence ] bi
 ] unit-test

 { f }
 [
     "\"abc asdf" <sequence-parser>
-    CHAR: \ CHAR: \" take-quoted-string
+    CHAR: \\ CHAR: \" take-quoted-string
 ] unit-test

 { "\"abc" }
 [
     "\"abc asdf" <sequence-parser>
-    [ CHAR: \ CHAR: \" take-quoted-string drop ]
+    [ CHAR: \\ CHAR: \" take-quoted-string drop ]
     [ "\"abc" take-sequence ] bi
 ] unit-test
@@ -87,7 +87,7 @@ IN: c.lexer.tests
 [ "" <sequence-parser> take-token ] unit-test

 { "abcd e \\\"f g" }
-[ "\"abcd e \\\"f g\"" <sequence-parser> CHAR: \ CHAR: \" take-token* ] unit-test
+[ "\"abcd e \\\"f g\"" <sequence-parser> CHAR: \\ CHAR: \" take-token* ] unit-test

 { "123" }
 [ "123jjj" <sequence-parser> take-c-integer ] unit-test

@@ -40,7 +40,7 @@ IN: c.lexer
 : take-define-identifier ( sequence-parser -- string )
     skip-whitespace/comments
-    [ current { [ blank? ] [ CHAR: ( = ] } 1|| ] take-until ;
+    [ current { [ blank? ] [ CHAR: \( = ] } 1|| ] take-until ;

 :: take-quoted-string ( sequence-parser escape-char quote-char -- string )
     sequence-parser n>> :> start-n
@@ -69,7 +69,7 @@ IN: c.lexer
     } case ;

 : take-token ( sequence-parser -- string/f )
-    CHAR: \ CHAR: \" take-token* ;
+    CHAR: \\ CHAR: \" take-token* ;

 : c-identifier-begin? ( ch -- ? )
     CHAR: a CHAR: z [a,b]

@@ -85,7 +85,7 @@ ERROR: header-file-missing path ;
 : take-define-identifier ( sequence-parser -- string )
     skip-whitespace/comments
-    [ current { [ blank? ] [ CHAR: ( = ] } 1|| ] take-until ;
+    [ current { [ blank? ] [ CHAR: \( = ] } 1|| ] take-until ;

 :: handle-define ( preprocessor-state sequence-parser -- )
     sequence-parser take-define-identifier :> ident

@@ -59,7 +59,7 @@ DEFER: expression-parser
 [
     {
         [ blank? not ]
-        [ CHAR: ) = not ]
+        [ CHAR: \) = not ]
         [ CHAR: - = not ]
     } 1&&
 ] satisfy repeat1 [ >string ] action ;

@@ -70,15 +70,15 @@ CONSTANT: CHARS H{
     ! { CHAR: 8 CHAR: 8 }
     { CHAR: 9 CHAR: 6 }
     { CHAR: & 0x214B }
-    { CHAR: ! 0x00A1 }
+    { CHAR: \! 0x00A1 }
     { CHAR: \" 0x201E }
     { CHAR: . 0x02D9 }
     { CHAR: ; 0x061B }
-    { CHAR: \[ CHAR: ] }
-    { CHAR: ( CHAR: ) }
-    { CHAR: \{ CHAR: } }
+    { CHAR: \[ CHAR: \] }
+    { CHAR: \( CHAR: \) }
+    { CHAR: \{ CHAR: \} }
     { CHAR: ? 0x00BF }
-    { CHAR: ! 0x00A1 }
+    { CHAR: \! 0x00A1 }
     { CHAR: ' CHAR: , }
     { CHAR: < CHAR: > }
     { CHAR: _ 0x203E }

@@ -12,7 +12,7 @@ GENERIC: fuel-pprint ( obj -- )
 <PRIVATE

 : fuel-maybe-scape ( ch -- seq )
-    dup "\\\"?#()[]'`;." member? [ CHAR: \ swap 2array ] [ 1array ] if ;
+    dup "\\\"?#()[]'`;." member? [ CHAR: \\ swap 2array ] [ 1array ] if ;

 SYMBOL: :restarts

@@ -108,7 +108,7 @@ SYMBOL: tagstack
 : read-< ( sequence-parser -- string/f )
     advance dup current [
-        CHAR: ! = [ read-bang f ] [ read-tag ] if
+        CHAR: \! = [ read-bang f ] [ read-tag ] if
     ] [
         drop f
     ] if* ;

@@ -5,17 +5,17 @@ IN: infix.tokenizer.tests
 { V{ T{ ast-value f 1 } } } [ "1" tokenize-infix ] unit-test
 { V{ T{ ast-value f 1.02 } CHAR: * T{ ast-value f 3 } } } [ "1.02*3" tokenize-infix ] unit-test
-{ V{ T{ ast-value f 3 } CHAR: / CHAR: ( T{ ast-value f 3 } CHAR: + T{ ast-value f 4 } CHAR: ) } }
+{ V{ T{ ast-value f 3 } CHAR: / CHAR: \( T{ ast-value f 3 } CHAR: + T{ ast-value f 4 } CHAR: \) } }
 [ "3/(3+4)" tokenize-infix ] unit-test
-{ V{ "foo" CHAR: ( "x" CHAR: , "y" CHAR: , "z" CHAR: ) } } [ "foo(x,y,z)" tokenize-infix ] unit-test
-{ V{ "arr" CHAR: \[ "x" CHAR: + T{ ast-value f 3 } CHAR: ] } }
+{ V{ "foo" CHAR: \( "x" CHAR: , "y" CHAR: , "z" CHAR: \) } } [ "foo(x,y,z)" tokenize-infix ] unit-test
+{ V{ "arr" CHAR: \[ "x" CHAR: + T{ ast-value f 3 } CHAR: \] } }
 [ "arr[x+3]" tokenize-infix ] unit-test
 [ "1.0.4" tokenize-infix ] must-fail
-{ V{ CHAR: + CHAR: ] T{ ast-value f 3.4 } CHAR: , "bar" } }
+{ V{ CHAR: + CHAR: \] T{ ast-value f 3.4 } CHAR: , "bar" } }
 [ "+]3.4,bar" tokenize-infix ] unit-test
 { V{ "baz_34c" } } [ "baz_34c" tokenize-infix ] unit-test
 { V{ T{ ast-value f 34 } "c_baz" } } [ "34c_baz" tokenize-infix ] unit-test
-{ V{ CHAR: ( T{ ast-value f 1 } CHAR: + T{ ast-value f 2 } CHAR: ) } }
+{ V{ CHAR: \( T{ ast-value f 1 } CHAR: + T{ ast-value f 2 } CHAR: \) } }
 [ "(1+2)" tokenize-infix ] unit-test
 { V{ T{ ast-value f 1 } CHAR: + T{ ast-value f 2 } CHAR: / T{ ast-value f 3 } } }
 [ "1\n+\r2\t/ 3" tokenize-infix ] unit-test

@@ -17,7 +17,7 @@ NameFirst = Letter | "_" => [[ CHAR: _ ]]
 NameRest = NameFirst | Digit
 Name = NameFirst NameRest* => [[ first2 swap prefix >string ]]
 Special = [+*/%(),] | "-" => [[ CHAR: - ]]
-        | "[" => [[ CHAR: \[ ]] | "]" => [[ CHAR: ] ]]
+        | "[" => [[ CHAR: \[ ]] | "]" => [[ CHAR: \] ]]
         | ":" => [[ CHAR: \: ]]
 Tok = Spaces (Name | Number | String | Special )
 End = !(.)

@@ -24,7 +24,7 @@ IN: ini-file
         { CHAR: ? CHAR: ? }
         { CHAR: ; CHAR: ; }
         { CHAR: \[ CHAR: \[ }
-        { CHAR: ] CHAR: ] }
+        { CHAR: \] CHAR: \] }
         { CHAR: = CHAR: = }
     } ?at [ bad-escape ] unless ;
@@ -55,7 +55,7 @@ USE: xml.entities
         { CHAR: ? "\\?" }
         { CHAR: ; "\\;" }
         { CHAR: \[ "\\[" }
-        { CHAR: ] "\\]" }
+        { CHAR: \] "\\]" }
         { CHAR: = "\\=" }
     } escape-string-by ;
@@ -88,11 +88,11 @@ SYMBOL: option
     {
         [ length 1 > ]
         [ first CHAR: \[ = ]
-        [ CHAR: ] swap last-index ]
+        [ CHAR: \] swap last-index ]
     } 1&& ;

 : line-continues? ( line -- ? )
-    { [ empty? not ] [ last CHAR: \ = ] } 1&& ;
+    { [ empty? not ] [ last CHAR: \\ = ] } 1&& ;

 : section, ( -- )
     section get [ , ] when* ;

@@ -44,7 +44,7 @@ MACRO:: read-double-matched ( open-ch -- quot: ( n string tag ch -- n' string seq ) )
     } case
 ] ;

-: read-double-matched-paren ( n string tag ch -- n' string seq ) CHAR: ( read-double-matched ;
+: read-double-matched-paren ( n string tag ch -- n' string seq ) CHAR: \( read-double-matched ;
 : read-double-matched-bracket ( n string tag ch -- n' string seq ) CHAR: \[ read-double-matched ;
 : read-double-matched-brace ( n string tag ch -- n' string seq ) CHAR: \{ read-double-matched ;
@@ -105,13 +105,13 @@ MACRO:: read-matched ( ch -- quot: ( n string tag -- n' string slice' ) )
 : read-bracket ( n string slice -- n' string slice' ) CHAR: \[ read-matched ;
 : read-brace ( n string slice -- n' string slice' ) CHAR: \{ read-matched ;
-: read-paren ( n string slice -- n' string slice' ) CHAR: ( read-matched ;
+: read-paren ( n string slice -- n' string slice' ) CHAR: \( read-matched ;

 : read-string-payload ( n string -- n' string )
     over [
-        { CHAR: \ CHAR: " } slice-til-separator-inclusive {
+        { CHAR: \\ CHAR: \" } slice-til-separator-inclusive {
            { f [ drop ] }
-            { CHAR: " [ drop ] }
-            { CHAR: \ [ drop next-char-from drop read-string-payload ] }
+            { CHAR: \" [ drop ] }
+            { CHAR: \\ [ drop next-char-from drop read-string-payload ] }
         } case
     ] [
         string-expected-got-eof
@@ -230,9 +230,9 @@ ERROR: mismatched-terminator n string slice ;
     over [
         skip-whitespace "\"\\!:[{(<>\s\r\n" slice-til-either {
             ! { CHAR: ` [ read-backtick ] }
-            { CHAR: " [ read-string ] }
-            { CHAR: \ [ read-backslash ] }
-            { CHAR: ! [ read-exclamation ] }
+            { CHAR: \" [ read-string ] }
+            { CHAR: \\ [ read-backslash ] }
+            { CHAR: \! [ read-exclamation ] }
             { CHAR: \: [
                 dup strict-upper? strict-upper get and [
                     length swap [ - ] dip f
@@ -269,7 +269,7 @@ ERROR: mismatched-terminator n string slice ;
             ] }
             { CHAR: \[ [ read-bracket ] }
             { CHAR: \{ [ read-brace ] }
-            { CHAR: ( [ read-paren ] }
+            { CHAR: \( [ read-paren ] }
             { CHAR: \s [ read-token-or-whitespace ] }
             { CHAR: \r [ read-token-or-whitespace ] }
             { CHAR: \n [ read-token-or-whitespace ] }

@@ -9,9 +9,9 @@ IN: modern.slices
 : matching-delimiter ( ch -- ch' )
     H{
-        { CHAR: ( CHAR: ) }
-        { CHAR: \[ CHAR: ] }
-        { CHAR: \{ CHAR: } }
+        { CHAR: \( CHAR: \) }
+        { CHAR: \[ CHAR: \] }
+        { CHAR: \{ CHAR: \} }
         { CHAR: < CHAR: > }
         { CHAR: \: CHAR: ; }
     } ?at drop ;
@@ -144,7 +144,7 @@ ERROR: unexpected-end n string ;
 :: merge-slice-til-eol-slash' ( n string slice -- n' string slice/f ch/f )
     n string merge-slice-til-eol-slash'' :> ( n' string' slice' ch' )
-    ch' CHAR: \ = [
+    ch' CHAR: \\ = [
         n' 1 + string' ?nth' "\r\n" member? [
             n' 2 + string' slice slice' span-slices merge-slice-til-eol-slash'
         ] [
@@ -203,9 +203,9 @@ ERROR: subseq-expected-but-got-eof n string expected ;
     [ [ from>> ] [ to>> ] [ seq>> ] tri ] dip
     swap [ + ] dip <slice> ;

-! { CHAR: ] [ read-closing ] }
-! { CHAR: } [ read-closing ] }
-! { CHAR: ) [ read-closing ] }
+! { CHAR: \] [ read-closing ] }
+! { CHAR: \} [ read-closing ] }
+! { CHAR: \) [ read-closing ] }
 : read-closing ( n string tok -- n string tok )
     dup length 1 = [
         -1 modify-to [ 1 - ] 2dip

@@ -60,10 +60,10 @@ CONSTANT: morse-code-table $[
         { CHAR: , "--..--" }
         { CHAR: ? "..--.." }
         { CHAR: ' ".----." }
-        { CHAR: ! "-.-.--" }
+        { CHAR: \! "-.-.--" }
         { CHAR: / "-..-." }
-        { CHAR: ( "-.--." }
-        { CHAR: ) "-.--.-" }
+        { CHAR: \( "-.--." }
+        { CHAR: \) "-.--.-" }
         { CHAR: & ".-..." }
         { CHAR: \: "---..." }
         { CHAR: ; "-.-.-." }

@@ -18,8 +18,8 @@ IN: pdf.values
         { CHAR: \r "\\r" }
         { CHAR: \t "\\t" }
         { CHAR: \\ "\\\\" }
-        { CHAR: ( "\\(" }
-        { CHAR: ) "\\)" }
+        { CHAR: \( "\\(" }
+        { CHAR: \) "\\)" }
     } escape-string-by ;

 PRIVATE>

@@ -27,7 +27,7 @@ IN: rosetta-code.balanced-brackets
     str [
         {
             { CHAR: \[ [ 1 ] }
-            { CHAR: ] [ -1 ] }
+            { CHAR: \] [ -1 ] }
             [ drop 0 ]
         } case counter + counter!
         counter 0 < [ f ok! ] when

@@ -20,7 +20,7 @@ WhitespaceCharacter = [ \t\n\r]
 DecimalDigit = [0-9]
 Letter = [A-Za-z]

-CommentCharacter = [^"] | '""' => [[ CHAR: " ]]
+CommentCharacter = [^"] | '""' => [[ CHAR: \" ]]
 Comment = '"' (CommentCharacter)*:s '"' => [[ s >string ast-comment boa ]]
 OptionalWhiteSpace = (WhitespaceCharacter | Comment)*

@@ -17,8 +17,8 @@ IN: text-to-pdf
         { CHAR: \r "\\r" }
         { CHAR: \t "\\t" }
         { CHAR: \\ "\\\\" }
-        { CHAR: ( "\\(" }
-        { CHAR: ) "\\)" }
+        { CHAR: \( "\\(" }
+        { CHAR: \) "\\)" }
     } escape-string-by "(" ")" surround ;

 : pdf-object ( str n -- str' )

@@ -45,9 +45,9 @@ DEFER: parse-tnetstring
     parse-payload {
         { CHAR: # [ string>number ] }
         { CHAR: \" [ ] }
-        { CHAR: } [ parse-dict ] }
-        { CHAR: ] [ parse-list ] }
-        { CHAR: ! [ parse-bool ] }
+        { CHAR: \} [ parse-dict ] }
+        { CHAR: \] [ parse-list ] }
+        { CHAR: \! [ parse-bool ] }
         { CHAR: ~ [ parse-null ] }
         { CHAR: , [ ] }
         [ "Invalid payload type: %c" sprintf throw ]

@@ -13,7 +13,7 @@ IN: txon
     "\\`" "`" replace ;

 : `? ( ch1 ch2 -- ? )
-    [ CHAR: \ = not ] [ CHAR: ` = ] bi* and ;
+    [ CHAR: \\ = not ] [ CHAR: ` = ] bi* and ;

 : (find-`) ( string -- n/f )
     2 clump [ first2 `? ] find drop [ 1 + ] [ f ] if* ;