factor: Add more character escapes.
parent 7cf91e005d
commit eb173e2caa
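Illustrative note, not part of the original commit message: every hunk below replaces bare punctuation after CHAR: (parens, brackets, braces, !, ", \) with the backslash-escaped spelling this commit adds. A minimal Factor sketch, assuming the escaped forms denote the same code points as the bare ones:

    USING: prettyprint ;
    CHAR: \( .   ! 40, same code point as CHAR: (
    CHAR: \] .   ! 93, same code point as CHAR: ]
    CHAR: \! .   ! 33, same code point as CHAR: !

The escaped spelling keeps the delimiter from reading as a stray token after CHAR:, which is presumably why the call sites below were updated.
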
@@ -21,7 +21,7 @@ ERROR: bad-array-type ;
 : (parse-c-type) ( string -- type )
 {
 { [ "*" ?tail ] [ (parse-c-type) <pointer> ] }
-{ [ CHAR: ] over member? ] [ parse-array-type ] }
+{ [ CHAR: \] over member? ] [ parse-array-type ] }
 { [ dup search ] [ parse-word ] }
 [ parse-word ]
 } cond ;
@@ -17,7 +17,7 @@ IN: help.lint.spaces
 dup utf8 file-lines [ 1 + 2array ] map-index
 [
 first [
-{ [ CHAR: space = ] [ CHAR: " = ] } 1||
+{ [ CHAR: space = ] [ CHAR: \" = ] } 1||
 ] trim-head
 " " swap subseq?
 ] filter
@@ -7,16 +7,16 @@ IN: io.encodings.iso2022
 { "hello" } [ "hello" >byte-array iso2022 decode ] unit-test
 { "hello" } [ "hello" iso2022 encode >string ] unit-test
 
-{ "hi" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: B CHAR: i } iso2022 decode ] unit-test
-{ "hi" } [ B{ CHAR: h CHAR: i $ ESC CHAR: ( CHAR: B } iso2022 decode ] unit-test
-{ "hi\u00fffd" } [ B{ CHAR: h CHAR: i $ ESC CHAR: ( } iso2022 decode ] unit-test
+{ "hi" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: B CHAR: i } iso2022 decode ] unit-test
+{ "hi" } [ B{ CHAR: h CHAR: i $ ESC CHAR: \( CHAR: B } iso2022 decode ] unit-test
+{ "hi\u00fffd" } [ B{ CHAR: h CHAR: i $ ESC CHAR: \( } iso2022 decode ] unit-test
 { "hi\u00fffd" } [ B{ CHAR: h CHAR: i $ ESC } iso2022 decode ] unit-test
 
-{ B{ CHAR: h $ ESC CHAR: ( CHAR: J 0xD8 } } [ "h\u00ff98" iso2022 encode ] unit-test
-{ "h\u00ff98" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J 0xD8 } iso2022 decode ] unit-test
-{ "hi" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J CHAR: i } iso2022 decode ] unit-test
-{ "h" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: ( CHAR: J 0x80 } iso2022 decode ] unit-test
+{ B{ CHAR: h $ ESC CHAR: \( CHAR: J 0xD8 } } [ "h\u00ff98" iso2022 encode ] unit-test
+{ "h\u00ff98" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J 0xD8 } iso2022 decode ] unit-test
+{ "hi" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J CHAR: i } iso2022 decode ] unit-test
+{ "h" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: \( CHAR: J 0x80 } iso2022 decode ] unit-test
 
 { B{ CHAR: h $ ESC CHAR: $ CHAR: B 0x3E 0x47 } } [ "h\u007126" iso2022 encode ] unit-test
 { "h\u007126" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: B 0x3E 0x47 } iso2022 decode ] unit-test
@@ -26,11 +26,11 @@ IN: io.encodings.iso2022
 { "h\u00fffd" } [ B{ CHAR: h $ ESC } iso2022 decode ] unit-test
 { "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: B 0x80 0x80 } iso2022 decode ] unit-test
 
-{ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x38 0x54 } } [ "h\u0058ce" iso2022 encode ] unit-test
-{ "h\u0058ce" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x38 0x54 } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x38 } iso2022 decode ] unit-test
-{ "h" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( } iso2022 decode ] unit-test
-{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: ( CHAR: D 0x70 0x70 } iso2022 decode ] unit-test
+{ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x38 0x54 } } [ "h\u0058ce" iso2022 encode ] unit-test
+{ "h\u0058ce" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x38 0x54 } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x38 } iso2022 decode ] unit-test
+{ "h" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( } iso2022 decode ] unit-test
+{ "h\u00fffd" } [ B{ CHAR: h $ ESC CHAR: $ CHAR: \( CHAR: D 0x70 0x70 } iso2022 decode ] unit-test
 
 [ "\u{syriac-music}" iso2022 encode ] must-fail
@@ -33,10 +33,10 @@ M: iso2022 <decoder>
 
 CONSTANT: ESC 0x16
 
-CONSTANT: switch-ascii B{ $ ESC CHAR: ( CHAR: B }
-CONSTANT: switch-jis201 B{ $ ESC CHAR: ( CHAR: J }
+CONSTANT: switch-ascii B{ $ ESC CHAR: \( CHAR: B }
+CONSTANT: switch-jis201 B{ $ ESC CHAR: \( CHAR: J }
 CONSTANT: switch-jis208 B{ $ ESC CHAR: $ CHAR: B }
-CONSTANT: switch-jis212 B{ $ ESC CHAR: $ CHAR: ( CHAR: D }
+CONSTANT: switch-jis212 B{ $ ESC CHAR: $ CHAR: \( CHAR: D }
 
 : find-type ( char -- code type )
 {
@@ -62,7 +62,7 @@ M:: iso2022-state encode-char ( char stream encoding -- )
 
 : read-escape ( stream -- type/f )
 dup stream-read1 {
-{ CHAR: ( [
+{ CHAR: \( [
 stream-read1 {
 { CHAR: B [ ascii get-global ] }
 { CHAR: J [ jis201 get-global ] }
@@ -73,7 +73,7 @@ M:: iso2022-state encode-char ( char stream encoding -- )
 dup stream-read1 {
 { CHAR: @ [ drop jis208 get-global ] } ! want: JIS X 0208-1978
 { CHAR: B [ drop jis208 get-global ] }
-{ CHAR: ( [
+{ CHAR: \( [
 stream-read1 CHAR: D = jis212 get-global f ?
 ] }
 [ 2drop f ]
@@ -62,7 +62,7 @@ TUPLE: CreateProcess-args
 ! Find groups of \, groups of \ followed by ", or naked "
 : escape-double-quote ( str -- newstr )
 [
-{ [ drop CHAR: \ = ] [ nip "\\\"" member? ] } 2&&
+{ [ drop CHAR: \\ = ] [ nip "\\\"" member? ] } 2&&
 ] monotonic-split [
 dup last CHAR: \" = [
 dup length 1 > [
@@ -111,10 +111,10 @@ DEFER: (read-json-string)
 { CHAR: \" [ over read-json-string suffix! ] }
 { CHAR: \[ [ json-open-array ] }
 { CHAR: , [ v-over-push ] }
-{ CHAR: ] [ json-close-array ] }
+{ CHAR: \] [ json-close-array ] }
 { CHAR: \{ [ json-open-hash ] }
 { CHAR: \: [ v-pick-push ] }
-{ CHAR: } [ json-close-hash ] }
+{ CHAR: \} [ json-close-hash ] }
 { CHAR: \s [ ] }
 { CHAR: \t [ ] }
 { CHAR: \r [ ] }
@@ -114,7 +114,7 @@ M: sequence stream-json-print
 CHAR: \[ over stream-write1 swap
 over '[ CHAR: , _ stream-write1 ]
 pick '[ _ stream-json-print ] interleave
-CHAR: ] swap stream-write1 ;
+CHAR: \] swap stream-write1 ;
 
 <PRIVATE
 
@@ -144,7 +144,7 @@ M: real json-coerce >float number>string ;
 stream stream-json-print
 ] bi*
 ] interleave
-CHAR: } stream stream-write1 ;
+CHAR: \} stream stream-write1 ;
 
 PRIVATE>
 
@@ -112,7 +112,7 @@ C: <ebnf> ebnf
 ! between the quotes.
 [
 [
-[ CHAR: \ = ] satisfy
+[ CHAR: \\ = ] satisfy
 [ "\"\\" member? ] satisfy 2seq ,
 [ CHAR: \" = not ] satisfy ,
 ] choice* repeat1 "\"" "\"" surrounded-by ,
@@ -161,7 +161,7 @@ C: <ebnf> ebnf
 ! Match the syntax for declaring character ranges
 [
 [ "[" syntax , "[" token ensure-not , ] seq* hide ,
-[ CHAR: ] = not ] satisfy repeat1 ,
+[ CHAR: \] = not ] satisfy repeat1 ,
 "]" syntax ,
 ] seq* [ first >string unescape-string <ebnf-range> ] action ;
 
@@ -53,7 +53,7 @@ IN: xml.autoencoding
 get-next {
 { 0 [ next next start-utf16le ] }
 { CHAR: ? [ go-utf8 instruct dup instruct-encoding ] }
-{ CHAR: ! [ go-utf8 direct ] }
+{ CHAR: \! [ go-utf8 direct ] }
 [ check start<name ]
 } case ;
@@ -109,7 +109,7 @@ DEFER: make-tag ! Is this unavoidable?
 
 : dtd-loop ( -- )
 pass-blank get-char {
-{ CHAR: ] [ next ] }
+{ CHAR: \] [ next ] }
 { CHAR: % [ expand-pe ] }
 { CHAR: < [
 next make-tag dup dtd-acceptable?
@@ -166,7 +166,7 @@ DEFER: make-tag ! Is this unavoidable?
 
 : make-tag ( -- tag )
 get-char {
-{ CHAR: ! [ next direct ] }
+{ CHAR: \! [ next direct ] }
 { CHAR: ? [ next instruct ] }
 { CHAR: - [ next interpolate-tag ] }
 [ drop normal-tag ]
@@ -44,7 +44,7 @@ name>char-hook [
 
 : unicode-escape ( str -- ch str' )
 "{" ?head-slice [
-CHAR: } over index cut-slice [
+CHAR: \} over index cut-slice [
 dup hex> [
 nip
 ] [
@@ -122,7 +122,7 @@ DEFER: (parse-string)
 : parse-found-token ( accum lexer i elt -- )
 { sbuf lexer fixnum fixnum } declare
 [ over lexer-subseq pick push-all ] dip
-CHAR: \ = [
+CHAR: \\ = [
 dup dup [ next-char ] bi@
 [ [ pick push ] bi@ ]
 [ drop 2dup next-line% ] if*
@@ -51,32 +51,32 @@ IN: c.lexer.tests
 { f }
 [
 "\"abc\" asdf" <sequence-parser>
-[ CHAR: \ CHAR: \" take-quoted-string drop ] [ "asdf" take-sequence ] bi
+[ CHAR: \\ CHAR: \" take-quoted-string drop ] [ "asdf" take-sequence ] bi
 ] unit-test
 
 { "abc\\\"def" }
 [
 "\"abc\\\"def\" asdf" <sequence-parser>
-CHAR: \ CHAR: \" take-quoted-string
+CHAR: \\ CHAR: \" take-quoted-string
 ] unit-test
 
 { "asdf" }
 [
 "\"abc\" asdf" <sequence-parser>
-[ CHAR: \ CHAR: \" take-quoted-string drop ]
+[ CHAR: \\ CHAR: \" take-quoted-string drop ]
 [ skip-whitespace "asdf" take-sequence ] bi
 ] unit-test
 
 { f }
 [
 "\"abc asdf" <sequence-parser>
-CHAR: \ CHAR: \" take-quoted-string
+CHAR: \\ CHAR: \" take-quoted-string
 ] unit-test
 
 { "\"abc" }
 [
 "\"abc asdf" <sequence-parser>
-[ CHAR: \ CHAR: \" take-quoted-string drop ]
+[ CHAR: \\ CHAR: \" take-quoted-string drop ]
 [ "\"abc" take-sequence ] bi
 ] unit-test
 
@@ -87,7 +87,7 @@ IN: c.lexer.tests
 [ "" <sequence-parser> take-token ] unit-test
 
 { "abcd e \\\"f g" }
-[ "\"abcd e \\\"f g\"" <sequence-parser> CHAR: \ CHAR: \" take-token* ] unit-test
+[ "\"abcd e \\\"f g\"" <sequence-parser> CHAR: \\ CHAR: \" take-token* ] unit-test
 
 { "123" }
 [ "123jjj" <sequence-parser> take-c-integer ] unit-test
@@ -40,7 +40,7 @@ IN: c.lexer
 
 : take-define-identifier ( sequence-parser -- string )
 skip-whitespace/comments
-[ current { [ blank? ] [ CHAR: ( = ] } 1|| ] take-until ;
+[ current { [ blank? ] [ CHAR: \( = ] } 1|| ] take-until ;
 
 :: take-quoted-string ( sequence-parser escape-char quote-char -- string )
 sequence-parser n>> :> start-n
@@ -69,7 +69,7 @@ IN: c.lexer
 } case ;
 
 : take-token ( sequence-parser -- string/f )
-CHAR: \ CHAR: \" take-token* ;
+CHAR: \\ CHAR: \" take-token* ;
 
 : c-identifier-begin? ( ch -- ? )
 CHAR: a CHAR: z [a,b]
@@ -85,7 +85,7 @@ ERROR: header-file-missing path ;
 
 : take-define-identifier ( sequence-parser -- string )
 skip-whitespace/comments
-[ current { [ blank? ] [ CHAR: ( = ] } 1|| ] take-until ;
+[ current { [ blank? ] [ CHAR: \( = ] } 1|| ] take-until ;
 
 :: handle-define ( preprocessor-state sequence-parser -- )
 sequence-parser take-define-identifier :> ident
@@ -59,7 +59,7 @@ DEFER: expression-parser
 [
 {
 [ blank? not ]
-[ CHAR: ) = not ]
+[ CHAR: \) = not ]
 [ CHAR: - = not ]
 } 1&&
 ] satisfy repeat1 [ >string ] action ;
@@ -70,15 +70,15 @@ CONSTANT: CHARS H{
 ! { CHAR: 8 CHAR: 8 }
 { CHAR: 9 CHAR: 6 }
 { CHAR: & 0x214B }
-{ CHAR: ! 0x00A1 }
+{ CHAR: \! 0x00A1 }
 { CHAR: \" 0x201E }
 { CHAR: . 0x02D9 }
 { CHAR: ; 0x061B }
-{ CHAR: \[ CHAR: ] }
-{ CHAR: ( CHAR: ) }
-{ CHAR: \{ CHAR: } }
+{ CHAR: \[ CHAR: \] }
+{ CHAR: \( CHAR: \) }
+{ CHAR: \{ CHAR: \} }
 { CHAR: ? 0x00BF }
-{ CHAR: ! 0x00A1 }
+{ CHAR: \! 0x00A1 }
 { CHAR: ' CHAR: , }
 { CHAR: < CHAR: > }
 { CHAR: _ 0x203E }
@@ -12,7 +12,7 @@ GENERIC: fuel-pprint ( obj -- )
 <PRIVATE
 
 : fuel-maybe-scape ( ch -- seq )
-dup "\\\"?#()[]'`;." member? [ CHAR: \ swap 2array ] [ 1array ] if ;
+dup "\\\"?#()[]'`;." member? [ CHAR: \\ swap 2array ] [ 1array ] if ;
 
 SYMBOL: :restarts
 
@@ -108,7 +108,7 @@ SYMBOL: tagstack
 
 : read-< ( sequence-parser -- string/f )
 advance dup current [
-CHAR: ! = [ read-bang f ] [ read-tag ] if
+CHAR: \! = [ read-bang f ] [ read-tag ] if
 ] [
 drop f
 ] if* ;
@@ -5,17 +5,17 @@ IN: infix.tokenizer.tests
 
 { V{ T{ ast-value f 1 } } } [ "1" tokenize-infix ] unit-test
 { V{ T{ ast-value f 1.02 } CHAR: * T{ ast-value f 3 } } } [ "1.02*3" tokenize-infix ] unit-test
-{ V{ T{ ast-value f 3 } CHAR: / CHAR: ( T{ ast-value f 3 } CHAR: + T{ ast-value f 4 } CHAR: ) } }
+{ V{ T{ ast-value f 3 } CHAR: / CHAR: \( T{ ast-value f 3 } CHAR: + T{ ast-value f 4 } CHAR: \) } }
 [ "3/(3+4)" tokenize-infix ] unit-test
-{ V{ "foo" CHAR: ( "x" CHAR: , "y" CHAR: , "z" CHAR: ) } } [ "foo(x,y,z)" tokenize-infix ] unit-test
-{ V{ "arr" CHAR: \[ "x" CHAR: + T{ ast-value f 3 } CHAR: ] } }
+{ V{ "foo" CHAR: \( "x" CHAR: , "y" CHAR: , "z" CHAR: \) } } [ "foo(x,y,z)" tokenize-infix ] unit-test
+{ V{ "arr" CHAR: \[ "x" CHAR: + T{ ast-value f 3 } CHAR: \] } }
 [ "arr[x+3]" tokenize-infix ] unit-test
 [ "1.0.4" tokenize-infix ] must-fail
-{ V{ CHAR: + CHAR: ] T{ ast-value f 3.4 } CHAR: , "bar" } }
+{ V{ CHAR: + CHAR: \] T{ ast-value f 3.4 } CHAR: , "bar" } }
 [ "+]3.4,bar" tokenize-infix ] unit-test
 { V{ "baz_34c" } } [ "baz_34c" tokenize-infix ] unit-test
 { V{ T{ ast-value f 34 } "c_baz" } } [ "34c_baz" tokenize-infix ] unit-test
-{ V{ CHAR: ( T{ ast-value f 1 } CHAR: + T{ ast-value f 2 } CHAR: ) } }
+{ V{ CHAR: \( T{ ast-value f 1 } CHAR: + T{ ast-value f 2 } CHAR: \) } }
 [ "(1+2)" tokenize-infix ] unit-test
 { V{ T{ ast-value f 1 } CHAR: + T{ ast-value f 2 } CHAR: / T{ ast-value f 3 } } }
 [ "1\n+\r2\t/ 3" tokenize-infix ] unit-test
@@ -17,7 +17,7 @@ NameFirst = Letter | "_" => [[ CHAR: _ ]]
 NameRest = NameFirst | Digit
 Name = NameFirst NameRest* => [[ first2 swap prefix >string ]]
 Special = [+*/%(),] | "-" => [[ CHAR: - ]]
-| "[" => [[ CHAR: \[ ]] | "]" => [[ CHAR: ] ]]
+| "[" => [[ CHAR: \[ ]] | "]" => [[ CHAR: \] ]]
 | ":" => [[ CHAR: \: ]]
 Tok = Spaces (Name | Number | String | Special )
 End = !(.)
@@ -24,7 +24,7 @@ IN: ini-file
 { CHAR: ? CHAR: ? }
 { CHAR: ; CHAR: ; }
 { CHAR: \[ CHAR: \[ }
-{ CHAR: ] CHAR: ] }
+{ CHAR: \] CHAR: \] }
 { CHAR: = CHAR: = }
 } ?at [ bad-escape ] unless ;
 
@@ -55,7 +55,7 @@ USE: xml.entities
 { CHAR: ? "\\?" }
 { CHAR: ; "\\;" }
 { CHAR: \[ "\\[" }
-{ CHAR: ] "\\]" }
+{ CHAR: \] "\\]" }
 { CHAR: = "\\=" }
 } escape-string-by ;
 
@@ -88,11 +88,11 @@ SYMBOL: option
 {
 [ length 1 > ]
 [ first CHAR: \[ = ]
-[ CHAR: ] swap last-index ]
+[ CHAR: \] swap last-index ]
 } 1&& ;
 
 : line-continues? ( line -- ? )
-{ [ empty? not ] [ last CHAR: \ = ] } 1&& ;
+{ [ empty? not ] [ last CHAR: \\ = ] } 1&& ;
 
 : section, ( -- )
 section get [ , ] when* ;
@@ -44,7 +44,7 @@ MACRO:: read-double-matched ( open-ch -- quot: ( n string tag ch -- n' string se
 } case
 ] ;
 
-: read-double-matched-paren ( n string tag ch -- n' string seq ) CHAR: ( read-double-matched ;
+: read-double-matched-paren ( n string tag ch -- n' string seq ) CHAR: \( read-double-matched ;
 : read-double-matched-bracket ( n string tag ch -- n' string seq ) CHAR: \[ read-double-matched ;
 : read-double-matched-brace ( n string tag ch -- n' string seq ) CHAR: \{ read-double-matched ;
 
@@ -105,13 +105,13 @@ MACRO:: read-matched ( ch -- quot: ( n string tag -- n' string slice' ) )
 
 : read-bracket ( n string slice -- n' string slice' ) CHAR: \[ read-matched ;
 : read-brace ( n string slice -- n' string slice' ) CHAR: \{ read-matched ;
-: read-paren ( n string slice -- n' string slice' ) CHAR: ( read-matched ;
+: read-paren ( n string slice -- n' string slice' ) CHAR: \( read-matched ;
 : read-string-payload ( n string -- n' string )
 over [
-{ CHAR: \ CHAR: " } slice-til-separator-inclusive {
+{ CHAR: \\ CHAR: \" } slice-til-separator-inclusive {
 { f [ drop ] }
-{ CHAR: " [ drop ] }
-{ CHAR: \ [ drop next-char-from drop read-string-payload ] }
+{ CHAR: \" [ drop ] }
+{ CHAR: \\ [ drop next-char-from drop read-string-payload ] }
 } case
 ] [
 string-expected-got-eof
@@ -230,9 +230,9 @@ ERROR: mismatched-terminator n string slice ;
 over [
 skip-whitespace "\"\\!:[{(<>\s\r\n" slice-til-either {
 ! { CHAR: ` [ read-backtick ] }
-{ CHAR: " [ read-string ] }
-{ CHAR: \ [ read-backslash ] }
-{ CHAR: ! [ read-exclamation ] }
+{ CHAR: \" [ read-string ] }
+{ CHAR: \\ [ read-backslash ] }
+{ CHAR: \! [ read-exclamation ] }
 { CHAR: \: [
 dup strict-upper? strict-upper get and [
 length swap [ - ] dip f
@@ -269,7 +269,7 @@ ERROR: mismatched-terminator n string slice ;
 ] }
 { CHAR: \[ [ read-bracket ] }
 { CHAR: \{ [ read-brace ] }
-{ CHAR: ( [ read-paren ] }
+{ CHAR: \( [ read-paren ] }
 { CHAR: \s [ read-token-or-whitespace ] }
 { CHAR: \r [ read-token-or-whitespace ] }
 { CHAR: \n [ read-token-or-whitespace ] }
@@ -9,9 +9,9 @@ IN: modern.slices
 
 : matching-delimiter ( ch -- ch' )
 H{
-{ CHAR: ( CHAR: ) }
-{ CHAR: \[ CHAR: ] }
-{ CHAR: \{ CHAR: } }
+{ CHAR: \( CHAR: \) }
+{ CHAR: \[ CHAR: \] }
+{ CHAR: \{ CHAR: \} }
 { CHAR: < CHAR: > }
 { CHAR: \: CHAR: ; }
 } ?at drop ;
@@ -144,7 +144,7 @@ ERROR: unexpected-end n string ;
 
 :: merge-slice-til-eol-slash' ( n string slice -- n' string slice/f ch/f )
 n string merge-slice-til-eol-slash'' :> ( n' string' slice' ch' )
-ch' CHAR: \ = [
+ch' CHAR: \\ = [
 n' 1 + string' ?nth' "\r\n" member? [
 n' 2 + string' slice slice' span-slices merge-slice-til-eol-slash'
 ] [
|
|||
[ [ from>> ] [ to>> ] [ seq>> ] tri ] dip
|
||||
swap [ + ] dip <slice> ;
|
||||
|
||||
! { CHAR: ] [ read-closing ] }
|
||||
! { CHAR: } [ read-closing ] }
|
||||
! { CHAR: ) [ read-closing ] }
|
||||
! { CHAR: \] [ read-closing ] }
|
||||
! { CHAR: \} [ read-closing ] }
|
||||
! { CHAR: \) [ read-closing ] }
|
||||
: read-closing ( n string tok -- n string tok )
|
||||
dup length 1 = [
|
||||
-1 modify-to [ 1 - ] 2dip
|
||||
|
|
|
@@ -60,10 +60,10 @@ CONSTANT: morse-code-table $[
 { CHAR: , "--..--" }
 { CHAR: ? "..--.." }
 { CHAR: ' ".----." }
-{ CHAR: ! "-.-.--" }
+{ CHAR: \! "-.-.--" }
 { CHAR: / "-..-." }
-{ CHAR: ( "-.--." }
-{ CHAR: ) "-.--.-" }
+{ CHAR: \( "-.--." }
+{ CHAR: \) "-.--.-" }
 { CHAR: & ".-..." }
 { CHAR: \: "---..." }
 { CHAR: ; "-.-.-." }
@@ -18,8 +18,8 @@ IN: pdf.values
 { CHAR: \r "\\r" }
 { CHAR: \t "\\t" }
 { CHAR: \\ "\\\\" }
-{ CHAR: ( "\\(" }
-{ CHAR: ) "\\)" }
+{ CHAR: \( "\\(" }
+{ CHAR: \) "\\)" }
 } escape-string-by ;
 
 PRIVATE>
@@ -27,7 +27,7 @@ IN: rosetta-code.balanced-brackets
 str [
 {
 { CHAR: \[ [ 1 ] }
-{ CHAR: ] [ -1 ] }
+{ CHAR: \] [ -1 ] }
 [ drop 0 ]
 } case counter + counter!
 counter 0 < [ f ok! ] when
@@ -20,7 +20,7 @@ WhitespaceCharacter = [ \t\n\r]
 DecimalDigit = [0-9]
 Letter = [A-Za-z]
 
-CommentCharacter = [^"] | '""' => [[ CHAR: " ]]
+CommentCharacter = [^"] | '""' => [[ CHAR: \" ]]
 Comment = '"' (CommentCharacter)*:s '"' => [[ s >string ast-comment boa ]]
 
 OptionalWhiteSpace = (WhitespaceCharacter | Comment)*
@@ -17,8 +17,8 @@ IN: text-to-pdf
 { CHAR: \r "\\r" }
 { CHAR: \t "\\t" }
 { CHAR: \\ "\\\\" }
-{ CHAR: ( "\\(" }
-{ CHAR: ) "\\)" }
+{ CHAR: \( "\\(" }
+{ CHAR: \) "\\)" }
 } escape-string-by "(" ")" surround ;
 
 : pdf-object ( str n -- str' )
@@ -45,9 +45,9 @@ DEFER: parse-tnetstring
 parse-payload {
 { CHAR: # [ string>number ] }
 { CHAR: \" [ ] }
-{ CHAR: } [ parse-dict ] }
-{ CHAR: ] [ parse-list ] }
-{ CHAR: ! [ parse-bool ] }
+{ CHAR: \} [ parse-dict ] }
+{ CHAR: \] [ parse-list ] }
+{ CHAR: \! [ parse-bool ] }
 { CHAR: ~ [ parse-null ] }
 { CHAR: , [ ] }
 [ "Invalid payload type: %c" sprintf throw ]
@@ -13,7 +13,7 @@ IN: txon
 "\\`" "`" replace ;
 
 : `? ( ch1 ch2 -- ? )
-[ CHAR: \ = not ] [ CHAR: ` = ] bi* and ;
+[ CHAR: \\ = not ] [ CHAR: ` = ] bi* and ;
 
 : (find-`) ( string -- n/f )
 2 clump [ first2 `? ] find drop [ 1 + ] [ f ] if* ;