From d635604026db24a64d2cb809595d1b7e73258ec3 Mon Sep 17 00:00:00 2001
From: Doug Coleman
Date: Sat, 28 Sep 2019 10:59:07 -0500
Subject: [PATCH] modern: Removing ch'a syntax. It's cool but I don't like it right now.

---
 basis/alien/parser/parser.factor | 2 +-
 basis/base64/base64.factor | 30 ++--
 basis/calendar/format/format.factor | 6 +-
 basis/calendar/parser/parser.factor | 14 +-
 .../checksums/adler-32/adler-32-tests.factor | 2 +-
 basis/checksums/bsd/bsd-tests.factor | 2 +-
 basis/checksums/crc16/crc16.factor | 2 +-
 basis/checksums/ripemd/ripemd-tests.factor | 2 +-
 basis/checksums/sha/sha-tests.factor | 2 +-
 basis/circular/circular-tests.factor | 12 +-
 basis/cocoa/messages/messages.factor | 12 +-
 basis/colors/constants/constants.factor | 2 +-
 .../cfg/linear-scan/debugger/debugger.factor | 4 +-
 basis/compiler/tests/intrinsics.factor | 12 +-
 basis/csv/csv-docs.factor | 2 +-
 basis/csv/csv-tests.factor | 2 +-
 basis/csv/csv.factor | 16 +-
 basis/db/postgresql/postgresql.factor | 2 +-
 basis/db/tester/tester.factor | 2 +-
 basis/db/tuples/tuples-tests.factor | 2 +-
 basis/english/english-tests.factor | 2 +-
 basis/environment/environment-tests.factor | 2 +-
 basis/escape-strings/escape-strings.factor | 12 +-
 basis/farkup/farkup.factor | 46 +++---
 basis/formatting/formatting-tests.factor | 10 +-
 basis/formatting/formatting.factor | 28 ++--
 basis/ftp/client/client.factor | 4 +-
 .../listing-parser/listing-parser.factor | 30 ++--
 .../auth/providers/couchdb/couchdb.factor | 2 +-
 .../gobject-introspection/types/types.factor | 2 +-
 basis/help/html/html.factor | 28 ++--
 basis/help/lint/spaces/spaces.factor | 2 +-
 basis/help/tutorial/tutorial.factor | 4 +-
 basis/html/streams/streams-tests.factor | 2 +-
 basis/html/streams/streams.factor | 2 +-
 basis/html/templates/chloe/chloe-tests.factor | 2 +-
 basis/html/templates/fhtml/fhtml.factor | 2 +-
 basis/http/parsers/parsers.factor | 2 +-
 basis/io/backend/unix/unix.factor | 2 +-
 basis/io/buffers/buffers-tests.factor | 6 +-
 basis/io/crlf/crlf-tests.factor | 6 +-
 basis/io/crlf/crlf.factor | 6 +-
 basis/io/encodings/8-bit/8-bit-tests.factor | 4 +-
 basis/io/encodings/euc/euc-tests.factor | 2 +-
 .../io/encodings/gb18030/gb18030-tests.factor | 8 +-
 .../io/encodings/iso2022/iso2022-tests.factor | 44 ++---
 basis/io/encodings/iso2022/iso2022.factor | 24 +--
 basis/io/encodings/latin1/latin1-tests.factor | 6 +-
 .../shift-jis/shift-jis-tests.factor | 20 +--
 basis/io/encodings/utf32/utf32-tests.factor | 26 +--
 basis/io/encodings/utf7/utf7.factor | 10 +-
 basis/io/files/info/unix/linux/linux.factor | 2 +-
 basis/io/files/info/unix/unix.factor | 30 ++--
 basis/io/files/info/windows/windows.factor | 2 +-
 basis/io/files/unique/unique-tests.factor | 2 +-
 basis/io/files/windows/windows.factor | 2 +-
 basis/io/launcher/windows/windows.factor | 10 +-
 basis/io/mmap/mmap-tests.factor | 2 +-
 .../io/sockets/secure/openssl/openssl.factor | 2 +-
 basis/io/streams/limited/limited-tests.factor | 6 +-
 .../io/streams/throwing/throwing-tests.factor | 2 +-
 basis/ip-parser/ip-parser.factor | 2 +-
 basis/json/prettyprint/prettyprint.factor | 2 +-
 basis/json/reader/reader.factor | 50 +++---
 basis/json/writer/writer.factor | 36 ++---
 basis/lcs/diff2html/diff2html.factor | 2 +-
 basis/lcs/lcs-tests.factor | 16 +-
 basis/listener/listener.factor | 2 +-
 basis/logging/server/server.factor | 2 +-
 basis/mime/multipart/multipart-tests.factor | 2 +-
 basis/pack/pack.factor | 90 +++++------
 basis/peg/ebnf/ebnf-docs.factor | 2 +-
 basis/peg/ebnf/ebnf-tests.factor | 16 +-
 basis/peg/ebnf/ebnf.factor | 12 +-
basis/peg/parsers/parsers-docs.factor | 2 +- basis/peg/parsers/parsers.factor | 8 +- basis/peg/peg-docs.factor | 4 +- basis/peg/peg-tests.factor | 26 +-- .../hashtables/hashtables-tests.factor | 2 +- basis/porter-stemmer/porter-stemmer.factor | 8 +- basis/prettyprint/backend/backend-docs.factor | 2 +- basis/prettyprint/backend/backend.factor | 24 +-- basis/prettyprint/prettyprint-tests.factor | 2 +- basis/prettyprint/sections/sections.factor | 2 +- .../quoted-printable/quoted-printable.factor | 14 +- basis/random/data/data.factor | 6 +- basis/regexp/classes/classes-tests.factor | 8 +- basis/regexp/classes/classes.factor | 14 +- basis/regexp/compiler/compiler.factor | 4 +- basis/regexp/minimize/minimize-tests.factor | 16 +- basis/regexp/negation/negation-tests.factor | 4 +- basis/regexp/nfa/nfa.factor | 4 +- basis/regexp/parser/parser.factor | 52 +++--- basis/regexp/regexp.factor | 2 +- basis/sequences/parser/parser-tests.factor | 2 +- basis/serialize/serialize.factor | 62 ++++---- basis/smtp/smtp.factor | 2 +- basis/strings/tables/tables.factor | 2 +- basis/tools/deploy/config/config-docs.factor | 4 +- basis/tools/disassembler/udis/udis.factor | 4 +- basis/tools/files/files.factor | 10 +- basis/tools/hexdump/hexdump.factor | 18 +-- basis/tools/profiler/sampling/sampling.factor | 2 +- basis/tools/scaffold/scaffold.factor | 6 +- basis/tools/test/test.factor | 2 +- basis/tools/trace/trace.factor | 2 +- basis/ui/backend/x11/x11.factor | 6 +- basis/ui/gadgets/grids/grids.factor | 2 +- basis/ui/gadgets/labels/labels.factor | 2 +- basis/ui/gadgets/panes/panes.factor | 2 +- basis/ui/text/pango/pango.factor | 2 +- .../listener/completion/completion.factor | 2 +- basis/ui/tools/listener/listener-docs.factor | 2 +- basis/ui/tools/listener/listener-tests.factor | 2 +- basis/ui/tools/listener/listener.factor | 4 +- basis/unicode/breaks/breaks.factor | 4 +- basis/unicode/case/case.factor | 18 +-- .../unicode/collation/collation-tests.factor | 2 +- basis/unicode/collation/collation.factor | 2 +- basis/unicode/data/data.factor | 2 +- .../unicode/normalize/normalize-tests.factor | 2 +- basis/unicode/script/script-tests.factor | 2 +- basis/unix/linux/proc/proc.factor | 8 +- basis/urls/encoding/encoding.factor | 4 +- basis/uuid/uuid.factor | 12 +- basis/validators/validators.factor | 2 +- basis/vocabs/hierarchy/hierarchy.factor | 2 +- basis/windows/ole32/ole32.factor | 22 +-- basis/windows/registry/registry.factor | 2 +- basis/windows/user32/user32.factor | 72 ++++----- basis/wrap/strings/strings.factor | 2 +- basis/xml/autoencoding/autoencoding.factor | 6 +- basis/xml/dtd/dtd.factor | 6 +- basis/xml/elements/elements.factor | 26 +-- basis/xml/entities/entities.factor | 24 +-- basis/xml/name/name.factor | 4 +- basis/xml/tests/state-parser-tests.factor | 2 +- basis/xml/tokenize/tokenize.factor | 20 +-- basis/xml/writer/writer.factor | 8 +- core/alien/c-types/c-types.factor | 2 +- core/alien/libraries/finder/finder.factor | 2 +- core/alien/libraries/freebsd/freebsd.factor | 2 +- core/ascii/ascii-tests.factor | 14 +- core/ascii/ascii.factor | 8 +- core/checksums/checksums.factor | 4 +- core/checksums/crc32/crc32.factor | 2 +- core/effects/effects.factor | 4 +- core/io/encodings/ascii/ascii-tests.factor | 4 +- core/io/encodings/encodings-tests.factor | 2 +- core/io/encodings/encodings.factor | 10 +- core/io/encodings/utf16/utf16-tests.factor | 26 +-- core/io/encodings/utf8/utf8-tests.factor | 22 +-- core/io/files/files-tests.factor | 8 +- core/io/io.factor | 4 +- core/io/pathnames/pathnames-docs.factor | 2 +- 
core/io/pathnames/pathnames.factor | 2 +- .../byte-array/byte-array-tests.factor | 4 +- core/io/streams/plain/plain.factor | 2 +- core/io/streams/string/string-tests.factor | 8 +- core/io/streams/string/string.factor | 4 +- core/lexer/lexer.factor | 18 +-- core/locals/locals-tests.factor | 4 +- core/make/make-docs.factor | 2 +- core/math/parser/parser-tests.factor | 18 +-- core/math/parser/parser.factor | 78 ++++----- core/multiline/multiline.factor | 2 +- core/parser/parser-tests.factor | 6 +- core/parser/parser.factor | 2 +- core/sbufs/sbufs-tests.factor | 10 +- .../generalizations-docs.factor | 2 +- core/sequences/sequences-docs.factor | 6 +- core/sequences/sequences-tests.factor | 40 ++--- core/splitting/splitting.factor | 4 +- core/strings/parser/parser-docs.factor | 2 +- core/strings/parser/parser.factor | 60 +++---- core/strings/strings-tests.factor | 30 ++-- core/syntax/modern/modern.factor | 28 ++-- core/syntax/syntax-docs.factor | 14 +- core/vocabs/loader/loader.factor | 2 +- core/vocabs/vocabs.factor | 2 +- extra/alien/fortran/fortran.factor | 4 +- .../base32-crockford/base32-crockford.factor | 8 +- extra/base32/base32.factor | 8 +- extra/base85/base85.factor | 2 +- extra/benchmark/fannkuch/fannkuch.factor | 2 +- extra/benchmark/fasta/fasta.factor | 38 ++--- extra/benchmark/fib7/fib7.factor | 2 +- .../benchmark/knucleotide/knucleotide.factor | 2 +- extra/benchmark/make/make.factor | 2 +- extra/benchmark/regexp/regexp.factor | 2 +- extra/benchmark/sockets/sockets.factor | 8 +- extra/bencode/bencode.factor | 12 +- extra/c/lexer/lexer-tests.factor | 12 +- extra/c/lexer/lexer.factor | 20 +-- extra/c/preprocessor/preprocessor.factor | 8 +- extra/cgi/cgi.factor | 2 +- extra/codebook/codebook.factor | 6 +- extra/color-table/color-table.factor | 2 +- extra/colors/flex-hex/flex-hex.factor | 6 +- extra/couchdb/couchdb.factor | 4 +- extra/cpu/8080/emulator/emulator.factor | 44 ++--- extra/crontab/crontab.factor | 6 +- extra/crypto/passwd-md5/passwd-md5.factor | 2 +- extra/ctags/ctags.factor | 4 +- extra/cuda/ptx/ptx.factor | 2 +- extra/cuesheet/cuesheet.factor | 4 +- extra/curses/curses.factor | 6 +- extra/curses/listener/listener.factor | 4 +- extra/dbf/dbf.factor | 62 ++++---- extra/decimals/decimals.factor | 4 +- extra/enigma/enigma.factor | 4 +- extra/find/extras/extras-tests.factor | 12 +- extra/find/extras/extras.factor | 6 +- extra/fjsc/fjsc.factor | 10 +- extra/flip-text/flip-text.factor | 150 +++++++++--------- extra/fuel/help/help.factor | 2 +- extra/fuel/pprint/pprint.factor | 2 +- extra/gap-buffer/gap-buffer-tests.factor | 12 +- extra/git/git.factor | 12 +- extra/gopher/gopher.factor | 54 +++---- extra/gpu/demos/bunny/bunny.factor | 4 +- extra/hamurabi/hamurabi.factor | 2 +- extra/hashcash/hashcash.factor | 6 +- extra/html/entities/entities.factor | 10 +- extra/html/parser/parser.factor | 20 +-- extra/html/parser/utils/utils.factor | 2 +- extra/images/bitmap/bitmap.factor | 2 +- extra/images/pbm/pbm.factor | 6 +- extra/images/pgm/pgm.factor | 2 +- extra/images/ppm/ppm.factor | 2 +- extra/images/tiff/tiff.factor | 4 +- extra/imap/imap-tests.factor | 2 +- extra/infix/infix-tests.factor | 4 +- extra/infix/tokenizer/tokenizer-tests.factor | 14 +- extra/infix/tokenizer/tokenizer.factor | 8 +- extra/ini-file/ini-file.factor | 62 ++++---- extra/io/encodings/detect/detect.factor | 2 +- extra/io/streams/peek/peek-tests.factor | 8 +- .../client/participants/participants.factor | 6 +- extra/irc/messages/base/base.factor | 2 +- extra/lcd/lcd.factor | 2 +- 
extra/mason/platform/platform.factor | 2 +- extra/math/extras/extras-tests.factor | 2 +- .../math/floating-point/floating-point.factor | 4 +- extra/math/text/english/english.factor | 2 +- extra/math/text/french/french.factor | 4 +- extra/metar/metar.factor | 24 +-- extra/modern/modern-tests.factor | 14 +- extra/modern/modern.factor | 80 +++++----- extra/modern/out/out.factor | 4 +- extra/modern/slices/slices.factor | 12 +- extra/money/money.factor | 4 +- extra/morse/morse-tests.factor | 8 +- extra/morse/morse.factor | 122 +++++++------- extra/odbc/odbc.factor | 2 +- extra/parser-combinators/simple/simple.factor | 10 +- extra/path-finding/path-finding-tests.factor | 2 +- extra/pcre/pcre-tests.factor | 2 +- extra/pdf/layout/layout.factor | 4 +- extra/pdf/values/values.factor | 12 +- .../peg/javascript/tokenizer/tokenizer.factor | 20 +-- extra/poker/poker.factor | 16 +- extra/progress-bars/progress-bars.factor | 2 +- extra/project-euler/051/051.factor | 4 +- extra/project-euler/common/common.factor | 2 +- extra/project-euler/project-euler.factor | 2 +- .../response-parser/response-parser.factor | 10 +- .../align-columns/align-columns.factor | 8 +- .../balanced-brackets.factor | 4 +- .../multiplication-tables.factor | 6 +- .../number-reversal/number-reversal.factor | 2 +- .../one-d-cellular/one-d-cellular.factor | 2 +- extra/rot13/rot13.factor | 4 +- .../semantic-versioning.factor | 4 +- extra/sequences/extras/extras-docs.factor | 4 +- extra/sequences/extras/extras-tests.factor | 22 +-- .../interleaved/interleaved-tests.factor | 10 +- extra/smalltalk/parser/parser-tests.factor | 4 +- extra/smalltalk/parser/parser.factor | 10 +- extra/smalltalk/printer/printer.factor | 2 +- extra/smalltalk/selectors/selectors.factor | 4 +- extra/spelling/spelling.factor | 2 +- extra/successor/successor.factor | 4 +- extra/svg/svg.factor | 2 +- extra/tar/tar.factor | 42 ++--- extra/terminal/macosx/macosx.factor | 2 +- extra/text-analysis/text-analysis.factor | 4 +- extra/text-to-pdf/text-to-pdf.factor | 12 +- extra/tnetstrings/tnetstrings.factor | 14 +- extra/toml/toml.factor | 24 +-- extra/tools/cal/cal.factor | 6 +- extra/tools/wc/wc.factor | 4 +- extra/turing/turing.factor | 2 +- extra/txon/txon.factor | 8 +- extra/ulid/ulid-tests.factor | 4 +- extra/units/reduction/reduction.factor | 6 +- extra/uu/uu.factor | 10 +- extra/yaml/conversion/conversion.factor | 10 +- extra/yaml/yaml-tests.factor | 2 +- extra/zealot/factor/factor.factor | 2 +- extra/zoneinfo/zoneinfo.factor | 2 +- misc/fuel/strange-syntax.factor | 8 +- 302 files changed, 1591 insertions(+), 1591 deletions(-) diff --git a/basis/alien/parser/parser.factor b/basis/alien/parser/parser.factor index 05d7325e46..d58afbbfa2 100644 --- a/basis/alien/parser/parser.factor +++ b/basis/alien/parser/parser.factor @@ -21,7 +21,7 @@ ERROR: bad-array-type ; : (parse-c-type) ( string -- type ) { { [ "*" ?tail ] [ (parse-c-type) ] } - { [ ch'\] over member? ] [ parse-array-type ] } + { [ char: \] over member? 
] [ parse-array-type ] } { [ dup search ] [ parse-word ] } [ parse-word ] } cond ; diff --git a/basis/base64/base64.factor b/basis/base64/base64.factor index 45d71b65de..37f17c4ee8 100644 --- a/basis/base64/base64.factor +++ b/basis/base64/base64.factor @@ -26,7 +26,7 @@ CONSTANT: alphabet $[ alphabet nth ; inline : base64>ch ( ch -- ch ) - $[ alphabet alphabet-inverse 0 ch'= pick set-nth ] nth + $[ alphabet alphabet-inverse 0 char: = pick set-nth ] nth [ malformed-base64 ] unless* { fixnum } declare ; inline : encode3 ( x y z -- a b c d ) @@ -41,7 +41,7 @@ CONSTANT: alphabet $[ [ stream stream-write1 1 + dup 76 = [ drop 0 - B{ ch'\r ch'\n } stream stream-write + B{ char: \r char: \n } stream stream-write ] when ] each ] [ @@ -61,8 +61,8 @@ CONSTANT: alphabet $[ input stream-read1 [ [ 0 or ] bi@ encode3 ] 2keep [ 0 1 ? ] bi@ + { { 0 [ ] } - { 1 [ drop ch'= ] } - { 2 [ 2drop ch'= ch'= ] } + { 1 [ drop char: = ] } + { 2 [ 2drop char: = char: = ] } } case data (4sequence) output stream-write-lines ] while 2drop ; inline @@ -95,12 +95,12 @@ PRIVATE> :: (decode-base64) ( input output -- ) 3 :> data - [ B{ ch'\n ch'\r } input read1-ignoring dup ] [ - B{ ch'\n ch'\r } input read1-ignoring ch'= or - B{ ch'\n ch'\r } input read1-ignoring ch'= or - B{ ch'\n ch'\r } input read1-ignoring ch'= or + [ B{ char: \n char: \r } input read1-ignoring dup ] [ + B{ char: \n char: \r } input read1-ignoring char: = or + B{ char: \n char: \r } input read1-ignoring char: = or + B{ char: \n char: \r } input read1-ignoring char: = or [ decode4 data (3sequence) ] 3keep - [ ch'= eq? 1 0 ? ] tri@ + + + [ char: = eq? 1 0 ? ] tri@ + + [ head-slice* ] unless-zero output stream-write ] while drop ; @@ -142,18 +142,18 @@ PRIVATE> : >urlsafe-base64 ( seq -- base64 ) >base64 H{ - { ch'+ ch'- } - { ch'/ ch'_ } + { char: + char: - } + { char: / char: _ } } substitute ; : urlsafe-base64> ( base64 -- seq ) H{ - { ch'- ch'+ } - { ch'_ ch'/ } + { char: - char: + } + { char: _ char: / } } substitute base64> ; : >urlsafe-base64-lines ( seq -- base64 ) >base64-lines H{ - { ch'+ ch'- } - { ch'/ ch'_ } + { char: + char: - } + { char: / char: _ } } substitute ; diff --git a/basis/calendar/format/format.factor b/basis/calendar/format/format.factor index e848e28a37..65c75613c8 100644 --- a/basis/calendar/format/format.factor +++ b/basis/calendar/format/format.factor @@ -15,14 +15,14 @@ MACRO: formatted ( spec -- quot ) } cond ] map [ cleave ] curry ; -: pad-00 ( n -- str ) number>string 2 ch'0 pad-head ; +: pad-00 ( n -- str ) number>string 2 char: 0 pad-head ; : formatted>string ( spec -- string ) '[ _ formatted ] with-string-writer ; inline -: pad-0000 ( n -- str ) number>string 4 ch'0 pad-head ; +: pad-0000 ( n -- str ) number>string 4 char: 0 pad-head ; -: pad-00000 ( n -- str ) number>string 5 ch'0 pad-head ; +: pad-00000 ( n -- str ) number>string 5 char: 0 pad-head ; : write-00 ( n -- ) pad-00 write ; diff --git a/basis/calendar/parser/parser.factor b/basis/calendar/parser/parser.factor index 88536c0800..9f7e78c6ca 100644 --- a/basis/calendar/parser/parser.factor +++ b/basis/calendar/parser/parser.factor @@ -28,16 +28,16 @@ ERROR: invalid-timestamp-format ; : read-sp ( -- token ) " " read-token ; : signed-gmt-offset ( dt ch -- dt' ) - { { ch'+ [ 1 ] } { ch'- [ -1 ] } } case time* ; + { { char: + [ 1 ] } { char: - [ -1 ] } } case time* ; : read-rfc3339-gmt-offset ( ch -- dt ) { { f [ instant ] } - { ch'Z [ instant ] } + { char: Z [ instant ] } [ [ read-00 hours - read1 { { ch'\: [ read-00 ] } { f [ 0 ] } } case minutes + read1 { { 
char: \: [ read-00 ] } { f [ 0 ] } } case minutes time+ ] dip signed-gmt-offset ] @@ -58,7 +58,7 @@ ERROR: invalid-timestamp-format ; read-ymd "Tt \t" expect read-hms - read1 { { ch'. [ read-rfc3339-seconds ] } [ ] } case + read1 { { char: . [ read-rfc3339-seconds ] } [ ] } case read-rfc3339-gmt-offset ; @@ -66,7 +66,7 @@ ERROR: invalid-timestamp-format ; [ (rfc3339>timestamp) ] with-string-reader ; : parse-rfc822-military-offset ( string -- dt ) - first ch'A - { + first char: A - { -1 -2 -3 -4 -5 -6 -7 -8 -9 f -10 -11 -12 1 2 3 4 5 6 7 8 9 10 11 12 0 } nth hours ; @@ -101,7 +101,7 @@ CONSTANT: rfc822-named-zones H{ : (rfc822>timestamp) ( -- timestamp ) "," read-token day-abbreviations3 member? check-timestamp drop - read1 ch'\s assert= + read1 char: \s assert= read-sp checked-number read-sp month-abbreviations index 1 + check-timestamp read-sp checked-number spin @@ -117,7 +117,7 @@ CONSTANT: rfc822-named-zones H{ : (cookie-string>timestamp-1) ( -- timestamp ) "," read-token check-day-name - read1 ch'\s assert= + read1 char: \s assert= "-" read-token checked-number "-" read-token month-abbreviations index 1 + check-timestamp read-sp checked-number spin diff --git a/basis/checksums/adler-32/adler-32-tests.factor b/basis/checksums/adler-32/adler-32-tests.factor index 6f939aa195..a496c5b28d 100644 --- a/basis/checksums/adler-32/adler-32-tests.factor +++ b/basis/checksums/adler-32/adler-32-tests.factor @@ -1,4 +1,4 @@ USING: checksums checksums.adler-32 strings tools.test ; { 300286872 } [ "Wikipedia" adler-32 checksum-bytes ] unit-test -{ 2679885283 } [ 10000 ch'a adler-32 checksum-bytes ] unit-test +{ 2679885283 } [ 10000 char: a adler-32 checksum-bytes ] unit-test diff --git a/basis/checksums/bsd/bsd-tests.factor b/basis/checksums/bsd/bsd-tests.factor index 32c5d20292..47c03c0cf3 100644 --- a/basis/checksums/bsd/bsd-tests.factor +++ b/basis/checksums/bsd/bsd-tests.factor @@ -1,4 +1,4 @@ USING: checksums checksums.bsd strings tools.test ; { 15816 } [ "Wikipedia" bsd checksum-bytes ] unit-test -{ 47937 } [ 10000 ch'a bsd checksum-bytes ] unit-test +{ 47937 } [ 10000 char: a bsd checksum-bytes ] unit-test diff --git a/basis/checksums/crc16/crc16.factor b/basis/checksums/crc16/crc16.factor index 9d3f253c7b..cee408d3b9 100644 --- a/basis/checksums/crc16/crc16.factor +++ b/basis/checksums/crc16/crc16.factor @@ -36,5 +36,5 @@ M: crc16 checksum-bytes M: crc16 checksum-lines init-crc16 - [ [ (crc16) ] each ch'\n (crc16) ] each + [ [ (crc16) ] each char: \n (crc16) ] each finish-crc16 ; inline diff --git a/basis/checksums/ripemd/ripemd-tests.factor b/basis/checksums/ripemd/ripemd-tests.factor index 932db3b237..f6ada81481 100644 --- a/basis/checksums/ripemd/ripemd-tests.factor +++ b/basis/checksums/ripemd/ripemd-tests.factor @@ -64,4 +64,4 @@ USING: checksums checksums.ripemd strings tools.test ; 0x69 0x7b 0xdb 0xe1 0x6d 0x37 0xf9 0x7f 0x68 0xf0 0x83 0x25 0xdc 0x15 0x28 -} } [ 1000000 ch'a ripemd-160 checksum-bytes ] unit-test +} } [ 1000000 char: a ripemd-160 checksum-bytes ] unit-test diff --git a/basis/checksums/sha/sha-tests.factor b/basis/checksums/sha/sha-tests.factor index d933abc869..51af927448 100644 --- a/basis/checksums/sha/sha-tests.factor +++ b/basis/checksums/sha/sha-tests.factor @@ -5,7 +5,7 @@ IN: checksums.sha.tests { "a9993e364706816aba3e25717850c26c9cd0d89d" } [ "abc" sha1 checksum-bytes bytes>hex-string ] unit-test { "84983e441c3bd26ebaae4aa1f95129e5e54670f1" } [ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" sha1 checksum-bytes bytes>hex-string ] unit-test -! 
[ "34aa973cd4c4daa4f61eeb2bdbad27316534016f" ] [ 1000000 ch'a fill string>sha1str ] unit-test ! takes a long time... +! [ "34aa973cd4c4daa4f61eeb2bdbad27316534016f" ] [ 1000000 char: a fill string>sha1str ] unit-test ! takes a long time... { "dea356a2cddd90c7a7ecedc5ebb563934f460452" } [ "0123456701234567012345670123456701234567012345670123456701234567" 10 swap concat sha1 checksum-bytes bytes>hex-string ] unit-test diff --git a/basis/circular/circular-tests.factor b/basis/circular/circular-tests.factor index 35acafa967..7b1b39cb23 100644 --- a/basis/circular/circular-tests.factor +++ b/basis/circular/circular-tests.factor @@ -7,10 +7,10 @@ IN: circular.tests { 0 } [ { 0 1 2 3 4 } 0 swap virtual@ drop ] unit-test { 2 } [ { 0 1 2 3 4 } 2 swap virtual@ drop ] unit-test -{ ch't } [ "test" 0 swap nth ] unit-test +{ char: t } [ "test" 0 swap nth ] unit-test { "test" } [ "test" >string ] unit-test -{ ch'e } [ "test" 5 swap nth-unsafe ] unit-test +{ char: e } [ "test" 5 swap nth-unsafe ] unit-test { [ 1 2 3 ] } [ { 1 2 3 } [ ] like ] unit-test { [ 2 3 1 ] } [ { 1 2 3 } [ rotate-circular ] keep [ ] like ] unit-test @@ -19,9 +19,9 @@ IN: circular.tests { [ 3 1 2 ] } [ { 1 2 3 } 1 over change-circular-start 1 over change-circular-start [ ] like ] unit-test { [ 3 1 2 ] } [ { 1 2 3 } -100 over change-circular-start [ ] like ] unit-test -{ "fob" } [ "foo" ch'b 2 pick set-nth >string ] unit-test -{ "boo" } [ "foo" ch'b 3 pick set-nth-unsafe >string ] unit-test -{ "ornact" } [ "factor" 4 over change-circular-start ch'n 2 pick set-nth >string ] unit-test +{ "fob" } [ "foo" char: b 2 pick set-nth >string ] unit-test +{ "boo" } [ "foo" char: b 3 pick set-nth-unsafe >string ] unit-test +{ "ornact" } [ "factor" 4 over change-circular-start char: n 2 pick set-nth >string ] unit-test { "bcd" } [ 3 "abcd" [ over circular-push ] each >string ] unit-test @@ -29,7 +29,7 @@ IN: circular.tests ! This no longer fails ! [ "test" 5 swap nth ] must-fail -! [ "foo" ch'b 3 rot set-nth ] must-fail +! [ "foo" char: b 3 rot set-nth ] must-fail { { } } [ 3 >array ] unit-test { { 1 2 } } [ diff --git a/basis/cocoa/messages/messages.factor b/basis/cocoa/messages/messages.factor index 94a12865bc..a14b12a710 100644 --- a/basis/cocoa/messages/messages.factor +++ b/basis/cocoa/messages/messages.factor @@ -42,7 +42,7 @@ INITIALIZED-SYMBOL: super-message-senders [ H{ } clone ] TUPLE: selector-tuple name object ; : selector-name ( name -- name' ) - ch'. over index [ 0 > [ "." split1 nip ] when ] when* ; + char: . over index [ 0 > [ "." split1 nip ] when ] when* ; MEMO: ( name -- sel ) selector-name f selector-tuple boa ; @@ -185,7 +185,7 @@ cell { assoc-union alien>objc-types set-global : objc-struct-type ( i string -- ctype ) - [ ch'= ] 2keep index-from swap subseq + [ char: = ] 2keep index-from swap subseq objc>struct-types get at* [ drop void* ] unless ; ERROR: no-objc-type name ; @@ -197,9 +197,9 @@ ERROR: no-objc-type name ; : (parse-objc-type) ( i string -- ctype ) [ [ 1 + ] dip ] [ nth ] 2bi { { [ dup "rnNoORV" member? ] [ drop (parse-objc-type) ] } - { [ dup ch'^ = ] [ 3drop void* ] } - { [ dup ch'\{ = ] [ drop objc-struct-type ] } - { [ dup ch'\[ = ] [ 3drop void* ] } + { [ dup char: ^ = ] [ 3drop void* ] } + { [ dup char: \{ = ] [ drop objc-struct-type ] } + { [ dup char: \[ = ] [ 3drop void* ] } [ 2nip decode-type ] } cond ; @@ -235,7 +235,7 @@ ERROR: no-objc-type name ; : method-collisions ( -- collisions ) objc-methods get >alist - [ first ch'. swap member? ] filter + [ first char: . swap member? ] filter [ first "." 
split1 nip ] collect-by [ nip values members length 1 > ] assoc-filter ; diff --git a/basis/colors/constants/constants.factor b/basis/colors/constants/constants.factor index e00ffb7bc2..85d34b0218 100644 --- a/basis/colors/constants/constants.factor +++ b/basis/colors/constants/constants.factor @@ -9,7 +9,7 @@ IN: colors.constants : parse-color ( line -- name color ) first4 [ [ string>number 255 /f ] tri@ 1.0 ] dip - [ blank? ] trim-head H{ { ch'\s ch'- } } substitute swap ; + [ blank? ] trim-head H{ { char: \s char: - } } substitute swap ; : parse-colors ( lines -- assoc ) [ "!" head? ] reject diff --git a/basis/compiler/cfg/linear-scan/debugger/debugger.factor b/basis/compiler/cfg/linear-scan/debugger/debugger.factor index 9f67471bf8..24116fa41b 100644 --- a/basis/compiler/cfg/linear-scan/debugger/debugger.factor +++ b/basis/compiler/cfg/linear-scan/debugger/debugger.factor @@ -14,8 +14,8 @@ IN: compiler.cfg.linear-scan.debugger allocate-registers drop ; : picture ( uses -- str ) - dup last 1 + ch'space - [ '[ ch'* swap _ set-nth ] each ] keep ; + dup last 1 + char: space + [ '[ char: * swap _ set-nth ] each ] keep ; : interval-picture ( interval -- str ) [ uses>> picture ] diff --git a/basis/compiler/tests/intrinsics.factor b/basis/compiler/tests/intrinsics.factor index 467a38682c..4e37686d0f 100644 --- a/basis/compiler/tests/intrinsics.factor +++ b/basis/compiler/tests/intrinsics.factor @@ -42,12 +42,12 @@ IN: compiler.tests.intrinsics ! Write barrier hits on the wrong value were causing segfaults [ -3 ] [ -3 1 2 [ 2array [ 3 set-slot ] keep ] compile-call second ] unit-test -[ ch'a ] [ 0 "abc" [ string-nth ] compile-call ] unit-test -[ ch'a ] [ 0 [ "abc" string-nth ] compile-call ] unit-test -[ ch'a ] [ [ 0 "abc" string-nth ] compile-call ] unit-test -[ ch'b ] [ 1 "abc" [ string-nth ] compile-call ] unit-test -[ ch'b ] [ 1 [ "abc" string-nth ] compile-call ] unit-test -[ ch'b ] [ [ 1 "abc" string-nth ] compile-call ] unit-test +[ char: a ] [ 0 "abc" [ string-nth ] compile-call ] unit-test +[ char: a ] [ 0 [ "abc" string-nth ] compile-call ] unit-test +[ char: a ] [ [ 0 "abc" string-nth ] compile-call ] unit-test +[ char: b ] [ 1 "abc" [ string-nth ] compile-call ] unit-test +[ char: b ] [ 1 [ "abc" string-nth ] compile-call ] unit-test +[ char: b ] [ [ 1 "abc" string-nth ] compile-call ] unit-test [ 0x123456 ] [ 0 "\u123456bc" [ string-nth ] compile-call ] unit-test [ 0x123456 ] [ 0 [ "\u123456bc" string-nth ] compile-call ] unit-test diff --git a/basis/csv/csv-docs.factor b/basis/csv/csv-docs.factor index 66eafc3d90..9277fb9eca 100644 --- a/basis/csv/csv-docs.factor +++ b/basis/csv/csv-docs.factor @@ -46,7 +46,7 @@ HELP: write-csv { $description "Writes a sequence of sequences of comma-separated-values to the output stream, escaping where necessary." } ; HELP: with-delimiter -{ $values { "ch" "field delimiter (e.g. ch'\\t)" } +{ $values { "ch" "field delimiter (e.g. char: \\t)" } { "quot" quotation } } { $description "Sets the field delimiter for read-csv, read-row, write-csv, or write-row words." 
} ; diff --git a/basis/csv/csv-tests.factor b/basis/csv/csv-tests.factor index deec0f5375..560e6ba50d 100644 --- a/basis/csv/csv-tests.factor +++ b/basis/csv/csv-tests.factor @@ -61,7 +61,7 @@ IN: csv.tests "allows setting of delimiting character" [ { { "foo" "bah" "baz" } } ] -[ "foo\tbah\tbaz\n" ch'\t [ string>csv ] with-delimiter ] named-unit-test +[ "foo\tbah\tbaz\n" char: \t [ string>csv ] with-delimiter ] named-unit-test "Quoted field followed immediately by newline" [ { { "foo" "bar" } diff --git a/basis/csv/csv.factor b/basis/csv/csv.factor index a3c8857f2e..f93ba58e41 100644 --- a/basis/csv/csv.factor +++ b/basis/csv/csv.factor @@ -7,7 +7,7 @@ IN: csv SYMBOL: delimiter -ch', delimiter set-global +char: , delimiter set-global '[ dup "\n\"\r" member? [ drop t ] [ _ = ] if ] any? ; inline : escape-quotes ( cell stream -- ) - ch'\" over stream-write1 swap [ + char: \" over stream-write1 swap [ [ over stream-write1 ] - [ dup ch'\" = [ over stream-write1 ] [ drop ] if ] bi - ] each ch'\" swap stream-write1 ; + [ dup char: \" = [ over stream-write1 ] [ drop ] if ] bi + ] each char: \" swap stream-write1 ; : escape-if-required ( cell delimiter stream -- ) [ dupd needs-escaping? ] dip diff --git a/basis/db/postgresql/postgresql.factor b/basis/db/postgresql/postgresql.factor index ece22f38ea..5b14898984 100644 --- a/basis/db/postgresql/postgresql.factor +++ b/basis/db/postgresql/postgresql.factor @@ -117,7 +117,7 @@ M: postgresql-db-connection ( sql in out -- statement ) dup prepare-statement ; : bind-name% ( -- ) - ch'$ 0, + char: $ 0, sql-counter [ inc ] [ get 0# ] bi ; M: postgresql-db-connection bind% ( spec -- ) diff --git a/basis/db/tester/tester.factor b/basis/db/tester/tester.factor index d3734551aa..a4381fd77e 100644 --- a/basis/db/tester/tester.factor +++ b/basis/db/tester/tester.factor @@ -8,7 +8,7 @@ IN: db.tester : postgresql-test-db-name ( -- string ) cpu name>> "-" "factor-test" 3append - H{ { ch'- ch'_ } { ch'. ch'_ } } substitute ; + H{ { char: - char: _ } { char: . char: _ } } substitute ; : postgresql-test-db ( -- postgresql-db ) \ postgresql-db get-global clone postgresql-test-db-name >>database ; diff --git a/basis/db/tuples/tuples-tests.factor b/basis/db/tuples/tuples-tests.factor index 09b9853e4b..87eee59123 100644 --- a/basis/db/tuples/tuples-tests.factor +++ b/basis/db/tuples/tuples-tests.factor @@ -305,7 +305,7 @@ TUPLE: exam id name score ; : random-exam ( -- exam ) f - 6 [ ch'a ch'z [a,b] random ] replicate >string + 6 [ char: a char: z [a,b] random ] replicate >string 100 random exam boa ; diff --git a/basis/english/english-tests.factor b/basis/english/english-tests.factor index d4e3cc43ca..3976d334be 100644 --- a/basis/english/english-tests.factor +++ b/basis/english/english-tests.factor @@ -71,7 +71,7 @@ FROM: english => a/an ; { "i18n" } [ "internationalization" a10n ] unit-test { "f28n" } [ "floccinauccinihilipilification" a10n ] unit-test { "p43s" } [ "pneumonoultramicroscopicsilicovolcanoconiosis" a10n ] unit-test -{ "a10000c" } [ 10000 ch'b "a" "c" surround a10n ] unit-test +{ "a10000c" } [ 10000 char: b "a" "c" surround a10n ] unit-test { "an" } [ "object" a/an ] unit-test { "an" } [ "elephant" a/an ] unit-test diff --git a/basis/environment/environment-tests.factor b/basis/environment/environment-tests.factor index 19f599002a..2b35ca50c1 100644 --- a/basis/environment/environment-tests.factor +++ b/basis/environment/environment-tests.factor @@ -20,7 +20,7 @@ os unix? 
[ { f } [ "factor-test-key-1" os-env ] unit-test { } [ - 32766 ch'a "factor-test-key-long" set-os-env + 32766 char: a "factor-test-key-long" set-os-env ] unit-test { 32766 } [ "factor-test-key-long" os-env length ] unit-test { } [ "factor-test-key-long" unset-os-env ] unit-test diff --git a/basis/escape-strings/escape-strings.factor b/basis/escape-strings/escape-strings.factor index 269152b454..0f40e1c7d3 100644 --- a/basis/escape-strings/escape-strings.factor +++ b/basis/escape-strings/escape-strings.factor @@ -8,8 +8,8 @@ IN: escape-strings [ HS{ } clone 0 0 ] dip [ { - { ch'\] [ 1 + dup 2 = [ drop over adjoin 0 1 ] when ] } - { ch'= [ dup 1 = [ [ 1 + ] dip ] when ] } + { char: \] [ 1 + dup 2 = [ drop over adjoin 0 1 ] when ] } + { char: = [ dup 1 = [ [ 1 + ] dip ] when ] } [ 3drop 0 0 ] } case ] each 0 > [ over adjoin ] [ drop ] if ; @@ -19,7 +19,7 @@ IN: escape-strings [ nip ] [ drop length ] if ; : escape-string* ( str n -- str' ) - ch'= + char: = [ "[" dup surround ] [ "]" dup surround ] bi surround ; : escape-string ( str -- str' ) @@ -32,8 +32,8 @@ IN: escape-strings [ escape-string ] dip prepend ; : escape-simplest ( str -- str' ) - dup { ch'\' ch'\" ch'\r ch'\n ch'\s } counts { - ! { [ dup { ch'\' ch'\r ch'\n ch'\s } values-of sum 0 = ] [ drop "'" prepend ] } - { [ dup ch'\" of not ] [ drop "\"" "\"" surround ] } + dup { char: \' char: \" char: \r char: \n char: \s } counts { + ! { [ dup { char: \' char: \r char: \n char: \s } values-of sum 0 = ] [ drop "'" prepend ] } + { [ dup char: \" of not ] [ drop "\"" "\"" surround ] } [ drop escape-string ] } cond ; diff --git a/basis/farkup/farkup.factor b/basis/farkup/farkup.factor index 33e72d90c7..71ed380316 100644 --- a/basis/farkup/farkup.factor +++ b/basis/farkup/farkup.factor @@ -63,11 +63,11 @@ DEFER: (parse-paragraph) : delimiter-class ( delimiter -- class ) H{ - { ch'* strong } - { ch'_ emphasis } - { ch'^ superscript } - { ch'~ subscript } - { ch'% inline-code } + { char: * strong } + { char: _ emphasis } + { char: ^ superscript } + { char: ~ subscript } + { char: % inline-code } } at ; : or-simple-title ( ... url title/f quot: ( ... title -- ... title' ) -- ... url title' ) @@ -82,9 +82,9 @@ DEFER: (parse-paragraph) ] dip [ (parse-paragraph) cons ] [ 1list ] if* ; : parse-big-link ( before after -- link rest ) - dup ?first ch'\[ = + dup ?first char: \[ = [ parse-link ] - [ [ ch'\[ suffix ] [ (parse-paragraph) ] bi* ] + [ [ char: \[ suffix ] [ (parse-paragraph) ] bi* ] if ; : escape ( before after -- before' after' ) @@ -94,8 +94,8 @@ DEFER: (parse-paragraph) [ nil ] [ [ "*_^~%[\\" member? 
] find-cut [ { - { ch'\[ [ parse-big-link ] } - { ch'\\ [ escape ] } + { char: \[ [ parse-big-link ] } + { char: \\ [ escape ] } [ dup delimiter-class parse-delimiter ] } case cons ] [ drop "" like 1list ] if* @@ -124,10 +124,10 @@ DEFER: (parse-paragraph) V{ } clone (take-until) ; : count= ( string -- n ) - dup [ [ ch'= = not ] find drop 0 or ] bi@ min ; + dup [ [ char: = = not ] find drop 0 or ] bi@ min ; : trim= ( string -- string' ) - [ ch'= = ] trim ; + [ char: = = ] trim ; : make-heading ( string class -- heading ) [ trim= parse-paragraph ] dip boa ; inline @@ -149,14 +149,14 @@ DEFER: (parse-paragraph) : coalesce ( rows -- rows' ) V{ } clone [ '[ - _ dup ?last ?last ch'\\ = + _ dup ?last ?last char: \\ = [ [ pop "|" rot 3append ] keep ] when push ] each ] keep ; : parse-table ( state -- state' table ) - ch'| take-lines [ + char: | take-lines [ "|" split trim-row coalesce @@ -175,13 +175,13 @@ DEFER: (parse-paragraph) ] dip boa ; inline : parse-ul ( state -- state' ul ) - ch'- unordered-list parse-list ; + char: - unordered-list parse-list ; : parse-ol ( state -- state' ul ) - ch'# ordered-list parse-list ; + char: # ordered-list parse-list ; : parse-code ( state -- state' item ) - dup 1 look ch'\[ = + dup 1 look char: \[ = [ unclip-slice make-paragraph ] [ dup "{" take-until [ [ nip rest ] dip @@ -192,12 +192,12 @@ DEFER: (parse-paragraph) : parse-item ( state -- state' item ) dup 0 look { - { ch'= [ parse-heading ] } - { ch'| [ parse-table ] } - { ch'_ [ parse-line ] } - { ch'- [ parse-ul ] } - { ch'# [ parse-ol ] } - { ch'\[ [ parse-code ] } + { char: = [ parse-heading ] } + { char: | [ parse-table ] } + { char: _ [ parse-line ] } + { char: - [ parse-ul ] } + { char: # [ parse-ol ] } + { char: \[ [ parse-code ] } { f [ rest-slice f ] } [ drop unclip-slice make-paragraph ] } case ; @@ -212,7 +212,7 @@ CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');" { [ dup empty? ] [ drop invalid-url ] } { [ dup [ 127 > ] any? ] [ drop invalid-url ] } { [ dup first "/\\" member? ] [ drop invalid-url ] } - { [ ch'\: over member? ] [ dup absolute-url? [ drop invalid-url ] unless ] } + { [ char: \: over member? ] [ dup absolute-url? [ drop invalid-url ] unless ] } [ relative-link-prefix get prepend "" like url-encode ] } cond ; diff --git a/basis/formatting/formatting-tests.factor b/basis/formatting/formatting-tests.factor index 53403dbcb0..79f386ff3d 100644 --- a/basis/formatting/formatting-tests.factor +++ b/basis/formatting/formatting-tests.factor @@ -62,21 +62,21 @@ IN: formatting.tests { "1.00000e-1000" } [ -1000 10^ "%.5e" sprintf ] unit-test { t } [ 1000 10^ "%.5f" sprintf - "1" ".00000" 1000 ch'0 glue = + "1" ".00000" 1000 char: 0 glue = ] unit-test { t } [ -1000 10^ "%.1004f" sprintf - "0." "10000" 999 ch'0 glue = + "0." "10000" 999 char: 0 glue = ] unit-test { "-1.00000e+1000" } [ 1000 10^ neg "%.5e" sprintf ] unit-test { "-1.00000e-1000" } [ -1000 10^ neg "%.5e" sprintf ] unit-test { t } [ 1000 10^ neg "%.5f" sprintf - "-1" ".00000" 1000 ch'0 glue = + "-1" ".00000" 1000 char: 0 glue = ] unit-test { t } [ -1000 10^ neg "%.1004f" sprintf - "-0." "10000" 999 ch'0 glue = + "-0." "10000" 999 char: 0 glue = ] unit-test { "9007199254740991.0" } [ 53 2^ 1 - "%.1f" sprintf ] unit-test { "9007199254740992.0" } [ 53 2^ "%.1f" sprintf ] unit-test @@ -121,7 +121,7 @@ IN: formatting.tests { "2008-09-10" } [ 2008 9 10 "%04d-%02d-%02d" sprintf ] unit-test { "Hello, World!" } [ "Hello, World!" 
"%s" sprintf ] unit-test { "printf test" } [ "printf test" sprintf ] unit-test -{ "char a = 'a'" } [ ch'a "char %c = 'a'" sprintf ] unit-test +{ "char a = 'a'" } [ char: a "char %c = 'a'" sprintf ] unit-test { "00" } [ 0x0 "%02x" sprintf ] unit-test { "ff" } [ 0xff "%02x" sprintf ] unit-test { "0 message(s)" } [ 0 "message" "%d %s(s)" sprintf ] unit-test diff --git a/basis/formatting/formatting.factor b/basis/formatting/formatting.factor index f7a870f4b5..697616280b 100644 --- a/basis/formatting/formatting.factor +++ b/basis/formatting/formatting.factor @@ -17,12 +17,12 @@ ERROR: unknown-format-directive value ; [ ] [ compose ] reduce ; inline : fix-sign ( string -- string ) - dup first ch'0 = [ - dup [ [ ch'0 = not ] [ digit? ] bi and ] find + dup first char: 0 = [ + dup [ [ char: 0 = not ] [ digit? ] bi and ] find [ 1 - swap 2dup nth { - { ch'- [ remove-nth "-" prepend ] } - { ch'+ [ remove-nth "+" prepend ] } + { char: - [ remove-nth "-" prepend ] } + { char: + [ remove-nth "+" prepend ] } [ drop nip ] } case ] [ drop ] if @@ -35,9 +35,9 @@ ERROR: unknown-format-directive value ; [ [ abs ] dip [ 10^ * round-to-even >integer number>string ] - [ 1 + ch'0 pad-head ] + [ 1 + char: 0 pad-head ] [ cut* ] tri [ "." glue ] unless-empty - ] keepd neg? [ ch'- prefix ] when ; + ] keepd neg? [ char: - prefix ] when ; : format-scientific-mantissa ( x log10x digits -- string rounded-up? ) [ swap - 10^ * round-to-even >integer number>string ] keep @@ -47,15 +47,15 @@ ERROR: unknown-format-directive value ; ] keep ; : format-scientific-exponent ( rounded-up? log10x -- string ) - swap [ 1 + ] when number>string 2 ch'0 pad-head - dup ch'- swap index "e" "e+" ? prepend ; + swap [ 1 + ] when number>string 2 char: 0 pad-head + dup char: - swap index "e" "e+" ? prepend ; : format-scientific-simple ( x digits -- string ) [ [ abs dup integer-log10 ] dip [ format-scientific-mantissa ] [ drop nip format-scientific-exponent ] 3bi append - ] keepd neg? [ ch'- prefix ] when ; + ] keepd neg? [ char: - prefix ] when ; : format-float-fast ( x digits string -- string ) [ "" -1 ] 2dip "C" format-float ; @@ -95,15 +95,15 @@ ERROR: unknown-format-directive value ; EBNF: parse-printf [=[ -zero = "0" => [[ ch'0 ]] +zero = "0" => [[ char: 0 ]] char = "'" (.) => [[ second ]] -pad-char = (zero|char)? => [[ ch'\s or ]] +pad-char = (zero|char)? => [[ char: \s or ]] pad-align = ("-")? => [[ \ pad-tail \ pad-head ? ]] pad-width = ([0-9])* => [[ >digits ]] pad = pad-align pad-char pad-width => [[ >quotation dup first 0 = [ drop [ ] ] when ]] -sign_ = [+ ] => [[ '[ dup first ch'- = [ _ prefix ] unless ] ]] +sign_ = [+ ] => [[ '[ dup first char: - = [ _ prefix ] unless ] ]] sign = (sign_)? => [[ [ ] or ]] width_ = "." 
([0-9])* => [[ second >digits '[ _ shorted head ] ]] @@ -179,10 +179,10 @@ MACRO: sprintf ( format-string -- quot ) string 2 ch'0 pad-head ; inline + number>string 2 char: 0 pad-head ; inline : pad-000 ( n -- string ) - number>string 3 ch'0 pad-head ; inline + number>string 3 char: 0 pad-head ; inline : >time ( timestamp -- string ) [ hour>> ] [ minute>> ] [ second>> floor ] tri diff --git a/basis/ftp/client/client.factor b/basis/ftp/client/client.factor index 6222cf2b93..9984669f9c 100644 --- a/basis/ftp/client/client.factor +++ b/basis/ftp/client/client.factor @@ -11,7 +11,7 @@ IN: ftp.client 3 head string>number ; : ftp-response-code ( string -- n/f ) - dup fourth ch'- = [ drop f ] [ (ftp-response-code) ] if ; + dup fourth char: - = [ drop f ] [ (ftp-response-code) ] if ; : read-response-loop ( ftp-response -- ftp-response ) readln @@ -22,7 +22,7 @@ IN: ftp.client readln [ (ftp-response-code) >>n ] [ add-response-line ] - [ fourth ch'- = ] tri + [ fourth char: - = ] tri [ read-response-loop ] when ; ERROR: ftp-error got expected ; diff --git a/basis/ftp/client/listing-parser/listing-parser.factor b/basis/ftp/client/listing-parser/listing-parser.factor index a64dbc2835..138644b157 100644 --- a/basis/ftp/client/listing-parser/listing-parser.factor +++ b/basis/ftp/client/listing-parser/listing-parser.factor @@ -6,26 +6,26 @@ IN: ftp.client.listing-parser : ch>file-type ( ch -- type ) { - { ch'b [ +block-device+ ] } - { ch'c [ +character-device+ ] } - { ch'd [ +directory+ ] } - { ch'l [ +symbolic-link+ ] } - { ch's [ +socket+ ] } - { ch'p [ +fifo+ ] } - { ch'- [ +regular-file+ ] } + { char: b [ +block-device+ ] } + { char: c [ +character-device+ ] } + { char: d [ +directory+ ] } + { char: l [ +symbolic-link+ ] } + { char: s [ +socket+ ] } + { char: p [ +fifo+ ] } + { char: - [ +regular-file+ ] } [ drop +unknown+ ] } case ; : file-type>ch ( type -- string ) { - { +block-device+ [ ch'b ] } - { +character-device+ [ ch'c ] } - { +directory+ [ ch'd ] } - { +symbolic-link+ [ ch'l ] } - { +socket+ [ ch's ] } - { +fifo+ [ ch'p ] } - { +regular-file+ [ ch'- ] } - [ drop ch'- ] + { +block-device+ [ char: b ] } + { +character-device+ [ char: c ] } + { +directory+ [ char: d ] } + { +symbolic-link+ [ char: l ] } + { +socket+ [ char: s ] } + { +fifo+ [ char: p ] } + { +regular-file+ [ char: - ] } + [ drop char: - ] } case ; : parse-permissions ( remote-file str -- remote-file ) diff --git a/basis/furnace/auth/providers/couchdb/couchdb.factor b/basis/furnace/auth/providers/couchdb/couchdb.factor index 23838e6685..714e0310eb 100644 --- a/basis/furnace/auth/providers/couchdb/couchdb.factor +++ b/basis/furnace/auth/providers/couchdb/couchdb.factor @@ -111,7 +111,7 @@ TUPLE: couchdb-auth-provider url>user ; : strip-hash ( hash1 -- hash2 ) - [ drop first ch'_ = ] assoc-reject ; + [ drop first char: _ = ] assoc-reject ; : at-or-k ( key hash -- newkey ) dupd at [ nip ] when* ; diff --git a/basis/gobject-introspection/types/types.factor b/basis/gobject-introspection/types/types.factor index 358ffb3f21..44d8b3260c 100644 --- a/basis/gobject-introspection/types/types.factor +++ b/basis/gobject-introspection/types/types.factor @@ -68,7 +68,7 @@ PREDICATE: fixed-size-array-type < c-array-type fixed-size>> >boolean ; : qualified-type-name ( data-type -- name ) [ name>> ] keep { - [ name>> ch'. swap member? ] + [ name>> char: . swap member? ] [ none-type? ] [ standard-type? 
] } 1|| [ qualified-name ] unless ; diff --git a/basis/help/html/html.factor b/basis/help/html/html.factor index 955d674672..49d188b2aa 100644 --- a/basis/help/html/html.factor +++ b/basis/help/html/html.factor @@ -15,19 +15,19 @@ IN: help.html : escape-char ( ch -- ) dup ascii? [ dup H{ - { ch'\" "__quo__" } - { ch'* "__star__" } - { ch'\: "__colon__" } - { ch'< "__lt__" } - { ch'> "__gt__" } - { ch'? "__que__" } - { ch'\\ "__back__" } - { ch'| "__pipe__" } - { ch'/ "__slash__" } - { ch', "__comma__" } - { ch'@ "__at__" } - { ch'# "__hash__" } - { ch'% "__percent__" } + { char: \" "__quo__" } + { char: * "__star__" } + { char: \: "__colon__" } + { char: < "__lt__" } + { char: > "__gt__" } + { char: ? "__que__" } + { char: \\ "__back__" } + { char: | "__pipe__" } + { char: / "__slash__" } + { char: , "__comma__" } + { char: @ "__at__" } + { char: # "__hash__" } + { char: % "__percent__" } } at [ % ] [ , ] ?if ] [ number>string "__" "__" surround % ] if ; @@ -87,7 +87,7 @@ M: pathname url-of ]] ; : bijective-base26 ( n -- name ) - [ dup 0 > ] [ 1 - 26 /mod ch'a + ] "" produce-as nip reverse! ; + [ dup 0 > ] [ 1 - 26 /mod char: a + ] "" produce-as nip reverse! ; : css-class ( style classes -- name ) dup '[ drop _ assoc-size 1 + bijective-base26 ] cache ; diff --git a/basis/help/lint/spaces/spaces.factor b/basis/help/lint/spaces/spaces.factor index a7d59274cc..60a26fb18d 100644 --- a/basis/help/lint/spaces/spaces.factor +++ b/basis/help/lint/spaces/spaces.factor @@ -17,7 +17,7 @@ IN: help.lint.spaces dup utf8 file-lines [ 1 + 2array ] map-index [ first [ - { [ ch'space = ] [ ch'\" = ] } 1|| + { [ char: space = ] [ char: \" = ] } 1|| ] trim-head " " swap subseq? ] filter diff --git a/basis/help/tutorial/tutorial.factor b/basis/help/tutorial/tutorial.factor index 91962c282f..131cbb5ad0 100644 --- a/basis/help/tutorial/tutorial.factor +++ b/basis/help/tutorial/tutorial.factor @@ -142,7 +142,7 @@ $nl "We begin by writing a word which removes blanks and non-alphabetical characters from a string, and then converts the string to lower case. We call this word " { $snippet "normalize" } ". To figure out how to write this word, we begin with some interactive experimentation in the listener." $nl "Start by pushing a character on the stack; notice that characters are really just integers:" -{ $code "ch'a" } +{ $code "char: a" } $nl "Now, use the " { $link Letter? } " word to test if it is an alphabetical character, upper or lower case:" { $unchecked-example "Letter? ." "t" } @@ -151,7 +151,7 @@ $nl "This gives the expected result." $nl "Now try with a non-alphabetical character:" -{ $code "ch'#" } +{ $code "char: #" } { $unchecked-example "Letter? ." "f" } $nl "What we want to do is given a string, remove all characters which do not match the " { $link Letter? } " predicate. 
Let's push a string on the stack:" diff --git a/basis/html/streams/streams-tests.factor b/basis/html/streams/streams-tests.factor index 6c16dd290c..6c95767939 100644 --- a/basis/html/streams/streams-tests.factor +++ b/basis/html/streams/streams-tests.factor @@ -14,7 +14,7 @@ IN: html.streams.tests ] unit-test { "a" } [ - [ ch'a write1 ] make-html-string + [ char: a write1 ] make-html-string ] unit-test { "<" } [ diff --git a/basis/html/streams/streams.factor b/basis/html/streams/streams.factor index 430c14c583..e0634ef807 100644 --- a/basis/html/streams/streams.factor +++ b/basis/html/streams/streams.factor @@ -36,7 +36,7 @@ TUPLE: html-sub-stream < html-writer style parent ; : hex-color, ( color -- ) [ red>> ] [ green>> ] [ blue>> ] tri - [ 255 * >integer >hex 2 ch'0 pad-head % ] tri@ ; + [ 255 * >integer >hex 2 char: 0 pad-head % ] tri@ ; : fg-css, ( color -- ) "color: #" % hex-color, "; " % ; diff --git a/basis/html/templates/chloe/chloe-tests.factor b/basis/html/templates/chloe/chloe-tests.factor index 8ce920d79d..b29dd6cfe6 100644 --- a/basis/html/templates/chloe/chloe-tests.factor +++ b/basis/html/templates/chloe/chloe-tests.factor @@ -10,7 +10,7 @@ IN: html.templates.chloe.tests : run-template ( quot -- string ) with-string-writer [ "\r\n\t" member? ] reject - [ ch'\s = ] trim ; inline + [ char: \s = ] trim ; inline : test-template ( name -- template ) "vocab:html/templates/chloe/test/" diff --git a/basis/html/templates/fhtml/fhtml.factor b/basis/html/templates/fhtml/fhtml.factor index 0cfd6b5949..474b008530 100644 --- a/basis/html/templates/fhtml/fhtml.factor +++ b/basis/html/templates/fhtml/fhtml.factor @@ -16,7 +16,7 @@ TUPLE: template-lexer < lexer ; M: template-lexer skip-word [ { - { [ 2dup nth ch'\" = ] [ drop 1 + ] } + { [ 2dup nth char: \" = ] [ drop 1 + ] } { [ 2dup swap tail-slice "%>" head? ] [ drop 2 + ] } [ f skip ] } cond diff --git a/basis/http/parsers/parsers.factor b/basis/http/parsers/parsers.factor index 090d67faa0..03f1d82798 100644 --- a/basis/http/parsers/parsers.factor +++ b/basis/http/parsers/parsers.factor @@ -97,7 +97,7 @@ PEG: parse-response-line ( string -- triple ) [ " \t" member? ] satisfy repeat1 ; : qdtext-parser ( -- parser ) - { [ ch'\" = ] [ control? ] } except-these ; + { [ char: \" = ] [ control? 
] } except-these ; : quoted-char-parser ( -- parser ) "\\" token hide any-char 2seq ; diff --git a/basis/io/backend/unix/unix.factor b/basis/io/backend/unix/unix.factor index dcdc01061c..0a4326ccea 100644 --- a/basis/io/backend/unix/unix.factor +++ b/basis/io/backend/unix/unix.factor @@ -147,7 +147,7 @@ M: stdin dispose* ] with-destructors ; : wait-for-stdin ( stdin -- size ) - [ control>> ch'X over io:stream-write1 io:stream-flush ] + [ control>> char: X over io:stream-write1 io:stream-flush ] [ size>> ssize_t heap-size swap io:stream-read ssize_t deref ] bi ; diff --git a/basis/io/buffers/buffers-tests.factor b/basis/io/buffers/buffers-tests.factor index a3a388ed17..679b6afd78 100644 --- a/basis/io/buffers/buffers-tests.factor +++ b/basis/io/buffers/buffers-tests.factor @@ -44,7 +44,7 @@ io.buffers kernel libc namespaces sequences strings tools.test ; dup buffer-read-all >string swap dispose ] unit-test -{ ch'e } [ +{ char: e } [ "hello" string>buffer 1 over buffer-consume [ buffer-pop ] keep dispose ] unit-test @@ -58,11 +58,11 @@ io.buffers kernel libc namespaces sequences strings tools.test ; "b" get dispose "hello world" string>buffer "b" set -{ "hello" ch'\s } [ " " "b" get buffer-read-until [ >string ] dip ] unit-test +{ "hello" char: \s } [ " " "b" get buffer-read-until [ >string ] dip ] unit-test "b" get dispose "hello world" string>buffer "b" set -{ "hello worl" ch'd } [ "d" "b" get buffer-read-until [ >string ] dip ] unit-test +{ "hello worl" char: d } [ "d" "b" get buffer-read-until [ >string ] dip ] unit-test "b" get dispose "hello world" string>buffer "b" set diff --git a/basis/io/crlf/crlf-tests.factor b/basis/io/crlf/crlf-tests.factor index 6b0ba20ad1..a962c21e82 100644 --- a/basis/io/crlf/crlf-tests.factor +++ b/basis/io/crlf/crlf-tests.factor @@ -15,9 +15,9 @@ USING: io.crlf tools.test io.streams.string io ; { "foo\r\nbar" } [ "foo\nbar" lf>crlf ] unit-test { f } [ "" [ read1-ignoring-crlf ] with-string-reader ] unit-test -{ ch'a } [ "a" [ read1-ignoring-crlf ] with-string-reader ] unit-test -{ ch'b } [ "\nb" [ read1-ignoring-crlf ] with-string-reader ] unit-test -{ ch'c } [ "\r\nc" [ read1-ignoring-crlf ] with-string-reader ] unit-test +{ char: a } [ "a" [ read1-ignoring-crlf ] with-string-reader ] unit-test +{ char: b } [ "\nb" [ read1-ignoring-crlf ] with-string-reader ] unit-test +{ char: c } [ "\r\nc" [ read1-ignoring-crlf ] with-string-reader ] unit-test { f } [ "" [ 5 read-ignoring-crlf ] with-string-reader ] unit-test { "a" } [ "a" [ 5 read-ignoring-crlf ] with-string-reader ] unit-test diff --git a/basis/io/crlf/crlf.factor b/basis/io/crlf/crlf.factor index c0e24de842..179f8d890b 100644 --- a/basis/io/crlf/crlf.factor +++ b/basis/io/crlf/crlf.factor @@ -9,7 +9,7 @@ IN: io.crlf :: stream-read-crlf ( stream -- seq ) "\r" stream stream-read-until [ - ch'\r assert= stream stream-read1 ch'\n assert= + char: \r assert= stream stream-read1 char: \n assert= ] [ f like ] if* ; : read-crlf ( -- seq ) @@ -17,14 +17,14 @@ IN: io.crlf :: stream-read-?crlf ( stream -- seq ) "\r\n" stream stream-read-until [ - ch'\r = [ stream stream-read1 ch'\n assert= ] when + char: \r = [ stream stream-read1 char: \n assert= ] when ] [ f like ] if* ; : read-?crlf ( -- seq ) input-stream get stream-read-?crlf ; : crlf>lf ( str -- str' ) - ch'\r swap remove ; + char: \r swap remove ; : lf>crlf ( str -- str' ) "\n" split "\r\n" join ; diff --git a/basis/io/encodings/8-bit/8-bit-tests.factor b/basis/io/encodings/8-bit/8-bit-tests.factor index f079778c45..2f27f79437 100644 --- 
a/basis/io/encodings/8-bit/8-bit-tests.factor +++ b/basis/io/encodings/8-bit/8-bit-tests.factor @@ -1,10 +1,10 @@ USING: arrays io.encodings.8-bit io.encodings.string strings tools.test ; -{ B{ ch'f ch'o ch'o } } [ "foo" latin2 encode ] unit-test +{ B{ char: f char: o char: o } } [ "foo" latin2 encode ] unit-test [ { 256 } >string latin2 encode ] must-fail { "bar" } [ "bar" latin2 decode ] unit-test -{ { ch'b 233 ch'r } } [ B{ ch'b 233 ch'r } latin2 decode >array ] unit-test +{ { char: b 233 char: r } } [ B{ char: b 233 char: r } latin2 decode >array ] unit-test { { 0xfffd 0x20AC } } [ B{ 0x81 0x80 } windows-1252 decode >array ] unit-test diff --git a/basis/io/encodings/euc/euc-tests.factor b/basis/io/encodings/euc/euc-tests.factor index 860505a09e..808f85a017 100644 --- a/basis/io/encodings/euc/euc-tests.factor +++ b/basis/io/encodings/euc/euc-tests.factor @@ -47,4 +47,4 @@ IN: io.encodings.euc.tests { t } [ phrase-euc-kr 3 head* euc-kr decode phrase-unicode 2 head* = ] unit-test -{ t } [ phrase-euc-kr 2 head* euc-kr decode phrase-unicode 2 head* ch'replacement-character suffix = ] unit-test +{ t } [ phrase-euc-kr 2 head* euc-kr decode phrase-unicode 2 head* char: replacement-character suffix = ] unit-test diff --git a/basis/io/encodings/gb18030/gb18030-tests.factor b/basis/io/encodings/gb18030/gb18030-tests.factor index 267ec589ac..8cf2858711 100644 --- a/basis/io/encodings/gb18030/gb18030-tests.factor +++ b/basis/io/encodings/gb18030/gb18030-tests.factor @@ -8,15 +8,15 @@ USING: io.encodings.gb18030 io.encodings.string strings tools.test arrays ; [ B{ 0xB7 0xB8 } >string gb18030 encode ] unit-test { { 0xB7 0xB8 } } [ B{ 0xA1 0xA4 0x81 0x30 0x86 0x30 } gb18030 decode >array ] unit-test -{ { 0xB7 ch'replacement-character } } +{ { 0xB7 char: replacement-character } } [ B{ 0xA1 0xA4 0x81 0x30 0x86 } gb18030 decode >array ] unit-test -{ { 0xB7 ch'replacement-character } } +{ { 0xB7 char: replacement-character } } [ B{ 0xA1 0xA4 0x81 0x30 } gb18030 decode >array ] unit-test -{ { 0xB7 ch'replacement-character } } +{ { 0xB7 char: replacement-character } } [ B{ 0xA1 0xA4 0x81 } gb18030 decode >array ] unit-test { { 0xB7 } } [ B{ 0xA1 0xA4 } gb18030 decode >array ] unit-test -{ { ch'replacement-character } } +{ { char: replacement-character } } [ B{ 0xA1 } >string gb18030 decode >array ] unit-test { { 0x44D7 0x464B } } [ B{ 0x82 0x33 0xA3 0x39 0x82 0x33 0xC9 0x31 } diff --git a/basis/io/encodings/iso2022/iso2022-tests.factor b/basis/io/encodings/iso2022/iso2022-tests.factor index b51557f219..5d26260ffd 100644 --- a/basis/io/encodings/iso2022/iso2022-tests.factor +++ b/basis/io/encodings/iso2022/iso2022-tests.factor @@ -7,30 +7,30 @@ strings tools.test ; { "hello" } [ "hello" >byte-array iso2022 decode ] unit-test { "hello" } [ "hello" iso2022 encode >string ] unit-test -{ "hi" } [ B{ ch'h $ ESC ch'\( ch'B ch'i } iso2022 decode ] unit-test -{ "hi" } [ B{ ch'h ch'i $ ESC ch'\( ch'B } iso2022 decode ] unit-test -{ "hi\u00fffd" } [ B{ ch'h ch'i $ ESC ch'\( } iso2022 decode ] unit-test -{ "hi\u00fffd" } [ B{ ch'h ch'i $ ESC } iso2022 decode ] unit-test +{ "hi" } [ B{ char: h $ ESC char: \( char: B char: i } iso2022 decode ] unit-test +{ "hi" } [ B{ char: h char: i $ ESC char: \( char: B } iso2022 decode ] unit-test +{ "hi\u00fffd" } [ B{ char: h char: i $ ESC char: \( } iso2022 decode ] unit-test +{ "hi\u00fffd" } [ B{ char: h char: i $ ESC } iso2022 decode ] unit-test -{ B{ ch'h $ ESC ch'\( ch'J 0xD8 } } [ "h\u00ff98" iso2022 encode ] unit-test -{ "h\u00ff98" } [ B{ ch'h $ ESC ch'\( ch'J 0xD8 } 
iso2022 decode ] unit-test -{ "hi" } [ B{ ch'h $ ESC ch'\( ch'J ch'i } iso2022 decode ] unit-test -{ "h" } [ B{ ch'h $ ESC ch'\( ch'J } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'\( ch'J 0x80 } iso2022 decode ] unit-test +{ B{ char: h $ ESC char: \( char: J 0xD8 } } [ "h\u00ff98" iso2022 encode ] unit-test +{ "h\u00ff98" } [ B{ char: h $ ESC char: \( char: J 0xD8 } iso2022 decode ] unit-test +{ "hi" } [ B{ char: h $ ESC char: \( char: J char: i } iso2022 decode ] unit-test +{ "h" } [ B{ char: h $ ESC char: \( char: J } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: \( char: J 0x80 } iso2022 decode ] unit-test -{ B{ ch'h $ ESC ch'$ ch'B 0x3E 0x47 } } [ "h\u007126" iso2022 encode ] unit-test -{ "h\u007126" } [ B{ ch'h $ ESC ch'$ ch'B 0x3E 0x47 } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'$ ch'B 0x3E } iso2022 decode ] unit-test -{ "h" } [ B{ ch'h $ ESC ch'$ ch'B } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'$ } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'$ ch'B 0x80 0x80 } iso2022 decode ] unit-test +{ B{ char: h $ ESC char: $ char: B 0x3E 0x47 } } [ "h\u007126" iso2022 encode ] unit-test +{ "h\u007126" } [ B{ char: h $ ESC char: $ char: B 0x3E 0x47 } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: $ char: B 0x3E } iso2022 decode ] unit-test +{ "h" } [ B{ char: h $ ESC char: $ char: B } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: $ } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: $ char: B 0x80 0x80 } iso2022 decode ] unit-test -{ B{ ch'h $ ESC ch'$ ch'\( ch'D 0x38 0x54 } } [ "h\u0058ce" iso2022 encode ] unit-test -{ "h\u0058ce" } [ B{ ch'h $ ESC ch'$ ch'\( ch'D 0x38 0x54 } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'$ ch'\( ch'D 0x38 } iso2022 decode ] unit-test -{ "h" } [ B{ ch'h $ ESC ch'$ ch'\( ch'D } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'$ ch'\( } iso2022 decode ] unit-test -{ "h\u00fffd" } [ B{ ch'h $ ESC ch'$ ch'\( ch'D 0x70 0x70 } iso2022 decode ] unit-test +{ B{ char: h $ ESC char: $ char: \( char: D 0x38 0x54 } } [ "h\u0058ce" iso2022 encode ] unit-test +{ "h\u0058ce" } [ B{ char: h $ ESC char: $ char: \( char: D 0x38 0x54 } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: $ char: \( char: D 0x38 } iso2022 decode ] unit-test +{ "h" } [ B{ char: h $ ESC char: $ char: \( char: D } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: $ char: \( } iso2022 decode ] unit-test +{ "h\u00fffd" } [ B{ char: h $ ESC char: $ char: \( char: D 0x70 0x70 } iso2022 decode ] unit-test [ "\u{syriac-music}" iso2022 encode ] must-fail diff --git a/basis/io/encodings/iso2022/iso2022.factor b/basis/io/encodings/iso2022/iso2022.factor index 0868f3ee47..fcf671c33a 100644 --- a/basis/io/encodings/iso2022/iso2022.factor +++ b/basis/io/encodings/iso2022/iso2022.factor @@ -33,10 +33,10 @@ M: iso2022 CONSTANT: ESC 0x16 -CONSTANT: switch-ascii B{ $ ESC ch'\( ch'B } -CONSTANT: switch-jis201 B{ $ ESC ch'\( ch'J } -CONSTANT: switch-jis208 B{ $ ESC ch'$ ch'B } -CONSTANT: switch-jis212 B{ $ ESC ch'$ ch'\( ch'D } +CONSTANT: switch-ascii B{ $ ESC char: \( char: B } +CONSTANT: switch-jis201 B{ $ ESC char: \( char: J } +CONSTANT: switch-jis208 B{ $ ESC char: $ char: B } +CONSTANT: switch-jis212 B{ $ ESC char: $ char: \( char: D } : find-type ( char -- 
code type ) { @@ -62,19 +62,19 @@ M:: iso2022-state encode-char ( char stream encoding -- ) : read-escape ( stream -- type/f ) dup stream-read1 { - { ch'\( [ + { char: \( [ stream-read1 { - { ch'B [ ascii get-global ] } - { ch'J [ jis201 get-global ] } + { char: B [ ascii get-global ] } + { char: J [ jis201 get-global ] } [ drop f ] } case ] } - { ch'$ [ + { char: $ [ dup stream-read1 { - { ch'@ [ drop jis208 get-global ] } ! want: JIS X 0208-1978 - { ch'B [ drop jis208 get-global ] } - { ch'\( [ - stream-read1 ch'D = jis212 get-global f ? + { char: @ [ drop jis208 get-global ] } ! want: JIS X 0208-1978 + { char: B [ drop jis208 get-global ] } + { char: \( [ + stream-read1 char: D = jis212 get-global f ? ] } [ 2drop f ] } case diff --git a/basis/io/encodings/latin1/latin1-tests.factor b/basis/io/encodings/latin1/latin1-tests.factor index 071f6ad9c2..43d2492533 100644 --- a/basis/io/encodings/latin1/latin1-tests.factor +++ b/basis/io/encodings/latin1/latin1-tests.factor @@ -1,7 +1,7 @@ USING: arrays io.encodings.latin1 io.encodings.string strings tools.test ; -{ B{ ch'f ch'o ch'o } } [ "foo" latin1 encode ] unit-test +{ B{ char: f char: o char: o } } [ "foo" latin1 encode ] unit-test [ { 256 } >string latin1 encode ] must-fail @@ -9,6 +9,6 @@ tools.test ; { "bar" } [ "bar" latin1 decode ] unit-test -{ { ch'b 233 ch'r } } [ - B{ ch'b 233 ch'r } latin1 decode >array +{ { char: b 233 char: r } } [ + B{ char: b 233 char: r } latin1 decode >array ] unit-test diff --git a/basis/io/encodings/shift-jis/shift-jis-tests.factor b/basis/io/encodings/shift-jis/shift-jis-tests.factor index e09e5f6224..b5db28fef8 100644 --- a/basis/io/encodings/shift-jis/shift-jis-tests.factor +++ b/basis/io/encodings/shift-jis/shift-jis-tests.factor @@ -3,15 +3,15 @@ USING: arrays io.encodings.shift-jis io.encodings.string strings tools.test ; -{ { ch'replacement-character } } [ { 141 } shift-jis decode >array ] unit-test +{ { char: replacement-character } } [ { 141 } shift-jis decode >array ] unit-test { "" } [ "" shift-jis decode >string ] unit-test { "" } [ "" shift-jis encode >string ] unit-test -[ { ch'replacement-character } shift-jis encode ] must-fail -{ "ab¥ィ" } [ { ch'a ch'b 0x5C 0xA8 } shift-jis decode ] unit-test -{ { ch'a ch'b 0x5C 0xA8 } } [ "ab¥ィ" shift-jis encode >array ] unit-test -{ "ab\\ィ" } [ { ch'a ch'b 0x5C 0xA8 } windows-31j decode ] unit-test -{ { ch'a ch'b 0x5C 0xA8 } } [ "ab\\ィ" windows-31j encode >array ] unit-test -{ "\u000081\u0000c8" } [ ch'logical-and 1string windows-31j encode >string ] unit-test -{ "\u000081\u0000c8" } [ ch'logical-and 1string shift-jis encode >string ] unit-test -{ { ch'logical-and } } [ "\u000081\u0000c8" windows-31j decode >array ] unit-test -{ { ch'logical-and } } [ "\u000081\u0000c8" shift-jis decode >array ] unit-test +[ { char: replacement-character } shift-jis encode ] must-fail +{ "ab¥ィ" } [ { char: a char: b 0x5C 0xA8 } shift-jis decode ] unit-test +{ { char: a char: b 0x5C 0xA8 } } [ "ab¥ィ" shift-jis encode >array ] unit-test +{ "ab\\ィ" } [ { char: a char: b 0x5C 0xA8 } windows-31j decode ] unit-test +{ { char: a char: b 0x5C 0xA8 } } [ "ab\\ィ" windows-31j encode >array ] unit-test +{ "\u000081\u0000c8" } [ char: logical-and 1string windows-31j encode >string ] unit-test +{ "\u000081\u0000c8" } [ char: logical-and 1string shift-jis encode >string ] unit-test +{ { char: logical-and } } [ "\u000081\u0000c8" windows-31j decode >array ] unit-test +{ { char: logical-and } } [ "\u000081\u0000c8" shift-jis decode >array ] unit-test diff --git 
a/basis/io/encodings/utf32/utf32-tests.factor b/basis/io/encodings/utf32/utf32-tests.factor index 1826818bc8..902cfe3748 100644 --- a/basis/io/encodings/utf32/utf32-tests.factor +++ b/basis/io/encodings/utf32/utf32-tests.factor @@ -4,25 +4,25 @@ USING: kernel tools.test io.encodings.utf32 arrays sbufs io.streams.byte-array sequences io.encodings io strings io.encodings.string alien.c-types alien.strings accessors classes ; -{ { ch'x } } [ B{ 0 0 0 ch'x } utf32be decode >array ] unit-test +{ { char: x } } [ B{ 0 0 0 char: x } utf32be decode >array ] unit-test { { 0x1D11E } } [ B{ 0 1 0xD1 0x1E } utf32be decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0 1 0xD1 } utf32be decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0 1 } utf32be decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0 } utf32be decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0 1 0xD1 } utf32be decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0 1 } utf32be decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0 } utf32be decode >array ] unit-test { { } } [ { } utf32be decode >array ] unit-test -{ B{ 0 0 0 ch'x 0 1 0xD1 0x1E } } [ { ch'x 0x1d11e } >string utf32be encode ] unit-test +{ B{ 0 0 0 char: x 0 1 0xD1 0x1E } } [ { char: x 0x1d11e } >string utf32be encode ] unit-test -{ { ch'x } } [ B{ ch'x 0 0 0 } utf32le decode >array ] unit-test +{ { char: x } } [ B{ char: x 0 0 0 } utf32le decode >array ] unit-test { { 0x1d11e } } [ B{ 0x1e 0xd1 1 0 } utf32le decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0x1e 0xd1 1 } utf32le decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0x1e 0xd1 } utf32le decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0x1e } utf32le decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0x1e 0xd1 1 } utf32le decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0x1e 0xd1 } utf32le decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0x1e } utf32le decode >array ] unit-test { { } } [ { } utf32le decode >array ] unit-test -{ B{ 120 0 0 0 0x1e 0xd1 1 0 } } [ { ch'x 0x1d11e } >string utf32le encode ] unit-test +{ B{ 120 0 0 0 0x1e 0xd1 1 0 } } [ { char: x 0x1d11e } >string utf32le encode ] unit-test -{ { ch'x } } [ B{ 0xff 0xfe 0 0 ch'x 0 0 0 } utf32 decode >array ] unit-test -{ { ch'x } } [ B{ 0 0 0xfe 0xff 0 0 0 ch'x } utf32 decode >array ] unit-test +{ { char: x } } [ B{ 0xff 0xfe 0 0 char: x 0 0 0 } utf32 decode >array ] unit-test +{ { char: x } } [ B{ 0 0 0xfe 0xff 0 0 0 char: x } utf32 decode >array ] unit-test -{ B{ 0xff 0xfe 0 0 120 0 0 0 0x1e 0xd1 1 0 } } [ { ch'x 0x1d11e } >string utf32 encode ] unit-test +{ B{ 0xff 0xfe 0 0 120 0 0 0 0x1e 0xd1 1 0 } } [ { char: x 0x1d11e } >string utf32 encode ] unit-test diff --git a/basis/io/encodings/utf7/utf7.factor b/basis/io/encodings/utf7/utf7.factor index b91de2fe36..56f9da0ee8 100644 --- a/basis/io/encodings/utf7/utf7.factor +++ b/basis/io/encodings/utf7/utf7.factor @@ -13,20 +13,20 @@ TUPLE: utf7codec dialect buffer ; : utf7 ( -- utf7codec ) { { { } { } } - { { ch'+ } { ch'- } } + { { char: + } { char: - } } } V{ } utf7codec boa ; : utf7imap4 ( -- utf7codec ) { - { { ch'/ } { ch', } } - { { ch'& } { ch'- } } + { { char: / } { char: , } } + { { char: & } { char: - } } } V{ } utf7codec boa ; : >raw-base64 ( bytes -- bytes' ) - >string utf16be encode >base64 [ ch'= = ] trim-tail ; + >string utf16be encode >base64 [ char: = = ] trim-tail ; : raw-base64> ( str -- str' ) - 
dup length 4 / ceiling 4 * ch'= pad-tail base64> utf16be decode ; + dup length 4 / ceiling 4 * char: = pad-tail base64> utf16be decode ; : encode-chunk ( repl-pair surround-pair chunk ascii? -- bytes ) [ swap [ first ] [ concat ] bi replace nip ] diff --git a/basis/io/files/info/unix/linux/linux.factor b/basis/io/files/info/unix/linux/linux.factor index 496de1b7f5..56e722bf21 100644 --- a/basis/io/files/info/unix/linux/linux.factor +++ b/basis/io/files/info/unix/linux/linux.factor @@ -65,7 +65,7 @@ frequency pass-number ; } cleave ; : parse-mtab ( -- array ) - ch'\s [ "/etc/mtab" utf8 file>csv ] with-delimiter + char: \s [ "/etc/mtab" utf8 file>csv ] with-delimiter [ mtab-csv>mtab-entry ] map ; : mtab-entry>file-system-info ( mtab-entry -- file-system-info/f ) diff --git a/basis/io/files/info/unix/unix.factor b/basis/io/files/info/unix/unix.factor index b2dc81ce91..3134954730 100644 --- a/basis/io/files/info/unix/unix.factor +++ b/basis/io/files/info/unix/unix.factor @@ -242,26 +242,26 @@ M: string set-file-group ( path string -- ) : ch>file-type ( ch -- type ) { - { ch'b [ +block-device+ ] } - { ch'c [ +character-device+ ] } - { ch'd [ +directory+ ] } - { ch'l [ +symbolic-link+ ] } - { ch's [ +socket+ ] } - { ch'p [ +fifo+ ] } - { ch'- [ +regular-file+ ] } + { char: b [ +block-device+ ] } + { char: c [ +character-device+ ] } + { char: d [ +directory+ ] } + { char: l [ +symbolic-link+ ] } + { char: s [ +socket+ ] } + { char: p [ +fifo+ ] } + { char: - [ +regular-file+ ] } [ drop +unknown+ ] } case ; : file-type>ch ( type -- ch ) { - { +block-device+ [ ch'b ] } - { +character-device+ [ ch'c ] } - { +directory+ [ ch'd ] } - { +symbolic-link+ [ ch'l ] } - { +socket+ [ ch's ] } - { +fifo+ [ ch'p ] } - { +regular-file+ [ ch'- ] } - [ drop ch'- ] + { +block-device+ [ char: b ] } + { +character-device+ [ char: c ] } + { +directory+ [ char: d ] } + { +symbolic-link+ [ char: l ] } + { +socket+ [ char: s ] } + { +fifo+ [ char: p ] } + { +regular-file+ [ char: - ] } + [ drop char: - ] } case ; = ] - [ second ch'\: = ] + [ second char: \: = ] [ first Letter? ] } 1&& [ 2 head "\\" append ] [ not-absolute-path ] if ; diff --git a/basis/io/files/unique/unique-tests.factor b/basis/io/files/unique/unique-tests.factor index 55d5e001e9..d36191ea96 100644 --- a/basis/io/files/unique/unique-tests.factor +++ b/basis/io/files/unique/unique-tests.factor @@ -6,7 +6,7 @@ namespaces sequences strings tools.test ; { 123 } [ [ "core" ".test" [ - [ [ 123 ch'a ] dip ascii set-file-contents ] + [ [ 123 char: a ] dip ascii set-file-contents ] [ file-info size>> ] bi ] cleanup-unique-file ] with-temp-directory diff --git a/basis/io/files/windows/windows.factor b/basis/io/files/windows/windows.factor index 35343ce303..d65818f202 100644 --- a/basis/io/files/windows/windows.factor +++ b/basis/io/files/windows/windows.factor @@ -320,7 +320,7 @@ M: windows root-directory? ( path -- ? ) { [ dup empty? ] [ drop f ] } { [ dup [ path-separator? ] all? ] [ drop t ] } { [ dup trim-tail-separators { [ length 2 = ] - [ second ch'\: = ] } 1&& ] [ drop t ] } + [ second char: \: = ] } 1&& ] [ drop t ] } { [ dup unicode-prefix head? 
] [ trim-tail-separators length unicode-prefix length 2 + = ] } [ drop f ] diff --git a/basis/io/launcher/windows/windows.factor b/basis/io/launcher/windows/windows.factor index f6d32f272c..f6e4698a94 100644 --- a/basis/io/launcher/windows/windows.factor +++ b/basis/io/launcher/windows/windows.factor @@ -57,17 +57,17 @@ TUPLE: CreateProcess-args : fix-trailing-backslashes ( str -- str' ) 0 count-trailing-backslashes - 2 * ch'\\ append ; + 2 * char: \\ append ; ! Find groups of \, groups of \ followed by ", or naked " : escape-double-quote ( str -- newstr ) [ - { [ drop ch'\\ = ] [ nip "\\\"" member? ] } 2&& + { [ drop char: \\ = ] [ nip "\\\"" member? ] } 2&& ] monotonic-split [ - dup last ch'\" = [ + dup last char: \" = [ dup length 1 > [ ! String of backslashes + double-quote - length 1 - 2 * ch'\\ "\\\"" append + length 1 - 2 * char: \\ "\\\"" append ] [ ! Single double-quote drop "\\\"" @@ -81,7 +81,7 @@ TUPLE: CreateProcess-args ! See http://msdn.microsoft.com/en-us/library/ms647232.aspx : escape-argument ( str -- newstr ) escape-double-quote - ch'\s over member? [ + char: \s over member? [ fix-trailing-backslashes "\"" dup surround ] when ; diff --git a/basis/io/mmap/mmap-tests.factor b/basis/io/mmap/mmap-tests.factor index bb2bf0d7d0..e878251b4c 100644 --- a/basis/io/mmap/mmap-tests.factor +++ b/basis/io/mmap/mmap-tests.factor @@ -6,7 +6,7 @@ SPECIALIZED-ARRAY: uint |[ path | "12345" path ascii set-file-contents - { } [ path [ char ch'2 0 pick set-nth drop ] with-mapped-file ] unit-test + { } [ path [ char char: 2 0 pick set-nth drop ] with-mapped-file ] unit-test { 5 } [ path [ char length ] with-mapped-file ] unit-test { 5 } [ path [ char length ] with-mapped-file-reader ] unit-test { "22345" } [ path ascii file-contents ] unit-test diff --git a/basis/io/sockets/secure/openssl/openssl.factor b/basis/io/sockets/secure/openssl/openssl.factor index 353d9283d4..5ace8af9c5 100644 --- a/basis/io/sockets/secure/openssl/openssl.factor +++ b/basis/io/sockets/secure/openssl/openssl.factor @@ -361,7 +361,7 @@ M: ssl-handle dispose* "*." ?head [ { [ tail? ] - [ [ [ ch'. = ] count ] bi@ - 1 <= ] + [ [ [ char: . = ] count ] bi@ - 1 <= ] } 2&& ] [ = diff --git a/basis/io/streams/limited/limited-tests.factor b/basis/io/streams/limited/limited-tests.factor index 3f9eeb9ecc..ff93cb61fb 100644 --- a/basis/io/streams/limited/limited-tests.factor +++ b/basis/io/streams/limited/limited-tests.factor @@ -11,7 +11,7 @@ namespaces strings tools.test ; { } [ "data" get 24 "limited" set ] unit-test -{ ch'h } [ "limited" get stream-read1 ] unit-test +{ char: h } [ "limited" get stream-read1 ] unit-test { } [ "limited" get ascii "decoded" set ] unit-test @@ -30,13 +30,13 @@ namespaces strings tools.test ; { } [ "data" get 4 "limited" set ] unit-test -{ "abc" ch'\n } +{ "abc" char: \n } [ "\n" "limited" get stream-read-until [ >string ] dip ] unit-test { "" f } [ "\n" "limited" get stream-read-until [ >string ] dip ] unit-test -{ ch'a } +{ char: a } [ "a" 1 stream-read1 ] unit-test { "abc" } diff --git a/basis/io/streams/throwing/throwing-tests.factor b/basis/io/streams/throwing/throwing-tests.factor index 284d48799e..1c2d50d232 100644 --- a/basis/io/streams/throwing/throwing-tests.factor +++ b/basis/io/streams/throwing/throwing-tests.factor @@ -51,7 +51,7 @@ io.streams.throwing kernel namespaces tools.test ; ] with-byte-reader ] [ stream-exhausted? 
] must-fail-with -{ "asd" ch'f } [ +{ "asd" char: f } [ "asdf" [ [ "f" read-until ] throw-on-eof ] with-string-reader ] unit-test diff --git a/basis/ip-parser/ip-parser.factor b/basis/ip-parser/ip-parser.factor index 957edfe5e8..fb94b16244 100644 --- a/basis/ip-parser/ip-parser.factor +++ b/basis/ip-parser/ip-parser.factor @@ -62,7 +62,7 @@ ERROR: more-than-8-components ; dup hex> [ ] [ bad-ipv6-component ] ?if ; : split-ipv6 ( string -- seq ) - ":" split ch'. over last member? [ unclip-last ] [ f ] if + ":" split char: . over last member? [ unclip-last ] [ f ] if [ [ ipv6-component ] map ] [ [ parse-ipv4 append ] unless-empty ] bi* ; diff --git a/basis/json/prettyprint/prettyprint.factor b/basis/json/prettyprint/prettyprint.factor index 96537fb85f..f746567d1e 100644 --- a/basis/json/prettyprint/prettyprint.factor +++ b/basis/json/prettyprint/prettyprint.factor @@ -14,7 +14,7 @@ GENERIC: pprint-json* ( obj -- ) : write-spaces ( -- ) indent-level get 0 > [ indent-level get nspaces * - ch'\s + char: \s write ] when ; diff --git a/basis/json/reader/reader.factor b/basis/json/reader/reader.factor index 7e30a96a37..826851e347 100644 --- a/basis/json/reader/reader.factor +++ b/basis/json/reader/reader.factor @@ -44,25 +44,25 @@ DEFER: (read-json-string) : (read-json-escape) ( stream accum -- accum ) { sbuf } declare over stream-read1 { - { ch'\" [ ch'\" ] } - { ch'\\ [ ch'\\ ] } - { ch'/ [ ch'/ ] } - { ch'b [ ch'\b ] } - { ch'f [ ch'\f ] } - { ch'n [ ch'\n ] } - { ch'r [ ch'\r ] } - { ch't [ ch'\t ] } - { ch'u [ over read-json-escape-unicode ] } + { char: \" [ char: \" ] } + { char: \\ [ char: \\ ] } + { char: / [ char: / ] } + { char: b [ char: \b ] } + { char: f [ char: \f ] } + { char: n [ char: \n ] } + { char: r [ char: \r ] } + { char: t [ char: \t ] } + { char: u [ over read-json-escape-unicode ] } [ ] } case [ suffix! (read-json-string) ] [ json-error ] if* ; : (read-json-string) ( stream accum -- accum ) { sbuf } declare "\\\"" pick stream-read-until [ append! ] dip - ch'\" = [ nip ] [ (read-json-escape) ] if ; + char: \" = [ nip ] [ (read-json-escape) ] if ; : read-json-string ( stream -- str ) - "\\\"" over stream-read-until ch'\" = + "\\\"" over stream-read-until char: \" = [ nip ] [ >sbuf (read-json-escape) { sbuf } declare "" like ] if ; : second-last-unsafe ( seq -- second-last ) @@ -108,20 +108,20 @@ DEFER: (read-json-string) ! 2dup 1string swap . . ! Great for debug... { object vector object } declare { - { ch'\" [ over read-json-string suffix! ] } - { ch'\[ [ json-open-array ] } - { ch', [ v-over-push ] } - { ch'\] [ json-close-array ] } - { ch'\{ [ json-open-hash ] } - { ch'\: [ v-pick-push ] } - { ch'\} [ json-close-hash ] } - { ch'\s [ ] } - { ch'\t [ ] } - { ch'\r [ ] } - { ch'\n [ ] } - { ch't [ "rue" pick json-expect t suffix! ] } - { ch'f [ "alse" pick json-expect f suffix! ] } - { ch'n [ "ull" pick json-expect json-null suffix! ] } + { char: \" [ over read-json-string suffix! ] } + { char: \[ [ json-open-array ] } + { char: , [ v-over-push ] } + { char: \] [ json-close-array ] } + { char: \{ [ json-open-hash ] } + { char: \: [ v-pick-push ] } + { char: \} [ json-close-hash ] } + { char: \s [ ] } + { char: \t [ ] } + { char: \r [ ] } + { char: \n [ ] } + { char: t [ "rue" pick json-expect t suffix! ] } + { char: f [ "alse" pick json-expect f suffix! ] } + { char: n [ "ull" pick json-expect json-null suffix! ] } [ pick json-number [ suffix! 
] dip [ scan ] when* ] } case ; diff --git a/basis/json/writer/writer.factor b/basis/json/writer/writer.factor index 15e004d0a8..d2f10700ff 100644 --- a/basis/json/writer/writer.factor +++ b/basis/json/writer/writer.factor @@ -59,20 +59,20 @@ M: json-null stream-json-print PRIVATE> M: string stream-json-print - ch'\" over stream-write1 swap [ + char: \" over stream-write1 swap [ { - { ch'\" [ "\\\"" over stream-write ] } - { ch'\\ [ "\\\\" over stream-write ] } - { ch'/ [ + { char: \" [ "\\\"" over stream-write ] } + { char: \\ [ "\\\\" over stream-write ] } + { char: / [ json-escape-slashes? get [ "\\/" over stream-write ] - [ ch'/ over stream-write1 ] if + [ char: / over stream-write1 ] if ] } - { ch'\b [ "\\b" over stream-write ] } - { ch'\f [ "\\f" over stream-write ] } - { ch'\n [ "\\n" over stream-write ] } - { ch'\r [ "\\r" over stream-write ] } - { ch'\t [ "\\t" over stream-write ] } + { char: \b [ "\\b" over stream-write ] } + { char: \f [ "\\f" over stream-write ] } + { char: \n [ "\\n" over stream-write ] } + { char: \r [ "\\r" over stream-write ] } + { char: \t [ "\\t" over stream-write ] } { 0x2028 [ "\\u2028" over stream-write ] } { 0x2029 [ "\\u2029" over stream-write ] } [ @@ -87,7 +87,7 @@ M: string stream-json-print ] if ] } case - ] each ch'\" swap stream-write1 ; + ] each char: \" swap stream-write1 ; M: integer stream-json-print [ number>string ] [ stream-write ] bi* ; @@ -111,10 +111,10 @@ M: real stream-json-print [ >float number>string ] [ stream-write ] bi* ; M: sequence stream-json-print - ch'\[ over stream-write1 swap - over '[ ch', _ stream-write1 ] + char: \[ over stream-write1 swap + over '[ char: , _ stream-write1 ] pick '[ _ stream-json-print ] interleave - ch'\] swap stream-write1 ; + char: \] swap stream-write1 ; json ; M: real json-coerce >float number>string ; :: json-print-assoc ( obj stream -- ) - ch'\{ stream stream-write1 obj >alist - [ ch', stream stream-write1 ] + char: \{ stream stream-write1 obj >alist + [ char: , stream stream-write1 ] json-friendly-keys? get json-coerce-keys? 
get '[ first2 [ @@ -140,11 +140,11 @@ M: real json-coerce >float number>string ; [ _ [ json-coerce ] when ] if stream stream-json-print ] [ - ch'\: stream stream-write1 + char: \: stream stream-write1 stream stream-json-print ] bi* ] interleave - ch'\} stream stream-write1 ; + char: \} stream stream-write1 ; PRIVATE> diff --git a/basis/lcs/diff2html/diff2html.factor b/basis/lcs/diff2html/diff2html.factor index 2ed16f6666..ef7f15d80e 100644 --- a/basis/lcs/diff2html/diff2html.factor +++ b/basis/lcs/diff2html/diff2html.factor @@ -7,7 +7,7 @@ IN: lcs.diff2html GENERIC: diff-line ( obj -- xml ) : item-string ( item -- string ) - item>> [ ch'no-break-space 1string ] when-empty ; + item>> [ char: no-break-space 1string ] when-empty ; M: retain diff-line item-string diff --git a/basis/lcs/lcs-tests.factor b/basis/lcs/lcs-tests.factor index da3b68cf31..749ea2ddc7 100644 --- a/basis/lcs/lcs-tests.factor +++ b/basis/lcs/lcs-tests.factor @@ -14,12 +14,12 @@ USING: tools.test lcs ; { "abd" } [ "faxbcd" "abdef" lcs ] unit-test { { - T{ delete f ch'f } - T{ retain f ch'a } - T{ delete f ch'x } - T{ retain f ch'b } - T{ delete f ch'c } - T{ retain f ch'd } - T{ insert f ch'e } - T{ insert f ch'f } + T{ delete f char: f } + T{ retain f char: a } + T{ delete f char: x } + T{ retain f char: b } + T{ delete f char: c } + T{ retain f char: d } + T{ insert f char: e } + T{ insert f char: f } } } [ "faxbcd" "abdef" lcs-diff ] unit-test diff --git a/basis/listener/listener.factor b/basis/listener/listener.factor index 9746e2f6b6..fe94c7301b 100644 --- a/basis/listener/listener.factor +++ b/basis/listener/listener.factor @@ -119,7 +119,7 @@ t error-summary? set-global : datastack. ( datastack -- ) display-stacks? get [ ! [ nl "--- Data stack:" title. trimmed-stack. ] unless-empty - [ nl "data-stack" over length ch'\: append title. trimmed-stack. ] unless-empty + [ nl "data-stack" over length char: \: append title. trimmed-stack. ] unless-empty ] [ drop ] if ; :: listener-step ( datastack -- datastack' ) diff --git a/basis/logging/server/server.factor b/basis/logging/server/server.factor index a2ee11cd2a..671be0e33b 100644 --- a/basis/logging/server/server.factor +++ b/basis/logging/server/server.factor @@ -38,7 +38,7 @@ SYMBOL: log-files : timestamp-header. ( -- ) "[" write now (timestamp>rfc3339) "] " write ; -: multiline-header ( -- str ) 20 ch'- ; foldable +: multiline-header ( -- str ) 20 char: - ; foldable : multiline-header. ( -- ) "[" write multiline-header write "] " write ; diff --git a/basis/mime/multipart/multipart-tests.factor b/basis/mime/multipart/multipart-tests.factor index d6bed6b45f..4e73d5da68 100644 --- a/basis/mime/multipart/multipart-tests.factor +++ b/basis/mime/multipart/multipart-tests.factor @@ -71,7 +71,7 @@ SYMBOL: mime-test-server mime-test-server get insecure>> ; : a-stream ( n -- stream ) - ch'a ; + char: a ; { } [ [ diff --git a/basis/pack/pack.factor b/basis/pack/pack.factor index 7427a576dc..6da4041b03 100644 --- a/basis/pack/pack.factor +++ b/basis/pack/pack.factor @@ -33,64 +33,64 @@ M: object >n-byte-array ( n string -- byte-array ) heap-size >n-byte-array ; : expand-pack-format ( str -- str' ) f swap [ dup digit? 
- [ [ 0 or 10 * ] [ ch'0 - ] bi* + f ] + [ [ 0 or 10 * ] [ char: 0 - ] bi* + f ] [ [ 1 or ] [ ] bi* f swap ] if ] { } map-as "" concat-as nip ; foldable CONSTANT: pack-table H{ - { ch'c s8>byte-array } - { ch'C u8>byte-array } - { ch's s16>byte-array } - { ch'S u16>byte-array } - { ch't s24>byte-array } - { ch'T u24>byte-array } - { ch'i s32>byte-array } - { ch'I u32>byte-array } - { ch'q s64>byte-array } - { ch'Q u64>byte-array } - { ch'f write-float } - { ch'F write-float } - { ch'd write-double } - { ch'D write-double } - { ch'a write-c-string } + { char: c s8>byte-array } + { char: C u8>byte-array } + { char: s s16>byte-array } + { char: S u16>byte-array } + { char: t s24>byte-array } + { char: T u24>byte-array } + { char: i s32>byte-array } + { char: I u32>byte-array } + { char: q s64>byte-array } + { char: Q u64>byte-array } + { char: f write-float } + { char: F write-float } + { char: d write-double } + { char: D write-double } + { char: a write-c-string } } CONSTANT: unpack-table H{ - { ch'c [ 8 signed-endian> ] } - { ch'C [ unsigned-endian> ] } - { ch's [ 16 signed-endian> ] } - { ch'S [ unsigned-endian> ] } - { ch't [ 24 signed-endian> ] } - { ch'T [ unsigned-endian> ] } - { ch'i [ 32 signed-endian> ] } - { ch'I [ unsigned-endian> ] } - { ch'q [ 64 signed-endian> ] } - { ch'Q [ unsigned-endian> ] } - { ch'f [ unsigned-endian> bits>float ] } - { ch'F [ unsigned-endian> bits>float ] } - { ch'd [ unsigned-endian> bits>double ] } - { ch'D [ unsigned-endian> bits>double ] } - ! { ch'a read-c-string } + { char: c [ 8 signed-endian> ] } + { char: C [ unsigned-endian> ] } + { char: s [ 16 signed-endian> ] } + { char: S [ unsigned-endian> ] } + { char: t [ 24 signed-endian> ] } + { char: T [ unsigned-endian> ] } + { char: i [ 32 signed-endian> ] } + { char: I [ unsigned-endian> ] } + { char: q [ 64 signed-endian> ] } + { char: Q [ unsigned-endian> ] } + { char: f [ unsigned-endian> bits>float ] } + { char: F [ unsigned-endian> bits>float ] } + { char: d [ unsigned-endian> bits>double ] } + { char: D [ unsigned-endian> bits>double ] } + ! { char: a read-c-string } } CONSTANT: packed-length-table H{ - { ch'c 1 } - { ch'C 1 } - { ch's 2 } - { ch'S 2 } - { ch't 3 } - { ch'T 3 } - { ch'i 4 } - { ch'I 4 } - { ch'q 8 } - { ch'Q 8 } - { ch'f 4 } - { ch'F 4 } - { ch'd 8 } - { ch'D 8 } + { char: c 1 } + { char: C 1 } + { char: s 2 } + { char: S 2 } + { char: t 3 } + { char: T 3 } + { char: i 4 } + { char: I 4 } + { char: q 8 } + { char: Q 8 } + { char: f 4 } + { char: F 4 } + { char: d 8 } + { char: D 8 } } PRIVATE> diff --git a/basis/peg/ebnf/ebnf-docs.factor b/basis/peg/ebnf/ebnf-docs.factor index 7cea9ecbb6..8be1d03f32 100644 --- a/basis/peg/ebnf/ebnf-docs.factor +++ b/basis/peg/ebnf/ebnf-docs.factor @@ -342,7 +342,7 @@ ARTICLE: "peg.ebnf.tokenizers" "EBNF Tokenizers" } } "This parser when run with the string \"++--\" or the array " -"{ ch'+ ch'+ ch'- ch'- } will succeed with an AST of { \"++\" \"--\" }. " +"{ char: + char: + char: - char: - } will succeed with an AST of { \"++\" \"--\" }. 
" "If you want to add whitespace handling to the grammar you need to put it " "between the terminals:" { $examples diff --git a/basis/peg/ebnf/ebnf-tests.factor b/basis/peg/ebnf/ebnf-tests.factor index 3d48919bed..c953348eee 100644 --- a/basis/peg/ebnf/ebnf-tests.factor +++ b/basis/peg/ebnf/ebnf-tests.factor @@ -156,11 +156,11 @@ IN: peg.ebnf.tests "ab" EBNF[=[ foo=('a') [[ drop 1 ]] ('b') [[ drop 2 ]] ]=] ] unit-test -{ ch'A } [ +{ char: A } [ "A" EBNF[=[ foo=[A-Z] ]=] ] unit-test -{ ch'Z } [ +{ char: Z } [ "Z" EBNF[=[ foo=[A-Z] ]=] ] unit-test @@ -168,7 +168,7 @@ IN: peg.ebnf.tests "0" EBNF[=[ foo=[A-Z] ]=] ] must-fail -{ ch'0 } [ +{ char: 0 } [ "0" EBNF[=[ foo=[^A-Z] ]=] ] unit-test @@ -498,7 +498,7 @@ foo= 'd' "ac" parser3 ] unit-test -{ V{ ch'a "d" } } [ +{ V{ char: a "d" } } [ "ad" parser4 ] unit-test @@ -517,7 +517,7 @@ foo= 'd' ] unit-test ! Tokenizer tests -{ V{ "a" ch'b } } [ +{ V{ "a" char: b } } [ "ab" EBNF[=[ tokenizer=default foo="a" . ]=] ] unit-test @@ -541,7 +541,7 @@ Special = "(" | ")" | "{" | "}" | "[" | "]" | "," | " Tok = Spaces (Number | Special ) ]=] -{ V{ ch'1 T{ ast-number f 23 } ";" ch'x } } [ +{ V{ char: 1 T{ ast-number f 23 } ";" char: x } } [ "123;x" EBNF[=[ bar = . tokenizer = foo=. tokenizer=default baz=. @@ -549,7 +549,7 @@ Tok = Spaces (Number | Special ) ]=] ] unit-test -{ V{ ch'5 "+" ch'2 } } [ +{ V{ char: 5 "+" char: 2 } } [ "5+2" EBNF[=[ space=(" " | "\n") number=[0-9] @@ -560,7 +560,7 @@ Tok = Spaces (Number | Special ) ]=] ] unit-test -{ V{ ch'5 "+" ch'2 } } [ +{ V{ char: 5 "+" char: 2 } } [ "5 + 2" EBNF[=[ space=(" " | "\n") number=[0-9] diff --git a/basis/peg/ebnf/ebnf.factor b/basis/peg/ebnf/ebnf.factor index 0773defbd6..93c4f4824c 100644 --- a/basis/peg/ebnf/ebnf.factor +++ b/basis/peg/ebnf/ebnf.factor @@ -112,11 +112,11 @@ C: ebnf ! between the quotes. [ [ - [ ch'\\ = ] satisfy + [ char: \\ = ] satisfy [ "\"\\" member? ] satisfy 2seq , - [ ch'\" = not ] satisfy , + [ char: \" = not ] satisfy , ] choice* repeat1 "\"" "\"" surrounded-by , - [ ch'\' = not ] satisfy repeat1 "'" "'" surrounded-by , + [ char: \' = not ] satisfy repeat1 "'" "'" surrounded-by , ] choice* [ "" flatten-as unescape-string ] action ; : non-terminal-parser ( -- parser ) @@ -140,7 +140,7 @@ C: ebnf [ { [ blank? ] - [ ch'> = ] + [ char: > = ] } 1|| not ] satisfy repeat1 [ >string ] action ; @@ -155,13 +155,13 @@ C: ebnf : any-character-parser ( -- parser ) ! A parser to match the symbol for any character match. - [ ch'. = ] satisfy [ drop ] action ; + [ char: . = ] satisfy [ drop ] action ; : range-parser-parser ( -- parser ) ! Match the syntax for declaring character ranges [ [ "[" syntax , "[" token ensure-not , ] seq* hide , - [ ch'\] = not ] satisfy repeat1 , + [ char: \] = not ] satisfy repeat1 , "]" syntax , ] seq* [ first >string unescape-string ] action ; diff --git a/basis/peg/parsers/parsers-docs.factor b/basis/peg/parsers/parsers-docs.factor index 09caec34c7..ef6674ee43 100644 --- a/basis/peg/parsers/parsers-docs.factor +++ b/basis/peg/parsers/parsers-docs.factor @@ -11,7 +11,7 @@ HELP: 1token } { $description "Calls 1string on a character and returns a parser that matches that character." } { $examples - { $example "USING: peg peg.parsers prettyprint ;" "\"a\" ch'a 1token parse ." "\"a\"" } + { $example "USING: peg peg.parsers prettyprint ;" "\"a\" char: a 1token parse ." 
"\"a\"" } } { $see-also string-parser } ; HELP: (list-of) diff --git a/basis/peg/parsers/parsers.factor b/basis/peg/parsers/parsers.factor index 80ed7a22e7..77390a2c1e 100644 --- a/basis/peg/parsers/parsers.factor +++ b/basis/peg/parsers/parsers.factor @@ -76,9 +76,9 @@ PRIVATE> : string-parser ( -- parser ) [ - [ ch'\" = ] satisfy hide , - [ ch'\" = not ] satisfy repeat0 , - [ ch'\" = ] satisfy hide , + [ char: \" = ] satisfy hide , + [ char: \" = not ] satisfy repeat0 , + [ char: \" = ] satisfy hide , ] seq* [ first >string ] action ; : (range-pattern) ( pattern -- string ) @@ -86,7 +86,7 @@ PRIVATE> ! all characters within that range. [ any-char , - [ ch'- = ] satisfy hide , + [ char: - = ] satisfy hide , any-char , ] seq* [ first2 [a,b] >string diff --git a/basis/peg/peg-docs.factor b/basis/peg/peg-docs.factor index 4a545632af..71e634adde 100644 --- a/basis/peg/peg-docs.factor +++ b/basis/peg/peg-docs.factor @@ -49,7 +49,7 @@ HELP: range } { $description "Returns a parser that matches a single character that lies within the range of characters given, inclusive." } -{ $examples { $code ": digit ( -- parser ) ch'0 ch'9 range ;" } } ; +{ $examples { $code ": digit ( -- parser ) char: 0 char: 9 range ;" } } ; HELP: seq { $values @@ -137,7 +137,7 @@ HELP: action "from that parse. The result of the quotation is then used as the final AST. This can be used " "for manipulating the parse tree to produce a AST better suited for the task at hand rather than " "the default AST. If the quotation returns " { $link fail } " then the parser fails." } -{ $code "ch'0 ch'9 range [ to-digit ] action" } ; +{ $code "char: 0 char: 9 range [ to-digit ] action" } ; HELP: sp { $values diff --git a/basis/peg/peg-tests.factor b/basis/peg/peg-tests.factor index 8c14bf80d6..f75fd3d990 100644 --- a/basis/peg/peg-tests.factor +++ b/basis/peg/peg-tests.factor @@ -18,19 +18,19 @@ IN: peg.tests ] unit-test [ - "" ch'a ch'z range parse + "" char: a char: z range parse ] must-fail [ - "1bcd" ch'a ch'z range parse + "1bcd" char: a char: z range parse ] must-fail -{ ch'a } [ - "abcd" ch'a ch'z range parse +{ char: a } [ + "abcd" char: a char: z range parse ] unit-test -{ ch'z } [ - "zbcd" ch'a ch'z range parse +{ char: z } [ + "zbcd" char: a char: z range parse ] unit-test [ @@ -93,12 +93,12 @@ IN: peg.tests "cb" "a" token optional "b" token 2array seq parse ] must-fail -{ V{ ch'a ch'b } } [ - "ab" "a" token ensure ch'a ch'z range dup 3array seq parse +{ V{ char: a char: b } } [ + "ab" "a" token ensure char: a char: z range dup 3array seq parse ] unit-test [ - "bb" "a" token ensure ch'a ch'z range 2array seq parse + "bb" "a" token ensure char: a char: z range 2array seq parse ] must-fail { t } [ @@ -138,11 +138,11 @@ IN: peg.tests ] must-fail [ - "b" [ ch'a = ] satisfy parse + "b" [ char: a = ] satisfy parse ] must-fail -{ ch'a } [ - "a" [ ch'a = ] satisfy parse +{ char: a } [ + "a" [ char: a = ] satisfy parse ] unit-test { "a" } [ @@ -191,7 +191,7 @@ IN: peg.tests "A" [ drop t ] satisfy [ 66 >= ] semantic parse ] must-fail -{ ch'B } [ +{ char: B } [ "B" [ drop t ] satisfy [ 66 >= ] semantic parse ] unit-test diff --git a/basis/persistent/hashtables/hashtables-tests.factor b/basis/persistent/hashtables/hashtables-tests.factor index 64ddecc437..f3635ce1b1 100644 --- a/basis/persistent/hashtables/hashtables-tests.factor +++ b/basis/persistent/hashtables/hashtables-tests.factor @@ -82,7 +82,7 @@ M: hash-0-b hashcode* 2drop 0 ; : random-string ( -- str ) 1000000 random ; - ! [ ch'a ch'z [a,b] random ] "" replicate-as ; + ! 
[ char: a char: z [a,b] random ] "" replicate-as ; : random-assocs ( n -- hash phash ) [ random-string ] replicate diff --git a/basis/porter-stemmer/porter-stemmer.factor b/basis/porter-stemmer/porter-stemmer.factor index 252bcaf138..465434d7d2 100644 --- a/basis/porter-stemmer/porter-stemmer.factor +++ b/basis/porter-stemmer/porter-stemmer.factor @@ -5,7 +5,7 @@ IN: porter-stemmer 2dup nth dup "aeiou" member? [ 3drop f ] [ - ch'y = [ + char: y = [ over zero? [ 2drop t ] [ [ 1 - ] dip consonant? not ] if ] [ @@ -67,7 +67,7 @@ IN: porter-stemmer pick consonant-seq 0 > [ nip ] [ drop ] if append ; : step1a ( str -- newstr ) - dup last ch's = [ + dup last char: s = [ { { [ "sses" ?tail ] [ "ss" append ] } { [ "ies" ?tail ] [ "i" append ] } @@ -199,13 +199,13 @@ IN: porter-stemmer [ 1 = [ but-last-slice cvc? not ] [ drop f ] if ] if ; : remove-e ( str -- newstr ) - dup last ch'e = [ + dup last char: e = [ dup remove-e? [ but-last-slice ] when ] when ; : ll->l ( str -- newstr ) { - { [ dup last ch'l = not ] [ ] } + { [ dup last char: l = not ] [ ] } { [ dup length 1 - over double-consonant? not ] [ ] } { [ dup consonant-seq 1 > ] [ but-last-slice ] } [ ] diff --git a/basis/prettyprint/backend/backend-docs.factor b/basis/prettyprint/backend/backend-docs.factor index 0388cbb621..8b72758607 100644 --- a/basis/prettyprint/backend/backend-docs.factor +++ b/basis/prettyprint/backend/backend-docs.factor @@ -10,7 +10,7 @@ HELP: pprint-word $prettyprinting-note ; HELP: ch>ascii-escape -{ $values { "ch" "a character" } { "ch'" "a character" } { "?" boolean } } +{ $values { "ch" "a character" } { "char: " "a character" } { "?" boolean } } { $description "Converts a character to an escape code." } ; HELP: unparse-ch diff --git a/basis/prettyprint/backend/backend.factor b/basis/prettyprint/backend/backend.factor index 78edb529e8..6e7a24c212 100644 --- a/basis/prettyprint/backend/backend.factor +++ b/basis/prettyprint/backend/backend.factor @@ -97,21 +97,21 @@ M: f pprint* drop \ f pprint-word ; ! Strings : ch>ascii-escape ( ch -- ch' ? ) H{ - { ch'\a ch'a } - { ch'\b ch'b } - { ch'\e ch'e } - { ch'\f ch'f } - { ch'\n ch'n } - { ch'\r ch'r } - { ch'\t ch't } - { ch'\v ch'v } - { ch'\0 ch'0 } - { ch'\\ ch'\\ } - { ch'\" ch'\" } + { char: \a char: a } + { char: \b char: b } + { char: \e char: e } + { char: \f char: f } + { char: \n char: n } + { char: \r char: r } + { char: \t char: t } + { char: \v char: v } + { char: \0 char: 0 } + { char: \\ char: \\ } + { char: \" char: \" } } ?at ; inline : unparse-ch ( ch -- ) - ch>ascii-escape [ ch'\\ , , ] [ + ch>ascii-escape [ char: \\ , , ] [ dup 32 < [ dup 16 < "\\x0" "\\x" ? 
% >hex % ] [ , ] if ] if ; diff --git a/basis/prettyprint/prettyprint-tests.factor b/basis/prettyprint/prettyprint-tests.factor index 1c386a1c7a..44a1715a73 100644 --- a/basis/prettyprint/prettyprint-tests.factor +++ b/basis/prettyprint/prettyprint-tests.factor @@ -468,7 +468,7 @@ TUPLE: fo { a intersection{ integer fixnum } initial: 0 } ; : margin-test ( number-of-a-s -- str ) [ - [ ch'a text "b" text ] with-pprint + [ char: a text "b" text ] with-pprint ] with-string-writer ; { diff --git a/basis/prettyprint/sections/sections.factor b/basis/prettyprint/sections/sections.factor index 9c66a753dd..87c3722924 100644 --- a/basis/prettyprint/sections/sections.factor +++ b/basis/prettyprint/sections/sections.factor @@ -43,7 +43,7 @@ M: maybe vocabulary-name line-limit get dup [ pprinter get line-count>> <= ] when ; : do-indent ( -- ) - pprinter get indent>> [ ch'\s write ] unless-zero ; + pprinter get indent>> [ char: \s write ] unless-zero ; : fresh-line ( n -- ) pprinter get 2dup last-newline>> = [ diff --git a/basis/quoted-printable/quoted-printable.factor b/basis/quoted-printable/quoted-printable.factor index b4069d828a..e79c696505 100644 --- a/basis/quoted-printable/quoted-printable.factor +++ b/basis/quoted-printable/quoted-printable.factor @@ -15,16 +15,16 @@ IN: quoted-printable : printable? ( ch -- ? ) { - [ ch'\s ch'< between? ] - [ ch'> ch'~ between? ] - [ ch'\t = ] + [ char: \s char: < between? ] + [ char: > char: ~ between? ] + [ char: \t = ] } 1|| ; : char>quoted ( ch -- str ) dup printable? [ 1string ] [ assure-small >hex >upper - 2 ch'0 pad-head - ch'= prefix + 2 char: 0 pad-head + char: = prefix ] if ; : take-some ( seqs -- seqs seq ) @@ -46,8 +46,8 @@ PRIVATE> ] if ] when ; diff --git a/basis/random/data/data.factor b/basis/random/data/data.factor index 12891ea74a..e5301a93f4 100644 --- a/basis/random/data/data.factor +++ b/basis/random/data/data.factor @@ -10,11 +10,11 @@ CONSTANT: letters-count 26 >> : random-digit ( -- ch ) - digits-count random ch'0 + ; + digits-count random char: 0 + ; -: random-LETTER ( -- ch ) letters-count random ch'A + ; +: random-LETTER ( -- ch ) letters-count random char: A + ; -: random-letter ( -- ch ) letters-count random ch'a + ; +: random-letter ( -- ch ) letters-count random char: a + ; : random-Letter ( -- ch ) { random-LETTER random-letter } execute-random ; diff --git a/basis/regexp/classes/classes-tests.factor b/basis/regexp/classes/classes-tests.factor index 3d415fd5c5..a4f9c5cb26 100644 --- a/basis/regexp/classes/classes-tests.factor +++ b/basis/regexp/classes/classes-tests.factor @@ -8,10 +8,10 @@ IN: regexp.classes.tests { f } [ { 1 2 } ] unit-test { T{ or-class f { 1 2 } } } [ { 1 2 } ] unit-test { 3 } [ { 1 2 } 3 2array ] unit-test -{ ch'A } [ ch'A LETTER-class 2array ] unit-test -{ ch'A } [ LETTER-class ch'A 2array ] unit-test -{ T{ primitive-class { class LETTER-class } } } [ ch'A LETTER-class 2array ] unit-test -{ T{ primitive-class { class LETTER-class } } } [ LETTER-class ch'A 2array ] unit-test +{ char: A } [ char: A LETTER-class 2array ] unit-test +{ char: A } [ LETTER-class char: A 2array ] unit-test +{ T{ primitive-class { class LETTER-class } } } [ char: A LETTER-class 2array ] unit-test +{ T{ primitive-class { class LETTER-class } } } [ LETTER-class char: A 2array ] unit-test { t } [ { t 1 } ] unit-test { t } [ { 1 t } ] unit-test { f } [ { f 1 } ] unit-test diff --git a/basis/regexp/classes/classes.factor b/basis/regexp/classes/classes.factor index 24e8e41977..643e6d06a4 100644 --- a/basis/regexp/classes/classes.factor 
+++ b/basis/regexp/classes/classes.factor @@ -55,7 +55,7 @@ M: digit-class class-member? ( obj class -- ? ) drop digit? ; inline : c-identifier-char? ( ch -- ? ) - { [ alpha? ] [ ch'_ = ] } 1|| ; + { [ alpha? ] [ char: _ = ] } 1|| ; M: c-identifier-class class-member? ( obj class -- ? ) drop c-identifier-char? ; inline @@ -76,16 +76,16 @@ M: java-printable-class class-member? ( obj class -- ? ) drop java-printable? ; inline M: non-newline-blank-class class-member? ( obj class -- ? ) - drop { [ blank? ] [ ch'\n = not ] } 1&& ; inline + drop { [ blank? ] [ char: \n = not ] } 1&& ; inline M: control-character-class class-member? ( obj class -- ? ) drop control? ; inline : hex-digit? ( ch -- ? ) { - [ ch'A ch'F between? ] - [ ch'a ch'f between? ] - [ ch'0 ch'9 between? ] + [ char: A char: F between? ] + [ char: a char: f between? ] + [ char: 0 char: 9 between? ] } 1|| ; M: hex-digit-class class-member? ( obj class -- ? ) @@ -93,8 +93,8 @@ M: hex-digit-class class-member? ( obj class -- ? ) : java-blank? ( ch -- ? ) { - ch'\s ch'\t ch'\n - ch'\v ch'\a ch'\r + char: \s char: \t char: \n + char: \v char: \a char: \r } member? ; M: java-blank-class class-member? ( obj class -- ? ) diff --git a/basis/regexp/compiler/compiler.factor b/basis/regexp/compiler/compiler.factor index 365b8d333f..a62561ac10 100644 --- a/basis/regexp/compiler/compiler.factor +++ b/basis/regexp/compiler/compiler.factor @@ -37,10 +37,10 @@ M: ^ question>quot drop [ { [ drop zero? ] [ [ 1 - ] dip ?nth "\r\n" member? ] } 2|| ] ; M: $unix question>quot - drop [ { [ length = ] [ ?nth ch'\n = ] } 2|| ] ; + drop [ { [ length = ] [ ?nth char: \n = ] } 2|| ] ; M: ^unix question>quot - drop [ { [ drop zero? ] [ [ 1 - ] dip ?nth ch'\n = ] } 2|| ] ; + drop [ { [ drop zero? ] [ [ 1 - ] dip ?nth char: \n = ] } 2|| ] ; M: word-break question>quot drop [ word-break-at? ] ; diff --git a/basis/regexp/minimize/minimize-tests.factor b/basis/regexp/minimize/minimize-tests.factor index 7485a1ed0c..f9f2a52907 100644 --- a/basis/regexp/minimize/minimize-tests.factor +++ b/basis/regexp/minimize/minimize-tests.factor @@ -26,9 +26,9 @@ IN: regexp.minimize.tests { T{ transition-table { transitions H{ - { 0 H{ { ch'a 1 } { ch'b 1 } } } - { 1 H{ { ch'a 2 } { ch'b 2 } } } - { 2 H{ { ch'c 3 } } } + { 0 H{ { char: a 1 } { char: b 1 } } } + { 1 H{ { char: a 2 } { char: b 2 } } } + { 2 H{ { char: c 3 } } } { 3 H{ } } } } { start-state 0 } @@ -37,12 +37,12 @@ IN: regexp.minimize.tests } [ T{ transition-table { transitions H{ - { 0 H{ { ch'a 1 } { ch'b 4 } } } - { 1 H{ { ch'a 2 } { ch'b 5 } } } - { 2 H{ { ch'c 3 } } } + { 0 H{ { char: a 1 } { char: b 4 } } } + { 1 H{ { char: a 2 } { char: b 5 } } } + { 2 H{ { char: c 3 } } } { 3 H{ } } - { 4 H{ { ch'a 2 } { ch'b 5 } } } - { 5 H{ { ch'c 6 } } } + { 4 H{ { char: a 2 } { char: b 5 } } } + { 5 H{ { char: c 6 } } } { 6 H{ } } } } { start-state 0 } diff --git a/basis/regexp/negation/negation-tests.factor b/basis/regexp/negation/negation-tests.factor index 0345edce71..fc548a110c 100644 --- a/basis/regexp/negation/negation-tests.factor +++ b/basis/regexp/negation/negation-tests.factor @@ -7,7 +7,7 @@ IN: regexp.negation.tests ! R/ |[^a]|.+/ T{ transition-table { transitions H{ - { 0 H{ { ch'a 1 } { T{ not-class f ch'a } -1 } } } + { 0 H{ { char: a 1 } { T{ not-class f char: a } -1 } } } { 1 H{ { t -1 } } } { -1 H{ { t -1 } } } } } @@ -18,7 +18,7 @@ IN: regexp.negation.tests ! 
R/ a/ T{ transition-table { transitions H{ - { 0 H{ { ch'a 1 } } } + { 0 H{ { char: a 1 } } } { 1 H{ } } } } { start-state 0 } diff --git a/basis/regexp/nfa/nfa.factor b/basis/regexp/nfa/nfa.factor index a92891df81..d6feb793a3 100644 --- a/basis/regexp/nfa/nfa.factor +++ b/basis/regexp/nfa/nfa.factor @@ -116,10 +116,10 @@ M: not-class modify-class class>> modify-class ; MEMO: unix-dot ( -- class ) - ch'\n ; + char: \n ; MEMO: nonl-dot ( -- class ) - { ch'\n ch'\r } ; + { char: \n char: \r } ; M: dot modify-class drop dotall option? [ t ] [ diff --git a/basis/regexp/parser/parser.factor b/basis/regexp/parser/parser.factor index 83d5727042..2d4de4a537 100644 --- a/basis/regexp/parser/parser.factor +++ b/basis/regexp/parser/parser.factor @@ -71,39 +71,39 @@ MEMO: simple-category-table ( -- table ) : lookup-escape ( char -- ast ) { - { ch'a [ 0x7 ] } - { ch'e [ 0x1b ] } - { ch'f [ 0xc ] } - ! { ch'f [ ch'\f ] } - { ch'n [ ch'\n ] } - { ch'r [ ch'\r ] } - { ch't [ ch'\t ] } - { ch'v [ ch'\v ] } - { ch'0 [ ch'\0 ] } - { ch'\\ [ ch'\\ ] } + { char: a [ 0x7 ] } + { char: e [ 0x1b ] } + { char: f [ 0xc ] } + ! { char: f [ char: \f ] } + { char: n [ char: \n ] } + { char: r [ char: \r ] } + { char: t [ char: \t ] } + { char: v [ char: \v ] } + { char: 0 [ char: \0 ] } + { char: \\ [ char: \\ ] } - { ch'w [ c-identifier-class ] } - { ch'W [ c-identifier-class ] } - { ch's [ java-blank-class ] } - { ch'S [ java-blank-class ] } - { ch'd [ digit-class ] } - { ch'D [ digit-class ] } + { char: w [ c-identifier-class ] } + { char: W [ c-identifier-class ] } + { char: s [ java-blank-class ] } + { char: S [ java-blank-class ] } + { char: d [ digit-class ] } + { char: D [ digit-class ] } - { ch'z [ end-of-input ] } - { ch'Z [ end-of-file ] } - { ch'A [ beginning-of-input ] } - { ch'b [ word-break ] } - { ch'B [ word-break ] } + { char: z [ end-of-input ] } + { char: Z [ end-of-file ] } + { char: A [ beginning-of-input ] } + { char: b [ word-break ] } + { char: B [ word-break ] } [ ] } case ; : options-assoc ( -- assoc ) H{ - { ch'i case-insensitive } - { ch'd unix-lines } - { ch'm multiline } - { ch'r reversed-regexp } - { ch's dotall } + { char: i case-insensitive } + { char: d unix-lines } + { char: m multiline } + { char: r reversed-regexp } + { char: s dotall } } ; ERROR: nonexistent-option name ; diff --git a/basis/regexp/regexp.factor b/basis/regexp/regexp.factor index 37ad68eebc..42239d3f00 100644 --- a/basis/regexp/regexp.factor +++ b/basis/regexp/regexp.factor @@ -202,7 +202,7 @@ PRIVATE> dup skip-blank [ dupd [ [ [ "\\/" member? 
] find-from ] keep swap [ - ch'\ = [ [ 2 + ] dip t ] [ f ] if + char: \ = [ [ 2 + ] dip t ] [ f ] if ] [ "Unterminated regexp" throw ] if* diff --git a/basis/sequences/parser/parser-tests.factor b/basis/sequences/parser/parser-tests.factor index 065f62e8c9..966b4ddbf7 100644 --- a/basis/sequences/parser/parser-tests.factor +++ b/basis/sequences/parser/parser-tests.factor @@ -12,7 +12,7 @@ USING: tools.test sequences.parser unicode kernel accessors ; { "foo" ";bar" } [ "foo;bar" [ - [ ch'\; take-until-object ] [ take-rest ] bi + [ char: \; take-until-object ] [ take-rest ] bi ] parse-sequence ] unit-test diff --git a/basis/serialize/serialize.factor b/basis/serialize/serialize.factor index 5a00f1d78c..1223d1d9f7 100644 --- a/basis/serialize/serialize.factor +++ b/basis/serialize/serialize.factor @@ -62,22 +62,22 @@ SYMBOL: serialized : serialize-shared ( obj quot -- ) [ dup object-id - [ ch'o write1 serialize-cell drop ] + [ char: o write1 serialize-cell drop ] ] dip if* ; inline M: f (serialize) ( obj -- ) - drop ch'n write1 ; + drop char: n write1 ; M: integer (serialize) ( obj -- ) [ - ch'z write1 + char: z write1 ] [ - dup 0 < [ neg ch'm ] [ ch'p ] if write1 + dup 0 < [ neg char: m ] [ char: p ] if write1 serialize-cell ] if-zero ; M: float (serialize) ( obj -- ) - ch'F write1 + char: F write1 double>bits serialize-cell ; : serialize-seq ( obj code -- ) @@ -90,7 +90,7 @@ M: float (serialize) ( obj -- ) M: tuple (serialize) ( obj -- ) [ - ch'T write1 + char: T write1 [ class-of (serialize) ] [ add-object ] [ tuple-slots (serialize) ] @@ -98,23 +98,23 @@ M: tuple (serialize) ( obj -- ) ] serialize-shared ; M: array (serialize) ( obj -- ) - ch'a serialize-seq ; + char: a serialize-seq ; M: quotation (serialize) ( obj -- ) [ - ch'q write1 + char: q write1 [ >array (serialize) ] [ add-object ] bi ] serialize-shared ; M: hashtable (serialize) ( obj -- ) [ - ch'h write1 + char: h write1 [ add-object ] [ >alist (serialize) ] bi ] serialize-shared ; M: byte-array (serialize) ( obj -- ) [ - ch'A write1 + char: A write1 [ add-object ] [ length serialize-cell ] [ write ] tri @@ -122,7 +122,7 @@ M: byte-array (serialize) ( obj -- ) M: string (serialize) ( obj -- ) [ - ch's write1 + char: s write1 [ add-object ] [ utf8 encode @@ -132,11 +132,11 @@ M: string (serialize) ( obj -- ) ] serialize-shared ; : serialize-true ( word -- ) - drop ch't write1 ; + drop char: t write1 ; : serialize-gensym ( word -- ) [ - ch'G write1 + char: G write1 [ add-object ] [ def>> (serialize) ] [ props>> (serialize) ] @@ -144,7 +144,7 @@ M: string (serialize) ( obj -- ) ] serialize-shared ; : serialize-word ( word -- ) - ch'w write1 + char: w write1 [ name>> (serialize) ] [ vocabulary>> (serialize) ] bi ; @@ -157,7 +157,7 @@ M: word (serialize) ( obj -- ) } cond ; M: wrapper (serialize) ( obj -- ) - ch'W write1 + char: W write1 wrapped>> (serialize) ; DEFER: (deserialize) @@ -246,22 +246,22 @@ SYMBOL: deserialized : deserialize* ( -- object ? 
) read1 [ { - { ch'A [ deserialize-byte-array ] } - { ch'F [ deserialize-float ] } - { ch'T [ deserialize-tuple ] } - { ch'W [ deserialize-wrapper ] } - { ch'a [ deserialize-array ] } - { ch'h [ deserialize-hashtable ] } - { ch'm [ deserialize-negative-integer ] } - { ch'n [ deserialize-false ] } - { ch't [ deserialize-true ] } - { ch'o [ deserialize-unknown ] } - { ch'p [ deserialize-positive-integer ] } - { ch'q [ deserialize-quotation ] } - { ch's [ deserialize-string ] } - { ch'w [ deserialize-word ] } - { ch'G [ deserialize-word ] } - { ch'z [ deserialize-zero ] } + { char: A [ deserialize-byte-array ] } + { char: F [ deserialize-float ] } + { char: T [ deserialize-tuple ] } + { char: W [ deserialize-wrapper ] } + { char: a [ deserialize-array ] } + { char: h [ deserialize-hashtable ] } + { char: m [ deserialize-negative-integer ] } + { char: n [ deserialize-false ] } + { char: t [ deserialize-true ] } + { char: o [ deserialize-unknown ] } + { char: p [ deserialize-positive-integer ] } + { char: q [ deserialize-quotation ] } + { char: s [ deserialize-string ] } + { char: w [ deserialize-word ] } + { char: G [ deserialize-word ] } + { char: z [ deserialize-zero ] } } case t ] [ f f diff --git a/basis/smtp/smtp.factor b/basis/smtp/smtp.factor index d5280972fe..246360fdc7 100644 --- a/basis/smtp/smtp.factor +++ b/basis/smtp/smtp.factor @@ -97,7 +97,7 @@ ERROR: bad-email-address email ; LOG: smtp-response DEBUG : multiline? ( response -- ? ) - 3 swap ?nth ch'- = ; + 3 swap ?nth char: - = ; : (receive-response) ( -- ) read-crlf diff --git a/basis/strings/tables/tables.factor b/basis/strings/tables/tables.factor index e0e1646e1c..e325d01290 100644 --- a/basis/strings/tables/tables.factor +++ b/basis/strings/tables/tables.factor @@ -9,7 +9,7 @@ IN: strings.tables dup longest length '[ _ "" pad-tail ] map! ; : format-column ( seq -- seq ) - dup longest length '[ _ ch'\s pad-tail ] map! ; + dup longest length '[ _ char: \s pad-tail ] map! ; PRIVATE> diff --git a/basis/tools/deploy/config/config-docs.factor b/basis/tools/deploy/config/config-docs.factor index 0f40f7c28a..7ceefad9b4 100644 --- a/basis/tools/deploy/config/config-docs.factor +++ b/basis/tools/deploy/config/config-docs.factor @@ -82,9 +82,9 @@ $nl "Off by default. Programs wishing to use the UI must be deployed with this flag on." } ; HELP: deploy-unicode? -{ $description "Deploy flag. If set, full Unicode " { $snippet "ch'" } " syntax is included." +{ $description "Deploy flag. If set, full Unicode " { $snippet "char: " } " syntax is included." $nl -"Off by default. If your program needs to use " { $snippet "ch'" } " with named characters, enable this flag." } ; +"Off by default. If your program needs to use " { $snippet "char: " } " with named characters, enable this flag." } ; HELP: deploy-console? { $description "Deploy flag. If set, the deployed executable will be configured as a console application. On Windows, this means the application will be deployed in the console subsystem and will be attached to a console window. On Mac OS X, this means the application will be deployed as a Unix executable instead of a Mac application bundle. On other Unix platforms, the flag has no effect." 
diff --git a/basis/tools/disassembler/udis/udis.factor b/basis/tools/disassembler/udis/udis.factor index 20e30417c8..fc39f728b3 100644 --- a/basis/tools/disassembler/udis/udis.factor +++ b/basis/tools/disassembler/udis/udis.factor @@ -63,8 +63,8 @@ SINGLETON: udis-disassembler dup [ second length ] [ max ] map-reduce '[ [ - [ first >hex cell 2 * ch'0 pad-head % ": " % ] - [ second _ ch'\s pad-tail % " " % ] + [ first >hex cell 2 * char: 0 pad-head % ": " % ] + [ second _ char: \s pad-tail % " " % ] [ third resolve-call % ] tri ] "" make diff --git a/basis/tools/files/files.factor b/basis/tools/files/files.factor index a49ecaf3ab..aac366e93f 100644 --- a/basis/tools/files/files.factor +++ b/basis/tools/files/files.factor @@ -9,22 +9,22 @@ IN: tools.files : dir-or-size ( file-info -- str ) dup directory? [ - drop "" 20 ch'\s pad-tail + drop "" 20 char: \s pad-tail ] [ - size>> number>string 20 ch'\s pad-head + size>> number>string 20 char: \s pad-head ] if ; : listing-time ( timestamp -- string ) [ hour>> ] [ minute>> ] bi - [ number>string 2 ch'0 pad-head ] bi@ ":" glue ; + [ number>string 2 char: 0 pad-head ] bi@ ":" glue ; : listing-date ( timestamp -- string ) [ month>> month-abbreviation ] - [ day>> number>string 2 ch'\s pad-head ] + [ day>> number>string 2 char: \s pad-head ] [ dup year>> dup now year>> = [ drop listing-time ] [ nip number>string ] if - 5 ch'\s pad-head + 5 char: \s pad-head ] tri 3array " " join ; : read>string ( ? -- string ) "r" "-" ? ; inline diff --git a/basis/tools/hexdump/hexdump.factor b/basis/tools/hexdump/hexdump.factor index 0e72e0c9af..783baefe1a 100644 --- a/basis/tools/hexdump/hexdump.factor +++ b/basis/tools/hexdump/hexdump.factor @@ -13,21 +13,21 @@ IN: tools.hexdump CONSTANT: line-number "00000000 " : inc-line-number ( -- ) - 7 [ ch'0 = over 0 > and ] [ + 7 [ char: 0 = over 0 > and ] [ 1 - dup line-number [ { - { ch'9 [ ch'a ] } - { ch'f [ ch'0 ] } + { char: 9 [ char: a ] } + { char: f [ char: 0 ] } [ 1 + ] } case dup ] change-nth-unsafe ] do while drop ; : reset-line-number ( -- ) - 8 [ ch'0 swap line-number set-nth ] each-integer ; + 8 [ char: 0 swap line-number set-nth ] each-integer ; CONSTANT: hex-digits $[ - 256 [ >hex 2 ch'0 pad-head " " append ] map + 256 [ >hex 2 char: 0 pad-head " " append ] map ] : all-bytes ( bytes -- from to bytes ) @@ -41,12 +41,12 @@ CONSTANT: hex-digits $[ : write-space ( from to bytes stream -- ) [ drop - 16 + ] dip '[ - 3 * ch'\s _ stream-write + 3 * char: \s _ stream-write ] unless-zero ; inline : write-ascii ( from to bytes stream -- ) dup stream-bl '[ - [ printable? ] keep ch'. ? _ stream-write1 + [ printable? ] keep char: . ? 
_ stream-write1 ] each-byte ; inline TYPED: write-hex-line ( from: fixnum to: fixnum bytes: byte-array -- ) @@ -62,12 +62,12 @@ TYPED: write-hex-line ( from: fixnum to: fixnum bytes: byte-array -- ) len 16 /mod [ [ 16 * dup 16 + bytes write-hex-line ] each-integer ] [ [ len swap - len bytes write-hex-line ] unless-zero ] bi* - len >hex 8 ch'0 pad-head print ; + len >hex 8 char: 0 pad-head print ; : hexdump-stream ( stream -- ) reset-line-number 0 swap [ all-bytes [ write-hex-line ] [ length + ] bi - ] 16 (each-stream-block) >hex 8 ch'0 pad-head print ; + ] 16 (each-stream-block) >hex 8 char: 0 pad-head print ; PRIVATE> diff --git a/basis/tools/profiler/sampling/sampling.factor b/basis/tools/profiler/sampling/sampling.factor index fc0a7c2bda..f599a8ab0c 100644 --- a/basis/tools/profiler/sampling/sampling.factor +++ b/basis/tools/profiler/sampling/sampling.factor @@ -211,7 +211,7 @@ DEFER: (profile.) :: times. ( node -- ) node { - [ depth>> number>string 4 ch'\s pad-head write bl ] + [ depth>> number>string 4 char: \s pad-head write bl ] [ total-time>> duration. bl ] [ [ gc-time>> ] [ total-time>> ] bi percentage. bl ] [ [ jit-time>> ] [ total-time>> ] bi percentage. bl ] diff --git a/basis/tools/scaffold/scaffold.factor b/basis/tools/scaffold/scaffold.factor index dc6b9cb5c7..445a9d9fb6 100644 --- a/basis/tools/scaffold/scaffold.factor +++ b/basis/tools/scaffold/scaffold.factor @@ -31,7 +31,7 @@ ERROR: vocab-must-not-exist string ; [ check-root ] [ check-vocab-name ] bi* ; : replace-vocab-separators ( vocab -- path ) - path-separator first ch'. associate substitute ; inline + path-separator first char: . associate substitute ; inline : vocab-root/vocab>path ( vocab-root vocab -- path ) check-vocab-root/vocab @@ -104,7 +104,7 @@ ERROR: vocab-must-not-exist string ; : lookup-type ( string -- object/string ? ) "/f" ?tail swap - "new" ?head drop [ { [ ch'\' = ] [ digit? ] } 1|| ] trim-tail + "new" ?head drop [ { [ char: \' = ] [ digit? ] } 1|| ] trim-tail H{ { "object" object } { "obj" object } @@ -331,7 +331,7 @@ SYMBOL: nested-examples : example-using ( using -- ) " " join "example-using" [ - nested-examples get 4 0 ? ch'\s "example-indent" [ + nested-examples get 4 0 ? char: \s "example-indent" [ "${example-indent}\"Example:\" ${example-indent}{ $example \"USING: ${example-using} ;\" ${example-indent} \"\" diff --git a/basis/tools/test/test.factor b/basis/tools/test/test.factor index b4e8b75adc..7a4fdbc0c3 100644 --- a/basis/tools/test/test.factor +++ b/basis/tools/test/test.factor @@ -97,7 +97,7 @@ M: did-not-fail summary drop "Did not fail" ; : experiment-title ( word -- string ) "(" ?head drop ")" ?tail drop - H{ { ch'- ch'\s } } substitute >title ; + H{ { char: - char: \s } } substitute >title ; MACRO: ( word -- quot ) [ stack-effect in>> length dup ] diff --git a/basis/tools/trace/trace.factor b/basis/tools/trace/trace.factor index f82b440fd7..c32c32fda9 100644 --- a/basis/tools/trace/trace.factor +++ b/basis/tools/trace/trace.factor @@ -45,7 +45,7 @@ M: trace-step-state summary : print-depth ( continuation -- ) call>> callstack-depth - [ ch'\s write ] + [ char: \s write ] [ number>string write ": " write ] bi ; : trace-into? ( continuation -- ? ) diff --git a/basis/ui/backend/x11/x11.factor b/basis/ui/backend/x11/x11.factor index 5dd8d5ba5b..07eb14ca23 100644 --- a/basis/ui/backend/x11/x11.factor +++ b/basis/ui/backend/x11/x11.factor @@ -104,9 +104,9 @@ M: x11-ui-backend (free-pixel-format) : valid-input? ( string gesture -- ? ) over empty? [ 2drop f ] [ mods>> { f { S+ } } member? 
[ - [ { [ 127 = not ] [ ch'\s >= ] } 1&& ] all? + [ { [ 127 = not ] [ char: \s >= ] } 1&& ] all? ] [ - [ { [ 127 = not ] [ ch'\s >= ] [ alpha? not ] } 1&& ] all? + [ { [ 127 = not ] [ char: \s >= ] [ alpha? not ] } 1&& ] all? ] if ] if ; @@ -332,7 +332,7 @@ M: x11-ui-backend beep ( -- ) > [ [ gadget-text ] map ] map format-table - [ ch'\n , ] [ % ] interleave ; + [ char: \n , ] [ % ] interleave ; PRIVATE> diff --git a/basis/ui/gadgets/labels/labels.factor b/basis/ui/gadgets/labels/labels.factor index 6051277c54..8a03ca75e8 100644 --- a/basis/ui/gadgets/labels/labels.factor +++ b/basis/ui/gadgets/labels/labels.factor @@ -21,7 +21,7 @@ PREDICATE: string-array < array [ string? ] all? ; PRIVATE> : ?string-lines ( string -- string/array ) - ch'\n over member-eq? [ string-lines ] when ; + char: \n over member-eq? [ string-lines ] when ; ERROR: not-a-string object ; diff --git a/basis/ui/gadgets/panes/panes.factor b/basis/ui/gadgets/panes/panes.factor index ca16319637..5f20c91176 100644 --- a/basis/ui/gadgets/panes/panes.factor +++ b/basis/ui/gadgets/panes/panes.factor @@ -337,7 +337,7 @@ M: paragraph stream-write M: pack stream-write1 gadget-write1 ; M: paragraph stream-write1 - over ch'\s = + over char: \s = [ H{ } swap gadget-bl drop ] [ gadget-write1 ] if ; : empty-output? ( string style -- ? ) diff --git a/basis/ui/text/pango/pango.factor b/basis/ui/text/pango/pango.factor index c1f38a6c9c..f088afde30 100644 --- a/basis/ui/text/pango/pango.factor +++ b/basis/ui/text/pango/pango.factor @@ -120,7 +120,7 @@ SYMBOL: dpi : escape-nulls ( str -- str' ) ! Replace nulls with something else since Pango uses null-terminated ! strings - H{ { 0 ch'zero-width-no-break-space } } substitute ; + H{ { 0 char: zero-width-no-break-space } } substitute ; : unpack-selection ( layout string/selection -- layout ) dup selection? [ diff --git a/basis/ui/tools/listener/completion/completion.factor b/basis/ui/tools/listener/completion/completion.factor index 32050946a6..5d86de9683 100644 --- a/basis/ui/tools/listener/completion/completion.factor +++ b/basis/ui/tools/listener/completion/completion.factor @@ -19,7 +19,7 @@ SLOT: history : history-list ( interactor -- alist ) history>> elements>> - [ dup string>> H{ { ch'\n ch'\s } } substitute ] { } map>assoc + [ dup string>> H{ { char: \n char: \s } } substitute ] { } map>assoc ; : history-completions ( short interactor -- seq ) diff --git a/basis/ui/tools/listener/listener-docs.factor b/basis/ui/tools/listener/listener-docs.factor index 72284ced0d..7b42a72530 100644 --- a/basis/ui/tools/listener/listener-docs.factor +++ b/basis/ui/tools/listener/listener-docs.factor @@ -89,7 +89,7 @@ TIP: "When you mouse over certain objects, a black border will appear. Left-clic TIP: "The status bar displays stack effects of recognized words as they are being typed in." ; -TIP: "Press " { $command interactor "completion" code-completion-popup } " to complete word, vocabulary and Unicode character names. The latter two features become available if the cursor is after a " { $link postpone: \USE: } ", " { $link postpone: \USING: } " or " { $snippet "ch'" } "." ; +TIP: "Press " { $command interactor "completion" code-completion-popup } " to complete word, vocabulary and Unicode character names. The latter two features become available if the cursor is after a " { $link postpone: \USE: } ", " { $link postpone: \USING: } " or " { $snippet "char: " } "." ; TIP: "If a word's vocabulary is loaded, but not in the search path, you can use restarts to add the vocabulary to the search path. 
Auto-use mode (" { $command listener-gadget "toolbar" com-auto-use } ") invokes restarts automatically if there is only one restart." ; diff --git a/basis/ui/tools/listener/listener-tests.factor b/basis/ui/tools/listener/listener-tests.factor index 98633c0f1c..83ff72d649 100644 --- a/basis/ui/tools/listener/listener-tests.factor +++ b/basis/ui/tools/listener/listener-tests.factor @@ -154,7 +154,7 @@ CONSTANT: text "Hello world.\nThis is a test." { } [ "interactor" get evaluate-input ] unit-test -{ ch'H } [ "promise" get 2 seconds ?promise-timeout ] unit-test +{ char: H } [ "promise" get 2 seconds ?promise-timeout ] unit-test { } [ [ ] with-grafted-gadget ] unit-test diff --git a/basis/ui/tools/listener/listener.factor b/basis/ui/tools/listener/listener.factor index e063c62bcd..f201ad61d8 100644 --- a/basis/ui/tools/listener/listener.factor +++ b/basis/ui/tools/listener/listener.factor @@ -169,14 +169,14 @@ M: interactor stream-read1 dup interactor-read { { [ dup not ] [ 2drop f ] } { [ dup empty? ] [ drop stream-read1 ] } - { [ dup first empty? ] [ 2drop ch'\n ] } + { [ dup first empty? ] [ 2drop char: \n ] } [ nip first first ] } cond ; M: interactor stream-read-until ( seps stream -- seq sep/f ) swap '[ _ interactor-read [ - "\n" join ch'\n suffix + "\n" join char: \n suffix [ _ member? ] dupd find [ [ head ] when* ] dip dup not ] [ f f f ] if* diff --git a/basis/unicode/breaks/breaks.factor b/basis/unicode/breaks/breaks.factor index e2b886a48c..8377ed7866 100644 --- a/basis/unicode/breaks/breaks.factor +++ b/basis/unicode/breaks/breaks.factor @@ -66,8 +66,8 @@ CATEGORY: grapheme-control Zl Zp Cc Cf ; : control-class ( ch -- class ) { - { [ dup ch'\r = ] [ drop CR ] } - { [ dup ch'\n = ] [ drop LF ] } + { [ dup char: \r = ] [ drop CR ] } + { [ dup char: \n = ] [ drop LF ] } { [ dup 0x200C = ] [ drop Extend ] } { [ dup 0x200D = ] [ drop ZWJ ] } { [ dup "Other_Grapheme_Extend" property? ] [ drop Extend ] } diff --git a/basis/unicode/case/case.factor b/basis/unicode/case/case.factor index 052de62ed2..8279bea96a 100644 --- a/basis/unicode/case/case.factor +++ b/basis/unicode/case/case.factor @@ -28,8 +28,8 @@ SYMBOL: locale ! Just casing locale, or overall? [ [ { } ] [ [ dup first - { [ mark-above? ] [ ch'combining-ogonek = ] } 1|| - [ ch'combining-dot-above prefix ] when + { [ mark-above? ] [ char: combining-ogonek = ] } 1|| + [ char: combining-dot-above prefix ] when ] map ] if-empty ] with-rest ; inline @@ -46,24 +46,24 @@ SYMBOL: locale ! Just casing locale, or overall? : fix-sigma-end ( string -- string ) [ "" ] [ - dup last ch'greek-small-letter-sigma = - [ but-last ch'greek-small-letter-final-sigma suffix ] when + dup last char: greek-small-letter-sigma = + [ but-last char: greek-small-letter-final-sigma suffix ] when ] if-empty ; inline ! this duplicate unicode to prevent dependencies CATEGORY-NOT: (uncased) Lu Ll Lt Lm Mn Me ; : sigma-map ( string -- string ) - { ch'greek-capital-letter-sigma } split [ [ - [ { ch'greek-small-letter-sigma } ] [ + { char: greek-capital-letter-sigma } split [ [ + [ { char: greek-small-letter-sigma } ] [ dup first (uncased)? - ch'greek-small-letter-final-sigma - ch'greek-small-letter-sigma ? prefix + char: greek-small-letter-final-sigma + char: greek-small-letter-sigma ? prefix ] if-empty ] map ] with-rest concat fix-sigma-end ; inline : final-sigma ( string -- string ) - ch'greek-capital-letter-sigma + char: greek-capital-letter-sigma over member? 
[ sigma-map ] when "" like ; inline diff --git a/basis/unicode/collation/collation-tests.factor b/basis/unicode/collation/collation-tests.factor index f1a4a97a12..135c6562c1 100644 --- a/basis/unicode/collation/collation-tests.factor +++ b/basis/unicode/collation/collation-tests.factor @@ -31,7 +31,7 @@ IN: unicode.collation.tests ";" split1 [ " " split [ hex> ] map ] [ - "#" split1 nip ch'\[ tail-from-last + "#" split1 nip char: \[ tail-from-last "]" split1 drop "|" split 4 head [ " " split harvest [ hex> ] map ] map diff --git a/basis/unicode/collation/collation.factor b/basis/unicode/collation/collation.factor index 1354b436dc..9bfc3b295e 100644 --- a/basis/unicode/collation/collation.factor +++ b/basis/unicode/collation/collation.factor @@ -19,7 +19,7 @@ TUPLE: weight-levels primary secondary tertiary ignorable? ; : parse-weight ( string -- weight ) "]" split but-last [ - weight-levels new swap rest unclip ch'* = swapd >>ignorable? + weight-levels new swap rest unclip char: * = swapd >>ignorable? swap "." split first3 [ hex> ] tri@ [ >>primary ] [ >>secondary ] [ >>tertiary ] tri* ] map ; diff --git a/basis/unicode/data/data.factor b/basis/unicode/data/data.factor index 44e7818dbd..1c1ee7b66f 100644 --- a/basis/unicode/data/data.factor +++ b/basis/unicode/data/data.factor @@ -153,7 +153,7 @@ PRIVATE> : process-names ( data -- names-hash ) 1 swap (process-data) [ - >lower H{ { ch'\s ch'- } } substitute swap + >lower H{ { char: \s char: - } } substitute swap ] H{ } assoc-map-as ; : multihex ( hexstring -- string ) diff --git a/basis/unicode/normalize/normalize-tests.factor b/basis/unicode/normalize/normalize-tests.factor index c2b38692ae..42c6a92f1b 100644 --- a/basis/unicode/normalize/normalize-tests.factor +++ b/basis/unicode/normalize/normalize-tests.factor @@ -33,7 +33,7 @@ IN: unicode.normalize.tests 2 [ first2 [ first ] dip 2array ] map values [ [ - "#@" split first [ ch'\s = ] trim-tail ";" split harvest + "#@" split first [ char: \s = ] trim-tail ";" split harvest [ " " split [ hex> ] "" map-as ] map ] map ] map concat ; diff --git a/basis/unicode/script/script-tests.factor b/basis/unicode/script/script-tests.factor index 20f5718794..2cdff1178e 100644 --- a/basis/unicode/script/script-tests.factor +++ b/basis/unicode/script/script-tests.factor @@ -1,4 +1,4 @@ USING: unicode tools.test ; -{ "Latin" } [ ch'a script-of ] unit-test +{ "Latin" } [ char: a script-of ] unit-test { "Common" } [ 0 script-of ] unit-test diff --git a/basis/unix/linux/proc/proc.factor b/basis/unix/linux/proc/proc.factor index f39d7e2c15..e655f04c5d 100644 --- a/basis/unix/linux/proc/proc.factor +++ b/basis/unix/linux/proc/proc.factor @@ -55,16 +55,16 @@ ERROR: unknown-cpuinfo-line string ; : line>processor-info ( processor-info string -- processor-info ) ":" split first2 swap - [ ch'\t = ] trim-tail [ [ ch'\s = ] trim ] bi@ + [ char: \t = ] trim-tail [ [ char: \s = ] trim ] bi@ { { "address sizes" [ - "," split [ [ ch'\s = ] trim " " split first string>number ] map + "," split [ [ char: \s = ] trim " " split first string>number ] map >>address-sizes ] } { "apicid" [ string>number >>apicid ] } { "bogomips" [ string>number >>bogomips ] } { "cache size" [ - " " split first [ ch'\s = ] trim + " " split first [ char: \s = ] trim string>number 1024 * >>cache-size ] } { "cache_alignment" [ string>number >>cache-alignment ] } @@ -208,7 +208,7 @@ TUPLE: proc-cpu-stat name user nice system idle iowait irq softirq steal guest g : line>cpu ( string -- cpu ) " " split unclip-slice - [ [ [ ch'\s = ] trim string>number ] 
map ] dip prefix + [ [ [ char: \s = ] trim string>number ] map ] dip prefix [ proc-cpu-stat boa ] inputhex >upper 2 ch'0 pad-head % ] each ; + [ char: % , >hex >upper 2 char: 0 pad-head % ] each ; : (url-encode) ( str quot: ( ch -- ? ) -- encoded ) '[ [ dup @ [ , ] [ push-utf8 ] if ] each ] "" make ; inline @@ -62,7 +62,7 @@ PRIVATE> 2dup length >= [ 2drop ] [ - 2dup nth dup ch'% = [ + 2dup nth dup char: % = [ drop 2dup url-decode-hex [ 3 + ] dip ] [ , [ 1 + ] dip diff --git a/basis/uuid/uuid.factor b/basis/uuid/uuid.factor index 4abe3c8c29..aeb6b5d974 100644 --- a/basis/uuid/uuid.factor +++ b/basis/uuid/uuid.factor @@ -41,14 +41,14 @@ IN: uuid ] dip 76 shift bitor ; : uuid>string ( n -- string ) - >hex 32 ch'0 pad-head - [ ch'- 20 ] dip insert-nth - [ ch'- 16 ] dip insert-nth - [ ch'- 12 ] dip insert-nth - [ ch'- 8 ] dip insert-nth ; + >hex 32 char: 0 pad-head + [ char: - 20 ] dip insert-nth + [ char: - 16 ] dip insert-nth + [ char: - 12 ] dip insert-nth + [ char: - 8 ] dip insert-nth ; : string>uuid ( string -- n ) - [ ch'- = ] reject hex> ; + [ char: - = ] reject hex> ; PRIVATE> diff --git a/basis/validators/validators.factor b/basis/validators/validators.factor index eeb836e326..769160743c 100644 --- a/basis/validators/validators.factor +++ b/basis/validators/validators.factor @@ -98,7 +98,7 @@ IN: validators : v-credit-card ( str -- n ) "- " without - dup ch'0 ch'9 [a,b] diff empty? [ + dup char: 0 char: 9 [a,b] diff empty? [ 13 v-min-length 16 v-max-length dup luhn? [ string>number ] [ diff --git a/basis/vocabs/hierarchy/hierarchy.factor b/basis/vocabs/hierarchy/hierarchy.factor index 6256920362..db90d8c364 100644 --- a/basis/vocabs/hierarchy/hierarchy.factor +++ b/basis/vocabs/hierarchy/hierarchy.factor @@ -61,7 +61,7 @@ ERROR: vocab-root-required root ; : unrooted-disk-vocabs ( prefix -- seq ) [ loaded-vocab-names no-rooted ] dip - dup empty? [ ch'. suffix ] unless + dup empty? [ char: . suffix ] unless '[ vocab-name _ one-level-only? ] filter ; : unrooted-disk-vocabs-recursive ( prefix -- seq ) diff --git a/basis/windows/ole32/ole32.factor b/basis/windows/ole32/ole32.factor index 1a26e9e220..7256a221bc 100644 --- a/basis/windows/ole32/ole32.factor +++ b/basis/windows/ole32/ole32.factor @@ -131,20 +131,20 @@ CONSTANT: GUID-STRING-LENGTH : guid>string ( guid -- string ) [ [ "{" ] dip { - [ Data1>> >hex 8 ch'0 pad-head "-" ] - [ Data2>> >hex 4 ch'0 pad-head "-" ] - [ Data3>> >hex 4 ch'0 pad-head "-" ] + [ Data1>> >hex 8 char: 0 pad-head "-" ] + [ Data2>> >hex 4 char: 0 pad-head "-" ] + [ Data3>> >hex 4 char: 0 pad-head "-" ] [ Data4>> [ { - [ >hex 2 ch'0 pad-head ] - [ >hex 2 ch'0 pad-head "-" ] - [ >hex 2 ch'0 pad-head ] - [ >hex 2 ch'0 pad-head ] - [ >hex 2 ch'0 pad-head ] - [ >hex 2 ch'0 pad-head ] - [ >hex 2 ch'0 pad-head ] - [ >hex 2 ch'0 pad-head ] + [ >hex 2 char: 0 pad-head ] + [ >hex 2 char: 0 pad-head "-" ] + [ >hex 2 char: 0 pad-head ] + [ >hex 2 char: 0 pad-head ] + [ >hex 2 char: 0 pad-head ] + [ >hex 2 char: 0 pad-head ] + [ >hex 2 char: 0 pad-head ] + [ >hex 2 char: 0 pad-head ] } spread ] input ] unless ; inline + dup string? [ char: \s ] unless ; inline PRIVATE> diff --git a/basis/xml/autoencoding/autoencoding.factor b/basis/xml/autoencoding/autoencoding.factor index 91368e4ce1..eaba50e420 100644 --- a/basis/xml/autoencoding/autoencoding.factor +++ b/basis/xml/autoencoding/autoencoding.factor @@ -52,8 +52,8 @@ IN: xml.autoencoding ! What if first letter of processing instruction is non-ASCII? get-next { { 0 [ next next start-utf16le ] } - { ch'? 
[ go-utf8 instruct dup instruct-encoding ] } - { ch'\! [ go-utf8 direct ] } + { char: ? [ go-utf8 instruct dup instruct-encoding ] } + { char: \! [ go-utf8 direct ] } [ check start ; diff --git a/basis/xml/elements/elements.factor b/basis/xml/elements/elements.factor index 5b92bb27fb..a9a6c11b5c 100644 --- a/basis/xml/elements/elements.factor +++ b/basis/xml/elements/elements.factor @@ -8,7 +8,7 @@ IN: xml.elements : take-interpolated ( quot -- interpolated ) interpolating? get [ - drop get-char ch'> eq? + drop get-char char: > eq? [ next f ] [ "->" take-string [ blank? ] trim ] if @@ -19,7 +19,7 @@ IN: xml.elements : start-tag ( -- name ? ) ! Outputs the name and whether this is a closing tag - get-char ch'/ eq? dup [ next ] when + get-char char: / eq? dup [ next ] when parse-name swap ; : assure-no-duplicates ( attrs-alist -- attrs-alist ) @@ -29,7 +29,7 @@ IN: xml.elements : parse-attr ( -- array ) parse-name pass-blank "=" expect pass-blank - get-char ch'< eq? + get-char char: < eq? [ "<-" expect interpolate-quote ] [ t parse-quote* ] if 2array ; @@ -40,7 +40,7 @@ IN: xml.elements dup length 1 > [ assure-no-duplicates ] when ; : end-tag ( name attrs-alist -- tag ) - tag-ns pass-blank get-char ch'/ eq? + tag-ns pass-blank get-char char: / eq? [ pop-ns next ">" expect ] [ depth inc close ] if ; @@ -109,9 +109,9 @@ DEFER: make-tag ! Is this unavoidable? : dtd-loop ( -- ) pass-blank get-char { - { ch'\] [ next ] } - { ch'% [ expand-pe ] } - { ch'< [ + { char: \] [ next ] } + { char: % [ expand-pe ] } + { char: < [ next make-tag dup dtd-acceptable? [ bad-doctype ] unless , dtd-loop ] } @@ -132,7 +132,7 @@ DEFER: make-tag ! Is this unavoidable? [ take-external-id ] [ f ] if ; : take-internal ( -- dtd/f ) - get-char ch'\[ eq? + get-char char: \[ eq? [ next take-internal-subset ] [ f ] if ; : take-doctype-decl ( -- doctype-decl ) @@ -151,8 +151,8 @@ DEFER: make-tag ! Is this unavoidable? : direct ( -- object ) get-char { - { ch'- [ take-comment ] } - { ch'\[ [ take-cdata ] } + { char: - [ take-comment ] } + { char: \[ [ take-cdata ] } [ drop take-directive ] } case ; @@ -166,8 +166,8 @@ DEFER: make-tag ! Is this unavoidable? : make-tag ( -- tag ) get-char { - { ch'\! [ next direct ] } - { ch'? [ next instruct ] } - { ch'- [ next interpolate-tag ] } + { char: \! [ next direct ] } + { char: ? 
[ next instruct ] } + { char: - [ next interpolate-tag ] } [ drop normal-tag ] } case ; diff --git a/basis/xml/entities/entities.factor b/basis/xml/entities/entities.factor index 50a3e7f574..c56fb58218 100644 --- a/basis/xml/entities/entities.factor +++ b/basis/xml/entities/entities.factor @@ -6,17 +6,17 @@ IN: xml.entities CONSTANT: entities-out H{ - { ch'< "<" } - { ch'> ">" } - { ch'& "&" } + { char: < "<" } + { char: > ">" } + { char: & "&" } } CONSTANT: quoted-entities-out H{ - { ch'& "&" } - { ch'\' "'" } - { ch'\" """ } - { ch'< "<" } + { char: & "&" } + { char: \' "'" } + { char: \" """ } + { char: < "<" } } : escape-string-by ( str table -- escaped ) @@ -31,11 +31,11 @@ CONSTANT: quoted-entities-out CONSTANT: entities H{ - { "lt" ch'< } - { "gt" ch'> } - { "amp" ch'& } - { "apos" ch'\' } - { "quot" ch'\" } + { "lt" char: < } + { "gt" char: > } + { "amp" char: & } + { "apos" char: \' } + { "quot" char: \" } } : with-entities ( entities quot -- ) diff --git a/basis/xml/name/name.factor b/basis/xml/name/name.factor index be980c6c09..580eaa7d24 100644 --- a/basis/xml/name/name.factor +++ b/basis/xml/name/name.factor @@ -65,8 +65,8 @@ SYMBOL: ns-stack } 2&& [ f ] [ 2drop f ] if ; : prefixed-name ( str -- name/f ) - ch'\: over index [ - ch'\: 2over 1 + swap index-from + char: \: over index [ + char: \: 2over 1 + swap index-from [ 2drop f ] [ [ head ] [ 1 + tail ] 2bi maybe-name ] if diff --git a/basis/xml/tests/state-parser-tests.factor b/basis/xml/tests/state-parser-tests.factor index 3a5e87d9b7..d33b15ed11 100644 --- a/basis/xml/tests/state-parser-tests.factor +++ b/basis/xml/tests/state-parser-tests.factor @@ -13,6 +13,6 @@ IN: xml.test.state [ "hello" ] [ "hello" [ take-rest ] string-parse ] unit-test [ 2 3 ] [ "12\n123" [ take-rest drop get-line get-column ] string-parse ] unit-test [ "hi" " how are you?" ] [ "hi how are you?" [ [ blank? ] take-until take-rest ] string-parse ] unit-test -[ "foo" ";bar" ] [ "foo;bar" [ ch'\; take-char take-rest ] string-parse ] unit-test +[ "foo" ";bar" ] [ "foo;bar" [ char: \; take-char take-rest ] string-parse ] unit-test [ "foo " " bar" ] [ "foo and bar" [ "and" take-string take-rest ] string-parse ] unit-test [ "baz" ] [ " \n\t baz" [ pass-blank take-rest ] string-parse ] unit-test diff --git a/basis/xml/tokenize/tokenize.factor b/basis/xml/tokenize/tokenize.factor index 8798c894b5..c30d8a41cb 100644 --- a/basis/xml/tokenize/tokenize.factor +++ b/basis/xml/tokenize/tokenize.factor @@ -24,7 +24,7 @@ HINTS: assure-good-char { spot fixnum } ; : record ( spot char -- spot ) over char>> [ - ch'\n eq? + char: \n eq? [ [ 1 + ] change-line -1 ] [ dup column>> 1 + ] if >>column ] [ drop ] if ; @@ -34,9 +34,9 @@ HINTS: record { spot fixnum } ; :: (next) ( spot -- spot char ) spot next>> :> old-next spot stream>> stream-read1 :> new-next - old-next ch'\r eq? [ - spot ch'\n >>char - new-next ch'\n eq? + old-next char: \r eq? [ + spot char: \n >>char + new-next char: \n eq? [ spot stream>> stream-read1 >>next ] [ new-next >>next ] if ] [ spot old-next >>char new-next >>next ] if @@ -134,11 +134,11 @@ HINTS: next* { spot } ; { { [ char not ] [ ] } { [ char quot call ] [ spot next* ] } - { [ char ch'& eq? ] [ + { [ char char: & eq? ] [ accum parse-entity quot accum spot (parse-char) ] } - { [ char ch'% eq? [ in-dtd? get ] [ f ] if ] [ + { [ char char: % eq? [ in-dtd? get ] [ f ] if ] [ accum parse-pe quot accum spot (parse-char) ] } @@ -162,23 +162,23 @@ HINTS: next* { spot } ; |[ char | pos char assure-no-double-bracket pos! no-text [ - char blank? 
char ch'< eq? or [ + char blank? char char: < eq? or [ char 1string t pre/post-content ] unless ] when - char ch'< eq? + char char: < eq? ] parse-char ; : close ( -- ) pass-blank ">" expect ; : normalize-quote ( str -- str ) - [ dup "\t\r\n" member? [ drop ch'\s ] when ] map! ; + [ dup "\t\r\n" member? [ drop char: \s ] when ] map! ; : (parse-quote) ( <-disallowed? ch -- string ) swap '[ dup _ eq? [ drop t ] - [ ch'< eq? _ and [ attr-w/< ] [ f ] if ] if + [ char: < eq? _ and [ attr-w/< ] [ f ] if ] if ] parse-char normalize-quote get-char [ unclosed-quote ] unless ; inline diff --git a/basis/xml/writer/writer.factor b/basis/xml/writer/writer.factor index 3c1e9728e9..543fa2f5b4 100644 --- a/basis/xml/writer/writer.factor +++ b/basis/xml/writer/writer.factor @@ -48,7 +48,7 @@ PRIVATE> > print-attrs ; : write-start-tag ( tag -- ) @@ -84,7 +84,7 @@ M: contained-tag write-xml [ write-xml ] each unindent ; : write-end-tag ( tag -- ) - ?indent " write1 ; + ?indent " write1 ; M: open-tag write-xml xml-pprint? get [ @@ -148,7 +148,7 @@ M: doctype-decl write-xml ">" write ; M: directive write-xml - "> write ch'> write1 nl ; + "> write char: > write1 nl ; M: instruction write-xml "> write "?>" write ; diff --git a/core/alien/c-types/c-types.factor b/core/alien/c-types/c-types.factor index 734d1c2e02..65eeaf72ca 100644 --- a/core/alien/c-types/c-types.factor +++ b/core/alien/c-types/c-types.factor @@ -503,7 +503,7 @@ M: double-2-rep rep-component-type drop double ; GENERIC: pointer-string ( pointer -- string/f ) M: object pointer-string drop f ; M: word pointer-string name>> ; -M: pointer pointer-string to>> pointer-string [ ch'* suffix ] [ f ] if* ; +M: pointer pointer-string to>> pointer-string [ char: * suffix ] [ f ] if* ; GENERIC: c-type-string ( c-type -- string ) diff --git a/core/alien/libraries/finder/finder.factor b/core/alien/libraries/finder/finder.factor index 63adfb4b77..77ac327219 100644 --- a/core/alien/libraries/finder/finder.factor +++ b/core/alien/libraries/finder/finder.factor @@ -53,7 +53,7 @@ CONSTANT: mach-map { mach-map cpu of { "libc6" } or ; : name-matches? ( lib triple -- ? ) - first swap ?head [ ?first ch'. = ] [ drop f ] if ; + first swap ?head [ ?first char: . = ] [ drop f ] if ; : arch-matches? ( lib triple -- ? ) [ drop ldconfig-arch ] [ second swap subset? ] bi* ; diff --git a/core/alien/libraries/freebsd/freebsd.factor b/core/alien/libraries/freebsd/freebsd.factor index 01865a243f..91eacb687b 100644 --- a/core/alien/libraries/freebsd/freebsd.factor +++ b/core/alien/libraries/freebsd/freebsd.factor @@ -16,7 +16,7 @@ IN: alien.libraries.finder.freebsd rest parse-ldconfig-lines ; : name-matches? ( lib double -- ? ) - first swap ?head [ ?first ch'. = ] [ drop f ] if ; + first swap ?head [ ?first char: . = ] [ drop f ] if ; PRIVATE> diff --git a/core/ascii/ascii-tests.factor b/core/ascii/ascii-tests.factor index 97a800cd4d..a4e7da915e 100644 --- a/core/ascii/ascii-tests.factor +++ b/core/ascii/ascii-tests.factor @@ -1,18 +1,18 @@ USING: ascii kernel math sequences strings tools.test ; -{ t } [ ch'a letter? ] unit-test -{ f } [ ch'A letter? ] unit-test -{ f } [ ch'a LETTER? ] unit-test -{ t } [ ch'A LETTER? ] unit-test -{ t } [ ch'0 digit? ] unit-test -{ f } [ ch'x digit? ] unit-test +{ t } [ char: a letter? ] unit-test +{ f } [ char: A letter? ] unit-test +{ f } [ char: a LETTER? ] unit-test +{ t } [ char: A LETTER? ] unit-test +{ t } [ char: 0 digit? ] unit-test +{ f } [ char: x digit? ] unit-test { 4 } [ 0 "There are Four Upper Case characters" [ LETTER? 
[ 1 + ] when ] each ] unit-test -{ t f } [ ch'\s ascii? 400 ascii? ] unit-test +{ t f } [ char: \s ascii? 400 ascii? ] unit-test { "HELLO HOW ARE YOU?" } [ "hellO hOw arE YOU?" >upper ] unit-test { "i'm good thx bai" } [ "I'm Good THX bai" >lower ] unit-test diff --git a/core/ascii/ascii.factor b/core/ascii/ascii.factor index 7e7340dd80..d49d7961f0 100644 --- a/core/ascii/ascii.factor +++ b/core/ascii/ascii.factor @@ -6,10 +6,10 @@ IN: ascii : ascii? ( ch -- ? ) 0 127 between? ; inline : blank? ( ch -- ? ) " \t\n\r" member? ; inline -: letter? ( ch -- ? ) ch'a ch'z between? ; inline -: LETTER? ( ch -- ? ) ch'A ch'Z between? ; inline -: digit? ( ch -- ? ) ch'0 ch'9 between? ; inline -: printable? ( ch -- ? ) ch'\s ch'~ between? ; inline +: letter? ( ch -- ? ) char: a char: z between? ; inline +: LETTER? ( ch -- ? ) char: A char: Z between? ; inline +: digit? ( ch -- ? ) char: 0 char: 9 between? ; inline +: printable? ( ch -- ? ) char: \s char: ~ between? ; inline : control? ( ch -- ? ) { [ 0 0x1F between? ] [ 0x7F = ] } 1|| ; inline : quotable? ( ch -- ? ) { [ printable? ] [ "\"\\" member? not ] } 1&& ; inline : Letter? ( ch -- ? ) { [ letter? ] [ LETTER? ] } 1|| ; inline diff --git a/core/checksums/checksums.factor b/core/checksums/checksums.factor index b055d749ed..d01a1518dc 100644 --- a/core/checksums/checksums.factor +++ b/core/checksums/checksums.factor @@ -18,7 +18,7 @@ M: checksum checksum-stream [ stream-contents ] dip checksum-bytes ; M: checksum checksum-lines - [ B{ ch'\n } join ] dip checksum-bytes ; + [ B{ char: \n } join ] dip checksum-bytes ; : checksum-file ( path checksum -- value ) [ binary ] dip checksum-stream ; @@ -45,7 +45,7 @@ GENERIC: get-checksum ( checksum-state -- value ) [ [ add-checksum-bytes ] each-block ] with-input-stream ; : add-checksum-lines ( checksum-state lines -- checksum-state ) - [ B{ ch'\n } add-checksum-bytes ] + [ B{ char: \n } add-checksum-bytes ] [ add-checksum-bytes ] interleave ; : add-checksum-file ( checksum-state path -- checksum-state ) diff --git a/core/checksums/crc32/crc32.factor b/core/checksums/crc32/crc32.factor index 9bdcc58b4a..32cff0ca12 100644 --- a/core/checksums/crc32/crc32.factor +++ b/core/checksums/crc32/crc32.factor @@ -36,5 +36,5 @@ M: crc32 checksum-bytes M: crc32 checksum-lines init-crc32 - [ [ (crc32) ] each ch'\n (crc32) ] each + [ [ (crc32) ] each char: \n (crc32) ] each finish-crc32 ; inline diff --git a/core/effects/effects.factor b/core/effects/effects.factor index 15dca5c623..4efeb72579 100644 --- a/core/effects/effects.factor +++ b/core/effects/effects.factor @@ -64,10 +64,10 @@ M: pair effect>string ] if ; : stack-picture% ( seq -- ) - [ effect>string % ch'\s , ] each ; + [ effect>string % char: \s , ] each ; : var-picture% ( var -- ) - [ ".." % % ch'\s , ] when* ; + [ ".." 
% % char: \s , ] when* ; M: effect effect>string ( effect -- string ) [ diff --git a/core/io/encodings/ascii/ascii-tests.factor b/core/io/encodings/ascii/ascii-tests.factor index 1506c6b8d1..bcec0b1a8f 100644 --- a/core/io/encodings/ascii/ascii-tests.factor +++ b/core/io/encodings/ascii/ascii-tests.factor @@ -1,9 +1,9 @@ USING: arrays io.encodings.ascii io.encodings.string strings tools.test ; -{ B{ ch'f ch'o ch'o } } [ "foo" ascii encode ] unit-test +{ B{ char: f char: o char: o } } [ "foo" ascii encode ] unit-test [ { 128 } >string ascii encode ] must-fail { B{ 127 } } [ { 127 } >string ascii encode ] unit-test { "bar" } [ "bar" ascii decode ] unit-test -{ { ch'b 0xfffd ch'r } } [ B{ ch'b 233 ch'r } ascii decode >array ] unit-test +{ { char: b 0xfffd char: r } } [ B{ char: b 233 char: r } ascii decode >array ] unit-test diff --git a/core/io/encodings/encodings-tests.factor b/core/io/encodings/encodings-tests.factor index 27bfb9b41b..1dafc69975 100644 --- a/core/io/encodings/encodings-tests.factor +++ b/core/io/encodings/encodings-tests.factor @@ -52,7 +52,7 @@ unit-test ] unit-test { - ch'1 + char: 1 } [ "Hello world\r\n1234" dup stream-readln drop diff --git a/core/io/encodings/encodings.factor b/core/io/encodings/encodings.factor index 73933f0259..207651d512 100644 --- a/core/io/encodings/encodings.factor +++ b/core/io/encodings/encodings.factor @@ -106,7 +106,7 @@ M: decoder stream-element-type : fix-cr ( decoder c -- c' ) over cr>> [ - over cr- dup ch'\n eq? [ drop (read1) ] [ nip ] if + over cr- dup char: \n eq? [ drop (read1) ] [ nip ] if ] [ nip ] if ; inline M: decoder stream-read1 ( decoder -- ch ) @@ -152,8 +152,8 @@ M: decoder stream-contents* : handle-readln ( stream str ch -- str ) { { f [ line-ends/eof ] } - { ch'\r [ line-ends\r ] } - { ch'\n [ line-ends\n ] } + { char: \r [ line-ends\r ] } + { char: \n [ line-ends\n ] } } case ; inline M: decoder stream-read-until @@ -161,13 +161,13 @@ M: decoder stream-read-until dup cr- 2dup >decoder< decode-until over [ - dup ch'\n = [ + dup char: \n = [ 2drop stream-read-until ] [ 2nipd ] if ] [ - first-unsafe ch'\n = [ [ rest ] dip ] when + first-unsafe char: \n = [ [ rest ] dip ] when 2nipd ] if-empty ] [ diff --git a/core/io/encodings/utf16/utf16-tests.factor b/core/io/encodings/utf16/utf16-tests.factor index f1bd18e6f5..70b42ceddc 100644 --- a/core/io/encodings/utf16/utf16-tests.factor +++ b/core/io/encodings/utf16/utf16-tests.factor @@ -3,26 +3,26 @@ USING: arrays io.encodings.string io.encodings.utf16 strings tools.test ; -{ { ch'x } } [ B{ 0 ch'x } utf16be decode >array ] unit-test +{ { char: x } } [ B{ 0 char: x } utf16be decode >array ] unit-test { { 0x1D11E } } [ B{ 0xD8 0x34 0xDD 0x1E } utf16be decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0b11011111 ch'q } utf16be decode >array ] unit-test -{ { ch'replacement-character } } [ B{ 0b11011011 ch'x 0b11011011 ch'x } utf16be decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0b11011111 char: q } utf16be decode >array ] unit-test +{ { char: replacement-character } } [ B{ 0b11011011 char: x 0b11011011 char: x } utf16be decode >array ] unit-test -{ { 0 120 216 52 221 30 } } [ { ch'x 0x1d11e } >string utf16be encode >array ] unit-test +{ { 0 120 216 52 221 30 } } [ { char: x 0x1d11e } >string utf16be encode >array ] unit-test -{ { ch'x } } [ B{ ch'x 0 } utf16le decode >array ] unit-test +{ { char: x } } [ B{ char: x 0 } utf16le decode >array ] unit-test { { 119070 } } [ B{ 0x34 0xD8 0x1E 0xDD } >string utf16le decode >array ] unit-test -{ { 
ch'replacement-character } } [ { 0 0b11011111 } >string utf16le decode >array ] unit-test -{ { ch'replacement-character } } [ { 0 0b11011011 0 0 } >string utf16le decode >array ] unit-test +{ { char: replacement-character } } [ { 0 0b11011111 } >string utf16le decode >array ] unit-test +{ { char: replacement-character } } [ { 0 0b11011011 0 0 } >string utf16le decode >array ] unit-test -{ { 120 0 52 216 30 221 } } [ { ch'x 0x1d11e } >string utf16le encode >array ] unit-test +{ { 120 0 52 216 30 221 } } [ { char: x 0x1d11e } >string utf16le encode >array ] unit-test -{ { ch'x } } [ B{ 0xff 0xfe ch'x 0 } utf16 decode >array ] unit-test -{ { ch'x } } [ B{ 0xfe 0xff 0 ch'x } utf16 decode >array ] unit-test +{ { char: x } } [ B{ 0xff 0xfe char: x 0 } utf16 decode >array ] unit-test +{ { char: x } } [ B{ 0xfe 0xff 0 char: x } utf16 decode >array ] unit-test -{ { 0xff 0xfe 120 0 52 216 30 221 } } [ { ch'x 0x1d11e } >string utf16 encode >array ] unit-test +{ { 0xff 0xfe 120 0 52 216 30 221 } } [ { char: x 0x1d11e } >string utf16 encode >array ] unit-test ! test ascii encoding path -{ B{ ch'a 0 ch'b 0 ch'c 0 } } [ "abc" utf16le encode ] unit-test -{ B{ 0 ch'a 0 ch'b 0 ch'c } } [ "abc" utf16be encode ] unit-test +{ B{ char: a 0 char: b 0 char: c 0 } } [ "abc" utf16le encode ] unit-test +{ B{ 0 char: a 0 char: b 0 char: c } } [ "abc" utf16be encode ] unit-test diff --git a/core/io/encodings/utf8/utf8-tests.factor b/core/io/encodings/utf8/utf8-tests.factor index cf1017f65b..d7a1551b7f 100644 --- a/core/io/encodings/utf8/utf8-tests.factor +++ b/core/io/encodings/utf8/utf8-tests.factor @@ -8,18 +8,18 @@ IN: io.encodings.utf8.tests : encode-utf8-w/stream ( array -- newarray ) >string utf8 encode >array ; -{ { ch'replacement-character } } [ { 0b11110,101 0b10,111111 0b10,000000 0b11111111 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b11110,101 0b10,111111 0b10,000000 0b11111111 } decode-utf8-w/stream ] unit-test { "x" } [ "x" decode-utf8-w/stream >string ] unit-test { { 0b11111000000 } } [ { 0b110,11111 0b10,000000 } decode-utf8-w/stream >array ] unit-test -{ { ch'replacement-character } } [ { 0b10000000 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b10000000 } decode-utf8-w/stream ] unit-test { { 0b1111000000111111 } } [ { 0b1110,1111 0b10,000000 0b10,111111 } decode-utf8-w/stream >array ] unit-test -{ { 0b11110,101 0b10,111111 0b10,000000 0b10,111111 0b1110,1111 0b10,000000 0b10,111111 0b110,11111 0b10,000000 ch'x } } -[ { 0b101111111000000111111 0b1111000000111111 0b11111000000 ch'x } encode-utf8-w/stream ] unit-test +{ { 0b11110,101 0b10,111111 0b10,000000 0b10,111111 0b1110,1111 0b10,000000 0b10,111111 0b110,11111 0b10,000000 char: x } } +[ { 0b101111111000000111111 0b1111000000111111 0b11111000000 char: x } encode-utf8-w/stream ] unit-test { 3 } [ 1 "日本語" >utf8-index ] unit-test { 3 } [ 9 "日本語" utf8-index> ] unit-test @@ -28,16 +28,16 @@ IN: io.encodings.utf8.tests { V{ } } [ 100000 [ [ code-point-length ] [ 1string utf8 encode length ] bi = ] reject ] unit-test -{ { ch'replacement-character } } [ { 0b110,00000 0b10,000000 } decode-utf8-w/stream ] unit-test -{ { ch'replacement-character } } [ { 0b110,00001 0b10,111111 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b110,00000 0b10,000000 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b110,00001 0b10,111111 } decode-utf8-w/stream ] unit-test { { 0x80 } } [ { 0b110,00010 0b10,000000 } decode-utf8-w/stream ] unit-test -{ { 
ch'replacement-character } } [ { 0b1110,0000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test -{ { ch'replacement-character } } [ { 0b1110,0000 0b10,011111 0b10,111111 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b1110,0000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b1110,0000 0b10,011111 0b10,111111 } decode-utf8-w/stream ] unit-test { { 0x800 } } [ { 0b1110,0000 0b10,100000 0b10,000000 } decode-utf8-w/stream ] unit-test -{ { ch'replacement-character } } [ { 0b11110,000 0b10,000000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test -{ { ch'replacement-character } } [ { 0b11110,000 0b10,001111 0b10,111111 0b10,111111 } decode-utf8-w/stream ] unit-test -{ { ch'replacement-character } } [ { 0b11110,100 0b10,010000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b11110,000 0b10,000000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b11110,000 0b10,001111 0b10,111111 0b10,111111 } decode-utf8-w/stream ] unit-test +{ { char: replacement-character } } [ { 0b11110,100 0b10,010000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test { { 0x10000 } } [ { 0b11110,000 0b10,010000 0b10,000000 0b10,000000 } decode-utf8-w/stream ] unit-test { { 0x10FFFF } } [ { 0b11110,100 0b10,001111 0b10,111111 0b10,111111 } decode-utf8-w/stream ] unit-test diff --git a/core/io/files/files-tests.factor b/core/io/files/files-tests.factor index 7b1188a6b7..3aab39d7aa 100644 --- a/core/io/files/files-tests.factor +++ b/core/io/files/files-tests.factor @@ -29,14 +29,14 @@ SPECIALIZED-ARRAY: int ] unit-test { - "This" ch'\s + "This" char: \s } [ "vocab:io/test/read-until-test.txt" ascii [ " " read-until ] with-file-reader ] unit-test { - "This" ch'\s + "This" char: \s } [ "vocab:io/test/read-until-test.txt" binary [ " " read-until [ ascii decode ] dip ] with-file-reader @@ -50,8 +50,8 @@ SPECIALIZED-ARRAY: int { { - { "It seems " ch'J } - { "obs has lost h" ch'i } + { "It seems " char: J } + { "obs has lost h" char: i } { "s grasp on reality again.\n" f } } } [ diff --git a/core/io/io.factor b/core/io/io.factor index 0aae0a86a5..6b52a38c16 100644 --- a/core/io/io.factor +++ b/core/io/io.factor @@ -101,7 +101,7 @@ SYMBOL: error-stream : print ( str -- ) output-stream get stream-print ; inline -: stream-bl ( stream -- ) ch'\s swap stream-write1 ; inline +: stream-bl ( stream -- ) char: \s swap stream-write1 ; inline : bl ( -- ) output-stream get stream-bl ; @@ -253,7 +253,7 @@ M: input-stream stream-length drop f ; inline M: output-stream stream-write [ stream-write1 ] curry each ; inline M: output-stream stream-flush drop ; inline -M: output-stream stream-nl ch'\n swap stream-write1 ; inline +M: output-stream stream-nl char: \n swap stream-write1 ; inline M: output-stream stream-seekable? drop f ; inline M: output-stream stream-length drop f ; inline diff --git a/core/io/pathnames/pathnames-docs.factor b/core/io/pathnames/pathnames-docs.factor index bb3fef84e1..382e3ccd58 100644 --- a/core/io/pathnames/pathnames-docs.factor +++ b/core/io/pathnames/pathnames-docs.factor @@ -7,7 +7,7 @@ HELP: path-separator? { $description "Tests if the code point is a platform-specific path separator." } { $examples "On Unix:" - { $example "USING: io.pathnames prettyprint ;" "ch'/ path-separator? ." "t" } + { $example "USING: io.pathnames prettyprint ;" "char: / path-separator? ." 
"t" } } ; HELP: parent-directory diff --git a/core/io/pathnames/pathnames.factor b/core/io/pathnames/pathnames.factor index 65d3529782..2d2290fb83 100644 --- a/core/io/pathnames/pathnames.factor +++ b/core/io/pathnames/pathnames.factor @@ -65,7 +65,7 @@ ERROR: no-parent-directory path ; { { [ dup "\\\\?\\" head? ] [ t ] } { [ dup length 2 < ] [ f ] } - { [ dup second ch'\: = ] [ t ] } + { [ dup second char: \: = ] [ t ] } [ f ] } cond ; diff --git a/core/io/streams/byte-array/byte-array-tests.factor b/core/io/streams/byte-array/byte-array-tests.factor index faefc8f7cf..7632c72628 100644 --- a/core/io/streams/byte-array/byte-array-tests.factor +++ b/core/io/streams/byte-array/byte-array-tests.factor @@ -18,8 +18,8 @@ SPECIALIZED-ARRAY: int { B{ 1 2 3 4 5 6 } } [ binary [ B{ 1 2 3 } write B{ 4 5 6 } write ] with-byte-writer ] unit-test { B{ 1 2 3 } } [ { 1 2 3 } binary [ 3 read ] with-byte-reader ] unit-test -{ B{ 0b11110101 0b10111111 0b10000000 0b10111111 0b11101111 0b10000000 0b10111111 0b11011111 0b10000000 ch'x } } -[ { 0b101111111000000111111 0b1111000000111111 0b11111000000 ch'x } >string utf8 [ write ] with-byte-writer ] unit-test +{ B{ 0b11110101 0b10111111 0b10000000 0b10111111 0b11101111 0b10000000 0b10111111 0b11011111 0b10000000 char: x } } +[ { 0b101111111000000111111 0b1111000000111111 0b11111000000 char: x } >string utf8 [ write ] with-byte-writer ] unit-test { { 0b1111111000000111111 } t } [ { 0b11110001 0b10111111 0b10000000 0b10111111 } utf8 stream-contents dup >array swap string? ] unit-test { B{ 121 120 } 0 } [ diff --git a/core/io/streams/plain/plain.factor b/core/io/streams/plain/plain.factor index cc882441a6..d2905b98e9 100644 --- a/core/io/streams/plain/plain.factor +++ b/core/io/streams/plain/plain.factor @@ -6,4 +6,4 @@ IN: io.streams.plain MIXIN: plain-writer M: plain-writer stream-nl - ch'\n swap stream-write1 ; + char: \n swap stream-write1 ; diff --git a/core/io/streams/string/string-tests.factor b/core/io/streams/string/string-tests.factor index 43cc752ea1..a028c1acf3 100644 --- a/core/io/streams/string/string-tests.factor +++ b/core/io/streams/string/string-tests.factor @@ -3,7 +3,7 @@ tools.test ; { "" } [ "" [ contents ] with-string-reader ] unit-test -{ "line 1" ch'l } +{ "line 1" char: l } [ "line 1\nline 2\nline 3" [ readln read1 ] with-string-reader ] @@ -28,8 +28,8 @@ unit-test { "abc" f } [ "abc" [ 3 read read1 ] with-string-reader ] unit-test { - { "It seems " ch'J } - { "obs has lost h" ch'i } + { "It seems " char: J } + { "obs has lost h" char: i } { "s grasp on reality again.\n" f } } [ "It seems Jobs has lost his grasp on reality again.\n" [ @@ -39,7 +39,7 @@ unit-test ] with-string-reader ] unit-test -{ "" ch'\r } [ "\r\n" [ "\r" read-until ] with-string-reader ] unit-test +{ "" char: \r } [ "\r\n" [ "\r" read-until ] with-string-reader ] unit-test { f f } [ "" [ "\r" read-until ] with-string-reader ] unit-test { "hello" "hi" } [ diff --git a/core/io/streams/string/string.factor b/core/io/streams/string/string.factor index 3f437d3edd..fb9d4cee2f 100644 --- a/core/io/streams/string/string.factor +++ b/core/io/streams/string/string.factor @@ -15,8 +15,8 @@ M: string-reader stream-read1 sequence-read1 ; M: string-reader stream-read-until sequence-read-until ; M: string-reader stream-readln dup >sequence-stream< bounds-check? [ - "\r\n" over sequence-read-until ch'\r eq? [ - over >sequence-stream< dupd ?nth ch'\n eq? + "\r\n" over sequence-read-until char: \r eq? [ + over >sequence-stream< dupd ?nth char: \n eq? 
[ 1 + pick i<< ] [ drop ] if ] when nip "" or ] [ drop f ] if ; diff --git a/core/lexer/lexer.factor b/core/lexer/lexer.factor index 58bf8ff342..3aeea59a06 100644 --- a/core/lexer/lexer.factor +++ b/core/lexer/lexer.factor @@ -51,11 +51,11 @@ ERROR: not-a-lexer object ; ERROR: unexpected want got ; : forbid-tab ( c -- c ) - [ ch'\t eq? [ "[space]" "[tab]" unexpected ] when ] keep ; inline + [ char: \t eq? [ "[space]" "[tab]" unexpected ] when ] keep ; inline : skip ( i seq ? -- n ) over length [ - [ swap forbid-tab ch'\s eq? xor ] curry find-from drop + [ swap forbid-tab char: \s eq? xor ] curry find-from drop ] dip or ; inline : change-lexer-column ( ..a lexer quot: ( ..a col line -- ..b newcol ) -- ..b ) @@ -86,7 +86,7 @@ GENERIC: skip-word ( lexer -- ) : find-container-delimiter ( i str delim-str -- n/f ) [ 2dup ] dip '[ _ member? ] find-from [ - [ swap subseq [ ch'= = ] all? ] keep and + [ swap subseq [ char: = = ] all? ] keep and ] [ 3drop f ] if ; @@ -95,16 +95,16 @@ M: lexer skip-word [ 2dup [ " \"[{(" member? ] find-from { - { ch'\" [ 2nip 1 + ] } - { ch'\[ [ + { char: \" [ 2nip 1 + ] } + { char: \[ [ 1 + over "[" find-container-delimiter dup [ 2nip 1 + ] [ drop f skip ] if ] } - { ch'\{ [ + { char: \{ [ 1 + over "{" find-container-delimiter dup [ 2nip 1 + ] [ drop f skip ] if ] } - { ch'\( [ + { char: \( [ 1 + over "(" find-container-delimiter dup [ 2nip 1 + ] [ drop f skip ] if ] } @@ -199,14 +199,14 @@ M: lexer-error error-line [ error>> error-line ] [ line>> ] bi or ; [ line>> number>string ": " append ] [ line-text>> ] [ column>> ] tri - pick length + ch'\s + pick length + char: \s [ write ] [ print ] [ write "^" print ] tri* ; : (parsing-word-lexer-dump) ( error parsing-word -- ) [ line>> number>string over line>> number>string length - ch'\s pad-head + char: \s pad-head ": " append write ] [ line-text>> print ] bi simple-lexer-dump ; diff --git a/core/locals/locals-tests.factor b/core/locals/locals-tests.factor index 942012d257..4341d36c88 100644 --- a/core/locals/locals-tests.factor +++ b/core/locals/locals-tests.factor @@ -108,14 +108,14 @@ GENERIC#: lambda-generic-1 1 ( a b -- c ) M:: integer lambda-generic-1 ( a b -- c ) a b * ; M:: string lambda-generic-1 ( a b -- c ) - a b ch'x lambda-generic ; + a b char: x lambda-generic ; M:: integer lambda-generic ( a b -- c ) a b lambda-generic-1 ; GENERIC#: lambda-generic-2 1 ( a b -- c ) M:: integer lambda-generic-2 ( a b -- c ) - a ch'x b lambda-generic ; + a char: x b lambda-generic ; M:: string lambda-generic-2 ( a b -- c ) a b append ; diff --git a/core/make/make-docs.factor b/core/make/make-docs.factor index b3815cc4b0..67658a3f6b 100644 --- a/core/make/make-docs.factor +++ b/core/make/make-docs.factor @@ -56,7 +56,7 @@ $nl { $subsections building } { $example "USING: make math.parser ;" - "[ \"Language #\" % ch'\\s , 5 # ] \"\" make print" + "[ \"Language #\" % char: \\s , 5 # ] \"\" make print" "Language # 5" } { $subsections "make-philosophy" } ; diff --git a/core/math/parser/parser-tests.factor b/core/math/parser/parser-tests.factor index 60d0dee4fe..659f4ba6b5 100644 --- a/core/math/parser/parser-tests.factor +++ b/core/math/parser/parser-tests.factor @@ -434,12 +434,12 @@ unit-test { t } [ most-negative-fixnum number>string string>number fixnum? ] unit-test ! large/small numbers/exponents correctly cancel out -{ 1.0 } [ "1" 3000 [ ch'0 ] "" replicate-as append "e-3000" append string>number ] unit-test -{ 1.0 } [ "0x1" 1000 [ ch'0 ] "" replicate-as append "p-4000" append string>number ] unit-test -{ 1.0 } [ "0." 
3000 [ ch'0 ] "" replicate-as append "1e3001" append string>number ] unit-test -{ 1.0 } [ "0x0." 1000 [ ch'0 ] "" replicate-as append "1p4004" append string>number ] unit-test -{ 1.0 } [ "1" 3000 [ ch'0 ] "" replicate-as append "." append - 3000 [ ch'0 ] "" replicate-as append "e-3000" append string>number ] unit-test +{ 1.0 } [ "1" 3000 [ char: 0 ] "" replicate-as append "e-3000" append string>number ] unit-test +{ 1.0 } [ "0x1" 1000 [ char: 0 ] "" replicate-as append "p-4000" append string>number ] unit-test +{ 1.0 } [ "0." 3000 [ char: 0 ] "" replicate-as append "1e3001" append string>number ] unit-test +{ 1.0 } [ "0x0." 1000 [ char: 0 ] "" replicate-as append "1p4004" append string>number ] unit-test +{ 1.0 } [ "1" 3000 [ char: 0 ] "" replicate-as append "." append + 3000 [ char: 0 ] "" replicate-as append "e-3000" append string>number ] unit-test ! We correctly parse the biggest/smallest float correctly ! (ie the 1/0. or 0/0. short-circuit optimization doesn't apply) @@ -459,9 +459,9 @@ unit-test { 1.79769313486231571e+308 } [ "1.797693134862315807e+308" string>number ] unit-test ! works with ratios -{ 0.25 } [ "1/4" 3000 [ ch'0 ] "" replicate-as append "e-3000" append string>number ] unit-test +{ 0.25 } [ "1/4" 3000 [ char: 0 ] "" replicate-as append "e-3000" append string>number ] unit-test ! XXX: disable for right now, see #1362 or #1408 -! { 1.25 } [ "1+1/4" 3000 [ ch'0 ] "" replicate-as append "e-3000" append string>number ] unit-test +! { 1.25 } [ "1+1/4" 3000 [ char: 0 ] "" replicate-as append "e-3000" append string>number ] unit-test ! #1356 #1231 { 1/0. } [ "1e100000" string>number ] unit-test @@ -499,5 +499,5 @@ unit-test "1.55E+05" "1.550e+05" } [ - trouble ch'e 0 rot set-nth trouble drop + trouble char: e 0 rot set-nth trouble drop ] unit-test diff --git a/core/math/parser/parser.factor b/core/math/parser/parser.factor index 0399625662..ced218914d 100644 --- a/core/math/parser/parser.factor +++ b/core/math/parser/parser.factor @@ -10,16 +10,16 @@ PRIVATE> : digit> ( ch -- n ) { - { [ dup ch'9 <= ] [ ch'0 - dup 0 < [ drop 255 ] when ] } - { [ dup ch'a < ] [ ch'A 10 - - dup 10 < [ drop 255 ] when ] } - [ ch'a 10 - - dup 10 < [ drop 255 ] when ] + { [ dup char: 9 <= ] [ char: 0 - dup 0 < [ drop 255 ] when ] } + { [ dup char: a < ] [ char: A 10 - - dup 10 < [ drop 255 ] when ] } + [ char: a 10 - - dup 10 < [ drop 255 ] when ] } cond ; inline : string>digits ( str -- digits ) [ digit> ] B{ } map-as ; inline : >digit ( n -- ch ) - dup 10 < [ ch'0 + ] [ 10 - ch'a + ] if ; inline + dup 10 < [ char: 0 + ] [ 10 - char: a + ] if ; inline ERROR: invalid-radix radix ; @@ -201,7 +201,7 @@ DEFER: @neg-digit : @exponent-digit-or-punc ( float-parse i number-parse n char -- float-parse n/f ) { - { ch', [ [ @exponent-digit ] require-next-digit ] } + { char: , [ [ @exponent-digit ] require-next-digit ] } [ @exponent-digit ] } case ; inline @@ -211,8 +211,8 @@ DEFER: @neg-digit : @exponent-first-char ( float-parse i number-parse n char -- float-parse n/f ) { - { ch'- [ [ @exponent-digit ] require-next-digit ?neg ] } - { ch'+ [ [ @exponent-digit ] require-next-digit ] } + { char: - [ [ @exponent-digit ] require-next-digit ?neg ] } + { char: + [ [ @exponent-digit ] require-next-digit ] } [ @exponent-digit ] } case ; inline @@ -233,7 +233,7 @@ DEFER: @neg-digit : @mantissa-digit-or-punc ( float-parse i number-parse n char -- float-parse n/f ) { - { ch', [ [ @mantissa-digit ] require-next-digit ] } + { char: , [ [ @mantissa-digit ] require-next-digit ] } [ @mantissa-digit ] } case ; inline @@ 
-253,8 +253,8 @@ DEFER: @neg-digit : @denom-digit-or-punc ( i number-parse n char -- n/f ) { - { ch', [ [ @denom-digit ] require-next-digit ] } - { ch'. [ ->mantissa ] } + { char: , [ [ @denom-digit ] require-next-digit ] } + { char: . [ ->mantissa ] } [ [ @denom-digit ] or-exponent ] } case ; inline @@ -264,7 +264,7 @@ DEFER: @neg-digit : @denom-first-digit ( i number-parse n char -- n/f ) { - { ch'. [ ->mantissa ] } + { char: . [ ->mantissa ] } [ @denom-digit ] } case ; inline @@ -274,8 +274,8 @@ DEFER: @neg-digit : @num-digit-or-punc ( i number-parse n char -- n/f ) { - { ch', [ [ @num-digit ] require-next-digit ] } - { ch'/ [ ->denominator ] } + { char: , [ [ @num-digit ] require-next-digit ] } + { char: / [ ->denominator ] } [ @num-digit ] } case ; inline @@ -289,10 +289,10 @@ DEFER: @neg-digit : @pos-digit-or-punc ( i number-parse n char -- n/f ) { - { ch', [ [ @pos-digit ] require-next-digit ] } - { ch'+ [ ->numerator ] } - { ch'/ [ ->denominator ] } - { ch'. [ ->mantissa ] } + { char: , [ [ @pos-digit ] require-next-digit ] } + { char: + [ ->numerator ] } + { char: / [ ->denominator ] } + { char: . [ ->mantissa ] } [ [ @pos-digit ] or-exponent ] } case ; inline @@ -315,17 +315,17 @@ DEFER: @neg-digit : @pos-first-digit ( i number-parse n char -- n/f ) { - { ch'. [ ->required-mantissa ] } - { ch'0 [ [ @pos-digit ] [ @pos-digit-or-punc ] with-radix-char ] } + { char: . [ ->required-mantissa ] } + { char: 0 [ [ @pos-digit ] [ @pos-digit-or-punc ] with-radix-char ] } [ @pos-digit ] } case ; inline : @neg-digit-or-punc ( i number-parse n char -- n/f ) { - { ch', [ [ @neg-digit ] require-next-digit ] } - { ch'- [ ->numerator ] } - { ch'/ [ ->denominator ] } - { ch'. [ ->mantissa ] } + { char: , [ [ @neg-digit ] require-next-digit ] } + { char: - [ ->numerator ] } + { char: / [ ->denominator ] } + { char: . [ ->mantissa ] } [ [ @neg-digit ] or-exponent ] } case ; inline @@ -335,34 +335,34 @@ DEFER: @neg-digit : @neg-first-digit ( i number-parse n char -- n/f ) { - { ch'. [ ->required-mantissa ] } - { ch'0 [ [ @neg-digit ] [ @neg-digit-or-punc ] with-radix-char ] } + { char: . [ ->required-mantissa ] } + { char: 0 [ [ @neg-digit ] [ @neg-digit-or-punc ] with-radix-char ] } [ @neg-digit ] } case ; inline : @first-char ( i number-parse n char -- n/f ) { - { ch'- [ [ @neg-first-digit ] require-next-digit ?neg ] } - { ch'+ [ [ @pos-first-digit ] require-next-digit ] } + { char: - [ [ @neg-first-digit ] require-next-digit ?neg ] } + { char: + [ [ @pos-first-digit ] require-next-digit ] } [ @pos-first-digit ] } case ; inline : @neg-first-digit-no-radix ( i number-parse n char -- n/f ) { - { ch'. [ ->required-mantissa ] } + { char: . [ ->required-mantissa ] } [ @neg-digit ] } case ; inline : @pos-first-digit-no-radix ( i number-parse n char -- n/f ) { - { ch'. [ ->required-mantissa ] } + { char: . 
[ ->required-mantissa ] } [ @pos-digit ] } case ; inline : @first-char-no-radix ( i number-parse n char -- n/f ) { - { ch'- [ [ @neg-first-digit-no-radix ] require-next-digit ?neg ] } - { ch'+ [ [ @pos-first-digit-no-radix ] require-next-digit ] } + { char: - [ [ @neg-first-digit-no-radix ] require-next-digit ?neg ] } + { char: + [ [ @pos-first-digit-no-radix ] require-next-digit ] } [ @pos-first-digit-no-radix ] } case ; inline @@ -403,7 +403,7 @@ CONSTANT: ONES B{ ] dip [ push ] keep [ push ] keep ; inline : (one-digit) ( num accum -- num' accum ) - [ 10 /mod ch'0 + ] dip [ push ] keep ; inline + [ 10 /mod char: 0 + ] dip [ push ] keep ; inline : (bignum>dec) ( num accum -- num' accum ) [ over most-positive-fixnum > ] @@ -476,21 +476,21 @@ M: integer >base { { [ over 0 = ] [ 2drop "0" ] } { [ over 0 > ] [ positive>base ] } - [ [ neg ] dip positive>base ch'- prefix ] + [ [ neg ] dip positive>base char: - prefix ] } cond ; M: ratio >base [ fraction>parts [ /mod ] keep ] [ [ >base ] curry tri@ ] bi* "/" glue over first-unsafe { - { ch'0 [ nip ] } - { ch'- [ append ] } + { char: 0 [ nip ] } + { char: - [ append ] } [ drop "+" glue ] } case ; base -0.0 double>bits bitand zero? "" "-" ? ; : bin-float-value ( str size -- str' ) - ch'0 pad-head [ ch'0 = ] trim-tail + char: 0 pad-head [ char: 0 = ] trim-tail [ "0" ] when-empty "1." prepend ; : float>hex-value ( mantissa -- str ) @@ -547,7 +547,7 @@ M: ratio >base [ format-string ] 4dip [ format-string ] bi@ (format-float) >string ] [ - "C" = [ [ "G" = ] [ "E" = ] bi or ch'E ch'e ? fix-float ] + "C" = [ [ "G" = ] [ "E" = ] bi or char: E char: e ? fix-float ] [ drop ] if ] 2bi ; inline @@ -583,7 +583,7 @@ M: float >base ] keep ; : bytes>hex-string ( bytes -- hex-string ) - dup length 2 * ch'0 [ + dup length 2 * char: 0 [ [ [ 16 /mod [ >digit ] bi@ ] [ 2 * dup 1 + ] diff --git a/core/multiline/multiline.factor b/core/multiline/multiline.factor index c3be991598..8687b93d16 100644 --- a/core/multiline/multiline.factor +++ b/core/multiline/multiline.factor @@ -11,7 +11,7 @@ IN: multiline end text i subseq-start-from |[ j | i j text subseq % j end length + ] [ - text i shorted tail % ch'\n , + text i shorted tail % char: \n , lexer next-line 0 end lexer (scan-multiline-string) ] if* diff --git a/core/parser/parser-tests.factor b/core/parser/parser-tests.factor index 52d47a45fc..284c91dd16 100644 --- a/core/parser/parser-tests.factor +++ b/core/parser/parser-tests.factor @@ -421,8 +421,8 @@ DEFER: foo [ ": foo ;" eval( -- ) ] [ error>> error>> no-current-vocab-error? ] must-fail-with -{ 92 } [ "ch'\\" eval( -- n ) ] unit-test -{ 92 } [ "ch'\\\\" eval( -- n ) ] unit-test +{ 92 } [ "char: \\" eval( -- n ) ] unit-test +{ 92 } [ "char: \\\\" eval( -- n ) ] unit-test { } [ { @@ -488,7 +488,7 @@ DEFER: blahy [ "IN: parser.tests USE: kernel TUPLE: blahy < tuple ; : blahy ( -- ) ; TUPLE: blahy < tuple ; : blahy ( -- ) ;" eval( -- ) ] [ error>> error>> def>> \ blahy eq? 
] must-fail-with -[ "ch'\\u9999999999999" eval( -- n ) ] must-fail +[ "char: \\u9999999999999" eval( -- n ) ] must-fail SYMBOLS: a b c ; diff --git a/core/parser/parser.factor b/core/parser/parser.factor index fe697f599d..9cf0d4d0c0 100644 --- a/core/parser/parser.factor +++ b/core/parser/parser.factor @@ -64,7 +64,7 @@ ERROR: number-expected ; DEFER: scan-object : parse-lower-colon2 ( obj -- obj' ) - [ ch'\: = ] cut-tail length [ scan-object ] replicate 2array + [ char: \: = ] cut-tail length [ scan-object ] replicate 2array handle-lower-colon ; : parse-single-quote ( obj -- obj' ) diff --git a/core/sbufs/sbufs-tests.factor b/core/sbufs/sbufs-tests.factor index ecd2ba4b6c..4088c0fa89 100644 --- a/core/sbufs/sbufs-tests.factor +++ b/core/sbufs/sbufs-tests.factor @@ -11,12 +11,12 @@ sequences.private strings tools.test ; "buf" get >string ] unit-test -{ ch'h } [ 0 sbuf"hello world" nth ] unit-test -{ ch'H } [ - ch'H 0 sbuf"hello world" [ set-nth ] keep first +{ char: h } [ 0 sbuf"hello world" nth ] unit-test +{ char: H } [ + char: H 0 sbuf"hello world" [ set-nth ] keep first ] unit-test -{ sbuf"x" } [ 1 ch'x >bignum suffix! ] unit-test +{ sbuf"x" } [ 1 char: x >bignum suffix! ] unit-test { fixnum } [ 1 >bignum sbuf"" new-sequence length class-of ] unit-test @@ -24,4 +24,4 @@ sequences.private strings tools.test ; [ 1.5 sbuf"" new-sequence ] must-fail -[ ch'A 0.5 0.5 sbuf"a" set-nth-unsafe ] must-fail +[ char: A 0.5 0.5 sbuf"a" set-nth-unsafe ] must-fail diff --git a/core/sequences/generalizations/generalizations-docs.factor b/core/sequences/generalizations/generalizations-docs.factor index 6896cc08ec..2cba1b8aca 100644 --- a/core/sequences/generalizations/generalizations-docs.factor +++ b/core/sequences/generalizations/generalizations-docs.factor @@ -11,7 +11,7 @@ HELP: nsequence "that constructs a sequence from the top " { $snippet "n" } " elements of the stack." } { $examples - { $example "USING: prettyprint sequences.generalizations ;" "ch'f ch'i ch's ch'h 4 \"\" nsequence ." "\"fish\"" } + { $example "USING: prettyprint sequences.generalizations ;" "char: f char: i char: s char: h 4 \"\" nsequence ." "\"fish\"" } } ; HELP: narray diff --git a/core/sequences/sequences-docs.factor b/core/sequences/sequences-docs.factor index 0d7a70dbd4..2c35a6bf99 100644 --- a/core/sequences/sequences-docs.factor +++ b/core/sequences/sequences-docs.factor @@ -613,7 +613,7 @@ HELP: member? "Is a letter in a string:" { $example "USING: sequences prettyprint ;" - "ch'a \"abc\" member? ." + "char: a \"abc\" member? ." "t" } $nl "Is a number in a sequence:" @@ -799,12 +799,12 @@ HELP: padding HELP: pad-head { $values { "seq" sequence } { "n" "a non-negative integer" } { "elt" object } { "padded" "a new sequence" } } { $description "Outputs a new sequence consisting of " { $snippet "seq" } " padded on the left with enough repetitions of " { $snippet "elt" } " to have the result be of length " { $snippet "n" } "." } -{ $examples { $example "USING: io sequences ;" "{ \"ab\" \"quux\" } [ 5 ch'- pad-head print ] each" "---ab\n-quux" } } ; +{ $examples { $example "USING: io sequences ;" "{ \"ab\" \"quux\" } [ 5 char: - pad-head print ] each" "---ab\n-quux" } } ; HELP: pad-tail { $values { "seq" sequence } { "n" "a non-negative integer" } { "elt" object } { "padded" "a new sequence" } } { $description "Outputs a new sequence consisting of " { $snippet "seq" } " padded on the right with enough repetitions of " { $snippet "elt" } " to have the result be of length " { $snippet "n" } "." 
} -{ $examples { $example "USING: io sequences ;" "{ \"ab\" \"quux\" } [ 5 ch'- pad-tail print ] each" "ab---\nquux-" } } ; +{ $examples { $example "USING: io sequences ;" "{ \"ab\" \"quux\" } [ 5 char: - pad-tail print ] each" "ab---\nquux-" } } ; HELP: sequence= { $values { "seq1" sequence } { "seq2" sequence } { "?" boolean } } diff --git a/core/sequences/sequences-tests.factor b/core/sequences/sequences-tests.factor index 31615d3d75..b0a5bd84ac 100644 --- a/core/sequences/sequences-tests.factor +++ b/core/sequences/sequences-tests.factor @@ -51,20 +51,20 @@ IN: sequences.tests { 2 3 } [ [ 1 2 3 ] [ 2 > ] find ] unit-test { f f } [ [ 1 2 3 ] [ 10 > ] find ] unit-test -{ 1 ch'e } +{ 1 char: e } [ "hello world" "aeiou" [ member? ] curry find ] unit-test -{ 4 ch'o } +{ 4 char: o } [ 3 "hello world" "aeiou" [ member? ] curry find-from ] unit-test { f f } [ "abcd" [ 10 > nip ] find-index ] unit-test -{ f f } [ "abcd" [ drop ch'e = ] find-index ] unit-test -{ 3 ch'd } [ "abcdefg" [ 3 = nip ] find-index ] unit-test -{ 3 ch'd } [ "abcdefg" [ drop ch'd = ] find-index ] unit-test +{ f f } [ "abcd" [ drop char: e = ] find-index ] unit-test +{ 3 char: d } [ "abcdefg" [ 3 = nip ] find-index ] unit-test +{ 3 char: d } [ "abcdefg" [ drop char: d = ] find-index ] unit-test -{ 0 ch'a } [ 0 "abcdef" [ drop ch'a >= ] find-index-from ] unit-test -{ 1 ch'b } [ 0 "abcdef" [ drop ch'a > ] find-index-from ] unit-test -{ 2 ch'c } [ 1 "abcdef" [ drop ch'b > ] find-index-from ] unit-test +{ 0 char: a } [ 0 "abcdef" [ drop char: a >= ] find-index-from ] unit-test +{ 1 char: b } [ 0 "abcdef" [ drop char: a > ] find-index-from ] unit-test +{ 2 char: c } [ 1 "abcdef" [ drop char: b > ] find-index-from ] unit-test { f } [ 3 [ ] member? ] unit-test { f } [ 3 [ 1 2 ] member? ] unit-test @@ -74,11 +74,11 @@ IN: sequences.tests { t } [ [ "hello" "world" ] [ second ] keep member-eq? ] unit-test -{ 4 } [ ch'x "tuvwxyz" >vector index ] unit-test +{ 4 } [ char: x "tuvwxyz" >vector index ] unit-test -{ f } [ ch'x 5 "tuvwxyz" >vector index-from ] unit-test +{ f } [ char: x 5 "tuvwxyz" >vector index-from ] unit-test -{ f } [ ch'a 0 "tuvwxyz" >vector index-from ] unit-test +{ f } [ char: a 0 "tuvwxyz" >vector index-from ] unit-test { f } [ [ "Hello" { } 0.75 ] [ string? ] all? ] unit-test { t } [ [ ] [ ] all? ] unit-test @@ -163,7 +163,7 @@ IN: sequences.tests { { 1 3 2 4 } } [ { 1 2 3 4 } clone 1 2 pick exchange ] unit-test { { "" "a" "aa" "aaa" } } -[ 4 [ ch'a ] { } map-integers ] +[ 4 [ char: a ] { } map-integers ] unit-test { V{ 1 3 5 7 9 } } [ 10 >vector [ even? ] reject! ] unit-test @@ -279,14 +279,14 @@ unit-test { 9 } [ 10 ?last ] unit-test [ -1/0. 0 remove-nth! 
] must-fail -{ "" } [ "" [ ch'\s = ] trim ] unit-test -{ "" } [ "" [ ch'\s = ] trim-head ] unit-test -{ "" } [ "" [ ch'\s = ] trim-tail ] unit-test -{ "" } [ " " [ ch'\s = ] trim-head ] unit-test -{ "" } [ " " [ ch'\s = ] trim-tail ] unit-test -{ "asdf" } [ " asdf " [ ch'\s = ] trim ] unit-test -{ "asdf " } [ " asdf " [ ch'\s = ] trim-head ] unit-test -{ " asdf" } [ " asdf " [ ch'\s = ] trim-tail ] unit-test +{ "" } [ "" [ char: \s = ] trim ] unit-test +{ "" } [ "" [ char: \s = ] trim-head ] unit-test +{ "" } [ "" [ char: \s = ] trim-tail ] unit-test +{ "" } [ " " [ char: \s = ] trim-head ] unit-test +{ "" } [ " " [ char: \s = ] trim-tail ] unit-test +{ "asdf" } [ " asdf " [ char: \s = ] trim ] unit-test +{ "asdf " } [ " asdf " [ char: \s = ] trim-head ] unit-test +{ " asdf" } [ " asdf " [ char: \s = ] trim-tail ] unit-test { 328350 } [ 100 [ sq ] map-sum ] unit-test diff --git a/core/splitting/splitting.factor b/core/splitting/splitting.factor index 62e645e68a..6730d40296 100644 --- a/core/splitting/splitting.factor +++ b/core/splitting/splitting.factor @@ -116,8 +116,8 @@ M: string string-lines 2dup [ "\r\n" member? ] find-from swapd [ over [ [ nip length ] keep ] unless [ "" subseq-as suffix! ] 2keep [ 1 + ] dip - ] dip ch'\r eq? [ - 2dup ?nth ch'\n eq? [ [ 1 + ] dip ] when + ] dip char: \r eq? [ + 2dup ?nth char: \n eq? [ [ 1 + ] dip ] when ] when ] while 2drop { } like ; diff --git a/core/strings/parser/parser-docs.factor b/core/strings/parser/parser-docs.factor index bb0e1deaad..f05dd2ae9d 100644 --- a/core/strings/parser/parser-docs.factor +++ b/core/strings/parser/parser-docs.factor @@ -7,7 +7,7 @@ HELP: bad-escape HELP: escape { $values { "escape" "a single-character escape" } { "ch" "a character" } } { $description "Converts from a single-character escape code and the corresponding character." } -{ $examples { $example "USING: kernel prettyprint strings.parser ;" "ch'n escape ch'\\n = ." "t" } } ; +{ $examples { $example "USING: kernel prettyprint strings.parser ;" "char: n escape char: \\n = ." "t" } } ; HELP: parse-string { $values { "str" "a new " { $link string } } } diff --git a/core/strings/parser/parser.factor b/core/strings/parser/parser.factor index 4102e80856..bce99680c3 100644 --- a/core/strings/parser/parser.factor +++ b/core/strings/parser/parser.factor @@ -9,29 +9,29 @@ ERROR: bad-escape char ; : escape ( escape -- ch ) H{ - { ch'a ch'\a } - { ch'b ch'\b } - { ch'e ch'\e } - { ch'f ch'\f } - { ch'n ch'\n } - { ch'r ch'\r } - { ch't ch'\t } - { ch's ch'\s } - { ch'v ch'\v } - { ch'\s ch'\s } - { ch'0 ch'\0 } - { ch'\! ch'\! } - { ch'\\ ch'\\ } - { ch'\" ch'\" } - { ch'\: ch'\: } - { ch'\[ ch'\[ } - { ch'\{ ch'\{ } - { ch'\( ch'\( } - { ch'\; ch'\; } - { ch'\] ch'\] } - { ch'\} ch'\} } - { ch'\) ch'\) } - { ch'\' ch'\' } + { char: a char: \a } + { char: b char: \b } + { char: e char: \e } + { char: f char: \f } + { char: n char: \n } + { char: r char: \r } + { char: t char: \t } + { char: s char: \s } + { char: v char: \v } + { char: \s char: \s } + { char: 0 char: \0 } + { char: \! char: \! 
} + { char: \\ char: \\ } + { char: \" char: \" } + { char: \: char: \: } + { char: \[ char: \[ } + { char: \{ char: \{ } + { char: \( char: \( } + { char: \; char: \; } + { char: \] char: \] } + { char: \} char: \} } + { char: \) char: \) } + { char: \' char: \' } } ?at [ bad-escape ] unless ; INITIALIZED-SYMBOL: name>char-hook [ @@ -43,7 +43,7 @@ INITIALIZED-SYMBOL: name>char-hook [ : unicode-escape ( str -- ch str' ) "{" ?head-slice [ - ch'\} over index cut-slice [ + char: \} over index cut-slice [ dup hex> [ nip ] [ @@ -56,8 +56,8 @@ INITIALIZED-SYMBOL: name>char-hook [ : next-escape ( str -- ch str' ) unclip-slice { - { ch'u [ unicode-escape ] } - { ch'x [ hex-escape ] } + { char: u [ unicode-escape ] } + { char: x [ hex-escape ] } [ escape swap ] } case ; @@ -68,7 +68,7 @@ INITIALIZED-SYMBOL: name>char-hook [ [ cut-slice [ append! ] dip rest-slice next-escape [ suffix! ] dip - ch'\\ over index (unescape-string) + char: \\ over index (unescape-string) ] [ append! ] if* ; @@ -76,7 +76,7 @@ INITIALIZED-SYMBOL: name>char-hook [ PRIVATE> : unescape-string ( str -- str' ) - ch'\\ over index [ + char: \\ over index [ [ [ length ] keep ] dip (unescape-string) ] when* "" like ; @@ -121,7 +121,7 @@ DEFER: (parse-string) : parse-found-token ( accum lexer i elt -- ) { sbuf lexer fixnum fixnum } declare [ over lexer-subseq pick push-all ] dip - ch'\\ = [ + char: \\ = [ dup dup [ next-char ] bi@ [ [ pick push ] bi@ ] [ drop 2dup next-line% ] if* @@ -137,7 +137,7 @@ DEFER: (parse-string) parse-found-token ] [ drop 2dup next-line% - ch'\n pick push + char: \n pick push (parse-string) ] if* ] [ diff --git a/core/strings/strings-tests.factor b/core/strings/strings-tests.factor index c7af3203b3..12c8deedfa 100644 --- a/core/strings/strings-tests.factor +++ b/core/strings/strings-tests.factor @@ -3,7 +3,7 @@ io.streams.null kernel kernel.private make math math.order memory namespaces prettyprint sbufs sequences strings strings.private tools.test vectors ; -{ ch'b } [ 1 >bignum "abc" nth ] unit-test +{ char: b } [ 1 >bignum "abc" nth ] unit-test { } [ 10 [ [ -1000000 ] ignore-errors ] times ] unit-test @@ -23,7 +23,7 @@ strings.private tools.test vectors ; { "Beginning" } [ "Beginning and end" 9 head ] unit-test -{ f } [ ch'I "team" member? ] unit-test +{ f } [ char: I "team" member? ] unit-test { t } [ "ea" "team" subseq? ] unit-test { f } [ "actore" "Factor" subseq? ] unit-test @@ -39,12 +39,12 @@ strings.private tools.test vectors ; { "Replacing+spaces+with+plus" } [ "Replacing spaces with plus" - [ dup ch'\s = [ drop ch'+ ] when ] map + [ dup char: \s = [ drop char: + ] when ] map ] unit-test -{ "05" } [ "5" 2 ch'0 pad-head ] unit-test -{ "666" } [ "666" 2 ch'0 pad-head ] unit-test +{ "05" } [ "5" 2 char: 0 pad-head ] unit-test +{ "666" } [ "666" 2 char: 0 pad-head ] unit-test [ 1 "" nth ] must-fail [ -6 "hello" nth ] must-fail @@ -75,17 +75,17 @@ unit-test { { - ch'h + char: h 0x1234 - ch'l + char: l 0x4321 - ch'o + char: o 0x654321 - ch'w - ch'o - ch'r - ch'l - ch'd + char: w + char: o + char: r + char: l + char: d } } [ "s" get >array @@ -100,7 +100,7 @@ unit-test ! Make sure aux vector is not shared { "\udeadbe" } [ "\udeadbe" clone - ch'\u123456 over clone set-first + char: \u123456 over clone set-first ] unit-test ! Regressions @@ -123,7 +123,7 @@ unit-test { t } [ 10000 [ drop - 300 100 ch'\u123456 + 300 100 char: \u123456 [ clone resize-string first ] keep = ] all-integers? 
] unit-test diff --git a/core/syntax/modern/modern.factor b/core/syntax/modern/modern.factor index 745c58c5c1..ed730de1ff 100644 --- a/core/syntax/modern/modern.factor +++ b/core/syntax/modern/modern.factor @@ -6,11 +6,11 @@ IN: syntax.modern : matching-delimiter ( ch -- ch' ) H{ - { ch'\( ch'\) } - { ch'\[ ch'\] } - { ch'\{ ch'\} } - { ch'< ch'> } - { ch'\: ch'\; } + { char: \( char: \) } + { char: \[ char: \] } + { char: \{ char: \} } + { char: < char: > } + { char: \: char: \; } } ?at drop ; : matching-delimiter-string ( string -- string' ) @@ -94,12 +94,12 @@ ERROR: no-paren-container-word payload word ; [ no-paren-container-word ] if ; -: lower-char? ( str -- ? ) [ ch'a ch'z between? ] [ ch'- = ] bi or ; -: upper-char? ( str -- ? ) [ ch'A ch'Z between? ] [ ch'- = ] bi or ; +: lower-char? ( str -- ? ) [ char: a char: z between? ] [ char: - = ] bi or ; +: upper-char? ( str -- ? ) [ char: A char: Z between? ] [ char: - = ] bi or ; : strict-lower-colon? ( string -- ? ) - [ ch'\: = ] cut-tail + [ char: \: = ] cut-tail [ [ length 0 > ] [ [ lower-char? ] all? ] bi and ] [ length 0 > ] bi* and ; @@ -111,14 +111,14 @@ ERROR: no-paren-container-word payload word ; ! All chars must... [ [ - [ ch'A ch'Z between? ] [ "':-\\#" member? ] bi or + [ char: A char: Z between? ] [ "':-\\#" member? ] bi or ] all? ] ! At least one char must... - [ [ [ ch'A ch'Z between? ] [ ch'\' = ] bi or ] any? ] bi and ; + [ [ [ char: A char: Z between? ] [ char: \' = ] bi or ] any? ] bi and ; : strict-upper-colon? ( string -- ? ) - [ [ ch'\: = ] all? ] + [ [ char: \: = ] all? ] [ (strict-upper-colon?) ] bi or ; @@ -137,7 +137,7 @@ ERROR: no-paren-container-word payload word ; [ [ length 0 > ] [ - ! ch'\' + ! char: \' [ "\\'" tail? ] [ "'" tail? not ] bi or ] bi and ] bi* and ; @@ -146,12 +146,12 @@ ERROR: no-paren-container-word payload word ; dup (strict-single-quote?) [ "'[" sequence= not ] [ drop f ] if ; -: strict-double-quote? ( string -- ? ) ?last ch'\" = ; +: strict-double-quote? ( string -- ? ) ?last char: \" = ; : strict-container? ( string open-str -- ? ) [ split1 ] [ split1 ] bi [ ] - [ [ ch'= = ] all? ] + [ [ char: = = ] all? ] [ "" = ] tri* and and ; : strict-bracket-container? ( string -- ? ) "[" strict-container? ; diff --git a/core/syntax/syntax-docs.factor b/core/syntax/syntax-docs.factor index 367710494f..f21fdb364a 100644 --- a/core/syntax/syntax-docs.factor +++ b/core/syntax/syntax-docs.factor @@ -594,17 +594,17 @@ HELP: \IN: { $description "Sets the current vocabulary where new words will be defined, creating the vocabulary first if it does not exist. After the vocabulary has been created, it can be listed in " { $link \ \USE: } " and " { $link \ \USING: } " declarations." } ; HELP: \char: -{ $syntax "ch'token" } +{ $syntax "char: token" } { $values { "token" "a literal character, escape code, or Unicode code point name" } } { $description "Adds a Unicode code point to the parse tree." } { $examples { $code - "ch'x" - "ch'\\u000032" - "ch'\\u{32}" - "ch'\\u{exclamation-mark}" - "ch'exclamation-mark" - "ch'ugaritic-letter-samka" + "char: x" + "char: \\u000032" + "char: \\u{32}" + "char: \\u{exclamation-mark}" + "char: exclamation-mark" + "char: ugaritic-letter-samka" } } ; diff --git a/core/vocabs/loader/loader.factor b/core/vocabs/loader/loader.factor index 8c7efa44bb..07d1178e86 100644 --- a/core/vocabs/loader/loader.factor +++ b/core/vocabs/loader/loader.factor @@ -41,7 +41,7 @@ M: string vocab-path ( string -- path/f ) PRIVATE> : vocab-dir ( vocab -- dir ) - vocab-name H{ { ch'. 
ch'/ } } substitute ; + vocab-name H{ { char: . char: / } } substitute ; : append-vocab-dir ( vocab str/f -- path ) [ vocab-name "." split ] dip diff --git a/core/vocabs/vocabs.factor b/core/vocabs/vocabs.factor index 78957dfe13..22db8b53cf 100644 --- a/core/vocabs/vocabs.factor +++ b/core/vocabs/vocabs.factor @@ -111,7 +111,7 @@ ERROR: no-vocab name ; swap [ drop t ] [ 2dup = [ 2drop t ] [ 2dup head? [ - length swap ?nth ch'. = + length swap ?nth char: . = ] [ 2drop f ] if ] if ] if-empty ; diff --git a/extra/alien/fortran/fortran.factor b/extra/alien/fortran/fortran.factor index 9f4e4f0acf..fbaf17564b 100644 --- a/extra/alien/fortran/fortran.factor +++ b/extra/alien/fortran/fortran.factor @@ -34,7 +34,7 @@ SYMBOL: fortran-abi : lowercase-name-with-underscore ( name -- name' ) >lower "_" append ; : lowercase-name-with-extra-underscore ( name -- name' ) - >lower ch'_ over member? + >lower char: _ over member? [ "__" append ] [ "_" append ] if ; HOOK: fortran-c-abi fortran-abi ( -- abi ) @@ -346,7 +346,7 @@ M: character-type () [ (fortran-result>) ] } cond ; -: letters ( -- seq ) ch'a ch'z [a,b] ; +: letters ( -- seq ) char: a char: z [a,b] ; : (shuffle-map) ( return parameters -- ret par ) [ diff --git a/extra/base32-crockford/base32-crockford.factor b/extra/base32-crockford/base32-crockford.factor index 8ea421e0ea..7b2e359f88 100644 --- a/extra/base32-crockford/base32-crockford.factor +++ b/extra/base32-crockford/base32-crockford.factor @@ -16,10 +16,10 @@ CONSTANT: INVERSE $[ 256 [ ALPHABET index 0xff or ] B{ } map-integers ] CONSTANT: CHECKSUM $[ ALPHABET "*~$=U" append ] : normalize-base32 ( base32 -- base32' ) - ch'- swap remove >upper H{ - { ch'I ch'1 } - { ch'L ch'1 } - { ch'O ch'0 } + char: - swap remove >upper H{ + { char: I char: 1 } + { char: L char: 1 } + { char: O char: 0 } } substitute ; : parse-base32 ( base32 -- n ) diff --git a/extra/base32/base32.factor b/extra/base32/base32.factor index 8b66d37761..263b3f1981 100644 --- a/extra/base32/base32.factor +++ b/extra/base32/base32.factor @@ -21,7 +21,7 @@ CONSTANT: alphabet $[ "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" >byte-array ] alphabet nth ; inline : base32>ch ( ch -- ch ) - $[ alphabet alphabet-inverse 0 ch'= pick set-nth ] nth + $[ alphabet alphabet-inverse 0 char: = pick set-nth ] nth [ malformed-base32 ] unless* { fixnum } declare ; inline : encode5 ( seq -- byte-array ) @@ -31,7 +31,7 @@ CONSTANT: alphabet $[ "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567" >byte-array ] : encode-pad ( seq n -- byte-array ) [ 5 0 pad-tail encode5 ] [ B{ 0 2 4 5 7 } nth ] bi* head-slice - 8 ch'= pad-tail ; inline + 8 char: = pad-tail ; inline : (encode-base32) ( stream column -- ) 5 pick stream-read dup length { @@ -52,14 +52,14 @@ PRIVATE> : decode8 ( seq -- ) [ 0 [ base32>ch swap 5 shift bitor ] reduce 5 >be ] - [ [ ch'= = ] count ] bi + [ [ char: = = ] count ] bi [ write ] [ B{ 0 4 0 3 2 0 1 } nth head-slice write ] if-zero ; inline : (decode-base32) ( stream -- ) 8 "\n\r" pick read-ignoring dup length { { 0 [ 2drop ] } { 8 [ decode8 (decode-base32) ] } - [ drop 8 ch'= pad-tail decode8 (decode-base32) ] + [ drop 8 char: = pad-tail decode8 (decode-base32) ] } case ; PRIVATE> diff --git a/extra/base85/base85.factor b/extra/base85/base85.factor index cc973d09bc..914eef2bfc 100644 --- a/extra/base85/base85.factor +++ b/extra/base85/base85.factor @@ -57,7 +57,7 @@ PRIVATE> { 5 [ decode5 write (decode-base85) ] } [ drop - [ 5 ch'~ pad-tail decode5 ] + [ 5 char: ~ pad-tail decode5 ] [ length 5 swap - head-slice* write ] bi (decode-base85) ] diff --git 
a/extra/benchmark/fannkuch/fannkuch.factor b/extra/benchmark/fannkuch/fannkuch.factor index fae652a88b..383657bed2 100644 --- a/extra/benchmark/fannkuch/fannkuch.factor +++ b/extra/benchmark/fannkuch/fannkuch.factor @@ -16,7 +16,7 @@ IN: benchmark.fannkuch ] count ; inline : write-permutation ( perm -- ) - [ ch'0 + write1 ] each nl ; inline + [ char: 0 + write1 ] each nl ; inline : fannkuch-step ( counter max-flips perm -- counter max-flips ) pick 30 < [ [ 1 + ] [ ] [ dup write-permutation ] tri* ] when diff --git a/extra/benchmark/fasta/fasta.factor b/extra/benchmark/fasta/fasta.factor index 4668b7278e..d4e957d00c 100644 --- a/extra/benchmark/fasta/fasta.factor +++ b/extra/benchmark/fasta/fasta.factor @@ -19,30 +19,30 @@ CONSTANT: ALU "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGATCACC CONSTANT: IUB { - { ch'a 0.27 } - { ch'c 0.12 } - { ch'g 0.12 } - { ch't 0.27 } + { char: a 0.27 } + { char: c 0.12 } + { char: g 0.12 } + { char: t 0.27 } - { ch'B 0.02 } - { ch'D 0.02 } - { ch'H 0.02 } - { ch'K 0.02 } - { ch'M 0.02 } - { ch'N 0.02 } - { ch'R 0.02 } - { ch'S 0.02 } - { ch'V 0.02 } - { ch'W 0.02 } - { ch'Y 0.02 } + { char: B 0.02 } + { char: D 0.02 } + { char: H 0.02 } + { char: K 0.02 } + { char: M 0.02 } + { char: N 0.02 } + { char: R 0.02 } + { char: S 0.02 } + { char: V 0.02 } + { char: W 0.02 } + { char: Y 0.02 } } CONSTANT: homo-sapiens { - { ch'a 0.3029549426680 } - { ch'c 0.1979883004921 } - { ch'g 0.1975473066391 } - { ch't 0.3015094502008 } + { char: a 0.3029549426680 } + { char: c 0.1979883004921 } + { char: g 0.1975473066391 } + { char: t 0.3015094502008 } } TYPED: make-cumulative ( freq -- chars: byte-array floats: double-array ) diff --git a/extra/benchmark/fib7/fib7.factor b/extra/benchmark/fib7/fib7.factor index 37c130cb50..3c9febcd7c 100644 --- a/extra/benchmark/fib7/fib7.factor +++ b/extra/benchmark/fib7/fib7.factor @@ -3,7 +3,7 @@ IN: benchmark.fib7 :: matrix-fib ( m -- n ) m 0 >= [ m throw ] unless - m 2 >base [ ch'1 = ] { } map-as :> bits + m 2 >base [ char: 1 = ] { } map-as :> bits 1 :> a! 0 :> b! 1 :> c! bits [ [ diff --git a/extra/benchmark/knucleotide/knucleotide.factor b/extra/benchmark/knucleotide/knucleotide.factor index ec58a62f35..06147f9c28 100644 --- a/extra/benchmark/knucleotide/knucleotide.factor +++ b/extra/benchmark/knucleotide/knucleotide.factor @@ -15,7 +15,7 @@ CONSTANT: knucleotide-in "vocab:benchmark/knucleotide/knucleotide-input.txt" : read-input ( -- input ) discard-lines ">" read-until drop - ch'\n swap remove >upper ; + char: \n swap remove >upper ; : handle-table ( inputs n -- ) clump diff --git a/extra/benchmark/make/make.factor b/extra/benchmark/make/make.factor index 213a22e80d..dd69c777f9 100644 --- a/extra/benchmark/make/make.factor +++ b/extra/benchmark/make/make.factor @@ -6,7 +6,7 @@ USING: kernel make math sequences ; IN: benchmark.make : make-strings ( n -- seq ) - [ [ ch'a , ] times ] "" make ; + [ [ char: a , ] times ] "" make ; : make-arrays ( n -- seq ) [ % ] { } make ; diff --git a/extra/benchmark/regexp/regexp.factor b/extra/benchmark/regexp/regexp.factor index 05152ea4c8..7aaf70d519 100644 --- a/extra/benchmark/regexp/regexp.factor +++ b/extra/benchmark/regexp/regexp.factor @@ -7,7 +7,7 @@ IN: benchmark.regexp : regexp-benchmark ( -- ) 200 20,000 [ number>string ] map - 200 [ 1 + ch'a ] map + 200 [ 1 + char: a ] map '[ _ R/ \d+/ [ matches? ] curry all? t assert= _ R/ [a]+/ [ matches? ] curry all? 
t assert= diff --git a/extra/benchmark/sockets/sockets.factor b/extra/benchmark/sockets/sockets.factor index a926aa992a..ee9e74619f 100644 --- a/extra/benchmark/sockets/sockets.factor +++ b/extra/benchmark/sockets/sockets.factor @@ -19,7 +19,7 @@ CONSTANT: number-of-requests 1000 : server-loop ( server -- ) dup accept drop [ [ - read1 ch'x = [ + read1 char: x = [ server get dispose ] [ number-of-requests @@ -35,16 +35,16 @@ CONSTANT: number-of-requests 1000 : simple-client ( -- ) [ server-addr ascii [ - ch'b write1 flush + char: b write1 flush number-of-requests - [ ch'a dup write1 flush read1 assert= ] times + [ char: a dup write1 flush read1 assert= ] times ] with-client ] try counter get count-down ; : stop-server ( -- ) server-addr ascii [ - ch'x write1 + char: x write1 ] with-client ; : clients ( n -- ) diff --git a/extra/bencode/bencode.factor b/extra/bencode/bencode.factor index 555d0892bd..1e4ad130f3 100644 --- a/extra/bencode/bencode.factor +++ b/extra/bencode/bencode.factor @@ -23,7 +23,7 @@ M: assoc >bencode DEFER: read-bencode : read-integer ( -- obj ) - "e" read-until ch'e assert= string>number ; + "e" read-until char: e assert= string>number ; : read-list ( -- obj ) [ read-bencode dup ] [ ] produce nip ; @@ -34,15 +34,15 @@ DEFER: read-bencode ] [ ] produce nip >hashtable ; : read-string ( prefix -- obj ) - ":" read-until ch'\: assert= swap prefix + ":" read-until char: \: assert= swap prefix string>number read ascii decode ; : read-bencode ( -- obj ) read1 { - { ch'i [ read-integer ] } - { ch'l [ read-list ] } - { ch'd [ read-dictionary ] } - { ch'e [ f ] } + { char: i [ read-integer ] } + { char: l [ read-list ] } + { char: d [ read-dictionary ] } + { char: e [ f ] } [ read-string ] } case ; diff --git a/extra/c/lexer/lexer-tests.factor b/extra/c/lexer/lexer-tests.factor index b21402ed91..d489416952 100644 --- a/extra/c/lexer/lexer-tests.factor +++ b/extra/c/lexer/lexer-tests.factor @@ -51,32 +51,32 @@ IN: c.lexer.tests { f } [ "\"abc\" asdf" - [ ch'\\ ch'\" take-quoted-string drop ] [ "asdf" take-sequence ] bi + [ char: \\ char: \" take-quoted-string drop ] [ "asdf" take-sequence ] bi ] unit-test { "abc\\\"def" } [ "\"abc\\\"def\" asdf" - ch'\\ ch'\" take-quoted-string + char: \\ char: \" take-quoted-string ] unit-test { "asdf" } [ "\"abc\" asdf" - [ ch'\\ ch'\" take-quoted-string drop ] + [ char: \\ char: \" take-quoted-string drop ] [ skip-whitespace "asdf" take-sequence ] bi ] unit-test { f } [ "\"abc asdf" - ch'\\ ch'\" take-quoted-string + char: \\ char: \" take-quoted-string ] unit-test { "\"abc" } [ "\"abc asdf" - [ ch'\\ ch'\" take-quoted-string drop ] + [ char: \\ char: \" take-quoted-string drop ] [ "\"abc" take-sequence ] bi ] unit-test @@ -87,7 +87,7 @@ IN: c.lexer.tests [ "" take-token ] unit-test { "abcd e \\\"f g" } -[ "\"abcd e \\\"f g\"" ch'\\ ch'\" take-token* ] unit-test +[ "\"abcd e \\\"f g\"" char: \\ char: \" take-token* ] unit-test { "123" } [ "123jjj" take-c-integer ] unit-test diff --git a/extra/c/lexer/lexer.factor b/extra/c/lexer/lexer.factor index 5c102bfd80..9584e928c2 100644 --- a/extra/c/lexer/lexer.factor +++ b/extra/c/lexer/lexer.factor @@ -20,7 +20,7 @@ IN: c.lexer dup "//" take-sequence [ [ [ - { [ current ch'\n = ] [ sequence-parse-end? ] } 1|| + { [ current char: \n = ] [ sequence-parse-end? ] } 1|| ] take-until ] [ advance drop @@ -40,7 +40,7 @@ IN: c.lexer : take-define-identifier ( sequence-parser -- string ) skip-whitespace/comments - [ current { [ blank? ] [ ch'\( = ] } 1|| ] take-until ; + [ current { [ blank? 
] [ char: \( = ] } 1|| ] take-until ; :: take-quoted-string ( sequence-parser escape-char quote-char -- string ) sequence-parser n>> :> start-n @@ -69,18 +69,18 @@ IN: c.lexer } case ; : take-token ( sequence-parser -- string/f ) - ch'\\ ch'\" take-token* ; + char: \\ char: \" take-token* ; : c-identifier-begin? ( ch -- ? ) - ch'a ch'z [a,b] - ch'A ch'Z [a,b] - { ch'_ } 3append member? ; + char: a char: z [a,b] + char: A char: Z [a,b] + { char: _ } 3append member? ; : c-identifier-ch? ( ch -- ? ) - ch'a ch'z [a,b] - ch'A ch'Z [a,b] - ch'0 ch'9 [a,b] - { ch'_ } 4 nappend member? ; + char: a char: z [a,b] + char: A char: Z [a,b] + char: 0 char: 9 [a,b] + { char: _ } 4 nappend member? ; : (take-c-identifier) ( sequence-parser -- string/f ) dup current c-identifier-begin? [ diff --git a/extra/c/preprocessor/preprocessor.factor b/extra/c/preprocessor/preprocessor.factor index 11424080ec..c8ecd80ad3 100644 --- a/extra/c/preprocessor/preprocessor.factor +++ b/extra/c/preprocessor/preprocessor.factor @@ -73,8 +73,8 @@ ERROR: header-file-missing path ; : handle-include ( preprocessor-state sequence-parser -- ) skip-whitespace/comments advance dup previous { - { ch'< [ ch'> take-until-object read-standard-include ] } - { ch'\" [ ch'\" take-until-object read-local-include ] } + { char: < [ char: > take-until-object read-standard-include ] } + { char: \" [ char: \" take-until-object read-local-include ] } [ bad-include-line ] } case ; @@ -85,7 +85,7 @@ ERROR: header-file-missing path ; : take-define-identifier ( sequence-parser -- string ) skip-whitespace/comments - [ current { [ blank? ] [ ch'\( = ] } 1|| ] take-until ; + [ current { [ blank? ] [ char: \( = ] } 1|| ] take-until ; :: handle-define ( preprocessor-state sequence-parser -- ) sequence-parser take-define-identifier :> ident @@ -165,7 +165,7 @@ ERROR: header-file-missing path ; ] if ; : preprocess-line ( preprocessor-state sequence-parser -- ) - skip-whitespace/comments dup current ch'# = + skip-whitespace/comments dup current char: # = [ parse-directive-line ] [ swap processing-disabled?>> [ drop ] [ write-full nl ] if ] if ; diff --git a/extra/cgi/cgi.factor b/extra/cgi/cgi.factor index 279555afac..9541d4d19b 100644 --- a/extra/cgi/cgi.factor +++ b/extra/cgi/cgi.factor @@ -11,7 +11,7 @@ IN: cgi : query-string ( string -- assoc ) query>assoc [ nip ] assoc-filter [ - [ [ ch'\s = ] trim ] + [ [ char: \s = ] trim ] [ dup string? [ 1array ] when ] bi* ] assoc-map ; diff --git a/extra/codebook/codebook.factor b/extra/codebook/codebook.factor index e2573b2b1d..332920f571 100644 --- a/extra/codebook/codebook.factor +++ b/extra/codebook/codebook.factor @@ -62,7 +62,7 @@ TUPLE: code-file { { [ dup alpha? ] [ 1string ] } { [ dup digit? ] [ 1string ] } - [ >hex 6 ch'0 pad-head "_" "_" surround ] + [ >hex 6 char: 0 pad-head "_" "_" surround ] } cond ; : file-html-name ( name -- name ) @@ -76,7 +76,7 @@ TUPLE: code-file ! insert zero-width non-joiner between all characters so words can wrap anywhere : zwnj ( string -- s|t|r|i|n|g ) - [ ch'\u00200c "" 2sequence ] { } map-as concat ; + [ char: \u00200c "" 2sequence ] { } map-as concat ; ! We wrap every line in because Kindle tends to forget the font when ! 
moving back pages @@ -87,7 +87,7 @@ TUPLE: code-file "\n" 2array ; : line-number>string ( i line-number-len -- i-string ) - [ number>string ] [ ch'\s pad-head ] bi* ; + [ number>string ] [ char: \s pad-head ] bi* ; :: code>html ( dir file -- page ) file name>> :> name diff --git a/extra/color-table/color-table.factor b/extra/color-table/color-table.factor index dd99652eeb..fdb2c7b8d7 100644 --- a/extra/color-table/color-table.factor +++ b/extra/color-table/color-table.factor @@ -11,7 +11,7 @@ SINGLETON: color-renderer ] +CONSTANT: full-block-string $[ 10 char: full-block ] PRIVATE> diff --git a/extra/colors/flex-hex/flex-hex.factor b/extra/colors/flex-hex/flex-hex.factor index c32f8c9b73..c3b74706f6 100644 --- a/extra/colors/flex-hex/flex-hex.factor +++ b/extra/colors/flex-hex/flex-hex.factor @@ -9,19 +9,19 @@ IN: colors.flex-hex diff --git a/extra/couchdb/couchdb.factor b/extra/couchdb/couchdb.factor index a9c9a3db3e..60c0196a64 100644 --- a/extra/couchdb/couchdb.factor +++ b/extra/couchdb/couchdb.factor @@ -74,7 +74,7 @@ CONSTANT: default-uuids-to-cache 100 default-couch-host default-couch-port ; : (server-url) ( server -- ) - "http://" % [ host>> % ] [ ch'\: , port>> number>string % ] bi ch'/ , ; inline + "http://" % [ host>> % ] [ char: \: , port>> number>string % ] bi char: / , ; inline : server-url ( server -- url ) [ (server-url) ] "" make ; @@ -102,7 +102,7 @@ TUPLE: db { server server } { name string } ; C: db : (db-url) ( db -- ) - [ server>> server-url % ] [ name>> % ] bi ch'/ , ; inline + [ server>> server-url % ] [ name>> % ] bi char: / , ; inline : db-url ( db -- url ) [ (db-url) ] "" make ; diff --git a/extra/cpu/8080/emulator/emulator.factor b/extra/cpu/8080/emulator/emulator.factor index c06eecbce1..4b33db371a 100644 --- a/extra/cpu/8080/emulator/emulator.factor +++ b/extra/cpu/8080/emulator/emulator.factor @@ -512,34 +512,34 @@ ERROR: undefined-8080-opcode n ; : cpu. ( cpu -- ) { - [ " PC: " write pc>> >hex 4 ch'\s pad-head write ] - [ " B: " write b>> >hex 2 ch'\s pad-head write ] - [ " C: " write c>> >hex 2 ch'\s pad-head write ] - [ " D: " write d>> >hex 2 ch'\s pad-head write ] - [ " E: " write e>> >hex 2 ch'\s pad-head write ] - [ " F: " write f>> >hex 2 ch'\s pad-head write ] - [ " H: " write h>> >hex 2 ch'\s pad-head write ] - [ " L: " write l>> >hex 2 ch'\s pad-head write ] - [ " A: " write a>> >hex 2 ch'\s pad-head write ] - [ " SP: " write sp>> >hex 4 ch'\s pad-head write ] - [ " cycles: " write cycles>> number>string 5 ch'\s pad-head write ] + [ " PC: " write pc>> >hex 4 char: \s pad-head write ] + [ " B: " write b>> >hex 2 char: \s pad-head write ] + [ " C: " write c>> >hex 2 char: \s pad-head write ] + [ " D: " write d>> >hex 2 char: \s pad-head write ] + [ " E: " write e>> >hex 2 char: \s pad-head write ] + [ " F: " write f>> >hex 2 char: \s pad-head write ] + [ " H: " write h>> >hex 2 char: \s pad-head write ] + [ " L: " write l>> >hex 2 char: \s pad-head write ] + [ " A: " write a>> >hex 2 char: \s pad-head write ] + [ " SP: " write sp>> >hex 4 char: \s pad-head write ] + [ " cycles: " write cycles>> number>string 5 char: \s pad-head write ] [ bl peek-instruction name>> write bl ] [ nl drop ] } cleave ; : cpu*. 
( cpu -- ) { - [ " PC: " write pc>> >hex 4 ch'\s pad-head write ] - [ " B: " write b>> >hex 2 ch'\s pad-head write ] - [ " C: " write c>> >hex 2 ch'\s pad-head write ] - [ " D: " write d>> >hex 2 ch'\s pad-head write ] - [ " E: " write e>> >hex 2 ch'\s pad-head write ] - [ " F: " write f>> >hex 2 ch'\s pad-head write ] - [ " H: " write h>> >hex 2 ch'\s pad-head write ] - [ " L: " write l>> >hex 2 ch'\s pad-head write ] - [ " A: " write a>> >hex 2 ch'\s pad-head write ] - [ " SP: " write sp>> >hex 4 ch'\s pad-head write ] - [ " cycles: " write cycles>> number>string 5 ch'\s pad-head write ] + [ " PC: " write pc>> >hex 4 char: \s pad-head write ] + [ " B: " write b>> >hex 2 char: \s pad-head write ] + [ " C: " write c>> >hex 2 char: \s pad-head write ] + [ " D: " write d>> >hex 2 char: \s pad-head write ] + [ " E: " write e>> >hex 2 char: \s pad-head write ] + [ " F: " write f>> >hex 2 char: \s pad-head write ] + [ " H: " write h>> >hex 2 char: \s pad-head write ] + [ " L: " write l>> >hex 2 char: \s pad-head write ] + [ " A: " write a>> >hex 2 char: \s pad-head write ] + [ " SP: " write sp>> >hex 4 char: \s pad-head write ] + [ " cycles: " write cycles>> number>string 5 char: \s pad-head write ] [ nl drop ] } cleave ; diff --git a/extra/crontab/crontab.factor b/extra/crontab/crontab.factor index cfcad5f815..a091c27706 100644 --- a/extra/crontab/crontab.factor +++ b/extra/crontab/crontab.factor @@ -12,13 +12,13 @@ ERROR: invalid-cronentry value ; :: parse-value ( value quot: ( value -- value' ) seq -- value ) value { - { [ ch', over member? ] [ + { [ char: , over member? ] [ "," split [ quot seq parse-value ] map concat ] } { [ dup "*" = ] [ drop seq ] } - { [ ch'/ over member? ] [ + { [ char: / over member? ] [ "/" split1 [ quot seq parse-value 0 over length 1 - ] dip string>number swap nths ] } - { [ ch'- over member? ] [ + { [ char: - over member? ] [ "-" split1 quot bi@ [a,b] ] } [ quot call 1array ] } cond ; inline recursive diff --git a/extra/crypto/passwd-md5/passwd-md5.factor b/extra/crypto/passwd-md5/passwd-md5.factor index b77dff778b..778985b497 100644 --- a/extra/crypto/passwd-md5/passwd-md5.factor +++ b/extra/crypto/passwd-md5/passwd-md5.factor @@ -23,7 +23,7 @@ PRIVATE> [ 16 / ceiling swap concat ] keep head-slice append password [ length make-bits ] [ first ] bi - '[ ch'\0 _ ? ] "" map-as append + '[ char: \0 _ ? ] "" map-as append md5 checksum-bytes :> final! 1000 [ diff --git a/extra/ctags/ctags.factor b/extra/ctags/ctags.factor index 236980b491..b3c903c3ca 100644 --- a/extra/ctags/ctags.factor +++ b/extra/ctags/ctags.factor @@ -15,8 +15,8 @@ IN: ctags : ctag ( word path lineno -- str ) [ - [ present % ch'\t , ] - [ normalize-path % ch'\t , ] + [ present % char: \t , ] + [ normalize-path % char: \t , ] [ number>string % ] tri* ] "" make ; diff --git a/extra/cuda/ptx/ptx.factor b/extra/cuda/ptx/ptx.factor index 2ed8008679..11b9bffa2e 100644 --- a/extra/cuda/ptx/ptx.factor +++ b/extra/cuda/ptx/ptx.factor @@ -346,7 +346,7 @@ GENERIC: write-ptx-operand ( operand -- ) M: string write-ptx-operand write ; M: integer write-ptx-operand number>string write ; -M: float write-ptx-operand "0d" write double>bits >hex 16 ch'0 pad-head write ; +M: float write-ptx-operand "0d" write double>bits >hex 16 char: 0 pad-head write ; M: ptx-negation write-ptx-operand "!" 
write var>> write ; M: ptx-vector write-ptx-operand "{" write diff --git a/extra/cuesheet/cuesheet.factor b/extra/cuesheet/cuesheet.factor index 62c8d41b75..2b5b4c1f68 100644 --- a/extra/cuesheet/cuesheet.factor +++ b/extra/cuesheet/cuesheet.factor @@ -55,10 +55,10 @@ ERROR: unknown-syntax syntax ; > last tracks>> last ; diff --git a/extra/curses/curses.factor b/extra/curses/curses.factor index 166e1f3849..ab43de21f3 100644 --- a/extra/curses/curses.factor +++ b/extra/curses/curses.factor @@ -328,18 +328,18 @@ PRIVATE> : waddch ( ch window -- ) ptr>> (waddch) ; : addch ( ch -- ) current-window get waddch ; -: wcnl ( window -- ) [ ch'\n ] dip waddch ; +: wcnl ( window -- ) [ char: \n ] dip waddch ; : cnl ( -- ) current-window get wcnl ; : wcwrite ( string window -- ) ptr>> (wcwrite) ; : cwrite ( string -- ) current-window get wcwrite ; : wcprint ( string window -- ) - ptr>> [ (wcwrite) ] [ ch'\n swap (waddch) ] bi ; + ptr>> [ (wcwrite) ] [ char: \n swap (waddch) ] bi ; : cprint ( string -- ) current-window get wcprint ; : wcprintf ( string window -- ) - ptr>> [ (wcwrite) ] [ ch'\n swap (waddch) ] + ptr>> [ (wcwrite) ] [ char: \n swap (waddch) ] [ (wcrefresh) ] tri ; : cprintf ( string -- ) current-window get wcprintf ; diff --git a/extra/curses/listener/listener.factor b/extra/curses/listener/listener.factor index e487814e6b..f6c7ecaeb6 100644 --- a/extra/curses/listener/listener.factor +++ b/extra/curses/listener/listener.factor @@ -16,7 +16,7 @@ IN: curses.listener ; : delchar ( y x -- ) - [ cmove ch'space addch ] [ cmove ] 2bi ; + [ cmove char: space addch ] [ cmove ] 2bi ; : move-left ( -- ) get-yx [ @@ -27,7 +27,7 @@ IN: curses.listener building get [ pop* move-left ] unless-empty ; : curses-stream-readln ( -- ) - getch dup ch'\n = [ addch ] [ + getch dup char: \n = [ addch ] [ { { KEY_MOUSE [ handle-mouse-click ] } { 127 [ handle-backspace ] } diff --git a/extra/dbf/dbf.factor b/extra/dbf/dbf.factor index b1db990a19..e37b18dd6c 100644 --- a/extra/dbf/dbf.factor +++ b/extra/dbf/dbf.factor @@ -149,20 +149,20 @@ STRUCT: dbf-field-header { index-field-flag uint8_t } ; : read-field-headers ( -- field-headers ) - [ read1 dup { ch'\r ch'\n f } member? not ] [ + [ read1 dup { char: \r char: \n f } member? not ] [ dbf-field-header heap-size 1 - read swap prefix dbf-field-header memory>struct ] produce nip ; : check-field-header ( field-header -- field-header ) dup type>> { - { ch'I [ dup length>> 4 assert= ] } - { ch'L [ dup length>> 1 assert= ] } - { ch'O [ dup length>> 8 assert= ] } - { ch'Y [ dup length>> 8 assert= ] } - { ch'D [ dup length>> 8 assert= ] } - { ch'T [ dup length>> 8 assert= ] } - { ch'M [ dup length>> 10 assert= ] } + { char: I [ dup length>> 4 assert= ] } + { char: L [ dup length>> 1 assert= ] } + { char: O [ dup length>> 8 assert= ] } + { char: Y [ dup length>> 8 assert= ] } + { char: D [ dup length>> 8 assert= ] } + { char: T [ dup length>> 8 assert= ] } + { char: M [ dup length>> 10 assert= ] } [ drop ] } case ; @@ -176,7 +176,7 @@ TUPLE: record deleted? values ; : read-records ( field-headers -- records ) [ read1 dup { 0x1a f } member? not ] [ - ch'* = over [ + char: * = over [ [ length>> read ] [ type>> parse-field ] bi ] map record boa @@ -229,7 +229,7 @@ ERROR: illegal-logical value ; : parse-numeric ( byte-array -- n ) [ "\r\n\t *" member? ] trim - H{ { ch', ch'. } } substitute string>number ; + H{ { char: , char: . 
} } substitute string>number ; : parse-double ( byte-array -- n ) dup length 8 assert= le> bits>double ; @@ -245,27 +245,27 @@ ERROR: unsupported-field-type type ; : parse-field ( byte-array type -- data ) { - { ch'\0 [ ] } - { ch'2 [ parse-short ] } - { ch'4 [ parse-int ] } - { ch'8 [ parse-double ] } - { ch'C [ parse-string ] } - { ch'D [ parse-date ] } - { ch'F [ parse-float ] } - { ch'I [ parse-int ] } - { ch'L [ parse-logical ] } - { ch'N [ parse-numeric ] } - { ch'O [ parse-double ] } - { ch'V [ parse-string ] } - { ch'Y [ parse-currency ] } - { ch'@ [ parse-timestamp ] } - ! { ch'+ [ parse-autoincrement ] } - ! { ch'M [ parse-memo ] } - ! { ch'T [ parse-datetime ] } - ! { ch'B [ parse-double? ] } ! (only on dbversion in [0x30, 0x31, 0x32]) - ! { ch'G [ parse-general ] } - ! { ch'P [ parse-picture ] } - ! { ch'Q [ parse-varbinary ] } + { char: \0 [ ] } + { char: 2 [ parse-short ] } + { char: 4 [ parse-int ] } + { char: 8 [ parse-double ] } + { char: C [ parse-string ] } + { char: D [ parse-date ] } + { char: F [ parse-float ] } + { char: I [ parse-int ] } + { char: L [ parse-logical ] } + { char: N [ parse-numeric ] } + { char: O [ parse-double ] } + { char: V [ parse-string ] } + { char: Y [ parse-currency ] } + { char: @ [ parse-timestamp ] } + ! { char: + [ parse-autoincrement ] } + ! { char: M [ parse-memo ] } + ! { char: T [ parse-datetime ] } + ! { char: B [ parse-double? ] } ! (only on dbversion in [0x30, 0x31, 0x32]) + ! { char: G [ parse-general ] } + ! { char: P [ parse-picture ] } + ! { char: Q [ parse-varbinary ] } [ unsupported-field-type ] } case ; diff --git a/extra/decimals/decimals.factor b/extra/decimals/decimals.factor index a067ff3ffd..d8ee2d4a6c 100644 --- a/extra/decimals/decimals.factor +++ b/extra/decimals/decimals.factor @@ -13,8 +13,8 @@ C: decimal : string>mantissa-exponent ( string -- mantissa exponent ) "." split1 - [ [ ch'0 = ] trim-head [ "0" ] when-empty ] - [ [ ch'0 = ] trim-tail [ "" ] when-empty ] bi* + [ [ char: 0 = ] trim-head [ "0" ] when-empty ] + [ [ char: 0 = ] trim-tail [ "" ] when-empty ] bi* [ append string>number ] [ nip length neg ] 2bi ; inline : string>decimal ( string -- decimal ) diff --git a/extra/enigma/enigma.factor b/extra/enigma/enigma.factor index 26f2bfdd34..c0714909e6 100644 --- a/extra/enigma/enigma.factor +++ b/extra/enigma/enigma.factor @@ -36,10 +36,10 @@ TUPLE: enigma cogs prev-cogs reflector ; enigma cogs>> :> cogs enigma reflector>> :> reflector text >lower [ - ch'a mod dup special? [ + char: a mod dup special? [ ln 1 + ln! cogs [ nth ] each reflector nth - cogs reverse [ index ] each ch'a + + cogs reverse [ index ] each char: a + cogs length [ 6 * 1 + ln mod zero? ] filter cogs [ unclip prefix ] change-nths ] unless diff --git a/extra/find/extras/extras-tests.factor b/extra/find/extras/extras-tests.factor index 277d1a31ff..48c3879464 100644 --- a/extra/find/extras/extras-tests.factor +++ b/extra/find/extras/extras-tests.factor @@ -16,7 +16,7 @@ IN: find.extras.tests "lol" 3 T{ slice f 0 3 "lol" } -} [ "lol" 0 [ ch'a ch'z between? ] take-empty-from ] unit-test +} [ "lol" 0 [ char: a char: z between? 
] take-empty-from ] unit-test { "lol" @@ -28,7 +28,7 @@ IN: find.extras.tests { "[=[" 3 T{ slice f 0 3 "[=[" } } [ "[=[" 0 { [ - { [ "[" head-from ] [ [ ch'= = ] take-empty-from ] [ "[" head-from ] } find-quots + { [ "[" head-from ] [ [ char: = = ] take-empty-from ] [ "[" head-from ] } find-quots dup [ slices-combine ] when ] } find-quots slices-combine @@ -37,7 +37,7 @@ IN: find.extras.tests { "[=" 0 f } [ "[=" 0 { [ - { [ "[" head-from ] [ [ ch'= = ] take-empty-from ] [ "[" head-from ] } find-quots + { [ "[" head-from ] [ [ char: = = ] take-empty-from ] [ "[" head-from ] } find-quots dup [ slices-combine ] when ] } find-quots slices-combine @@ -46,7 +46,7 @@ IN: find.extras.tests { "[" 0 f } [ "[" 0 { [ - { [ "[" head-from ] [ [ ch'= = ] take-empty-from ] [ "[" head-from ] } find-quots + { [ "[" head-from ] [ [ char: = = ] take-empty-from ] [ "[" head-from ] } find-quots dup [ slices-combine ] when ] } find-quots slices-combine @@ -55,7 +55,7 @@ IN: find.extras.tests { "" 0 f } [ "" 0 { [ - { [ "[" head-from ] [ [ ch'= = ] take-empty-from ] [ "[" head-from ] } find-quots + { [ "[" head-from ] [ [ char: = = ] take-empty-from ] [ "[" head-from ] } find-quots dup [ slices-combine ] when ] } find-quots slices-combine @@ -65,7 +65,7 @@ IN: find.extras.tests { f 0 f } [ f 0 { [ - { [ "[" head-from ] [ [ ch'= = ] take-empty-from ] [ "[" head-from ] } find-quots + { [ "[" head-from ] [ [ char: = = ] take-empty-from ] [ "[" head-from ] } find-quots dup [ slices-combine ] when ] } find-quots slices-combine diff --git a/extra/find/extras/extras.factor b/extra/find/extras/extras.factor index 9cb0b80dea..0ee2298a35 100644 --- a/extra/find/extras/extras.factor +++ b/extra/find/extras/extras.factor @@ -144,15 +144,15 @@ CONSTANT: all-separators $[ whitespace-separators modern-separators append ] '[ _ head-from ] dip swap ; : opening-lua-bracket-from ( seq n -- seq n' slices ) - { [ tag-from ] [ "[" head-from ] [ [ ch'= = ] take-empty-from ] [ "[" head-from ] } + { [ tag-from ] [ "[" head-from ] [ [ char: = = ] take-empty-from ] [ "[" head-from ] } find-quots ; : opening-lua-brace-from ( seq n -- seq n' slices ) - { [ tag-from ] [ "{" head-from ] [ [ ch'= = ] take-empty-from ] [ "{" head-from ] } + { [ tag-from ] [ "{" head-from ] [ [ char: = = ] take-empty-from ] [ "{" head-from ] } find-quots ; : opening-lua-paren-from ( seq n -- seq n' slices ) - { [ tag-from ] [ "(" head-from ] [ [ ch'= = ] take-empty-from ] [ "(" head-from ] } + { [ tag-from ] [ "(" head-from ] [ [ char: = = ] take-empty-from ] [ "(" head-from ] } find-quots ; : read-lua-string ( seq n pair -- seq n' ) diff --git a/extra/fjsc/fjsc.factor b/extra/fjsc/fjsc.factor index dbfdd82631..306825438f 100644 --- a/extra/fjsc/fjsc.factor +++ b/extra/fjsc/fjsc.factor @@ -32,8 +32,8 @@ TUPLE: ast-hashtable elements ; [ { [ blank? not ] - [ ch'\" = not ] - [ ch'\; = not ] + [ char: \" = not ] + [ char: \; = not ] [ LETTER? not ] [ letter? not ] [ identifier-middle? not ] @@ -59,8 +59,8 @@ DEFER: expression-parser [ { [ blank? not ] - [ ch'\) = not ] - [ ch'- = not ] + [ char: \) = not ] + [ char: - = not ] } 1&& ] satisfy repeat1 [ >string ] action ; @@ -115,7 +115,7 @@ DEFER: expression-parser [ "!" 
token hide , [ - dup ch'\n = swap ch'\r = or not + dup char: \n = swap char: \r = or not ] satisfy repeat0 , ] seq* [ drop ast-comment boa ] action ; diff --git a/extra/flip-text/flip-text.factor b/extra/flip-text/flip-text.factor index 60333a7aec..954229a6e6 100644 --- a/extra/flip-text/flip-text.factor +++ b/extra/flip-text/flip-text.factor @@ -8,84 +8,84 @@ IN: flip-text } - { ch'_ 0x203E } + { char: A 0x2200 } + { char: B 0x10412 } + { char: C 0x03FD } + { char: D 0x15E1 } + { char: E 0x018E } + { char: F 0x2132 } + { char: G 0x2141 } + ! { char: H char: H } + ! { char: I char: I } + { char: J 0x148B } + { char: K 0x004B } + { char: L 0x2142 } + { char: M char: W } + ! { char: N char: N } + ! { char: O char: O } + { char: P 0x0500 } + { char: Q 0x038C } + { char: R 0x1D1A } + ! { char: S char: S } + { char: T 0x22A5 } + { char: U 0x0548 } + { char: V 0x039B } + { char: W char: M } + ! { char: X char: X } + { char: Y 0x2144 } + ! { char: Z char: Z } + { char: a 0x0250 } + { char: b char: q } + { char: c 0x0254 } + { char: d char: p } + { char: e 0x01DD } + { char: f 0x025F } + { char: g 0x1D77 } ! or 0183 + { char: h 0x0265 } + { char: i 0x1D09 } ! or 0131 + { char: j 0x027E } ! or 1E37 + { char: k 0x029E } + { char: l 0x0283 } ! or 237 + { char: m 0x026F } + { char: n char: u } + ! { char: o char: o } + { char: p char: d } + { char: q char: b } + { char: r 0x0279 } + ! { char: s char: s } + { char: t 0x0287 } + { char: u char: n } + { char: v 0x028C } + { char: w 0x028D } + { char: y 0x028E } + ! { char: z char: z } + ! { char: 0 char: 0 } + { char: 1 0x21C2 } + { char: 2 0x1105 } + { char: 3 0x0190 } ! or 1110 + { char: 4 0x152D } + ! { char: 5 char: 5 } + { char: 6 char: 9 } + { char: 7 0x2C62 } + ! { char: 8 char: 8 } + { char: 9 char: 6 } + { char: & 0x214B } + { char: \! 0x00A1 } + { char: \" 0x201E } + { char: . 0x02D9 } + { char: \; 0x061B } + { char: \[ char: \] } + { char: \( char: \) } + { char: \{ char: \} } + { char: ? 0x00BF } + { char: \! 0x00A1 } + { char: \' char: , } + { char: < char: > } + { char: _ 0x203E } { 0x203F 0x2040 } { 0x2045 0x2046 } { 0x2234 0x2235 } - { ch'\r ch'\n } + { char: \r char: \n } } CHARS [ CHARS set-at ] assoc-each diff --git a/extra/fuel/help/help.factor b/extra/fuel/help/help.factor index fa83144030..6929ed8e9a 100644 --- a/extra/fuel/help/help.factor +++ b/extra/fuel/help/help.factor @@ -18,7 +18,7 @@ SYMBOLS: $doc-path $next-link $prev-link $fuel-nav-crumbs ; : vocab-own-crumbs ( vocab-name -- crumbs ) "." split unclip [ - [ ch'. suffix ] dip append + [ char: . suffix ] dip append ] accumulate swap suffix [ dup "." split last \ vocab 3array ] map ; diff --git a/extra/fuel/pprint/pprint.factor b/extra/fuel/pprint/pprint.factor index 978415a095..251bfced9e 100644 --- a/extra/fuel/pprint/pprint.factor +++ b/extra/fuel/pprint/pprint.factor @@ -12,7 +12,7 @@ GENERIC: fuel-pprint ( obj -- ) ch'y 6 pick insert* >string ] unit-test +[ "factor" char: y 6 pick insert* >string ] unit-test ! test inserting multiple elements in different places. buffer should grow { "refractory" } -[ "factor" ch'y 6 pick insert* "re" 0 pick insert* ch'r 3 pick insert* >string ] unit-test +[ "factor" char: y 6 pick insert* "re" 0 pick insert* char: r 3 pick insert* >string ] unit-test ! test deleting elements. buffer should shrink { "for" } @@ -60,19 +60,19 @@ USING: kernel sequences tools.test gap-buffer strings math ; ! 
more testing of nth and set-nth { "raptor" } -[ "factor" ch'p 2 pick set-nth 5 over nth 0 pick set-nth >string ] unit-test +[ "factor" char: p 2 pick set-nth 5 over nth 0 pick set-nth >string ] unit-test ! test stack/queue operations { "slaughter" } -[ "laughter" ch's over push-start >string ] unit-test +[ "laughter" char: s over push-start >string ] unit-test { "pantonio" } [ "pant" "onio" over push-end >string ] unit-test -{ ch'f "actor" } +{ char: f "actor" } [ "factor" dup pop-start swap >string ] unit-test -{ ch's "pant" } +{ char: s "pant" } [ "pants" dup pop-end swap >string ] unit-test { "end this is the " } diff --git a/extra/git/git.factor b/extra/git/git.factor index 0221faeeec..a0918064dc 100644 --- a/extra/git/git.factor +++ b/extra/git/git.factor @@ -164,8 +164,8 @@ ERROR: unexpected-text text ; : parse-commit-lines ( obj -- obj ) " \n" read-until { - { ch'\s [ parse-commit-field parse-commit-lines ] } - { ch'\n [ drop contents >>message ] } + { char: \s [ parse-commit-field parse-commit-lines ] } + { char: \n [ drop contents >>message ] } [ unexpected-text ] } case ; @@ -186,8 +186,8 @@ ERROR: unexpected-text text ; : parse-tree-lines ( obj -- obj ) "\s\n" read-until { - { ch'\s [ parse-tree-field parse-tree-lines ] } - { ch'\n [ drop contents >>message ] } + { char: \s [ parse-tree-field parse-tree-lines ] } + { char: \n [ drop contents >>message ] } [ unexpected-text ] } case ; @@ -198,8 +198,8 @@ ERROR: key-already-set value key assoc ; : parse-object-line>assoc ( hashtable -- hashtable ) "\s\n" read-until { - { ch'\s [ [ "\r\n" read-until* ] dip pick over "parent" = [ push-at ] [ set-at-once ] if parse-object-line>assoc ] } - { ch'\n [ drop contents "message" pick set-at ] } + { char: \s [ [ "\r\n" read-until* ] dip pick over "parent" = [ push-at ] [ set-at-once ] if parse-object-line>assoc ] } + { char: \n [ drop contents "message" pick set-at ] } } case ; : assoc>commit ( assoc -- commit ) diff --git a/extra/gopher/gopher.factor b/extra/gopher/gopher.factor index 3759d04837..af48391f1f 100644 --- a/extra/gopher/gopher.factor +++ b/extra/gopher/gopher.factor @@ -13,31 +13,31 @@ IN: gopher url present ; unclip swap "\t" split first4 gopher-link boa ; M: gopher-link >url - dup type>> ch'h = [ + dup type>> char: h = [ selector>> "URL:" ?head drop ] [ { @@ -78,7 +78,7 @@ M: gopher-link >url ] if >url ; : gopher-link. ( gopher-link -- ) - dup type>> ch'i = [ + dup type>> char: i = [ name>> print ] [ [ name>> ] keep [ diff --git a/extra/gpu/demos/bunny/bunny.factor b/extra/gpu/demos/bunny/bunny.factor index 747affdde4..74093a2e44 100644 --- a/extra/gpu/demos/bunny/bunny.factor +++ b/extra/gpu/demos/bunny/bunny.factor @@ -94,8 +94,8 @@ UNIFORM-TUPLE: loading-uniforms [ [ pick push ] unless-empty ] [ { - { ch'\s [ (read-line-tokens) ] } - { ch'\n [ drop ] } + { char: \s [ (read-line-tokens) ] } + { char: \n [ drop ] } [ 2drop [ f ] when-empty ] } case ] bi* ; inline recursive diff --git a/extra/hamurabi/hamurabi.factor b/extra/hamurabi/hamurabi.factor index c6cc711599..3a784c301a 100644 --- a/extra/hamurabi/hamurabi.factor +++ b/extra/hamurabi/hamurabi.factor @@ -220,7 +220,7 @@ total-births total-deaths ; check-starvation ; : spaces ( n -- ) - ch'\s write ; + char: \s write ; : welcome ( -- ) 32 spaces "HAMURABI" print diff --git a/extra/hashcash/hashcash.factor b/extra/hashcash/hashcash.factor index a5476571ed..c53850d956 100644 --- a/extra/hashcash/hashcash.factor +++ b/extra/hashcash/hashcash.factor @@ -25,7 +25,7 @@ IN: hashcash ! 
Random salt is formed by ascii characters ! between 33 and 126 : available-chars ( -- seq ) - 33 126 [a,b] [ ch'\: = ] reject ; + 33 126 [a,b] [ char: \: = ] reject ; PRIVATE> @@ -54,14 +54,14 @@ M: hashcash string>> >hex >>suffix ; : get-bits ( bytes -- str ) - [ >bin 8 ch'0 pad-head ] { } map-as concat ; + [ >bin 8 char: 0 pad-head ] { } map-as concat ; : checksummed-bits ( tuple -- relevant-bits ) dup string>> sha1-checksum swap bits>> 8 / ceiling head get-bits ; : all-char-zero? ( seq -- ? ) - [ ch'0 = ] all? ; inline + [ char: 0 = ] all? ; inline : valid-guess? ( checksum tuple -- ? ) bits>> head all-char-zero? ; diff --git a/extra/html/entities/entities.factor b/extra/html/entities/entities.factor index 22e036aef6..74899145a1 100644 --- a/extra/html/entities/entities.factor +++ b/extra/html/entities/entities.factor @@ -10,11 +10,11 @@ IN: html.entities ">" } - { ch'\" """ } - { ch'\' "'" } + { char: & "&" } + { char: < "<" } + { char: > ">" } + { char: \" """ } + { char: \' "'" } } : next-escape ( seq -- i elt ) diff --git a/extra/html/parser/parser.factor b/extra/html/parser/parser.factor index 455b1489b2..be5a302f63 100644 --- a/extra/html/parser/parser.factor +++ b/extra/html/parser/parser.factor @@ -20,7 +20,7 @@ SYMBOL: tagstack : closing-tag? ( string -- ? ) [ f ] - [ { [ first ch'/ = ] [ last ch'/ = ] } 1|| ] if-empty ; + [ { [ first char: / = ] [ last char: / = ] } 1|| ] if-empty ; : ( name attributes closing? -- tag ) tag new @@ -40,18 +40,18 @@ SYMBOL: tagstack '[ [ current _ = ] take-until ] [ advance drop ] bi ; : read-single-quote ( sequence-parser -- string ) - ch'\' (read-quote) ; + char: \' (read-quote) ; : read-double-quote ( sequence-parser -- string ) - ch'\" (read-quote) ; + char: \" (read-quote) ; : read-quote ( sequence-parser -- string ) - dup get+increment ch'\' = + dup get+increment char: \' = [ read-single-quote ] [ read-double-quote ] if ; : read-key ( sequence-parser -- string ) skip-whitespace - [ current { [ ch'= = ] [ blank? ] } 1|| ] take-until ; + [ current { [ char: = = ] [ blank? ] } 1|| ] take-until ; : read-token ( sequence-parser -- string ) [ current blank? ] take-until ; @@ -70,17 +70,17 @@ SYMBOL: tagstack [ advance drop ] bi ; : read-bang ( sequence-parser -- ) - advance dup { [ current ch'- = ] [ peek-next ch'- = ] } 1&& + advance dup { [ current char: - = ] [ peek-next char: - = ] } 1&& [ advance advance read-comment ] [ read-dtd ] if ; : read-tag ( sequence-parser -- string ) [ [ current "><" member? ] take-until - [ ch'/ = ] trim-tail - ] [ dup current ch'< = [ advance ] unless drop ] bi ; + [ char: / = ] trim-tail + ] [ dup current char: < = [ advance ] unless drop ] bi ; : read-until-< ( sequence-parser -- string ) - [ current ch'< = ] take-until ; + [ current char: < = ] take-until ; : parse-text ( sequence-parser -- ) read-until-< [ text new-tag push-tag ] unless-empty ; @@ -108,7 +108,7 @@ SYMBOL: tagstack : read-< ( sequence-parser -- string/f ) advance dup current [ - ch'\! = [ read-bang f ] [ read-tag ] if + char: \! = [ read-bang f ] [ read-tag ] if ] [ drop f ] if* ; diff --git a/extra/html/parser/utils/utils.factor b/extra/html/parser/utils/utils.factor index e819ee655d..f4805e29ae 100644 --- a/extra/html/parser/utils/utils.factor +++ b/extra/html/parser/utils/utils.factor @@ -11,7 +11,7 @@ IN: html.parser.utils : double-quote ( str -- newstr ) "\"" dup surround ; : quote ( str -- newstr ) - ch'\' over member? + char: \' over member? [ double-quote ] [ single-quote ] if ; : ?quote ( str -- newstr ) dup quoted? 
[ quote ] unless ; diff --git a/extra/images/bitmap/bitmap.factor b/extra/images/bitmap/bitmap.factor index 9cc39b0adb..21c88eb0bc 100644 --- a/extra/images/bitmap/bitmap.factor +++ b/extra/images/bitmap/bitmap.factor @@ -387,7 +387,7 @@ M: bmp-image stream>image* ( stream bmp-image -- bitmap ) ] bi ; : output-bmp ( image -- ) - B{ ch'B ch'M } write + B{ char: B char: M } write [ bitmap>> length 14 + 40 + write4 0 write4 diff --git a/extra/images/pbm/pbm.factor b/extra/images/pbm/pbm.factor index 1870afc8ef..2c14f3c6d7 100644 --- a/extra/images/pbm/pbm.factor +++ b/extra/images/pbm/pbm.factor @@ -15,7 +15,7 @@ SINGLETON: pbm-image read1 dup blank? [ t ] [ - dup ch'# = + dup char: # = [ "\n" read-until 2drop t ] [ f ] if ] if @@ -28,8 +28,8 @@ SINGLETON: pbm-image : read-ascii-bits ( -- ) read1 { - { ch'1 [ 0 , read-ascii-bits ] } - { ch'0 [ 255 , read-ascii-bits ] } + { char: 1 [ 0 , read-ascii-bits ] } + { char: 0 [ 255 , read-ascii-bits ] } { f [ ] } [ drop read-ascii-bits ] } case ; diff --git a/extra/images/pgm/pgm.factor b/extra/images/pgm/pgm.factor index c39a215b14..f880a516cb 100644 --- a/extra/images/pgm/pgm.factor +++ b/extra/images/pgm/pgm.factor @@ -12,7 +12,7 @@ SINGLETON: pgm-image : read-token ( -- token ) [ read1 dup blank? [ t ] - [ dup ch'# = + [ dup char: # = [ "\n" read-until 2drop t ] [ f ] if ] if diff --git a/extra/images/ppm/ppm.factor b/extra/images/ppm/ppm.factor index a33e20086a..d6225216c6 100644 --- a/extra/images/ppm/ppm.factor +++ b/extra/images/ppm/ppm.factor @@ -11,7 +11,7 @@ SINGLETON: ppm-image : read-token ( -- token ) [ read1 dup blank? [ t ] - [ dup ch'# = + [ dup char: # = [ "\n" read-until 2drop t ] [ f ] if ] if diff --git a/extra/images/tiff/tiff.factor b/extra/images/tiff/tiff.factor index 401f4c5ee0..1598f68e33 100644 --- a/extra/images/tiff/tiff.factor +++ b/extra/images/tiff/tiff.factor @@ -230,8 +230,8 @@ ERROR: bad-jpeg-proc n ; ERROR: bad-tiff-magic bytes ; : tiff-endianness ( byte-array -- ? 
) { - { B{ ch'M ch'M } [ big-endian ] } - { B{ ch'I ch'I } [ little-endian ] } + { B{ char: M char: M } [ big-endian ] } + { B{ char: I char: I } [ little-endian ] } [ bad-tiff-magic ] } case ; diff --git a/extra/imap/imap-tests.factor b/extra/imap/imap-tests.factor index 7c19cb76ad..2e8a5d7e2b 100644 --- a/extra/imap/imap-tests.factor +++ b/extra/imap/imap-tests.factor @@ -7,7 +7,7 @@ FROM: pcre => findall ; IN: imap.tests : random-ascii ( n -- str ) - [ ch'a ch'z [a,b] random ] "" replicate-as ; + [ char: a char: z [a,b] random ] "" replicate-as ; : make-mail ( from -- mail ) now timestamp>rfc822 swap 10000 random diff --git a/extra/infix/infix-tests.factor b/extra/infix/infix-tests.factor index 47fcbe1160..5c4a2a6d5e 100644 --- a/extra/infix/infix-tests.factor +++ b/extra/infix/infix-tests.factor @@ -34,8 +34,8 @@ IN: infix.tests { -1 } [ let[ 1 :> a infix[[ -a ]] ] ] unit-test -{ ch'f } [ let[ "foo" :> s infix[[ s[0] ]] ] ] unit-test -{ ch'r } [ let[ "bar" :> s infix[[ s[-1] ]] ] ] unit-test +{ char: f } [ let[ "foo" :> s infix[[ s[0] ]] ] ] unit-test +{ char: r } [ let[ "bar" :> s infix[[ s[-1] ]] ] ] unit-test { "foo" } [ let[ "foobar" :> s infix[[ s[0:3] ]] ] ] unit-test { "foo" } [ let[ "foobar" :> s infix[[ s[:3] ]] ] ] unit-test { "bar" } [ let[ "foobar" :> s infix[[ s[-3:] ]] ] ] unit-test diff --git a/extra/infix/tokenizer/tokenizer-tests.factor b/extra/infix/tokenizer/tokenizer-tests.factor index d4f528e533..dfb54f302c 100644 --- a/extra/infix/tokenizer/tokenizer-tests.factor +++ b/extra/infix/tokenizer/tokenizer-tests.factor @@ -4,18 +4,18 @@ USING: infix.ast infix.tokenizer tools.test ; IN: infix.tokenizer.tests { V{ T{ ast-value f 1 } } } [ "1" tokenize-infix ] unit-test -{ V{ T{ ast-value f 1.02 } ch'* T{ ast-value f 3 } } } [ "1.02*3" tokenize-infix ] unit-test -{ V{ T{ ast-value f 3 } ch'/ ch'\( T{ ast-value f 3 } ch'+ T{ ast-value f 4 } ch'\) } } +{ V{ T{ ast-value f 1.02 } char: * T{ ast-value f 3 } } } [ "1.02*3" tokenize-infix ] unit-test +{ V{ T{ ast-value f 3 } char: / char: \( T{ ast-value f 3 } char: + T{ ast-value f 4 } char: \) } } [ "3/(3+4)" tokenize-infix ] unit-test -{ V{ "foo" ch'\( "x" ch', "y" ch', "z" ch'\) } } [ "foo(x,y,z)" tokenize-infix ] unit-test -{ V{ "arr" ch'\[ "x" ch'+ T{ ast-value f 3 } ch'\] } } +{ V{ "foo" char: \( "x" char: , "y" char: , "z" char: \) } } [ "foo(x,y,z)" tokenize-infix ] unit-test +{ V{ "arr" char: \[ "x" char: + T{ ast-value f 3 } char: \] } } [ "arr[x+3]" tokenize-infix ] unit-test [ "1.0.4" tokenize-infix ] must-fail -{ V{ ch'+ ch'\] T{ ast-value f 3.4 } ch', "bar" } } +{ V{ char: + char: \] T{ ast-value f 3.4 } char: , "bar" } } [ "+]3.4,bar" tokenize-infix ] unit-test { V{ "baz_34c" } } [ "baz_34c" tokenize-infix ] unit-test { V{ T{ ast-value f 34 } "c_baz" } } [ "34c_baz" tokenize-infix ] unit-test -{ V{ ch'\( T{ ast-value f 1 } ch'+ T{ ast-value f 2 } ch'\) } } +{ V{ char: \( T{ ast-value f 1 } char: + T{ ast-value f 2 } char: \) } } [ "(1+2)" tokenize-infix ] unit-test -{ V{ T{ ast-value f 1 } ch'+ T{ ast-value f 2 } ch'/ T{ ast-value f 3 } } } +{ V{ T{ ast-value f 1 } char: + T{ ast-value f 2 } char: / T{ ast-value f 3 } } } [ "1\n+\r2\t/ 3" tokenize-infix ] unit-test diff --git a/extra/infix/tokenizer/tokenizer.factor b/extra/infix/tokenizer/tokenizer.factor index c548bc3961..8c0aafac63 100644 --- a/extra/infix/tokenizer/tokenizer.factor +++ b/extra/infix/tokenizer/tokenizer.factor @@ -13,12 +13,12 @@ Number = Digits '.' 
Digits => [[ "" concat-as string>number ast-val String = '"' [^"]* '"' => [[ second >string ast-value boa ]] Space = [ \t\n\r] Spaces = Space* => [[ ignore ]] -NameFirst = Letter | "_" => [[ ch'_ ]] +NameFirst = Letter | "_" => [[ char: _ ]] NameRest = NameFirst | Digit Name = NameFirst NameRest* => [[ first2 swap prefix >string ]] -Special = [+*/%(),] | "-" => [[ ch'- ]] - | "[" => [[ ch'\[ ]] | "]" => [[ ch'\] ]] - | ":" => [[ ch'\: ]] +Special = [+*/%(),] | "-" => [[ char: - ]] + | "[" => [[ char: \[ ]] | "]" => [[ char: \] ]] + | ":" => [[ char: \: ]] Tok = Spaces (Name | Number | String | Special ) End = !(.) Toks = Tok* Spaces End diff --git a/extra/ini-file/ini-file.factor b/extra/ini-file/ini-file.factor index 00e7d082da..01780dafde 100644 --- a/extra/ini-file/ini-file.factor +++ b/extra/ini-file/ini-file.factor @@ -11,25 +11,25 @@ IN: ini-file : escape ( ch -- ch' ) H{ - { ch'a ch'\a } - { ch'b ch'\b } - { ch'f ch'\f } - { ch'n ch'\n } - { ch'r ch'\r } - { ch't ch'\t } - { ch'v ch'\v } - { ch'\' ch'\' } - { ch'\" ch'\" } - { ch'\\ ch'\\ } - { ch'? ch'? } - { ch'\; ch'\; } - { ch'\[ ch'\[ } - { ch'\] ch'\] } - { ch'= ch'= } + { char: a char: \a } + { char: b char: \b } + { char: f char: \f } + { char: n char: \n } + { char: r char: \r } + { char: t char: \t } + { char: v char: \v } + { char: \' char: \' } + { char: \" char: \" } + { char: \\ char: \\ } + { char: ? char: ? } + { char: \; char: \; } + { char: \[ char: \[ } + { char: \] char: \] } + { char: = char: = } } ?at [ bad-escape ] unless ; : (unescape-string) ( str -- ) - ch'\\ over index [ + char: \\ over index [ cut-slice [ % ] dip rest-slice dup empty? [ "Missing escape code" throw ] when unclip-slice escape , (unescape-string) @@ -42,21 +42,21 @@ USE: xml.entities : escape-string ( str -- str' ) H{ - { ch'\a "\\a" } + { char: \a "\\a" } { 0x08 "\\b" } { 0x0c "\\f" } - { ch'\n "\\n" } - { ch'\r "\\r" } - { ch'\t "\\t" } + { char: \n "\\n" } + { char: \r "\\r" } + { char: \t "\\t" } { 0x0b "\\v" } - { ch'\' "\\'" } - { ch'\" "\\\"" } - { ch'\\ "\\\\" } - { ch'? "\\?" } - { ch'\; "\\;" } - { ch'\[ "\\[" } - { ch'\] "\\]" } - { ch'= "\\=" } + { char: \' "\\'" } + { char: \" "\\\"" } + { char: \\ "\\\\" } + { char: ? "\\?" } + { char: \; "\\;" } + { char: \[ "\\[" } + { char: \] "\\]" } + { char: = "\\=" } } escape-string-by ; : space? ( ch -- ? ) @@ -80,12 +80,12 @@ SYMBOL: option : section? ( line -- index/f ) { [ length 1 > ] - [ first ch'\[ = ] - [ ch'\] swap last-index ] + [ first char: \[ = ] + [ char: \] swap last-index ] } 1&& ; : line-continues? ( line -- ? 
) - ?last ch'\\ = ; + ?last char: \\ = ; : section, ( -- ) section get [ , ] when* ; diff --git a/extra/io/encodings/detect/detect.factor b/extra/io/encodings/detect/detect.factor index 5b321c391b..f8e7e7e5be 100644 --- a/extra/io/encodings/detect/detect.factor +++ b/extra/io/encodings/detect/detect.factor @@ -12,7 +12,7 @@ INITIALIZED-SYMBOL: default-encoding [ latin1 ] over index [ 1 + head-slice ] when* >string ; + char: > over index [ 1 + head-slice ] when* >string ; : prolog-encoding ( string -- iana-encoding ) '[ diff --git a/extra/io/streams/peek/peek-tests.factor b/extra/io/streams/peek/peek-tests.factor index b0cea6c7c0..bea4b038e4 100644 --- a/extra/io/streams/peek/peek-tests.factor +++ b/extra/io/streams/peek/peek-tests.factor @@ -5,16 +5,16 @@ io.encodings.binary io.files io.streams.byte-array io.streams.memory io.streams.peek io.streams.string kernel make namespaces sequences strings tools.test ; -{ ch'a } +{ char: a } [ "abc" stream-read1 ] unit-test -{ ch'a } +{ char: a } [ "abc" stream-peek1 ] unit-test { f } [ "" stream-peek1 ] unit-test -{ ch'a } +{ char: a } [ "abc" stream-peek1 ] unit-test { "ab" 99 } @@ -23,7 +23,7 @@ namespaces sequences strings tools.test ; { "ab" f } [ "ab" "c" swap stream-read-until ] unit-test -{ ch'a } +{ char: a } [ "abc" [ stream-peek1 drop ] diff --git a/extra/irc/client/participants/participants.factor b/extra/irc/client/participants/participants.factor index 5a58312a38..95c8e5fa75 100644 --- a/extra/irc/client/participants/participants.factor +++ b/extra/irc/client/participants/participants.factor @@ -37,13 +37,13 @@ M: irc-channel-chat has-participant? participants>> key? ; : apply-mode ( ? participant mode -- ) { - { ch'o [ operator<< ] } - { ch'v [ voice<< ] } + { char: o [ operator<< ] } + { char: v [ voice<< ] } [ 3drop ] } case ; : apply-modes ( mode-line participant -- ) - [ unclip ch'+ = ] dip + [ unclip char: + = ] dip '[ [ _ _ ] dip apply-mode ] each ; : change-participant-mode ( mode channel nick -- ) diff --git a/extra/irc/messages/base/base.factor b/extra/irc/messages/base/base.factor index da499b7cc9..2d726143bf 100644 --- a/extra/irc/messages/base/base.factor +++ b/extra/irc/messages/base/base.factor @@ -80,7 +80,7 @@ M: irc-message set-irc-command [ prefix>> ] [ command>> ] [ parameters>> " " join ] - [ trailing>> dup [ ch'\: prefix ] when ] + [ trailing>> dup [ char: \: prefix ] when ] } cleave 4array sift " " join ; > target-cpu get name>> (platform) diff --git a/extra/math/extras/extras-tests.factor b/extra/math/extras/extras-tests.factor index d6c9c67519..fb4d0159f8 100644 --- a/extra/math/extras/extras-tests.factor +++ b/extra/math/extras/extras-tests.factor @@ -97,7 +97,7 @@ tools.test ; { f } [ { 1 1 2 2 } majority ] unit-test { 2 } [ { 1 1 2 2 2 } majority ] unit-test { 3 } [ { 1 2 3 1 2 3 1 2 3 3 } majority ] unit-test -{ ch'C } [ "AAACCBBCCCBCC" majority ] unit-test +{ char: C } [ "AAACCBBCCCBCC" majority ] unit-test { 0.0 } [ 0 2 round-to-decimal ] unit-test { 1.0 } [ 1 2 round-to-decimal ] unit-test diff --git a/extra/math/floating-point/floating-point.factor b/extra/math/floating-point/floating-point.factor index 56d421cb60..22780ee236 100644 --- a/extra/math/floating-point/floating-point.factor +++ b/extra/math/floating-point/floating-point.factor @@ -32,9 +32,9 @@ IN: math.floating-point : double. 
( double -- ) double>bits [ (double-sign) .b ] - [ (double-exponent-bits) >bin 11 ch'0 pad-head bl print ] + [ (double-exponent-bits) >bin 11 char: 0 pad-head bl print ] [ - (double-mantissa-bits) >bin 52 ch'0 pad-head + (double-mantissa-bits) >bin 52 char: 0 pad-head 11 [ bl ] times print ] tri ; diff --git a/extra/math/text/english/english.factor b/extra/math/text/english/english.factor index de90755296..bd7da1586a 100644 --- a/extra/math/text/english/english.factor +++ b/extra/math/text/english/english.factor @@ -104,7 +104,7 @@ M: float number>text [ string>number number>text ] [ [ "negative " prepend ] when ] bi* ] [ - [ ch'0 - small-numbers ] { } map-as " " join + [ char: 0 - small-numbers ] { } map-as " " join ] bi* " point " glue ; M: complex number>text diff --git a/extra/math/text/french/french.factor b/extra/math/text/french/french.factor index d89dc406c3..4a15384eec 100644 --- a/extra/math/text/french/french.factor +++ b/extra/math/text/french/french.factor @@ -29,7 +29,7 @@ MEMO: units ( -- seq ) ! up to 10^99 ! The only plurals we have to remove are "quatre-vingts" and "cents", ! which are also the only strings ending with "ts". : unpluralize ( str -- newstr ) dup "ts" tail? [ but-last ] when ; -: pluralize ( str -- newstr ) dup "s" tail? [ ch's suffix ] unless ; +: pluralize ( str -- newstr ) dup "s" tail? [ char: s suffix ] unless ; : space-append ( str1 str2 -- str ) " " glue ; @@ -91,7 +91,7 @@ MEMO: units ( -- seq ) ! up to 10^99 : ieme ( str -- str ) dup "ts" tail? [ but-last ] when dup "e" tail? [ but-last ] when - dup "q" tail? [ ch'u suffix ] when + dup "q" tail? [ char: u suffix ] when "ième" append ; : divisor ( n -- str ) diff --git a/extra/metar/metar.factor b/extra/metar/metar.factor index 11eabefd1f..5883bce8c4 100644 --- a/extra/metar/metar.factor +++ b/extra/metar/metar.factor @@ -29,19 +29,19 @@ ERROR: bad-location str ; dup R{{ \d+-\d+(-\d+(\.\d+)?)?[WE]}} matches? [ unclip-last [ parse-location ] - [ ch'W = [ neg ] when ] bi* + [ char: W = [ neg ] when ] bi* ] [ drop f ] if ; : string>latitude ( str -- lat/f ) dup R{{ \d+-\d+(-\d+(\.\d+)?)?[NS]}} matches? [ unclip-last [ parse-location ] - [ ch'S = [ neg ] when ] bi* + [ char: S = [ neg ] when ] bi* ] [ drop f ] if ; : stations-data ( -- seq ) url"http://tgftp.nws.noaa.gov/data/nsd_cccc.txt" - http-get nip ch'\; [ string>csv ] with-delimiter ; + http-get nip char: \; [ string>csv ] with-delimiter ; PRIVATE> @@ -201,11 +201,11 @@ CONSTANT: compass-directions H{ : parse-visibility ( str -- str' ) dup first { - { ch'M [ rest "less than " ] } - { ch'P [ rest "more than " ] } + { char: M [ rest "less than " ] } + { char: P [ rest "more than " ] } [ drop "" ] } case swap "SM" ?tail drop - ch'/ over index [ 1 > [ 1 cut "+" glue ] when ] when* + char: / over index [ 1 > [ 1 cut "+" glue ] when ] when* string>number "%s%s statute miles" sprintf ; : parse-rvr ( str -- str' ) @@ -220,8 +220,8 @@ CONSTANT: compass-directions H{ : (parse-weather) ( str -- str' ) dup "+FC" = [ drop "tornadoes or waterspouts" ] [ dup first { - { ch'+ [ rest "heavy " ] } - { ch'- [ rest "light " ] } + { char: + [ rest "heavy " ] } + { char: - [ rest "light " ] } [ drop f ] } case [ 2 group dup [ weather key? ] all? 
@@ -278,7 +278,7 @@ CONSTANT: sky H{ ] bi@ ; : parse-altimeter ( str -- str' ) - unclip [ string>number ] [ ch'A = ] bi* + unclip [ string>number ] [ char: A = ] bi* [ 100 /f "%.2f Hg" sprintf ] [ "%s hPa" sprintf ] if ; CONSTANT: re-timestamp R[[\d{6}Z]] @@ -424,14 +424,14 @@ CONSTANT: high-clouds H{ } : parse-cloud-cover ( str -- str' ) - "8/" ?head drop first3 [ ch'0 - ] tri@ + "8/" ?head drop first3 [ char: 0 - ] tri@ [ [ f ] [ low-clouds at "low clouds are %s" sprintf ] if-zero ] [ [ f ] [ mid-clouds at "middle clouds are %s" sprintf ] if-zero ] [ [ f ] [ high-clouds at "high clouds are %s" sprintf ] if-zero ] tri* 3array " " join ; : parse-inches ( str -- str' ) - dup [ ch'/ = ] all? [ drop "unknown" ] [ + dup [ char: / = ] all? [ drop "unknown" ] [ string>number [ "trace" ] [ 100 /f "%.2f inches" sprintf ] if-zero ] if ; @@ -470,7 +470,7 @@ CONSTANT: re-recent-weather R[[((\w{2})?[BE]\d{2,4}((\w{2})?[BE]\d{2,4})?)+]] : parse-began/ended ( str -- str' ) unclip swap - [ ch'B = "began" "ended" ? ] + [ char: B = "began" "ended" ? ] [ parse-recent-time ] bi* "%s at %s" sprintf ; : split-recent-weather ( str -- seq ) diff --git a/extra/modern/modern-tests.factor b/extra/modern/modern-tests.factor index 08740d8d8e..37fe2c07f5 100644 --- a/extra/modern/modern-tests.factor +++ b/extra/modern/modern-tests.factor @@ -128,14 +128,14 @@ IN: modern.tests { { "foo\\bar{" { "1" } "}" } } } [ "foo\\bar{ 1 }" string>literals >strings ] unit-test -{ { { "char:" { "\\{" } } } } [ "ch'\\{" string>literals >strings ] unit-test -[ "ch'{" string>literals >strings ] must-fail -[ "ch'[" string>literals >strings ] must-fail -[ "ch'{" string>literals >strings ] must-fail -[ "ch'\"" string>literals >strings ] must-fail -! { { { "char:" { "\\\\" } } } } [ "ch'\\\\" string>literals >strings ] unit-test +{ { { "char:" { "\\{" } } } } [ "char: \\{" string>literals >strings ] unit-test +[ "char: {" string>literals >strings ] must-fail +[ "char: [" string>literals >strings ] must-fail +[ "char: {" string>literals >strings ] must-fail +[ "char: \"" string>literals >strings ] must-fail +! { { { "char:" { "\\\\" } } } } [ "char: \\\\" string>literals >strings ] unit-test -[ "ch'\\" string>literals >strings ] must-fail ! ch'\ should be legal eventually +[ "char: \\" string>literals >strings ] must-fail ! char: \ should be legal eventually { { { "\\" { "(" } } } } [ "\\ (" string>literals >strings ] unit-test diff --git a/extra/modern/modern.factor b/extra/modern/modern.factor index 985469ac0d..12d78bacb9 100644 --- a/extra/modern/modern.factor +++ b/extra/modern/modern.factor @@ -57,7 +57,7 @@ MACRO:: read-double-matched ( open-ch -- quot: ( n string tag ch -- n' string se } 2cleave :> ( openstr2 openstr1 closestr2 ) |[ n string tag! ch | ch { - { ch'= [ + { char: = [ tag 1 cut-slice* drop tag! ! 
tag of (=( is ( here, fix it n string openstr1 slice-til-separator-inclusive [ -1 modify-from ] dip :> ( n' string' opening ch ) ch open-ch = [ tag openstr2 n string ch long-opening-mismatch ] unless @@ -77,9 +77,9 @@ MACRO:: read-double-matched ( open-ch -- quot: ( n string tag ch -- n' string se } case ] ; -: read-double-matched-paren ( n string tag ch -- n' string seq ) ch'\( read-double-matched ; -: read-double-matched-bracket ( n string tag ch -- n' string seq ) ch'\[ read-double-matched ; -: read-double-matched-brace ( n string tag ch -- n' string seq ) ch'\{ read-double-matched ; +: read-double-matched-paren ( n string tag ch -- n' string seq ) char: \( read-double-matched ; +: read-double-matched-bracket ( n string tag ch -- n' string seq ) char: \[ read-double-matched ; +: read-double-matched-brace ( n string tag ch -- n' string seq ) char: \{ read-double-matched ; DEFER: lex-factor-top DEFER: lex-factor @@ -145,15 +145,15 @@ MACRO:: read-matched ( ch -- quot: ( n string tag -- n' string slice' ) ) } cond ] ; -: read-bracket ( n string slice -- n' string slice' ) ch'\[ read-matched ; -: read-brace ( n string slice -- n' string slice' ) ch'\{ read-matched ; -: read-paren ( n string slice -- n' string slice' ) ch'\( read-matched ; +: read-bracket ( n string slice -- n' string slice' ) char: \[ read-matched ; +: read-brace ( n string slice -- n' string slice' ) char: \{ read-matched ; +: read-paren ( n string slice -- n' string slice' ) char: \( read-matched ; : read-string-payload ( n string -- n' string ) over [ - { ch'\\ ch'\" } slice-til-separator-inclusive { + { char: \\ char: \" } slice-til-separator-inclusive { { f [ drop ] } - { ch'\" [ drop ] } - { ch'\\ [ drop next-char-from drop read-string-payload ] } + { char: \" [ drop ] } + { char: \\ [ drop next-char-from drop read-string-payload ] } } case ] [ string-expected-got-eof @@ -168,7 +168,7 @@ MACRO:: read-matched ( ch -- quot: ( n string tag -- n' string slice' ) ) tag -rot 3array ; : take-comment ( n string slice -- n' string comment ) - 2over ?nth ch'\[ = [ + 2over ?nth char: \[ = [ [ 1 + ] 2dip 1 modify-to 2over ?nth read-double-matched-bracket ] [ [ slice-til-eol drop ] dip swap 2array @@ -189,7 +189,7 @@ ERROR: expected-length-tokens n string length seq ; ERROR: token-expected n string obj ; ERROR: unexpected-terminator n string slice ; : read-lowercase-colon ( n string slice -- n' string lowercase-colon ) - dup [ ch'\: = ] count-tail + dup [ char: \: = ] count-tail '[ _ [ slice-til-non-whitespace drop [ lex-factor ] dip swap 2array ] replicate ensure-no-false dup [ token-expected ] unless dup terminator? [ unexpected-terminator ] when @@ -200,11 +200,11 @@ ERROR: unexpected-terminator n string slice ; ! All chars must... [ [ - { [ ch'A ch'Z between? ] [ "':-\\#" member? ] } 1|| + { [ char: A char: Z between? ] [ "':-\\#" member? ] } 1|| ] all? ] ! At least one char must... - [ [ { [ ch'A ch'Z between? ] [ ch'\' = ] } 1|| ] any? ] + [ [ { [ char: A char: Z between? ] [ char: \' = ] } 1|| ] any? ] } 1&& ; : strict-upper? ( string -- ? ) @@ -232,7 +232,7 @@ ERROR: unexpected-terminator n string slice ; { [ "<" head? ] [ length 2 >= ] - [ second ch'/ = not ] + [ second char: / = not ] [ rest strict-upper? not ] [ [ blank? ] any? not ] [ ">" tail? ] @@ -242,7 +242,7 @@ ERROR: unexpected-terminator n string slice ; { [ "<" head? ] [ length 2 >= ] - [ second ch'/ = not ] + [ second char: / = not ] [ rest strict-upper? not ] [ [ blank? ] any? not ] [ ">" tail? 
not ] @@ -267,14 +267,14 @@ ERROR: unexpected-terminator n string slice ; } 1|| ; : upper-colon? ( string -- ? ) - dup { [ length 0 > ] [ [ ch'\: = ] all? ] } 1&& [ + dup { [ length 0 > ] [ [ char: \: = ] all? ] } 1&& [ drop t ] [ { [ length 2 >= ] [ "\\" head? not ] ! XXX: good? [ ":" tail? ] - [ dup [ ch'\: = ] find drop head strict-upper? ] + [ dup [ char: \: = ] find drop head strict-upper? ] } 1&& ] if ; @@ -370,7 +370,7 @@ ERROR: no-backslash-payload n string slice ; : (read-backslash) ( n string slice -- n' string obj ) merge-slice-til-whitespace dup "\\" tail? [ ! \ foo, M\ foo - dup [ ch'\\ = ] count-tail + dup [ char: \\ = ] count-tail '[ _ [ slice-til-not-whitespace drop [ slice-til-whitespace drop ] dip swap 2array ] replicate ensure-no-false @@ -410,17 +410,17 @@ DEFER: lex-factor-top* : lex-factor-fallthrough ( n/f string slice/f ch/f -- n'/f string literal ) { - { ch'\\ [ read-backslash ] } - { ch'\[ [ read-bracket ] } - { ch'\{ [ read-brace ] } - { ch'\( [ read-paren ] } - { ch'\] [ ] } - { ch'\} [ ] } - { ch'\) [ ] } - { ch'\" [ read-string ] } - { ch'\! [ read-exclamation ] } - { ch'> [ - [ [ ch'> = not ] slice-until ] dip merge-slices + { char: \\ [ read-backslash ] } + { char: \[ [ read-bracket ] } + { char: \{ [ read-brace ] } + { char: \( [ read-paren ] } + { char: \] [ ] } + { char: \} [ ] } + { char: \) [ ] } + { char: \" [ read-string ] } + { char: \! [ read-exclamation ] } + { char: > [ + [ [ char: > = not ] slice-until ] dip merge-slices dup section-close? [ [ slice-til-whitespace drop ] dip ?span-slices ] unless @@ -432,7 +432,7 @@ DEFER: lex-factor-top* : lex-factor-nested* ( n/f string slice/f ch/f -- n'/f string literal ) { ! Nested ``A: a B: b`` so rewind and let the parser get it top-level - { ch'\: [ + { char: \: [ ! A: B: then interrupt the current parser ! A: b: then keep going merge-slice-til-whitespace @@ -441,7 +441,7 @@ DEFER: lex-factor-top* [ rewind-slice f ] [ read-colon ] if ] } - { ch'< [ + { char: < [ ! FOO: a b ! FOO: a b ! FOO: a b @@ -453,9 +453,9 @@ DEFER: lex-factor-top* [ slice-til-whitespace drop ] dip span-slices dup section-open? [ rewind-slice f ] when ] } - { ch'\s [ read-token-or-whitespace-nested ] } - { ch'\r [ read-token-or-whitespace-nested ] } - { ch'\n [ read-token-or-whitespace-nested ] } + { char: \s [ read-token-or-whitespace-nested ] } + { char: \r [ read-token-or-whitespace-nested ] } + { char: \n [ read-token-or-whitespace-nested ] } [ lex-factor-fallthrough ] } case ; @@ -466,8 +466,8 @@ DEFER: lex-factor-top* : lex-factor-top* ( n/f string slice/f ch/f -- n'/f string literal ) { - { ch'\: [ merge-slice-til-whitespace read-colon ] } - { ch'< [ + { char: \: [ merge-slice-til-whitespace read-colon ] } + { char: < [ ! FOO: a b ! FOO: a b ! FOO: a b @@ -480,9 +480,9 @@ DEFER: lex-factor-top* dup section-open? [ read-acute ] when ] } - { ch'\s [ read-token-or-whitespace-top ] } - { ch'\r [ read-token-or-whitespace-top ] } - { ch'\n [ read-token-or-whitespace-top ] } + { char: \s [ read-token-or-whitespace-top ] } + { char: \r [ read-token-or-whitespace-top ] } + { char: \n [ read-token-or-whitespace-top ] } [ lex-factor-fallthrough ] } case ; diff --git a/extra/modern/out/out.factor b/extra/modern/out/out.factor index 541c6a1bca..be090c825c 100644 --- a/extra/modern/out/out.factor +++ b/extra/modern/out/out.factor @@ -14,8 +14,8 @@ TUPLE: renamed slice string ; CONSTRUCTOR: renamed ( slice string -- obj ) ; : trim-before-newline ( seq -- seq' ) - dup [ ch'\s = not ] find - { ch'\r ch'\n } member? 
+ dup [ char: \s = not ] find + { char: \r char: \n } member? [ tail-slice ] [ drop ] if ; : write-whitespace ( last obj -- ) diff --git a/extra/modern/slices/slices.factor b/extra/modern/slices/slices.factor index 41e86fcab1..5f34b841c6 100644 --- a/extra/modern/slices/slices.factor +++ b/extra/modern/slices/slices.factor @@ -154,15 +154,15 @@ ERROR: unexpected-end n string ; over [ (?nth) ] [ 2drop f ] if ; :: merge-slice-til-eol-slash' ( n string slice -- n' string slice/f ch/f ) - n string merge-slice-til-eol-slash'' :> ( n' string' slice' ch' ) - ch' ch'\\ = [ + n string merge-slice-til-eol-slash'' :> ( n' string' slice' char' ) + char' char: \\ = [ n' 1 + string' (?nth) "\r\n" member? [ n' 2 + string' slice slice' span-slices merge-slice-til-eol-slash' ] [ "omg" throw ] if ] [ - n' string' slice slice' span-slices ch' + n' string' slice slice' span-slices char' ] if ; ! Supports \ at eol (with no space after it) @@ -215,9 +215,9 @@ ERROR: subseq-expected-but-got-eof n string expected ; [ [ from>> ] [ to>> ] [ seq>> ] tri ] dip swap [ + ] dip ; -! { ch'\] [ read-closing ] } -! { ch'\} [ read-closing ] } -! { ch'\) [ read-closing ] } +! { char: \] [ read-closing ] } +! { char: \} [ read-closing ] } +! { char: \) [ read-closing ] } : read-closing ( n string tok -- n string tok ) dup length 1 = [ -1 modify-to [ 1 - ] 2dip diff --git a/extra/money/money.factor b/extra/money/money.factor index 237a76528f..8bf71c3dac 100644 --- a/extra/money/money.factor +++ b/extra/money/money.factor @@ -5,7 +5,7 @@ namespaces sequences splitting ; IN: money SYMBOL: currency-token -ch'$ currency-token set-global +char: $ currency-token set-global : dollars/cents ( dollars -- dollars cents ) 100 * 100 /mod round >integer ; @@ -13,7 +13,7 @@ ch'$ currency-token set-global : format-money ( dollars cents -- string ) [ number>string ] bi@ [ 3 group "," join ] - [ 2 ch'0 pad-head ] bi* "." glue ; + [ 2 char: 0 pad-head ] bi* "." glue ; : money>string ( number -- string ) dollars/cents format-money currency-token get prefix ; diff --git a/extra/morse/morse-tests.factor b/extra/morse/morse-tests.factor index cd909e721e..719a77ba5d 100644 --- a/extra/morse/morse-tests.factor +++ b/extra/morse/morse-tests.factor @@ -2,10 +2,10 @@ ! See http://factorcode.org/license.txt for BSD license. USING: arrays morse strings tools.test ; -{ "?" } [ ch'\\ ch>morse ] unit-test -{ "..." } [ ch's ch>morse ] unit-test -{ ch's } [ "..." morse>ch ] unit-test -{ ch'\s } [ "..--..--.." morse>ch ] unit-test +{ "?" } [ char: \\ ch>morse ] unit-test +{ "..." } [ char: s ch>morse ] unit-test +{ char: s } [ "..." morse>ch ] unit-test +{ char: \s } [ "..--..--.." morse>ch ] unit-test { "-- --- .-. ... . / -.-. --- -.. ." } [ "morse code" >morse ] unit-test { "morse code" } [ "-- --- .-. ... . / -.-. --- -.. ." morse> ] unit-test { "hello, world!" } [ "Hello, World!" >morse morse> ] unit-test diff --git a/extra/morse/morse.factor b/extra/morse/morse.factor index 38d2475a30..cf2db4b657 100644 --- a/extra/morse/morse.factor +++ b/extra/morse/morse.factor @@ -10,71 +10,71 @@ ERROR: no-morse-ch ch ; CONSTANT: morse-code-table $[ H{ - { ch'a ".-" } - { ch'b "-..." } - { ch'c "-.-." } - { ch'd "-.." } - { ch'e "." } - { ch'f "..-." } - { ch'g "--." } - { ch'h "...." } - { ch'i ".." } - { ch'j ".---" } - { ch'k "-.-" } - { ch'l ".-.." } - { ch'm "--" } - { ch'n "-." } - { ch'o "---" } - { ch'p ".--." } - { ch'q "--.-" } - { ch'r ".-." } - { ch's "..." 
} - { ch't "-" } - { ch'u "..-" } - { ch'v "...-" } - { ch'w ".--" } - { ch'x "-..-" } - { ch'y "-.--" } - { ch'z "--.." } - { ch'1 ".----" } - { ch'2 "..---" } - { ch'3 "...--" } - { ch'4 "....-" } - { ch'5 "....." } - { ch'6 "-...." } - { ch'7 "--..." } - { ch'8 "---.." } - { ch'9 "----." } - { ch'0 "-----" } - { ch'. ".-.-.-" } - { ch', "--..--" } - { ch'? "..--.." } - { ch'\' ".----." } - { ch'\! "-.-.--" } - { ch'/ "-..-." } - { ch'\( "-.--." } - { ch'\) "-.--.-" } - { ch'& ".-..." } - { ch'\: "---..." } - { ch'\; "-.-.-." } - { ch'= "-...- " } - { ch'+ ".-.-." } - { ch'- "-....-" } - { ch'_ "..--.-" } - { ch'\" ".-..-." } - { ch'$ "...-..-" } - { ch'@ ".--.-." } - { ch'\s "/" } + { char: a ".-" } + { char: b "-..." } + { char: c "-.-." } + { char: d "-.." } + { char: e "." } + { char: f "..-." } + { char: g "--." } + { char: h "...." } + { char: i ".." } + { char: j ".---" } + { char: k "-.-" } + { char: l ".-.." } + { char: m "--" } + { char: n "-." } + { char: o "---" } + { char: p ".--." } + { char: q "--.-" } + { char: r ".-." } + { char: s "..." } + { char: t "-" } + { char: u "..-" } + { char: v "...-" } + { char: w ".--" } + { char: x "-..-" } + { char: y "-.--" } + { char: z "--.." } + { char: 1 ".----" } + { char: 2 "..---" } + { char: 3 "...--" } + { char: 4 "....-" } + { char: 5 "....." } + { char: 6 "-...." } + { char: 7 "--..." } + { char: 8 "---.." } + { char: 9 "----." } + { char: 0 "-----" } + { char: . ".-.-.-" } + { char: , "--..--" } + { char: ? "..--.." } + { char: \' ".----." } + { char: \! "-.-.--" } + { char: / "-..-." } + { char: \( "-.--." } + { char: \) "-.--.-" } + { char: & ".-..." } + { char: \: "---..." } + { char: \; "-.-.-." } + { char: = "-...- " } + { char: + ".-.-." } + { char: - "-....-" } + { char: _ "..--.-" } + { char: \" ".-..-." } + { char: $ "...-..-" } + { char: @ ".--.-." } + { char: \s "/" } } >biassoc ] @@ -102,7 +102,7 @@ CONSTANT: morse-code-table $[ "/" split [ trim-blanks morse>word ] map " " join ; : replace-underscores ( str -- str' ) - [ dup ch'_ = [ drop ch'- ] when ] map ; + [ dup char: _ = [ drop char: - ] when ] map ; PRIVATE> diff --git a/extra/odbc/odbc.factor b/extra/odbc/odbc.factor index 9ce6d3ef33..b8320b99eb 100644 --- a/extra/odbc/odbc.factor +++ b/extra/odbc/odbc.factor @@ -143,7 +143,7 @@ FUNCTION: SQLRETURN SQLGetDiagRec ( ascii string>alien ; + char: space ascii string>alien ; PRIVATE> diff --git a/extra/parser-combinators/simple/simple.factor b/extra/parser-combinators/simple/simple.factor index 4706e29600..746c4c775a 100644 --- a/extra/parser-combinators/simple/simple.factor +++ b/extra/parser-combinators/simple/simple.factor @@ -11,18 +11,18 @@ IN: parser-combinators.simple [ digit? 
] satisfy <*> [ string>number ] <@ ; : string-parser ( -- parser ) - [ ch'\" = ] satisfy - [ ch'\" = not ] satisfy <*> &> - [ ch'\" = ] satisfy <& [ >string ] <@ ; + [ char: \" = ] satisfy + [ char: \" = not ] satisfy <*> &> + [ char: \" = ] satisfy <& [ >string ] <@ ; : bold-parser ( -- parser ) "*" token - [ ch'* = not ] satisfy <*> [ >string ] <@ &> + [ char: * = not ] satisfy <*> [ >string ] <@ &> "*" token <& ; : italic-parser ( -- parser ) "_" token - [ ch'_ = not ] satisfy <*> [ >string ] <@ &> + [ char: _ = not ] satisfy <*> [ >string ] <@ &> "_" token <& ; : comma-list ( element -- parser ) diff --git a/extra/path-finding/path-finding-tests.factor b/extra/path-finding/path-finding-tests.factor index d32129d6ec..5c9f66eada 100644 --- a/extra/path-finding/path-finding-tests.factor +++ b/extra/path-finding/path-finding-tests.factor @@ -28,7 +28,7 @@ TUPLE: maze < astar ; 6 X X X X X X e X 7 X g X X 8 X X X X X X X X X X" - "\n" split ] nth nth ch'X = not ; + "\n" split ] nth nth char: X = not ; M: maze neighbours drop diff --git a/extra/pcre/pcre-tests.factor b/extra/pcre/pcre-tests.factor index 21e38e5d33..00686dc310 100644 --- a/extra/pcre/pcre-tests.factor +++ b/extra/pcre/pcre-tests.factor @@ -90,7 +90,7 @@ os unix? [ [ 10 ] [ PCRE_CONFIG_NEWLINE pcre-config ] unit-test ] when ] unit-test : long-string ( -- x ) - 10000 [ ch'a ch'z [a,b] random ] "" replicate-as ; + 10000 [ char: a char: z [a,b] random ] "" replicate-as ; ! Performance { 0 } [ long-string ".{0,15}foobar.{0,10}" findall length ] unit-test diff --git a/extra/pdf/layout/layout.factor b/extra/pdf/layout/layout.factor index 4cf85f16d7..524152e789 100644 --- a/extra/pdf/layout/layout.factor +++ b/extra/pdf/layout/layout.factor @@ -84,8 +84,8 @@ M: div pdf-width : convert-string ( str -- str' ) { - { ch'“ "\"" } - { ch'” "\"" } + { char: “ "\"" } + { char: ” "\"" } } escape-string-by [ 256 < ] filter ; PRIVATE> diff --git a/extra/pdf/values/values.factor b/extra/pdf/values/values.factor index ad40a50547..b62b373bf3 100644 --- a/extra/pdf/values/values.factor +++ b/extra/pdf/values/values.factor @@ -14,12 +14,12 @@ IN: pdf.values H{ { 0x08 "\\b" } { 0x0c "\\f" } - { ch'\n "\\n" } - { ch'\r "\\r" } - { ch'\t "\\t" } - { ch'\\ "\\\\" } - { ch'\( "\\(" } - { ch'\) "\\)" } + { char: \n "\\n" } + { char: \r "\\r" } + { char: \t "\\t" } + { char: \\ "\\\\" } + { char: \( "\\(" } + { char: \) "\\)" } } escape-string-by ; PRIVATE> diff --git a/extra/peg/javascript/tokenizer/tokenizer.factor b/extra/peg/javascript/tokenizer/tokenizer.factor index d28b17b14c..1fa4dfad26 100644 --- a/extra/peg/javascript/tokenizer/tokenizer.factor +++ b/extra/peg/javascript/tokenizer/tokenizer.factor @@ -17,7 +17,7 @@ SingleLineComment = "//" (!("\n") .)* "\n" => [[ ignore ]] MultiLineComment = "/*" (!("*/") .)* "*/" => [[ ignore ]] Space = [ \t\r\n] | SingleLineComment | MultiLineComment Spaces = Space* => [[ ignore ]] -NameFirst = Letter | "$" => [[ ch'$ ]] | "_" => [[ ch'_ ]] +NameFirst = Letter | "$" => [[ char: $ ]] | "_" => [[ char: _ ]] NameRest = NameFirst | Digit iName = NameFirst NameRest* => [[ first2 swap prefix >string ]] Keyword = ("break" @@ -49,15 +49,15 @@ Name = !(Keyword) iName => [[ ast-name boa ]] Number = Digits:ws '.' Digits:fs => [[ ws "." 
fs 3array "" concat-as string>number ast-number boa ]] | Digits => [[ >string string>number ast-number boa ]] -SingleEscape = "b" => [[ ch'\b ]] - | "f" => [[ ch'\f ]] - | "n" => [[ ch'\n ]] - | "r" => [[ ch'\r ]] - | "t" => [[ ch'\t ]] - | "v" => [[ ch'\v ]] - | "'" => [[ ch'\' ]] - | "\"" => [[ ch'\" ]] - | "\\" => [[ ch'\\ ]] +SingleEscape = "b" => [[ char: \b ]] + | "f" => [[ char: \f ]] + | "n" => [[ char: \n ]] + | "r" => [[ char: \r ]] + | "t" => [[ char: \t ]] + | "v" => [[ char: \v ]] + | "'" => [[ char: \' ]] + | "\"" => [[ char: \" ]] + | "\\" => [[ char: \\ ]] HexDigit = [0-9a-fA-F] HexEscape = "x" (HexDigit HexDigit):d => [[ d hex> ]] UnicodeEscape = "u" (HexDigit HexDigit HexDigit HexDigit):d => [[ d hex> ]] diff --git a/extra/poker/poker.factor b/extra/poker/poker.factor index fb761d87c9..bca205a47d 100644 --- a/extra/poker/poker.factor +++ b/extra/poker/poker.factor @@ -206,14 +206,14 @@ ERROR: bad-suit-symbol ch ; : symbol>suit ( ch -- ch' ) ch>upper H{ - { ch'♠ ch'S } - { ch'♦ ch'D } - { ch'♥ ch'H } - { ch'♣ ch'C } - { ch'S ch'S } - { ch'D ch'D } - { ch'H ch'H } - { ch'C ch'C } + { char: ♠ char: S } + { char: ♦ char: D } + { char: ♥ char: H } + { char: ♣ char: C } + { char: S char: S } + { char: D char: D } + { char: H char: H } + { char: C char: C } } ?at [ bad-suit-symbol ] unless ; : card> ( string -- card ) diff --git a/extra/progress-bars/progress-bars.factor b/extra/progress-bars/progress-bars.factor index bd53813d92..c71c09b6a0 100644 --- a/extra/progress-bars/progress-bars.factor +++ b/extra/progress-bars/progress-bars.factor @@ -20,4 +20,4 @@ ERROR: invalid-length x ; : make-progress-bar ( percent length -- string ) [ check-percent ] [ check-length ] bi* - ch'= ch'- (make-progress-bar) ; + char: = char: - (make-progress-bar) ; diff --git a/extra/project-euler/051/051.factor b/extra/project-euler/051/051.factor index 1b1c3c29b5..6c74e25d58 100644 --- a/extra/project-euler/051/051.factor +++ b/extra/project-euler/051/051.factor @@ -41,7 +41,7 @@ SYMBOL: large-families H{ } clone [ '[ swap _ push-at ] each-index ] keep ; : *-if-index ( char combination index -- char ) - member? [ drop ch'* ] when ; + member? [ drop char: * ] when ; : replace-positions-with-* ( str positions -- str ) [ *-if-index ] curry map-index ; : all-positions-combinations ( seq -- combinations ) @@ -68,7 +68,7 @@ SYMBOL: large-families large-families get members ; : fill-*-with-ones ( str -- str ) - [ dup ch'* = [ drop ch'1 ] when ] map ; + [ dup char: * = [ drop char: 1 ] when ] map ; ! recursively test all primes by length until we find an answer : (euler051) ( i -- answer ) diff --git a/extra/project-euler/common/common.factor b/extra/project-euler/common/common.factor index 425fbcc80a..2db0cf4d35 100644 --- a/extra/project-euler/common/common.factor +++ b/extra/project-euler/common/common.factor @@ -66,7 +66,7 @@ HINTS: count-digits fixnum ; PRIVATE> : alpha-value ( str -- n ) - >lower [ ch'a - 1 + ] map-sum ; + >lower [ char: a - 1 + ] map-sum ; : mediant ( a/c b/d -- (a+b)/(c+d) ) 2fraction>parts [ + ] 2bi@ / ; diff --git a/extra/project-euler/project-euler.factor b/extra/project-euler/project-euler.factor index 43d7b5c362..d33264a435 100644 --- a/extra/project-euler/project-euler.factor +++ b/extra/project-euler/project-euler.factor @@ -36,7 +36,7 @@ IN: project-euler print readln string>number ; : number>euler ( n -- str ) - number>string 3 ch'0 pad-head ; + number>string 3 char: 0 pad-head ; : solution-path ( n -- str/f ) number>euler "project-euler." 
prepend diff --git a/extra/redis/response-parser/response-parser.factor b/extra/redis/response-parser/response-parser.factor index c8f5355934..5a612c0c6c 100644 --- a/extra/redis/response-parser/response-parser.factor +++ b/extra/redis/response-parser/response-parser.factor @@ -30,11 +30,11 @@ PRIVATE> : read-response ( -- response ) readln unclip { - { ch'\: [ string>number ] } - { ch'+ [ handle-response ] } - { ch'$ [ string>number read-bulk ] } - { ch'* [ string>number read-multi-bulk ] } - { ch'- [ handle-error ] } + { char: \: [ string>number ] } + { char: + [ handle-response ] } + { char: $ [ string>number read-bulk ] } + { char: * [ string>number read-multi-bulk ] } + { char: - [ handle-error ] } } case ; : check-response ( -- ) diff --git a/extra/rosetta-code/align-columns/align-columns.factor b/extra/rosetta-code/align-columns/align-columns.factor index aab4d094a7..4028a31823 100644 --- a/extra/rosetta-code/align-columns/align-columns.factor +++ b/extra/rosetta-code/align-columns/align-columns.factor @@ -55,14 +55,14 @@ SINGLETONS: +left+ +middle+ +right+ ; GENERIC: align-string ( str n alignment -- str' ) -M: +left+ align-string drop ch'space pad-tail ; -M: +right+ align-string drop ch'space pad-head ; +M: +left+ align-string drop char: space pad-tail ; +M: +right+ align-string drop char: space pad-head ; M: +middle+ align-string drop over length - 2 / - [ floor ch'space ] - [ ceiling ch'space ] bi surround ; + [ floor char: space ] + [ ceiling char: space ] bi surround ; : align-columns ( columns alignment -- columns' ) [ dup column-widths ] dip '[ diff --git a/extra/rosetta-code/balanced-brackets/balanced-brackets.factor b/extra/rosetta-code/balanced-brackets/balanced-brackets.factor index 15f61d685b..4210ddaa48 100644 --- a/extra/rosetta-code/balanced-brackets/balanced-brackets.factor +++ b/extra/rosetta-code/balanced-brackets/balanced-brackets.factor @@ -26,8 +26,8 @@ IN: rosetta-code.balanced-brackets t :> ok! str [ { - { ch'\[ [ 1 ] } - { ch'\] [ -1 ] } + { char: \[ [ 1 ] } + { char: \] [ -1 ] } [ drop 0 ] } case counter + counter! counter 0 < [ f ok! ] when diff --git a/extra/rosetta-code/multiplication-tables/multiplication-tables.factor b/extra/rosetta-code/multiplication-tables/multiplication-tables.factor index c055308805..b4e6384914 100644 --- a/extra/rosetta-code/multiplication-tables/multiplication-tables.factor +++ b/extra/rosetta-code/multiplication-tables/multiplication-tables.factor @@ -11,16 +11,16 @@ IN: rosetta-code.multiplication-tables ! Only print the top half triangle of products. : print-row ( n -- ) - [ number>string 2 ch'space pad-head write " |" write ] + [ number>string 2 char: space pad-head write " |" write ] [ 1 - [ " " write ] times ] [ dup 12 [a,b] - [ * number>string 4 ch'space pad-head write ] with each + [ * number>string 4 char: space pad-head write ] with each ] tri nl ; : print-table ( -- ) " " write - 1 12 [a,b] [ number>string 4 ch'space pad-head write ] each nl + 1 12 [a,b] [ number>string 4 char: space pad-head write ] each nl " +" write 12 [ "----" write ] times nl 1 12 [a,b] [ print-row ] each ; diff --git a/extra/rosetta-code/number-reversal/number-reversal.factor b/extra/rosetta-code/number-reversal/number-reversal.factor index f8b07f6e73..1c968cbefc 100644 --- a/extra/rosetta-code/number-reversal/number-reversal.factor +++ b/extra/rosetta-code/number-reversal/number-reversal.factor @@ -17,7 +17,7 @@ IN: rosetta-code.number-reversal ! Note: Assume the players input does not need extra validation. 
: make-jumbled-array ( -- sorted jumbled ) - ch'1 ch'9 [a,b] [ 1string ] map dup clone randomize + char: 1 char: 9 [a,b] [ 1string ] map dup clone randomize [ 2dup = ] [ randomize ] while ; SYMBOL: trials diff --git a/extra/rosetta-code/one-d-cellular/one-d-cellular.factor b/extra/rosetta-code/one-d-cellular/one-d-cellular.factor index dcfeb6fa53..c9c670a603 100644 --- a/extra/rosetta-code/one-d-cellular/one-d-cellular.factor +++ b/extra/rosetta-code/one-d-cellular/one-d-cellular.factor @@ -42,7 +42,7 @@ IN: rosetta-code.one-d-cellular dup count-neighbours [ life-law ] ?{ } 2map-as ; : print-cellular ( world -- ) - [ ch'# ch'_ ? ] "" map-as print ; + [ char: # char: _ ? ] "" map-as print ; : main-cellular ( -- ) ?{ f t t t f t t f t f t f t f t f f t f f } diff --git a/extra/rot13/rot13.factor b/extra/rot13/rot13.factor index 7dc6333fad..9bb08130b6 100644 --- a/extra/rot13/rot13.factor +++ b/extra/rot13/rot13.factor @@ -7,8 +7,8 @@ IN: rot13 : rot-letter ( ch -- ch ) { - { [ dup letter? ] [ ch'a rotate ] } - { [ dup LETTER? ] [ ch'A rotate ] } + { [ dup letter? ] [ char: a rotate ] } + { [ dup LETTER? ] [ char: A rotate ] } [ ] } cond ; diff --git a/extra/semantic-versioning/semantic-versioning.factor b/extra/semantic-versioning/semantic-versioning.factor index 70cae99f5d..b8898a88a4 100644 --- a/extra/semantic-versioning/semantic-versioning.factor +++ b/extra/semantic-versioning/semantic-versioning.factor @@ -17,8 +17,8 @@ PRIVATE> : split-version ( string -- array ) "+" split1 [ - dup [ [ digit? not ] [ ch'. = not ] bi and ] find [ - [ cut ] [ ch'- = [ rest [ f ] when-empty ] when ] bi* + dup [ [ digit? not ] [ char: . = not ] bi and ] find [ + [ cut ] [ char: - = [ rest [ f ] when-empty ] when ] bi* ] [ drop f ] if* [ "." split [ string>number 0 or ] map 3 0 pad-tail ] dip ] dip 3array ; diff --git a/extra/sequences/extras/extras-docs.factor b/extra/sequences/extras/extras-docs.factor index 898719cfce..ac019d16d4 100644 --- a/extra/sequences/extras/extras-docs.factor +++ b/extra/sequences/extras/extras-docs.factor @@ -81,7 +81,7 @@ HELP: collapse { $see-also compact } { $examples "Collapse multiple spaces in a string down to a single space" - { $example "USING: kernel prettyprint sequences.extras ;" "\" Hello, crazy world \" [ ch'\\s = ] \" \" collapse ." "\" Hello, crazy world \"" } } ; + { $example "USING: kernel prettyprint sequences.extras ;" "\" Hello, crazy world \" [ char: \\s = ] \" \" collapse ." "\" Hello, crazy world \"" } } ; HELP: compact { $values @@ -93,7 +93,7 @@ HELP: compact { $see-also collapse } { $examples "Collapse multiple spaces in a string down to a single space" - { $example "USING: kernel prettyprint sequences.extras ;" "\" Hello, crazy world \" [ ch'\\s = ] \" \" compact ." "\"Hello, crazy world\"" } } ; + { $example "USING: kernel prettyprint sequences.extras ;" "\" Hello, crazy world \" [ char: \\s = ] \" \" compact ." "\"Hello, crazy world\"" } } ; HELP: { $values { "seq" sequence } { "evens" evens } } diff --git a/extra/sequences/extras/extras-tests.factor b/extra/sequences/extras/extras-tests.factor index cd1e9db9bc..a00576b0fb 100644 --- a/extra/sequences/extras/extras-tests.factor +++ b/extra/sequences/extras/extras-tests.factor @@ -16,11 +16,11 @@ tools.test vectors vocabs ; { "foo" } [ "foo" "foobar" longest-subseq ] unit-test { "foo" } [ "foobar" "foo" longest-subseq ] unit-test -{ "" "" } [ "" "" ch'? pad-longest ] unit-test -{ "abc" "def" } [ "abc" "def" ch'? 
pad-longest ] unit-test -{ " " "abc" } [ "" "abc" ch'\s pad-longest ] unit-test -{ "abc" " " } [ "abc" "" ch'\s pad-longest ] unit-test -{ "abc..." "foobar" } [ "abc" "foobar" ch'. pad-longest ] unit-test +{ "" "" } [ "" "" char: ? pad-longest ] unit-test +{ "abc" "def" } [ "abc" "def" char: ? pad-longest ] unit-test +{ " " "abc" } [ "" "abc" char: \s pad-longest ] unit-test +{ "abc" " " } [ "abc" "" char: \s pad-longest ] unit-test +{ "abc..." "foobar" } [ "abc" "foobar" char: . pad-longest ] unit-test { { @@ -34,7 +34,7 @@ tools.test vectors vocabs ; "--ABC--" } } [ - "ABC" 8 [ ch'- pad-center ] with map + "ABC" 8 [ char: - pad-center ] with map ] unit-test { { 0 1 0 1 } } [ @@ -268,11 +268,11 @@ tools.test vectors vocabs ; { { } } [ { } ", " interleaved ] unit-test { { 1 } } [ { 1 } ", " interleaved ] unit-test { { 1 ", " 2 } } [ { 1 2 } ", " interleaved ] unit-test -{ "" } [ "" ch'_ interleaved ] unit-test -{ "a" } [ "a" ch'_ interleaved ] unit-test -{ "a_b" } [ "ab" ch'_ interleaved ] unit-test -{ "a_b_c" } [ "abc" ch'_ interleaved ] unit-test -{ "a_b_c_d" } [ "abcd" ch'_ interleaved ] unit-test +{ "" } [ "" char: _ interleaved ] unit-test +{ "a" } [ "a" char: _ interleaved ] unit-test +{ "a_b" } [ "ab" char: _ interleaved ] unit-test +{ "a_b_c" } [ "abc" char: _ interleaved ] unit-test +{ "a_b_c_d" } [ "abcd" char: _ interleaved ] unit-test { 0 } [ { 1 2 3 4 } [ 5 > ] count-head ] unit-test { 2 } [ { 1 2 3 4 } [ 3 < ] count-head ] unit-test diff --git a/extra/sequences/interleaved/interleaved-tests.factor b/extra/sequences/interleaved/interleaved-tests.factor index ff4eb1608e..680ddcaaf6 100644 --- a/extra/sequences/interleaved/interleaved-tests.factor +++ b/extra/sequences/interleaved/interleaved-tests.factor @@ -1,7 +1,7 @@ USING: sequences.interleaved strings tools.test ; -{ "" } [ "" ch'_ >string ] unit-test -{ "a" } [ "a" ch'_ >string ] unit-test -{ "a_b" } [ "ab" ch'_ >string ] unit-test -{ "a_b_c" } [ "abc" ch'_ >string ] unit-test -{ "a_b_c_d" } [ "abcd" ch'_ >string ] unit-test +{ "" } [ "" char: _ >string ] unit-test +{ "a" } [ "a" char: _ >string ] unit-test +{ "a_b" } [ "ab" char: _ >string ] unit-test +{ "a_b_c" } [ "abc" char: _ >string ] unit-test +{ "a_b_c_d" } [ "abcd" char: _ >string ] unit-test diff --git a/extra/smalltalk/parser/parser-tests.factor b/extra/smalltalk/parser/parser-tests.factor index ab6bac5446..60a9938ea0 100644 --- a/extra/smalltalk/parser/parser-tests.factor +++ b/extra/smalltalk/parser/parser-tests.factor @@ -6,7 +6,7 @@ EBNF: test-Character [=[ test = ]=] -{ ch'a } [ "a" test-Character ] unit-test +{ char: a } [ "a" test-Character ] unit-test EBNF: test-Comment [=[ test = @@ -39,7 +39,7 @@ test = { 12.4e7 } [ "12.4e7" test-Literal ] unit-test { 12.4e-7 } [ "12.4e-7" test-Literal ] unit-test { -12.4e7 } [ "-12.4e7" test-Literal ] unit-test -{ ch'x } [ "$x" test-Literal ] unit-test +{ char: x } [ "$x" test-Literal ] unit-test { "Hello, world" } [ "'Hello, world'" test-Literal ] unit-test { "Hello, 'funny' world" } [ "'Hello, ''funny'' world'" test-Literal ] unit-test { T{ symbol f "foo" } } [ "#foo" test-Literal ] unit-test diff --git a/extra/smalltalk/parser/parser.factor b/extra/smalltalk/parser/parser.factor index 452fe8c9b5..5777bb40cc 100644 --- a/extra/smalltalk/parser/parser.factor +++ b/extra/smalltalk/parser/parser.factor @@ -21,7 +21,7 @@ WhitespaceCharacter = [ \t\n\r] DecimalDigit = [0-9] Letter = [A-Za-z] -CommentCharacter = [^"] | '""' => [[ ch'\" ]] +CommentCharacter = [^"] | '""' => [[ char: \" ]] Comment = '"' 
(CommentCharacter)*:s '"' => [[ s >string ast-comment boa ]] OptionalWhiteSpace = (WhitespaceCharacter | Comment)* @@ -49,7 +49,7 @@ BinarySelectorChar = "~" | "!" | "@" | "%" | "&" | "*" | "-" | "+" | "=" | "|" | "\\" | "<" | ">" | "," | "?" | "/" BinaryMessageSelector = BinarySelectorChar+ => [[ concat ]] -OptionalMinus = ("-" => [[ ch'- ]])? +OptionalMinus = ("-" => [[ char: - ]])? IntegerLiteral = (OptionalMinus:m UnsignedIntegerLiteral:i) => [[ i m [ neg ] when ]] UnsignedIntegerLiteral = Radix:r "r" BaseNIntegerLiteral:b => [[ b >string r base> ]] | DecimalIntegerLiteral => [[ check-number ]] @@ -58,13 +58,13 @@ Radix = DecimalIntegerLiteral => [[ check-number ]] BaseNIntegerLiteral = LetterOrDigit+ FloatingPointLiteral = (OptionalMinus DecimalIntegerLiteral - ("." => [[ ch'. ]] DecimalIntegerLiteral Exponent? | Exponent)) + ("." => [[ char: . ]] DecimalIntegerLiteral Exponent? | Exponent)) => [[ flatten check-number ]] -Exponent = "e" => [[ ch'e ]] (OptionalMinus DecimalIntegerLiteral)? +Exponent = "e" => [[ char: e ]] (OptionalMinus DecimalIntegerLiteral)? CharacterLiteral = "$" Character:c => [[ c ]] -StringLiteral = "'" (StringLiteralCharacter | "''" => [[ ch'\' ]])*:s "'" +StringLiteral = "'" (StringLiteralCharacter | "''" => [[ char: \' ]])*:s "'" => [[ s >string ]] StringLiteralCharacter = [^'] diff --git a/extra/smalltalk/printer/printer.factor b/extra/smalltalk/printer/printer.factor index dbe96e1b69..f27d6edf58 100644 --- a/extra/smalltalk/printer/printer.factor +++ b/extra/smalltalk/printer/printer.factor @@ -11,7 +11,7 @@ M: real smalltalk>string number>string ; M: string smalltalk>string [ "'" % - [ dup ch'\' = [ dup , , ] [ , ] if ] each + [ dup char: \' = [ dup , , ] [ , ] if ] each "'" % ] "" make ; diff --git a/extra/smalltalk/selectors/selectors.factor b/extra/smalltalk/selectors/selectors.factor index 4aaa720e0a..6c4648b77c 100644 --- a/extra/smalltalk/selectors/selectors.factor +++ b/extra/smalltalk/selectors/selectors.factor @@ -9,7 +9,7 @@ SYMBOLS: unary binary keyword ; : selector-type ( selector -- type ) { { [ dup [ "~!@%&*-+=|\\<>,?/" member? ] all? ] [ binary ] } - { [ ch'\: over member? ] [ keyword ] } + { [ char: \: over member? ] [ keyword ] } [ unary ] } cond nip ; @@ -17,7 +17,7 @@ SYMBOLS: unary binary keyword ; dup selector-type { { unary [ drop { } ] } { binary [ drop { "x" } ] } - { keyword [ [ ch'\: = ] count "x" ] } + { keyword [ [ char: \: = ] count "x" ] } } case "receiver" suffix { "result" } ; : selector>generic ( selector -- generic ) diff --git a/extra/spelling/spelling.factor b/extra/spelling/spelling.factor index ef61ce730f..80b1b399b1 100644 --- a/extra/spelling/spelling.factor +++ b/extra/spelling/spelling.factor @@ -25,7 +25,7 @@ CONSTANT: ALPHABET "abcdefghijklmnopqrstuvwxyz" : inserts ( word -- edits ) [ length [0,b] ] keep '[ - ch'? over _ insert-nth ALPHABET swap [ + char: ? over _ insert-nth ALPHABET swap [ swapd clone [ set-nth-unsafe ] keep ] curry with { } map-as ] map concat ; diff --git a/extra/successor/successor.factor b/extra/successor/successor.factor index a9b4aa24d5..4c6867a8e7 100644 --- a/extra/successor/successor.factor +++ b/extra/successor/successor.factor @@ -12,10 +12,10 @@ IN: successor '[ _ > dup _ ] keep ? ; : next-digit ( ch -- ? ch' ) - 1 + ch'9 ch'0 carry ; + 1 + char: 9 char: 0 carry ; : next-letter ( ch -- ? ch' ) - [ ch>lower 1 + ch'z ch'a carry ] [ LETTER? ] bi + [ ch>lower 1 + char: z char: a carry ] [ LETTER? ] bi [ ch>upper ] when ; : next-char ( ch -- ? 
ch' ) diff --git a/extra/svg/svg.factor b/extra/svg/svg.factor index f2ef450309..f53cf573c1 100644 --- a/extra/svg/svg.factor +++ b/extra/svg/svg.factor @@ -13,7 +13,7 @@ XML-NS: sodipodi-name http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd XML-NS: inkscape-name http://www.inkscape.org/namespaces/inkscape : svg-string>number ( string -- number ) - H{ { ch'E ch'e } } substitute "e" split1 + H{ { char: E char: e } } substitute "e" split1 [ string>number ] [ [ string>number 10^ ] [ 1 ] if* ] bi* * >float ; diff --git a/extra/tar/tar.factor b/extra/tar/tar.factor index 7209cec5bc..a81019461b 100644 --- a/extra/tar/tar.factor +++ b/extra/tar/tar.factor @@ -178,27 +178,27 @@ C: symbolic-link dup typeflag>> { { 0 [ typeflag-0 ] } - { ch'0 [ typeflag-0 ] } - ! { ch'1 [ typeflag-1 ] } - { ch'2 [ typeflag-2 ] } - ! { ch'3 [ typeflag-3 ] } - ! { ch'4 [ typeflag-4 ] } - { ch'5 [ typeflag-5 ] } - ! { ch'6 [ typeflag-6 ] } - ! { ch'7 [ typeflag-7 ] } - { ch'g [ typeflag-g ] } - ! { ch'x [ typeflag-x ] } - ! { ch'A [ typeflag-A ] } - ! { ch'D [ typeflag-D ] } - ! { ch'E [ typeflag-E ] } - ! { ch'I [ typeflag-I ] } - ! { ch'K [ typeflag-K ] } - { ch'L [ typeflag-L ] } - ! { ch'M [ typeflag-M ] } - ! { ch'N [ typeflag-N ] } - ! { ch'S [ typeflag-S ] } - ! { ch'V [ typeflag-V ] } - ! { ch'X [ typeflag-X ] } + { char: 0 [ typeflag-0 ] } + ! { char: 1 [ typeflag-1 ] } + { char: 2 [ typeflag-2 ] } + ! { char: 3 [ typeflag-3 ] } + ! { char: 4 [ typeflag-4 ] } + { char: 5 [ typeflag-5 ] } + ! { char: 6 [ typeflag-6 ] } + ! { char: 7 [ typeflag-7 ] } + { char: g [ typeflag-g ] } + ! { char: x [ typeflag-x ] } + ! { char: A [ typeflag-A ] } + ! { char: D [ typeflag-D ] } + ! { char: E [ typeflag-E ] } + ! { char: I [ typeflag-I ] } + ! { char: K [ typeflag-K ] } + { char: L [ typeflag-L ] } + ! { char: M [ typeflag-M ] } + ! { char: N [ typeflag-N ] } + ! { char: S [ typeflag-S ] } + ! { char: V [ typeflag-V ] } + ! { char: X [ typeflag-X ] } { f [ drop ] } } case parse-tar ] [ diff --git a/extra/terminal/macosx/macosx.factor b/extra/terminal/macosx/macosx.factor index 75af3b8c9b..ab781df878 100644 --- a/extra/terminal/macosx/macosx.factor +++ b/extra/terminal/macosx/macosx.factor @@ -31,7 +31,7 @@ STRUCT: winsize { ws_xpixel c:short } { ws_ypixel c:short } ; -MEMO: TIOCGWINSZ ( -- x ) ch't 104 winsize heap-size _IOCR ; +MEMO: TIOCGWINSZ ( -- x ) char: t 104 winsize heap-size _IOCR ; PRIVATE> diff --git a/extra/text-analysis/text-analysis.factor b/extra/text-analysis/text-analysis.factor index 011fb5f12b..dadae9c815 100644 --- a/extra/text-analysis/text-analysis.factor +++ b/extra/text-analysis/text-analysis.factor @@ -57,7 +57,7 @@ CONSTANT: ABBREVIATIONS { $[ ABBREVIATIONS "|" join "(" ")\\.\x01" surround "i" - ] [ ch'. over index head ] re-replace-with + ] [ char: . over index head ] re-replace-with ! Split on EOS marker "\x01" split trimmed ; @@ -94,7 +94,7 @@ CONSTANT: add-syllable { : syllables ( str -- n ) dup length 1 = [ drop 1 ] [ - >lower ch'. swap remove + >lower char: . swap remove [ R/ [aeiouy]+/ count-matches ] [ sub-syllable [ matches? ] with count - ] [ add-syllable [ matches? 
] with count + ] tri diff --git a/extra/text-to-pdf/text-to-pdf.factor b/extra/text-to-pdf/text-to-pdf.factor index 98586b7527..a8ef12b856 100644 --- a/extra/text-to-pdf/text-to-pdf.factor +++ b/extra/text-to-pdf/text-to-pdf.factor @@ -13,12 +13,12 @@ IN: text-to-pdf H{ { 0x08 "\\b" } { 0x0c "\\f" } - { ch'\n "\\n" } - { ch'\r "\\r" } - { ch'\t "\\t" } - { ch'\\ "\\\\" } - { ch'\( "\\(" } - { ch'\) "\\)" } + { char: \n "\\n" } + { char: \r "\\r" } + { char: \t "\\t" } + { char: \\ "\\\\" } + { char: \( "\\(" } + { char: \) "\\)" } } escape-string-by "(" ")" surround ; : pdf-object ( str n -- str' ) diff --git a/extra/tnetstrings/tnetstrings.factor b/extra/tnetstrings/tnetstrings.factor index f1b9fa428b..cfb847aeb4 100644 --- a/extra/tnetstrings/tnetstrings.factor +++ b/extra/tnetstrings/tnetstrings.factor @@ -43,13 +43,13 @@ DEFER: parse-tnetstring : parse-tnetstring ( data -- remain value ) parse-payload { - { ch'# [ string>number ] } - { ch'\" [ ] } - { ch'\} [ parse-dict ] } - { ch'\] [ parse-list ] } - { ch'\! [ parse-bool ] } - { ch'~ [ parse-null ] } - { ch', [ ] } + { char: # [ string>number ] } + { char: \" [ ] } + { char: \} [ parse-dict ] } + { char: \] [ parse-list ] } + { char: \! [ parse-bool ] } + { char: ~ [ parse-null ] } + { char: , [ ] } [ "Invalid payload type: %c" sprintf throw ] } case ; diff --git a/extra/toml/toml.factor b/extra/toml/toml.factor index 64d4782586..4a805f3ca1 100644 --- a/extra/toml/toml.factor +++ b/extra/toml/toml.factor @@ -32,28 +32,28 @@ TUPLE: entry key value ; : hexdigit ( -- parser ) [ - ch'0 ch'9 range , - ch'a ch'f range , - ch'A ch'F range , + char: 0 char: 9 range , + char: a char: f range , + char: A char: F range , ] choice* ; : hex ( -- parser ) "0x" token hide hexdigit digits 2seq [ first hex> ] action ; : decdigit ( -- parser ) - ch'0 ch'9 range ; + char: 0 char: 9 range ; : dec ( -- parser ) decdigit digits [ dec> ] action ; : octdigit ( -- parser ) - ch'0 ch'7 range ; + char: 0 char: 7 range ; : oct ( -- parser ) "0o" token hide octdigit digits 2seq [ first oct> ] action ; : bindigit ( -- parser ) - ch'0 ch'1 range ; + char: 0 char: 1 range ; : bin ( -- parser ) "0b" token hide bindigit digits 2seq [ first bin> ] action ; @@ -106,11 +106,11 @@ TUPLE: entry key value ; basic-string literal-string 2choice [ "" like ] action ; : multi-basic-string ( -- parser ) - escaped unicode [ ch'\" = not ] satisfy 3choice repeat0 + escaped unicode [ char: \" = not ] satisfy 3choice repeat0 "\"\"\"" dup surrounded-by ; : multi-literal-string ( -- parser ) - [ ch'\' = not ] satisfy repeat0 + [ char: \' = not ] satisfy repeat0 "'''" dup surrounded-by ; : multi-string ( -- parser ) @@ -203,9 +203,9 @@ DEFER: key-value-parser : name-parser ( -- parser ) [ - ch'A ch'Z range , - ch'a ch'z range , - ch'0 ch'9 range , + char: A char: Z range , + char: a char: z range , + char: 0 char: 9 range , "_" token [ first ] action , "-" token [ first ] action , ] choice* repeat1 [ "" like ] action single-string 2choice ; @@ -214,7 +214,7 @@ DEFER: key-value-parser [ space hide , "#" token , - [ ch'\n = not ] satisfy repeat0 , + [ char: \n = not ] satisfy repeat0 , ] seq* [ drop f ] action ; : key-parser ( -- parser ) diff --git a/extra/tools/cal/cal.factor b/extra/tools/cal/cal.factor index 9d37cb1491..68e51c1832 100644 --- a/extra/tools/cal/cal.factor +++ b/extra/tools/cal/cal.factor @@ -14,10 +14,10 @@ IN: tools.cal 42 " " pad-tail ; : month-header ( timestamp -- str ) - "%B %Y" strftime 20 ch'\s pad-center ; + "%B %Y" strftime 20 char: \s pad-center ; : 
year-header ( timestamp -- str ) - "%Y" strftime 64 ch'\s pad-center ; + "%Y" strftime 64 char: \s pad-center ; : month-rows ( timestamp -- rows ) days 7 group day-abbreviations2 prefix format-table ; @@ -30,7 +30,7 @@ PRIVATE> : year. ( timestamp -- ) dup year-header print nl 12 [1,b] [ >>month [ month-rows ] [ month-name ] bi - 20 ch'\s pad-center prefix + 20 char: \s pad-center prefix ] with map 3 group [ first3 [ "%s %s %s\n" printf ] 3each ] each ; diff --git a/extra/tools/wc/wc.factor b/extra/tools/wc/wc.factor index 2f0e5d5334..42ed2945d4 100644 --- a/extra/tools/wc/wc.factor +++ b/extra/tools/wc/wc.factor @@ -22,8 +22,8 @@ IN: tools.wc 0 [ aligned-slices [ uchar-16 cast-array swap - [ ch'\n uchar-16-with v= vcount + >fixnum ] reduce - ] [ [ ch'\n = ] count + >fixnum ] bi* + [ char: \n uchar-16-with v= vcount + >fixnum ] reduce + ] [ [ char: \n = ] count + >fixnum ] bi* ] each-block-slice ; inline : wc-stdin ( -- n ) diff --git a/extra/turing/turing.factor b/extra/turing/turing.factor index b2d6eff22a..f1e2293dbb 100644 --- a/extra/turing/turing.factor +++ b/extra/turing/turing.factor @@ -63,7 +63,7 @@ SYMBOL: tape ! Print current turing machine state. state get . tape get . - 2 position get 2 * + ch'\s write "^" print ; + 2 position get 2 * + char: \s write "^" print ; : n ( -- ) ! Do one step and print new state. diff --git a/extra/txon/txon.factor b/extra/txon/txon.factor index d3029849a2..1330bde00a 100644 --- a/extra/txon/txon.factor +++ b/extra/txon/txon.factor @@ -13,13 +13,13 @@ IN: txon "\\`" "`" replace ; : `? ( ch1 ch2 -- ? ) - [ ch'\\ = not ] [ ch'` = ] bi* and ; + [ char: \\ = not ] [ char: ` = ] bi* and ; : (find-`) ( string -- n/f ) 2 clump [ first2 `? ] find drop [ 1 + ] [ f ] if* ; : find-` ( string -- n/f ) - dup ?first ch'` = [ drop 0 ] [ (find-`) ] if ; + dup ?first char: ` = [ drop 0 ] [ (find-`) ] if ; : parse-name ( string -- remain name ) ":`" split1 swap decode-value ; @@ -32,7 +32,7 @@ DEFER: name/values : parse-value ( string -- remain value ) dup find-` [ - dup 1 - pick ?nth ch'\: = + dup 1 - pick ?nth char: \: = [ drop name/values ] [ cut swap (parse-value) ] if [ rest [ blank? ] trim-head ] dip ] [ f swap ] if* ; @@ -45,7 +45,7 @@ DEFER: name/values ":`" over subseq? [ (name=value) ] [ f swap ] if ; : name/values ( string -- remain terms ) - [ dup { [ empty? not ] [ first ch'` = not ] } 1&& ] + [ dup { [ empty? not ] [ first char: ` = not ] } 1&& ] [ name=value ] produce assoc-combine ; : parse-txon ( string -- objects ) diff --git a/extra/ulid/ulid-tests.factor b/extra/ulid/ulid-tests.factor index 2c7183ea88..89de773627 100644 --- a/extra/ulid/ulid-tests.factor +++ b/extra/ulid/ulid-tests.factor @@ -15,11 +15,11 @@ IN: ulid.tests ] must-fail-with [ "aBCDEFGH1JK1MN0PQRSTUVWXYZ" ulid>bytes ] [ - [ ulid>bytes-bad-character? ] keep ch>> ch'a = and + [ ulid>bytes-bad-character? ] keep ch>> char: a = and ] must-fail-with [ "ABCDEFGH1JK1MN0PQRSTUVWXYZ" ulid>bytes ] [ - [ ulid>bytes-bad-character? ] keep ch>> ch'U = and + [ ulid>bytes-bad-character? 
] keep ch>> char: U = and ] must-fail-with [ "ABCDEFGH1JK1MN0PQRST0VWXYZ" ulid>bytes ] diff --git a/extra/units/reduction/reduction.factor b/extra/units/reduction/reduction.factor index a5605227f8..0512b8e974 100644 --- a/extra/units/reduction/reduction.factor +++ b/extra/units/reduction/reduction.factor @@ -7,9 +7,9 @@ IN: units.reduction CONSTANT: storage-suffixes { "B" "K" "M" "G" "T" "P" "E" "Z" "Y" } CONSTANT: unit-suffix-hash H{ - { ch'B 0 } { ch'K 1 } { ch'M 2 } { ch'G 3 } - { ch'T 4 } { ch'P 5 } { ch'E 6 } { ch'Z 7 } - { ch'Y 8 } + { char: B 0 } { char: K 1 } { char: M 2 } { char: G 3 } + { char: T 4 } { char: P 5 } { char: E 6 } { char: Z 7 } + { char: Y 8 } } : threshhold ( n multiplier base -- x ) diff --git a/extra/uu/uu.factor b/extra/uu/uu.factor index 78f3ae98c2..80de3f36a4 100644 --- a/extra/uu/uu.factor +++ b/extra/uu/uu.factor @@ -17,7 +17,7 @@ ERROR: bad-length seq ; 0 :> char! 0 :> bits! seq check-length [ - dup length ch'\s + , + dup length char: \s + , [ dup empty? bits zero? and ] [ @@ -30,7 +30,7 @@ ERROR: bad-length seq ; [ bits 6 >= ] [ bits 6 - - [ char swap neg shift 0x3f bitand ch'\s + , ] + [ char swap neg shift 0x3f bitand char: \s + , ] [ bits! ] bi ] while @@ -40,13 +40,13 @@ ERROR: bad-length seq ; ERROR: illegal-character ch ; : check-illegal-character ( ch -- ch ) - dup ch'\s dup 64 + between? [ illegal-character ] unless ; + dup char: \s dup 64 + between? [ illegal-character ] unless ; :: ascii>binary ( seq -- seq' ) 0 :> char! 0 :> bits! - seq unclip-slice ch'\s - :> len! + seq unclip-slice char: \s - :> len! [ [ dup empty? not len 0 > and ] [ @@ -55,7 +55,7 @@ ERROR: illegal-character ch ; drop 0 ] [ check-illegal-character - ch'\s - + char: \s - ] if char 6 shift bitor char! diff --git a/extra/yaml/conversion/conversion.factor b/extra/yaml/conversion/conversion.factor index aeff26bdd1..0ec0a68053 100644 --- a/extra/yaml/conversion/conversion.factor +++ b/extra/yaml/conversion/conversion.factor @@ -92,7 +92,7 @@ CONSTANT: YAML_SET_TAG "tag:yaml.org,2002:set" : construct-int ( str -- n ) string>number ; : construct-infinity ( str -- -inf/+inf ) - first ch'- = -1/0. 1/0. ? ; + first char: - = -1/0. 1/0. ? ; : construct-float ( str -- x ) { @@ -107,11 +107,11 @@ CONSTANT: YAML_SET_TAG "tag:yaml.org,2002:set" ! - months, days and hours on 1 digit ! preprocess to fix this mess... : yaml>rfc3339 ( str -- str' ) - R[=[ -[0-9][^0-9]]=] [ [ ch'0 1 ] dip insert-nth ] re-replace-with - R[=[ -[0-9][^0-9]]=] [ [ ch'0 1 ] dip insert-nth ] re-replace-with - R[=[ [^0-9][0-9]:]=] [ [ ch'0 1 ] dip insert-nth ] re-replace-with + R[=[ -[0-9][^0-9]]=] [ [ char: 0 1 ] dip insert-nth ] re-replace-with + R[=[ -[0-9][^0-9]]=] [ [ char: 0 1 ] dip insert-nth ] re-replace-with + R[=[ [^0-9][0-9]:]=] [ [ char: 0 1 ] dip insert-nth ] re-replace-with R[=[ [ \t]+]=] " " re-replace - ch'\: over index cut ch'space swap remove append ; + char: \: over index cut char: space swap remove append ; : construct-timestamp ( obj -- obj' ) dup R[=[ [0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]]=] matches? diff --git a/extra/yaml/yaml-tests.factor b/extra/yaml/yaml-tests.factor index 486a6ebf77..d212d9ae35 100644 --- a/extra/yaml/yaml-tests.factor +++ b/extra/yaml/yaml-tests.factor @@ -747,7 +747,7 @@ f value [ ! misc ! Don't use aliases/anchors for equal fixnums -{ f } [ ch'& { 0 0 } >yaml member? ] unit-test +{ f } [ char: & { 0 0 } >yaml member? ] unit-test ! !!!!!!!!!!!!!!! ! 
Config diff --git a/extra/zealot/factor/factor.factor b/extra/zealot/factor/factor.factor index c1d3d9d41d..ebb0599d0d 100644 --- a/extra/zealot/factor/factor.factor +++ b/extra/zealot/factor/factor.factor @@ -170,7 +170,7 @@ M: windows factor-path "./factor.com" ; } 2cleave ; : factor-clean-branch ( -- str ) - os cpu [ name>> ] bi@ { { ch'. ch'- } } substitute + os cpu [ name>> ] bi@ { { char: . char: - } } substitute "-" glue "origin/clean-" prepend ; : vocab-path>vocab ( path -- vocab ) diff --git a/extra/zoneinfo/zoneinfo.factor b/extra/zoneinfo/zoneinfo.factor index 969ebe5fa9..a99bc28d03 100644 --- a/extra/zoneinfo/zoneinfo.factor +++ b/extra/zoneinfo/zoneinfo.factor @@ -151,7 +151,7 @@ ERROR: zone-not-found name ; : on>value ( n -- n' ) ! "3", "Thu>=8" always >=, "lastFri" { - { [ dup 3 swap ?nth ch'> = ] [ + { [ dup 3 swap ?nth char: > = ] [ 3 cut 2 tail [ day-abbreviation3-predicate ] [ string>number ] bi* 2array ] } { [ dup "last" head? ] [ 4 tail day-abbreviation3-index ] } diff --git a/misc/fuel/strange-syntax.factor b/misc/fuel/strange-syntax.factor index 6304e772f8..3ec4a8585d 100644 --- a/misc/fuel/strange-syntax.factor +++ b/misc/fuel/strange-syntax.factor @@ -55,9 +55,9 @@ C-TYPE: cairo_snurface_t ! ! CHAR : stuff-with-chars ( -- K \n ) - ch'K - ch'\n - ch'\" ! <- \" should be highlighted + char: K + char: \n + char: \" ! <- \" should be highlighted drop ; ! ! MAIN @@ -96,7 +96,7 @@ FUNCTION: int futimes ( int id, FUNCTION: int booyah ( int x ) FUNCTION-ALIAS: test int bah ( int* ah, int[] eh ) -hexcolor: ffffff color: green nan: 1234 ch'm alien: 93 +hexcolor: ffffff color: green nan: 1234 char: m alien: 93 2drop 2drop drop PRIMITIVE: one ( a -- b )
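For readers skimming the hunks above, a minimal sketch of the replacement
literal syntax in use. This is illustrative only and not part of the patch:
it assumes the char: parsing word this commit switches to, and reuses
member?, pad-head and unit-test exactly as they appear in the diff. The
vocabulary name char-syntax-demo and the word bracket? are invented for
the example.

USING: sequences tools.test ;
IN: char-syntax-demo

! char: parses the next token as a character literal. As in the hunks
! above, delimiters and whitespace take a backslash (char: \[ , char: \s)
! while plain letters and digits do not (char: a, char: 0).
: bracket? ( ch -- ? ) { char: \[ char: \] } member? ;

{ t } [ char: \[ bracket? ] unit-test
{ f } [ char: a bracket? ] unit-test

! Padding with a literal zero character, the same idiom as number>euler above.
{ "007" } [ "7" 3 char: 0 pad-head ] unit-test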