diff --git a/basis/db/db-docs.factor b/basis/db/db-docs.factor index c392ec6b85..154d8961a2 100644 --- a/basis/db/db-docs.factor +++ b/basis/db/db-docs.factor @@ -279,7 +279,7 @@ ARTICLE: "db-custom-database-combinators" "Custom database combinators" "SQLite example combinator:" { $code <" -USING: db.sqlite db io.files ; +USING: db.sqlite db io.files io.files.temp ; : with-sqlite-db ( quot -- ) "my-database.db" temp-file swap with-db ; inline"> } diff --git a/basis/editors/notepad/tags.txt b/basis/editors/notepad/tags.txt new file mode 100644 index 0000000000..6bf68304bb --- /dev/null +++ b/basis/editors/notepad/tags.txt @@ -0,0 +1 @@ +unportable diff --git a/basis/farkup/authors.txt b/basis/farkup/authors.txt index 5674120196..a4a77d97e9 100644 --- a/basis/farkup/authors.txt +++ b/basis/farkup/authors.txt @@ -1,2 +1,2 @@ Doug Coleman -Slava Pestov +Daniel Ehrenberg diff --git a/basis/farkup/farkup-tests.factor b/basis/farkup/farkup-tests.factor index 246da48b32..cc379810ac 100644 --- a/basis/farkup/farkup-tests.factor +++ b/basis/farkup/farkup-tests.factor @@ -20,50 +20,50 @@ link-no-follow? off ] unit-test [ "

a-b

" ] [ "a-b" convert-farkup ] unit-test -[ "

*foo\nbar\n

" ] [ "*foo\nbar\n" convert-farkup ] unit-test +[ "

foo

bar

" ] [ "*foo\nbar\n" convert-farkup ] unit-test [ "

Wow!

" ] [ "*Wow!*" convert-farkup ] unit-test [ "

Wow.

" ] [ "_Wow._" convert-farkup ] unit-test -[ "

*

" ] [ "*" convert-farkup ] unit-test +[ "

" ] [ "*" convert-farkup ] unit-test [ "

*

" ] [ "\\*" convert-farkup ] unit-test -[ "

**

" ] [ "\\**" convert-farkup ] unit-test +[ "

*

" ] [ "\\**" convert-farkup ] unit-test [ "" ] [ "-a-b" convert-farkup ] unit-test [ "" ] [ "-foo" convert-farkup ] unit-test -[ "" ] [ "-foo\n" convert-farkup ] unit-test -[ "" ] [ "-foo\n-bar" convert-farkup ] unit-test -[ "" ] [ "-foo\n-bar\n" convert-farkup ] unit-test +[ "" ] [ "-foo\n" convert-farkup ] unit-test +[ "" ] [ "-foo\n-bar" convert-farkup ] unit-test +[ "" ] [ "-foo\n-bar\n" convert-farkup ] unit-test -[ "

bar\n

" ] [ "-foo\nbar\n" convert-farkup ] unit-test +[ "

bar

" ] [ "-foo\nbar\n" convert-farkup ] unit-test [ "
  1. a-b
" ] [ "#a-b" convert-farkup ] unit-test [ "
  1. foo
" ] [ "#foo" convert-farkup ] unit-test -[ "
  1. foo
  2. \n
" ] [ "#foo\n" convert-farkup ] unit-test -[ "
  1. foo
  2. \n
  3. bar
" ] [ "#foo\n#bar" convert-farkup ] unit-test -[ "
  1. foo
  2. \n
  3. bar
  4. \n
" ] [ "#foo\n#bar\n" convert-farkup ] unit-test +[ "
  1. foo
" ] [ "#foo\n" convert-farkup ] unit-test +[ "
  1. foo
  2. bar
" ] [ "#foo\n#bar" convert-farkup ] unit-test +[ "
  1. foo
  2. bar
" ] [ "#foo\n#bar\n" convert-farkup ] unit-test -[ "
  1. foo
  2. \n

bar\n

" ] [ "#foo\nbar\n" convert-farkup ] unit-test +[ "
  1. foo

bar

" ] [ "#foo\nbar\n" convert-farkup ] unit-test -[ "\n\n" ] [ "\n\n" convert-farkup ] unit-test -[ "\n\n" ] [ "\r\n\r\n" convert-farkup ] unit-test -[ "\n\n\n\n" ] [ "\r\r\r\r" convert-farkup ] unit-test -[ "\n\n\n" ] [ "\r\r\r" convert-farkup ] unit-test -[ "\n\n\n" ] [ "\n\n\n" convert-farkup ] unit-test -[ "

foo\n

bar

" ] [ "foo\n\nbar" convert-farkup ] unit-test -[ "

foo\n

bar

" ] [ "foo\r\n\r\nbar" convert-farkup ] unit-test -[ "

foo\n

bar

" ] [ "foo\r\rbar" convert-farkup ] unit-test -[ "

foo\n

bar

" ] [ "foo\r\r\nbar" convert-farkup ] unit-test +[ "" ] [ "\n\n" convert-farkup ] unit-test +[ "" ] [ "\r\n\r\n" convert-farkup ] unit-test +[ "" ] [ "\r\r\r\r" convert-farkup ] unit-test +[ "" ] [ "\r\r\r" convert-farkup ] unit-test +[ "" ] [ "\n\n\n" convert-farkup ] unit-test +[ "

foo

bar

" ] [ "foo\n\nbar" convert-farkup ] unit-test +[ "

foo

bar

" ] [ "foo\r\n\r\nbar" convert-farkup ] unit-test +[ "

foo

bar

" ] [ "foo\r\rbar" convert-farkup ] unit-test +[ "

foo

bar

" ] [ "foo\r\r\nbar" convert-farkup ] unit-test -[ "\n

bar\n

" ] [ "\nbar\n" convert-farkup ] unit-test -[ "\n

bar\n

" ] [ "\rbar\r" convert-farkup ] unit-test -[ "\n

bar\n

" ] [ "\r\nbar\r\n" convert-farkup ] unit-test +[ "

bar

" ] [ "\nbar\n" convert-farkup ] unit-test +[ "

bar

" ] [ "\rbar\r" convert-farkup ] unit-test +[ "

bar

" ] [ "\r\nbar\r\n" convert-farkup ] unit-test -[ "

foo\n

bar

" ] [ "foo\n\n\nbar" convert-farkup ] unit-test +[ "

foo

bar

" ] [ "foo\n\n\nbar" convert-farkup ] unit-test [ "" ] [ "" convert-farkup ] unit-test -[ "

|a

" ] +[ "
a
" ] [ "|a" convert-farkup ] unit-test [ "
a
" ] @@ -78,24 +78,24 @@ link-no-follow? off [ "
ab
cd
" ] [ "|a|b|\n|c|d|\n" convert-farkup ] unit-test -[ "

foo\n

aheading

\n

adfasd

" ] +[ "

foo

aheading

adfasd

" ] [ "*foo*\n=aheading=\nadfasd" convert-farkup ] unit-test -[ "

foo

\n" ] [ "=foo=\n" convert-farkup ] unit-test -[ "

lol

foo

\n" ] [ "lol=foo=\n" convert-farkup ] unit-test -[ "

=foo\n

" ] [ "=foo\n" convert-farkup ] unit-test +[ "

foo

" ] [ "=foo=\n" convert-farkup ] unit-test +[ "

lol=foo=

" ] [ "lol=foo=\n" convert-farkup ] unit-test +[ "

=foo

" ] [ "=foo\n" convert-farkup ] unit-test [ "

=foo

" ] [ "=foo" convert-farkup ] unit-test [ "

==foo

" ] [ "==foo" convert-farkup ] unit-test -[ "

=

foo

" ] [ "==foo=" convert-farkup ] unit-test +[ "

foo

" ] [ "==foo=" convert-farkup ] unit-test [ "

foo

" ] [ "==foo==" convert-farkup ] unit-test [ "

foo

" ] [ "==foo==" convert-farkup ] unit-test -[ "

=

foo

" ] [ "===foo==" convert-farkup ] unit-test -[ "

foo

=

" ] [ "=foo==" convert-farkup ] unit-test +[ "

foo

" ] [ "===foo==" convert-farkup ] unit-test +[ "

foo

" ] [ "=foo==" convert-farkup ] unit-test [ "
int main()
" ] [ "[c{int main()}]" convert-farkup ] unit-test -[ "

" ] [ "[[image:lol.jpg]]" convert-farkup ] unit-test +[ "

\"image:lol.jpg\"/

" ] [ "[[image:lol.jpg]]" convert-farkup ] unit-test [ "

\"teh

" ] [ "[[image:lol.jpg|teh lol]]" convert-farkup ] unit-test [ "

http://lol.com

" ] [ "[[http://lol.com]]" convert-farkup ] unit-test [ "

haha

" ] [ "[[http://lol.com|haha]]" convert-farkup ] unit-test @@ -111,11 +111,11 @@ link-no-follow? off [ "
hello
" ] [ "[{hello}]" convert-farkup ] unit-test [ - "

Feature comparison:\n
aFactorJavaLisp
CoolnessYesNoNo
BadassYesNoNo
EnterpriseYesYesNo
KosherYesNoYes

" + "

Feature comparison:

aFactorJavaLisp
CoolnessYesNoNo
BadassYesNoNo
EnterpriseYesYesNo
KosherYesNoYes
" ] [ "Feature comparison:\n|a|Factor|Java|Lisp|\n|Coolness|Yes|No|No|\n|Badass|Yes|No|No|\n|Enterprise|Yes|Yes|No|\n|Kosher|Yes|No|Yes|\n" convert-farkup ] unit-test [ - "

Feature comparison:\n

aFactorJavaLisp
CoolnessYesNoNo
BadassYesNoNo
EnterpriseYesYesNo
KosherYesNoYes
" + "

Feature comparison:

aFactorJavaLisp
CoolnessYesNoNo
BadassYesNoNo
EnterpriseYesYesNo
KosherYesNoYes
" ] [ "Feature comparison:\n\n|a|Factor|Java|Lisp|\n|Coolness|Yes|No|No|\n|Badass|Yes|No|No|\n|Enterprise|Yes|Yes|No|\n|Kosher|Yes|No|Yes|\n" convert-farkup ] unit-test [ @@ -131,33 +131,33 @@ link-no-follow? off [ "

<foo>

" ] [ "" convert-farkup ] unit-test -[ "

asdf\n

" ] [ "asdf\n-lol\n-haha" convert-farkup ] unit-test +[ "

asdf

" ] [ "asdf\n-lol\n-haha" convert-farkup ] unit-test -[ "

asdf\n

" ] +[ "

asdf

" ] [ "asdf\n\n-lol\n-haha" convert-farkup ] unit-test [ "
" ] [ "___" convert-farkup ] unit-test -[ "
\n" ] [ "___\n" convert-farkup ] unit-test +[ "
" ] [ "___\n" convert-farkup ] unit-test -[ "

before:\n

{ 1 2 3 } 1 tail

" ] +[ "

before:

{ 1 2 3 } 1 tail
" ] [ "before:\n[factor{{ 1 2 3 } 1 tail}]" convert-farkup ] unit-test [ "

Factor-rific!

" ] [ "[[Factor]]-rific!" convert-farkup ] unit-test -[ "

[ factor { 1 2 3 }]

" ] +[ "
 1 2 3 
" ] [ "[ factor { 1 2 3 }]" convert-farkup ] unit-test -[ "

paragraph\n


" ] +[ "

paragraph


" ] [ "paragraph\n___" convert-farkup ] unit-test -[ "

paragraph\n a ___ b

" ] +[ "

paragraph

a b

" ] [ "paragraph\n a ___ b" convert-farkup ] unit-test -[ "\n
" ] +[ "
" ] [ "\n- a\n___" convert-farkup ] unit-test -[ "

hello_world how are you today?\n

" ] +[ "

helloworld how are you today?

" ] [ "hello_world how are you today?\n- hello_world how are you today?" convert-farkup ] unit-test : check-link-escaping ( string -- link ) @@ -168,3 +168,15 @@ link-no-follow? off [ "" ] [ "[[]]" check-link-escaping ] unit-test [ "&blah;" ] [ "[[&blah;]]" check-link-escaping ] unit-test [ "C++" ] [ "[[C++]]" check-link-escaping ] unit-test + +[ "

The important thing

" ] [ "=The _important_ thing=" convert-farkup ] unit-test +[ "

emphasized text

" ] [ "[[Foo|*emphasized* text]]" convert-farkup ] unit-test +[ "
bolditalics
" ] +[ "|*bold*|_italics_|" convert-farkup ] unit-test +[ "

italicsboth

" ] [ "_italics*both" convert-farkup ] unit-test +[ "

italicsboth

" ] [ "_italics*both*" convert-farkup ] unit-test +[ "

italicsboth

" ] [ "_italics*both*_" convert-farkup ] unit-test +[ "

italicsboth

" ] [ "_italics*both_" convert-farkup ] unit-test +[ "

italicsbothafter

" ] [ "_italics*both_after*" convert-farkup ] unit-test +[ "
foo|bar
" ] [ "|foo\\|bar|" convert-farkup ] unit-test +[ "

" ] [ "\\" convert-farkup ] unit-test diff --git a/basis/farkup/farkup.factor b/basis/farkup/farkup.factor old mode 100755 new mode 100644 index 4041d92773..23a9023835 --- a/basis/farkup/farkup.factor +++ b/basis/farkup/farkup.factor @@ -1,10 +1,9 @@ -! Copyright (C) 2008 Doug Coleman. +! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg. ! See http://factorcode.org/license.txt for BSD license. -USING: accessors arrays combinators io -io.streams.string kernel math namespaces peg peg.ebnf -sequences sequences.deep strings xml.entities xml.syntax -vectors splitting xmode.code2html urls.encoding xml.data -xml.writer ; +USING: sequences kernel splitting lists fry accessors assocs math.order +math combinators namespaces urls.encoding xml.syntax xmode.code2html +xml.data arrays strings vectors xml.writer io.streams.string locals +unicode.categories ; IN: farkup SYMBOL: relative-link-prefix @@ -39,123 +38,174 @@ TUPLE: line-break ; : simple-link-title ( string -- string' ) dup absolute-url? [ "/" split1-last swap or ] unless ; -EBNF: parse-farkup -nl = ("\r\n" | "\r" | "\n") => [[ drop "\n" ]] -whitespace = " " | "\t" | nl +! _foo*bar_baz*bing works like foo*barbazbing +! I could support overlapping, but there's not a good use case for it. -heading1 = "=" (!("=" | nl).)+ "=" - => [[ second >string heading1 boa ]] +DEFER: (parse-paragraph) -heading2 = "==" (!("=" | nl).)+ "==" - => [[ second >string heading2 boa ]] +: parse-paragraph ( string -- seq ) + (parse-paragraph) list>array ; -heading3 = "===" (!("=" | nl).)+ "===" - => [[ second >string heading3 boa ]] +: make-paragraph ( string -- paragraph ) + parse-paragraph paragraph boa ; -heading4 = "====" (!("=" | nl).)+ "====" - => [[ second >string heading4 boa ]] +: cut-half-slice ( string i -- before after-slice ) + [ head ] [ 1+ short tail-slice ] 2bi ; -heading = heading4 | heading3 | heading2 | heading1 +: find-cut ( string quot -- before after delimiter ) + dupd find + [ [ cut-half-slice ] [ f ] if* ] dip ; inline +: parse-delimiter ( string delimiter class -- paragraph ) + [ '[ _ = ] find-cut drop ] dip + '[ parse-paragraph _ new swap >>child ] + [ (parse-paragraph) ] bi* cons ; +: delimiter-class ( delimiter -- class ) + H{ + { CHAR: * strong } + { CHAR: _ emphasis } + { CHAR: ^ superscript } + { CHAR: ~ subscript } + { CHAR: % inline-code } + } at ; -strong = "*" (!("*" | nl).)+ "*" - => [[ second >string strong boa ]] +: parse-link ( string -- paragraph-list ) + rest-slice "]]" split1-slice [ + "|" split1 + [ "" like dup simple-link-title ] unless* + [ "image:" ?head ] dip swap [ image boa ] [ parse-paragraph link boa ] if + ] dip [ (parse-paragraph) cons ] when* ; -emphasis = "_" (!("_" | nl).)+ "_" - => [[ second >string emphasis boa ]] +: ?first ( seq -- elt ) 0 swap ?nth ; -superscript = "^" (!("^" | nl).)+ "^" - => [[ second >string superscript boa ]] +: parse-big-link ( before after -- link rest ) + dup ?first CHAR: [ = + [ parse-link ] + [ [ CHAR: [ suffix ] [ (parse-paragraph) ] bi* ] + if ; -subscript = "~" (!("~" | nl).)+ "~" - => [[ second >string subscript boa ]] +: escape ( before after -- before' after' ) + [ nil ] [ unclip-slice swap [ suffix ] dip (parse-paragraph) ] if-empty ; -inline-code = "%" (!("%" | nl).)+ "%" - => [[ second >string inline-code boa ]] +: (parse-paragraph) ( string -- list ) + [ nil ] [ + [ "*_^~%[\\" member? 
] find-cut [ + { + { CHAR: [ [ parse-big-link ] } + { CHAR: \\ [ escape ] } + [ dup delimiter-class parse-delimiter ] + } case cons + ] [ drop "" like 1list ] if* + ] if-empty ; -link-content = (!("|"|"]").)+ - => [[ >string ]] +: ( string -- state ) string-lines ; +: look ( state i -- char ) swap first ?nth ; +: done? ( state -- ? ) empty? ; +: take-line ( state -- state' line ) unclip-slice ; -image-link = "[[image:" link-content "|" link-content "]]" - => [[ [ second >string ] [ fourth >string ] bi image boa ]] - | "[[image:" link-content "]]" - => [[ second >string f image boa ]] +: take-lines ( state char -- state' lines ) + dupd '[ ?first _ = not ] find drop + [ cut-slice ] [ f ] if* swap ; -simple-link = "[[" link-content "]]" - => [[ second >string dup simple-link-title link boa ]] +:: (take-until) ( state delimiter accum -- string/f state' ) + state empty? [ accum "\n" join f ] [ + state unclip-slice :> first :> rest + first delimiter split1 :> after :> before + before accum push + after [ + accum "\n" join + rest after prefix + ] [ + rest delimiter accum (take-until) + ] if + ] if ; -labeled-link = "[[" link-content "|" link-content "]]" - => [[ [ second >string ] [ fourth >string ] bi link boa ]] +: take-until ( state delimiter -- string/f state' ) + V{ } clone (take-until) ; -link = image-link | labeled-link | simple-link +: count= ( string -- n ) + dup [ [ CHAR: = = not ] find drop 0 or ] bi@ min ; -escaped-char = "\" . - => [[ second 1string ]] +: trim= ( string -- string' ) + [ CHAR: = = ] trim ; -inline-tag = strong | emphasis | superscript | subscript | inline-code - | link | escaped-char +: make-heading ( string class -- heading ) + [ trim= parse-paragraph ] dip boa ; inline +: parse-heading ( state -- state' heading ) + take-line dup count= { + { 0 [ make-paragraph ] } + { 1 [ heading1 make-heading ] } + { 2 [ heading2 make-heading ] } + { 3 [ heading3 make-heading ] } + { 4 [ heading4 make-heading ] } + [ drop heading4 make-heading ] + } case ; +: trim-row ( seq -- seq' ) + rest + dup peek empty? [ but-last ] when ; -inline-delimiter = '*' | '_' | '^' | '~' | '%' | '\' | '[' +: ?peek ( seq -- elt/f ) + [ f ] [ peek ] if-empty ; -cell = (!(inline-delimiter | '|' | nl).)+ - => [[ >string ]] - -table-column = (list | cell | inline-tag | inline-delimiter ) '|' - => [[ first ]] -table-row = "|" (table-column)+ - => [[ second table-row boa ]] -table = ((table-row nl => [[ first ]] )+ table-row? | table-row) - => [[ table boa ]] +: coalesce ( rows -- rows' ) + V{ } clone [ + '[ + _ dup ?peek ?peek CHAR: \\ = + [ [ pop "|" rot 3append ] keep ] when + push + ] each + ] keep ; -text = (!(nl | code | heading | inline-delimiter | table ).)+ - => [[ >string ]] +: parse-table ( state -- state' table ) + CHAR: | take-lines [ + "|" split + trim-row + coalesce + [ parse-paragraph ] map + table-row boa + ] map table boa ; -paragraph-nl-item = nl list - | nl line - | nl => [[ line-breaks? get [ drop line-break new ] when ]] -paragraph-item = (table | code | text | inline-tag | inline-delimiter)+ -paragraph = ((paragraph-item paragraph-nl-item)+ nl+ => [[ first ]] - | (paragraph-item paragraph-nl-item)+ paragraph-item? 
- | paragraph-item) - => [[ paragraph boa ]] +: parse-line ( state -- state' item ) + take-line dup "___" = + [ drop line new ] [ make-paragraph ] if ; +: parse-list ( state char class -- state' list ) + [ + take-lines + [ rest parse-paragraph list-item boa ] map + ] dip boa ; inline -list-item = (cell | inline-tag | inline-delimiter)* +: parse-ul ( state -- state' ul ) + CHAR: - unordered-list parse-list ; -ordered-list-item = '#' list-item - => [[ second list-item boa ]] -ordered-list = ((ordered-list-item nl)+ ordered-list-item? | ordered-list-item) - => [[ ordered-list boa ]] +: parse-ol ( state -- state' ul ) + CHAR: # ordered-list parse-list ; -unordered-list-item = '-' list-item - => [[ second list-item boa ]] -unordered-list = ((unordered-list-item nl)+ unordered-list-item? | unordered-list-item) - => [[ unordered-list boa ]] +: parse-code ( state -- state' item ) + dup 1 look CHAR: [ = + [ unclip-slice make-paragraph ] [ + "{" take-until [ rest ] dip + "}]" take-until + [ code boa ] dip swap + ] if ; -list = ordered-list | unordered-list +: parse-item ( state -- state' item ) + dup 0 look { + { CHAR: = [ parse-heading ] } + { CHAR: | [ parse-table ] } + { CHAR: _ [ parse-line ] } + { CHAR: - [ parse-ul ] } + { CHAR: # [ parse-ol ] } + { CHAR: [ [ parse-code ] } + { f [ rest-slice f ] } + [ drop take-line make-paragraph ] + } case ; - -line = '___' - => [[ drop line new ]] - - -named-code - = '[' (!('{' | whitespace | '[').)+ '{' (!("}]").)+ "}]" - => [[ [ second >string ] [ fourth >string ] bi code boa ]] - -simple-code - = "[{" (!("}]").)+ "}]" - => [[ second >string f swap code boa ]] - -code = named-code | simple-code - - -stand-alone - = (line | code | heading | list | table | paragraph | nl)* -;EBNF +: parse-farkup ( string -- farkup ) + [ dup done? not ] [ parse-item ] produce nip sift ; CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');" @@ -168,19 +218,6 @@ CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');" [ relative-link-prefix get prepend "" like url-encode ] } cond ; -: write-link ( href text -- xml ) - [ check-url link-no-follow? get "nofollow" and ] dip - [XML rel=<->><-> XML] ; - -: write-image-link ( href text -- xml ) - disable-images? get [ - 2drop - [XML Images are not allowed XML] - ] [ - [ check-url ] [ f like ] bi* - [XML alt=<->/> XML] - ] if ; - : render-code ( string mode -- xml ) [ string-lines ] dip htmlize-lines [XML
<->
XML] ; @@ -206,11 +243,27 @@ M: ordered-list (write-farkup) "ol" farkup-inside ; M: paragraph (write-farkup) "p" farkup-inside ; M: table (write-farkup) "table" farkup-inside ; +: write-link ( href text -- xml ) + [ check-url link-no-follow? get "nofollow" and ] dip + [XML rel=<->><-> XML] ; + +: write-image-link ( href text -- xml ) + disable-images? get [ + 2drop + [XML Images are not allowed XML] + ] [ + [ check-url ] [ f like ] bi* + [XML alt=<->/> XML] + ] if ; + +: open-link ( link -- href text ) + [ href>> ] [ text>> (write-farkup) ] bi ; + M: link (write-farkup) - [ href>> ] [ text>> ] bi write-link ; + open-link write-link ; M: image (write-farkup) - [ href>> ] [ text>> ] bi write-image-link ; + open-link write-image-link ; M: code (write-farkup) [ string>> ] [ mode>> ] bi render-code ; @@ -228,9 +281,7 @@ M: table-row (write-farkup) M: string (write-farkup) ; -M: vector (write-farkup) [ (write-farkup) ] map ; - -M: f (write-farkup) ; +M: array (write-farkup) [ (write-farkup) ] map ; : farkup>xml ( string -- xml ) parse-farkup (write-farkup) ; @@ -240,3 +291,4 @@ M: f (write-farkup) ; : convert-farkup ( string -- string' ) [ write-farkup ] with-string-writer ; + diff --git a/basis/help/handbook/handbook.factor b/basis/help/handbook/handbook.factor index f20732c7ee..e048b66b7c 100644 --- a/basis/help/handbook/handbook.factor +++ b/basis/help/handbook/handbook.factor @@ -162,8 +162,7 @@ ARTICLE: "encodings-introduction" "An introduction to encodings" { $code "\"file.txt\" utf16 file-contents" } "Encoding descriptors are also used by " { $link "io.streams.byte-array" } " and taken by combinators like " { $link with-file-writer } " and " { $link with-byte-reader } " which deal with streams. It is " { $emphasis "not" } " used with " { $link "io.streams.string" } " because these deal with abstract text." $nl -"When the " { $link binary } " encoding is used, a " { $link byte-array } " is expected for writing and returned for reading, since the stream deals with bytes. All other encodings deal with strings, since they are used to represent text." -{ $see-also "stream-elements" } ; +"When the " { $link binary } " encoding is used, a " { $link byte-array } " is expected for writing and returned for reading, since the stream deals with bytes. All other encodings deal with strings, since they are used to represent text." ; ARTICLE: "io" "Input and output" { $heading "Streams" } diff --git a/basis/html/components/components-tests.factor b/basis/html/components/components-tests.factor index 0b85455c2e..72ceea20a0 100644 --- a/basis/html/components/components-tests.factor +++ b/basis/html/components/components-tests.factor @@ -163,7 +163,7 @@ M: link-test link-href drop "http://www.apple.com/foo&bar" ; [ ] [ "-foo\n-bar" "farkup" set-value ] unit-test -[ "
  • foo
  • \n
  • bar
" ] [ +[ "
  • foo
  • bar
" ] [ [ "farkup" T{ farkup } render ] with-string-writer ] unit-test diff --git a/basis/images/bitmap/bitmap.factor b/basis/images/bitmap/bitmap.factor index ffe3adff48..8209159a8e 100755 --- a/basis/images/bitmap/bitmap.factor +++ b/basis/images/bitmap/bitmap.factor @@ -83,14 +83,15 @@ ERROR: bmp-not-supported n ; :: fixup-color-index ( loading-bitmap -- loading-bitmap ) loading-bitmap width>> :> width + width 3 * :> width*3 loading-bitmap height>> abs :> height loading-bitmap color-index>> length :> color-index-length - height 3 * :> height*3 - color-index-length width height*3 * - height*3 /i :> misaligned - misaligned 0 > [ + color-index-length height /i :> stride + color-index-length width*3 height * - height /i :> padding + padding 0 > [ loading-bitmap [ - loading-bitmap width>> misaligned + 3 * - [ 3 misaligned * head* ] map concat + stride + [ width*3 head-slice ] map concat ] change-color-index ] [ loading-bitmap diff --git a/basis/images/test-images/40red24bit.bmp b/basis/images/test-images/40red24bit.bmp new file mode 100644 index 0000000000..5e694559c6 Binary files /dev/null and b/basis/images/test-images/40red24bit.bmp differ diff --git a/basis/images/test-images/41red24bit.bmp b/basis/images/test-images/41red24bit.bmp new file mode 100644 index 0000000000..6599dcc107 Binary files /dev/null and b/basis/images/test-images/41red24bit.bmp differ diff --git a/basis/images/test-images/42red24bit.bmp b/basis/images/test-images/42red24bit.bmp new file mode 100644 index 0000000000..e95a4f75f5 Binary files /dev/null and b/basis/images/test-images/42red24bit.bmp differ diff --git a/basis/images/test-images/43red24bit.bmp b/basis/images/test-images/43red24bit.bmp new file mode 100644 index 0000000000..d88f2d4c32 Binary files /dev/null and b/basis/images/test-images/43red24bit.bmp differ diff --git a/basis/images/test-images/elephants.tiff b/basis/images/test-images/elephants.tiff new file mode 100644 index 0000000000..f462a0c043 Binary files /dev/null and b/basis/images/test-images/elephants.tiff differ diff --git a/basis/images/tiff/tiff.factor b/basis/images/tiff/tiff.factor index 2ea1b08e20..80eaff8140 100755 --- a/basis/images/tiff/tiff.factor +++ b/basis/images/tiff/tiff.factor @@ -477,26 +477,24 @@ ERROR: unknown-component-order ifd ; [ unknown-component-order ] } case ; +: normalize-alpha-data ( seq -- byte-array ) + ! 
[ normalize-alpha-data ] change-bitmap + B{ } like dup + byte-array>float-array + 4 + [ + dup fourth dup 0 = [ + 2drop + ] [ + [ 3 head-slice ] dip '[ _ / ] change-each + ] if + ] each ; + : handle-alpha-data ( ifd -- ifd ) dup extra-samples find-tag { - { extra-samples-associated-alpha-data [ - [ - B{ } like dup - byte-array>float-array - 4 - [ - dup fourth dup 0 = [ - 2drop - ] [ - [ 3 head-slice ] dip '[ _ / ] change-each - ] if - ] each - ] change-bitmap - ] } - { extra-samples-unspecified-alpha-data [ - ] } - { extra-samples-unassociated-alpha-data [ - ] } + { extra-samples-associated-alpha-data [ ] } + { extra-samples-unspecified-alpha-data [ ] } + { extra-samples-unassociated-alpha-data [ ] } [ bad-extra-samples ] } case ; diff --git a/basis/math/partial-dispatch/partial-dispatch-tests.factor b/basis/math/partial-dispatch/partial-dispatch-tests.factor index bcf7bb77b0..29979b62d3 100644 --- a/basis/math/partial-dispatch/partial-dispatch-tests.factor +++ b/basis/math/partial-dispatch/partial-dispatch-tests.factor @@ -26,3 +26,8 @@ tools.test math kernel sequences ; [ fixnum-bitnot ] [ \ bitnot modular-variant ] unit-test [ fixnum+fast ] [ \ fixnum+ modular-variant ] unit-test [ fixnum+fast ] [ \ fixnum+fast modular-variant ] unit-test + +[ 3 ] [ 1 2 +-integer-integer ] unit-test +[ 3 ] [ 1 >bignum 2 +-integer-integer ] unit-test +[ 3 ] [ 1 2 >bignum +-integer-integer ] unit-test +[ 3 ] [ 1 >bignum 2 >bignum +-integer-integer ] unit-test \ No newline at end of file diff --git a/basis/math/partial-dispatch/partial-dispatch.factor b/basis/math/partial-dispatch/partial-dispatch.factor index 08cd8fb470..6679e81fcd 100644 --- a/basis/math/partial-dispatch/partial-dispatch.factor +++ b/basis/math/partial-dispatch/partial-dispatch.factor @@ -45,31 +45,41 @@ M: word integer-op-input-classes { bitnot fixnum-bitnot } } at swap or ; +: bignum-fixnum-op-quot ( big-word -- quot ) + '[ fixnum>bignum _ execute ] ; + +: fixnum-bignum-op-quot ( big-word -- quot ) + '[ [ fixnum>bignum ] dip _ execute ] ; + : integer-fixnum-op-quot ( fix-word big-word -- quot ) [ [ over fixnum? ] % - [ '[ _ execute ] , ] - [ '[ fixnum>bignum _ execute ] , ] bi* - \ if , + [ '[ _ execute ] , ] [ bignum-fixnum-op-quot , ] bi* \ if , ] [ ] make ; : fixnum-integer-op-quot ( fix-word big-word -- quot ) [ [ dup fixnum? ] % - [ '[ _ execute ] , ] - [ '[ [ fixnum>bignum ] dip _ execute ] , ] bi* - \ if , + [ '[ _ execute ] , ] [ fixnum-bignum-op-quot , ] bi* \ if , + ] [ ] make ; + +: integer-bignum-op-quot ( big-word -- quot ) + [ + [ over fixnum? ] % + [ fixnum-bignum-op-quot , ] [ '[ _ execute ] , ] bi \ if , ] [ ] make ; : integer-integer-op-quot ( fix-word big-word -- quot ) [ - [ dup fixnum? ] % - 2dup integer-fixnum-op-quot , + [ 2dup both-fixnums? ] % + [ '[ _ execute ] , ] [ - [ over fixnum? [ [ fixnum>bignum ] dip ] when ] % - nip , - ] [ ] make , - \ if , + [ + [ dup fixnum? ] % + [ bignum-fixnum-op-quot , ] + [ integer-bignum-op-quot , ] bi \ if , + ] [ ] make , + ] bi* \ if , ] [ ] make ; : integer-op-word ( triple -- word ) diff --git a/basis/openssl/libcrypto/libcrypto.factor b/basis/openssl/libcrypto/libcrypto.factor index 9cbed1f752..1a25b4d019 100644 --- a/basis/openssl/libcrypto/libcrypto.factor +++ b/basis/openssl/libcrypto/libcrypto.factor @@ -1,4 +1,4 @@ -! Copyright (C) 2007 Elie CHAFTARI +! Copyright (C) 2007 Elie CHAFTARI, 2009 Maxim Savchenko ! See http://factorcode.org/license.txt for BSD license. ! ! 
Tested with OpenSSL 0.9.8a_0 on Mac OS X 10.4.9 PowerPC @@ -159,3 +159,65 @@ FUNCTION: int RSA_check_key ( void* rsa ) ; FUNCTION: void RSA_free ( void* rsa ) ; FUNCTION: int RSA_print_fp ( void* fp, void* x, int offset ) ; + +! =============================================== +! objects.h +! =============================================== + +FUNCTION: int OBJ_sn2nid ( char* s ) ; + +! =============================================== +! bn.h +! =============================================== + +FUNCTION: int BN_num_bits ( void* a ) ; + +FUNCTION: void* BN_bin2bn ( void* s, int len, void* ret ) ; + +FUNCTION: int BN_bn2bin ( void* a, void* to ) ; + +FUNCTION: void BN_clear_free ( void* a ) ; + +! =============================================== +! ec.h +! =============================================== + +CONSTANT: POINT_CONVERSION_COMPRESSED 2 +CONSTANT: POINT_CONVERSION_UNCOMPRESSED 4 +CONSTANT: POINT_CONVERSION_HYBRID 6 + +FUNCTION: int EC_GROUP_get_degree ( void* group ) ; + +FUNCTION: void* EC_POINT_new ( void* group ) ; + +FUNCTION: void EC_POINT_clear_free ( void* point ) ; + +FUNCTION: int EC_POINT_point2oct ( void* group, void* point, int form, void* buf, int len, void* ctx ) ; + +FUNCTION: int EC_POINT_oct2point ( void* group, void* point, void* buf, int len, void* ctx ) ; + +FUNCTION: void* EC_KEY_new_by_curve_name ( int nid ) ; + +FUNCTION: void EC_KEY_free ( void* r ) ; + +FUNCTION: int EC_KEY_set_private_key ( void* key, void* priv_key ) ; + +FUNCTION: int EC_KEY_set_public_key ( void* key, void* pub_key ) ; + +FUNCTION: int EC_KEY_generate_key ( void* eckey ) ; + +FUNCTION: void* EC_KEY_get0_group ( void* key ) ; + +FUNCTION: void* EC_KEY_get0_private_key ( void* key ) ; + +FUNCTION: void* EC_KEY_get0_public_key ( void* key ) ; + +! =============================================== +! ecdsa.h +! 
=============================================== + +FUNCTION: int ECDSA_size ( void* eckey ) ; + +FUNCTION: int ECDSA_sign ( int type, void* dgst, int dgstlen, void* sig, void* siglen, void* eckey ) ; + +FUNCTION: int ECDSA_verify ( int type, void* dgst, int dgstlen, void* sig, int siglen, void* eckey ) ; diff --git a/basis/peg/ebnf/ebnf.factor b/basis/peg/ebnf/ebnf.factor index 91af91b3a1..db29ce1ee7 100644 --- a/basis/peg/ebnf/ebnf.factor +++ b/basis/peg/ebnf/ebnf.factor @@ -530,7 +530,7 @@ M: ebnf-non-terminal (transform) ( ast -- parser ) : EBNF: reset-tokenizer CREATE-WORD dup ";EBNF" parse-multiline-string - ebnf>quot swapd 1 1 define-declared "ebnf-parser" set-word-prop + ebnf>quot swapd (( input -- ast )) define-declared "ebnf-parser" set-word-prop reset-tokenizer ; parsing diff --git a/basis/regexp/ast/ast.factor b/basis/regexp/ast/ast.factor index ffaed2db62..1c11ed5c7d 100644 --- a/basis/regexp/ast/ast.factor +++ b/basis/regexp/ast/ast.factor @@ -37,8 +37,7 @@ C: with-options TUPLE: options on off ; C: options -SINGLETONS: unix-lines dotall multiline comments case-insensitive -unicode-case reversed-regexp ; +SINGLETONS: unix-lines dotall multiline case-insensitive reversed-regexp ; : ( term -- term' ) f 2array ; diff --git a/basis/regexp/classes/classes.factor b/basis/regexp/classes/classes.factor index d26ff7f69c..e3a1774585 100644 --- a/basis/regexp/classes/classes.factor +++ b/basis/regexp/classes/classes.factor @@ -12,7 +12,7 @@ ascii-class punctuation-class java-printable-class blank-class control-character-class hex-digit-class java-blank-class c-identifier-class unmatchable-class terminator-class word-boundary-class ; -SINGLETONS: beginning-of-input ^ end-of-input $ end-of-file word-break ; +SINGLETONS: beginning-of-input ^ end-of-input $ end-of-file ^unix $unix word-break ; TUPLE: range from to ; C: range diff --git a/basis/regexp/compiler/compiler.factor b/basis/regexp/compiler/compiler.factor index b55cab6294..6c7896dcca 100644 --- a/basis/regexp/compiler/compiler.factor +++ b/basis/regexp/compiler/compiler.factor @@ -17,9 +17,6 @@ SYMBOL: backwards? M: t question>quot drop [ 2drop t ] ; M: f question>quot drop [ 2drop f ] ; -M: not-class question>quot - class>> question>quot [ not ] compose ; - M: beginning-of-input question>quot drop [ drop zero? ] ; @@ -40,6 +37,12 @@ M: $ question>quot M: ^ question>quot drop [ { [ drop zero? ] [ [ 1- ] dip ?nth "\r\n" member? ] } 2|| ] ; +M: $unix question>quot + drop [ { [ length = ] [ ?nth CHAR: \n = ] } 2|| ] ; + +M: ^unix question>quot + drop [ { [ drop zero? ] [ [ 1- ] dip ?nth CHAR: \n = ] } 2|| ] ; + M: word-break question>quot drop [ word-break-at? ] ; @@ -104,13 +107,11 @@ C: box transitions>quot ; : states>code ( words dfa -- ) - [ - '[ - dup _ word>quot - (( last-match index string -- ? )) - define-declared - ] each - ] with-compilation-unit ; + '[ + dup _ word>quot + (( last-match index string -- ? )) + define-declared + ] each ; : states>words ( dfa -- words dfa ) dup transitions>> keys [ gensym ] H{ } map>assoc @@ -123,12 +124,9 @@ C: box PRIVATE> -: simple-define-temp ( quot effect -- word ) - [ define-temp ] with-compilation-unit ; - : dfa>word ( dfa -- quot ) dfa>main-word execution-quot '[ drop [ f ] 2dip @ ] - (( start-index string regexp -- i/f )) simple-define-temp ; + (( start-index string regexp -- i/f )) define-temp ; : dfa>shortest-word ( dfa -- word ) t shortest? 
[ dfa>word ] with-variable ; diff --git a/basis/regexp/disambiguate/disambiguate.factor b/basis/regexp/disambiguate/disambiguate.factor index 67b1503f9b..876d898cb4 100644 --- a/basis/regexp/disambiguate/disambiguate.factor +++ b/basis/regexp/disambiguate/disambiguate.factor @@ -1,7 +1,8 @@ ! Copyright (C) 2009 Daniel Ehrenberg. ! See http://factorcode.org/license.txt for BSD license. USING: kernel accessors regexp.classes math.bits assocs sequences -arrays sets regexp.dfa math fry regexp.minimize regexp.ast regexp.transition-tables ; +arrays sets regexp.dfa math fry regexp.minimize regexp.ast +locals regexp.transition-tables ; IN: regexp.disambiguate TUPLE: parts in out ; @@ -9,7 +10,7 @@ TUPLE: parts in out ; : make-partition ( choices classes -- partition ) zip [ first ] partition [ values ] bi@ parts boa ; -: powerset-partition ( classes -- partitions ) +: powerset-partition ( sequence -- partitions ) [ length [ 2^ ] keep ] keep '[ _ _ make-partition ] map rest ; @@ -19,19 +20,49 @@ TUPLE: parts in out ; [ in>> ] bi prefix ; -: get-transitions ( partition state-transitions -- next-states ) - [ in>> ] dip '[ _ at ] gather sift ; +: singleton-partition ( integer non-integers -- {class,partition} ) + dupd + '[ _ [ class-member? ] with filter ] keep + prefix f parts boa + 2array ; + +: add-out ( seq partition -- partition' ) + [ out>> append ] [ in>> ] bi swap parts boa ; + +: intersection ( seq -- elts ) + [ f ] [ unclip [ intersect ] reduce ] if-empty ; + +: meaningful-integers ( partition table -- integers ) + [ [ in>> ] [ out>> ] bi ] dip + '[ [ _ at ] map intersection ] bi@ diff ; + +: class-integers ( classes integers -- table ) + '[ _ over '[ _ class-member? ] filter ] H{ } map>assoc ; + +: add-integers ( partitions classes integers -- partitions ) + class-integers '[ + [ _ meaningful-integers ] keep add-out + ] map ; + +: class-partitions ( classes -- assoc ) + [ integer? ] partition [ + dup powerset-partition spin add-integers + [ [ partition>class ] keep 2array ] map + [ first ] filter + ] [ '[ _ singleton-partition ] map ] 2bi append ; : new-transitions ( transitions -- assoc ) ! assoc is class, partition values [ keys ] gather [ tagged-epsilon? not ] filter - powerset-partition - [ [ partition>class ] keep ] { } map>assoc - [ drop ] assoc-filter ; + class-partitions ; + +: get-transitions ( partition state-transitions -- next-states ) + [ in>> ] dip '[ _ at ] gather sift ; : preserving-epsilon ( state-transitions quot -- new-state-transitions ) [ [ drop tagged-epsilon? ] assoc-filter ] bi assoc-union H{ } assoc-like ; inline + : disambiguate ( nfa -- nfa ) expand-ors [ dup new-transitions '[ diff --git a/basis/regexp/nfa/nfa.factor b/basis/regexp/nfa/nfa.factor index 20be6b87d8..d59d4818ec 100644 --- a/basis/regexp/nfa/nfa.factor +++ b/basis/regexp/nfa/nfa.factor @@ -60,11 +60,16 @@ GENERIC: modify-epsilon ( tag -- newtag ) M: object modify-epsilon ; +: line-option ( multiline unix-lines default -- option ) + multiline option? [ + drop [ unix-lines option? ] 2dip swap ? + ] [ 2nip ] if ; + M: $ modify-epsilon - multiline option? [ drop end-of-input ] unless ; + $unix end-of-input line-option ; M: ^ modify-epsilon - multiline option? 
[ drop beginning-of-input ] unless ; + ^unix beginning-of-input line-option ; M: tagged-epsilon nfa-node clone [ modify-epsilon ] change-tag add-simple-entry ; diff --git a/basis/regexp/parser/parser.factor b/basis/regexp/parser/parser.factor index c6a69f2508..7b2d6af2c1 100644 --- a/basis/regexp/parser/parser.factor +++ b/basis/regexp/parser/parser.factor @@ -2,7 +2,7 @@ ! See http://factorcode.org/license.txt for BSD license. USING: peg.ebnf kernel math.parser sequences assocs arrays fry math combinators regexp.classes strings splitting peg locals accessors -regexp.ast ; +regexp.ast unicode.case ; IN: regexp.parser : allowed-char? ( ch -- ? ) @@ -19,20 +19,19 @@ ERROR: bad-number ; ERROR: bad-class name ; : name>class ( name -- class ) - { - { "Lower" letter-class } - { "Upper" LETTER-class } - { "Alpha" Letter-class } - { "ASCII" ascii-class } - { "Digit" digit-class } - { "Alnum" alpha-class } - { "Punct" punctuation-class } - { "Graph" java-printable-class } - { "Print" java-printable-class } - { "Blank" non-newline-blank-class } - { "Cntrl" control-character-class } - { "XDigit" hex-digit-class } - { "Space" java-blank-class } + >string >case-fold { + { "lower" letter-class } + { "upper" LETTER-class } + { "alpha" Letter-class } + { "ascii" ascii-class } + { "digit" digit-class } + { "alnum" alpha-class } + { "punct" punctuation-class } + { "graph" java-printable-class } + { "blank" non-newline-blank-class } + { "cntrl" control-character-class } + { "xdigit" hex-digit-class } + { "space" java-blank-class } ! TODO: unicode-character-class } [ bad-class ] at-error ; @@ -66,11 +65,8 @@ ERROR: bad-class name ; { CHAR: i case-insensitive } { CHAR: d unix-lines } { CHAR: m multiline } - { CHAR: n multiline } { CHAR: r reversed-regexp } { CHAR: s dotall } - { CHAR: u unicode-case } - { CHAR: x comments } } ; : ch>option ( ch -- singleton ) @@ -101,8 +97,8 @@ CharacterInBracket = !("}") Character QuotedCharacter = !("\\E") . -Escape = "p{" CharacterInBracket*:s "}" => [[ s >string name>class ]] - | "P{" CharacterInBracket*:s "}" => [[ s >string name>class ]] +Escape = "p{" CharacterInBracket*:s "}" => [[ s name>class ]] + | "P{" CharacterInBracket*:s "}" => [[ s name>class ]] | "Q" QuotedCharacter*:s "\\E" => [[ s ]] | "u" Character:a Character:b Character:c Character:d => [[ { a b c d } hex> ensure-number ]] diff --git a/basis/regexp/regexp-docs.factor b/basis/regexp/regexp-docs.factor index b35f8d1cf3..6ad340a82d 100644 --- a/basis/regexp/regexp-docs.factor +++ b/basis/regexp/regexp-docs.factor @@ -1,6 +1,7 @@ ! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg. ! See http://factorcode.org/license.txt for BSD license. -USING: kernel strings help.markup help.syntax math regexp.parser regexp.ast ; +USING: kernel strings help.markup help.syntax math regexp.parser +regexp.ast multiline ; IN: regexp ABOUT: "regexp" @@ -21,8 +22,17 @@ ARTICLE: "regexp" "Regular expressions" { $subsection { "regexp" "deploy" } } ; ARTICLE: { "regexp" "intro" } "A quick introduction to regular expressions" - -; +"Regular expressions are a terse way to do certain simple string processing tasks. 
For example, to replace all instances of " { $snippet "foo" } " in one string with " { $snippet "bar" } ", the following can be used:" +{ $code "R/ foo/ \"bar\" re-replace" } +"That could be done with sequence operations, but consider doing this replacement for an arbitrary number of o's, at least two:" +{ $code "R/ foo+/ \"bar\" re-replace" } +"The " { $snippet "+" } " operator matches one or more occurrences of the previous expression; in this case " { $snippet "o" } ". Another useful feature is alternation. Say we want to do this replacement with fooooo or boooo. Then we could use the code" +{ $code "R/ (f|b)oo+/ \"bar\" re-replace" } +"To search a file for all lines that match a given regular expression, you could use code like this:" +{ $code <" "file.txt" ascii file-lines [ R/ (f|b)oo+/ re-contains? ] filter "> } +"To test if a string in its entirety matches a regular expression, the following can be used:" +{ $example <" USING: regexp prettyprint ; "fooo" R/ (b|f)oo+/ matches? . "> "t" } +"Regular expressions can't be used for all parsing tasks. For example, they are not powerful enough to match balancing parentheses." ; ARTICLE: { "regexp" "construction" } "Constructing regular expressions" "Most of the time, regular expressions are literals and the parsing word should be used, to construct them at parse time. This ensures that they are only compiled once, and gives parse time syntax checking." @@ -33,20 +43,71 @@ ARTICLE: { "regexp" "construction" } "Constructing regular expressions" "Another approach is to use " { $vocab-link "regexp.combinators" } "." ; ARTICLE: { "regexp" "syntax" } "Regular expression syntax" -"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely. A new addition is the inclusion of a negation operator, with the syntax " { $snippet "(?~foo)" } " to match everything that does not match " { $snippet "foo" } "." +"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely. Below, the syntax is documented." { $heading "Characters" } +"At its core, regular expressions consist of character literals. For example, " { $snippet "R/ f/" } " is a regular expression matching just the string 'f'. In addition, the normal escape codes are provided, like " { $snippet "\\t" } " for the tab character and " { $snippet "\\uxxxxxx" } "for an arbitrary Unicode code point, by its hex value. In addition, any character can be preceded by a backslash to escape it, unless this has special meaning. For example, to match a literal opening parenthesis, use " { $snippet "\\(" } "." +{ $heading "Concatenation, alternation and grouping" } +"Regular expressions can be built out of multiple characters by concatenation. For example, " { $snippet "R/ ab/" } " matches a followed by b. The " { $snippet "|" } " (alternation) operator can construct a regexp which matches one of two alternatives. Parentheses can be used for gropuing. So " { $snippet "R/ f(oo|ar)/" } " would match either 'foo' or 'far'." { $heading "Character classes" } +"Square brackets define a convenient way to refer to a set of characters. For example, " { $snippet "[ab]" } " refers to either a or b. And " { $snippet "[a-z]" } " refers to all of the characters between a and z, in code point order. You can use these together, as in " { $snippet "[ac-fz]" } " which matches all of the characters between c and f, in addition to a and z. 
Character classes can be negated using a carat, as in " { $snippet "[^a]" } " which matches all characters which are not a." { $heading "Predefined character classes" } +"Several character classes are predefined, both for convenience and because they are too large to represent directly. In Factor regular expressions, all character classes are Unicode-aware." +{ $table + { { $snippet "\\d" } "Digits" } + { { $snippet "\\D" } "Not digits" } + { { $snippet "\\s" } "Whitespace" } + { { $snippet "\\S" } "Not whitespace" } + { { $snippet "\\w" } "Word character (alphanumeric or underscore)" } + { { $snippet "\\W" } "Not word character" } + { { $snippet "\\p{property}" } "Character which fulfils the property" } + { { $snippet "\\P{property}" } "Character which does not fulfil the property" } } +"Properties for " { $snippet "\\p" } " and " { $snippet "\\P" } " (case-insensitive):" +{ $table + { { $snippet "\\p{lower}" } "Lower case letters" } + { { $snippet "\\p{upper}" } "Upper case letters" } + { { $snippet "\\p{alpha}" } "Letters" } + { { $snippet "\\p{ascii}" } "Characters in the ASCII range" } + { { $snippet "\\p{alnum}" } "Letters or numbers" } + { { $snippet "\\p{punct}" } "Punctuation" } + { { $snippet "\\p{blank}" } "Non-newline whitespace" } + { { $snippet "\\p{cntrl}" } "Control character" } + { { $snippet "\\p{space}" } "Whitespace" } + { { $snippet "\\p{xdigit}" } "Hexidecimal digit" } } ! In the future: Unicode +"Full unicode properties are not yet supported." { $heading "Boundaries" } +"Special operators exist to match certain points in the string. These are called 'zero-width' because they do not consume any characters." +{ $table + { { $snippet "^" } "Beginning of a line" } + { { $snippet "$" } "End of a line" } + { { $snippet "\\A" } "Beginning of text" } + { { $snippet "\\z" } "End of text" } + { { $snippet "\\Z" } "Almost end of text: only thing after is newline" } + { { $snippet "\\b" } "Word boundary (by Unicode word boundaries)" } + { { $snippet "\\b" } "Not word boundary (by Unicode word boundaries)" } } { $heading "Greedy quantifiers" } -{ $heading "Reluctant quantifiers" } -{ $heading "Posessive quantifiers" } -{ $heading "Logical operations" } +"It is possible to have a regular expression which matches a variable number of occurrences of another regular expression." +{ $table + { { $snippet "a*" } "Zero or more occurrences of a" } + { { $snippet "a+" } "One or more occurrences of a" } + { { $snippet "a?" } "Zero or one occurrences of a" } + { { $snippet "a{n}" } "n occurrences of a" } + { { $snippet "a{n,}" } "At least n occurrences of a" } + { { $snippet "a{,m}" } "At most m occurrences of a" } + { { $snippet "a{n,m}" } "Between n and m occurrences of a" } } +"All of these quantifiers are " { $emphasis "greedy" } ", meaning that they take as many repetitions as possible within the larger regular expression. Reluctant and posessive quantifiers are not yet supported." { $heading "Lookaround" } +"Operators are provided to look ahead and behind the current point in the regular expression. These can be used in any context, but they're the most useful at the beginning or end of a regular expression." +{ $table + { { $snippet "(?=a)" } "Asserts that the current position is immediately followed by a" } + { { $snippet "(?!a)" } "Asserts that the current position is not immediately followed by a" } + { { $snippet "(?<=a)" } "Asserts that the current position is immediately preceded by a" } + { { $snippet "(? matches? 
] unit-test [ 3 ] [ "caba" "(?<=b)a" first-match from>> ] unit-test + +[ t ] [ "\ra" R/ .^a/ms matches? ] unit-test +[ f ] [ "\ra" R/ .^a/mds matches? ] unit-test +[ t ] [ "\na" R/ .^a/ms matches? ] unit-test +[ t ] [ "\na" R/ .^a/mds matches? ] unit-test + +[ t ] [ "a\r" R/ a$./ms matches? ] unit-test +[ f ] [ "a\r" R/ a$./mds matches? ] unit-test +[ t ] [ "a\n" R/ a$./ms matches? ] unit-test +[ t ] [ "a\n" R/ a$./mds matches? ] unit-test diff --git a/basis/regexp/regexp.factor b/basis/regexp/regexp.factor index 29f7e3e84e..63a2f25885 100644 --- a/basis/regexp/regexp.factor +++ b/basis/regexp/regexp.factor @@ -4,7 +4,7 @@ USING: accessors combinators kernel kernel.private math sequences sequences.private strings sets assocs prettyprint.backend prettyprint.custom make lexer namespaces parser arrays fry locals regexp.parser splitting sorting regexp.ast regexp.negation -regexp.compiler words call call.private math.ranges ; +regexp.compiler compiler.units words call call.private math.ranges ; IN: regexp TUPLE: regexp @@ -35,7 +35,7 @@ M: lookbehind question>quot ! Returns ( index string -- ? ) : match-index-from ( i string regexp -- index/f ) ! This word is unsafe. It assumes that i is a fixnum ! and that string is a string. - dup dfa>> execute-unsafe( index string regexp -- i/f ) ; + dup dfa>> execute-unsafe( index string regexp -- i/f ) ; inline GENERIC: end/start ( string regexp -- end start ) M: regexp end/start drop length 0 ; @@ -129,31 +129,28 @@ PRIVATE> GENERIC: compile-regexp ( regex -- regexp ) : regexp-initial-word ( i string regexp -- i/f ) - compile-regexp match-index-from ; + [ compile-regexp ] with-compilation-unit match-index-from ; -: do-compile-regexp ( regexp -- regexp ) +M: regexp compile-regexp ( regexp -- regexp ) dup '[ dup \ regexp-initial-word = [ drop _ get-ast ast>dfa dfa>word ] when ] change-dfa ; -M: regexp compile-regexp ( regexp -- regexp ) - do-compile-regexp ; - M: reverse-regexp compile-regexp ( regexp -- regexp ) - t backwards? [ do-compile-regexp ] with-variable ; + t backwards? [ call-next-method ] with-variable ; DEFER: compile-next-match : next-initial-word ( i string regexp -- i start end string ) - compile-next-match do-next-match ; + [ compile-next-match ] with-compilation-unit do-next-match ; : compile-next-match ( regexp -- regexp ) dup '[ dup \ next-initial-word = [ drop _ [ compile-regexp dfa>> def>> ] [ reverse-regexp? ] bi '[ { array-capacity string regexp } declare _ _ next-match ] - (( i string regexp -- i start end string )) simple-define-temp + (( i string regexp -- i start end string )) define-temp ] when ] change-next-match ; diff --git a/basis/ui/backend/cocoa/cocoa.factor b/basis/ui/backend/cocoa/cocoa.factor index 9888fc4e77..5cb02f5ad6 100755 --- a/basis/ui/backend/cocoa/cocoa.factor +++ b/basis/ui/backend/cocoa/cocoa.factor @@ -8,7 +8,7 @@ ui.backend ui.clipboards ui.gadgets ui.gadgets.worlds ui.backend.cocoa.views core-foundation core-foundation.run-loop core-graphics.types threads math.rectangles fry libc generalizations alien.c-types cocoa.views -combinators io.thread locals ; +combinators io.thread locals call ; IN: ui.backend.cocoa TUPLE: handle ; @@ -152,7 +152,7 @@ M: cocoa-ui-backend (with-ui) "UI" assert.app [ [ init-clipboard - cocoa-init-hook get call + cocoa-init-hook get call( -- ) start-ui f io-thread-running? 
set-global init-thread-timer diff --git a/basis/ui/ui.factor b/basis/ui/ui.factor index 42885aecb7..fe318101ee 100644 --- a/basis/ui/ui.factor +++ b/basis/ui/ui.factor @@ -153,7 +153,7 @@ PRIVATE> "UI update" spawn drop ; : start-ui ( quot -- ) - call notify-ui-thread start-ui-thread ; + call( -- ) notify-ui-thread start-ui-thread ; : restore-windows ( -- ) [ @@ -193,6 +193,6 @@ M: object close-window ] "ui" add-init-hook : with-ui ( quot -- ) - ui-running? [ call ] [ '[ init-ui @ ] (with-ui) ] if ; + ui-running? [ call( -- ) ] [ '[ init-ui @ ] (with-ui) ] if ; HOOK: beep ui-backend ( -- ) \ No newline at end of file diff --git a/basis/unicode/data/data.factor b/basis/unicode/data/data.factor index bff4ddeaab..74914e8537 100644 --- a/basis/unicode/data/data.factor +++ b/basis/unicode/data/data.factor @@ -5,7 +5,7 @@ io.files hashtables quotations splitting grouping arrays io math.parser hash2 math.order byte-arrays words namespaces words compiler.units parser io.encodings.ascii values interval-maps ascii sets combinators locals math.ranges sorting make -strings.parser io.encodings.utf8 ; +strings.parser io.encodings.utf8 memoize ; IN: unicode.data VALUE: simple-lower @@ -108,6 +108,9 @@ CONSTANT: categories "Zs" "Zl" "Zp" "Cc" "Cf" "Cs" "Co" } +MEMO: categories-map ( -- hashtable ) + categories [ swap ] H{ } assoc-map-as ; + CONSTANT: num-chars HEX: 2FA1E ! the maximum unicode char in the first 3 planes @@ -124,10 +127,10 @@ CONSTANT: num-chars HEX: 2FA1E ] assoc-each table ; :: process-category ( data -- category-listing ) - [let | table [ num-chars ] | - 2 data (process-data) [| char cat | - cat categories index char table ?set-nth - ] assoc-each table fill-ranges ] ; + num-chars :> table + 2 data (process-data) [| char cat | + cat categories-map at char table ?set-nth + ] assoc-each table fill-ranges ; : process-names ( data -- names-hash ) 1 swap (process-data) [ diff --git a/basis/xml/tests/test.factor b/basis/xml/tests/test.factor index 818a28c892..1d07aa9406 100644 --- a/basis/xml/tests/test.factor +++ b/basis/xml/tests/test.factor @@ -74,3 +74,4 @@ SYMBOL: xml-file [ "foo" ] [ "]>&bar;" string>xml children>string ] unit-test [ T{ xml-chunk f V{ "hello" } } ] [ "hello" string>xml-chunk ] unit-test [ "1.1" ] [ "" string>xml prolog>> version>> ] unit-test +[ "ß" ] [ "ß" read-xml children>string ] unit-test diff --git a/basis/xml/traversal/traversal-docs.factor b/basis/xml/traversal/traversal-docs.factor index 1329c4975e..9f26774647 100644 --- a/basis/xml/traversal/traversal-docs.factor +++ b/basis/xml/traversal/traversal-docs.factor @@ -1,6 +1,6 @@ ! Copyright (C) 2005, 2009 Daniel Ehrenberg ! See http://factorcode.org/license.txt for BSD license. -USING: help.markup help.syntax xml.data sequences strings ; +USING: help.markup help.syntax xml.data sequences strings multiline ; IN: xml.traversal ABOUT: "xml.traversal" @@ -8,7 +8,7 @@ ABOUT: "xml.traversal" ARTICLE: "xml.traversal" "Utilities for traversing XML" "The " { $vocab-link "xml.traversal" } " vocabulary provides utilities for traversing an XML DOM tree and viewing the contents of a single tag. The following words are defined:" $nl - "Note: the difference between deep-tag-named and tag-named is that the former searches recursively among all children and children of children of the tag, while the latter only looks at the direct children, and is therefore more efficient." 
+ { $subsection { "xml.traversal" "intro" } } { $subsection tag-named } { $subsection tags-named } { $subsection deep-tag-named } @@ -20,6 +20,20 @@ ARTICLE: "xml.traversal" "Utilities for traversing XML" { $subsection first-child-tag } { $subsection assert-tag } ; +ARTICLE: { "xml.traversal" "intro" } "An example of XML processing" +"To illustrate how to use the XML library, we develop a simple Atom parser in Factor. Atom is an XML-based syndication format, like RSS. To see the full version of what we develop here, look at " { $snippet "basis/syndication" } " at the " { $snippet "atom1.0" } " word. First, we want to load a file and get a DOM tree for it." +{ $code <" "file.xml" file>xml "> } +"No encoding descriptor is needed, because XML files contain sufficient information to auto-detect the encoding. Next, we want to extract information from the tree. To get the title, we can use the following:" +{ $code <" "title" tag-named children>string "> } +"The " { $link tag-named } " word finds the first tag named " { $snippet "title" } " in the top level (just under the main tag). Then, with a tag on the stack, its children are asserted to be a string, and the string is returned." $nl +"For a slightly more complicated example, we can look at how entries are parsed. To get a sequence of tags with the name " { $snippet "entry" } ":" +{ $code <" "entry" tags-named "> } +"Imagine that, for each of these, we want to get the URL of the entry. In Atom, the URLs are in a " { $snippet "link" } " tag which is contained in the " { $snippet "entry" } " tag. There are multiple " { $snippet "link" } " tags, but one of them contains the attribute " { $snippet "rel=alternate" } ", and the " { $snippet "href" } " attribute has the URL. So, given an element of the sequence produced in the above quotation, we run the code:" +{ $code <" "link" tags-named [ "rel" attr "alternate" = ] find nip "> } +"to get the link tag on the stack, and" +{ $code <" "href" attr >url "> } +"to extract the URL from it." ; + HELP: deep-tag-named { $values { "tag" "an XML tag or document" } { "name/string" "an XML name or string representing a name" } { "matching-tag" tag } } { $description "Finds an XML tag with a matching name, recursively searching children and children of children." } diff --git a/basis/xml/xml-docs.factor b/basis/xml/xml-docs.factor index 77969c55cd..434209620b 100644 --- a/basis/xml/xml-docs.factor +++ b/basis/xml/xml-docs.factor @@ -67,9 +67,9 @@ HELP: string>dtd ARTICLE: { "xml" "reading" } "Reading XML" "The following words are used to read something into an XML document" - { $subsection string>xml } { $subsection read-xml } { $subsection read-xml-chunk } + { $subsection string>xml } { $subsection string>xml-chunk } { $subsection file>xml } { $subsection bytes>xml } @@ -90,10 +90,16 @@ ARTICLE: { "xml" "events" } "Event-based XML parsing" { $subsection pull-event } { $subsection pull-elem } ; +ARTICLE: { "xml" "namespaces" } "Working with XML namespaces" +"The Factor XML parser implements XML namespaces, and provides convenient utilities for working with them. Anywhere in the public API that a name is accepted as an argument, either a string or an XML name is accepted. If a string is used, it is coerced into a name by giving it a null namespace. Names are stored as " { $link name } " tuples, which have slots for the namespace prefix and namespace URL as well as the main part of the tag name." 
$nl +"To make it easier to create XML names, the parsing word " { $snippet "XML-NS:" } " is provided in the " { $vocab-link "xml.syntax" } " vocabulary." $nl +"When parsing XML, names are automatically augmented with the appropriate namespace URL when the information is available. This does not take into account any XML schema which might allow for such prefixes to be omitted. When generating XML to be written, keep in mind that the XML writer knows only about the literal prefixes and ignores the URLs. It is your job to make sure that they match up correctly, and that there is the appropriate " { $snippet "xmlns" } " declaration." ; + ARTICLE: "xml" "XML parser" "The " { $vocab-link "xml" } " vocabulary implements the XML 1.0 and 1.1 standards, converting strings of text into XML and vice versa. The parser checks for well-formedness but is not validating. There is only partial support for processing DTDs." { $subsection { "xml" "reading" } } { $subsection { "xml" "events" } } + { $subsection { "xml" "namespaces" } } { $vocab-subsection "Writing XML" "xml.writer" } { $vocab-subsection "XML parsing errors" "xml.errors" } { $vocab-subsection "XML entities" "xml.entities" } diff --git a/basis/xml/xml.factor b/basis/xml/xml.factor index 073f46cbae..fba2eafaba 100755 --- a/basis/xml/xml.factor +++ b/basis/xml/xml.factor @@ -4,7 +4,8 @@ USING: accessors arrays io io.encodings.binary io.files io.streams.string kernel namespaces sequences strings io.encodings.utf8 xml.data xml.errors xml.elements ascii xml.entities xml.writer xml.state xml.autoencoding assocs xml.tokenize -combinators.short-circuit xml.name splitting io.streams.byte-array ; +combinators.short-circuit xml.name splitting io.streams.byte-array +combinators ; IN: xml xml-stack get first second ] with-state ; inline +: make-xml ( stream quot -- xml ) + 0 read-seq make-xml-doc ; inline + PRIVATE> : each-element ( stream quot: ( xml-elem -- ) -- ) @@ -169,14 +173,16 @@ PRIVATE> ] with-state ; inline : read-xml ( stream -- xml ) - [ start-document [ process ] when* ] - 0 read-seq make-xml-doc ; + dup stream-element-type { + { +character+ [ [ check ] make-xml ] } + { +byte+ [ [ start-document [ process ] when* ] make-xml ] } + } case ; : read-xml-chunk ( stream -- seq ) [ check ] 1 read-seq ; : string>xml ( string -- xml ) - [ check ] 0 read-seq make-xml-doc ; + read-xml ; : string>xml-chunk ( string -- xml ) read-xml-chunk ; diff --git a/core/classes/mixin/mixin.factor b/core/classes/mixin/mixin.factor index 1261d44a69..4bdb893d9a 100644 --- a/core/classes/mixin/mixin.factor +++ b/core/classes/mixin/mixin.factor @@ -1,4 +1,4 @@ -! Copyright (C) 2004, 2008 Slava Pestov. +! Copyright (C) 2004, 2009 Slava Pestov. ! See http://factorcode.org/license.txt for BSD license. USING: classes classes.union words kernel sequences definitions combinators arrays assocs generic accessors ; @@ -21,8 +21,9 @@ M: mixin-class rank-class drop 3 ; drop ] [ [ { } redefine-mixin-class ] + [ H{ } clone "instances" set-word-prop ] [ update-classes ] - bi + tri ] if ; TUPLE: check-mixin-class class ; @@ -44,6 +45,11 @@ TUPLE: check-mixin-class class ; [ [ update-class ] each ] [ implementors [ remake-generic ] each ] bi ; +: (add-mixin-instance) ( class mixin -- ) + [ [ suffix ] change-mixin-class ] + [ [ f ] 2dip "instances" word-prop set-at ] + 2bi ; + : add-mixin-instance ( class mixin -- ) #! Note: we call update-classes on the new member, not the #! mixin. This ensures that we only have to update the @@ -53,20 +59,22 @@ TUPLE: check-mixin-class class ; #! 
diff --git a/core/classes/mixin/mixin.factor b/core/classes/mixin/mixin.factor
index 1261d44a69..4bdb893d9a 100644
--- a/core/classes/mixin/mixin.factor
+++ b/core/classes/mixin/mixin.factor
@@ -1,4 +1,4 @@
-! Copyright (C) 2004, 2008 Slava Pestov.
+! Copyright (C) 2004, 2009 Slava Pestov.
 ! See http://factorcode.org/license.txt for BSD license.
 USING: classes classes.union words kernel sequences definitions
 combinators arrays assocs generic accessors ;
@@ -21,8 +21,9 @@ M: mixin-class rank-class drop 3 ;
         drop
     ] [
         [ { } redefine-mixin-class ]
+        [ H{ } clone "instances" set-word-prop ]
        [ update-classes ]
-        bi
+        tri
     ] if ;
 
 TUPLE: check-mixin-class class ;
@@ -44,6 +45,11 @@ TUPLE: check-mixin-class class ;
     [ [ update-class ] each ]
     [ implementors [ remake-generic ] each ] bi ;
 
+: (add-mixin-instance) ( class mixin -- )
+    [ [ suffix ] change-mixin-class ]
+    [ [ f ] 2dip "instances" word-prop set-at ]
+    2bi ;
+
 : add-mixin-instance ( class mixin -- )
     #! Note: we call update-classes on the new member, not the
     #! mixin. This ensures that we only have to update the
     #! updated by transitivity; the mixins usages appear in
     #! class-usages of the member, now that it's been added.
     [ 2drop ] [
-        [ [ suffix ] change-mixin-class ] 2keep
-        [ nip ] [ [ new-class? ] either? ] 2bi [
-            update-classes/new
-        ] [
-            update-classes
-        ] if
+        [ (add-mixin-instance) ] 2keep
+        [ nip ] [ [ new-class? ] either? ] 2bi
+        [ update-classes/new ] [ update-classes ] if
     ] if-mixin-member? ;
 
+: (remove-mixin-instance) ( class mixin -- )
+    [ [ swap remove ] change-mixin-class ]
+    [ "instances" word-prop delete-at ]
+    2bi ;
+
 : remove-mixin-instance ( class mixin -- )
     #! The order of the three clauses is important here. The last
     #! one must come after the other two so that the entries it
     #! adds to changed-generics are not overwritten.
     [
-        [ [ swap remove ] change-mixin-class ]
+        [ (remove-mixin-instance) ]
         [ nip update-classes ]
         [ class-usages update-methods ]
         2tri
@@ -76,32 +84,21 @@ M: mixin-class class-forgotten remove-mixin-instance ;
 
 ! Definition protocol implementation ensures that removing an
 ! INSTANCE: declaration from a source file updates the mixin.
-TUPLE: mixin-instance loc class mixin ;
+TUPLE: mixin-instance class mixin ;
 
-M: mixin-instance equal?
-    {
-        { [ over mixin-instance? not ] [ f ] }
-        { [ 2dup [ class>> ] bi@ = not ] [ f ] }
-        { [ 2dup [ mixin>> ] bi@ = not ] [ f ] }
-        [ t ]
-    } cond 2nip ;
+C: <mixin-instance> mixin-instance
 
-M: mixin-instance hashcode*
-    [ class>> ] [ mixin>> ] bi 2array hashcode* ;
+: >mixin-instance< ( mixin-instance -- class mixin )
+    [ class>> ] [ mixin>> ] bi ; inline
 
-: <mixin-instance> ( class mixin -- definition )
-    mixin-instance new
-        swap >>mixin
-        swap >>class ;
+M: mixin-instance where >mixin-instance< "instances" word-prop at ;
 
-M: mixin-instance where loc>> ;
-
-M: mixin-instance set-where (>>loc) ;
+M: mixin-instance set-where >mixin-instance< "instances" word-prop set-at ;
 
 M: mixin-instance definer drop \ INSTANCE: f ;
 
 M: mixin-instance definition drop f ;
 
 M: mixin-instance forget*
-    [ class>> ] [ mixin>> ] bi
+    >mixin-instance<
     dup mixin-class? [ remove-mixin-instance ] [ 2drop ] if ;
diff --git a/core/generic/generic-tests.factor b/core/generic/generic-tests.factor
index db404f4850..aea7875b20 100755
--- a/core/generic/generic-tests.factor
+++ b/core/generic/generic-tests.factor
@@ -247,4 +247,4 @@ GENERIC: move-method-generic ( a -- b )
 
 [ ] [ "IN: generic.tests.a" "move-method-test-1" parse-stream drop ] unit-test
 
-[ { string } ] [ move-method-generic order ] unit-test
\ No newline at end of file
+[ { string } ] [ \ move-method-generic order ] unit-test
\ No newline at end of file
diff --git a/core/io/encodings/encodings-docs.factor b/core/io/encodings/encodings-docs.factor
index e13e05bf40..204441c19a 100644
--- a/core/io/encodings/encodings-docs.factor
+++ b/core/io/encodings/encodings-docs.factor
@@ -124,6 +124,6 @@ ARTICLE: "io.encodings" "I/O encodings"
 "Combinators to change the encoding:"
 { $subsection with-encoded-output }
 { $subsection with-decoded-input }
-{ $see-also "encodings-introduction" "stream-elements" } ;
+{ $see-also "encodings-introduction" } ;
 
 ABOUT: "io.encodings"
diff --git a/core/io/io-docs.factor b/core/io/io-docs.factor
index 2305f497af..ebc248bbbf 100644
--- a/core/io/io-docs.factor
+++ b/core/io/io-docs.factor
@@ -262,7 +262,6 @@ $nl
 { $subsection stream-nl }
 "This word is for streams that allow seeking:"
 { $subsection stream-seek }
-"For a discussion of the distinction between binary and string streams, see " { $link "stream-elements" } "."
 { $see-also "io.timeouts" } ;
 
 ARTICLE: "stdio-motivation" "Motivation for default streams"
@@ -313,7 +312,7 @@ $nl
 { $subsection read }
 { $subsection read-until }
 { $subsection read-partial }
-"If the default input stream is a string stream (" { $link "stream-elements" } "), lines of text can be read:"
+"If the default input stream is a character stream (" { $link stream-element-type } " outputs " { $link +character+ } "), lines of text can be read:"
 { $subsection readln }
 "Seeking on the default input stream:"
 { $subsection seek-input }
@@ -328,7 +327,7 @@ $nl
 { $subsection flush }
 { $subsection write1 }
 { $subsection write }
-"If the default output stream is a string stream (" { $link "stream-elements" } "), lines of text can be written:"
+"If the default output stream is a character stream (" { $link stream-element-type } " outputs " { $link +character+ } "), lines of text can be written:"
 { $subsection readln }
 { $subsection print }
 { $subsection nl }
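The reworded paragraphs above hinge on stream-element-type: readln and print only apply when it outputs +character+. A small sketch of that distinction, not part of this patch, assuming string readers report +character+:

USING: io io.streams.string prettyprint ;

! A string reader is a character stream.
"line one\nline two" <string-reader> stream-element-type . ! +character+

! With a character stream as the default input stream, readln reads lines.
"line one\nline two" <string-reader> [ readln print ] with-input-stream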
diff --git a/core/parser/parser-tests.factor b/core/parser/parser-tests.factor
index 5ec9ea9b3c..6b90abeced 100644
--- a/core/parser/parser-tests.factor
+++ b/core/parser/parser-tests.factor
@@ -3,7 +3,7 @@ io.streams.string namespaces classes effects source-files
 assocs sequences strings io.files io.pathnames definitions
 continuations sorting classes.tuple compiler.units debugger
 vocabs vocabs.loader accessors eval combinators lexer
-vocabs.parser words.symbol ;
+vocabs.parser words.symbol multiline ;
 IN: parser.tests
 
 \ run-file must-infer
@@ -560,7 +560,7 @@ EXCLUDE: qualified.tests.bar => x ;
 
 ! Two similar bugs
 ! Replace : def with something in << >>
-[ [ ] ] [
+/* [ [ ] ] [
     "IN: parser.tests : was-once-a-word-bug ( -- ) ;"
     "was-once-a-word-test" parse-stream
 ] unit-test
@@ -572,7 +572,7 @@ EXCLUDE: qualified.tests.bar => x ;
     "was-once-a-word-test" parse-stream
 ] unit-test
 
-[ t ] [ "was-once-a-word-bug" "parser.tests" lookup >boolean ] unit-test
+[ t ] [ "was-once-a-word-bug" "parser.tests" lookup >boolean ] unit-test */
 
 ! Replace : def with DEFER:
 [ [ ] ] [
diff --git a/extra/benchmark/benchmark.factor b/extra/benchmark/benchmark.factor
index 9afd211876..489dc5e73f 100755
--- a/extra/benchmark/benchmark.factor
+++ b/extra/benchmark/benchmark.factor
@@ -24,10 +24,10 @@ IN: benchmark
     [
         [
            [ [ 1array $vocab-link ] with-cell ]
-            [ [ 1000000 /f pprint-cell ] [ "failed" write ] if* ] bi*
+            [ [ 1000000 /f pprint-cell ] [ [ "failed" write ] with-cell ] if* ] bi*
         ] with-row
     ] assoc-each
-    ] tabular-output ;
+    ] tabular-output nl ;
 
 : benchmarks ( -- )
     run-benchmarks benchmarks. ;
diff --git a/extra/drills/drills.factor b/extra/drills/drills.factor
new file mode 100644
index 0000000000..98da12959b
--- /dev/null
+++ b/extra/drills/drills.factor
@@ -0,0 +1,42 @@
+USING: accessors arrays cocoa.dialogs combinators continuations
+fry grouping io.encodings.utf8 io.files io.styles kernel math
+math.parser models models.arrow models.history namespaces random
+sequences splitting ui ui.gadgets.alerts ui.gadgets.book-extras
+ui.gadgets.books ui.gadgets.buttons ui.gadgets.frames
+ui.gadgets.grids ui.gadgets.labels ui.gadgets.tracks ui.gestures
+ui.gadgets.corners ;
+
+IN: drills
+SYMBOLS: it startLength ;
+: big ( gadget -- gadget ) { "sans-serif" plain 30 } >>font ;
+: card ( model quot -- button ) big [ next ] ;
+: op ( quot str -- gadget )