Merge branch 'master' of git://factorcode.org/git/factor
commit 7a010063c0

@@ -279,7 +279,7 @@ ARTICLE: "db-custom-database-combinators" "Custom database combinators"

 "SQLite example combinator:"
 { $code <"
-USING: db.sqlite db io.files ;
+USING: db.sqlite db io.files io.files.temp ;
 : with-sqlite-db ( quot -- )
     "my-database.db" temp-file <sqlite-db> swap with-db ; inline"> }
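As a quick usage sketch of the combinator documented in the hunk above (the table, the SQL, and the use of the db vocabulary's sql-command word are assumptions for the example, not part of the change):

    USING: db db.sqlite io.files.temp kernel ;

    : with-sqlite-db ( quot -- )
        "my-database.db" temp-file <sqlite-db> swap with-db ; inline

    ! Hypothetical example: database work wrapped in the combinator runs
    ! against the temporary file-backed SQLite database.
    [ "CREATE TABLE point ( x INTEGER, y INTEGER );" sql-command ] with-sqlite-db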
@@ -0,0 +1 @@
+unportable
@@ -1,2 +1,2 @@
 Doug Coleman
-Slava Pestov
+Daniel Ehrenberg
@@ -20,50 +20,50 @@ link-no-follow? off
 ] unit-test

 [ "<p>a-b</p>" ] [ "a-b" convert-farkup ] unit-test
-[ "<p>*foo\nbar\n</p>" ] [ "*foo\nbar\n" convert-farkup ] unit-test
+[ "<p><strong>foo</strong></p><p>bar</p>" ] [ "*foo\nbar\n" convert-farkup ] unit-test
 [ "<p><strong>Wow!</strong></p>" ] [ "*Wow!*" convert-farkup ] unit-test
 [ "<p><em>Wow.</em></p>" ] [ "_Wow._" convert-farkup ] unit-test

-[ "<p>*</p>" ] [ "*" convert-farkup ] unit-test
+[ "<p><strong></strong></p>" ] [ "*" convert-farkup ] unit-test
 [ "<p>*</p>" ] [ "\\*" convert-farkup ] unit-test
-[ "<p>**</p>" ] [ "\\**" convert-farkup ] unit-test
+[ "<p>*<strong></strong></p>" ] [ "\\**" convert-farkup ] unit-test

 [ "<ul><li>a-b</li></ul>" ] [ "-a-b" convert-farkup ] unit-test
 [ "<ul><li>foo</li></ul>" ] [ "-foo" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n</ul>" ] [ "-foo\n" convert-farkup ] unit-test
+[ "<ul><li>foo</li></ul>" ] [ "-foo\n" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n<li>bar</li></ul>" ] [ "-foo\n-bar" convert-farkup ] unit-test
+[ "<ul><li>foo</li><li>bar</li></ul>" ] [ "-foo\n-bar" convert-farkup ] unit-test
-[ "<ul><li>foo</li>\n<li>bar</li>\n</ul>" ] [ "-foo\n-bar\n" convert-farkup ] unit-test
+[ "<ul><li>foo</li><li>bar</li></ul>" ] [ "-foo\n-bar\n" convert-farkup ] unit-test

-[ "<ul><li>foo</li>\n</ul><p>bar\n</p>" ] [ "-foo\nbar\n" convert-farkup ] unit-test
+[ "<ul><li>foo</li></ul><p>bar</p>" ] [ "-foo\nbar\n" convert-farkup ] unit-test

 [ "<ol><li>a-b</li></ol>" ] [ "#a-b" convert-farkup ] unit-test
 [ "<ol><li>foo</li></ol>" ] [ "#foo" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n</ol>" ] [ "#foo\n" convert-farkup ] unit-test
+[ "<ol><li>foo</li></ol>" ] [ "#foo\n" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n<li>bar</li></ol>" ] [ "#foo\n#bar" convert-farkup ] unit-test
+[ "<ol><li>foo</li><li>bar</li></ol>" ] [ "#foo\n#bar" convert-farkup ] unit-test
-[ "<ol><li>foo</li>\n<li>bar</li>\n</ol>" ] [ "#foo\n#bar\n" convert-farkup ] unit-test
+[ "<ol><li>foo</li><li>bar</li></ol>" ] [ "#foo\n#bar\n" convert-farkup ] unit-test

-[ "<ol><li>foo</li>\n</ol><p>bar\n</p>" ] [ "#foo\nbar\n" convert-farkup ] unit-test
+[ "<ol><li>foo</li></ol><p>bar</p>" ] [ "#foo\nbar\n" convert-farkup ] unit-test


-[ "\n\n" ] [ "\n\n" convert-farkup ] unit-test
+[ "" ] [ "\n\n" convert-farkup ] unit-test
-[ "\n\n" ] [ "\r\n\r\n" convert-farkup ] unit-test
+[ "" ] [ "\r\n\r\n" convert-farkup ] unit-test
-[ "\n\n\n\n" ] [ "\r\r\r\r" convert-farkup ] unit-test
+[ "" ] [ "\r\r\r\r" convert-farkup ] unit-test
-[ "\n\n\n" ] [ "\r\r\r" convert-farkup ] unit-test
+[ "" ] [ "\r\r\r" convert-farkup ] unit-test
-[ "\n\n\n" ] [ "\n\n\n" convert-farkup ] unit-test
+[ "" ] [ "\n\n\n" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\n\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\n\nbar" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\r\n\r\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\r\n\r\nbar" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\r\rbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\r\rbar" convert-farkup ] unit-test
-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\r\r\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\r\r\nbar" convert-farkup ] unit-test

-[ "\n<p>bar\n</p>" ] [ "\nbar\n" convert-farkup ] unit-test
+[ "<p>bar</p>" ] [ "\nbar\n" convert-farkup ] unit-test
-[ "\n<p>bar\n</p>" ] [ "\rbar\r" convert-farkup ] unit-test
+[ "<p>bar</p>" ] [ "\rbar\r" convert-farkup ] unit-test
-[ "\n<p>bar\n</p>" ] [ "\r\nbar\r\n" convert-farkup ] unit-test
+[ "<p>bar</p>" ] [ "\r\nbar\r\n" convert-farkup ] unit-test

-[ "<p>foo\n</p><p>bar</p>" ] [ "foo\n\n\nbar" convert-farkup ] unit-test
+[ "<p>foo</p><p>bar</p>" ] [ "foo\n\n\nbar" convert-farkup ] unit-test

 [ "" ] [ "" convert-farkup ] unit-test

-[ "<p>|a</p>" ]
+[ "<table><tr><td>a</td></tr></table>" ]
 [ "|a" convert-farkup ] unit-test

 [ "<table><tr><td>a</td></tr></table>" ]
@@ -78,24 +78,24 @@ link-no-follow? off
 [ "<table><tr><td>a</td><td>b</td></tr><tr><td>c</td><td>d</td></tr></table>" ]
 [ "|a|b|\n|c|d|\n" convert-farkup ] unit-test

-[ "<p><strong>foo</strong>\n</p><h1>aheading</h1>\n<p>adfasd</p>" ]
+[ "<p><strong>foo</strong></p><h1>aheading</h1><p>adfasd</p>" ]
 [ "*foo*\n=aheading=\nadfasd" convert-farkup ] unit-test

-[ "<h1>foo</h1>\n" ] [ "=foo=\n" convert-farkup ] unit-test
+[ "<h1>foo</h1>" ] [ "=foo=\n" convert-farkup ] unit-test
-[ "<p>lol</p><h1>foo</h1>\n" ] [ "lol=foo=\n" convert-farkup ] unit-test
+[ "<p>lol=foo=</p>" ] [ "lol=foo=\n" convert-farkup ] unit-test
-[ "<p>=foo\n</p>" ] [ "=foo\n" convert-farkup ] unit-test
+[ "<p>=foo</p>" ] [ "=foo\n" convert-farkup ] unit-test
 [ "<p>=foo</p>" ] [ "=foo" convert-farkup ] unit-test
 [ "<p>==foo</p>" ] [ "==foo" convert-farkup ] unit-test
-[ "<p>=</p><h1>foo</h1>" ] [ "==foo=" convert-farkup ] unit-test
+[ "<h1>foo</h1>" ] [ "==foo=" convert-farkup ] unit-test
 [ "<h2>foo</h2>" ] [ "==foo==" convert-farkup ] unit-test
 [ "<h2>foo</h2>" ] [ "==foo==" convert-farkup ] unit-test
-[ "<p>=</p><h2>foo</h2>" ] [ "===foo==" convert-farkup ] unit-test
+[ "<h2>foo</h2>" ] [ "===foo==" convert-farkup ] unit-test
-[ "<h1>foo</h1><p>=</p>" ] [ "=foo==" convert-farkup ] unit-test
+[ "<h1>foo</h1>" ] [ "=foo==" convert-farkup ] unit-test

 [ "<pre><span class=\"KEYWORD3\">int</span> <span class=\"FUNCTION\">main</span><span class=\"OPERATOR\">(</span><span class=\"OPERATOR\">)</span></pre>" ]
 [ "[c{int main()}]" convert-farkup ] unit-test

-[ "<p><img src=\"lol.jpg\"/></p>" ] [ "[[image:lol.jpg]]" convert-farkup ] unit-test
+[ "<p><img src=\"lol.jpg\" alt=\"image:lol.jpg\"/></p>" ] [ "[[image:lol.jpg]]" convert-farkup ] unit-test
 [ "<p><img src=\"lol.jpg\" alt=\"teh lol\"/></p>" ] [ "[[image:lol.jpg|teh lol]]" convert-farkup ] unit-test
 [ "<p><a href=\"http://lol.com\">http://lol.com</a></p>" ] [ "[[http://lol.com]]" convert-farkup ] unit-test
 [ "<p><a href=\"http://lol.com\">haha</a></p>" ] [ "[[http://lol.com|haha]]" convert-farkup ] unit-test
@@ -111,11 +111,11 @@ link-no-follow? off
 [ "<pre>hello</pre>" ] [ "[{hello}]" convert-farkup ] unit-test

 [
-    "<p>Feature comparison:\n<table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table></p>"
+    "<p>Feature comparison:</p><table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table>"
 ] [ "Feature comparison:\n|a|Factor|Java|Lisp|\n|Coolness|Yes|No|No|\n|Badass|Yes|No|No|\n|Enterprise|Yes|Yes|No|\n|Kosher|Yes|No|Yes|\n" convert-farkup ] unit-test

 [
-    "<p>Feature comparison:\n</p><table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table>"
+    "<p>Feature comparison:</p><table><tr><td>a</td><td>Factor</td><td>Java</td><td>Lisp</td></tr><tr><td>Coolness</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Badass</td><td>Yes</td><td>No</td><td>No</td></tr><tr><td>Enterprise</td><td>Yes</td><td>Yes</td><td>No</td></tr><tr><td>Kosher</td><td>Yes</td><td>No</td><td>Yes</td></tr></table>"
 ] [ "Feature comparison:\n\n|a|Factor|Java|Lisp|\n|Coolness|Yes|No|No|\n|Badass|Yes|No|No|\n|Enterprise|Yes|Yes|No|\n|Kosher|Yes|No|Yes|\n" convert-farkup ] unit-test

 [
@@ -131,33 +131,33 @@ link-no-follow? off

 [ "<p><foo></p>" ] [ "<foo>" convert-farkup ] unit-test

-[ "<p>asdf\n<ul><li>lol</li>\n<li>haha</li></ul></p>" ] [ "asdf\n-lol\n-haha" convert-farkup ] unit-test
+[ "<p>asdf</p><ul><li>lol</li><li>haha</li></ul>" ] [ "asdf\n-lol\n-haha" convert-farkup ] unit-test

-[ "<p>asdf\n</p><ul><li>lol</li>\n<li>haha</li></ul>" ]
+[ "<p>asdf</p><ul><li>lol</li><li>haha</li></ul>" ]
 [ "asdf\n\n-lol\n-haha" convert-farkup ] unit-test

 [ "<hr/>" ] [ "___" convert-farkup ] unit-test
-[ "<hr/>\n" ] [ "___\n" convert-farkup ] unit-test
+[ "<hr/>" ] [ "___\n" convert-farkup ] unit-test

-[ "<p>before:\n<pre><span class=\"OPERATOR\">{</span> <span class=\"DIGIT\">1</span> <span class=\"DIGIT\">2</span> <span class=\"DIGIT\">3</span> <span class=\"OPERATOR\">}</span> <span class=\"DIGIT\">1</span> tail</pre></p>" ]
+[ "<p>before:</p><pre><span class=\"OPERATOR\">{</span> <span class=\"DIGIT\">1</span> <span class=\"DIGIT\">2</span> <span class=\"DIGIT\">3</span> <span class=\"OPERATOR\">}</span> <span class=\"DIGIT\">1</span> tail</pre>" ]
 [ "before:\n[factor{{ 1 2 3 } 1 tail}]" convert-farkup ] unit-test

 [ "<p><a href=\"Factor\">Factor</a>-rific!</p>" ]
 [ "[[Factor]]-rific!" convert-farkup ] unit-test

-[ "<p>[ factor { 1 2 3 }]</p>" ]
+[ "<pre> 1 2 3 </pre>" ]
 [ "[ factor { 1 2 3 }]" convert-farkup ] unit-test

-[ "<p>paragraph\n<hr/></p>" ]
+[ "<p>paragraph</p><hr/>" ]
 [ "paragraph\n___" convert-farkup ] unit-test

-[ "<p>paragraph\n a ___ b</p>" ]
+[ "<p>paragraph</p><p> a <em></em><em> b</em></p>" ]
 [ "paragraph\n a ___ b" convert-farkup ] unit-test

-[ "\n<ul><li> a</li>\n</ul><hr/>" ]
+[ "<ul><li> a</li></ul><hr/>" ]
 [ "\n- a\n___" convert-farkup ] unit-test

-[ "<p>hello_world how are you today?\n<ul><li> hello_world how are you today?</li></ul></p>" ]
+[ "<p>hello<em>world how are you today?</em></p><ul><li> hello<em>world how are you today?</em></li></ul>" ]
 [ "hello_world how are you today?\n- hello_world how are you today?" convert-farkup ] unit-test

 : check-link-escaping ( string -- link )
@@ -168,3 +168,15 @@ link-no-follow? off
 [ "<foo>" ] [ "[[<foo>]]" check-link-escaping ] unit-test
 [ "&blah;" ] [ "[[&blah;]]" check-link-escaping ] unit-test
 [ "C++" ] [ "[[C++]]" check-link-escaping ] unit-test
+
+[ "<h1>The <em>important</em> thing</h1>" ] [ "=The _important_ thing=" convert-farkup ] unit-test
+[ "<p><a href=\"Foo\"><strong>emphasized</strong> text</a></p>" ] [ "[[Foo|*emphasized* text]]" convert-farkup ] unit-test
+[ "<table><tr><td><strong>bold</strong></td><td><em>italics</em></td></tr></table>" ]
+[ "|*bold*|_italics_|" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both*" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both*_" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em></p>" ] [ "_italics*both_" convert-farkup ] unit-test
+[ "<p><em>italics<strong>both</strong></em>after<strong></strong></p>" ] [ "_italics*both_after*" convert-farkup ] unit-test
+[ "<table><tr><td>foo|bar</td></tr></table>" ] [ "|foo\\|bar|" convert-farkup ] unit-test
+[ "<p></p>" ] [ "\\" convert-farkup ] unit-test
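For reference, the behaviour pinned down by the updated expectations can be reproduced directly with convert-farkup; the inputs and outputs below are copied from the tests above:

    USING: farkup io ;

    "*Wow!*" convert-farkup print        ! <p><strong>Wow!</strong></p>
    "-foo\n-bar\n" convert-farkup print  ! <ul><li>foo</li><li>bar</li></ul>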
@@ -1,10 +1,9 @@
-! Copyright (C) 2008 Doug Coleman.
+! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg.
 ! See http://factorcode.org/license.txt for BSD license.
-USING: accessors arrays combinators io
-io.streams.string kernel math namespaces peg peg.ebnf
-sequences sequences.deep strings xml.entities xml.syntax
-vectors splitting xmode.code2html urls.encoding xml.data
-xml.writer ;
+USING: sequences kernel splitting lists fry accessors assocs math.order
+math combinators namespaces urls.encoding xml.syntax xmode.code2html
+xml.data arrays strings vectors xml.writer io.streams.string locals
+unicode.categories ;
 IN: farkup

 SYMBOL: relative-link-prefix
@@ -39,123 +38,174 @@ TUPLE: line-break ;
 : simple-link-title ( string -- string' )
     dup absolute-url? [ "/" split1-last swap or ] unless ;

-EBNF: parse-farkup
-nl = ("\r\n" | "\r" | "\n") => [[ drop "\n" ]]
-whitespace = " " | "\t" | nl
-
-heading1 = "=" (!("=" | nl).)+ "="
-    => [[ second >string heading1 boa ]]
-
-heading2 = "==" (!("=" | nl).)+ "=="
-    => [[ second >string heading2 boa ]]
-
-heading3 = "===" (!("=" | nl).)+ "==="
-    => [[ second >string heading3 boa ]]
-
-heading4 = "====" (!("=" | nl).)+ "===="
-    => [[ second >string heading4 boa ]]
-
-heading = heading4 | heading3 | heading2 | heading1
-
-strong = "*" (!("*" | nl).)+ "*"
-    => [[ second >string strong boa ]]
-
-emphasis = "_" (!("_" | nl).)+ "_"
-    => [[ second >string emphasis boa ]]
-
-superscript = "^" (!("^" | nl).)+ "^"
-    => [[ second >string superscript boa ]]
-
-subscript = "~" (!("~" | nl).)+ "~"
-    => [[ second >string subscript boa ]]
-
-inline-code = "%" (!("%" | nl).)+ "%"
-    => [[ second >string inline-code boa ]]
-
-link-content = (!("|"|"]").)+
-    => [[ >string ]]
-
-image-link = "[[image:" link-content "|" link-content "]]"
-    => [[ [ second >string ] [ fourth >string ] bi image boa ]]
-    | "[[image:" link-content "]]"
-    => [[ second >string f image boa ]]
-
-simple-link = "[[" link-content "]]"
-    => [[ second >string dup simple-link-title link boa ]]
-
-labeled-link = "[[" link-content "|" link-content "]]"
-    => [[ [ second >string ] [ fourth >string ] bi link boa ]]
-
-link = image-link | labeled-link | simple-link
-
-escaped-char = "\" .
-    => [[ second 1string ]]
-
-inline-tag = strong | emphasis | superscript | subscript | inline-code
-    | link | escaped-char
-
-inline-delimiter = '*' | '_' | '^' | '~' | '%' | '\' | '['
-
-cell = (!(inline-delimiter | '|' | nl).)+
-    => [[ >string ]]
-table-column = (list | cell | inline-tag | inline-delimiter ) '|'
-    => [[ first ]]
-table-row = "|" (table-column)+
-    => [[ second table-row boa ]]
-table = ((table-row nl => [[ first ]] )+ table-row? | table-row)
-    => [[ table boa ]]
-
-text = (!(nl | code | heading | inline-delimiter | table ).)+
-    => [[ >string ]]
-
-paragraph-nl-item = nl list
-    | nl line
-    | nl => [[ line-breaks? get [ drop line-break new ] when ]]
-paragraph-item = (table | code | text | inline-tag | inline-delimiter)+
-paragraph = ((paragraph-item paragraph-nl-item)+ nl+ => [[ first ]]
-    | (paragraph-item paragraph-nl-item)+ paragraph-item?
-    | paragraph-item)
-    => [[ paragraph boa ]]
-
-list-item = (cell | inline-tag | inline-delimiter)*
-
-ordered-list-item = '#' list-item
-    => [[ second list-item boa ]]
-ordered-list = ((ordered-list-item nl)+ ordered-list-item? | ordered-list-item)
-    => [[ ordered-list boa ]]
-
-unordered-list-item = '-' list-item
-    => [[ second list-item boa ]]
-unordered-list = ((unordered-list-item nl)+ unordered-list-item? | unordered-list-item)
-    => [[ unordered-list boa ]]
-
-list = ordered-list | unordered-list
-
-line = '___'
-    => [[ drop line new ]]
-
-named-code
-    = '[' (!('{' | whitespace | '[').)+ '{' (!("}]").)+ "}]"
-    => [[ [ second >string ] [ fourth >string ] bi code boa ]]
-
-simple-code
-    = "[{" (!("}]").)+ "}]"
-    => [[ second >string f swap code boa ]]
-
-code = named-code | simple-code
-
-stand-alone
-    = (line | code | heading | list | table | paragraph | nl)*
-;EBNF
+! _foo*bar_baz*bing works like <i>foo*bar</i>baz<b>bing</b>
+! I could support overlapping, but there's not a good use case for it.
+
+DEFER: (parse-paragraph)
+
+: parse-paragraph ( string -- seq )
+    (parse-paragraph) list>array ;
+
+: make-paragraph ( string -- paragraph )
+    parse-paragraph paragraph boa ;
+
+: cut-half-slice ( string i -- before after-slice )
+    [ head ] [ 1+ short tail-slice ] 2bi ;
+
+: find-cut ( string quot -- before after delimiter )
+    dupd find
+    [ [ cut-half-slice ] [ f ] if* ] dip ; inline
+
+: parse-delimiter ( string delimiter class -- paragraph )
+    [ '[ _ = ] find-cut drop ] dip
+    '[ parse-paragraph _ new swap >>child ]
+    [ (parse-paragraph) ] bi* cons ;
+
+: delimiter-class ( delimiter -- class )
+    H{
+        { CHAR: * strong }
+        { CHAR: _ emphasis }
+        { CHAR: ^ superscript }
+        { CHAR: ~ subscript }
+        { CHAR: % inline-code }
+    } at ;
+
+: parse-link ( string -- paragraph-list )
+    rest-slice "]]" split1-slice [
+        "|" split1
+        [ "" like dup simple-link-title ] unless*
+        [ "image:" ?head ] dip swap [ image boa ] [ parse-paragraph link boa ] if
+    ] dip [ (parse-paragraph) cons ] when* ;
+
+: ?first ( seq -- elt ) 0 swap ?nth ;
+
+: parse-big-link ( before after -- link rest )
+    dup ?first CHAR: [ =
+    [ parse-link ]
+    [ [ CHAR: [ suffix ] [ (parse-paragraph) ] bi* ]
+    if ;
+
+: escape ( before after -- before' after' )
+    [ nil ] [ unclip-slice swap [ suffix ] dip (parse-paragraph) ] if-empty ;
+
+: (parse-paragraph) ( string -- list )
+    [ nil ] [
+        [ "*_^~%[\\" member? ] find-cut [
+            {
+                { CHAR: [ [ parse-big-link ] }
+                { CHAR: \\ [ escape ] }
+                [ dup delimiter-class parse-delimiter ]
+            } case cons
+        ] [ drop "" like 1list ] if*
+    ] if-empty ;
+
+: <farkup-state> ( string -- state ) string-lines ;
+: look ( state i -- char ) swap first ?nth ;
+: done? ( state -- ? ) empty? ;
+: take-line ( state -- state' line ) unclip-slice ;
+
+: take-lines ( state char -- state' lines )
+    dupd '[ ?first _ = not ] find drop
+    [ cut-slice ] [ f ] if* swap ;
+
+:: (take-until) ( state delimiter accum -- string/f state' )
+    state empty? [ accum "\n" join f ] [
+        state unclip-slice :> first :> rest
+        first delimiter split1 :> after :> before
+        before accum push
+        after [
+            accum "\n" join
+            rest after prefix
+        ] [
+            rest delimiter accum (take-until)
+        ] if
+    ] if ;
+
+: take-until ( state delimiter -- string/f state' )
+    V{ } clone (take-until) ;
+
+: count= ( string -- n )
+    dup <reversed> [ [ CHAR: = = not ] find drop 0 or ] bi@ min ;
+
+: trim= ( string -- string' )
+    [ CHAR: = = ] trim ;
+
+: make-heading ( string class -- heading )
+    [ trim= parse-paragraph ] dip boa ; inline
+
+: parse-heading ( state -- state' heading )
+    take-line dup count= {
+        { 0 [ make-paragraph ] }
+        { 1 [ heading1 make-heading ] }
+        { 2 [ heading2 make-heading ] }
+        { 3 [ heading3 make-heading ] }
+        { 4 [ heading4 make-heading ] }
+        [ drop heading4 make-heading ]
+    } case ;
+
+: trim-row ( seq -- seq' )
+    rest
+    dup peek empty? [ but-last ] when ;
+
+: ?peek ( seq -- elt/f )
+    [ f ] [ peek ] if-empty ;
+
+: coalesce ( rows -- rows' )
+    V{ } clone [
+        '[
+            _ dup ?peek ?peek CHAR: \\ =
+            [ [ pop "|" rot 3append ] keep ] when
+            push
+        ] each
+    ] keep ;
+
+: parse-table ( state -- state' table )
+    CHAR: | take-lines [
+        "|" split
+        trim-row
+        coalesce
+        [ parse-paragraph ] map
+        table-row boa
+    ] map table boa ;
+
+: parse-line ( state -- state' item )
+    take-line dup "___" =
+    [ drop line new ] [ make-paragraph ] if ;
+
+: parse-list ( state char class -- state' list )
+    [
+        take-lines
+        [ rest parse-paragraph list-item boa ] map
+    ] dip boa ; inline
+
+: parse-ul ( state -- state' ul )
+    CHAR: - unordered-list parse-list ;
+
+: parse-ol ( state -- state' ul )
+    CHAR: # ordered-list parse-list ;
+
+: parse-code ( state -- state' item )
+    dup 1 look CHAR: [ =
+    [ unclip-slice make-paragraph ] [
+        "{" take-until [ rest ] dip
+        "}]" take-until
+        [ code boa ] dip swap
+    ] if ;
+
+: parse-item ( state -- state' item )
+    dup 0 look {
+        { CHAR: = [ parse-heading ] }
+        { CHAR: | [ parse-table ] }
+        { CHAR: _ [ parse-line ] }
+        { CHAR: - [ parse-ul ] }
+        { CHAR: # [ parse-ol ] }
+        { CHAR: [ [ parse-code ] }
+        { f [ rest-slice f ] }
+        [ drop take-line make-paragraph ]
+    } case ;
+
+: parse-farkup ( string -- farkup )
+    <farkup-state> [ dup done? not ] [ parse-item ] produce nip sift ;

 CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');"
@@ -168,19 +218,6 @@ CONSTANT: invalid-url "javascript:alert('Invalid URL in farkup');"
     [ relative-link-prefix get prepend "" like url-encode ]
     } cond ;

-: write-link ( href text -- xml )
-    [ check-url link-no-follow? get "nofollow" and ] dip
-    [XML <a href=<-> rel=<->><-></a> XML] ;
-
-: write-image-link ( href text -- xml )
-    disable-images? get [
-        2drop
-        [XML <strong>Images are not allowed</strong> XML]
-    ] [
-        [ check-url ] [ f like ] bi*
-        [XML <img src=<-> alt=<->/> XML]
-    ] if ;
-
 : render-code ( string mode -- xml )
     [ string-lines ] dip htmlize-lines
     [XML <pre><-></pre> XML] ;
@@ -206,11 +243,27 @@ M: ordered-list (write-farkup) "ol" farkup-inside ;
 M: paragraph (write-farkup) "p" farkup-inside ;
 M: table (write-farkup) "table" farkup-inside ;

+: write-link ( href text -- xml )
+    [ check-url link-no-follow? get "nofollow" and ] dip
+    [XML <a href=<-> rel=<->><-></a> XML] ;
+
+: write-image-link ( href text -- xml )
+    disable-images? get [
+        2drop
+        [XML <strong>Images are not allowed</strong> XML]
+    ] [
+        [ check-url ] [ f like ] bi*
+        [XML <img src=<-> alt=<->/> XML]
+    ] if ;
+
+: open-link ( link -- href text )
+    [ href>> ] [ text>> (write-farkup) ] bi ;
+
 M: link (write-farkup)
-    [ href>> ] [ text>> ] bi write-link ;
+    open-link write-link ;

 M: image (write-farkup)
-    [ href>> ] [ text>> ] bi write-image-link ;
+    open-link write-image-link ;

 M: code (write-farkup)
     [ string>> ] [ mode>> ] bi render-code ;
@@ -228,9 +281,7 @@ M: table-row (write-farkup)

 M: string (write-farkup) ;

-M: vector (write-farkup) [ (write-farkup) ] map ;
+M: array (write-farkup) [ (write-farkup) ] map ;

-M: f (write-farkup) ;
-
 : farkup>xml ( string -- xml )
     parse-farkup (write-farkup) ;
@@ -240,3 +291,4 @@ M: f (write-farkup) ;

 : convert-farkup ( string -- string' )
     [ write-farkup ] with-string-writer ;
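A sketch of the pipeline defined above: parse-farkup builds the document tree, (write-farkup) emits it as XML, and convert-farkup captures that output as a string. The input and output below come from the test suite earlier in this commit:

    USING: farkup io ;

    "[[Factor]]-rific!" convert-farkup print
    ! <p><a href="Factor">Factor</a>-rific!</p>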
@@ -162,8 +162,7 @@ ARTICLE: "encodings-introduction" "An introduction to encodings"
 { $code "\"file.txt\" utf16 file-contents" }
 "Encoding descriptors are also used by " { $link "io.streams.byte-array" } " and taken by combinators like " { $link with-file-writer } " and " { $link with-byte-reader } " which deal with streams. It is " { $emphasis "not" } " used with " { $link "io.streams.string" } " because these deal with abstract text."
 $nl
-"When the " { $link binary } " encoding is used, a " { $link byte-array } " is expected for writing and returned for reading, since the stream deals with bytes. All other encodings deal with strings, since they are used to represent text."
-{ $see-also "stream-elements" } ;
+"When the " { $link binary } " encoding is used, a " { $link byte-array } " is expected for writing and returned for reading, since the stream deals with bytes. All other encodings deal with strings, since they are used to represent text." ;

 ARTICLE: "io" "Input and output"
 { $heading "Streams" }
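A small sketch of the distinction the article draws (the binary file name is an assumption for the example):

    USING: io.encodings.binary io.encodings.utf16 io.files ;

    "file.txt" utf16 file-contents    ! decodes the bytes, returns a string
    "file.bin" binary file-contents   ! returns the raw bytes as a byte-array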
@@ -163,7 +163,7 @@ M: link-test link-href drop "http://www.apple.com/foo&bar" ;

 [ ] [ "-foo\n-bar" "farkup" set-value ] unit-test

-[ "<ul><li>foo</li>\n<li>bar</li></ul>" ] [
+[ "<ul><li>foo</li><li>bar</li></ul>" ] [
     [ "farkup" T{ farkup } render ] with-string-writer
 ] unit-test
@@ -83,14 +83,15 @@ ERROR: bmp-not-supported n ;

 :: fixup-color-index ( loading-bitmap -- loading-bitmap )
     loading-bitmap width>> :> width
+    width 3 * :> width*3
     loading-bitmap height>> abs :> height
     loading-bitmap color-index>> length :> color-index-length
-    height 3 * :> height*3
-    color-index-length width height*3 * - height*3 /i :> misaligned
-    misaligned 0 > [
+    color-index-length height /i :> stride
+    color-index-length width*3 height * - height /i :> padding
+    padding 0 > [
         loading-bitmap [
-            loading-bitmap width>> misaligned + 3 * <sliced-groups>
-            [ 3 misaligned * head* ] map concat
+            stride <sliced-groups>
+            [ width*3 head-slice ] map concat
         ] change-color-index
     ] [
         loading-bitmap
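A hedged sketch of the row trimming performed above, pulled out as a standalone word (the word name is hypothetical; the real code inlines this in fixup-color-index). stride is the number of stored bytes per row, width*3 the bytes of real 24-bit pixel data, and each row is cut down to its first width*3 bytes:

    USING: fry grouping kernel sequences ;

    : trim-color-index ( bytes stride width*3 -- bytes' )
        [ <sliced-groups> ] dip '[ _ head-slice ] map concat ;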
(Five binary test images added or changed, contents not shown; the reported sizes of the first four are 4.7 KiB, 4.9 KiB, 5.1 KiB and 5.2 KiB.)
@@ -477,26 +477,24 @@ ERROR: unknown-component-order ifd ;
     [ unknown-component-order ]
     } case ;

+: normalize-alpha-data ( seq -- byte-array )
+    ! [ normalize-alpha-data ] change-bitmap
+    B{ } like dup
+    byte-array>float-array
+    4 <sliced-groups>
+    [
+        dup fourth dup 0 = [
+            2drop
+        ] [
+            [ 3 head-slice ] dip '[ _ / ] change-each
+        ] if
+    ] each ;
+
 : handle-alpha-data ( ifd -- ifd )
     dup extra-samples find-tag {
-        { extra-samples-associated-alpha-data [
-            [
-                B{ } like dup
-                byte-array>float-array
-                4 <sliced-groups>
-                [
-                    dup fourth dup 0 = [
-                        2drop
-                    ] [
-                        [ 3 head-slice ] dip '[ _ / ] change-each
-                    ] if
-                ] each
-            ] change-bitmap
-        ] }
-        { extra-samples-unspecified-alpha-data [
-        ] }
-        { extra-samples-unassociated-alpha-data [
-        ] }
+        { extra-samples-associated-alpha-data [ ] }
+        { extra-samples-unspecified-alpha-data [ ] }
+        { extra-samples-unassociated-alpha-data [ ] }
         [ bad-extra-samples ]
     } case ;
@@ -26,3 +26,8 @@ tools.test math kernel sequences ;
 [ fixnum-bitnot ] [ \ bitnot modular-variant ] unit-test
 [ fixnum+fast ] [ \ fixnum+ modular-variant ] unit-test
 [ fixnum+fast ] [ \ fixnum+fast modular-variant ] unit-test
+
+[ 3 ] [ 1 2 +-integer-integer ] unit-test
+[ 3 ] [ 1 >bignum 2 +-integer-integer ] unit-test
+[ 3 ] [ 1 2 >bignum +-integer-integer ] unit-test
+[ 3 ] [ 1 >bignum 2 >bignum +-integer-integer ] unit-test
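The new tests call the generated +-integer-integer entry point directly; the same calls work in a listener (sketch, assuming the word is in scope via math.partial-dispatch):

    USING: math math.partial-dispatch prettyprint ;

    1 >bignum 2 +-integer-integer .   ! 3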
@@ -45,31 +45,41 @@ M: word integer-op-input-classes
     { bitnot fixnum-bitnot }
     } at swap or ;

+: bignum-fixnum-op-quot ( big-word -- quot )
+    '[ fixnum>bignum _ execute ] ;
+
+: fixnum-bignum-op-quot ( big-word -- quot )
+    '[ [ fixnum>bignum ] dip _ execute ] ;
+
 : integer-fixnum-op-quot ( fix-word big-word -- quot )
     [
         [ over fixnum? ] %
-        [ '[ _ execute ] , ]
-        [ '[ fixnum>bignum _ execute ] , ] bi*
-        \ if ,
+        [ '[ _ execute ] , ] [ bignum-fixnum-op-quot , ] bi* \ if ,
     ] [ ] make ;

 : fixnum-integer-op-quot ( fix-word big-word -- quot )
     [
         [ dup fixnum? ] %
-        [ '[ _ execute ] , ]
-        [ '[ [ fixnum>bignum ] dip _ execute ] , ] bi*
-        \ if ,
+        [ '[ _ execute ] , ] [ fixnum-bignum-op-quot , ] bi* \ if ,
+    ] [ ] make ;
+
+: integer-bignum-op-quot ( big-word -- quot )
+    [
+        [ over fixnum? ] %
+        [ fixnum-bignum-op-quot , ] [ '[ _ execute ] , ] bi \ if ,
     ] [ ] make ;

 : integer-integer-op-quot ( fix-word big-word -- quot )
     [
-        [ dup fixnum? ] %
-        2dup integer-fixnum-op-quot ,
+        [ 2dup both-fixnums? ] %
+        [ '[ _ execute ] , ]
         [
-            [ over fixnum? [ [ fixnum>bignum ] dip ] when ] %
-            nip ,
-        ] [ ] make ,
-        \ if ,
+            [
+                [ dup fixnum? ] %
+                [ bignum-fixnum-op-quot , ]
+                [ integer-bignum-op-quot , ] bi \ if ,
+            ] [ ] make ,
+        ] bi* \ if ,
     ] [ ] make ;

 : integer-op-word ( triple -- word )
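To make the generated code concrete, this is roughly the shape of the quotation integer-fixnum-op-quot builds for a hypothetical fixnum+ / bignum+ pair (an illustration, not compiler output):

    ! [ over fixnum? [ fixnum+ ] [ fixnum>bignum bignum+ ] if ]
    !
    ! integer-integer-op-quot now tests both-fixnums? first and only falls
    ! back to the promoting branches when one of the operands is a bignum.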
@@ -1,4 +1,4 @@
-! Copyright (C) 2007 Elie CHAFTARI
+! Copyright (C) 2007 Elie CHAFTARI, 2009 Maxim Savchenko
 ! See http://factorcode.org/license.txt for BSD license.
 !
 ! Tested with OpenSSL 0.9.8a_0 on Mac OS X 10.4.9 PowerPC
@@ -159,3 +159,65 @@ FUNCTION: int RSA_check_key ( void* rsa ) ;
 FUNCTION: void RSA_free ( void* rsa ) ;

 FUNCTION: int RSA_print_fp ( void* fp, void* x, int offset ) ;
+
+! ===============================================
+! objects.h
+! ===============================================
+
+FUNCTION: int OBJ_sn2nid ( char* s ) ;
+
+! ===============================================
+! bn.h
+! ===============================================
+
+FUNCTION: int BN_num_bits ( void* a ) ;
+
+FUNCTION: void* BN_bin2bn ( void* s, int len, void* ret ) ;
+
+FUNCTION: int BN_bn2bin ( void* a, void* to ) ;
+
+FUNCTION: void BN_clear_free ( void* a ) ;
+
+! ===============================================
+! ec.h
+! ===============================================
+
+CONSTANT: POINT_CONVERSION_COMPRESSED 2
+CONSTANT: POINT_CONVERSION_UNCOMPRESSED 4
+CONSTANT: POINT_CONVERSION_HYBRID 6
+
+FUNCTION: int EC_GROUP_get_degree ( void* group ) ;
+
+FUNCTION: void* EC_POINT_new ( void* group ) ;
+
+FUNCTION: void EC_POINT_clear_free ( void* point ) ;
+
+FUNCTION: int EC_POINT_point2oct ( void* group, void* point, int form, void* buf, int len, void* ctx ) ;
+
+FUNCTION: int EC_POINT_oct2point ( void* group, void* point, void* buf, int len, void* ctx ) ;
+
+FUNCTION: void* EC_KEY_new_by_curve_name ( int nid ) ;
+
+FUNCTION: void EC_KEY_free ( void* r ) ;
+
+FUNCTION: int EC_KEY_set_private_key ( void* key, void* priv_key ) ;
+
+FUNCTION: int EC_KEY_set_public_key ( void* key, void* pub_key ) ;
+
+FUNCTION: int EC_KEY_generate_key ( void* eckey ) ;
+
+FUNCTION: void* EC_KEY_get0_group ( void* key ) ;
+
+FUNCTION: void* EC_KEY_get0_private_key ( void* key ) ;
+
+FUNCTION: void* EC_KEY_get0_public_key ( void* key ) ;
+
+! ===============================================
+! ecdsa.h
+! ===============================================
+
+FUNCTION: int ECDSA_size ( void* eckey ) ;
+
+FUNCTION: int ECDSA_sign ( int type, void* dgst, int dgstlen, void* sig, void* siglen, void* eckey ) ;
+
+FUNCTION: int ECDSA_verify ( int type, void* dgst, int dgstlen, void* sig, int siglen, void* eckey ) ;
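A hedged sketch of using the new EC bindings together (the vocabulary name and the curve short name are assumptions; only functions declared in the hunk above are used):

    USING: kernel openssl.libcrypto ;   ! vocabulary name assumed

    : <ec-key> ( curve-name -- key )
        OBJ_sn2nid EC_KEY_new_by_curve_name
        dup EC_KEY_generate_key drop ;

    "prime256v1" <ec-key>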
@@ -530,7 +530,7 @@ M: ebnf-non-terminal (transform) ( ast -- parser )

 : EBNF:
     reset-tokenizer CREATE-WORD dup ";EBNF" parse-multiline-string
-    ebnf>quot swapd 1 1 <effect> define-declared "ebnf-parser" set-word-prop
+    ebnf>quot swapd (( input -- ast )) define-declared "ebnf-parser" set-word-prop
     reset-tokenizer ; parsing
@@ -37,8 +37,7 @@ C: <with-options> with-options
 TUPLE: options on off ;
 C: <options> options

-SINGLETONS: unix-lines dotall multiline comments case-insensitive
-unicode-case reversed-regexp ;
+SINGLETONS: unix-lines dotall multiline case-insensitive reversed-regexp ;

 : <maybe> ( term -- term' )
     f <concatenation> 2array <alternation> ;
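The surviving singletons line up with the flag characters accepted by regexp.parser (see the hunk further down); for example, case-insensitive is still reachable through the i suffix (sketch):

    USING: prettyprint regexp ;

    "FOO" R/ foo/i matches? .   ! t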
@@ -12,7 +12,7 @@ ascii-class punctuation-class java-printable-class blank-class
 control-character-class hex-digit-class java-blank-class c-identifier-class
 unmatchable-class terminator-class word-boundary-class ;

-SINGLETONS: beginning-of-input ^ end-of-input $ end-of-file word-break ;
+SINGLETONS: beginning-of-input ^ end-of-input $ end-of-file ^unix $unix word-break ;

 TUPLE: range from to ;
 C: <range> range
@@ -17,9 +17,6 @@ SYMBOL: backwards?
 M: t question>quot drop [ 2drop t ] ;
 M: f question>quot drop [ 2drop f ] ;

-M: not-class question>quot
-    class>> question>quot [ not ] compose ;
-
 M: beginning-of-input question>quot
     drop [ drop zero? ] ;
@@ -40,6 +37,12 @@ M: $ question>quot
 M: ^ question>quot
     drop [ { [ drop zero? ] [ [ 1- ] dip ?nth "\r\n" member? ] } 2|| ] ;

+M: $unix question>quot
+    drop [ { [ length = ] [ ?nth CHAR: \n = ] } 2|| ] ;
+
+M: ^unix question>quot
+    drop [ { [ drop zero? ] [ [ 1- ] dip ?nth CHAR: \n = ] } 2|| ] ;
+
 M: word-break question>quot
     drop [ word-break-at? ] ;
@@ -104,13 +107,11 @@ C: <box> box
     transitions>quot ;

 : states>code ( words dfa -- )
-    [
-        '[
-            dup _ word>quot
-            (( last-match index string -- ? ))
-            define-declared
-        ] each
-    ] with-compilation-unit ;
+    '[
+        dup _ word>quot
+        (( last-match index string -- ? ))
+        define-declared
+    ] each ;

 : states>words ( dfa -- words dfa )
     dup transitions>> keys [ gensym ] H{ } map>assoc
@@ -123,12 +124,9 @@ C: <box> box

 PRIVATE>

-: simple-define-temp ( quot effect -- word )
-    [ define-temp ] with-compilation-unit ;
-
 : dfa>word ( dfa -- quot )
     dfa>main-word execution-quot '[ drop [ f ] 2dip @ ]
-    (( start-index string regexp -- i/f )) simple-define-temp ;
+    (( start-index string regexp -- i/f )) define-temp ;

 : dfa>shortest-word ( dfa -- word )
     t shortest? [ dfa>word ] with-variable ;
@@ -1,7 +1,8 @@
 ! Copyright (C) 2009 Daniel Ehrenberg.
 ! See http://factorcode.org/license.txt for BSD license.
 USING: kernel accessors regexp.classes math.bits assocs sequences
-arrays sets regexp.dfa math fry regexp.minimize regexp.ast regexp.transition-tables ;
+arrays sets regexp.dfa math fry regexp.minimize regexp.ast
+locals regexp.transition-tables ;
 IN: regexp.disambiguate

 TUPLE: parts in out ;
@@ -9,7 +10,7 @@ TUPLE: parts in out ;
 : make-partition ( choices classes -- partition )
     zip [ first ] partition [ values ] bi@ parts boa ;

-: powerset-partition ( classes -- partitions )
+: powerset-partition ( sequence -- partitions )
     [ length [ 2^ ] keep ] keep '[
         _ <bits> _ make-partition
     ] map rest ;
@@ -19,19 +20,49 @@ TUPLE: parts in out ;
     [ in>> <and-class> ] bi
     prefix <and-class> ;

-: get-transitions ( partition state-transitions -- next-states )
-    [ in>> ] dip '[ _ at ] gather sift ;
+: singleton-partition ( integer non-integers -- {class,partition} )
+    dupd
+    '[ _ [ class-member? ] with filter ] keep
+    prefix f parts boa
+    2array ;
+
+: add-out ( seq partition -- partition' )
+    [ out>> append ] [ in>> ] bi swap parts boa ;
+
+: intersection ( seq -- elts )
+    [ f ] [ unclip [ intersect ] reduce ] if-empty ;
+
+: meaningful-integers ( partition table -- integers )
+    [ [ in>> ] [ out>> ] bi ] dip
+    '[ [ _ at ] map intersection ] bi@ diff ;
+
+: class-integers ( classes integers -- table )
+    '[ _ over '[ _ class-member? ] filter ] H{ } map>assoc ;
+
+: add-integers ( partitions classes integers -- partitions )
+    class-integers '[
+        [ _ meaningful-integers ] keep add-out
+    ] map ;
+
+: class-partitions ( classes -- assoc )
+    [ integer? ] partition [
+        dup powerset-partition spin add-integers
+        [ [ partition>class ] keep 2array ] map
+        [ first ] filter
+    ] [ '[ _ singleton-partition ] map ] 2bi append ;

 : new-transitions ( transitions -- assoc ) ! assoc is class, partition
     values [ keys ] gather
     [ tagged-epsilon? not ] filter
-    powerset-partition
-    [ [ partition>class ] keep ] { } map>assoc
-    [ drop ] assoc-filter ;
+    class-partitions ;
+
+: get-transitions ( partition state-transitions -- next-states )
+    [ in>> ] dip '[ _ at ] gather sift ;

 : preserving-epsilon ( state-transitions quot -- new-state-transitions )
     [ [ drop tagged-epsilon? ] assoc-filter ] bi
     assoc-union H{ } assoc-like ; inline

 : disambiguate ( nfa -- nfa )
     expand-ors [
         dup new-transitions '[
|
||||||
|
|
||||||
M: object modify-epsilon ;
|
M: object modify-epsilon ;
|
||||||
|
|
||||||
|
: line-option ( multiline unix-lines default -- option )
|
||||||
|
multiline option? [
|
||||||
|
drop [ unix-lines option? ] 2dip swap ?
|
||||||
|
] [ 2nip ] if ;
|
||||||
|
|
||||||
M: $ modify-epsilon
|
M: $ modify-epsilon
|
||||||
multiline option? [ drop end-of-input ] unless ;
|
$unix end-of-input line-option ;
|
||||||
|
|
||||||
M: ^ modify-epsilon
|
M: ^ modify-epsilon
|
||||||
multiline option? [ drop beginning-of-input ] unless ;
|
^unix beginning-of-input line-option ;
|
||||||
|
|
||||||
M: tagged-epsilon nfa-node
|
M: tagged-epsilon nfa-node
|
||||||
clone [ modify-epsilon ] change-tag add-simple-entry ;
|
clone [ modify-epsilon ] change-tag add-simple-entry ;
|
||||||
|
|
|
@@ -2,7 +2,7 @@
 ! See http://factorcode.org/license.txt for BSD license.
 USING: peg.ebnf kernel math.parser sequences assocs arrays fry math
 combinators regexp.classes strings splitting peg locals accessors
-regexp.ast ;
+regexp.ast unicode.case ;
 IN: regexp.parser

 : allowed-char? ( ch -- ? )
|
@ -19,20 +19,19 @@ ERROR: bad-number ;
|
||||||
ERROR: bad-class name ;
|
ERROR: bad-class name ;
|
||||||
|
|
||||||
: name>class ( name -- class )
|
: name>class ( name -- class )
|
||||||
{
|
>string >case-fold {
|
||||||
{ "Lower" letter-class }
|
{ "lower" letter-class }
|
||||||
{ "Upper" LETTER-class }
|
{ "upper" LETTER-class }
|
||||||
{ "Alpha" Letter-class }
|
{ "alpha" Letter-class }
|
||||||
{ "ASCII" ascii-class }
|
{ "ascii" ascii-class }
|
||||||
{ "Digit" digit-class }
|
{ "digit" digit-class }
|
||||||
{ "Alnum" alpha-class }
|
{ "alnum" alpha-class }
|
||||||
{ "Punct" punctuation-class }
|
{ "punct" punctuation-class }
|
||||||
{ "Graph" java-printable-class }
|
{ "graph" java-printable-class }
|
||||||
{ "Print" java-printable-class }
|
{ "blank" non-newline-blank-class }
|
||||||
{ "Blank" non-newline-blank-class }
|
{ "cntrl" control-character-class }
|
||||||
{ "Cntrl" control-character-class }
|
{ "xdigit" hex-digit-class }
|
||||||
{ "XDigit" hex-digit-class }
|
{ "space" java-blank-class }
|
||||||
{ "Space" java-blank-class }
|
|
||||||
! TODO: unicode-character-class
|
! TODO: unicode-character-class
|
||||||
} [ bad-class ] at-error ;
|
} [ bad-class ] at-error ;
|
||||||
|
|
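With the class names case-folded above, \p properties are now matched case-insensitively (sketch):

    USING: prettyprint regexp ;

    "abc" R/ \p{lower}+/ matches? .   ! t
    "abc" R/ \p{Lower}+/ matches? .   ! t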
||||||
|
@ -66,11 +65,8 @@ ERROR: bad-class name ;
|
||||||
{ CHAR: i case-insensitive }
|
{ CHAR: i case-insensitive }
|
||||||
{ CHAR: d unix-lines }
|
{ CHAR: d unix-lines }
|
||||||
{ CHAR: m multiline }
|
{ CHAR: m multiline }
|
||||||
{ CHAR: n multiline }
|
|
||||||
{ CHAR: r reversed-regexp }
|
{ CHAR: r reversed-regexp }
|
||||||
{ CHAR: s dotall }
|
{ CHAR: s dotall }
|
||||||
{ CHAR: u unicode-case }
|
|
||||||
{ CHAR: x comments }
|
|
||||||
} ;
|
} ;
|
||||||
|
|
||||||
: ch>option ( ch -- singleton )
|
: ch>option ( ch -- singleton )
|
||||||
|
@@ -101,8 +97,8 @@ CharacterInBracket = !("}") Character

 QuotedCharacter = !("\\E") .

-Escape = "p{" CharacterInBracket*:s "}" => [[ s >string name>class <primitive-class> ]]
-       | "P{" CharacterInBracket*:s "}" => [[ s >string name>class <primitive-class> <negation> ]]
+Escape = "p{" CharacterInBracket*:s "}" => [[ s name>class <primitive-class> ]]
+       | "P{" CharacterInBracket*:s "}" => [[ s name>class <primitive-class> <negation> ]]
        | "Q" QuotedCharacter*:s "\\E" => [[ s <concatenation> ]]
        | "u" Character:a Character:b Character:c Character:d
          => [[ { a b c d } hex> ensure-number ]]
@@ -1,6 +1,7 @@
 ! Copyright (C) 2008, 2009 Doug Coleman, Daniel Ehrenberg.
 ! See http://factorcode.org/license.txt for BSD license.
-USING: kernel strings help.markup help.syntax math regexp.parser regexp.ast ;
+USING: kernel strings help.markup help.syntax math regexp.parser
+regexp.ast multiline ;
 IN: regexp

 ABOUT: "regexp"
|
@ -21,8 +22,17 @@ ARTICLE: "regexp" "Regular expressions"
|
||||||
{ $subsection { "regexp" "deploy" } } ;
|
{ $subsection { "regexp" "deploy" } } ;
|
||||||
|
|
||||||
ARTICLE: { "regexp" "intro" } "A quick introduction to regular expressions"
|
ARTICLE: { "regexp" "intro" } "A quick introduction to regular expressions"
|
||||||
|
"Regular expressions are a terse way to do certain simple string processing tasks. For example, to replace all instances of " { $snippet "foo" } " in one string with " { $snippet "bar" } ", the following can be used:"
|
||||||
;
|
{ $code "R/ foo/ \"bar\" re-replace" }
|
||||||
|
"That could be done with sequence operations, but consider doing this replacement for an arbitrary number of o's, at least two:"
|
||||||
|
{ $code "R/ foo+/ \"bar\" re-replace" }
|
||||||
|
"The " { $snippet "+" } " operator matches one or more occurrences of the previous expression; in this case " { $snippet "o" } ". Another useful feature is alternation. Say we want to do this replacement with fooooo or boooo. Then we could use the code"
|
||||||
|
{ $code "R/ (f|b)oo+/ \"bar\" re-replace" }
|
||||||
|
"To search a file for all lines that match a given regular expression, you could use code like this:"
|
||||||
|
{ $code <" "file.txt" ascii file-lines [ R/ (f|b)oo+/ re-contains? ] filter "> }
|
||||||
|
"To test if a string in its entirety matches a regular expression, the following can be used:"
|
||||||
|
{ $example <" USING: regexp prettyprint ; "fooo" R/ (b|f)oo+/ matches? . "> "t" }
|
||||||
|
"Regular expressions can't be used for all parsing tasks. For example, they are not powerful enough to match balancing parentheses." ;
|
||||||
|
|
||||||
ARTICLE: { "regexp" "construction" } "Constructing regular expressions"
|
ARTICLE: { "regexp" "construction" } "Constructing regular expressions"
|
||||||
"Most of the time, regular expressions are literals and the parsing word should be used, to construct them at parse time. This ensures that they are only compiled once, and gives parse time syntax checking."
|
"Most of the time, regular expressions are literals and the parsing word should be used, to construct them at parse time. This ensures that they are only compiled once, and gives parse time syntax checking."
|
||||||
|
@ -33,20 +43,71 @@ ARTICLE: { "regexp" "construction" } "Constructing regular expressions"
|
||||||
"Another approach is to use " { $vocab-link "regexp.combinators" } "." ;
|
"Another approach is to use " { $vocab-link "regexp.combinators" } "." ;
|
||||||
|
|
||||||
ARTICLE: { "regexp" "syntax" } "Regular expression syntax"
|
ARTICLE: { "regexp" "syntax" } "Regular expression syntax"
|
||||||
"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely. A new addition is the inclusion of a negation operator, with the syntax " { $snippet "(?~foo)" } " to match everything that does not match " { $snippet "foo" } "."
|
"Regexp syntax is largely compatible with Perl, Java and extended POSIX regexps, but not completely. Below, the syntax is documented."
|
||||||
{ $heading "Characters" }
|
{ $heading "Characters" }
|
||||||
|
"At its core, regular expressions consist of character literals. For example, " { $snippet "R/ f/" } " is a regular expression matching just the string 'f'. In addition, the normal escape codes are provided, like " { $snippet "\\t" } " for the tab character and " { $snippet "\\uxxxxxx" } "for an arbitrary Unicode code point, by its hex value. In addition, any character can be preceded by a backslash to escape it, unless this has special meaning. For example, to match a literal opening parenthesis, use " { $snippet "\\(" } "."
|
||||||
|
{ $heading "Concatenation, alternation and grouping" }
|
||||||
|
"Regular expressions can be built out of multiple characters by concatenation. For example, " { $snippet "R/ ab/" } " matches a followed by b. The " { $snippet "|" } " (alternation) operator can construct a regexp which matches one of two alternatives. Parentheses can be used for gropuing. So " { $snippet "R/ f(oo|ar)/" } " would match either 'foo' or 'far'."
|
||||||
{ $heading "Character classes" }
|
{ $heading "Character classes" }
|
||||||
|
"Square brackets define a convenient way to refer to a set of characters. For example, " { $snippet "[ab]" } " refers to either a or b. And " { $snippet "[a-z]" } " refers to all of the characters between a and z, in code point order. You can use these together, as in " { $snippet "[ac-fz]" } " which matches all of the characters between c and f, in addition to a and z. Character classes can be negated using a carat, as in " { $snippet "[^a]" } " which matches all characters which are not a."
|
||||||
{ $heading "Predefined character classes" }
|
{ $heading "Predefined character classes" }
|
||||||
|
"Several character classes are predefined, both for convenience and because they are too large to represent directly. In Factor regular expressions, all character classes are Unicode-aware."
|
||||||
|
{ $table
|
||||||
|
{ { $snippet "\\d" } "Digits" }
|
||||||
|
{ { $snippet "\\D" } "Not digits" }
|
||||||
|
{ { $snippet "\\s" } "Whitespace" }
|
||||||
|
{ { $snippet "\\S" } "Not whitespace" }
|
||||||
|
{ { $snippet "\\w" } "Word character (alphanumeric or underscore)" }
|
||||||
|
{ { $snippet "\\W" } "Not word character" }
|
||||||
|
{ { $snippet "\\p{property}" } "Character which fulfils the property" }
|
||||||
|
{ { $snippet "\\P{property}" } "Character which does not fulfil the property" } }
|
||||||
|
"Properties for " { $snippet "\\p" } " and " { $snippet "\\P" } " (case-insensitive):"
|
||||||
|
{ $table
|
||||||
|
{ { $snippet "\\p{lower}" } "Lower case letters" }
|
||||||
|
{ { $snippet "\\p{upper}" } "Upper case letters" }
|
||||||
|
{ { $snippet "\\p{alpha}" } "Letters" }
|
||||||
|
{ { $snippet "\\p{ascii}" } "Characters in the ASCII range" }
|
||||||
|
{ { $snippet "\\p{alnum}" } "Letters or numbers" }
|
||||||
|
{ { $snippet "\\p{punct}" } "Punctuation" }
|
||||||
|
{ { $snippet "\\p{blank}" } "Non-newline whitespace" }
|
||||||
|
{ { $snippet "\\p{cntrl}" } "Control character" }
|
||||||
|
{ { $snippet "\\p{space}" } "Whitespace" }
|
||||||
|
{ { $snippet "\\p{xdigit}" } "Hexidecimal digit" } } ! In the future: Unicode
|
||||||
|
"Full unicode properties are not yet supported."
|
||||||
{ $heading "Boundaries" }
|
{ $heading "Boundaries" }
|
||||||
|
"Special operators exist to match certain points in the string. These are called 'zero-width' because they do not consume any characters."
|
||||||
|
{ $table
|
||||||
|
{ { $snippet "^" } "Beginning of a line" }
|
||||||
|
{ { $snippet "$" } "End of a line" }
|
||||||
|
{ { $snippet "\\A" } "Beginning of text" }
|
||||||
|
{ { $snippet "\\z" } "End of text" }
|
||||||
|
{ { $snippet "\\Z" } "Almost end of text: only thing after is newline" }
|
||||||
|
{ { $snippet "\\b" } "Word boundary (by Unicode word boundaries)" }
|
||||||
|
{ { $snippet "\\b" } "Not word boundary (by Unicode word boundaries)" } }
|
||||||
{ $heading "Greedy quantifiers" }
|
{ $heading "Greedy quantifiers" }
|
||||||
{ $heading "Reluctant quantifiers" }
|
"It is possible to have a regular expression which matches a variable number of occurrences of another regular expression."
|
||||||
{ $heading "Posessive quantifiers" }
|
{ $table
|
||||||
{ $heading "Logical operations" }
|
{ { $snippet "a*" } "Zero or more occurrences of a" }
|
||||||
|
{ { $snippet "a+" } "One or more occurrences of a" }
|
||||||
|
{ { $snippet "a?" } "Zero or one occurrences of a" }
|
||||||
|
{ { $snippet "a{n}" } "n occurrences of a" }
|
||||||
|
{ { $snippet "a{n,}" } "At least n occurrences of a" }
|
||||||
|
{ { $snippet "a{,m}" } "At most m occurrences of a" }
|
||||||
|
{ { $snippet "a{n,m}" } "Between n and m occurrences of a" } }
|
||||||
|
"All of these quantifiers are " { $emphasis "greedy" } ", meaning that they take as many repetitions as possible within the larger regular expression. Reluctant and posessive quantifiers are not yet supported."
|
||||||
{ $heading "Lookaround" }
|
{ $heading "Lookaround" }
|
||||||
|
"Operators are provided to look ahead and behind the current point in the regular expression. These can be used in any context, but they're the most useful at the beginning or end of a regular expression."
|
||||||
|
{ $table
|
||||||
|
{ { $snippet "(?=a)" } "Asserts that the current position is immediately followed by a" }
|
||||||
|
{ { $snippet "(?!a)" } "Asserts that the current position is not immediately followed by a" }
|
||||||
|
{ { $snippet "(?<=a)" } "Asserts that the current position is immediately preceded by a" }
|
||||||
|
{ { $snippet "(?<!a)" } "Asserts that the current position is not immediately preceded by a" } }
|
||||||
|
{ $heading "Quotation" }
|
||||||
|
"To make it convenient to have a long string which uses regexp operators, a special syntax is provided. If a substring begins with " { $snippet "\\Q" } " then everything until " { $snippet "\\E" } " is quoted (escaped). For example, " { $snippet "R/ \\Qfoo\\bar|baz()\\E/" } " matches exactly the string " { $snippet "\"foo\\bar|baz()\"" } "."
|
||||||
{ $heading "Unsupported features" }
|
{ $heading "Unsupported features" }
|
||||||
"One missing feature is backreferences. This is because of a design decision to allow only regular expressions following the formal theory of regular languages. For more information, see " { $link { "regexp" "theory" } } ". You can create a new regular expression to match a particular string using " { $vocab-link "regexp.combinators" } " and group capture is available to extract parts of a regular expression match." $nl
|
"One missing feature is backreferences. This is because of a design decision to allow only regular expressions following the formal theory of regular languages. For more information, see " { $link { "regexp" "theory" } } ". You can create a new regular expression to match a particular string using " { $vocab-link "regexp.combinators" } " and group capture is available to extract parts of a regular expression match." $nl
|
||||||
"Another feature is Perl's " { $snippet "\\G" } " syntax, which references the previous match, is not included. This is because that sequence is inherently stateful, and Factor regexps don't hold state." $nl
|
"Another feature is Perl's " { $snippet "\\G" } " syntax, which references the previous match, is not included. This is because that sequence is inherently stateful, and Factor regexps don't hold state." $nl
|
||||||
"Additionally, none of the operations which embed code into a regexp are supported, as this would require the inclusion of the Factor parser and compiler in any application which wants to expose regexps to the user. None of the casing operations are included, for simplicity." ; ! Also describe syntax, from the beginning
|
"None of the operations which embed code into a regexp are supported, as this would require the inclusion of the Factor parser and compiler in any application which wants to expose regexps to the user. None of the casing operations are included of Perl like \\L, for simplicity." ; ! Also describe syntax, from the beginning
|
||||||
|
|
||||||
ARTICLE: { "regexp" "options" } "Regular expression options"
|
ARTICLE: { "regexp" "options" } "Regular expression options"
|
||||||
"When " { $link { "regexp" "construction" } } ", various options can be provided. Options have single-character names. A string of options has one of the following two forms:"
|
"When " { $link { "regexp" "construction" } } ", various options can be provided. Options have single-character names. A string of options has one of the following two forms:"
|
||||||
|
@ -58,13 +119,30 @@ $nl
|
||||||
{ "i" { $link case-insensitive } }
|
{ "i" { $link case-insensitive } }
|
||||||
{ "d" { $link unix-lines } }
|
{ "d" { $link unix-lines } }
|
||||||
{ "m" { $link multiline } }
|
{ "m" { $link multiline } }
|
||||||
{ "n" { $link multiline } }
|
|
||||||
{ "r" { $link reversed-regexp } }
|
|
||||||
{ "s" { $link dotall } }
|
{ "s" { $link dotall } }
|
||||||
{ "u" { $link unicode-case } }
|
{ "r" { $link reversed-regexp } }
|
||||||
{ "x" { $link comments } }
|
|
||||||
} ;
|
} ;
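For example (editorial sketch; the second line mirrors a unit test added in this commit):

USING: regexp prettyprint ;
"FOO" R/ foo/i matches? .    ! t -- i: case-insensitive
"\na" R/ .^a/ms matches? .   ! t -- several options can be combined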
|
||||||
|
|
||||||
|
HELP: case-insensitive
|
||||||
|
{ $syntax "R/ .../i" }
|
||||||
|
{ $description "On regexps, the " { $snippet "i" } " option makes the match case-insenstive. Currently, this is handled incorrectly with respect to Unicode, as characters like ß do not expand into SS in upper case. This should be fixed in a future version." } ;
|
||||||
|
|
||||||
|
HELP: unix-lines
|
||||||
|
{ $syntax "R/ .../d" }
|
||||||
|
{ $description "With this mode, only newlines (" { $snippet "\\n" } ") are recognized for line breaking. This affects " { $snippet "$" } " and " { $snippet "^" } " when in multiline mode." } ;
|
||||||
|
|
||||||
|
HELP: multiline
|
||||||
|
{ $syntax "R/ .../m" }
|
||||||
|
{ $description "This mode makes the zero-width constraints " { $snippet "$" } " and " { $snippet "^" } " match the beginning or end of a line. Otherwise, they only match the beginning or end of the input text. This can be used together with " { $link dotall } "." } ;
|
||||||
|
|
||||||
|
HELP: dotall
|
||||||
|
{ $syntax "R/ .../s" }
|
||||||
|
{ $description "This mode, traditionally called single line mode, makes " { $snippet "." } " match everything, including line breaks. By default, it does not match line breaking characters. This can be used together with " { $link multiline } "." } ;
|
||||||
|
|
||||||
|
HELP: reversed-regexp
|
||||||
|
{ $syntax "R/ .../r" }
|
||||||
|
{ $description "When running a regexp compiled with this mode, matches will start from the end of the input string, going towards the beginning." } ;
|
||||||
|
|
||||||
ARTICLE: { "regexp" "theory" } "The theory of regular expressions"
|
ARTICLE: { "regexp" "theory" } "The theory of regular expressions"
|
||||||
"Far from being just a practical tool invented by Unix hackers, regular expressions were studied formally before computer programs were written to process them." $nl
|
"Far from being just a practical tool invented by Unix hackers, regular expressions were studied formally before computer programs were written to process them." $nl
|
||||||
"A regular language is a set of strings that is matched by a regular expression, which is defined to have characters and the empty string, along with the operations concatenation, disjunction and Kleene star. Another way to define the class of regular languages is as the class of languages which can be recognized with constant space overhead, ie with a DFA. These two definitions are provably equivalent." $nl
|
"A regular language is a set of strings that is matched by a regular expression, which is defined to have characters and the empty string, along with the operations concatenation, disjunction and Kleene star. Another way to define the class of regular languages is as the class of languages which can be recognized with constant space overhead, ie with a DFA. These two definitions are provably equivalent." $nl
|
||||||
|
|
|
@ -470,3 +470,13 @@ IN: regexp-tests
|
||||||
[ t ] [ "abcdefg" "a(?:bcdefg)" <regexp> matches? ] unit-test
|
[ t ] [ "abcdefg" "a(?:bcdefg)" <regexp> matches? ] unit-test
|
||||||
|
|
||||||
[ 3 ] [ "caba" "(?<=b)a" <regexp> first-match from>> ] unit-test
|
[ 3 ] [ "caba" "(?<=b)a" <regexp> first-match from>> ] unit-test
|
||||||
|
|
||||||
|
[ t ] [ "\ra" R/ .^a/ms matches? ] unit-test
|
||||||
|
[ f ] [ "\ra" R/ .^a/mds matches? ] unit-test
|
||||||
|
[ t ] [ "\na" R/ .^a/ms matches? ] unit-test
|
||||||
|
[ t ] [ "\na" R/ .^a/mds matches? ] unit-test
|
||||||
|
|
||||||
|
[ t ] [ "a\r" R/ a$./ms matches? ] unit-test
|
||||||
|
[ f ] [ "a\r" R/ a$./mds matches? ] unit-test
|
||||||
|
[ t ] [ "a\n" R/ a$./ms matches? ] unit-test
|
||||||
|
[ t ] [ "a\n" R/ a$./mds matches? ] unit-test
|
||||||
|
|
|
@ -4,7 +4,7 @@ USING: accessors combinators kernel kernel.private math sequences
|
||||||
sequences.private strings sets assocs prettyprint.backend
|
sequences.private strings sets assocs prettyprint.backend
|
||||||
prettyprint.custom make lexer namespaces parser arrays fry locals
|
prettyprint.custom make lexer namespaces parser arrays fry locals
|
||||||
regexp.parser splitting sorting regexp.ast regexp.negation
|
regexp.parser splitting sorting regexp.ast regexp.negation
|
||||||
regexp.compiler words call call.private math.ranges ;
|
regexp.compiler compiler.units words call call.private math.ranges ;
|
||||||
IN: regexp
|
IN: regexp
|
||||||
|
|
||||||
TUPLE: regexp
|
TUPLE: regexp
|
||||||
|
@ -35,7 +35,7 @@ M: lookbehind question>quot ! Returns ( index string -- ? )
|
||||||
: match-index-from ( i string regexp -- index/f )
|
: match-index-from ( i string regexp -- index/f )
|
||||||
! This word is unsafe. It assumes that i is a fixnum
|
! This word is unsafe. It assumes that i is a fixnum
|
||||||
! and that string is a string.
|
! and that string is a string.
|
||||||
dup dfa>> execute-unsafe( index string regexp -- i/f ) ;
|
dup dfa>> execute-unsafe( index string regexp -- i/f ) ; inline
|
||||||
|
|
||||||
GENERIC: end/start ( string regexp -- end start )
|
GENERIC: end/start ( string regexp -- end start )
|
||||||
M: regexp end/start drop length 0 ;
|
M: regexp end/start drop length 0 ;
|
||||||
|
@ -129,31 +129,28 @@ PRIVATE>
|
||||||
GENERIC: compile-regexp ( regex -- regexp )
|
GENERIC: compile-regexp ( regex -- regexp )
|
||||||
|
|
||||||
: regexp-initial-word ( i string regexp -- i/f )
|
: regexp-initial-word ( i string regexp -- i/f )
|
||||||
compile-regexp match-index-from ;
|
[ compile-regexp ] with-compilation-unit match-index-from ;
|
||||||
|
|
||||||
: do-compile-regexp ( regexp -- regexp )
|
M: regexp compile-regexp ( regexp -- regexp )
|
||||||
dup '[
|
dup '[
|
||||||
dup \ regexp-initial-word =
|
dup \ regexp-initial-word =
|
||||||
[ drop _ get-ast ast>dfa dfa>word ] when
|
[ drop _ get-ast ast>dfa dfa>word ] when
|
||||||
] change-dfa ;
|
] change-dfa ;
|
||||||
|
|
||||||
M: regexp compile-regexp ( regexp -- regexp )
|
|
||||||
do-compile-regexp ;
|
|
||||||
|
|
||||||
M: reverse-regexp compile-regexp ( regexp -- regexp )
|
M: reverse-regexp compile-regexp ( regexp -- regexp )
|
||||||
t backwards? [ do-compile-regexp ] with-variable ;
|
t backwards? [ call-next-method ] with-variable ;
|
||||||
|
|
||||||
DEFER: compile-next-match
|
DEFER: compile-next-match
|
||||||
|
|
||||||
: next-initial-word ( i string regexp -- i start end string )
|
: next-initial-word ( i string regexp -- i start end string )
|
||||||
compile-next-match do-next-match ;
|
[ compile-next-match ] with-compilation-unit do-next-match ;
|
||||||
|
|
||||||
: compile-next-match ( regexp -- regexp )
|
: compile-next-match ( regexp -- regexp )
|
||||||
dup '[
|
dup '[
|
||||||
dup \ next-initial-word = [
|
dup \ next-initial-word = [
|
||||||
drop _ [ compile-regexp dfa>> def>> ] [ reverse-regexp? ] bi
|
drop _ [ compile-regexp dfa>> def>> ] [ reverse-regexp? ] bi
|
||||||
'[ { array-capacity string regexp } declare _ _ next-match ]
|
'[ { array-capacity string regexp } declare _ _ next-match ]
|
||||||
(( i string regexp -- i start end string )) simple-define-temp
|
(( i string regexp -- i start end string )) define-temp
|
||||||
] when
|
] when
|
||||||
] change-next-match ;
|
] change-next-match ;
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ ui.backend ui.clipboards ui.gadgets ui.gadgets.worlds
|
||||||
ui.backend.cocoa.views core-foundation core-foundation.run-loop
|
ui.backend.cocoa.views core-foundation core-foundation.run-loop
|
||||||
core-graphics.types threads math.rectangles fry libc
|
core-graphics.types threads math.rectangles fry libc
|
||||||
generalizations alien.c-types cocoa.views
|
generalizations alien.c-types cocoa.views
|
||||||
combinators io.thread locals ;
|
combinators io.thread locals call ;
|
||||||
IN: ui.backend.cocoa
|
IN: ui.backend.cocoa
|
||||||
|
|
||||||
TUPLE: handle ;
|
TUPLE: handle ;
|
||||||
|
@ -152,7 +152,7 @@ M: cocoa-ui-backend (with-ui)
|
||||||
"UI" assert.app [
|
"UI" assert.app [
|
||||||
[
|
[
|
||||||
init-clipboard
|
init-clipboard
|
||||||
cocoa-init-hook get call
|
cocoa-init-hook get call( -- )
|
||||||
start-ui
|
start-ui
|
||||||
f io-thread-running? set-global
|
f io-thread-running? set-global
|
||||||
init-thread-timer
|
init-thread-timer
|
||||||
|
|
|
@ -153,7 +153,7 @@ PRIVATE>
|
||||||
"UI update" spawn drop ;
|
"UI update" spawn drop ;
|
||||||
|
|
||||||
: start-ui ( quot -- )
|
: start-ui ( quot -- )
|
||||||
call notify-ui-thread start-ui-thread ;
|
call( -- ) notify-ui-thread start-ui-thread ;
|
||||||
|
|
||||||
: restore-windows ( -- )
|
: restore-windows ( -- )
|
||||||
[
|
[
|
||||||
|
@ -193,6 +193,6 @@ M: object close-window
|
||||||
] "ui" add-init-hook
|
] "ui" add-init-hook
|
||||||
|
|
||||||
: with-ui ( quot -- )
|
: with-ui ( quot -- )
|
||||||
ui-running? [ call ] [ '[ init-ui @ ] (with-ui) ] if ;
|
ui-running? [ call( -- ) ] [ '[ init-ui @ ] (with-ui) ] if ;
|
||||||
|
|
||||||
HOOK: beep ui-backend ( -- )
|
HOOK: beep ui-backend ( -- )
|
|
@ -5,7 +5,7 @@ io.files hashtables quotations splitting grouping arrays io
|
||||||
math.parser hash2 math.order byte-arrays words namespaces words
|
math.parser hash2 math.order byte-arrays words namespaces words
|
||||||
compiler.units parser io.encodings.ascii values interval-maps
|
compiler.units parser io.encodings.ascii values interval-maps
|
||||||
ascii sets combinators locals math.ranges sorting make
|
ascii sets combinators locals math.ranges sorting make
|
||||||
strings.parser io.encodings.utf8 ;
|
strings.parser io.encodings.utf8 memoize ;
|
||||||
IN: unicode.data
|
IN: unicode.data
|
||||||
|
|
||||||
VALUE: simple-lower
|
VALUE: simple-lower
|
||||||
|
@ -108,6 +108,9 @@ CONSTANT: categories
|
||||||
"Zs" "Zl" "Zp"
|
"Zs" "Zl" "Zp"
|
||||||
"Cc" "Cf" "Cs" "Co" }
|
"Cc" "Cf" "Cs" "Co" }
|
||||||
|
|
||||||
|
MEMO: categories-map ( -- hashtable )
|
||||||
|
categories <enum> [ swap ] H{ } assoc-map-as ;
|
||||||
|
|
||||||
CONSTANT: num-chars HEX: 2FA1E
|
CONSTANT: num-chars HEX: 2FA1E
|
||||||
|
|
||||||
! the maximum unicode char in the first 3 planes
|
! the maximum unicode char in the first 3 planes
|
||||||
|
@ -124,10 +127,10 @@ CONSTANT: num-chars HEX: 2FA1E
|
||||||
] assoc-each table ;
|
] assoc-each table ;
|
||||||
|
|
||||||
:: process-category ( data -- category-listing )
|
:: process-category ( data -- category-listing )
|
||||||
[let | table [ num-chars <byte-array> ] |
|
num-chars <byte-array> :> table
|
||||||
2 data (process-data) [| char cat |
|
2 data (process-data) [| char cat |
|
||||||
cat categories index char table ?set-nth
|
cat categories-map at char table ?set-nth
|
||||||
] assoc-each table fill-ranges ] ;
|
] assoc-each table fill-ranges ;
|
||||||
|
|
||||||
: process-names ( data -- names-hash )
|
: process-names ( data -- names-hash )
|
||||||
1 swap (process-data) [
|
1 swap (process-data) [
|
||||||
|
|
|
@ -74,3 +74,4 @@ SYMBOL: xml-file
|
||||||
[ "foo" ] [ "<!DOCTYPE foo [<!ENTITY bar 'foo'>]><x>&bar;</x>" string>xml children>string ] unit-test
|
[ "foo" ] [ "<!DOCTYPE foo [<!ENTITY bar 'foo'>]><x>&bar;</x>" string>xml children>string ] unit-test
|
||||||
[ T{ xml-chunk f V{ "hello" } } ] [ "hello" string>xml-chunk ] unit-test
|
[ T{ xml-chunk f V{ "hello" } } ] [ "hello" string>xml-chunk ] unit-test
|
||||||
[ "1.1" ] [ "<?xml version='1.1'?><x/>" string>xml prolog>> version>> ] unit-test
|
[ "1.1" ] [ "<?xml version='1.1'?><x/>" string>xml prolog>> version>> ] unit-test
|
||||||
|
[ "ß" ] [ "<x>ß</x>" <string-reader> read-xml children>string ] unit-test
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
! Copyright (C) 2005, 2009 Daniel Ehrenberg
|
! Copyright (C) 2005, 2009 Daniel Ehrenberg
|
||||||
! See http://factorcode.org/license.txt for BSD license.
|
! See http://factorcode.org/license.txt for BSD license.
|
||||||
USING: help.markup help.syntax xml.data sequences strings ;
|
USING: help.markup help.syntax xml.data sequences strings multiline ;
|
||||||
IN: xml.traversal
|
IN: xml.traversal
|
||||||
|
|
||||||
ABOUT: "xml.traversal"
|
ABOUT: "xml.traversal"
|
||||||
|
@ -8,7 +8,7 @@ ABOUT: "xml.traversal"
|
||||||
ARTICLE: "xml.traversal" "Utilities for traversing XML"
|
ARTICLE: "xml.traversal" "Utilities for traversing XML"
|
||||||
"The " { $vocab-link "xml.traversal" } " vocabulary provides utilities for traversing an XML DOM tree and viewing the contents of a single tag. The following words are defined:"
|
"The " { $vocab-link "xml.traversal" } " vocabulary provides utilities for traversing an XML DOM tree and viewing the contents of a single tag. The following words are defined:"
|
||||||
$nl
|
$nl
|
||||||
"Note: the difference between deep-tag-named and tag-named is that the former searches recursively among all children and children of children of the tag, while the latter only looks at the direct children, and is therefore more efficient."
|
{ $subsection { "xml.traversal" "intro" } }
|
||||||
{ $subsection tag-named }
|
{ $subsection tag-named }
|
||||||
{ $subsection tags-named }
|
{ $subsection tags-named }
|
||||||
{ $subsection deep-tag-named }
|
{ $subsection deep-tag-named }
|
||||||
|
@ -20,6 +20,20 @@ ARTICLE: "xml.traversal" "Utilities for traversing XML"
|
||||||
{ $subsection first-child-tag }
|
{ $subsection first-child-tag }
|
||||||
{ $subsection assert-tag } ;
|
{ $subsection assert-tag } ;
|
||||||
|
|
||||||
|
ARTICLE: { "xml.traversal" "intro" } "An example of XML processing"
|
||||||
|
"To illustrate how to use the XML library, we develop a simple Atom parser in Factor. Atom is an XML-based syndication format, like RSS. To see the full version of what we develop here, look at " { $snippet "basis/syndication" } " at the " { $snippet "atom1.0" } " word. First, we want to load a file and get a DOM tree for it."
|
||||||
|
{ $code <" "file.xml" file>xml "> }
|
||||||
|
"No encoding descriptor is needed, because XML files contain sufficient information to auto-detect the encoding. Next, we want to extract information from the tree. To get the title, we can use the following:"
|
||||||
|
{ $code <" "title" tag-named children>string "> }
|
||||||
|
"The " { $link tag-named } " word finds the first tag named " { $snippet "title" } " in the top level (just under the main tag). Then, with a tag on the stack, its children are asserted to be a string, and the string is returned." $nl
|
||||||
|
"For a slightly more complicated example, we can look at how entries are parsed. To get a sequence of tags with the name " { $snippet "entry" } ":"
|
||||||
|
{ $code <" "entry" tags-named "> }
|
||||||
|
"Imagine that, for each of these, we want to get the URL of the entry. In Atom, the URLs are in a " { $snippet "link" } " tag which is contained in the " { $snippet "entry" } " tag. There are multiple " { $snippet "link" } " tags, but one of them contains the attribute " { $snippet "rel=alternate" } ", and the " { $snippet "href" } " attribute has the URL. So, given an element of the sequence produced in the above quotation, we run the code:"
|
||||||
|
{ $code <" "link" tags-named [ "rel" attr "alternate" = ] find nip "> }
|
||||||
|
"to get the link tag on the stack, and"
|
||||||
|
{ $code <" "href" attr >url "> }
|
||||||
|
"to extract the URL from it." ;
|
||||||
|
|
||||||
HELP: deep-tag-named
|
HELP: deep-tag-named
|
||||||
{ $values { "tag" "an XML tag or document" } { "name/string" "an XML name or string representing a name" } { "matching-tag" tag } }
|
{ $values { "tag" "an XML tag or document" } { "name/string" "an XML name or string representing a name" } { "matching-tag" tag } }
|
||||||
{ $description "Finds an XML tag with a matching name, recursively searching children and children of children." }
|
{ $description "Finds an XML tag with a matching name, recursively searching children and children of children." }
|
||||||
|
|
|
@ -67,9 +67,9 @@ HELP: string>dtd
|
||||||
|
|
||||||
ARTICLE: { "xml" "reading" } "Reading XML"
|
ARTICLE: { "xml" "reading" } "Reading XML"
|
||||||
"The following words are used to read something into an XML document"
|
"The following words are used to read something into an XML document"
|
||||||
{ $subsection string>xml }
|
|
||||||
{ $subsection read-xml }
|
{ $subsection read-xml }
|
||||||
{ $subsection read-xml-chunk }
|
{ $subsection read-xml-chunk }
|
||||||
|
{ $subsection string>xml }
|
||||||
{ $subsection string>xml-chunk }
|
{ $subsection string>xml-chunk }
|
||||||
{ $subsection file>xml }
|
{ $subsection file>xml }
|
||||||
{ $subsection bytes>xml }
|
{ $subsection bytes>xml }
|
||||||
|
@ -90,10 +90,16 @@ ARTICLE: { "xml" "events" } "Event-based XML parsing"
|
||||||
{ $subsection pull-event }
|
{ $subsection pull-event }
|
||||||
{ $subsection pull-elem } ;
|
{ $subsection pull-elem } ;
|
||||||
|
|
||||||
|
ARTICLE: { "xml" "namespaces" } "Working with XML namespaces"
|
||||||
|
"The Factor XML parser implements XML namespaces, and provides convenient utilities for working with them. Anywhere in the public API that a name is accepted as an argument, either a string or an XML name is accepted. If a string is used, it is coerced into a name by giving it a null namespace. Names are stored as " { $link name } " tuples, which have slots for the namespace prefix and namespace URL as well as the main part of the tag name." $nl
|
||||||
|
"To make it easier to create XML names, the parsing word " { $snippet "XML-NS:" } " is provided in the " { $vocab-link "xml.syntax" } " vocabulary." $nl
|
||||||
|
"When parsing XML, names are automatically augmented with the appropriate namespace URL when the information is available. This does not take into account any XML schema which might allow for such prefixes to be omitted. When generating XML to be written, keep in mind that the XML writer knows only about the literal prefixes and ignores the URLs. It is your job to make sure that they match up correctly, and that there is the appropriate " { $snippet "xmlns" } " declaration." ;
|
||||||
|
|
||||||
ARTICLE: "xml" "XML parser"
|
ARTICLE: "xml" "XML parser"
|
||||||
"The " { $vocab-link "xml" } " vocabulary implements the XML 1.0 and 1.1 standards, converting strings of text into XML and vice versa. The parser checks for well-formedness but is not validating. There is only partial support for processing DTDs."
|
"The " { $vocab-link "xml" } " vocabulary implements the XML 1.0 and 1.1 standards, converting strings of text into XML and vice versa. The parser checks for well-formedness but is not validating. There is only partial support for processing DTDs."
|
||||||
{ $subsection { "xml" "reading" } }
|
{ $subsection { "xml" "reading" } }
|
||||||
{ $subsection { "xml" "events" } }
|
{ $subsection { "xml" "events" } }
|
||||||
|
{ $subsection { "xml" "namespaces" } }
|
||||||
{ $vocab-subsection "Writing XML" "xml.writer" }
|
{ $vocab-subsection "Writing XML" "xml.writer" }
|
||||||
{ $vocab-subsection "XML parsing errors" "xml.errors" }
|
{ $vocab-subsection "XML parsing errors" "xml.errors" }
|
||||||
{ $vocab-subsection "XML entities" "xml.entities" }
|
{ $vocab-subsection "XML entities" "xml.entities" }
|
||||||
|
|
|
@ -4,7 +4,8 @@ USING: accessors arrays io io.encodings.binary io.files
|
||||||
io.streams.string kernel namespaces sequences strings io.encodings.utf8
|
io.streams.string kernel namespaces sequences strings io.encodings.utf8
|
||||||
xml.data xml.errors xml.elements ascii xml.entities
|
xml.data xml.errors xml.elements ascii xml.entities
|
||||||
xml.writer xml.state xml.autoencoding assocs xml.tokenize
|
xml.writer xml.state xml.autoencoding assocs xml.tokenize
|
||||||
combinators.short-circuit xml.name splitting io.streams.byte-array ;
|
combinators.short-circuit xml.name splitting io.streams.byte-array
|
||||||
|
combinators ;
|
||||||
IN: xml
|
IN: xml
|
||||||
|
|
||||||
<PRIVATE
|
<PRIVATE
|
||||||
|
@ -159,6 +160,9 @@ PRIVATE>
|
||||||
xml-stack get first second
|
xml-stack get first second
|
||||||
] with-state ; inline
|
] with-state ; inline
|
||||||
|
|
||||||
|
: make-xml ( stream quot -- xml )
|
||||||
|
0 read-seq make-xml-doc ; inline
|
||||||
|
|
||||||
PRIVATE>
|
PRIVATE>
|
||||||
|
|
||||||
: each-element ( stream quot: ( xml-elem -- ) -- )
|
: each-element ( stream quot: ( xml-elem -- ) -- )
|
||||||
|
@ -169,14 +173,16 @@ PRIVATE>
|
||||||
] with-state ; inline
|
] with-state ; inline
|
||||||
|
|
||||||
: read-xml ( stream -- xml )
|
: read-xml ( stream -- xml )
|
||||||
[ start-document [ process ] when* ]
|
dup stream-element-type {
|
||||||
0 read-seq make-xml-doc ;
|
{ +character+ [ [ check ] make-xml ] }
|
||||||
|
{ +byte+ [ [ start-document [ process ] when* ] make-xml ] }
|
||||||
|
} case ;
|
||||||
|
|
||||||
: read-xml-chunk ( stream -- seq )
|
: read-xml-chunk ( stream -- seq )
|
||||||
[ check ] 1 read-seq <xml-chunk> ;
|
[ check ] 1 read-seq <xml-chunk> ;
|
||||||
|
|
||||||
: string>xml ( string -- xml )
|
: string>xml ( string -- xml )
|
||||||
<string-reader> [ check ] 0 read-seq make-xml-doc ;
|
<string-reader> read-xml ;
|
||||||
|
|
||||||
: string>xml-chunk ( string -- xml )
|
: string>xml-chunk ( string -- xml )
|
||||||
<string-reader> read-xml-chunk ;
|
<string-reader> read-xml-chunk ;
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
! Copyright (C) 2004, 2008 Slava Pestov.
|
! Copyright (C) 2004, 2009 Slava Pestov.
|
||||||
! See http://factorcode.org/license.txt for BSD license.
|
! See http://factorcode.org/license.txt for BSD license.
|
||||||
USING: classes classes.union words kernel sequences
|
USING: classes classes.union words kernel sequences
|
||||||
definitions combinators arrays assocs generic accessors ;
|
definitions combinators arrays assocs generic accessors ;
|
||||||
|
@ -21,8 +21,9 @@ M: mixin-class rank-class drop 3 ;
|
||||||
drop
|
drop
|
||||||
] [
|
] [
|
||||||
[ { } redefine-mixin-class ]
|
[ { } redefine-mixin-class ]
|
||||||
|
[ H{ } clone "instances" set-word-prop ]
|
||||||
[ update-classes ]
|
[ update-classes ]
|
||||||
bi
|
tri
|
||||||
] if ;
|
] if ;
|
||||||
|
|
||||||
TUPLE: check-mixin-class class ;
|
TUPLE: check-mixin-class class ;
|
||||||
|
@ -44,6 +45,11 @@ TUPLE: check-mixin-class class ;
|
||||||
[ [ update-class ] each ]
|
[ [ update-class ] each ]
|
||||||
[ implementors [ remake-generic ] each ] bi ;
|
[ implementors [ remake-generic ] each ] bi ;
|
||||||
|
|
||||||
|
: (add-mixin-instance) ( class mixin -- )
|
||||||
|
[ [ suffix ] change-mixin-class ]
|
||||||
|
[ [ f ] 2dip "instances" word-prop set-at ]
|
||||||
|
2bi ;
|
||||||
|
|
||||||
: add-mixin-instance ( class mixin -- )
|
: add-mixin-instance ( class mixin -- )
|
||||||
#! Note: we call update-classes on the new member, not the
|
#! Note: we call update-classes on the new member, not the
|
||||||
#! mixin. This ensures that we only have to update the
|
#! mixin. This ensures that we only have to update the
|
||||||
|
@ -53,20 +59,22 @@ TUPLE: check-mixin-class class ;
|
||||||
#! updated by transitivity; the mixins usages appear in
|
#! updated by transitivity; the mixins usages appear in
|
||||||
#! class-usages of the member, now that it's been added.
|
#! class-usages of the member, now that it's been added.
|
||||||
[ 2drop ] [
|
[ 2drop ] [
|
||||||
[ [ suffix ] change-mixin-class ] 2keep
|
[ (add-mixin-instance) ] 2keep
|
||||||
[ nip ] [ [ new-class? ] either? ] 2bi [
|
[ nip ] [ [ new-class? ] either? ] 2bi
|
||||||
update-classes/new
|
[ update-classes/new ] [ update-classes ] if
|
||||||
] [
|
|
||||||
update-classes
|
|
||||||
] if
|
|
||||||
] if-mixin-member? ;
|
] if-mixin-member? ;
|
||||||
|
|
||||||
|
: (remove-mixin-instance) ( class mixin -- )
|
||||||
|
[ [ swap remove ] change-mixin-class ]
|
||||||
|
[ "instances" word-prop delete-at ]
|
||||||
|
2bi ;
|
||||||
|
|
||||||
: remove-mixin-instance ( class mixin -- )
|
: remove-mixin-instance ( class mixin -- )
|
||||||
#! The order of the three clauses is important here. The last
|
#! The order of the three clauses is important here. The last
|
||||||
#! one must come after the other two so that the entries it
|
#! one must come after the other two so that the entries it
|
||||||
#! adds to changed-generics are not overwritten.
|
#! adds to changed-generics are not overwritten.
|
||||||
[
|
[
|
||||||
[ [ swap remove ] change-mixin-class ]
|
[ (remove-mixin-instance) ]
|
||||||
[ nip update-classes ]
|
[ nip update-classes ]
|
||||||
[ class-usages update-methods ]
|
[ class-usages update-methods ]
|
||||||
2tri
|
2tri
|
||||||
|
@ -76,32 +84,21 @@ M: mixin-class class-forgotten remove-mixin-instance ;
|
||||||
|
|
||||||
! Definition protocol implementation ensures that removing an
|
! Definition protocol implementation ensures that removing an
|
||||||
! INSTANCE: declaration from a source file updates the mixin.
|
! INSTANCE: declaration from a source file updates the mixin.
|
||||||
TUPLE: mixin-instance loc class mixin ;
|
TUPLE: mixin-instance class mixin ;
|
||||||
|
|
||||||
M: mixin-instance equal?
|
C: <mixin-instance> mixin-instance
|
||||||
{
|
|
||||||
{ [ over mixin-instance? not ] [ f ] }
|
|
||||||
{ [ 2dup [ class>> ] bi@ = not ] [ f ] }
|
|
||||||
{ [ 2dup [ mixin>> ] bi@ = not ] [ f ] }
|
|
||||||
[ t ]
|
|
||||||
} cond 2nip ;
|
|
||||||
|
|
||||||
M: mixin-instance hashcode*
|
: >mixin-instance< ( mixin-instance -- class mixin )
|
||||||
[ class>> ] [ mixin>> ] bi 2array hashcode* ;
|
[ class>> ] [ mixin>> ] bi ; inline
|
||||||
|
|
||||||
: <mixin-instance> ( class mixin -- definition )
|
M: mixin-instance where >mixin-instance< "instances" word-prop at ;
|
||||||
mixin-instance new
|
|
||||||
swap >>mixin
|
|
||||||
swap >>class ;
|
|
||||||
|
|
||||||
M: mixin-instance where loc>> ;
|
M: mixin-instance set-where >mixin-instance< "instances" word-prop set-at ;
|
||||||
|
|
||||||
M: mixin-instance set-where (>>loc) ;
|
|
||||||
|
|
||||||
M: mixin-instance definer drop \ INSTANCE: f ;
|
M: mixin-instance definer drop \ INSTANCE: f ;
|
||||||
|
|
||||||
M: mixin-instance definition drop f ;
|
M: mixin-instance definition drop f ;
|
||||||
|
|
||||||
M: mixin-instance forget*
|
M: mixin-instance forget*
|
||||||
[ class>> ] [ mixin>> ] bi
|
>mixin-instance<
|
||||||
dup mixin-class? [ remove-mixin-instance ] [ 2drop ] if ;
|
dup mixin-class? [ remove-mixin-instance ] [ 2drop ] if ;
|
||||||
|
|
|
@ -247,4 +247,4 @@ GENERIC: move-method-generic ( a -- b )
|
||||||
|
|
||||||
[ ] [ "IN: generic.tests.a" <string-reader> "move-method-test-1" parse-stream drop ] unit-test
|
[ ] [ "IN: generic.tests.a" <string-reader> "move-method-test-1" parse-stream drop ] unit-test
|
||||||
|
|
||||||
[ { string } ] [ move-method-generic order ] unit-test
|
[ { string } ] [ \ move-method-generic order ] unit-test
|
|
@ -124,6 +124,6 @@ ARTICLE: "io.encodings" "I/O encodings"
|
||||||
"Combinators to change the encoding:"
|
"Combinators to change the encoding:"
|
||||||
{ $subsection with-encoded-output }
|
{ $subsection with-encoded-output }
|
||||||
{ $subsection with-decoded-input }
|
{ $subsection with-decoded-input }
|
||||||
{ $see-also "encodings-introduction" "stream-elements" } ;
|
{ $see-also "encodings-introduction" } ;
|
||||||
|
|
||||||
ABOUT: "io.encodings"
|
ABOUT: "io.encodings"
|
||||||
|
|
|
@ -262,7 +262,6 @@ $nl
|
||||||
{ $subsection stream-nl }
|
{ $subsection stream-nl }
|
||||||
"This word is for streams that allow seeking:"
|
"This word is for streams that allow seeking:"
|
||||||
{ $subsection stream-seek }
|
{ $subsection stream-seek }
|
||||||
"For a discussion of the distinction between binary and string streams, see " { $link "stream-elements" } "."
|
|
||||||
{ $see-also "io.timeouts" } ;
|
{ $see-also "io.timeouts" } ;
|
||||||
|
|
||||||
ARTICLE: "stdio-motivation" "Motivation for default streams"
|
ARTICLE: "stdio-motivation" "Motivation for default streams"
|
||||||
|
@ -313,7 +312,7 @@ $nl
|
||||||
{ $subsection read }
|
{ $subsection read }
|
||||||
{ $subsection read-until }
|
{ $subsection read-until }
|
||||||
{ $subsection read-partial }
|
{ $subsection read-partial }
|
||||||
"If the default input stream is a string stream (" { $link "stream-elements" } "), lines of text can be read:"
|
"If the default input stream is a character stream (" { $link stream-element-type } " outputs " { $link +character+ } "), lines of text can be read:"
|
||||||
{ $subsection readln }
|
{ $subsection readln }
|
||||||
"Seeking on the default input stream:"
|
"Seeking on the default input stream:"
|
||||||
{ $subsection seek-input }
|
{ $subsection seek-input }
|
||||||
|
@ -328,7 +327,7 @@ $nl
|
||||||
{ $subsection flush }
|
{ $subsection flush }
|
||||||
{ $subsection write1 }
|
{ $subsection write1 }
|
||||||
{ $subsection write }
|
{ $subsection write }
|
||||||
"If the default output stream is a string stream (" { $link "stream-elements" } "), lines of text can be written:"
|
"If the default output stream is a character stream (" { $link stream-element-type } " outputs " { $link +character+ } "), lines of text can be written:"
|
||||||
{ $subsection readln }
|
{ $subsection readln }
|
||||||
{ $subsection print }
|
{ $subsection print }
|
||||||
{ $subsection nl }
|
{ $subsection nl }
|
||||||
|
|
|
@ -3,7 +3,7 @@ io.streams.string namespaces classes effects source-files assocs
|
||||||
sequences strings io.files io.pathnames definitions
|
sequences strings io.files io.pathnames definitions
|
||||||
continuations sorting classes.tuple compiler.units debugger
|
continuations sorting classes.tuple compiler.units debugger
|
||||||
vocabs vocabs.loader accessors eval combinators lexer
|
vocabs vocabs.loader accessors eval combinators lexer
|
||||||
vocabs.parser words.symbol ;
|
vocabs.parser words.symbol multiline ;
|
||||||
IN: parser.tests
|
IN: parser.tests
|
||||||
|
|
||||||
\ run-file must-infer
|
\ run-file must-infer
|
||||||
|
@ -560,7 +560,7 @@ EXCLUDE: qualified.tests.bar => x ;
|
||||||
! Two similar bugs
|
! Two similar bugs
|
||||||
|
|
||||||
! Replace : def with something in << >>
|
! Replace : def with something in << >>
|
||||||
[ [ ] ] [
|
/* [ [ ] ] [
|
||||||
"IN: parser.tests : was-once-a-word-bug ( -- ) ;"
|
"IN: parser.tests : was-once-a-word-bug ( -- ) ;"
|
||||||
<string-reader> "was-once-a-word-test" parse-stream
|
<string-reader> "was-once-a-word-test" parse-stream
|
||||||
] unit-test
|
] unit-test
|
||||||
|
@ -572,7 +572,7 @@ EXCLUDE: qualified.tests.bar => x ;
|
||||||
<string-reader> "was-once-a-word-test" parse-stream
|
<string-reader> "was-once-a-word-test" parse-stream
|
||||||
] unit-test
|
] unit-test
|
||||||
|
|
||||||
[ t ] [ "was-once-a-word-bug" "parser.tests" lookup >boolean ] unit-test
|
[ t ] [ "was-once-a-word-bug" "parser.tests" lookup >boolean ] unit-test */
|
||||||
|
|
||||||
! Replace : def with DEFER:
|
! Replace : def with DEFER:
|
||||||
[ [ ] ] [
|
[ [ ] ] [
|
||||||
|
|
|
@ -24,10 +24,10 @@ IN: benchmark
|
||||||
[
|
[
|
||||||
[
|
[
|
||||||
[ [ 1array $vocab-link ] with-cell ]
|
[ [ 1array $vocab-link ] with-cell ]
|
||||||
[ [ 1000000 /f pprint-cell ] [ "failed" write ] if* ] bi*
|
[ [ 1000000 /f pprint-cell ] [ [ "failed" write ] with-cell ] if* ] bi*
|
||||||
] with-row
|
] with-row
|
||||||
] assoc-each
|
] assoc-each
|
||||||
] tabular-output ;
|
] tabular-output nl ;
|
||||||
|
|
||||||
: benchmarks ( -- )
|
: benchmarks ( -- )
|
||||||
run-benchmarks benchmarks. ;
|
run-benchmarks benchmarks. ;
|
||||||
|
|
|
@ -0,0 +1,42 @@
|
||||||
|
USING: accessors arrays cocoa.dialogs combinators continuations
|
||||||
|
fry grouping io.encodings.utf8 io.files io.styles kernel math
|
||||||
|
math.parser models models.arrow models.history namespaces random
|
||||||
|
sequences splitting ui ui.gadgets.alerts ui.gadgets.book-extras
|
||||||
|
ui.gadgets.books ui.gadgets.buttons ui.gadgets.frames
|
||||||
|
ui.gadgets.grids ui.gadgets.labels ui.gadgets.tracks ui.gestures
|
||||||
|
ui.gadgets.corners ;
|
||||||
|
|
||||||
|
IN: drills
|
||||||
|
SYMBOLS: it startLength ;
|
||||||
|
: big ( gadget -- gadget ) { "sans-serif" plain 30 } >>font ;
|
||||||
|
: card ( model quot -- button ) <arrow> <label-control> big [ next ] <book-btn> ;
|
||||||
|
: op ( quot str -- gadget ) <label> big swap <book-bevel-btn> ;
|
||||||
|
|
||||||
|
: show ( model -- gadget ) dup it set-global [ random ] <arrow>
|
||||||
|
{ [ [ first ] card ]
|
||||||
|
[ [ [ second ] [ drop [ "malformed input" throw ] "Malformed Input" alert ] recover ] card ]
|
||||||
|
[ '[ |<< [ it get [
|
||||||
|
_ value>> swap remove
|
||||||
|
[ [ it get go-back ] "Drill Complete" alert return ] when-empty
|
||||||
|
] change-model ] with-return ] "Yes" op ]
|
||||||
|
[ '[ |<< it get _ model-changed ] "No" op ] } cleave
|
||||||
|
2array { 1 0 } <track> swap [ 0.5 track-add ] each
|
||||||
|
3array <book*> 3 3 <frame> { 450 175 } >>pref-dim swap @center grid-add
|
||||||
|
it get [ length startLength get swap - number>string "/" startLength get number>string 3append ] <arrow> <label-control> @bottom grid-add ;
|
||||||
|
|
||||||
|
: drill ( -- ) [
|
||||||
|
open-panel [
|
||||||
|
[ utf8 file-lines [ "\t" split
|
||||||
|
[ " " split 4 group [ " " join ] map ] map ] map ] map concat dup [ [ first ] [ second ] bi swap 2array ] map append
|
||||||
|
[ length startLength set-global ] keep <history> [ add-history ] [ show ] bi
|
||||||
|
"Got it?" open-window
|
||||||
|
] when*
|
||||||
|
] with-ui ;
|
||||||
|
|
||||||
|
|
||||||
|
MAIN: drill
|
||||||
|
|
||||||
|
|
||||||
|
! FIXME: command-line opening
|
||||||
|
! TODO: Menu bar
|
||||||
|
! TODO: Pious hot-buttons
|
|
@ -0,0 +1 @@
|
||||||
|
unportable
|
|
@ -0,0 +1 @@
|
||||||
|
Maxim Savchenko
|
|
@ -0,0 +1,30 @@
|
||||||
|
! Copyright (C) 2009 Maxim Savchenko
|
||||||
|
! See http://factorcode.org/license.txt for BSD license.
|
||||||
|
|
||||||
|
USING: namespaces ecdsa tools.test checksums checksums.openssl ;
|
||||||
|
IN: ecdsa.tests
|
||||||
|
|
||||||
|
SYMBOLS: priv-key pub-key signature ;
|
||||||
|
|
||||||
|
: message ( -- msg ) "Hello world!" ;
|
||||||
|
|
||||||
|
[ ] ! Generating keys
|
||||||
|
[
|
||||||
|
"prime256v1" [ generate-key get-private-key get-public-key ] with-ec
|
||||||
|
pub-key set priv-key set
|
||||||
|
] unit-test
|
||||||
|
|
||||||
|
[ ] ! Signing message
|
||||||
|
[
|
||||||
|
message "sha256" <openssl-checksum> checksum-bytes
|
||||||
|
priv-key get
|
||||||
|
"prime256v1" [ set-private-key ecdsa-sign ] with-ec
|
||||||
|
signature set
|
||||||
|
] unit-test
|
||||||
|
|
||||||
|
[ t ] ! Verifying signature
|
||||||
|
[
|
||||||
|
message "sha256" <openssl-checksum> checksum-bytes
|
||||||
|
signature get pub-key get
|
||||||
|
"prime256v1" [ set-public-key ecdsa-verify ] with-ec
|
||||||
|
] unit-test
|
|
@ -0,0 +1,75 @@
|
||||||
|
! Copyright (C) 2009 Maxim Savchenko
|
||||||
|
! See http://factorcode.org/license.txt for BSD license.
|
||||||
|
|
||||||
|
USING: kernel accessors sequences sequences.private destructors math namespaces
|
||||||
|
locals openssl openssl.libcrypto byte-arrays bit-arrays.private
|
||||||
|
alien.c-types alien.destructors ;
|
||||||
|
|
||||||
|
IN: ecdsa
|
||||||
|
|
||||||
|
<PRIVATE
|
||||||
|
|
||||||
|
TUPLE: ec-key handle ;
|
||||||
|
|
||||||
|
M: ec-key dispose
|
||||||
|
[ EC_KEY_free f ] change-handle drop ;
|
||||||
|
|
||||||
|
: <ec-key> ( curve -- key )
|
||||||
|
OBJ_sn2nid dup zero? [ "Unknown curve name" throw ] when
|
||||||
|
EC_KEY_new_by_curve_name dup ssl-error ec-key boa ;
|
||||||
|
|
||||||
|
: ec-key-handle ( -- handle )
|
||||||
|
ec-key get dup handle>> [ nip ] [ already-disposed ] if* ;
|
||||||
|
|
||||||
|
DESTRUCTOR: BN_clear_free
|
||||||
|
|
||||||
|
DESTRUCTOR: EC_POINT_clear_free
|
||||||
|
|
||||||
|
PRIVATE>
|
||||||
|
|
||||||
|
: with-ec ( curve quot -- )
|
||||||
|
swap <ec-key> [ ec-key rot with-variable ] with-disposal ; inline
|
||||||
|
|
||||||
|
: generate-key ( -- )
|
||||||
|
ec-key get handle>> EC_KEY_generate_key ssl-error ;
|
||||||
|
|
||||||
|
: set-private-key ( bin -- )
|
||||||
|
ec-key-handle swap
|
||||||
|
dup length f BN_bin2bn dup ssl-error
|
||||||
|
[ &BN_clear_free EC_KEY_set_private_key ssl-error ] with-destructors ;
|
||||||
|
|
||||||
|
:: set-public-key ( BIN -- )
|
||||||
|
ec-key-handle :> KEY
|
||||||
|
KEY EC_KEY_get0_group :> GROUP
|
||||||
|
GROUP EC_POINT_new dup ssl-error
|
||||||
|
[
|
||||||
|
&EC_POINT_clear_free :> POINT
|
||||||
|
GROUP POINT BIN dup length f EC_POINT_oct2point ssl-error
|
||||||
|
KEY POINT EC_KEY_set_public_key ssl-error
|
||||||
|
] with-destructors ;
|
||||||
|
|
||||||
|
: get-private-key ( -- bin/f )
|
||||||
|
ec-key-handle EC_KEY_get0_private_key
|
||||||
|
dup [ dup BN_num_bits bits>bytes <byte-array> tuck BN_bn2bin drop ] when ;
|
||||||
|
|
||||||
|
:: get-public-key ( -- bin/f )
|
||||||
|
ec-key-handle :> KEY
|
||||||
|
KEY EC_KEY_get0_public_key dup
|
||||||
|
[| PUB |
|
||||||
|
KEY EC_KEY_get0_group :> GROUP
|
||||||
|
GROUP EC_GROUP_get_degree bits>bytes 1+ :> LEN
|
||||||
|
LEN <byte-array> :> BIN
|
||||||
|
GROUP PUB POINT_CONVERSION_COMPRESSED BIN LEN f
|
||||||
|
EC_POINT_point2oct ssl-error
|
||||||
|
BIN
|
||||||
|
] when ;
|
||||||
|
|
||||||
|
:: ecdsa-sign ( DGST -- sig )
|
||||||
|
ec-key-handle :> KEY
|
||||||
|
KEY ECDSA_size dup ssl-error <byte-array> :> SIG
|
||||||
|
"uint" <c-object> :> LEN
|
||||||
|
0 DGST dup length SIG LEN KEY ECDSA_sign ssl-error
|
||||||
|
LEN *uint SIG resize ;
|
||||||
|
|
||||||
|
: ecdsa-verify ( dgst sig -- ? )
|
||||||
|
ec-key-handle [ 0 -rot [ dup length ] bi@ ] dip ECDSA_verify 0 > ;
|
|
@ -0,0 +1 @@
|
||||||
|
Elliptic Curve Digital Signature Algorithm (OpenSSL realisation)
|
|
@ -1,7 +1,8 @@
|
||||||
IN: game-input.tests
|
IN: game-input.tests
|
||||||
USING: game-input tools.test kernel system ;
|
USING: game-input tools.test kernel system threads ;
|
||||||
|
|
||||||
os windows? os macosx? or [
|
os windows? os macosx? or [
|
||||||
[ ] [ open-game-input ] unit-test
|
[ ] [ open-game-input ] unit-test
|
||||||
|
[ ] [ yield ] unit-test
|
||||||
[ ] [ close-game-input ] unit-test
|
[ ] [ close-game-input ] unit-test
|
||||||
] when
|
] when
|
|
@ -5,7 +5,10 @@ IN: io.serial.unix
|
||||||
|
|
||||||
: serial-obj ( -- obj )
|
: serial-obj ( -- obj )
|
||||||
serial new
|
serial new
|
||||||
"/dev/ttyS0" >>path
|
"/dev/ttyS0" >>path ! linux
|
||||||
|
! "/dev/dty00" >>path ! netbsd
|
||||||
|
! "/dev/ttyd0" >>path ! freebsd
|
||||||
|
! "/dev/ttyU0" >>path ! openbsd
|
||||||
19200 >>baud
|
19200 >>baud
|
||||||
{ IGNPAR ICRNL } flags >>iflag
|
{ IGNPAR ICRNL } flags >>iflag
|
||||||
{ } flags >>oflag
|
{ } flags >>oflag
|
||||||
|
|
|
@ -0,0 +1 @@
|
||||||
|
Sam Anklesaria
|
|
@ -0,0 +1,14 @@
|
||||||
|
USING: peg.ebnf help.syntax help.markup strings ;
|
||||||
|
IN: peg-lexer
|
||||||
|
|
||||||
|
HELP: ON-BNF:
|
||||||
|
{ $syntax "ON-BNF: word ... ;ON-BNF" }
|
||||||
|
{ $description "Creates a parsing word using a parser for lexer control, adding the resulting ast to the stack. Parser syntax is as in " { $link POSTPONE: EBNF: } } ;
|
||||||
|
|
||||||
|
HELP: create-bnf
|
||||||
|
{ $values { "name" string } { "parser" parser } }
|
||||||
|
{ $description "Runtime equivalent of " { $link POSTPONE: ON-BNF: } " also useful with manually constructed parsers." } ;
|
||||||
|
|
||||||
|
HELP: factor
|
||||||
|
{ $values { "input" string } { "ast" "a sequence of tokens" } }
|
||||||
|
{ $description "Tokenizer that acts like standard factor lexer, separating tokens by whitespace." } ;
|
|
@ -0,0 +1,14 @@
|
||||||
|
USING: tools.test peg-lexer.test-parsers ;
|
||||||
|
IN: peg-lexer.tests
|
||||||
|
|
||||||
|
{ V{ "1234" "-end" } } [
|
||||||
|
test1 1234-end
|
||||||
|
] unit-test
|
||||||
|
|
||||||
|
{ V{ 1234 53 } } [
|
||||||
|
test2 12345
|
||||||
|
] unit-test
|
||||||
|
|
||||||
|
{ V{ "heavy" "duty" "testing" } } [
|
||||||
|
test3 heavy duty testing
|
||||||
|
] unit-test
|
|
@ -0,0 +1,52 @@
|
||||||
|
USING: hashtables assocs sequences locals math accessors multiline delegate strings
|
||||||
|
delegate.protocols kernel peg peg.ebnf lexer namespaces combinators parser words ;
|
||||||
|
IN: peg-lexer
|
||||||
|
|
||||||
|
TUPLE: lex-hash hash ;
|
||||||
|
CONSULT: assoc-protocol lex-hash hash>> ;
|
||||||
|
: <lex-hash> ( a -- lex-hash ) lex-hash boa ;
|
||||||
|
|
||||||
|
: pos-or-0 ( neg? -- pos/0 ) dup 0 < [ drop 0 ] when ;
|
||||||
|
|
||||||
|
:: prepare-pos ( v i -- c l )
|
||||||
|
[let | n [ i v head-slice ] |
|
||||||
|
v CHAR: \n n last-index -1 or 1+ -
|
||||||
|
n [ CHAR: \n = ] count 1+ ] ;
|
||||||
|
|
||||||
|
: store-pos ( v a -- ) input swap at prepare-pos
|
||||||
|
lexer get [ (>>line) ] keep (>>column) ;
|
||||||
|
|
||||||
|
M: lex-hash set-at swap {
|
||||||
|
{ pos [ store-pos ] }
|
||||||
|
[ swap hash>> set-at ] } case ;
|
||||||
|
|
||||||
|
:: at-pos ( t l c -- p ) t l head-slice [ length ] map sum l 1- + c + ;
|
||||||
|
|
||||||
|
M: lex-hash at* swap {
|
||||||
|
{ input [ drop lexer get text>> "\n" join t ] }
|
||||||
|
{ pos [ drop lexer get [ text>> ] [ line>> 1- ] [ column>> 1+ ] tri at-pos t ] }
|
||||||
|
[ swap hash>> at* ] } case ;
|
||||||
|
|
||||||
|
: with-global-lexer ( quot -- result )
|
||||||
|
[ f lrstack set
|
||||||
|
V{ } clone error-stack set H{ } clone \ heads set
|
||||||
|
H{ } clone \ packrat set ] f make-assoc <lex-hash>
|
||||||
|
swap bind ; inline
|
||||||
|
|
||||||
|
: parse* ( parser -- ast ) compile
|
||||||
|
[ execute [ error-stack get first throw ] unless* ] with-global-lexer
|
||||||
|
ast>> ;
|
||||||
|
|
||||||
|
: create-bnf ( name parser -- ) reset-tokenizer [ lexer get skip-blank parse* parsed ] curry
|
||||||
|
define word make-parsing ;
|
||||||
|
|
||||||
|
: ON-BNF: CREATE-WORD reset-tokenizer ";ON-BNF" parse-multiline-string parse-ebnf
|
||||||
|
main swap at create-bnf ; parsing
|
||||||
|
|
||||||
|
! Tokenizer like standard factor lexer
|
||||||
|
EBNF: factor
|
||||||
|
space = " " | "\n" | "\t"
|
||||||
|
spaces = space* => [[ drop ignore ]]
|
||||||
|
chunk = (!(space) .)+ => [[ >string ]]
|
||||||
|
expr = spaces chunk
|
||||||
|
;EBNF
|
|
@ -0,0 +1 @@
|
||||||
|
Use peg to write parsing words
|
|
@ -0,0 +1 @@
|
||||||
|
reflection
|
|
@ -0,0 +1,17 @@
|
||||||
|
USING: peg-lexer math.parser strings ;
|
||||||
|
IN: peg-lexer.test-parsers
|
||||||
|
|
||||||
|
ON-BNF: test1
|
||||||
|
num = [1-4]* => [[ >string ]]
|
||||||
|
expr = num ( "-end" | "-done" )
|
||||||
|
;ON-BNF
|
||||||
|
|
||||||
|
ON-BNF: test2
|
||||||
|
num = [1-4]* => [[ >string string>number ]]
|
||||||
|
expr= num [5-9]
|
||||||
|
;ON-BNF
|
||||||
|
|
||||||
|
ON-BNF: test3
|
||||||
|
tokenizer = <foreign factor>
|
||||||
|
expr= "heavy" "duty" "testing"
|
||||||
|
;ON-BNF
|
|
@ -1,9 +1,9 @@
|
||||||
! Copyright (C) 2007, 2008 Slava Pestov.
|
! Copyright (C) 2007, 2009 Slava Pestov.
|
||||||
! See http://factorcode.org/license.txt for BSD license.
|
! See http://factorcode.org/license.txt for BSD license.
|
||||||
USING: arrays hashtables help.markup help.stylesheet io
|
USING: arrays hashtables help.markup help.stylesheet io
|
||||||
io.styles kernel math models namespaces sequences ui ui.gadgets
|
io.styles kernel math models namespaces sequences ui ui.gadgets
|
||||||
ui.gadgets.books ui.gadgets.panes ui.gestures ui.pens.gradient
|
ui.gadgets.books ui.gadgets.panes ui.gestures ui.pens.gradient
|
||||||
parser accessors colors ;
|
parser accessors colors fry ;
|
||||||
IN: slides
|
IN: slides
|
||||||
|
|
||||||
CONSTANT: stylesheet
|
CONSTANT: stylesheet
|
||||||
|
@ -104,4 +104,4 @@ TUPLE: slides < book ;
|
||||||
} set-gestures
|
} set-gestures
|
||||||
|
|
||||||
: slides-window ( slides -- )
|
: slides-window ( slides -- )
|
||||||
[ <slides> "Slides" open-window ] with-ui ;
|
'[ _ <slides> "Slides" open-window ] with-ui ;
|
||||||
|
|
|
@ -0,0 +1,4 @@
|
||||||
|
USING: accessors ui ui.gadgets ui.gadgets.labels ui.gadgets.buttons ui.gadgets.packs locals sequences io.styles ;
|
||||||
|
IN: ui.gadgets.alerts
|
||||||
|
:: alert ( quot string -- ) <pile> { 10 10 } >>gap 1 >>align string <label> { "sans-serif" plain 18 } >>font { 200 100 } >>pref-dim add-gadget
|
||||||
|
"okay" [ close-window ] quot append <border-button> add-gadget "" open-window ;
|
|
@ -0,0 +1,11 @@
|
||||||
|
USING: accessors kernel fry math models ui.gadgets ui.gadgets.books ui.gadgets.buttons ;
|
||||||
|
IN: ui.gadgets.book-extras
|
||||||
|
: <book*> ( pages -- book ) 0 <model> <book> ;
|
||||||
|
: |<< ( book -- ) 0 swap set-control-value ;
|
||||||
|
: next ( book -- ) model>> [ 1 + ] change-model ;
|
||||||
|
: prev ( book -- ) model>> [ 1 - ] change-model ;
|
||||||
|
: (book-t) ( quot -- quot ) '[ : owner ( gadget -- book ) parent>> dup book? [ owner ] unless ; owner @ ] ;
|
||||||
|
: <book-btn> ( label quot -- button ) (book-t) <button> ;
|
||||||
|
: <book-bevel-btn> ( label quot -- button ) (book-t) <border-button> ;
|
||||||
|
: >>> ( label -- button ) [ next ] <book-btn> ;
|
||||||
|
: <<< ( label -- button ) [ prev ] <book-btn> ;
|
|
@ -0,0 +1,6 @@
|
||||||
|
USING: accessors sequences namespaces ui.render opengl fry kernel ;
|
||||||
|
IN: ui.utils
|
||||||
|
SYMBOLS: width height ;
|
||||||
|
: store-dim ( gadget -- ) dim>> [ first width set ] [ second height set ] bi ;
|
||||||
|
: with-dim ( gadget quot -- ) '[ _ store-dim @ ] with-scope ; inline
|
||||||
|
: with-w/h ( gadget quot -- ) '[ origin get _ with-translation ] with-dim ; inline
|
|
@ -5,7 +5,7 @@
|
||||||
<t:title><t:label t:name="title" /></t:title>
|
<t:title><t:label t:name="title" /></t:title>
|
||||||
|
|
||||||
<div class="description">
|
<div class="description">
|
||||||
<t:farkup t:name="parsed" t:parsed="true" />
|
<t:farkup t:name="content" />
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<p>
|
<p>
|
||||||
|
|
|
@ -47,7 +47,7 @@ article "ARTICLES" {
|
||||||
|
|
||||||
: <article> ( title -- article ) article new swap >>title ;
|
: <article> ( title -- article ) article new swap >>title ;
|
||||||
|
|
||||||
TUPLE: revision id title author date content parsed description ;
|
TUPLE: revision id title author date content description ;
|
||||||
|
|
||||||
revision "REVISIONS" {
|
revision "REVISIONS" {
|
||||||
{ "id" "ID" INTEGER +db-assigned-id+ }
|
{ "id" "ID" INTEGER +db-assigned-id+ }
|
||||||
|
@ -55,7 +55,6 @@ revision "REVISIONS" {
|
||||||
{ "author" "AUTHOR" { VARCHAR 256 } +not-null+ } ! uid
|
{ "author" "AUTHOR" { VARCHAR 256 } +not-null+ } ! uid
|
||||||
{ "date" "DATE" TIMESTAMP +not-null+ }
|
{ "date" "DATE" TIMESTAMP +not-null+ }
|
||||||
{ "content" "CONTENT" TEXT +not-null+ }
|
{ "content" "CONTENT" TEXT +not-null+ }
|
||||||
{ "parsed" "PARSED" FACTOR-BLOB +not-null+ } ! Farkup AST
|
|
||||||
{ "description" "DESCRIPTION" TEXT }
|
{ "description" "DESCRIPTION" TEXT }
|
||||||
} define-persistent
|
} define-persistent
|
||||||
|
|
||||||
|
@ -72,9 +71,6 @@ M: revision feed-entry-url id>> revision-url ;
|
||||||
: <revision> ( id -- revision )
|
: <revision> ( id -- revision )
|
||||||
revision new swap >>id ;
|
revision new swap >>id ;
|
||||||
|
|
||||||
: compute-html ( revision -- )
|
|
||||||
dup content>> parse-farkup >>parsed drop ;
|
|
||||||
|
|
||||||
: validate-title ( -- )
|
: validate-title ( -- )
|
||||||
{ { "title" [ v-one-line ] } } validate-params ;
|
{ { "title" [ v-one-line ] } } validate-params ;
|
||||||
|
|
||||||
|
@ -141,13 +137,12 @@ M: revision feed-entry-url id>> revision-url ;
|
||||||
[ title>> ] [ id>> ] bi article boa insert-tuple ;
|
[ title>> ] [ id>> ] bi article boa insert-tuple ;
|
||||||
|
|
||||||
: add-revision ( revision -- )
|
: add-revision ( revision -- )
|
||||||
[ compute-html ]
|
|
||||||
[ insert-tuple ]
|
[ insert-tuple ]
|
||||||
[
|
[
|
||||||
dup title>> <article> select-tuple
|
dup title>> <article> select-tuple
|
||||||
[ amend-article ] [ add-article ] if*
|
[ amend-article ] [ add-article ] if*
|
||||||
]
|
]
|
||||||
tri ;
|
bi ;
|
||||||
|
|
||||||
: <edit-article-action> ( -- action )
|
: <edit-article-action> ( -- action )
|
||||||
<page-action>
|
<page-action>
|
||||||
|
|