Merge git://factorcode.org/git/factor
commit 7053f84244
@@ -79,6 +79,10 @@ M: sequence hashcode*
dup empty? [
drop
] [
hash-case-table hash-dispatch-quot
[ dup hashcode >fixnum ] swap append
dup length 4 <= [
case>quot
] [
hash-case-table hash-dispatch-quot
[ dup hashcode >fixnum ] swap append
] if
] if ;
@@ -50,7 +50,7 @@ IN: temporary
global keys =
] unit-test

[ 3 ] [ 1 2 [ curry [ 3 ] [ 4 ] if ] compile-1 ] unit-test
[ 3 ] [ 1 [ 2 ] [ curry [ 3 ] [ 4 ] if ] compile-1 ] unit-test

[ 3 ] [ t [ 3 [ ] curry 4 [ ] curry if ] compile-1 ] unit-test

@@ -56,3 +56,8 @@ IN: temporary
\ recursive compile

[ ] [ t recursive ] unit-test

! Make sure error reporting works

[ [ dup ] compile-1 ] unit-test-fails
[ [ drop ] compile-1 ] unit-test-fails
@@ -8,7 +8,7 @@ assocs quotations sequences.private io.binary io.crc32
io.streams.string layouts splitting math.intervals
math.floats.private tuples tuples.private classes
optimizer.def-use optimizer.backend optimizer.pattern-match
float-arrays combinators.private ;
float-arrays combinators.private combinators ;

! the output of <tuple> and <tuple-boa> has the class which is
! its second-to-last input

@@ -50,6 +50,20 @@ float-arrays combinators.private ;
{ [ dup disjoint-eq? ] [ [ f ] inline-literals ] }
} define-optimizers

: literal-member? ( #call -- ? )
node-in-d peek dup value?
[ value-literal sequence? ] [ drop f ] if ;

: member-quot ( seq -- newquot )
[ [ t ] ] { } map>assoc [ drop f ] add [ nip case ] curry ;

: expand-member ( #call -- )
dup node-in-d peek value-literal member-quot splice-quot ;

\ member? {
{ [ dup literal-member? ] [ expand-member ] }
} define-optimizers

! if the result of eq? is t and the second input is a literal,
! the first input is equal to the second
\ eq? [
@@ -111,7 +111,7 @@ optimizer.def-use generic.standard ;

: post-process ( class interval node -- classes intervals )
dupd won't-overflow?
[ >r dup { f integer } memq? [ drop fixnum ] when r> ] when
[ >r dup { f integer } member? [ drop fixnum ] when r> ] when
[ dup [ 1array ] when ] 2apply ;

: math-output-interval-1 ( node word -- interval )
@@ -26,6 +26,8 @@ HINTS: do-trans-map string ;
over push
] if ;

HINTS: do-line vector string ;

: (reverse-complement) ( seq -- )
readln [ do-line (reverse-complement) ] [ show-seq ] if* ;
@@ -2,8 +2,8 @@
! See http://factorcode.org/license.txt for BSD license.

USING: arrays hashtables io io.streams.string kernel math
math.vectors math.functions math.parser
namespaces sequences strings tuples system ;
math.vectors math.functions math.parser namespaces sequences
strings tuples system debugger ;
IN: calendar

TUPLE: timestamp year month day hour minute second gmt-offset ;

@@ -316,7 +316,28 @@ M: timestamp <=> ( ts1 ts2 -- n )
: timestamp>rfc3339 ( timestamp -- str )
>gmt [
(timestamp>rfc3339)
] string-out ;
] string-out ;

: expect read1 assert= ;

: (rfc3339>timestamp) ( -- timestamp )
4 read string>number ! year
CHAR: - expect
2 read string>number ! month
CHAR: - expect
2 read string>number ! day
CHAR: T expect
2 read string>number ! hour
CHAR: : expect
2 read string>number ! minute
CHAR: : expect
2 read string>number ! second
0 <timestamp> ;

: rfc3339>timestamp ( str -- timestamp )
[
(rfc3339>timestamp)
] string-in ;

: file-time-string ( timestamp -- string )
[
@@ -65,8 +65,8 @@ PROTOCOL: prettyprint-section-protocol

: define-mimic ( group mimicker mimicked -- )
>r >r group-words r> r> [
pick "methods" word-prop at
[ method-def <method> spin define-method ] [ 3drop ] if*
pick "methods" word-prop at dup
[ method-def <method> spin define-method ] [ 3drop ] if
] 2curry each ;

: MIMIC:
@@ -235,6 +235,7 @@ ARTICLE: "changes" "Changes in the latest release"
{ "New, efficient implementations of " { $link bit? } " and " { $link log2 } " runs in constant time for large bignums" }
{ "New " { $link big-random } " word for generating large random numbers quickly" }
{ "Improved profiler no longer has to be explicitly enabled and disabled with a full recompile; instead, the " { $link profile } " word can be used at any time, and it dynamically patches words to increment call counts. There is no overhead when the profiler is not in use." }
{ "Calls to " { $link member? } " with a literal sequence are now open-coded. If there are four or fewer elements, a series of conditionals are generated; if there are more than four elements, there is a hash dispatch followed by conditionals in each branch." }
}
{ $subheading "IO" }
{ $list

@@ -247,7 +248,7 @@ ARTICLE: "changes" "Changes in the latest release"
{ { $vocab-link "io.server" } " - improved logging support, logs to a file by default" }
{ { $vocab-link "io.files" } " - several new file system manipulation words added" }
{ { $vocab-link "tar" } " - tar file extraction in pure Factor (Doug Coleman)" }
{ { $vocab-link "unix.linux" } ", " { $vocab-link "raptor" } " - ``Raptor Linux'', a set of alien bindings to low-level Linux features, such as network interface configuration, file system mounting/unmounting, etc, together with experimental boot scripts intended to entirely replace " { $snippet "/sbin/init" } ", " { $vocab-link "/etc/inittab" } " and " { $snippet "/etc/init.d/" } " (Eduardo Cavazos)." }
{ { $vocab-link "unix.linux" } ", " { $vocab-link "raptor" } " - ``Raptor Linux'', a set of alien bindings to low-level Linux features, such as network interface configuration, file system mounting/unmounting, etc, together with experimental boot scripts intended to entirely replace " { $snippet "/sbin/init" } ", " { $snippet "/etc/inittab" } " and " { $snippet "/etc/init.d/" } " (Eduardo Cavazos)." }
}
{ $subheading "Tools" }
{ $list

@@ -264,7 +265,7 @@ ARTICLE: "changes" "Changes in the latest release"
{ "Windows can be closed on request now using " { $link close-window } }
{ "New icons (Elie Chaftari)" }
}
{ $subheading "Other" }
{ $subheading "Libraries" }
{ $list
{ "The " { $snippet "queues" } " vocabulary has been removed because its functionality is a subset of " { $vocab-link "dlists" } }
{ "The " { $vocab-link "webapps.cgi" } " vocabulary implements CGI support for the Factor HTTP server." }
@@ -278,10 +279,14 @@ ARTICLE: "changes" "Changes in the latest release"
{ { $vocab-link "editors.notepadpp" } " - Notepad++ integration (Doug Coleman)" }
{ { $vocab-link "editors.ted-notepad" } " - TED Notepad integration (Doug Coleman)" }
{ { $vocab-link "editors.ultraedit" } " - UltraEdit integration (Doug Coleman)" }
{ { $vocab-link "globs" } " - simple Unix shell-style glob patterns" }
{ { $vocab-link "heaps" } " - updated for new module system and cleaned up (Doug Coleman)" }
{ { $vocab-link "peg" } " - Parser Expression Grammars, a new approach to parser construction, similar to parser combinators (Chris Double)" }
{ { $vocab-link "regexp" } " - revived from " { $snippet "unmaintained/" } " and completely redesigned (Doug Coleman)" }
{ { $vocab-link "tuple.lib" } " - some utility words for working with tuples (Doug Coleman)" }
{ { $vocab-link "rss" } " - add Atom feed generation (Daniel Ehrenberg)" }
{ { $vocab-link "tuples.lib" } " - some utility words for working with tuples (Doug Coleman)" }
{ { $vocab-link "webapps.pastebin" } " - improved appearance, add Atom feed generation, add syntax highlighting using " { $vocab-link "xmode" } }
{ { $vocab-link "webapps.planet" } " - add Atom feed generation" }
}
{ $heading "Factor 0.90" }
{ $subheading "Core" }
@@ -32,7 +32,7 @@ M: nehe4-gadget draw-gadget* ( gadget -- )
glLoadIdentity
-1.5 0.0 -6.0 glTranslatef
dup nehe4-gadget-rtri 0.0 1.0 0.0 glRotatef


GL_TRIANGLES [
1.0 0.0 0.0 glColor3f
0.0 1.0 0.0 glVertex3f

@@ -52,23 +52,23 @@ M: nehe4-gadget draw-gadget* ( gadget -- )
1.0 1.0 0.0 glVertex3f
1.0 -1.0 0.0 glVertex3f
-1.0 -1.0 0.0 glVertex3f
] do-state
] do-state
dup nehe4-gadget-rtri 0.2 + over set-nehe4-gadget-rtri
dup nehe4-gadget-rquad 0.15 - swap set-nehe4-gadget-rquad ;

: nehe4-update-thread ( gadget -- )
dup nehe4-gadget-quit? [
redraw-interval sleep
dup relayout-1
nehe4-update-thread
] unless ;

: nehe4-update-thread ( gadget -- )
dup nehe4-gadget-quit? [ drop ] [
redraw-interval sleep
dup relayout-1
nehe4-update-thread
] if ;

M: nehe4-gadget graft* ( gadget -- )
[ f swap set-nehe4-gadget-quit? ] keep
[ nehe4-update-thread ] in-thread drop ;
[ f swap set-nehe4-gadget-quit? ] keep
[ nehe4-update-thread ] in-thread drop ;

M: nehe4-gadget ungraft* ( gadget -- )
t swap set-nehe4-gadget-quit? ;
t swap set-nehe4-gadget-quit? ;

: run4 ( -- )
<nehe4-gadget> "NeHe Tutorial 4" open-window ;
@@ -1,7 +1,7 @@
! Copyright (C) 2004 Chris Double.
! See http://factorcode.org/license.txt for BSD license.
USING: lazy-lists promises kernel sequences strings math
arrays splitting quotations combinators ;
arrays splitting quotations combinators namespaces ;
IN: parser-combinators

! Parser combinator protocol

@@ -30,16 +30,32 @@ C: <parse-result> parse-result
rot slice-seq <slice>
] if ;

TUPLE: token-parser string ;
: string= ( str1 str2 ignore-case -- ? )
[ [ >upper ] 2apply ] when sequence= ;

C: token token-parser ( string -- parser )
: string-head? ( str head ignore-case -- ? )
pick pick shorter? [
3drop f
] [
>r [ length head-slice ] keep r> string=
] if ;

: ?string-head ( str head ignore-case -- newstr ? )
>r 2dup r> string-head?
[ length tail-slice t ] [ drop f ] if ;

TUPLE: token-parser string ignore-case? ;

C: <token-parser> token-parser

: token ( string -- parser ) f <token-parser> ;

: case-insensitive-token ( string -- parser ) t <token-parser> ;

M: token-parser parse ( input parser -- list )
token-parser-string swap over ?head-slice [
<parse-result> 1list
] [
2drop nil
] if ;
dup token-parser-string swap token-parser-ignore-case?
>r tuck r> ?string-head
[ <parse-result> 1list ] [ 2drop nil ] if ;

: 1token ( n -- parser ) 1string token ;

@@ -224,7 +240,7 @@ LAZY: <*> ( parser -- parser )

LAZY: <?> ( parser -- parser )
#! Return a parser that optionally uses the parser
#! if that parser would be successfull.
#! if that parser would be successful.
[ 1array ] <@ f succeed <|> ;

TUPLE: only-first-parser p1 ;

@@ -261,6 +277,10 @@ LAZY: <!?> ( parser -- parser )
#! required.
<?> only-first ;

LAZY: <(?)> ( parser -- parser )
#! Like <?> but take shortest match first.
f succeed swap [ 1array ] <@ <|> ;

LAZY: <(*)> ( parser -- parser )
#! Like <*> but take shortest match first.
#! Implementation by Matthew Willis.

@@ -290,8 +310,13 @@ LAZY: <(+)> ( parser -- parser )
LAZY: surrounded-by ( parser start end -- parser' )
[ token ] 2apply swapd pack ;

: flatten* ( obj -- )
dup array? [ [ flatten* ] each ] [ , ] if ;

: flatten [ flatten* ] { } make ;

: exactly-n ( parser n -- parser' )
swap <repetition> <and-parser> ;
swap <repetition> <and-parser> [ flatten ] <@ ;

: at-most-n ( parser n -- parser' )
dup zero? [

@@ -305,4 +330,4 @@ LAZY: surrounded-by ( parser start end -- parser' )
dupd exactly-n swap <*> <&> ;

: from-m-to-n ( parser m n -- parser' )
>r [ exactly-n ] 2keep r> swap - at-most-n <&> ;
>r [ exactly-n ] 2keep r> swap - at-most-n <:&:> ;
@@ -0,0 +1 @@
Gavin Harrison

@@ -0,0 +1,84 @@
! Copyright (C) 2007 Gavin Harrison
! See http://factorcode.org/license.txt for BSD license.

USING: kernel sequences arrays vectors namespaces math strings
combinators continuations quotations io assocs ;

IN: prolog

SYMBOL: pldb
SYMBOL: plchoice

: init-pl ( -- ) V{ } clone pldb set V{ } clone plchoice set ;

: reset-choice ( -- ) V{ } clone plchoice set ;
: remove-choice ( -- ) plchoice get pop drop ;
: add-choice ( continuation -- )
dup continuation? [ plchoice get push ] [ drop ] if ;
: last-choice ( -- ) plchoice get pop continue ;

: rules ( -- vector ) pldb get ;
: rule ( n -- rule ) dup rules length >= [ drop "No." ] [ rules nth ] if ;

: var? ( pl-obj -- ? )
dup string? [ 0 swap nth LETTER? ] [ drop f ] if ;
: const? ( pl-obj -- ? ) var? not ;

: check-arity ( pat fact -- pattern fact ? ) 2dup [ length ] 2apply = ;
: check-elements ( pat fact -- ? ) [ over var? [ 2drop t ] [ = ] if ] 2all? ;
: (double-bound) ( key value assoc -- ? )
pick over at* [ pick = >r 3drop r> ] [ drop swapd set-at t ] if ;
: single-bound? ( pat-d pat-f -- ? )
H{ } clone [ (double-bound) ] curry 2all? ;
: match-pattern ( pat fact -- ? )
check-arity [ 2dup check-elements -rot single-bound? and ] [ 2drop f ] if ;
: good-result? ( pat fact -- pat fact ? )
2dup dup "No." = [ 2drop t ] [ match-pattern ] if ;

: add-rule ( name pat body -- ) 3array rules dup length swap set-nth ;

: (lookup-rule) ( name num -- pat-f rules )
dup rule dup "No." = >r 0 swap nth swapd dupd = swapd r> or
[ dup rule [ ] callcc0 add-choice ] when
dup number? [ 1+ (lookup-rule) ] [ 2nip ] if ;

: add-bindings ( pat-d pat-f binds -- binds )
clone
[ over var? over const? or
[ 2drop ] [ rot dup >r set-at r> ] if
] 2reduce ;
: init-binds ( pat-d pat-f -- binds ) V{ } clone add-bindings >alist ;

: replace-if-bound ( binds elt -- binds elt' )
over 2dup key? [ at ] [ drop ] if ;
: deep-replace ( binds seq -- binds seq' )
[ dup var? [ replace-if-bound ]
[ dup array? [ dupd deep-replace nip ] when ] if
] map ;

: backtrace? ( result -- )
dup "No." = [ remove-choice last-choice ]
[ [ last-choice ] unless ] if ;

: resolve-rule ( pat-d pat-f rule-body -- binds )
>r 2dup init-binds r> [ deep-replace >quotation call dup backtrace?
dup t = [ drop ] when ] each ;

: rule>pattern ( rule -- pattern ) 1 swap nth ;
: rule>body ( rule -- body ) 2 swap nth ;

: binds>fact ( pat-d pat-f binds -- fact )
[ 2dup key? [ at ] [ drop ] if ] curry map good-result?
[ nip ] [ last-choice ] if ;

: lookup-rule ( name pat -- fact )
swap 0 (lookup-rule) dup "No." =
[ nip ]
[ dup rule>pattern swapd check-arity
[ rot rule>body resolve-rule dup -roll binds>fact nip ] [ last-choice ] if
] if ;

: binding-resolve ( binds name pat -- binds )
tuck lookup-rule dup backtrace? swap rot add-bindings ;

: is ( binds val var -- binds ) rot [ set-at ] keep ;

@@ -0,0 +1 @@
Implementation of an embedded prolog for factor

@@ -0,0 +1 @@
prolog
@@ -1,174 +1,201 @@
USING: regexp tools.test ;
USING: regexp tools.test kernel ;
IN: regexp-tests

[ f ] [ "b" "a*" matches? ] unit-test
[ t ] [ "" "a*" matches? ] unit-test
[ t ] [ "a" "a*" matches? ] unit-test
[ t ] [ "aaaaaaa" "a*" matches? ] unit-test
[ f ] [ "ab" "a*" matches? ] unit-test
[ f ] [ "b" "a*" f <regexp> matches? ] unit-test
[ t ] [ "" "a*" f <regexp> matches? ] unit-test
[ t ] [ "a" "a*" f <regexp> matches? ] unit-test
[ t ] [ "aaaaaaa" "a*" f <regexp> matches? ] unit-test
[ f ] [ "ab" "a*" f <regexp> matches? ] unit-test

[ t ] [ "abc" "abc" matches? ] unit-test
[ t ] [ "a" "a|b|c" matches? ] unit-test
[ t ] [ "b" "a|b|c" matches? ] unit-test
[ t ] [ "c" "a|b|c" matches? ] unit-test
[ f ] [ "c" "d|e|f" matches? ] unit-test
[ t ] [ "abc" "abc" f <regexp> matches? ] unit-test
[ t ] [ "a" "a|b|c" f <regexp> matches? ] unit-test
[ t ] [ "b" "a|b|c" f <regexp> matches? ] unit-test
[ t ] [ "c" "a|b|c" f <regexp> matches? ] unit-test
[ f ] [ "c" "d|e|f" f <regexp> matches? ] unit-test

[ f ] [ "aa" "a|b|c" matches? ] unit-test
[ f ] [ "bb" "a|b|c" matches? ] unit-test
[ f ] [ "cc" "a|b|c" matches? ] unit-test
[ f ] [ "cc" "d|e|f" matches? ] unit-test
[ f ] [ "aa" "a|b|c" f <regexp> matches? ] unit-test
[ f ] [ "bb" "a|b|c" f <regexp> matches? ] unit-test
[ f ] [ "cc" "a|b|c" f <regexp> matches? ] unit-test
[ f ] [ "cc" "d|e|f" f <regexp> matches? ] unit-test

[ f ] [ "" "a+" matches? ] unit-test
[ t ] [ "a" "a+" matches? ] unit-test
[ t ] [ "aa" "a+" matches? ] unit-test
[ f ] [ "" "a+" f <regexp> matches? ] unit-test
[ t ] [ "a" "a+" f <regexp> matches? ] unit-test
[ t ] [ "aa" "a+" f <regexp> matches? ] unit-test

[ t ] [ "" "a?" matches? ] unit-test
[ t ] [ "a" "a?" matches? ] unit-test
[ f ] [ "aa" "a?" matches? ] unit-test
[ t ] [ "" "a?" f <regexp> matches? ] unit-test
[ t ] [ "a" "a?" f <regexp> matches? ] unit-test
[ f ] [ "aa" "a?" f <regexp> matches? ] unit-test

[ f ] [ "" "." matches? ] unit-test
[ t ] [ "a" "." matches? ] unit-test
[ t ] [ "." "." matches? ] unit-test
! [ f ] [ "\n" "." matches? ] unit-test
[ f ] [ "" "." f <regexp> matches? ] unit-test
[ t ] [ "a" "." f <regexp> matches? ] unit-test
[ t ] [ "." "." f <regexp> matches? ] unit-test
! [ f ] [ "\n" "." f <regexp> matches? ] unit-test

[ f ] [ "" ".+" matches? ] unit-test
[ t ] [ "a" ".+" matches? ] unit-test
[ t ] [ "ab" ".+" matches? ] unit-test
[ f ] [ "" ".+" f <regexp> matches? ] unit-test
[ t ] [ "a" ".+" f <regexp> matches? ] unit-test
[ t ] [ "ab" ".+" f <regexp> matches? ] unit-test

[ t ] [ "" "a|b*|c+|d?" matches? ] unit-test
[ t ] [ "a" "a|b*|c+|d?" matches? ] unit-test
[ t ] [ "c" "a|b*|c+|d?" matches? ] unit-test
[ t ] [ "cc" "a|b*|c+|d?" matches? ] unit-test
[ f ] [ "ccd" "a|b*|c+|d?" matches? ] unit-test
[ t ] [ "d" "a|b*|c+|d?" matches? ] unit-test
[ t ] [ "" "a|b*|c+|d?" f <regexp> matches? ] unit-test
[ t ] [ "a" "a|b*|c+|d?" f <regexp> matches? ] unit-test
[ t ] [ "c" "a|b*|c+|d?" f <regexp> matches? ] unit-test
[ t ] [ "cc" "a|b*|c+|d?" f <regexp> matches? ] unit-test
[ f ] [ "ccd" "a|b*|c+|d?" f <regexp> matches? ] unit-test
[ t ] [ "d" "a|b*|c+|d?" f <regexp> matches? ] unit-test

[ t ] [ "foo" "foo|bar" matches? ] unit-test
[ t ] [ "bar" "foo|bar" matches? ] unit-test
[ f ] [ "foobar" "foo|bar" matches? ] unit-test
[ t ] [ "foo" "foo|bar" f <regexp> matches? ] unit-test
[ t ] [ "bar" "foo|bar" f <regexp> matches? ] unit-test
[ f ] [ "foobar" "foo|bar" f <regexp> matches? ] unit-test

[ f ] [ "" "(a)" matches? ] unit-test
[ t ] [ "a" "(a)" matches? ] unit-test
[ f ] [ "aa" "(a)" matches? ] unit-test
[ t ] [ "aa" "(a*)" matches? ] unit-test
[ f ] [ "" "(a)" f <regexp> matches? ] unit-test
[ t ] [ "a" "(a)" f <regexp> matches? ] unit-test
[ f ] [ "aa" "(a)" f <regexp> matches? ] unit-test
[ t ] [ "aa" "(a*)" f <regexp> matches? ] unit-test

[ f ] [ "aababaaabbac" "(a|b)+" matches? ] unit-test
[ t ] [ "ababaaabba" "(a|b)+" matches? ] unit-test
[ f ] [ "aababaaabbac" "(a|b)+" f <regexp> matches? ] unit-test
[ t ] [ "ababaaabba" "(a|b)+" f <regexp> matches? ] unit-test

[ f ] [ "" "a{1}" matches? ] unit-test
[ t ] [ "a" "a{1}" matches? ] unit-test
[ f ] [ "aa" "a{1}" matches? ] unit-test
[ f ] [ "" "a{1}" f <regexp> matches? ] unit-test
[ t ] [ "a" "a{1}" f <regexp> matches? ] unit-test
[ f ] [ "aa" "a{1}" f <regexp> matches? ] unit-test

[ f ] [ "a" "a{2,}" matches? ] unit-test
[ t ] [ "aaa" "a{2,}" matches? ] unit-test
[ t ] [ "aaaa" "a{2,}" matches? ] unit-test
[ t ] [ "aaaaa" "a{2,}" matches? ] unit-test
[ f ] [ "a" "a{2,}" f <regexp> matches? ] unit-test
[ t ] [ "aaa" "a{2,}" f <regexp> matches? ] unit-test
[ t ] [ "aaaa" "a{2,}" f <regexp> matches? ] unit-test
[ t ] [ "aaaaa" "a{2,}" f <regexp> matches? ] unit-test

[ t ] [ "" "a{,2}" matches? ] unit-test
[ t ] [ "a" "a{,2}" matches? ] unit-test
[ t ] [ "aa" "a{,2}" matches? ] unit-test
[ f ] [ "aaa" "a{,2}" matches? ] unit-test
[ f ] [ "aaaa" "a{,2}" matches? ] unit-test
[ f ] [ "aaaaa" "a{,2}" matches? ] unit-test
[ t ] [ "" "a{,2}" f <regexp> matches? ] unit-test
[ t ] [ "a" "a{,2}" f <regexp> matches? ] unit-test
[ t ] [ "aa" "a{,2}" f <regexp> matches? ] unit-test
[ f ] [ "aaa" "a{,2}" f <regexp> matches? ] unit-test
[ f ] [ "aaaa" "a{,2}" f <regexp> matches? ] unit-test
[ f ] [ "aaaaa" "a{,2}" f <regexp> matches? ] unit-test

[ f ] [ "" "a{1,3}" matches? ] unit-test
[ t ] [ "a" "a{1,3}" matches? ] unit-test
[ t ] [ "aa" "a{1,3}" matches? ] unit-test
[ t ] [ "aaa" "a{1,3}" matches? ] unit-test
[ f ] [ "aaaa" "a{1,3}" matches? ] unit-test
[ f ] [ "" "a{1,3}" f <regexp> matches? ] unit-test
[ t ] [ "a" "a{1,3}" f <regexp> matches? ] unit-test
[ t ] [ "aa" "a{1,3}" f <regexp> matches? ] unit-test
[ t ] [ "aaa" "a{1,3}" f <regexp> matches? ] unit-test
[ f ] [ "aaaa" "a{1,3}" f <regexp> matches? ] unit-test

[ f ] [ "" "[a]" matches? ] unit-test
[ t ] [ "a" "[a]" matches? ] unit-test
[ t ] [ "a" "[abc]" matches? ] unit-test
[ f ] [ "b" "[a]" matches? ] unit-test
[ f ] [ "d" "[abc]" matches? ] unit-test
[ t ] [ "ab" "[abc]{1,2}" matches? ] unit-test
[ f ] [ "abc" "[abc]{1,2}" matches? ] unit-test
[ f ] [ "" "[a]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[a]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[abc]" f <regexp> matches? ] unit-test
[ f ] [ "b" "[a]" f <regexp> matches? ] unit-test
[ f ] [ "d" "[abc]" f <regexp> matches? ] unit-test
[ t ] [ "ab" "[abc]{1,2}" f <regexp> matches? ] unit-test
[ f ] [ "abc" "[abc]{1,2}" f <regexp> matches? ] unit-test

[ f ] [ "" "[^a]" matches? ] unit-test
[ f ] [ "a" "[^a]" matches? ] unit-test
[ f ] [ "a" "[^abc]" matches? ] unit-test
[ t ] [ "b" "[^a]" matches? ] unit-test
[ t ] [ "d" "[^abc]" matches? ] unit-test
[ f ] [ "ab" "[^abc]{1,2}" matches? ] unit-test
[ f ] [ "abc" "[^abc]{1,2}" matches? ] unit-test
[ f ] [ "" "[^a]" f <regexp> matches? ] unit-test
[ f ] [ "a" "[^a]" f <regexp> matches? ] unit-test
[ f ] [ "a" "[^abc]" f <regexp> matches? ] unit-test
[ t ] [ "b" "[^a]" f <regexp> matches? ] unit-test
[ t ] [ "d" "[^abc]" f <regexp> matches? ] unit-test
[ f ] [ "ab" "[^abc]{1,2}" f <regexp> matches? ] unit-test
[ f ] [ "abc" "[^abc]{1,2}" f <regexp> matches? ] unit-test

[ t ] [ "]" "[]]" matches? ] unit-test
[ f ] [ "]" "[^]]" matches? ] unit-test
[ t ] [ "]" "[]]" f <regexp> matches? ] unit-test
[ f ] [ "]" "[^]]" f <regexp> matches? ] unit-test

! [ "^" "[^]" matches? ] unit-test-fails
[ t ] [ "^" "[]^]" matches? ] unit-test
[ t ] [ "]" "[]^]" matches? ] unit-test
! [ "^" "[^]" f <regexp> matches? ] unit-test-fails
[ t ] [ "^" "[]^]" f <regexp> matches? ] unit-test
[ t ] [ "]" "[]^]" f <regexp> matches? ] unit-test

[ t ] [ "[" "[[]" matches? ] unit-test
[ f ] [ "^" "[^^]" matches? ] unit-test
[ t ] [ "a" "[^^]" matches? ] unit-test
[ t ] [ "[" "[[]" f <regexp> matches? ] unit-test
[ f ] [ "^" "[^^]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[^^]" f <regexp> matches? ] unit-test

[ t ] [ "-" "[-]" matches? ] unit-test
[ f ] [ "a" "[-]" matches? ] unit-test
[ f ] [ "-" "[^-]" matches? ] unit-test
[ t ] [ "a" "[^-]" matches? ] unit-test
[ t ] [ "-" "[-]" f <regexp> matches? ] unit-test
[ f ] [ "a" "[-]" f <regexp> matches? ] unit-test
[ f ] [ "-" "[^-]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[^-]" f <regexp> matches? ] unit-test

[ t ] [ "-" "[-a]" matches? ] unit-test
[ t ] [ "a" "[-a]" matches? ] unit-test
[ t ] [ "-" "[a-]" matches? ] unit-test
[ t ] [ "a" "[a-]" matches? ] unit-test
[ f ] [ "b" "[a-]" matches? ] unit-test
[ f ] [ "-" "[^-]" matches? ] unit-test
[ t ] [ "a" "[^-]" matches? ] unit-test
[ t ] [ "-" "[-a]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[-a]" f <regexp> matches? ] unit-test
[ t ] [ "-" "[a-]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[a-]" f <regexp> matches? ] unit-test
[ f ] [ "b" "[a-]" f <regexp> matches? ] unit-test
[ f ] [ "-" "[^-]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[^-]" f <regexp> matches? ] unit-test

[ f ] [ "-" "[a-c]" matches? ] unit-test
[ t ] [ "-" "[^a-c]" matches? ] unit-test
[ t ] [ "b" "[a-c]" matches? ] unit-test
[ f ] [ "b" "[^a-c]" matches? ] unit-test
[ f ] [ "-" "[a-c]" f <regexp> matches? ] unit-test
[ t ] [ "-" "[^a-c]" f <regexp> matches? ] unit-test
[ t ] [ "b" "[a-c]" f <regexp> matches? ] unit-test
[ f ] [ "b" "[^a-c]" f <regexp> matches? ] unit-test

[ t ] [ "-" "[a-c-]" matches? ] unit-test
[ f ] [ "-" "[^a-c-]" matches? ] unit-test
[ t ] [ "-" "[a-c-]" f <regexp> matches? ] unit-test
[ f ] [ "-" "[^a-c-]" f <regexp> matches? ] unit-test

[ t ] [ "\\" "[\\\\]" matches? ] unit-test
[ f ] [ "a" "[\\\\]" matches? ] unit-test
[ f ] [ "\\" "[^\\\\]" matches? ] unit-test
[ t ] [ "a" "[^\\\\]" matches? ] unit-test
[ t ] [ "\\" "[\\\\]" f <regexp> matches? ] unit-test
[ f ] [ "a" "[\\\\]" f <regexp> matches? ] unit-test
[ f ] [ "\\" "[^\\\\]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[^\\\\]" f <regexp> matches? ] unit-test

[ t ] [ "0" "[\\d]" matches? ] unit-test
[ f ] [ "a" "[\\d]" matches? ] unit-test
[ f ] [ "0" "[^\\d]" matches? ] unit-test
[ t ] [ "a" "[^\\d]" matches? ] unit-test
[ t ] [ "0" "[\\d]" f <regexp> matches? ] unit-test
[ f ] [ "a" "[\\d]" f <regexp> matches? ] unit-test
[ f ] [ "0" "[^\\d]" f <regexp> matches? ] unit-test
[ t ] [ "a" "[^\\d]" f <regexp> matches? ] unit-test

[ t ] [ "a" "[a-z]{1,}|[A-Z]{2,4}|b*|c|(f|g)*" matches? ] unit-test
[ t ] [ "a" "[a-z]{1,2}|[A-Z]{3,3}|b*|c|(f|g)*" matches? ] unit-test
[ t ] [ "a" "[a-z]{1,2}|[A-Z]{3,3}" matches? ] unit-test
[ t ] [ "a" "[a-z]{1,}|[A-Z]{2,4}|b*|c|(f|g)*" f <regexp> matches? ] unit-test
[ t ] [ "a" "[a-z]{1,2}|[A-Z]{3,3}|b*|c|(f|g)*" f <regexp> matches? ] unit-test
[ t ] [ "a" "[a-z]{1,2}|[A-Z]{3,3}" f <regexp> matches? ] unit-test

[ t ] [ "1000" "\\d{4,6}" matches? ] unit-test
[ t ] [ "1000" "[0-9]{4,6}" matches? ] unit-test
[ t ] [ "1000" "\\d{4,6}" f <regexp> matches? ] unit-test
[ t ] [ "1000" "[0-9]{4,6}" f <regexp> matches? ] unit-test

[ t ] [ "abc" "\\p{Lower}{3}" matches? ] unit-test
[ f ] [ "ABC" "\\p{Lower}{3}" matches? ] unit-test
[ t ] [ "ABC" "\\p{Upper}{3}" matches? ] unit-test
[ f ] [ "abc" "\\p{Upper}{3}" matches? ] unit-test
[ t ] [ "abc" "\\p{Lower}{3}" f <regexp> matches? ] unit-test
[ f ] [ "ABC" "\\p{Lower}{3}" f <regexp> matches? ] unit-test
[ t ] [ "ABC" "\\p{Upper}{3}" f <regexp> matches? ] unit-test
[ f ] [ "abc" "\\p{Upper}{3}" f <regexp> matches? ] unit-test

[ f ] [ "abc" "[\\p{Upper}]{3}" matches? ] unit-test
[ t ] [ "ABC" "[\\p{Upper}]{3}" matches? ] unit-test
[ f ] [ "abc" "[\\p{Upper}]{3}" f <regexp> matches? ] unit-test
[ t ] [ "ABC" "[\\p{Upper}]{3}" f <regexp> matches? ] unit-test

[ t ] [ "" "\\Q\\E" matches? ] unit-test
[ f ] [ "a" "\\Q\\E" matches? ] unit-test
[ t ] [ "|*+" "\\Q|*+\\E" matches? ] unit-test
[ f ] [ "abc" "\\Q|*+\\E" matches? ] unit-test
[ t ] [ "" "\\Q\\E" f <regexp> matches? ] unit-test
[ f ] [ "a" "\\Q\\E" f <regexp> matches? ] unit-test
[ t ] [ "|*+" "\\Q|*+\\E" f <regexp> matches? ] unit-test
[ f ] [ "abc" "\\Q|*+\\E" f <regexp> matches? ] unit-test

[ t ] [ "S" "\\0123" matches? ] unit-test
[ t ] [ "SXY" "\\0123XY" matches? ] unit-test
[ t ] [ "x" "\\x78" matches? ] unit-test
[ f ] [ "y" "\\x78" matches? ] unit-test
[ t ] [ "x" "\\u0078" matches? ] unit-test
[ f ] [ "y" "\\u0078" matches? ] unit-test
[ t ] [ "S" "\\0123" f <regexp> matches? ] unit-test
[ t ] [ "SXY" "\\0123XY" f <regexp> matches? ] unit-test
[ t ] [ "x" "\\x78" f <regexp> matches? ] unit-test
[ f ] [ "y" "\\x78" f <regexp> matches? ] unit-test
[ t ] [ "x" "\\u0078" f <regexp> matches? ] unit-test
[ f ] [ "y" "\\u0078" f <regexp> matches? ] unit-test

[ t ] [ "ab" "a+b" matches? ] unit-test
[ f ] [ "b" "a+b" matches? ] unit-test
[ t ] [ "aab" "a+b" matches? ] unit-test
[ f ] [ "abb" "a+b" matches? ] unit-test
[ t ] [ "ab" "a+b" f <regexp> matches? ] unit-test
[ f ] [ "b" "a+b" f <regexp> matches? ] unit-test
[ t ] [ "aab" "a+b" f <regexp> matches? ] unit-test
[ f ] [ "abb" "a+b" f <regexp> matches? ] unit-test

[ t ] [ "abbbb" "ab*" matches? ] unit-test
[ t ] [ "a" "ab*" matches? ] unit-test
[ f ] [ "abab" "ab*" matches? ] unit-test
[ t ] [ "abbbb" "ab*" f <regexp> matches? ] unit-test
[ t ] [ "a" "ab*" f <regexp> matches? ] unit-test
[ f ] [ "abab" "ab*" f <regexp> matches? ] unit-test

[ f ] [ "x" "\\." matches? ] unit-test
[ t ] [ "." "\\." matches? ] unit-test
[ f ] [ "x" "\\." f <regexp> matches? ] unit-test
[ t ] [ "." "\\." f <regexp> matches? ] unit-test

[ t ] [ "aaaab" "a+ab" f <regexp> matches? ] unit-test
[ f ] [ "aaaxb" "a+ab" f <regexp> matches? ] unit-test
[ t ] [ "aaacb" "a+cb" f <regexp> matches? ] unit-test
[ f ] [ "aaaab" "a++ab" f <regexp> matches? ] unit-test
[ t ] [ "aaacb" "a++cb" f <regexp> matches? ] unit-test

[ 3 ] [ "aaacb" "a*" f <regexp> match-head ] unit-test
[ 1 ] [ "aaacb" "a+?" f <regexp> match-head ] unit-test
[ 2 ] [ "aaacb" "aa?" f <regexp> match-head ] unit-test
[ 1 ] [ "aaacb" "aa??" f <regexp> match-head ] unit-test
[ 3 ] [ "aacb" "aa?c" f <regexp> match-head ] unit-test
[ 3 ] [ "aacb" "aa??c" f <regexp> match-head ] unit-test

[ t ] [ "aaa" "AAA" t <regexp> matches? ] unit-test
[ f ] [ "aax" "AAA" t <regexp> matches? ] unit-test
[ t ] [ "aaa" "A*" t <regexp> matches? ] unit-test
[ f ] [ "aaba" "A*" t <regexp> matches? ] unit-test
[ t ] [ "b" "[AB]" t <regexp> matches? ] unit-test
[ f ] [ "c" "[AB]" t <regexp> matches? ] unit-test
[ t ] [ "c" "[A-Z]" t <regexp> matches? ] unit-test
[ f ] [ "3" "[A-Z]" t <regexp> matches? ] unit-test

[ ] [
"(0[lL]?|[1-9]\\d{0,9}(\\d{0,9}[lL])?|0[xX]\\p{XDigit}{1,8}(\\p{XDigit}{0,8}[lL])?|0[0-7]{1,11}([0-7]{0,11}[lL])?|([0-9]+\\.[0-9]*|\\.[0-9]+)([eE][+-]?[0-9]+)?[fFdD]?|[0-9]+([eE][+-]?[0-9]+[fFdD]?|([eE][+-]?[0-9]+)?[fFdD]))"
f <regexp> drop
] unit-test
@@ -1,15 +1,36 @@
USING: arrays combinators kernel lazy-lists math math.parser
namespaces parser parser-combinators parser-combinators.simple
promises quotations sequences combinators.lib strings macros
promises quotations sequences combinators.lib strings
assocs prettyprint.backend ;
USE: io
IN: regexp

<PRIVATE

SYMBOL: ignore-case?

: char=-quot ( ch -- quot )
ignore-case? get
[ ch>upper [ swap ch>upper = ] ] [ [ = ] ] if
curry ;

: char-between?-quot ( ch1 ch2 -- quot )
ignore-case? get
[ [ ch>upper ] 2apply [ >r >r ch>upper r> r> between? ] ]
[ [ between? ] ]
if 2curry ;

: or-predicates ( quots -- quot )
[ \ dup add* ] map [ [ t ] ] f short-circuit \ nip add ;

MACRO: fast-member? ( str -- quot )
[ dup ] H{ } map>assoc [ key? ] curry ;
: <@literal [ nip ] curry <@ ;

: <@delay [ curry ] curry <@ ;

PRIVATE>

: ascii? ( n -- ? )
0 HEX: 7f between? ;

: octal-digit? ( n -- ? )
CHAR: 0 CHAR: 7 between? ;
@@ -19,30 +40,32 @@ MACRO: fast-member? ( str -- quot )

: hex-digit? ( n -- ? )
dup decimal-digit?
swap CHAR: a CHAR: f between? or ;
over CHAR: a CHAR: f between? or
swap CHAR: A CHAR: F between? or ;

: control-char? ( n -- ? )
dup 0 HEX: 1f between?
swap HEX: 7f = or ;

: punct? ( n -- ? )
"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" fast-member? ;
"!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" member? ;

: c-identifier-char? ( ch -- ? )
dup alpha? swap CHAR: _ = or ;

: java-blank? ( n -- ? )
{
CHAR: \s
CHAR: \t CHAR: \n CHAR: \r
HEX: c HEX: 7 HEX: 1b
} fast-member? ;
} member? ;

: java-printable? ( n -- ? )
dup alpha? swap punct? or ;

: 'ordinary-char' ( -- parser )
[ "\\^*+?|(){}[$" fast-member? not ] satisfy
[ [ = ] curry ] <@ ;
[ "\\^*+?|(){}[$" member? not ] satisfy
[ char=-quot ] <@ ;

: 'octal-digit' ( -- parser ) [ octal-digit? ] satisfy ;
@@ -58,7 +81,7 @@ MACRO: fast-member? ( str -- quot )
[ hex> ] <@ ;

: satisfy-tokens ( assoc -- parser )
[ >r token r> [ nip ] curry <@ ] { } assoc>map <or-parser> ;
[ >r token r> <@literal ] { } assoc>map <or-parser> ;

: 'simple-escape-char' ( -- parser )
{

@@ -69,7 +92,7 @@ MACRO: fast-member? ( str -- quot )
{ "f" HEX: c }
{ "a" HEX: 7 }
{ "e" HEX: 1b }
} [ [ = ] curry ] assoc-map satisfy-tokens ;
} [ char=-quot ] assoc-map satisfy-tokens ;

: 'predefined-char-class' ( -- parser )
{

@@ -85,7 +108,7 @@ MACRO: fast-member? ( str -- quot )
{
{ "Lower" [ letter? ] }
{ "Upper" [ LETTER? ] }
{ "ASCII" [ 0 HEX: 7f between? ] }
{ "ASCII" [ ascii? ] }
{ "Alpha" [ Letter? ] }
{ "Digit" [ digit? ] }
{ "Alnum" [ alpha? ] }

@@ -103,7 +126,7 @@ MACRO: fast-member? ( str -- quot )
'hex' <|>
"c" token [ LETTER? ] satisfy &> <|>
any-char-parser <|>
[ [ = ] curry ] <@ ;
[ char=-quot ] <@ ;

: 'escape' ( -- parser )
"\\" token

@@ -113,7 +136,7 @@ MACRO: fast-member? ( str -- quot )
'simple-escape' <|> &> ;

: 'any-char'
"." token [ drop [ drop t ] ] <@ ;
"." token [ drop t ] <@literal ;

: 'char'
'any-char' 'escape' 'ordinary-char' <|> <|> [ satisfy ] <@ ;
@@ -124,21 +147,24 @@ TUPLE: group-result str ;

C: <group-result> group-result

: 'grouping'
: 'non-capturing-group' ( -- parser )
'regexp' "(?:" ")" surrounded-by ;

: 'group' ( -- parser )
'regexp' [ [ <group-result> ] <@ ] <@
"(" ")" surrounded-by ;

: 'range' ( -- parser )
any-char-parser "-" token <& any-char-parser <&>
[ first2 [ between? ] 2curry ] <@ ;
[ first2 char-between?-quot ] <@ ;

: 'character-class-term' ( -- parser )
'range'
'escape' <|>
[ "\\]" member? not ] satisfy [ [ = ] curry ] <@ <|> ;
[ "\\]" member? not ] satisfy [ char=-quot ] <@ <|> ;

: 'positive-character-class' ( -- parser )
"]" token [ drop [ CHAR: ] = ] ] <@ 'character-class-term' <*> <&:>
"]" token [ CHAR: ] = ] <@literal 'character-class-term' <*> <&:>
'character-class-term' <+> <|>
[ or-predicates ] <@ ;
@@ -151,66 +177,101 @@ C: <group-result> group-result
"[" "]" surrounded-by [ satisfy ] <@ ;

: 'escaped-seq' ( -- parser )
any-char-parser <*> [ token ] <@ "\\Q" "\\E" surrounded-by ;
any-char-parser <*>
[ ignore-case? get <token-parser> ] <@
"\\Q" "\\E" surrounded-by ;

: 'simple' ( -- parser )
'escaped-seq'
'grouping' <|>
'non-capturing-group' <|>
'group' <|>
'char' <|>
'character-class' <|> ;

: 'exactly-n' ( -- parser )
'integer' [ exactly-n ] <@delay ;

: 'at-least-n' ( -- parser )
'integer' "," token <& [ at-least-n ] <@delay ;

: 'at-most-n' ( -- parser )
"," token 'integer' &> [ at-most-n ] <@delay ;

: 'from-m-to-n' ( -- parser )
'integer' "," token <& 'integer' <&> [ first2 from-m-to-n ] <@delay ;

: 'greedy-interval' ( -- parser )
'simple' 'integer' "{" "}" surrounded-by <&> [ first2 exactly-n ] <@
'simple' 'integer' "{" ",}" surrounded-by <&> [ first2 at-least-n ] <@ <|>
'simple' 'integer' "{," "}" surrounded-by <&> [ first2 at-most-n ] <@ <|>
'simple' 'integer' "," token <& 'integer' <&> "{" "}" surrounded-by <&> [ first2 first2 from-m-to-n ] <@ <|> ;
'exactly-n' 'at-least-n' <|> 'at-most-n' <|> 'from-m-to-n' <|> ;

: 'interval' ( -- parser )
'greedy-interval'
'greedy-interval' "?" token <& [ "reluctant {}" print ] <@ <|>
'greedy-interval' "+" token <& [ "possessive {}" print ] <@ <|> ;

: 'greedy-repetition' ( -- parser )
'simple' "*" token <& [ <*> ] <@
'simple' "+" token <& [ <+> ] <@ <|>
'simple' "?" token <& [ <?> ] <@ <|> ;
'greedy-interval' "+" token <& [ "possessive {}" print ] <@ <|>
"{" "}" surrounded-by ;

: 'repetition' ( -- parser )
'greedy-repetition'
'greedy-repetition' "?" token <& [ "reluctant" print ] <@ <|>
'greedy-repetition' "+" token <& [ "possessive" print ] <@ <|> ;
! Posessive
"*+" token [ <!*> ] <@literal
"++" token [ <!+> ] <@literal <|>
"?+" token [ <!?> ] <@literal <|>
! Reluctant
"*?" token [ <(*)> ] <@literal <|>
"+?" token [ <(+)> ] <@literal <|>
"??" token [ <(?)> ] <@literal <|>
! Greedy
"*" token [ <*> ] <@literal <|>
"+" token [ <+> ] <@literal <|>
"?" token [ <?> ] <@literal <|> ;

: 'dummy' ( -- parser )
epsilon [ ] <@literal ;

: 'term' ( -- parser )
'simple' 'repetition' 'interval' <|> <|>
<+> [ <and-parser> ] <@ ;
'simple'
'repetition' 'interval' 'dummy' <|> <|> <&> [ first2 call ] <@
<!+> [ <and-parser> ] <@ ;

LAZY: 'regexp' ( -- parser )
'term' "|" token nonempty-list-of [ <or-parser> ] <@
"^" token 'term' "|" token nonempty-list-of [ <or-parser> ] <@
&> [ "caret" print ] <@ <|>
'term' "|" token nonempty-list-of [ <or-parser> ] <@
"$" token <& [ "dollar" print ] <@ <|>
"^" token 'term' "|" token nonempty-list-of [ <or-parser> ] <@ &>
"$" token [ "caret dollar" print ] <@ <& <|> ;
'term' "|" token nonempty-list-of [ <or-parser> ] <@ ;
! "^" token 'term' "|" token nonempty-list-of [ <or-parser> ] <@
! &> [ "caret" print ] <@ <|>
! 'term' "|" token nonempty-list-of [ <or-parser> ] <@
! "$" token <& [ "dollar" print ] <@ <|>
! "^" token 'term' "|" token nonempty-list-of [ <or-parser> ] <@ &>
! "$" token [ "caret dollar" print ] <@ <& <|> ;

TUPLE: regexp source parser ;
TUPLE: regexp source parser ignore-case? ;

: <regexp> dup 'regexp' just parse-1 regexp construct-boa ;
: <regexp> ( string ignore-case? -- regexp )
[
ignore-case? [
dup 'regexp' just parse-1
] with-variable
] keep regexp construct-boa ;

GENERIC: >regexp ( obj -- parser )

M: string >regexp <regexp> ;

M: object >regexp ;
: do-ignore-case ( string regexp -- string regexp )
dup regexp-ignore-case? [ >r >upper r> ] when ;

: matches? ( string regexp -- ? )
>regexp regexp-parser just parse nil? not ;
do-ignore-case regexp-parser just parse nil? not ;

: match-head ( string regexp -- end )
do-ignore-case regexp-parser parse dup nil?
[ drop f ] [ car parse-result-unparsed slice-from ] if ;

! Literal syntax for regexps
: parse-options ( string -- ? )
#! Lame
{
{ "" [ f ] }
{ "i" [ t ] }
} case ;

: parse-regexp ( accum end -- accum )
lexer get dup skip-blank [
[ index* dup 1+ swap ] 2keep swapd subseq swap
] change-column <regexp> parsed ;
] change-column
lexer get (parse-token) parse-options <regexp> parsed ;

: R! CHAR: ! parse-regexp ; parsing
: R" CHAR: " parse-regexp ; parsing
@@ -240,4 +301,9 @@ M: object >regexp ;
} swap [ subseq? not nip ] curry assoc-find drop ;

M: regexp pprint*
dup regexp-source dup find-regexp-syntax pprint-string ;
[
dup regexp-source
dup find-regexp-syntax swap % swap % %
dup regexp-ignore-case? [ "i" % ] when
] "" make
swap present-text ;
@@ -9,6 +9,9 @@ USING: xml.utilities kernel assocs
: ?children>string ( tag/f -- string/f )
[ children>string ] [ f ] if* ;

: any-tag-named ( tag names -- tag-inside )
f -rot [ tag-named nip dup ] curry* find 2drop ;

TUPLE: feed title link entries ;

C: <feed> feed

@@ -17,50 +20,51 @@ TUPLE: entry title link description pub-date ;

C: <entry> entry

: rss1.0-entry ( tag -- entry )
[ "title" tag-named children>string ] keep
[ "link" tag-named children>string ] keep
[ "description" tag-named children>string ] keep
f "date" "http://purl.org/dc/elements/1.1/" <name>
tag-named ?children>string
<entry> ;

: rss1.0 ( xml -- feed )
[
"channel" tag-named
[ "title" tag-named children>string ] keep
"link" tag-named children>string
] keep
"item" tags-named [
[ "title" tag-named children>string ] keep
[ "link" tag-named children>string ] keep
[ "description" tag-named children>string ] keep
f "date" "http://purl.org/dc/elements/1.1/" <name>
tag-named ?children>string
<entry>
] map <feed> ;
"item" tags-named [ rss1.0-entry ] map <feed> ;

: rss2.0-entry ( tag -- entry )
[ "title" tag-named children>string ] keep
[ "link" tag-named ] keep
[ "guid" tag-named dupd ? children>string ] keep
[ "description" tag-named children>string ] keep
"pubDate" tag-named children>string <entry> ;

: rss2.0 ( xml -- feed )
"channel" tag-named
[ "title" tag-named children>string ] keep
[ "link" tag-named children>string ] keep
"item" tags-named [
[ "title" tag-named children>string ] keep
[ "link" tag-named ] keep
[ "guid" tag-named dupd ? children>string ] keep
[ "description" tag-named children>string ] keep
"pubDate" tag-named children>string <entry>
] map <feed> ;
"item" tags-named [ rss2.0-entry ] map <feed> ;

: atom1.0-entry ( tag -- entry )
[ "title" tag-named children>string ] keep
[ "link" tag-named "href" swap at ] keep
[
{ "content" "summary" } any-tag-named
dup tag-children [ string? not ] contains?
[ tag-children [ write-chunk ] string-out ]
[ children>string ] if
] keep
{ "published" "updated" "issued" "modified" } any-tag-named
children>string <entry> ;

: atom1.0 ( xml -- feed )
[ "title" tag-named children>string ] keep
[ "link" tag-named "href" swap at ] keep
"entry" tags-named [
[ "title" tag-named children>string ] keep
[ "link" tag-named "href" swap at ] keep
[
dup "content" tag-named
[ nip ] [ "summary" tag-named ] if*
dup tag-children [ tag? ] contains?
[ tag-children [ write-chunk ] string-out ]
[ children>string ] if
] keep
dup "published" tag-named
[ nip ] [ "updated" tag-named ] if*
children>string <entry>
] map <feed> ;
"entry" tags-named [ atom1.0-entry ] map <feed> ;

: xml>feed ( xml -- feed )
dup name-tag {

@@ -92,7 +96,7 @@ C: <entry> entry
dup entry-title "title" { { "type" "html" } } simple-tag*,
"link" over entry-link "href" associate contained*,
dup entry-pub-date "published" simple-tag,
entry-description "content" { { "type" "html" } } simple-tag*,
entry-description [ "content" { { "type" "html" } } simple-tag*, ] when*
] tag, ;

: feed>xml ( feed -- xml )
@@ -1,5 +1,5 @@
USING: arrays kernel sequences sequences.lib math
math.functions tools.test ;
math.functions tools.test strings ;

[ 4 ] [ { 1 2 } [ sq ] [ * ] map-reduce ] unit-test
[ 36 ] [ { 2 3 } [ sq ] [ * ] map-reduce ] unit-test

@@ -42,3 +42,7 @@ math.functions tools.test ;

[ { 1 9 25 } ] [ { 1 3 5 6 } [ sq ] [ even? ] map-until ] unit-test
[ { 2 4 } ] [ { 2 4 1 3 } [ even? ] take-while ] unit-test

[ { { 0 0 } { 1 0 } { 0 1 } { 1 1 } } ] [ 2 2 exact-strings ] unit-test
[ t ] [ "ab" 4 strings [ >string ] map "abab" swap member? ] unit-test
[ { { } { 1 } { 2 } { 1 2 } } ] [ { 1 2 } power-set ] unit-test

@@ -1,5 +1,5 @@
USING: combinators.lib kernel sequences math namespaces
random sequences.private shuffle ;
USING: combinators.lib kernel sequences math namespaces assocs
random sequences.private shuffle math.functions mirrors ;
IN: sequences.lib

! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

@@ -74,3 +74,33 @@ IN: sequences.lib
[ not ] compose
[ find drop [ head-slice ] when* ] curry
[ dup ] swap compose keep like ;

! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

<PRIVATE
: translate-string ( n alphabet out-len -- seq )
[ drop /mod ] curry* map nip ;

: map-alphabet ( alphabet seq[seq] -- seq[seq] )
[ [ swap nth ] curry* map ] curry* map ;

: exact-number-strings ( n out-len -- seqs )
[ ^ ] 2keep [ translate-string ] 2curry map ;

: number-strings ( n max-length -- seqs )
1+ [ exact-number-strings ] curry* map concat ;
PRIVATE>

: exact-strings ( alphabet length -- seqs )
>r dup length r> exact-number-strings map-alphabet ;

: strings ( alphabet length -- seqs )
>r dup length r> number-strings map-alphabet ;

: nths ( nths seq -- subseq )
! nths is a sequence of ones and zeroes
>r [ length ] keep [ nth 1 = ] curry subset r>
[ nth ] curry { } map-as ;

: power-set ( seq -- subsets )
2 over length exact-number-strings swap [ nths ] curry map ;
@@ -1,25 +1,14 @@
USING: kernel sequences words math math.functions arrays
shuffle quotations parser math.parser strings namespaces
splitting effects ;
splitting effects sequences.lib ;
IN: shufflers

: shuffle>string ( names shuffle -- string )
swap [ [ nth ] curry map ] curry map
first2 "-" swap 3append >string ;

: translate ( n alphabet out-len -- seq )
[ drop /mod ] curry* map nip ;

: (combinations) ( alphabet out-len -- seq[seq] )
[ ^ ] 2keep [ translate ] 2curry map ;

: combinations ( n max-out -- seq[seq] )
! This returns a seq of length O(n^m)
! where and m is max-out
1+ [ (combinations) ] curry* map concat ;

: make-shuffles ( max-out max-in -- shuffles )
[ 1+ dup rot combinations [ 2array ] curry* map ]
[ 1+ dup rot strings [ 2array ] curry* map ]
curry* map concat ;

: shuffle>quot ( shuffle -- quot )
@@ -1,11 +1,11 @@
<% USING: namespaces io ; %>
<% USING: namespaces io furnace calendar ; %>

<h2>Annotation: <% "summary" get write %></h2>

<table>
<tr><th>Annotation by:</th><td><% "author" get write %></td></tr>
<tr><th>Channel:</th><td><% "channel" get write %></td></tr>
<tr><th>Created:</th><td><% "date" get write %></td></tr>
<tr><th>Created:</th><td><% "date" get timestamp>string write %></td></tr>
</table>

<% "syntax" render-template %>

@@ -1,7 +1,7 @@
<% USING: furnace namespaces ; %>

<%
"new paste" "title" set
"New paste" "title" set
"header" render-template
%>
@@ -1,11 +1,16 @@
<% USING: continuations namespaces io kernel math math.parser furnace webapps.pastebin ; %>
<% USING: continuations namespaces io kernel math math.parser
furnace webapps.pastebin calendar sequences ; %>

<tr>
<td>
<a href="<% model get paste-link write %>">
<% "summary" get write %>
<%
"summary" get
dup empty? [ drop "- no title -" ] when
write
%>
</a>
</td>
<td><% "author" get write %></td>
<td><% "date" get print %></td>
<td><% "date" get timestamp>string print %></td>
</tr>
@@ -60,7 +60,7 @@ SYMBOL: store
paste-summary
paste-link
paste-date
} get-slots "" swap <entry>
} get-slots timestamp>rfc3339 f swap <entry>
] map ;

: feed.xml ( -- )

@@ -75,13 +75,14 @@ SYMBOL: store
store get-global save-store ;

: add-paste ( paste pastebin -- )
>r now timestamp>http-string over set-paste-date r>
>r now over set-paste-date r>
pastebin-pastes 2dup length swap set-paste-n push ;

: submit-paste ( summary author channel mode contents -- )
<paste>
\ pastebin get-global add-paste
save-pastebin-store ;
<paste> [
\ pastebin get-global add-paste
save-pastebin-store
] keep paste-link permanent-redirect ;

\ submit-paste {
{ "summary" v-required }

@@ -91,8 +92,6 @@ SYMBOL: store
{ "contents" v-required }
} define-action

\ submit-paste [ paste-list ] define-redirect

: annotate-paste ( n summary author mode contents -- )
<annotation> swap get-paste
paste-annotations push
@@ -1,4 +1,4 @@
<% USING: namespaces io furnace sequences xmode.code2html ; %>
<% USING: namespaces io furnace sequences xmode.code2html calendar ; %>

<%
"Paste: " "summary" get append "title" set

@@ -8,7 +8,7 @@
<table>
<tr><th>Paste by:</th><td><% "author" get write %></td></tr>
<tr><th>Channel:</th><td><% "channel" get write %></td></tr>
<tr><th>Created:</th><td><% "date" get write %></td></tr>
<tr><th>Created:</th><td><% "date" get timestamp>string write %></td></tr>
<tr><th>File type:</th><td><% "mode" get write %></td></tr>
</table>
@@ -24,15 +24,7 @@ IN: webapps.planet
</ul> ;

: format-date ( date -- string )
10 head "-" split [ string>number ] map
first3 0 0 0 0 <timestamp>
[
dup timestamp-day #
" " %
dup timestamp-month month-abbreviations nth %
", " %
timestamp-year #
] "" make ;
rfc3339>timestamp timestamp>string ;

: print-posting ( posting -- )
<h2 "posting-title" =class h2>

@@ -53,8 +45,11 @@ IN: webapps.planet
SYMBOL: default-blogroll
SYMBOL: cached-postings

: safe-head ( seq n -- seq' )
over length min head ;

: mini-planet-factor ( -- )
cached-postings get 4 head print-posting-summaries ;
cached-postings get 4 safe-head print-posting-summaries ;

: planet-factor ( -- )
serving-html [ "planet" render-template ] with-html-stream ;

@@ -64,7 +59,7 @@ SYMBOL: cached-postings
: planet-feed ( -- feed )
"[ planet-factor ]"
"http://planet.factorcode.org"
cached-postings get 30 head <feed> ;
cached-postings get 30 safe-head <feed> ;

: feed.xml ( -- )
"text/xml" serving-content

@@ -126,10 +121,11 @@ SYMBOL: last-update
{ "Elie Chaftari" "http://fun-factor.blogspot.com/feeds/posts/default" "http://fun-factor.blogspot.com/" }
{ "Doug Coleman" "http://code-factor.blogspot.com/feeds/posts/default" "http://code-factor.blogspot.com/" }
{ "Daniel Ehrenberg" "http://useless-factor.blogspot.com/feeds/posts/default" "http://useless-factor.blogspot.com/" }
{ "Gavin Harrison" "http://gmh33.blogspot.com/feeds/posts/default" "http://gmh33.blogspot.com/" }
{ "Kio M. Smallwood"
"http://sekenre.wordpress.com/feed/atom/"
"http://sekenre.wordpress.com/" }
! { "Phil Dawes" "http://www.phildawes.net/blog/category/factor/feed/atom" "http://www.phildawes.net/blog/" }
{ "Phil Dawes" "http://www.phildawes.net/blog/category/factor/feed/atom" "http://www.phildawes.net/blog/" }
{ "Samuel Tardieu" "http://www.rfc1149.net/blog/tag/factor/feed/atom/" "http://www.rfc1149.net/blog/tag/factor/" }
{ "Slava Pestov" "http://factor-language.blogspot.com/atom.xml" "http://factor-language.blogspot.com/" }
} default-blogroll set-global
@@ -17,7 +17,7 @@ furnace ; %>
<h1 class="planet-title">[ planet-factor ]</h1>
<table width="100%" cellpadding="10">
<tr>
<td> <% cached-postings get 20 head print-postings %> </td>
<td> <% cached-postings get 20 safe-head print-postings %> </td>
<td valign="top" width="25%" class="infobox">
<p>
<b>planet-factor</b> is an Atom/RSS aggregator that collects the
@ -1,7 +1,7 @@
! Copyright (C) 2007 Slava Pestov.
! See http://factorcode.org/license.txt for BSD license.
USING: io.files namespaces webapps.file http.server.responders
xmode.code2html kernel ;
xmode.code2html kernel html ;
IN: webapps.source

global [

@ -12,7 +12,7 @@ global [
    [
        drop
        serving-html
        swap htmlize-stream
        [ swap htmlize-stream ] with-html-stream
    ] serve-file-hook set
    file-responder
] with-scope

@ -65,7 +65,6 @@ M: attrs set-at

M: attrs assoc-size length ;
M: attrs new-assoc drop V{ } new <attrs> ;
M: attrs assoc-find >r delegate r> assoc-find ;
M: attrs >alist delegate >alist ;

: >attrs ( assoc -- attrs )

@ -22,8 +22,6 @@ M: keyword-map set-at
M: keyword-map clear-assoc
    [ delegate clear-assoc ] keep invalid-no-word-sep ;

M: keyword-map assoc-find >r delegate r> assoc-find ;

M: keyword-map >alist delegate >alist ;

: (keyword-map-no-word-sep)

@ -1,11 +1,12 @@
USING: xmode.tokens xmode.rules
xmode.keyword-map xml.data xml.utilities xml assocs
kernel combinators sequences math.parser namespaces parser
xmode.utilities regexp io.files ;
USING: xmode.tokens xmode.rules xmode.keyword-map xml.data
xml.utilities xml assocs kernel combinators sequences
math.parser namespaces parser xmode.utilities regexp io.files ;
IN: xmode.loader

! Based on org.gjt.sp.jedit.XModeHandler

SYMBOL: ignore-case?

! Attribute utilities
: string>boolean ( string -- ? ) "TRUE" = ;

@ -33,11 +34,11 @@ IN: xmode.loader

: parse-literal-matcher ( tag -- matcher )
    dup children>string
    \ ignore-case? get [ <ignore-case> ] when
    ignore-case? get <string-matcher>
    swap position-attrs <matcher> ;

: parse-regexp-matcher ( tag -- matcher )
    dup children>string <regexp>
    dup children>string ignore-case? get <regexp>
    swap position-attrs <matcher> ;

! SPAN's children

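Both matcher constructors now receive the rule set's ignore-case? flag directly. A minimal sketch of the flagged <regexp> call, assuming it takes a string and an ignore-case? boolean as at the call site above (the pattern itself is made up):

    ! Sketch only; t requests case-insensitive matching here.
    USING: regexp ;
    "foo+" t <regexp>   ! builds a case-insensitive matcher
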
@ -137,13 +138,13 @@ RULE: MARK_PREVIOUS mark-previous-rule
    >r dup name-tag string>token swap children>string r> set-at ;

TAG: KEYWORDS ( rule-set tag -- key value )
    \ ignore-case? get <keyword-map>
    ignore-case? get <keyword-map>
    swap child-tags [ over parse-keyword-tag ] each
    swap set-rule-set-keywords ;

TAGS>

: ?<regexp> dup [ <regexp> ] when ;
: ?<regexp> dup [ ignore-case? get <regexp> ] when ;

: (parse-rules-tag) ( tag -- rule-set )
    <rule-set>

@ -159,10 +160,9 @@ TAGS>

: parse-rules-tag ( tag -- rule-set )
    dup (parse-rules-tag) [
        [
        dup rule-set-ignore-case? \ ignore-case? set
        dup rule-set-ignore-case? ignore-case? [
            swap child-tags [ parse-rule-tag ] curry* each
        ] with-scope
        ] with-variable
    ] keep ;

: merge-rule-set-props ( props rule-set -- )

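The ignore-case? flag is now scoped with with-variable rather than being set inside a fresh with-scope. A tiny sketch of that idiom; the symbol mirrors the ignore-case? variable from this file and the values are illustrative:

    USING: namespaces prettyprint ;
    SYMBOL: ignore-case?
    t ignore-case? [ ignore-case? get ] with-variable .   ! t
    ignore-case? get .                                    ! f -- the binding was scoped
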
@ -1,8 +1,8 @@
IN: xmode.marker
USING: kernel namespaces xmode.rules xmode.tokens
xmode.marker.state xmode.marker.context
xmode.utilities xmode.catalog sequences math
assocs combinators combinators.lib strings regexp splitting ;
xmode.marker.state xmode.marker.context xmode.utilities
xmode.catalog sequences math assocs combinators combinators.lib
strings regexp splitting parser-combinators ;

! Based on org.gjt.sp.jedit.syntax.TokenMarker

@ -62,31 +62,27 @@ M: rule match-position drop position get ;
    [ over matcher-at-word-start? over last-offset get = implies ]
    } && 2nip ;

GENERIC: text-matches? ( position text -- match-count/f )
: rest-of-line ( -- str )
    line get position get tail-slice ;

M: f text-matches? 2drop f ;
GENERIC: text-matches? ( string text -- match-count/f )

M: string text-matches?
    >r line get swap tail-slice r>
    [ head? ] keep length and ;
M: f text-matches?
    2drop f ;

M: ignore-case text-matches?
    >r line get swap tail-slice r>
    ignore-case-string
    2dup shorter? [
        2drop f
    ] [
        [ length head-slice ] keep
        [ [ >upper ] 2apply sequence= ] keep
        length and
    ] if ;
M: string-matcher text-matches?
    [
        dup string-matcher-string
        swap string-matcher-ignore-case?
        string-head?
    ] keep string-matcher-string length and ;

M: regexp text-matches?
    2drop f ; ! >r line get swap tail-slice r> match-head ;
    >r >string r> match-head ;

: rule-start-matches? ( rule -- match-count/f )
    dup rule-start tuck swap can-match-here? [
        position get swap matcher-text text-matches?
        rest-of-line swap matcher-text text-matches?
    ] [
        drop f
    ] if ;

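rest-of-line factors out the repeated "slice from the current position to the end of the line" computation that the match methods previously spelled out with >r line get swap tail-slice r>. An illustration of the underlying call with made-up data (line and position are the marker's dynamic variables):

    ! Sketch only: with line = "abcdef" and position = 2.
    USING: io sequences strings ;
    "abcdef" 2 tail-slice >string print   ! cdef
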
@ -96,8 +92,8 @@ M: regexp text-matches?
    dup rule-start swap can-match-here? 0 and
] [
    dup rule-end tuck swap can-match-here? [
        position get swap matcher-text
        context get line-context-end or
        rest-of-line
        swap matcher-text context get line-context-end or
        text-matches?
    ] [
        drop f

@ -51,10 +51,6 @@ SYMBOL: delegate-end-escaped?
    dup context set
    f swap set-line-context-in-rule ;

: terminal-rule-set ( -- rule-set )
    get-rule-set rule-set-default standard-rule-set
    push-context ;

: init-token-marker ( prev-context line rules -- )
    rule-sets set
    line set

@ -2,9 +2,9 @@ USING: xmode.tokens xmode.keyword-map kernel
sequences vectors assocs strings memoize regexp ;
IN: xmode.rules

TUPLE: ignore-case string ;
TUPLE: string-matcher string ignore-case? ;

C: <ignore-case> ignore-case
C: <string-matcher> string-matcher

! Based on org.gjt.sp.jedit.syntax.ParserRuleSet
TUPLE: rule-set

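The ignore-case wrapper tuple is replaced by string-matcher, which keeps a literal's text and its case-sensitivity flag together in one object. A minimal sketch of constructing and inspecting one, using the old-style slot accessor as elsewhere in this commit (the values are illustrative):

    USING: prettyprint xmode.rules ;
    "BEGIN" t <string-matcher> string-matcher-ignore-case? .   ! t
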
@ -97,7 +97,7 @@ TUPLE: mark-previous-rule ;
TUPLE: escape-rule ;

: <escape-rule> ( string -- rule )
    f f f <matcher>
    f <string-matcher> f f f <matcher>
    escape-rule construct-rule
    [ set-rule-start ] keep ;

@ -105,9 +105,7 @@ GENERIC: text-hash-char ( text -- ch )

M: f text-hash-char ;

M: string text-hash-char first ;

M: ignore-case text-hash-char ignore-case-string first ;
M: string-matcher text-hash-char string-matcher-string first ;

M: regexp text-hash-char drop f ;

@ -121,6 +119,10 @@ M: regexp text-hash-char drop f ;
    r> rule-set-rules inverted-index ;

: add-escape-rule ( string ruleset -- )
    >r <escape-rule> r>
    2dup set-rule-set-escape-rule
    add-rule ;
    over [
        >r <escape-rule> r>
        2dup set-rule-set-escape-rule
        add-rule
    ] [
        2drop
    ] if ;

@ -1,2 +1,10 @@
#include <ucontext.h>

INLINE void *ucontext_stack_pointer(void *uap)
{
    ucontext_t *ucontext = (ucontext_t *)uap;
    return (void *)ucontext->uc_mcontext.gregs[15];
}

#define UAP_PROGRAM_COUNTER(ucontext) \
    (((ucontext_t *)(ucontext))->uc_mcontext.gregs[16])

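The literal indices 15 and 16 are the x86-64 slots for RSP and RIP in the gregs array. A hypothetical equivalent spelling with glibc's named constants, which is not part of this commit and assumes glibc's <ucontext.h> with _GNU_SOURCE defined:

    /* Sketch only: REG_RSP and REG_RIP name the same slots as 15 and 16. */
    #define _GNU_SOURCE
    #include <ucontext.h>

    static void *ucontext_stack_pointer_named(void *uap)
    {
        ucontext_t *ucontext = (ucontext_t *)uap;
        return (void *)ucontext->uc_mcontext.gregs[REG_RSP];
    }

    #define UAP_PROGRAM_COUNTER_NAMED(ucontext) \
        (((ucontext_t *)(ucontext))->uc_mcontext.gregs[REG_RIP])
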
@ -70,7 +70,6 @@
#elif defined(FACTOR_ARM)
#include "os-linux-arm.h"
#elif defined(FACTOR_AMD64)
#include "os-unix-ucontext.h"
#include "os-linux-x86-64.h"
#else
#error "Unsupported Linux flavor"