Various fixes

release
Slava Pestov 2007-12-06 00:23:18 -05:00
parent 59566c20e9
commit a969934061
7 changed files with 98 additions and 59 deletions


@@ -32,10 +32,10 @@ to depend on:
 it inherits the value of the NO_WORD_SEP attribute from the previous
 RULES tag.
-The Factor implementation does not duplicate this behavior.
+The Factor implementation does not duplicate this behavior. If you
+find a mode file which depends on this flaw, please fix it and submit
+the changes to the jEdit project.
-This is still a work in progress. If you find any behavioral differences
-between the Factor implementation and the original jEdit code, please
-report them as bugs. Also, if you wish to contribute a new or improved
-mode file, please contact the jEdit project. Updated mode files in jEdit
-will be periodically imported into the Factor source tree.
+If you wish to contribute a new or improved mode file, please contact
+the jEdit project. Updated mode files in jEdit will be periodically
+imported into the Factor source tree.
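For context, a rough sketch (not part of this patch) of inspecting the NO_WORD_SEP value each rule set of a loaded mode ends up with in the Factor implementation; the xmode.catalog vocabulary name and the "java" mode name are assumptions, and load-mode returning an assoc of rule sets is inferred from resolve-delegate below:

    ! Print the NO_WORD_SEP string of every rule set in an assumed "java" mode.
    USING: assocs prettyprint sequences xmode.catalog xmode.rules ;
    "java" load-mode values [ rule-set-no-word-sep . ] each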

extra/xmode/code2html/code2html.factor Normal file → Executable file

@@ -15,8 +15,10 @@ IN: xmode.code2html
 : htmlize-line ( line-context line rules -- line-context' )
     tokenize-line htmlize-tokens ;
-: htmlize-lines ( lines rules -- )
-    <pre> f -rot [ htmlize-line nl ] curry each drop </pre> ;
+: htmlize-lines ( lines mode -- )
+    <pre>
+    f swap load-mode [ htmlize-line nl ] curry reduce drop
+    </pre> ;
 : default-stylesheet ( -- )
     <style>
@@ -24,22 +26,20 @@ IN: xmode.code2html
     resource-path <file-reader> contents write
     </style> ;
+: htmlize-stream ( path stream -- )
+    lines swap
+    <html>
+    <head>
+    default-stylesheet
+    <title> dup write </title>
+    </head>
+    <body>
+    over empty?
+    [ 2drop ]
+    [ over first find-mode htmlize-lines ] if
+    </body>
+    </html> ;
 : htmlize-file ( path -- )
-    dup <file-reader> lines dup empty? [ 2drop ] [
-        swap dup ".html" append <file-writer> [
-            [
-                <html>
-                <head>
-                <title> dup write </title>
-                default-stylesheet
-                </head>
-                <body>
-                over first
-                find-mode
-                load-mode
-                htmlize-lines
-                </body>
-                </html>
-            ] with-html-stream
-        ] with-stream
-    ] if ;
+    dup <file-reader> over ".html" append <file-writer>
+    [ htmlize-stream ] with-stream ;
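A rough usage sketch for the refactored entry point (the "hello.factor" path is illustrative, not from the patch): htmlize-file reads the given file and writes the highlighted HTML to a sibling file with ".html" appended.

    USING: xmode.code2html ;
    ! Produces "hello.factor.html" next to the input file.
    "hello.factor" htmlize-file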


@@ -32,10 +32,13 @@ IN: xmode.loader
     swap [ at string>boolean ] curry map first3 ;
 : parse-literal-matcher ( tag -- matcher )
-    dup children>string swap position-attrs <matcher> ;
+    dup children>string
+    \ ignore-case? get [ <ignore-case> ] when
+    swap position-attrs <matcher> ;
 : parse-regexp-matcher ( tag -- matcher )
-    dup children>string <regexp> swap position-attrs <matcher> ;
+    dup children>string <regexp>
+    swap position-attrs <matcher> ;
 ! SPAN's children
 <TAGS: parse-begin/end-tag
@@ -130,22 +133,25 @@ RULE: MARK_FOLLOWING mark-following-rule
 RULE: MARK_PREVIOUS mark-previous-rule
     shared-tag-attrs match-type-attr literal-start ;
-: parse-keyword-tag
-    dup name-tag string>token swap children>string rot set-at ;
+: parse-keyword-tag ( tag keyword-map -- )
+    >r dup name-tag string>token swap children>string r> set-at ;
 TAG: KEYWORDS ( rule-set tag -- key value )
-    >r rule-set-keywords r>
-    child-tags [ parse-keyword-tag ] curry* each ;
+    \ ignore-case? get <keyword-map>
+    swap child-tags [ over parse-keyword-tag ] each
+    swap set-rule-set-keywords ;
 TAGS>
+: ?<regexp> dup [ <regexp> ] when ;
 : (parse-rules-tag) ( tag -- rule-set )
     <rule-set>
     {
         { "SET" string>rule-set-name set-rule-set-name }
         { "IGNORE_CASE" string>boolean set-rule-set-ignore-case? }
         { "HIGHLIGHT_DIGITS" string>boolean set-rule-set-highlight-digits? }
-        { "DIGIT_RE" <regexp> set-rule-set-digit-re } ! XXX
+        { "DIGIT_RE" ?<regexp> set-rule-set-digit-re }
         { "ESCAPE" f add-escape-rule }
         { "DEFAULT" string>token set-rule-set-default }
         { "NO_WORD_SEP" f set-rule-set-no-word-sep }
@@ -153,9 +159,10 @@ TAGS>
 : parse-rules-tag ( tag -- rule-set )
     dup (parse-rules-tag) [
-        swap child-tags [
-            parse-rule-tag
-        ] curry* each
+        [
+            dup rule-set-ignore-case? \ ignore-case? set
+            swap child-tags [ parse-rule-tag ] curry* each
+        ] with-scope
     ] keep ;
 : merge-rule-set-props ( props rule-set -- )


@@ -109,3 +109,21 @@ IN: temporary
 ] [
     f "$FOO" "shellscript" load-mode tokenize-line nip
 ] unit-test
+[
+    {
+        T{ token f "AND" KEYWORD1 }
+    }
+] [
+    f "AND" "pascal" load-mode tokenize-line nip
+] unit-test
+[
+    {
+        T{ token f "Comment {" COMMENT1 }
+        T{ token f "XXX" COMMENT1 }
+        T{ token f "}" COMMENT1 }
+    }
+] [
+    f "Comment {XXX}" "rebol" load-mode tokenize-line nip
+] unit-test


@@ -15,8 +15,8 @@ assocs combinators combinators.lib strings regexp splitting ;
         [ dup [ digit? ] contains? ]
         [
             dup [ digit? ] all? [
-                current-rule-set rule-set-digit-re dup
-                [ dupd 2drop f ] [ drop f ] if
+                current-rule-set rule-set-digit-re
+                dup [ dupd matches? ] [ drop f ] if
             ] unless*
         ]
     } && nip ;
@@ -26,7 +26,7 @@ assocs combinators combinators.lib strings regexp splitting ;
 : resolve-delegate ( name -- rules )
     dup string? [
-        "::" split1 [ swap load-mode at ] [ rule-sets get at ] if*
+        "::" split1 [ swap load-mode ] [ rule-sets get ] if* at
     ] when ;
 : rule-set-keyword-maps ( ruleset -- seq )
@@ -45,13 +45,6 @@ assocs combinators combinators.lib strings regexp splitting ;
     dup mark-number [ ] [ mark-keyword ] ?if
     [ prev-token, ] when* ;
-: check-terminate-char ( -- )
-    current-rule-set rule-set-terminate-char [
-        position get <= [
-            terminated? on
-        ] when
-    ] when* ;
 : current-char ( -- char )
     position get line get nth ;
@@ -74,11 +67,22 @@ GENERIC: text-matches? ( position text -- match-count/f )
 M: f text-matches? 2drop f ;
 M: string text-matches?
-    ! XXX ignore case
     >r line get swap tail-slice r>
     [ head? ] keep length and ;
-! M: regexp text-matches? ... ;
+M: ignore-case text-matches?
+    >r line get swap tail-slice r>
+    ignore-case-string
+    2dup shorter? [
+        2drop f
+    ] [
+        [ length head-slice ] keep
+        [ [ >upper ] 2apply sequence= ] keep
+        length and
+    ] if ;
+M: regexp text-matches?
+    2drop f ; ! >r line get swap tail-slice r> match-head ;
 : rule-start-matches? ( rule -- match-count/f )
     dup rule-start tuck swap can-match-here? [
@@ -284,8 +288,6 @@ M: mark-previous-rule handle-rule-start
 : mark-token-loop ( -- )
     position get line get length < [
-        check-terminate-char
         {
             [ check-end-delegate ]
             [ check-every-rule ]
@@ -302,8 +304,7 @@ M: mark-previous-rule handle-rule-start
 : unwind-no-line-break ( -- )
     context get line-context-parent [
-        line-context-in-rule rule-no-line-break?
-        terminated? get or [
+        line-context-in-rule rule-no-line-break? [
            pop-context
            unwind-no-line-break
        ] when


@@ -16,7 +16,6 @@ SYMBOL: seen-whitespace-end?
 SYMBOL: escaped?
 SYMBOL: process-escape?
 SYMBOL: delegate-end-escaped?
-SYMBOL: terminated?
 : current-rule ( -- rule )
     context get line-context-in-rule ;


@@ -1,7 +1,11 @@
 USING: xmode.tokens xmode.keyword-map kernel
-sequences vectors assocs strings memoize ;
+sequences vectors assocs strings memoize regexp ;
 IN: xmode.rules
+TUPLE: ignore-case string ;
+C: <ignore-case> ignore-case
 ! Based on org.gjt.sp.jedit.syntax.ParserRuleSet
 TUPLE: rule-set
 name
@@ -20,12 +24,11 @@ no-word-sep
 : init-rule-set ( ruleset -- )
     #! Call after constructor.
-    >r H{ } clone H{ } clone V{ } clone f <keyword-map> r>
+    >r H{ } clone H{ } clone V{ } clone r>
     {
         set-rule-set-rules
         set-rule-set-props
         set-rule-set-imports
-        set-rule-set-keywords
     } set-slots ;
 : <rule-set> ( -- ruleset )
@@ -46,8 +49,9 @@ MEMO: standard-rule-set ( id -- ruleset )
     ] when* ;
 : rule-set-no-word-sep* ( ruleset -- str )
-    dup rule-set-keywords keyword-map-no-word-sep*
-    swap rule-set-no-word-sep "_" 3append ;
+    dup rule-set-no-word-sep
+    swap rule-set-keywords dup [ keyword-map-no-word-sep* ] when
+    "_" 3append ;
 ! Match restrictions
 TUPLE: matcher text at-line-start? at-whitespace-end? at-word-start? ;
@@ -97,10 +101,20 @@ TUPLE: escape-rule ;
     escape-rule construct-rule
     [ set-rule-start ] keep ;
+GENERIC: text-hash-char ( text -- ch )
+M: f text-hash-char ;
+M: string text-hash-char first ;
+M: ignore-case text-hash-char ignore-case-string first ;
+M: regexp text-hash-char drop f ;
 : rule-chars* ( rule -- string )
     dup rule-chars
     swap rule-start matcher-text
-    dup string? [ first add ] [ drop ] if ;
+    text-hash-char [ add ] when* ;
 : add-rule ( rule ruleset -- )
     >r dup rule-chars* >upper swap