diff --git a/extra/html/parser/state/state-tests.factor b/extra/html/parser/state/state-tests.factor
index e655dbb699..63916a3c1c 100644
--- a/extra/html/parser/state/state-tests.factor
+++ b/extra/html/parser/state/state-tests.factor
@@ -88,5 +88,8 @@ IN: html.parser.state.tests
[ "c" ]
[ "c" take-token ] unit-test
-[ { "a" "b" "c" "abcd e \\\"f g" } ]
-[ "a b c \"abcd e \\\"f g\"" CHAR: \ CHAR: " tokenize-line ] unit-test
+[ f ]
+[ "" take-token ] unit-test
+
+[ "abcd e \\\"f g" ]
+[ "\"abcd e \\\"f g\"" CHAR: \ CHAR: " take-token* ] unit-test
diff --git a/extra/html/parser/state/state.factor b/extra/html/parser/state/state.factor
index 6cca9f72a9..86adb0f914 100644
--- a/extra/html/parser/state/state.factor
+++ b/extra/html/parser/state/state.factor
@@ -98,19 +98,16 @@ TUPLE: state-parser sequence n ;
         start-n state-parser (>>n) f
     ] if ;
 
-: take-token ( state-parser -- string )
+: (take-token) ( state-parser -- string )
     skip-whitespace [ current { [ blank? ] [ f = ] } 1|| ] take-until ;
 
-:: (tokenize-line) ( state-parser escape-char quote-char -- )
+:: take-token* ( state-parser escape-char quote-char -- string/f )
     state-parser skip-whitespace
     dup current {
-        { quote-char [
-            [ escape-char quote-char take-quoted-string , ]
-            [ escape-char quote-char (tokenize-line) ] bi
-        ] }
-        { f [ drop ] }
-        [ drop [ take-token , ] [ escape-char quote-char (tokenize-line) ] bi ]
+        { quote-char [ escape-char quote-char take-quoted-string ] }
+        { f [ drop f ] }
+        [ drop (take-token) ]
     } case ;
 
-: tokenize-line ( line escape-char quote-char -- seq )
-    [ ] 2dip [ (tokenize-line) ] { } make ;
+: take-token ( state-parser -- string/f )
+    CHAR: \ CHAR: " take-token* ;
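
Note for reviewers: tokenize-line is removed without a direct replacement. Below is a
minimal sketch of how a caller could recover the old split-a-whole-line behavior by
looping the new take-token* until it returns f. The <state-parser> constructor and the
while/make accumulation idiom are assumptions for illustration, not part of this patch.

    USING: html.parser.state kernel locals make ;

    ! Collect tokens by repeatedly calling take-token* on one mutable
    ! state-parser; take-token* returns f once the input is exhausted.
    :: tokenize-line* ( line escape-char quote-char -- seq )
        line <state-parser> :> parser
        [
            [ parser escape-char quote-char take-token* dup ] [ , ] while drop
        ] { } make ;

    ! Example, mirroring the expectations of the removed unit test:
    ! "a b c \"abcd e \\\"f g\"" CHAR: \ CHAR: " tokenize-line*
    ! => { "a" "b" "c" "abcd e \\\"f g" }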