}
proc tokenize str {
-    set re {[\s,]*(~@|[\[\]\{\}()'`~^@]|\"(?:\\.|[^\\\"])*\"?|;.*|[^\s\[\]\{\}('\"`~^@,;)]*)}
+    set re {[\s,]*(~@|[\[\]\{\}()'`~^@]|\"(?:\\.|[^\\\"])*\"?|;[^\n]*|[^\s\[\]\{\}('\"`~^@,;)]*)}
    set tokens {}
-    foreach {_ capture} [regexp -line -all -inline $re $str] {
+    foreach {_ capture} [regexp -all -inline $re $str] {
        if {[string length $capture] > 0 && [string range $capture 0 0] != ";"} {
            lappend tokens $capture
        }
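
The Tcl hunk above drops the -line switch, which previously made "." stop at newline boundaries; without it, ";.*" would swallow everything from a semicolon to the end of the input, so the comment alternative becomes ";[^\n]*" and a comment stays confined to its own line. The Yorick hunk below makes the same substitution and drops the corresponding newline=1 flag. A minimal sanity check of the new Tcl pattern (illustrative only, not part of the patch; the sample input and the name "src" are invented):

# Not part of the patch: feed the new pattern a two-line input with a
# trailing comment and print the tokens that survive filtering.
set re {[\s,]*(~@|[\[\]\{\}()'`~^@]|\"(?:\\.|[^\\\"])*\"?|;[^\n]*|[^\s\[\]\{\}('\"`~^@,;)]*)}
set src "(+ 1 2) ; trailing comment\n(* 3 4)"
foreach {_ capture} [regexp -all -inline $re $src] {
    # skip empty matches and comment tokens, as tokenize does
    if {[string length $capture] > 0 && [string range $capture 0 0] != ";"} {
        puts $capture
    }
}
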
#include "yeti_regex.i"
require, "types.i"
-TOKENIZER_REGEXP = regcomp("[[:space:],]*(~@|[][{}()'`~@]|\"([\\].|[^\\\"])*\"?|;.*|[^][[:space:]{}()'\"`~@,;]*)", newline=1)
+TOKENIZER_REGEXP = regcomp("[[:space:],]*(~@|[][{}()'`~@]|\"([\\].|[^\\\"])*\"?|;[^\n]*|[^][[:space:]{}()'\"`~@,;]*)")
func tokenize(str)
{