mirror of https://github.com/golang/go.git
- remove special handling of '\n' characters (used to be treated as comments
  for pretty printer purposes - now properly ignored as white space since we
  have line/col information)
- changed sample use in comment to an actually compiled function to make sure
  the sample actually works
- added extra tests (checking line and column values, and the Tokenize function)

R=rsc
DELTA=253  (61 added, 67 deleted, 125 changed)
OCL=26143
CL=26181
This commit is contained in:
parent 0282719387
commit 6f321e28f4
@@ -4,23 +4,7 @@
 // A scanner for Go source text. Takes a []byte as source which can
 // then be tokenized through repeated calls to the Scan function.
-//
-// Sample use:
-//
-//	import "token"
-//	import "scanner"
-//
-//	func tokenize(src []byte) {
-//		var s scanner.Scanner;
-//		s.Init(src, nil /* no error handler */, false /* ignore comments */);
-//		for {
-//			pos, tok, lit := s.Scan();
-//			if tok == Scanner.EOF {
-//				return;
-//			}
-//			println(pos, token.TokenString(tok), string(lit));
-//		}
-//	}
+// For a sample use of a scanner, see the implementation of Tokenize.
 //
 package scanner
 
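A side note on the removed sample: it could never have compiled, since it checks tok == Scanner.EOF where the constant is actually token.EOF, which is presumably why the commit message insists on a sample that is "actually compiled". A corrected version of the removed snippet, in the Go dialect of the day and using only the API visible in this diff (the helper name tokenize is kept from the original), would read:

	import "token"
	import "scanner"

	func tokenize(src []byte) {
		var s scanner.Scanner;
		s.Init(src, nil /* no error handler */, false /* ignore comments */);
		for {
			pos, tok, lit := s.Scan();
			if tok == token.EOF {	// the removed sample had the broken "Scanner.EOF" here
				return;
			}
			println(pos.Pos, token.TokenString(tok), string(lit));	// pos is a Location; print its byte offset
		}
	}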
@@ -62,7 +46,7 @@ type Scanner struct {
 	scan_comments bool;  // if set, comments are reported as tokens
 
 	// scanning state
-	loc Location;  // location of ch
+	loc Location;  // location before ch (src[loc.Pos] == ch)
 	pos int;  // current reading position (position after ch)
 	ch int;  // one char look-ahead
 }
@@ -78,7 +62,7 @@ func (S *Scanner) next() {
 		switch {
 		case r == '\n':
 			S.loc.Line++;
-			S.loc.Col = 1;
+			S.loc.Col = 0;
 		case r >= 0x80:
 			// not ASCII
 			r, w = utf8.DecodeRune(S.src[S.pos : len(S.src)]);
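One plausible reading of this one-character fix (the rest of next() is outside the hunk, so this is an inference, not a quote of the library code): the scanner advances loc.Col once per character consumed, so resetting the column to 1 at a newline made the first character of the next line report column 2. Resetting it to 0 lets the usual increment produce the expected 1. A self-contained sketch of that bookkeeping:

	line, col := 1, 0;
	for i := 0; i < len(src); i++ {
		col++;	// every consumed character advances the column
		if src[i] == '\n' {
			line++;
			col = 0;	// so the next col++ yields column 1, not 2
		}
	}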
@@ -94,9 +78,9 @@ func (S *Scanner) next() {
 
 // Init prepares the scanner S to tokenize the text src. Calls to Scan
 // will use the error handler err if they encounter a syntax error. The boolean
-// scan_comments specifies whether newline characters and comments should be
-// recognized and returned by Scan as token.COMMENT. If scan_comments is false,
-// they are treated as white space and ignored.
+// scan_comments specifies whether comments should be recognized and returned
+// by Scan as token.COMMENT. If scan_comments is false, they are treated as
+// white space and ignored.
 //
 func (S *Scanner) Init(src []byte, err ErrorHandler, scan_comments bool) {
 	S.src = src;
@@ -137,24 +121,6 @@ func (S *Scanner) expect(ch int) {
 }
 
 
-func (S *Scanner) skipWhitespace() {
-	for {
-		switch S.ch {
-		case '\t', '\r', ' ':
-			// nothing to do
-		case '\n':
-			if S.scan_comments {
-				return;
-			}
-		default:
-			return;
-		}
-		S.next();
-	}
-	panic("UNREACHABLE");
-}
-
-
 func (S *Scanner) scanComment(loc Location) {
 	// first '/' already consumed
 
@@ -163,9 +129,7 @@ func (S *Scanner) scanComment(loc Location) {
 	for S.ch >= 0 {
 		S.next();
 		if S.ch == '\n' {
-			// '\n' terminates comment but we do not include
-			// it in the comment (otherwise we don't see the
-			// start of a newline in skipWhitespace()).
+			S.next();  // '\n' belongs to the comment
 			return;
 		}
 	}
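With this change a line comment swallows its terminating newline, so the literal reported for a // comment includes the '\n'; the updated test table below expects exactly that ("// a comment \n"). A hedged usage sketch built from the Tokenize function added later in this diff (io.StringBytes and the callback shape are copied from the new test; nil means no error handler, true requests comment tokens):

	scanner.Tokenize(io.StringBytes("// a comment \n"), nil, true,
		func (loc scanner.Location, tok int, lit []byte) bool {
			// first call: tok == token.COMMENT, string(lit) == "// a comment \n"
			return tok != token.EOF;
		});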
@@ -412,14 +376,19 @@ func (S *Scanner) switch4(tok0, tok1, ch2, tok2, tok3 int) int {
 
 // Scan scans the next token and returns the token location loc,
 // the token tok, and the literal text lit corresponding to the
-// token.
+// token. The source end is indicated by token.EOF.
 //
 func (S *Scanner) Scan() (loc Location, tok int, lit []byte) {
-scan_again:
-	S.skipWhitespace();
+	// skip white space
+	for S.ch == ' ' || S.ch == '\t' || S.ch == '\n' || S.ch == '\r' {
+		S.next();
+	}
 
 	// current token start
 	loc, tok = S.loc, token.ILLEGAL;
 
 	// determine token value
 	switch ch := S.ch; {
 	case isLetter(ch):
 		tok = S.scanIdentifier();
@@ -429,7 +398,6 @@ scan_again:
 		S.next();  // always make progress
 		switch ch {
 		case -1 : tok = token.EOF;
-		case '\n': tok = token.COMMENT;
 		case '"' : tok = token.STRING; S.scanString(loc);
 		case '\'': tok = token.CHAR; S.scanChar();
 		case '`' : tok = token.STRING; S.scanRawString(loc);
@@ -487,3 +455,17 @@ scan_again:
 
 	return loc, tok, S.src[loc.Pos : S.loc.Pos];
 }
+
+
+// Tokenize calls a function f with the token location, token value, and token
+// text for each token in the source src. The other parameters have the same
+// meaning as for the Init function. Tokenize keeps scanning until f returns
+// false (usually when the token value is token.EOF).
+//
+func Tokenize(src []byte, err ErrorHandler, scan_comments bool, f func (loc Location, tok int, lit []byte) bool) {
+	var s Scanner;
+	s.Init(src, err, scan_comments);
+	for f(s.Scan()) {
+		// action happens in f
+	}
+}
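The loop condition f(s.Scan()) is the compact bit here: Scan's three results are passed straight through as f's three arguments, so the loop body is empty and scanning stops once f returns false. A hypothetical caller (countTokens is illustrative, not part of this commit), again in the dialect of the day:

	func countTokens(src []byte) int {
		n := 0;
		scanner.Tokenize(src, nil /* no error handler */, false /* ignore comments */,
			func (loc scanner.Location, tok int, lit []byte) bool {
				n++;	// the final token.EOF is counted too
				return tok != token.EOF;
			});
		return n;
	}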
@@ -31,7 +31,6 @@ func tokenclass(tok int) int {
 
 
 type elt struct {
-	pos int;
 	tok int;
 	lit string;
 	class int;
@@ -40,130 +39,120 @@ type elt struct {
 
 var tokens = [...]elt{
 	// Special tokens
-	elt{ 0, token.COMMENT, "/* a comment */", special },
-	elt{ 0, token.COMMENT, "\n", special },
+	elt{ token.COMMENT, "/* a comment */", special },
+	elt{ token.COMMENT, "// a comment \n", special },
 
 	// Identifiers and basic type literals
-	elt{ 0, token.IDENT, "foobar", literal },
-	elt{ 0, token.IDENT, "a۰۱۸", literal },
-	elt{ 0, token.IDENT, "foo६४", literal },
-	elt{ 0, token.IDENT, "bar9876", literal },
-	elt{ 0, token.INT, "0", literal },
-	elt{ 0, token.INT, "01234567", literal },
-	elt{ 0, token.INT, "0xcafebabe", literal },
-	elt{ 0, token.FLOAT, "0.", literal },
-	elt{ 0, token.FLOAT, ".0", literal },
-	elt{ 0, token.FLOAT, "3.14159265", literal },
-	elt{ 0, token.FLOAT, "1e0", literal },
-	elt{ 0, token.FLOAT, "1e+100", literal },
-	elt{ 0, token.FLOAT, "1e-100", literal },
-	elt{ 0, token.FLOAT, "2.71828e-1000", literal },
-	elt{ 0, token.CHAR, "'a'", literal },
-	elt{ 0, token.CHAR, "'\\000'", literal },
-	elt{ 0, token.CHAR, "'\\xFF'", literal },
-	elt{ 0, token.CHAR, "'\\uff16'", literal },
-	elt{ 0, token.CHAR, "'\\U0000ff16'", literal },
-	elt{ 0, token.STRING, "`foobar`", literal },
+	elt{ token.IDENT, "foobar", literal },
+	elt{ token.IDENT, "a۰۱۸", literal },
+	elt{ token.IDENT, "foo६४", literal },
+	elt{ token.IDENT, "bar9876", literal },
+	elt{ token.INT, "0", literal },
+	elt{ token.INT, "01234567", literal },
+	elt{ token.INT, "0xcafebabe", literal },
+	elt{ token.FLOAT, "0.", literal },
+	elt{ token.FLOAT, ".0", literal },
+	elt{ token.FLOAT, "3.14159265", literal },
+	elt{ token.FLOAT, "1e0", literal },
+	elt{ token.FLOAT, "1e+100", literal },
+	elt{ token.FLOAT, "1e-100", literal },
+	elt{ token.FLOAT, "2.71828e-1000", literal },
+	elt{ token.CHAR, "'a'", literal },
+	elt{ token.CHAR, "'\\000'", literal },
+	elt{ token.CHAR, "'\\xFF'", literal },
+	elt{ token.CHAR, "'\\uff16'", literal },
+	elt{ token.CHAR, "'\\U0000ff16'", literal },
+	elt{ token.STRING, "`foobar`", literal },
 
 	// Operators and delimitors
-	elt{ 0, token.ADD, "+", operator },
-	elt{ 0, token.SUB, "-", operator },
-	elt{ 0, token.MUL, "*", operator },
-	elt{ 0, token.QUO, "/", operator },
-	elt{ 0, token.REM, "%", operator },
+	elt{ token.ADD, "+", operator },
+	elt{ token.SUB, "-", operator },
+	elt{ token.MUL, "*", operator },
+	elt{ token.QUO, "/", operator },
+	elt{ token.REM, "%", operator },
 
-	elt{ 0, token.AND, "&", operator },
-	elt{ 0, token.OR, "|", operator },
-	elt{ 0, token.XOR, "^", operator },
-	elt{ 0, token.SHL, "<<", operator },
-	elt{ 0, token.SHR, ">>", operator },
+	elt{ token.AND, "&", operator },
+	elt{ token.OR, "|", operator },
+	elt{ token.XOR, "^", operator },
+	elt{ token.SHL, "<<", operator },
+	elt{ token.SHR, ">>", operator },
 
-	elt{ 0, token.ADD_ASSIGN, "+=", operator },
-	elt{ 0, token.SUB_ASSIGN, "-=", operator },
-	elt{ 0, token.MUL_ASSIGN, "*=", operator },
-	elt{ 0, token.QUO_ASSIGN, "/=", operator },
-	elt{ 0, token.REM_ASSIGN, "%=", operator },
+	elt{ token.ADD_ASSIGN, "+=", operator },
+	elt{ token.SUB_ASSIGN, "-=", operator },
+	elt{ token.MUL_ASSIGN, "*=", operator },
+	elt{ token.QUO_ASSIGN, "/=", operator },
+	elt{ token.REM_ASSIGN, "%=", operator },
 
-	elt{ 0, token.AND_ASSIGN, "&=", operator },
-	elt{ 0, token.OR_ASSIGN, "|=", operator },
-	elt{ 0, token.XOR_ASSIGN, "^=", operator },
-	elt{ 0, token.SHL_ASSIGN, "<<=", operator },
-	elt{ 0, token.SHR_ASSIGN, ">>=", operator },
+	elt{ token.AND_ASSIGN, "&=", operator },
+	elt{ token.OR_ASSIGN, "|=", operator },
+	elt{ token.XOR_ASSIGN, "^=", operator },
+	elt{ token.SHL_ASSIGN, "<<=", operator },
+	elt{ token.SHR_ASSIGN, ">>=", operator },
 
-	elt{ 0, token.LAND, "&&", operator },
-	elt{ 0, token.LOR, "||", operator },
-	elt{ 0, token.ARROW, "<-", operator },
-	elt{ 0, token.INC, "++", operator },
-	elt{ 0, token.DEC, "--", operator },
+	elt{ token.LAND, "&&", operator },
+	elt{ token.LOR, "||", operator },
+	elt{ token.ARROW, "<-", operator },
+	elt{ token.INC, "++", operator },
+	elt{ token.DEC, "--", operator },
 
-	elt{ 0, token.EQL, "==", operator },
-	elt{ 0, token.LSS, "<", operator },
-	elt{ 0, token.GTR, ">", operator },
-	elt{ 0, token.ASSIGN, "=", operator },
-	elt{ 0, token.NOT, "!", operator },
+	elt{ token.EQL, "==", operator },
+	elt{ token.LSS, "<", operator },
+	elt{ token.GTR, ">", operator },
+	elt{ token.ASSIGN, "=", operator },
+	elt{ token.NOT, "!", operator },
 
-	elt{ 0, token.NEQ, "!=", operator },
-	elt{ 0, token.LEQ, "<=", operator },
-	elt{ 0, token.GEQ, ">=", operator },
-	elt{ 0, token.DEFINE, ":=", operator },
-	elt{ 0, token.ELLIPSIS, "...", operator },
+	elt{ token.NEQ, "!=", operator },
+	elt{ token.LEQ, "<=", operator },
+	elt{ token.GEQ, ">=", operator },
+	elt{ token.DEFINE, ":=", operator },
+	elt{ token.ELLIPSIS, "...", operator },
 
-	elt{ 0, token.LPAREN, "(", operator },
-	elt{ 0, token.LBRACK, "[", operator },
-	elt{ 0, token.LBRACE, "{", operator },
-	elt{ 0, token.COMMA, ",", operator },
-	elt{ 0, token.PERIOD, ".", operator },
+	elt{ token.LPAREN, "(", operator },
+	elt{ token.LBRACK, "[", operator },
+	elt{ token.LBRACE, "{", operator },
+	elt{ token.COMMA, ",", operator },
+	elt{ token.PERIOD, ".", operator },
 
-	elt{ 0, token.RPAREN, ")", operator },
-	elt{ 0, token.RBRACK, "]", operator },
-	elt{ 0, token.RBRACE, "}", operator },
-	elt{ 0, token.SEMICOLON, ";", operator },
-	elt{ 0, token.COLON, ":", operator },
+	elt{ token.RPAREN, ")", operator },
+	elt{ token.RBRACK, "]", operator },
+	elt{ token.RBRACE, "}", operator },
+	elt{ token.SEMICOLON, ";", operator },
+	elt{ token.COLON, ":", operator },
 
 	// Keywords
-	elt{ 0, token.BREAK, "break", keyword },
-	elt{ 0, token.CASE, "case", keyword },
-	elt{ 0, token.CHAN, "chan", keyword },
-	elt{ 0, token.CONST, "const", keyword },
-	elt{ 0, token.CONTINUE, "continue", keyword },
+	elt{ token.BREAK, "break", keyword },
+	elt{ token.CASE, "case", keyword },
+	elt{ token.CHAN, "chan", keyword },
+	elt{ token.CONST, "const", keyword },
+	elt{ token.CONTINUE, "continue", keyword },
 
-	elt{ 0, token.DEFAULT, "default", keyword },
-	elt{ 0, token.DEFER, "defer", keyword },
-	elt{ 0, token.ELSE, "else", keyword },
-	elt{ 0, token.FALLTHROUGH, "fallthrough", keyword },
-	elt{ 0, token.FOR, "for", keyword },
+	elt{ token.DEFAULT, "default", keyword },
+	elt{ token.DEFER, "defer", keyword },
+	elt{ token.ELSE, "else", keyword },
+	elt{ token.FALLTHROUGH, "fallthrough", keyword },
+	elt{ token.FOR, "for", keyword },
 
-	elt{ 0, token.FUNC, "func", keyword },
-	elt{ 0, token.GO, "go", keyword },
-	elt{ 0, token.GOTO, "goto", keyword },
-	elt{ 0, token.IF, "if", keyword },
-	elt{ 0, token.IMPORT, "import", keyword },
+	elt{ token.FUNC, "func", keyword },
+	elt{ token.GO, "go", keyword },
+	elt{ token.GOTO, "goto", keyword },
+	elt{ token.IF, "if", keyword },
+	elt{ token.IMPORT, "import", keyword },
 
-	elt{ 0, token.INTERFACE, "interface", keyword },
-	elt{ 0, token.MAP, "map", keyword },
-	elt{ 0, token.PACKAGE, "package", keyword },
-	elt{ 0, token.RANGE, "range", keyword },
-	elt{ 0, token.RETURN, "return", keyword },
+	elt{ token.INTERFACE, "interface", keyword },
+	elt{ token.MAP, "map", keyword },
+	elt{ token.PACKAGE, "package", keyword },
+	elt{ token.RANGE, "range", keyword },
+	elt{ token.RETURN, "return", keyword },
 
-	elt{ 0, token.SELECT, "select", keyword },
-	elt{ 0, token.STRUCT, "struct", keyword },
-	elt{ 0, token.SWITCH, "switch", keyword },
-	elt{ 0, token.TYPE, "type", keyword },
-	elt{ 0, token.VAR, "var", keyword },
+	elt{ token.SELECT, "select", keyword },
+	elt{ token.STRUCT, "struct", keyword },
+	elt{ token.SWITCH, "switch", keyword },
+	elt{ token.TYPE, "type", keyword },
+	elt{ token.VAR, "var", keyword },
 }
 
 
-const whitespace = " \t ";  // to separate tokens
-
-func init() {
-	// set pos fields
-	pos := 0;
-	for i := 0; i < len(tokens); i++ {
-		tokens[i].pos = pos;
-		pos += len(tokens[i].lit) + len(whitespace);
-	}
-}
+const whitespace = " \t \n\n\n";  // to separate tokens
 
 type TestErrorHandler struct {
 	t *testing.T
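The pos field and the init() precomputation dropped above are not replaced by stored values at all: the rewritten test below tracks a running expected location eloc instead, advancing it after every token by the token text plus the separator. Since the new whitespace separator contains three newlines, each step amounts to (the two statements are copied from the hunk below; only the trailing comment is added):

	eloc.Pos += len(lit) + len(whitespace);
	eloc.Line += NewlineCount(lit) + whitespace_linecount;	// whitespace_linecount == 3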
@@ -174,38 +163,61 @@ func (h *TestErrorHandler) Error(loc scanner.Location, msg string) {
 }
 
 
+func NewlineCount(s string) int {
+	n := 0;
+	for i := 0; i < len(s); i++ {
+		if s[i] == '\n' {
+			n++;
+		}
+	}
+	return n;
+}
+
+
 func Test(t *testing.T) {
 	// make source
 	var src string;
 	for i, e := range tokens {
 		src += e.lit + whitespace;
 	}
 
-	// set up scanner
-	var s scanner.Scanner;
-	s.Init(io.StringBytes(src), &TestErrorHandler{t}, true);
+	whitespace_linecount := NewlineCount(whitespace);
 
 	// verify scan
-	for i, e := range tokens {
-		loc, tok, lit := s.Scan();
-		if loc.Pos != e.pos {
-			t.Errorf("bad position for %s: got %d, expected %d", e.lit, loc.Pos, e.pos);
+	index := 0;
+	eloc := scanner.Location{0, 1, 1};
+	scanner.Tokenize(io.StringBytes(src), &TestErrorHandler{t}, true,
+		func (loc Location, tok int, litb []byte) bool {
+			e := elt{token.EOF, "", special};
+			if index < len(tokens) {
+				e = tokens[index];
+			}
+			lit := string(litb);
+			if tok == token.EOF {
+				lit = "<EOF>";
+				eloc.Col = 0;
+			}
+			if loc.Pos != eloc.Pos {
+				t.Errorf("bad position for %s: got %d, expected %d", lit, loc.Pos, eloc.Pos);
+			}
+			if loc.Line != eloc.Line {
+				t.Errorf("bad line for %s: got %d, expected %d", lit, loc.Line, eloc.Line);
+			}
+			if loc.Col != eloc.Col {
+				t.Errorf("bad column for %s: got %d, expected %d", lit, loc.Col, eloc.Col);
+			}
+			if tok != e.tok {
+				t.Errorf("bad token for %s: got %s, expected %s", lit, token.TokenString(tok), token.TokenString(e.tok));
+			}
+			if token.IsLiteral(e.tok) && lit != e.lit {
+				t.Errorf("bad literal for %s: got %s, expected %s", lit, lit, e.lit);
+			}
+			if tokenclass(tok) != e.class {
+				t.Errorf("bad class for %s: got %d, expected %d", lit, tokenclass(tok), e.class);
+			}
+			eloc.Pos += len(lit) + len(whitespace);
+			eloc.Line += NewlineCount(lit) + whitespace_linecount;
+			index++;
+			return tok != token.EOF;
 		}
-		if tok != e.tok {
-			t.Errorf("bad token for %s: got %s, expected %s", e.lit, token.TokenString(tok), token.TokenString(e.tok));
-		}
-		if token.IsLiteral(e.tok) && string(lit) != e.lit {
-			t.Errorf("bad literal for %s: got %s, expected %s", e.lit, string(lit), e.lit);
-		}
-		if tokenclass(tok) != e.class {
-			t.Errorf("bad class for %s: got %d, expected %d", e.lit, tokenclass(tok), e.class);
-		}
-	}
-	loc, tok, lit := s.Scan();
-	if tok != token.EOF {
-		t.Errorf("bad token at eof: got %s, expected EOF", token.TokenString(tok));
-	}
-	if tokenclass(tok) != special {
-		t.Errorf("bad class at eof: got %d, expected %d", tokenclass(tok), special);
-	}
+	);
 }
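A worked first step of that bookkeeping, with every number derived from the table and separator above: the initial expectation is scanner.Location{0, 1, 1}, i.e. byte 0, line 1, column 1. After the first table entry, the 15-byte, newline-free comment "/* a comment */", the closure's final statements move the expectation for the second token to:

	eloc.Pos  = 0 + 15 + len(whitespace);		// len("/* a comment */") == 15
	eloc.Line = 1 + 0 + whitespace_linecount;	// == 4; all three '\n' come from the separator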