Rietveld Code Review Tool

Delta Between Two Patch Sets: src/pkg/go/scanner/scanner_test.go

Issue 4291070: code review 4291070: go/scanner: return literal as string instead of []byte (Closed)
Left Patch Set: diff -r 6659c68c1d45 https://go.googlecode.com/hg/ Created 13 years ago
Right Patch Set: diff -r 4073ecdfc054 https://go.googlecode.com/hg/ Created 13 years ago
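
For context: this CL changes go/scanner so that Scanner.Scan returns the token literal as a string instead of a []byte (the pre-change []byte form is inferred from the CL title; it is not visible in this delta). A minimal, self-contained sketch against the post-change API, mirroring how the tests below drive the scanner:

    package main

    import (
    	"fmt"
    	"go/scanner"
    	"go/token"
    )

    func main() {
    	src := []byte("package main // example input")
    	fset := token.NewFileSet()
    	file := fset.AddFile("example.go", fset.Base(), len(src))

    	var s scanner.Scanner
    	s.Init(file, src, nil /* no error handler */, scanner.ScanComments)

    	for {
    		pos, tok, lit := s.Scan()
    		if tok == token.EOF {
    			break
    		}
    		// lit now arrives as a string; callers no longer convert with string(lit).
    		fmt.Printf("%s\t%s\t%q\n", fset.Position(pos), tok, lit)
    	}
    }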
LEFT | RIGHT
1 // Copyright 2009 The Go Authors. All rights reserved. 1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style 2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file. 3 // license that can be found in the LICENSE file.
4 4
5 package scanner 5 package scanner
6 6
7 import ( 7 import (
8 "go/token" 8 "go/token"
9 "os" 9 "os"
10 "path/filepath" 10 "path/filepath"
11 "runtime" 11 "runtime"
12 "testing" 12 "testing"
13 ) 13 )
14 14
15 15
16 var fset = token.NewFileSet() 16 var fset = token.NewFileSet()
17 17
18 18
19 const /* class */ ( 19 const /* class */ (
20 special = iota 20 special = iota
21 literal 21 literal
22 operator 22 operator
23 keyword 23 keyword
24 ) 24 )
25 25
26 26
27 func tokenclass(tok token.Token) int { 27 func tokenclass(tok token.Token) int {
28 switch { 28 switch {
29 case tok.IsLiteral(): 29 case tok.IsLiteral():
30 return literal 30 return literal
31 case tok.IsOperator(): 31 case tok.IsOperator():
32 return operator 32 return operator
33 case tok.IsKeyword(): 33 case tok.IsKeyword():
34 return keyword 34 return keyword
35 } 35 }
36 return special 36 return special
37 } 37 }
38 38
39 39
40 type elt struct { 40 type elt struct {
41 tok token.Token 41 tok token.Token
42 lit string 42 lit string
43 class int 43 class int
44 } 44 }
45 45
46 46
47 var tokens = [...]elt{ 47 var tokens = [...]elt{
48 » // Special tokens 48 » // Special tokens
49 {token.COMMENT, "/* a comment */", special}, 49 {token.COMMENT, "/* a comment */", special},
50 {token.COMMENT, "// a comment \n", special}, 50 {token.COMMENT, "// a comment \n", special},
51 51
52 » // Identifiers and basic type literals 52 » // Identifiers and basic type literals
53 {token.IDENT, "foobar", literal}, 53 {token.IDENT, "foobar", literal},
54 {token.IDENT, "a۰۱۸", literal}, 54 {token.IDENT, "a۰۱۸", literal},
55 {token.IDENT, "foo६४", literal}, 55 {token.IDENT, "foo६४", literal},
56 {token.IDENT, "bar9876", literal}, 56 {token.IDENT, "bar9876", literal},
57 {token.INT, "0", literal}, 57 {token.INT, "0", literal},
58 {token.INT, "1", literal}, 58 {token.INT, "1", literal},
59 {token.INT, "123456789012345678890", literal}, 59 {token.INT, "123456789012345678890", literal},
60 {token.INT, "01234567", literal}, 60 {token.INT, "01234567", literal},
61 {token.INT, "0xcafebabe", literal}, 61 {token.INT, "0xcafebabe", literal},
62 {token.FLOAT, "0.", literal}, 62 {token.FLOAT, "0.", literal},
(...skipping 19 matching lines...)
82 {token.CHAR, "'\\xFF'", literal}, 82 {token.CHAR, "'\\xFF'", literal},
83 {token.CHAR, "'\\uff16'", literal}, 83 {token.CHAR, "'\\uff16'", literal},
84 {token.CHAR, "'\\U0000ff16'", literal}, 84 {token.CHAR, "'\\U0000ff16'", literal},
85 {token.STRING, "`foobar`", literal}, 85 {token.STRING, "`foobar`", literal},
86 {token.STRING, "`" + `foo 86 {token.STRING, "`" + `foo
87 bar` + 87 bar` +
88 "`", 88 "`",
89 literal, 89 literal,
90 }, 90 },
91 91
92 » // Operators and delimiters 92 » // Operators and delimiters
93 {token.ADD, "+", operator}, 93 {token.ADD, "+", operator},
94 {token.SUB, "-", operator}, 94 {token.SUB, "-", operator},
95 {token.MUL, "*", operator}, 95 {token.MUL, "*", operator},
96 {token.QUO, "/", operator}, 96 {token.QUO, "/", operator},
97 {token.REM, "%", operator}, 97 {token.REM, "%", operator},
98 98
99 {token.AND, "&", operator}, 99 {token.AND, "&", operator},
100 {token.OR, "|", operator}, 100 {token.OR, "|", operator},
101 {token.XOR, "^", operator}, 101 {token.XOR, "^", operator},
102 {token.SHL, "<<", operator}, 102 {token.SHL, "<<", operator},
(...skipping 36 matching lines...)
139 {token.LBRACE, "{", operator}, 139 {token.LBRACE, "{", operator},
140 {token.COMMA, ",", operator}, 140 {token.COMMA, ",", operator},
141 {token.PERIOD, ".", operator}, 141 {token.PERIOD, ".", operator},
142 142
143 {token.RPAREN, ")", operator}, 143 {token.RPAREN, ")", operator},
144 {token.RBRACK, "]", operator}, 144 {token.RBRACK, "]", operator},
145 {token.RBRACE, "}", operator}, 145 {token.RBRACE, "}", operator},
146 {token.SEMICOLON, ";", operator}, 146 {token.SEMICOLON, ";", operator},
147 {token.COLON, ":", operator}, 147 {token.COLON, ":", operator},
148 148
149 » // Keywords 149 » // Keywords
150 {token.BREAK, "break", keyword}, 150 {token.BREAK, "break", keyword},
151 {token.CASE, "case", keyword}, 151 {token.CASE, "case", keyword},
152 {token.CHAN, "chan", keyword}, 152 {token.CHAN, "chan", keyword},
153 {token.CONST, "const", keyword}, 153 {token.CONST, "const", keyword},
154 {token.CONTINUE, "continue", keyword}, 154 {token.CONTINUE, "continue", keyword},
155 155
156 {token.DEFAULT, "default", keyword}, 156 {token.DEFAULT, "default", keyword},
157 {token.DEFER, "defer", keyword}, 157 {token.DEFER, "defer", keyword},
158 {token.ELSE, "else", keyword}, 158 {token.ELSE, "else", keyword},
159 {token.FALLTHROUGH, "fallthrough", keyword}, 159 {token.FALLTHROUGH, "fallthrough", keyword},
(...skipping 12 matching lines...)
172 {token.RETURN, "return", keyword}, 172 {token.RETURN, "return", keyword},
173 173
174 {token.SELECT, "select", keyword}, 174 {token.SELECT, "select", keyword},
175 {token.STRUCT, "struct", keyword}, 175 {token.STRUCT, "struct", keyword},
176 {token.SWITCH, "switch", keyword}, 176 {token.SWITCH, "switch", keyword},
177 {token.TYPE, "type", keyword}, 177 {token.TYPE, "type", keyword},
178 {token.VAR, "var", keyword}, 178 {token.VAR, "var", keyword},
179 } 179 }
180 180
181 181
182 const whitespace = " \t \n\n\n" // to separate tokens 182 const whitespace = " \t \n\n\n" // to separate tokens
183 183
184 type testErrorHandler struct { 184 type testErrorHandler struct {
185 t *testing.T 185 t *testing.T
186 } 186 }
187 187
188 func (h *testErrorHandler) Error(pos token.Position, msg string) { 188 func (h *testErrorHandler) Error(pos token.Position, msg string) {
189 h.t.Errorf("Error() called (msg = %s)", msg) 189 h.t.Errorf("Error() called (msg = %s)", msg)
190 } 190 }
191 191
192 192
(...skipping 18 matching lines...)
211 } 211 }
212 if pos.Line != expected.Line { 212 if pos.Line != expected.Line {
213 t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line) 213 t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
214 } 214 }
215 if pos.Column != expected.Column { 215 if pos.Column != expected.Column {
216 t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Colu mn, expected.Column) 216 t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Colu mn, expected.Column)
217 } 217 }
218 } 218 }
219 219
220 220
221 // Verify that calling Scan() provides the correct results. 221 // Verify that calling Scan() provides the correct results.
222 func TestScan(t *testing.T) { 222 func TestScan(t *testing.T) {
223 » // make source 223 » // make source
224 var src string 224 var src string
225 for _, e := range tokens { 225 for _, e := range tokens {
226 src += e.lit + whitespace 226 src += e.lit + whitespace
227 } 227 }
228 src_linecount := newlineCount(src) 228 src_linecount := newlineCount(src)
229 whitespace_linecount := newlineCount(whitespace) 229 whitespace_linecount := newlineCount(whitespace)
230 230
231 » // verify scan 231 » // verify scan
232 var s Scanner 232 var s Scanner
233 s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments) 233 s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), &testErrorHandler{t}, ScanComments)
234 index := 0 234 index := 0
235 » epos := token.Position{"", 0, 1, 1} // expected position 235 » epos := token.Position{"", 0, 1, 1} // expected position
236 for { 236 for {
237 pos, tok, lit := s.Scan() 237 pos, tok, lit := s.Scan()
238 e := elt{token.EOF, "", special} 238 e := elt{token.EOF, "", special}
239 if index < len(tokens) { 239 if index < len(tokens) {
240 e = tokens[index] 240 e = tokens[index]
241 } 241 }
242 if tok == token.EOF { 242 if tok == token.EOF {
243 lit = "<EOF>" 243 lit = "<EOF>"
244 epos.Line = src_linecount 244 epos.Line = src_linecount
245 epos.Column = 2 245 epos.Column = 2
246 } 246 }
247 checkPos(t, lit, pos, epos) 247 checkPos(t, lit, pos, epos)
248 if tok != e.tok { 248 if tok != e.tok {
249 t.Errorf("bad token for %q: got %s, expected %s", lit, t ok.String(), e.tok.String()) 249 t.Errorf("bad token for %q: got %s, expected %s", lit, t ok.String(), e.tok.String())
250 } 250 }
251 if e.tok.IsLiteral() && lit != e.lit { 251 if e.tok.IsLiteral() && lit != e.lit {
252 t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit) 252 t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, e.lit)
253 } 253 }
254 if tokenclass(tok) != e.class { 254 if tokenclass(tok) != e.class {
255 t.Errorf("bad class for %q: got %d, expected %d", lit, t okenclass(tok), e.class) 255 t.Errorf("bad class for %q: got %d, expected %d", lit, t okenclass(tok), e.class)
256 } 256 }
257 epos.Offset += len(lit) + len(whitespace) 257 epos.Offset += len(lit) + len(whitespace)
258 epos.Line += newlineCount(lit) + whitespace_linecount 258 epos.Line += newlineCount(lit) + whitespace_linecount
259 if tok == token.COMMENT && lit[1] == '/' { 259 if tok == token.COMMENT && lit[1] == '/' {
260 » » » // correct for unaccounted '\n' in //-style comment 260 » » » // correct for unaccounted '\n' in //-style comment
261 epos.Offset++ 261 epos.Offset++
262 epos.Line++ 262 epos.Line++
263 } 263 }
264 index++ 264 index++
265 if tok == token.EOF { 265 if tok == token.EOF {
266 break 266 break
267 } 267 }
268 } 268 }
269 if s.ErrorCount != 0 { 269 if s.ErrorCount != 0 {
270 t.Errorf("found %d errors", s.ErrorCount) 270 t.Errorf("found %d errors", s.ErrorCount)
271 } 271 }
272 } 272 }
273 273
274 274
275 func checkSemi(t *testing.T, line string, mode uint) { 275 func checkSemi(t *testing.T, line string, mode uint) {
276 var S Scanner 276 var S Scanner
277 file := fset.AddFile("TestSemis", fset.Base(), len(line)) 277 file := fset.AddFile("TestSemis", fset.Base(), len(line))
278 S.Init(file, []byte(line), nil, mode) 278 S.Init(file, []byte(line), nil, mode)
279 pos, tok, lit := S.Scan() 279 pos, tok, lit := S.Scan()
280 for tok != token.EOF { 280 for tok != token.EOF {
281 if tok == token.ILLEGAL { 281 if tok == token.ILLEGAL {
282 » » » // the illegal token literal indicates what 282 » » » // the illegal token literal indicates what
283 » » » // kind of semicolon literal to expect 283 » » » // kind of semicolon literal to expect
284 semiLit := "\n" 284 semiLit := "\n"
285 if lit[0] == '#' { 285 if lit[0] == '#' {
286 semiLit = ";" 286 semiLit = ";"
287 } 287 }
288 » » » // next token must be a semicolon 288 » » » // next token must be a semicolon
289 semiPos := file.Position(pos) 289 semiPos := file.Position(pos)
290 semiPos.Offset++ 290 semiPos.Offset++
291 semiPos.Column++ 291 semiPos.Column++
292 pos, tok, lit = S.Scan() 292 pos, tok, lit = S.Scan()
293 if tok == token.SEMICOLON { 293 if tok == token.SEMICOLON {
294 if lit != semiLit { 294 if lit != semiLit {
295 t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit) 295 t.Errorf(`bad literal for %q: got %q, expected %q`, line, lit, semiLit)
296 } 296 }
297 checkPos(t, line, pos, semiPos) 297 checkPos(t, line, pos, semiPos)
298 } else { 298 } else {
299 t.Errorf("bad token for %q: got %s, expected ;", line, tok.String()) 299 t.Errorf("bad token for %q: got %s, expected ;", line, tok.String())
300 } 300 }
301 } else if tok == token.SEMICOLON { 301 } else if tok == token.SEMICOLON {
302 t.Errorf("bad token for %q: got ;, expected no ;", line) 302 t.Errorf("bad token for %q: got ;, expected no ;", line)
303 } 303 }
304 pos, tok, lit = S.Scan() 304 pos, tok, lit = S.Scan()
305 } 305 }
306 } 306 }
307 307
308 308
309 var lines = []string{ 309 var lines = []string{
310 » // # indicates a semicolon present in the source 310 » // # indicates a semicolon present in the source
311 » // $ indicates an automatically inserted semicolon 311 » // $ indicates an automatically inserted semicolon
312 "", 312 "",
313 "#;", 313 "#;",
314 "foo$\n", 314 "foo$\n",
315 "123$\n", 315 "123$\n",
316 "1.2$\n", 316 "1.2$\n",
317 "'x'$\n", 317 "'x'$\n",
318 `"x"` + "$\n", 318 `"x"` + "$\n",
319 "`x`$\n", 319 "`x`$\n",
320 320
321 "+\n", 321 "+\n",
(...skipping 106 matching lines...)
428 "package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n", 428 "package main$\n\nfunc main() {\n\tif {\n\t\treturn /* */ }$\n}$\n",
429 "package main$", 429 "package main$",
430 } 430 }
431 431
432 432
433 func TestSemis(t *testing.T) { 433 func TestSemis(t *testing.T) {
434 for _, line := range lines { 434 for _, line := range lines {
435 checkSemi(t, line, AllowIllegalChars|InsertSemis) 435 checkSemi(t, line, AllowIllegalChars|InsertSemis)
436 checkSemi(t, line, AllowIllegalChars|InsertSemis|ScanComments) 436 checkSemi(t, line, AllowIllegalChars|InsertSemis|ScanComments)
437 437
438 » » // if the input ended in newlines, the input must tokenize the 438 » » // if the input ended in newlines, the input must tokenize the
439 » » // same with or without those newlines 439 » » // same with or without those newlines
440 for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- { 440 for i := len(line) - 1; i >= 0 && line[i] == '\n'; i-- {
441 checkSemi(t, line[0:i], AllowIllegalChars|InsertSemis) 441 checkSemi(t, line[0:i], AllowIllegalChars|InsertSemis)
442 checkSemi(t, line[0:i], AllowIllegalChars|InsertSemis|ScanComments) 442 checkSemi(t, line[0:i], AllowIllegalChars|InsertSemis|ScanComments)
443 } 443 }
444 } 444 }
445 } 445 }
446 446
447 type segment struct { 447 type segment struct {
448 » srcline string // a line of source text 448 » srcline string // a line of source text
449 » filename string // filename for current token 449 » filename string // filename for current token
450 » line int // line number for current token 450 » line int // line number for current token
451 } 451 }
452 452
453 var segments = []segment{ 453 var segments = []segment{
454 » // exactly one token per line since the test consumes one token per segment 454 » // exactly one token per line since the test consumes one token per segment
455 {" line1", filepath.Join("dir", "TestLineComments"), 1}, 455 {" line1", filepath.Join("dir", "TestLineComments"), 1},
456 {"\nline2", filepath.Join("dir", "TestLineComments"), 2}, 456 {"\nline2", filepath.Join("dir", "TestLineComments"), 2},
457 » {"\nline3 //line File1.go:100", filepath.Join("dir", "TestLineComments" ), 3}, ÿ// bad line comment, ignoredÿ 457 » {"\nline3 //line File1.go:100", filepath.Join("dir", "TestLineComments" ), 3}, // bad line comment, ignored
458 {"\nline4", filepath.Join("dir", "TestLineComments"), 4}, 458 {"\nline4", filepath.Join("dir", "TestLineComments"), 4},
459 {"\n//line File1.go:100\n line100", filepath.Join("dir", "File1.go"), 1 00}, 459 {"\n//line File1.go:100\n line100", filepath.Join("dir", "File1.go"), 1 00},
460 {"\n//line File2.go:200\n line200", filepath.Join("dir", "File2.go"), 2 00}, 460 {"\n//line File2.go:200\n line200", filepath.Join("dir", "File2.go"), 2 00},
461 {"\n//line :1\n line1", "dir", 1}, 461 {"\n//line :1\n line1", "dir", 1},
462 {"\n//line foo:42\n line42", filepath.Join("dir", "foo"), 42}, 462 {"\n//line foo:42\n line42", filepath.Join("dir", "foo"), 42},
463 » {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, ÿ// bad line comment, ignoredÿ 463 » {"\n //line foo:42\n line44", filepath.Join("dir", "foo"), 44}, // bad line comment, ignored
464 » {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, ÿ// bad line comment, ignoredÿ 464 » {"\n//line foo 42\n line46", filepath.Join("dir", "foo"), 46}, // bad line comment, ignored
465 » {"\n//line foo:42 extra text\n line48", filepath.Join("dir", "foo"), 48 }, ÿ// bad line comment, ignoredÿ 465 » {"\n//line foo:42 extra text\n line48", filepath.Join("dir", "foo"), 48 }, // bad line comment, ignored
466 {"\n//line /bar:42\n line42", string(filepath.Separator) + "bar", 42}, 466 {"\n//line /bar:42\n line42", string(filepath.Separator) + "bar", 42},
467 {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42}, 467 {"\n//line ./foo:42\n line42", filepath.Join("dir", "foo"), 42},
468 {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b" , "c", "File1.go"), 100}, 468 {"\n//line a/b/c/File1.go:100\n line100", filepath.Join("dir", "a", "b" , "c", "File1.go"), 100},
469 } 469 }
470 470
471 var winsegments = []segment{ 471 var winsegments = []segment{
472 {"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100}, 472 {"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100},
473 } 473 }
474 474
475 475
476 // Verify that comments of the form "//line filename:line" are interpreted correctly. 476 // Verify that comments of the form "//line filename:line" are interpreted correctly.
477 func TestLineComments(t *testing.T) { 477 func TestLineComments(t *testing.T) {
478 if runtime.GOOS == "windows" { 478 if runtime.GOOS == "windows" {
479 segments = append(segments, winsegments...) 479 segments = append(segments, winsegments...)
480 } 480 }
481 481
482 » // make source 482 » // make source
483 var src string 483 var src string
484 for _, e := range segments { 484 for _, e := range segments {
485 src += e.srcline 485 src += e.srcline
486 } 486 }
487 487
488 » // verify scan 488 » // verify scan
489 var S Scanner 489 var S Scanner
490 file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src)) 490 file := fset.AddFile(filepath.Join("dir", "TestLineComments"), fset.Base(), len(src))
491 S.Init(file, []byte(src), nil, 0) 491 S.Init(file, []byte(src), nil, 0)
492 for _, s := range segments { 492 for _, s := range segments {
493 p, _, lit := S.Scan() 493 p, _, lit := S.Scan()
494 pos := file.Position(p) 494 pos := file.Position(p)
495 checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column}) 495 checkPos(t, lit, p, token.Position{s.filename, pos.Offset, s.line, pos.Column})
496 } 496 }
497 497
498 if S.ErrorCount != 0 { 498 if S.ErrorCount != 0 {
499 t.Errorf("found %d errors", S.ErrorCount) 499 t.Errorf("found %d errors", S.ErrorCount)
500 } 500 }
501 } 501 }
502 502
503 503
504 // Verify that initializing the same scanner more than once works correctly. 504 // Verify that initializing the same scanner more than once works correctly.
505 func TestInit(t *testing.T) { 505 func TestInit(t *testing.T) {
506 var s Scanner 506 var s Scanner
507 507
508 » // 1st init 508 » // 1st init
509 src1 := "if true { }" 509 src1 := "if true { }"
510 f1 := fset.AddFile("src1", fset.Base(), len(src1)) 510 f1 := fset.AddFile("src1", fset.Base(), len(src1))
511 s.Init(f1, []byte(src1), nil, 0) 511 s.Init(f1, []byte(src1), nil, 0)
512 if f1.Size() != len(src1) { 512 if f1.Size() != len(src1) {
513 t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(sr c1)) 513 t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(sr c1))
514 } 514 }
515 » s.Scan() // if 515 » s.Scan() // if
516 » s.Scan() // true 516 » s.Scan() // true
517 » _, tok, _ := s.Scan() // { 517 » _, tok, _ := s.Scan() // {
518 if tok != token.LBRACE { 518 if tok != token.LBRACE {
519 t.Errorf("bad token: got %s, expected %s", tok.String(), token.L BRACE) 519 t.Errorf("bad token: got %s, expected %s", tok.String(), token.L BRACE)
520 } 520 }
521 521
522 » // 2nd init 522 » // 2nd init
523 src2 := "go true { ]" 523 src2 := "go true { ]"
524 f2 := fset.AddFile("src2", fset.Base(), len(src2)) 524 f2 := fset.AddFile("src2", fset.Base(), len(src2))
525 s.Init(f2, []byte(src2), nil, 0) 525 s.Init(f2, []byte(src2), nil, 0)
526 if f2.Size() != len(src2) { 526 if f2.Size() != len(src2) {
527 t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(sr c2)) 527 t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(sr c2))
528 } 528 }
529 » _, tok, _ = s.Scan() // go 529 » _, tok, _ = s.Scan() // go
530 if tok != token.GO { 530 if tok != token.GO {
531 t.Errorf("bad token: got %s, expected %s", tok.String(), token.G O) 531 t.Errorf("bad token: got %s, expected %s", tok.String(), token.G O)
532 } 532 }
533 533
534 if s.ErrorCount != 0 { 534 if s.ErrorCount != 0 {
535 t.Errorf("found %d errors", s.ErrorCount) 535 t.Errorf("found %d errors", s.ErrorCount)
536 } 536 }
537 } 537 }
538 538
539 539
(...skipping 13 matching lines...)
553 } 553 }
554 } 554 }
555 555
556 if s.ErrorCount != 0 { 556 if s.ErrorCount != 0 {
557 t.Errorf("found %d errors", s.ErrorCount) 557 t.Errorf("found %d errors", s.ErrorCount)
558 } 558 }
559 } 559 }
560 560
561 561
562 func TestStdErrorHander(t *testing.T) { 562 func TestStdErrorHander(t *testing.T) {
563 » const src = "@\n" + // illegal character, causes an error 563 » const src = "@\n" + // illegal character, causes an error
564 » » "@ @\n" + // two errors on the same line 564 » » "@ @\n" + // two errors on the same line
565 "//line File2:20\n" + 565 "//line File2:20\n" +
566 » » "@\n" + ÿ// different file, but same lineÿ 566 » » "@\n" + // different file, but same line
567 "//line File2:1\n" + 567 "//line File2:1\n" +
568 » » "@ @\n" + ÿ// same file, decreasing line numberÿ 568 » » "@ @\n" + // same file, decreasing line number
569 "//line File1:1\n" + 569 "//line File1:1\n" +
570 » » "@ @ @" ÿ// original file, line 1 againÿ 570 » » "@ @ @" // original file, line 1 again
571 571
572 v := new(ErrorVector) 572 v := new(ErrorVector)
573 var s Scanner 573 var s Scanner
574 s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), v, 0) 574 s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), v, 0)
575 for { 575 for {
576 if _, tok, _ := s.Scan(); tok == token.EOF { 576 if _, tok, _ := s.Scan(); tok == token.EOF {
577 break 577 break
578 } 578 }
579 } 579 }
580 580
(...skipping 15 matching lines...)
596 PrintError(os.Stderr, list) 596 PrintError(os.Stderr, list)
597 } 597 }
598 598
599 if v.ErrorCount() != s.ErrorCount { 599 if v.ErrorCount() != s.ErrorCount {
600 t.Errorf("found %d errors, expected %d", v.ErrorCount(), s.Error Count) 600 t.Errorf("found %d errors, expected %d", v.ErrorCount(), s.Error Count)
601 } 601 }
602 } 602 }
603 603
604 604
605 type errorCollector struct { 605 type errorCollector struct {
606 » cnt int // number of errors encountered 606 » cnt int // number of errors encountered
607 » msg string // last error message encountered 607 » msg string // last error message encountered
608 » pos token.Position // last error position encountered 608 » pos token.Position // last error position encountered
609 } 609 }
610 610
611 611
612 func (h *errorCollector) Error(pos token.Position, msg string) { 612 func (h *errorCollector) Error(pos token.Position, msg string) {
613 h.cnt++ 613 h.cnt++
614 h.msg = msg 614 h.msg = msg
615 h.pos = pos 615 h.pos = pos
616 } 616 }
617 617
618 618
(...skipping 54 matching lines...)
673 {"\"abc\x00def\"", token.STRING, 4, "illegal character NUL"}, 673 {"\"abc\x00def\"", token.STRING, 4, "illegal character NUL"},
674 {"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"}, 674 {"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"},
675 } 675 }
676 676
677 677
678 func TestScanErrors(t *testing.T) { 678 func TestScanErrors(t *testing.T) {
679 for _, e := range errors { 679 for _, e := range errors {
680 checkError(t, e.src, e.tok, e.pos, e.err) 680 checkError(t, e.src, e.tok, e.pos, e.err)
681 } 681 }
682 } 682 }
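
Taken together with the tests above, the practical effect on callers is simply dropping the []byte-to-string conversion. A hedged before/after sketch (the helper firstIdent is illustrative, not part of this CL):

    package main

    import (
    	"fmt"
    	"go/scanner"
    	"go/token"
    )

    // firstIdent returns the first identifier scanned from src.
    // Illustrative helper; not code from this CL.
    func firstIdent(src []byte) string {
    	fset := token.NewFileSet()
    	var s scanner.Scanner
    	s.Init(fset.AddFile("", fset.Base(), len(src)), src, nil, 0)
    	for {
    		_, tok, lit := s.Scan()
    		if tok == token.EOF {
    			return ""
    		}
    		if tok == token.IDENT {
    			return lit // before this CL this needed string(lit)
    		}
    	}
    }

    func main() {
    	fmt.Println(firstIdent([]byte("package main"))) // prints: main
    }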
