1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package parser implements a parser for Go source files. Input may be
6// provided in a variety of forms (see the various Parse* functions); the
7// output is an abstract syntax tree (AST) representing the Go source. The
8// parser is invoked through one of the Parse* functions.
9//
10// The parser accepts a larger language than is syntactically permitted by
11// the Go spec, for simplicity, and for improved robustness in the presence
12// of syntax errors. For instance, in method declarations, the receiver is
13// treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
16package parser
17
18import (
19	"fmt"
20	"go/ast"
21	"go/build/constraint"
22	"go/internal/typeparams"
23	"go/scanner"
24	"go/token"
25	"strings"
26)
27
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File       // source file being parsed (for position translation)
	errors  scanner.ErrorList // parse errors, accumulated during parsing
	scanner scanner.Scanner   // token source

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode&Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup
	leadComment *ast.CommentGroup // last lead comment
	lineComment *ast.CommentGroup // last line comment
	top         bool              // in top of file (before package clause)
	goVersion   string            // minimum Go version found in //go:build comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	imports []*ast.ImportSpec // list of imports

	// nestLev is used to track and limit the recursion depth
	// during parsing.
	nestLev int
}
68
69func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
70	p.file = fset.AddFile(filename, -1, len(src))
71	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
72	p.scanner.Init(p.file, src, eh, scanner.ScanComments)
73
74	p.top = true
75	p.mode = mode
76	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
77	p.next()
78}
79
80// ----------------------------------------------------------------------------
81// Parsing support
82
83func (p *parser) printTrace(a ...any) {
84	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
85	const n = len(dots)
86	pos := p.file.Position(p.pos)
87	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
88	i := 2 * p.indent
89	for i > n {
90		fmt.Print(dots)
91		i -= n
92	}
93	// i <= n
94	fmt.Print(dots[0:i])
95	fmt.Println(a...)
96}
97
// trace prints the opening trace line for msg and increases the trace
// indentation. It returns p so it can be paired with un in the
// pattern: defer un(trace(p, "...")).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
103
// un is the counterpart to trace: it decreases the trace indentation
// and prints the closing parenthesis.
//
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
109
// maxNestLev is the deepest we're willing to recurse during parsing
const maxNestLev int = 1e5

// incNestLev increments the nesting depth and aborts parsing with a
// bailout panic once maxNestLev is exceeded, protecting against stack
// exhaustion on pathologically nested input. It returns p so it can be
// paired with decNestLev: defer decNestLev(incNestLev(p)).
func incNestLev(p *parser) *parser {
	p.nestLev++
	if p.nestLev > maxNestLev {
		p.error(p.pos, "exceeded max nesting depth")
		panic(bailout{})
	}
	return p
}
121
// decNestLev is used to track nesting depth during parsing to prevent stack exhaustion.
// It is used along with incNestLev in a similar fashion to how un and trace are used.
func decNestLev(p *parser) {
	p.nestLev--
}
127
// Advance to the next token, skipping comments unless the ParseComments
// mode is set. Also records the //go:build version constraint if one is
// seen before the package clause.
func (p *parser) next0() {
	// Because of one-token look-ahead, print the previous token
	// when tracing as it provides a more readable output. The
	// very first token (!p.pos.IsValid()) is not initialized
	// (it is token.ILLEGAL), so don't print it.
	if p.trace && p.pos.IsValid() {
		s := p.tok.String()
		switch {
		case p.tok.IsLiteral():
			p.printTrace(s, p.lit)
		case p.tok.IsOperator(), p.tok.IsKeyword():
			p.printTrace("\"" + s + "\"")
		default:
			p.printTrace(s)
		}
	}

	for {
		p.pos, p.tok, p.lit = p.scanner.Scan()
		if p.tok == token.COMMENT {
			// While still before the package clause, capture the
			// minimum Go version implied by a //go:build comment.
			if p.top && strings.HasPrefix(p.lit, "//go:build") {
				if x, err := constraint.Parse(p.lit); err == nil {
					p.goVersion = constraint.GoVersion(x)
				}
			}
			if p.mode&ParseComments == 0 {
				continue // skip comments entirely
			}
		} else {
			// Found a non-comment; top of file is over.
			p.top = false
		}
		break
	}
}
164
165// Consume a comment and return it and the line on which it ends.
166func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
167	// /*-style comments may end on a different line than where they start.
168	// Scan the comment for '\n' chars and adjust endline accordingly.
169	endline = p.file.Line(p.pos)
170	if p.lit[1] == '*' {
171		// don't use range here - no need to decode Unicode code points
172		for i := 0; i < len(p.lit); i++ {
173			if p.lit[i] == '\n' {
174				endline++
175			}
176		}
177	}
178
179	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
180	p.next0()
181
182	return
183}
184
185// Consume a group of adjacent comments, add it to the parser's
186// comments list, and return it together with the line at which
187// the last comment in the group ends. A non-comment token or n
188// empty lines terminate a comment group.
189func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
190	var list []*ast.Comment
191	endline = p.file.Line(p.pos)
192	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
193		var comment *ast.Comment
194		comment, endline = p.consumeComment()
195		list = append(list, comment)
196	}
197
198	// add comment group to the comments list
199	comments = &ast.CommentGroup{List: list}
200	p.comments = append(p.comments, comments)
201
202	return
203}
204
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos // position of the token we are leaving behind
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline || p.tok == token.SEMICOLON || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
253
// A bailout panic is raised to indicate early termination. pos and msg are
// only populated when bailing out of object resolution.
type bailout struct {
	pos token.Pos // position at which the bailout occurred (may be unset)
	msg string    // reason for the bailout (may be empty)
}
260
261func (p *parser) error(pos token.Pos, msg string) {
262	if p.trace {
263		defer un(trace(p, "error: "+msg))
264	}
265
266	epos := p.file.Position(pos)
267
268	// If AllErrors is not set, discard errors reported on the same line
269	// as the last recorded error and stop parsing if there are more than
270	// 10 errors.
271	if p.mode&AllErrors == 0 {
272		n := len(p.errors)
273		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
274			return // discard - likely a spurious error
275		}
276		if n > 10 {
277			panic(bailout{})
278		}
279	}
280
281	p.errors.Add(epos, msg)
282}
283
284func (p *parser) errorExpected(pos token.Pos, msg string) {
285	msg = "expected " + msg
286	if pos == p.pos {
287		// the error happened at the current position;
288		// make the error message more specific
289		switch {
290		case p.tok == token.SEMICOLON && p.lit == "\n":
291			msg += ", found newline"
292		case p.tok.IsLiteral():
293			// print 123 rather than 'INT', etc.
294			msg += ", found " + p.lit
295		default:
296			msg += ", found '" + p.tok.String() + "'"
297		}
298	}
299	p.error(pos, msg)
300}
301
302func (p *parser) expect(tok token.Token) token.Pos {
303	pos := p.pos
304	if p.tok != tok {
305		p.errorExpected(pos, "'"+tok.String()+"'")
306	}
307	p.next() // make progress
308	return pos
309}
310
311// expect2 is like expect, but it returns an invalid position
312// if the expected token is not found.
313func (p *parser) expect2(tok token.Token) (pos token.Pos) {
314	if p.tok == tok {
315		pos = p.pos
316	} else {
317		p.errorExpected(p.pos, "'"+tok.String()+"'")
318	}
319	p.next() // make progress
320	return
321}
322
323// expectClosing is like expect but provides a better error message
324// for the common case of a missing comma before a newline.
325func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
326	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
327		p.error(p.pos, "missing ',' before newline in "+context)
328		p.next()
329	}
330	return p.expect(tok)
331}
332
// expectSemi consumes a semicolon and returns the applicable line comment.
// A ',' is accepted in place of ';' but reported as an error; any other
// unexpected token triggers error recovery to the next statement start.
func (p *parser) expectSemi() (comment *ast.CommentGroup) {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		switch p.tok {
		case token.COMMA:
			// permit a ',' instead of a ';' but complain
			p.errorExpected(p.pos, "';'")
			fallthrough
		case token.SEMICOLON:
			if p.lit == ";" {
				// explicit semicolon
				p.next()
				comment = p.lineComment // use following comments
			} else {
				// artificial semicolon (inserted at a newline):
				// the applicable comment precedes the next token,
				// so capture p.lineComment before advancing
				comment = p.lineComment // use preceding comments
				p.next()
			}
			return comment
		default:
			p.errorExpected(p.pos, "';'")
			p.advance(stmtStart)
		}
	}
	return nil
}
360
361func (p *parser) atComma(context string, follow token.Token) bool {
362	if p.tok == token.COMMA {
363		return true
364	}
365	if p.tok != follow {
366		msg := "missing ','"
367		if p.tok == token.SEMICOLON && p.lit == "\n" {
368			msg += " before newline"
369		}
370		p.error(p.pos, msg+" in "+context)
371		return true // "insert" comma and continue
372	}
373	return false
374}
375
// assert panics with an internal-error message if cond does not hold.
// It guards invariants that indicate a bug in the parser itself.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
381
// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
// The syncPos/syncCnt bookkeeping guarantees overall forward progress
// even when multiple productions recover at the same position.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}
411
// stmtStart is the set of tokens that may begin a statement;
// used by advance to resynchronize at statement boundaries.
var stmtStart = map[token.Token]bool{
	token.BREAK:       true,
	token.CONST:       true,
	token.CONTINUE:    true,
	token.DEFER:       true,
	token.FALLTHROUGH: true,
	token.FOR:         true,
	token.GO:          true,
	token.GOTO:        true,
	token.IF:          true,
	token.RETURN:      true,
	token.SELECT:      true,
	token.SWITCH:      true,
	token.TYPE:        true,
	token.VAR:         true,
}
428
// declStart is the set of tokens that may begin a declaration;
// used by advance to resynchronize at declaration boundaries.
var declStart = map[token.Token]bool{
	token.IMPORT: true,
	token.CONST:  true,
	token.TYPE:   true,
	token.VAR:    true,
}
435
// exprEnd is the set of tokens that may follow an expression;
// used by advance to resynchronize after a malformed expression.
var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}
444
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	// p.file.Offset panics on out-of-range positions; use that as the
	// validity check and substitute the EOF position on recovery.
	defer func() {
		if recover() != nil {
			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
463
464// ----------------------------------------------------------------------------
465// Identifiers
466
467func (p *parser) parseIdent() *ast.Ident {
468	pos := p.pos
469	name := "_"
470	if p.tok == token.IDENT {
471		name = p.lit
472		p.next()
473	} else {
474		p.expect(token.IDENT) // use expect() error handling
475	}
476	return &ast.Ident{NamePos: pos, Name: name}
477}
478
479func (p *parser) parseIdentList() (list []*ast.Ident) {
480	if p.trace {
481		defer un(trace(p, "IdentList"))
482	}
483
484	list = append(list, p.parseIdent())
485	for p.tok == token.COMMA {
486		p.next()
487		list = append(list, p.parseIdent())
488	}
489
490	return
491}
492
493// ----------------------------------------------------------------------------
494// Common productions
495
496// If lhs is set, result list elements which are identifiers are not resolved.
497func (p *parser) parseExprList() (list []ast.Expr) {
498	if p.trace {
499		defer un(trace(p, "ExpressionList"))
500	}
501
502	list = append(list, p.parseExpr())
503	for p.tok == token.COMMA {
504		p.next()
505		list = append(list, p.parseExpr())
506	}
507
508	return
509}
510
511func (p *parser) parseList(inRhs bool) []ast.Expr {
512	old := p.inRhs
513	p.inRhs = inRhs
514	list := p.parseExprList()
515	p.inRhs = old
516	return list
517}
518
519// ----------------------------------------------------------------------------
520// Types
521
522func (p *parser) parseType() ast.Expr {
523	if p.trace {
524		defer un(trace(p, "Type"))
525	}
526
527	typ := p.tryIdentOrType()
528
529	if typ == nil {
530		pos := p.pos
531		p.errorExpected(pos, "type")
532		p.advance(exprEnd)
533		return &ast.BadExpr{From: pos, To: p.pos}
534	}
535
536	return typ
537}
538
539func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
540	if p.trace {
541		defer un(trace(p, "QualifiedIdent"))
542	}
543
544	typ := p.parseTypeName(ident)
545	if p.tok == token.LBRACK {
546		typ = p.parseTypeInstance(typ)
547	}
548
549	return typ
550}
551
552// If the result is an identifier, it is not resolved.
553func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
554	if p.trace {
555		defer un(trace(p, "TypeName"))
556	}
557
558	if ident == nil {
559		ident = p.parseIdent()
560	}
561
562	if p.tok == token.PERIOD {
563		// ident is a package name
564		p.next()
565		sel := p.parseIdent()
566		return &ast.SelectorExpr{X: ident, Sel: sel}
567	}
568
569	return ident
570}
571
// parseArrayType parses an array or slice type.
// "[" has already been consumed, and lbrack is its position.
// If len != nil it is the already consumed array length.
func (p *parser) parseArrayType(lbrack token.Pos, len ast.Expr) *ast.ArrayType {
	if p.trace {
		defer un(trace(p, "ArrayType"))
	}

	if len == nil {
		p.exprLev++
		// always permit ellipsis for more fault-tolerant parsing
		if p.tok == token.ELLIPSIS {
			len = &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
		} else if p.tok != token.RBRACK {
			// a slice type has no length expression at all
			len = p.parseRhs()
		}
		p.exprLev--
	}
	if p.tok == token.COMMA {
		// Trailing commas are accepted in type parameter
		// lists but not in array type declarations.
		// Accept for better error handling but complain.
		p.error(p.pos, "unexpected comma; expecting ]")
		p.next()
	}
	p.expect(token.RBRACK)
	elt := p.parseType()
	return &ast.ArrayType{Lbrack: lbrack, Len: len, Elt: elt}
}
601
// parseArrayFieldOrTypeInstance disambiguates between a struct field of
// array/slice type ("x []E", "x [P]E") and an embedded generic type
// instantiation ("x[P1, P2, ...]"). For the field case it returns the
// name x and the array type; for the instantiation case it returns a
// nil name and the packed index expression.
func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ArrayFieldOrTypeInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	trailingComma := token.NoPos // if valid, the position of a trailing comma preceding the ']'
	var args []ast.Expr
	if p.tok != token.RBRACK {
		p.exprLev++
		args = append(args, p.parseRhs())
		for p.tok == token.COMMA {
			comma := p.pos
			p.next()
			if p.tok == token.RBRACK {
				trailingComma = comma
				break
			}
			args = append(args, p.parseRhs())
		}
		p.exprLev--
	}
	rbrack := p.expect(token.RBRACK)

	if len(args) == 0 {
		// x []E
		elt := p.parseType()
		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
	}

	// x [P]E or x[P]
	if len(args) == 1 {
		// If a type follows, the single argument was an array length.
		elt := p.tryIdentOrType()
		if elt != nil {
			// x [P]E
			if trailingComma.IsValid() {
				// Trailing commas are invalid in array type fields.
				p.error(trailingComma, "unexpected comma; expecting ]")
			}
			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
		}
	}

	// x[P], x[P1, P2], ...
	return nil, typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}
648
// parseFieldDecl parses a single field declaration inside a struct type:
// either a list of named fields with a common type, or an embedded type.
// Parenthesized forms are rejected with an error but parsed anyway for
// better recovery. The optional tag and the terminating semicolon are
// consumed as well.
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}

	doc := p.leadComment

	var names []*ast.Ident
	var typ ast.Expr
	switch p.tok {
	case token.IDENT:
		name := p.parseIdent()
		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
			// embedded type
			typ = name
			if p.tok == token.PERIOD {
				typ = p.parseQualifiedIdent(name)
			}
		} else {
			// name1, name2, ... T
			names = []*ast.Ident{name}
			for p.tok == token.COMMA {
				p.next()
				names = append(names, p.parseIdent())
			}
			// Careful dance: We don't know if we have an embedded instantiated
			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
			if len(names) == 1 && p.tok == token.LBRACK {
				name, typ = p.parseArrayFieldOrTypeInstance(name)
				if name == nil {
					names = nil
				}
			} else {
				// T P
				typ = p.parseType()
			}
		}
	case token.MUL:
		star := p.pos
		p.next()
		if p.tok == token.LPAREN {
			// *(T)
			p.error(p.pos, "cannot parenthesize embedded type")
			p.next()
			typ = p.parseQualifiedIdent(nil)
			// expect closing ')' but no need to complain if missing
			if p.tok == token.RPAREN {
				p.next()
			}
		} else {
			// *T
			typ = p.parseQualifiedIdent(nil)
		}
		typ = &ast.StarExpr{Star: star, X: typ}

	case token.LPAREN:
		p.error(p.pos, "cannot parenthesize embedded type")
		p.next()
		if p.tok == token.MUL {
			// (*T)
			star := p.pos
			p.next()
			typ = &ast.StarExpr{Star: star, X: p.parseQualifiedIdent(nil)}
		} else {
			// (T)
			typ = p.parseQualifiedIdent(nil)
		}
		// expect closing ')' but no need to complain if missing
		if p.tok == token.RPAREN {
			p.next()
		}

	default:
		pos := p.pos
		p.errorExpected(pos, "field name or embedded type")
		p.advance(exprEnd)
		typ = &ast.BadExpr{From: pos, To: p.pos}
	}

	// optional struct tag
	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}

	comment := p.expectSemi()

	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: comment}
	return field
}
739
740func (p *parser) parseStructType() *ast.StructType {
741	if p.trace {
742		defer un(trace(p, "StructType"))
743	}
744
745	pos := p.expect(token.STRUCT)
746	lbrace := p.expect(token.LBRACE)
747	var list []*ast.Field
748	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
749		// a field declaration cannot start with a '(' but we accept
750		// it here for more robust parsing and better error messages
751		// (parseFieldDecl will check and complain if necessary)
752		list = append(list, p.parseFieldDecl())
753	}
754	rbrace := p.expect(token.RBRACE)
755
756	return &ast.StructType{
757		Struct: pos,
758		Fields: &ast.FieldList{
759			Opening: lbrace,
760			List:    list,
761			Closing: rbrace,
762		},
763	}
764}
765
766func (p *parser) parsePointerType() *ast.StarExpr {
767	if p.trace {
768		defer un(trace(p, "PointerType"))
769	}
770
771	star := p.expect(token.MUL)
772	base := p.parseType()
773
774	return &ast.StarExpr{Star: star, X: base}
775}
776
777func (p *parser) parseDotsType() *ast.Ellipsis {
778	if p.trace {
779		defer un(trace(p, "DotsType"))
780	}
781
782	pos := p.expect(token.ELLIPSIS)
783	elt := p.parseType()
784
785	return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
786}
787
// A field is the intermediate (name, type) representation used while
// parsing parameter and type parameter lists; it corresponds to
// syntax.Field in the compiler's syntax package.
type field struct {
	name *ast.Ident // nil for an unnamed (type-only) entry
	typ  ast.Expr   // nil while no type has been determined yet
}
792
// parseParamDecl parses one entry of a parameter or type parameter list.
// If name != nil it is the already-consumed first identifier of the entry.
// typeSetsOK indicates that type-set notation ("~T", "T1|T2") is permitted,
// i.e. that a type parameter list is being parsed.
func (p *parser) parseParamDecl(name *ast.Ident, typeSetsOK bool) (f field) {
	// TODO(rFindley) refactor to be more similar to paramDeclOrNil in the syntax
	// package
	if p.trace {
		defer un(trace(p, "ParamDeclOrNil"))
	}

	// Temporarily overwrite p.tok so the switch below takes the IDENT
	// branch for an already-consumed name; ptok restores it afterwards.
	ptok := p.tok
	if name != nil {
		p.tok = token.IDENT // force token.IDENT case in switch below
	} else if typeSetsOK && p.tok == token.TILDE {
		// "~" ...
		return field{nil, p.embeddedElem(nil)}
	}

	switch p.tok {
	case token.IDENT:
		// name
		if name != nil {
			f.name = name
			p.tok = ptok
		} else {
			f.name = p.parseIdent()
		}
		switch p.tok {
		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
			// name type
			f.typ = p.parseType()

		case token.LBRACK:
			// name "[" type1, ..., typeN "]" or name "[" n "]" type
			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)

		case token.ELLIPSIS:
			// name "..." type
			f.typ = p.parseDotsType()
			return // don't allow ...type "|" ...

		case token.PERIOD:
			// name "." ...
			f.typ = p.parseQualifiedIdent(f.name)
			f.name = nil

		case token.TILDE:
			if typeSetsOK {
				f.typ = p.embeddedElem(nil)
				return
			}

		case token.OR:
			if typeSetsOK {
				// name "|" typeset
				f.typ = p.embeddedElem(f.name)
				f.name = nil
				return
			}
		}

	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
		// type
		f.typ = p.parseType()

	case token.ELLIPSIS:
		// "..." type
		// (always accepted)
		f.typ = p.parseDotsType()
		return // don't allow ...type "|" ...

	default:
		// TODO(rfindley): this is incorrect in the case of type parameter lists
		//                 (should be "']'" in that case)
		p.errorExpected(p.pos, "')'")
		p.advance(exprEnd)
	}

	// [name] type "|"
	if typeSetsOK && p.tok == token.OR && f.typ != nil {
		f.typ = p.embeddedElem(f.typ)
	}

	return
}
875
// parseParameterList parses a comma-separated list of parameters or type
// parameters, terminated by closing. If name0 and/or typ0 are non-nil,
// they are the first name and type of the list, already consumed by the
// caller. The result is the list converted to []*ast.Field, with names
// sharing a type collected into a single field.
func (p *parser) parseParameterList(name0 *ast.Ident, typ0 ast.Expr, closing token.Token) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}

	// Type parameters are the only parameter list closed by ']'.
	tparams := closing == token.RBRACK

	pos0 := p.pos
	if name0 != nil {
		pos0 = name0.Pos()
	} else if typ0 != nil {
		pos0 = typ0.Pos()
	}

	// Note: The code below matches the corresponding code in the syntax
	//       parser closely. Changes must be reflected in either parser.
	//       For the code to match, we use the local []field list that
	//       corresponds to []syntax.Field. At the end, the list must be
	//       converted into an []*ast.Field.

	var list []field
	var named int // number of parameters that have an explicit name and type
	var typed int // number of parameters that have an explicit type

	for name0 != nil || p.tok != closing && p.tok != token.EOF {
		var par field
		if typ0 != nil {
			if tparams {
				typ0 = p.embeddedElem(typ0)
			}
			par = field{name0, typ0}
		} else {
			par = p.parseParamDecl(name0, tparams)
		}
		name0 = nil // 1st name was consumed if present
		typ0 = nil  // 1st typ was consumed if present
		if par.name != nil || par.typ != nil {
			list = append(list, par)
			if par.name != nil && par.typ != nil {
				named++
			}
			if par.typ != nil {
				typed++
			}
		}
		if !p.atComma("parameter list", closing) {
			break
		}
		p.next()
	}

	if len(list) == 0 {
		return // not uncommon
	}

	// distribute parameter types (len(list) > 0)
	if named == 0 {
		// all unnamed => found names are type names
		for i := 0; i < len(list); i++ {
			par := &list[i]
			if typ := par.name; typ != nil {
				par.typ = typ
				par.name = nil
			}
		}
		if tparams {
			// This is the same error handling as below, adjusted for type parameters only.
			// See comment below for details. (go.dev/issue/64534)
			var errPos token.Pos
			var msg string
			if named == typed /* same as typed == 0 */ {
				errPos = p.pos // position error at closing ]
				msg = "missing type constraint"
			} else {
				errPos = pos0 // position at opening [ or first name
				msg = "missing type parameter name"
				if len(list) == 1 {
					msg += " or invalid array length"
				}
			}
			p.error(errPos, msg)
		}
	} else if named != len(list) {
		// some named or we're in a type parameter list => all must be named
		var errPos token.Pos // left-most error position (or invalid)
		var typ ast.Expr     // current type (from right to left)
		for i := len(list) - 1; i >= 0; i-- {
			if par := &list[i]; par.typ != nil {
				typ = par.typ
				if par.name == nil {
					errPos = typ.Pos()
					n := ast.NewIdent("_")
					n.NamePos = errPos // correct position
					par.name = n
				}
			} else if typ != nil {
				par.typ = typ
			} else {
				// par.typ == nil && typ == nil => we only have a par.name
				errPos = par.name.Pos()
				par.typ = &ast.BadExpr{From: errPos, To: p.pos}
			}
		}
		if errPos.IsValid() {
			var msg string
			if tparams {
				// Not all parameters are named because named != len(list).
				// If named == typed we must have parameters that have no types,
				// and they must be at the end of the parameter list, otherwise
				// the types would have been filled in by the right-to-left sweep
				// above and we wouldn't have an error. Since we are in a type
				// parameter list, the missing types are constraints.
				if named == typed {
					errPos = p.pos // position error at closing ]
					msg = "missing type constraint"
				} else {
					msg = "missing type parameter name"
					// go.dev/issue/60812
					if len(list) == 1 {
						msg += " or invalid array length"
					}
				}
			} else {
				msg = "mixed named and unnamed parameters"
			}
			p.error(errPos, msg)
		}
	}

	// Convert list to []*ast.Field.
	// If list contains types only, each type gets its own ast.Field.
	if named == 0 {
		// parameter list consists of types only
		for _, par := range list {
			assert(par.typ != nil, "nil type in unnamed parameter list")
			params = append(params, &ast.Field{Type: par.typ})
		}
		return
	}

	// If the parameter list consists of named parameters with types,
	// collect all names with the same types into a single ast.Field.
	var names []*ast.Ident
	var typ ast.Expr
	addParams := func() {
		assert(typ != nil, "nil type in named parameter list")
		field := &ast.Field{Names: names, Type: typ}
		params = append(params, field)
		names = nil
	}
	for _, par := range list {
		if par.typ != typ {
			if len(names) > 0 {
				addParams()
			}
			typ = par.typ
		}
		names = append(names, par.name)
	}
	if len(names) > 0 {
		addParams()
	}
	return
}
1041
1042func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
1043	if p.trace {
1044		defer un(trace(p, "Parameters"))
1045	}
1046
1047	if acceptTParams && p.tok == token.LBRACK {
1048		opening := p.pos
1049		p.next()
1050		// [T any](params) syntax
1051		list := p.parseParameterList(nil, nil, token.RBRACK)
1052		rbrack := p.expect(token.RBRACK)
1053		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
1054		// Type parameter lists must not be empty.
1055		if tparams.NumFields() == 0 {
1056			p.error(tparams.Closing, "empty type parameter list")
1057			tparams = nil // avoid follow-on errors
1058		}
1059	}
1060
1061	opening := p.expect(token.LPAREN)
1062
1063	var fields []*ast.Field
1064	if p.tok != token.RPAREN {
1065		fields = p.parseParameterList(nil, nil, token.RPAREN)
1066	}
1067
1068	rparen := p.expect(token.RPAREN)
1069	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}
1070
1071	return
1072}
1073
1074func (p *parser) parseResult() *ast.FieldList {
1075	if p.trace {
1076		defer un(trace(p, "Result"))
1077	}
1078
1079	if p.tok == token.LPAREN {
1080		_, results := p.parseParameters(false)
1081		return results
1082	}
1083
1084	typ := p.tryIdentOrType()
1085	if typ != nil {
1086		list := make([]*ast.Field, 1)
1087		list[0] = &ast.Field{Type: typ}
1088		return &ast.FieldList{List: list}
1089	}
1090
1091	return nil
1092}
1093
1094func (p *parser) parseFuncType() *ast.FuncType {
1095	if p.trace {
1096		defer un(trace(p, "FuncType"))
1097	}
1098
1099	pos := p.expect(token.FUNC)
1100	tparams, params := p.parseParameters(true)
1101	if tparams != nil {
1102		p.error(tparams.Pos(), "function type must have no type parameters")
1103	}
1104	results := p.parseResult()
1105
1106	return &ast.FuncType{Func: pos, Params: params, Results: results}
1107}
1108
// parseMethodSpec parses a single interface element: a method signature,
// an embedded type name, or an embedded (possibly instantiated) generic
// type. The returned field has non-nil Names exactly when the element is
// a method.
func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}

	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName(nil)
	if ident, _ := x.(*ast.Ident); ident != nil {
		// Starts with a plain identifier: could be a method name or an
		// embedded type; the next token disambiguates.
		switch {
		case p.tok == token.LBRACK:
			// generic method or embedded instantiated type
			lbrack := p.pos
			p.next()
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			// A lone identifier followed by neither ',' nor ']' must be the
			// first type parameter name of a (disallowed) generic method.
			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
				// generic method m[T any]
				//
				// Interface methods do not have type parameters. We parse them for a
				// better error message and improved error recovery.
				_ = p.parseParameterList(name0, nil, token.RBRACK)
				_ = p.expect(token.RBRACK)
				p.error(lbrack, "interface method must have no type parameters")

				// TODO(rfindley) refactor to share code with parseFuncType.
				_, params := p.parseParameters(false)
				results := p.parseResult()
				idents = []*ast.Ident{ident}
				typ = &ast.FuncType{
					Func:    token.NoPos,
					Params:  params,
					Results: results,
				}
			} else {
				// embedded instantiated type
				// TODO(rfindley) should resolve all identifiers in x.
				list := []ast.Expr{x}
				if p.atComma("type argument list", token.RBRACK) {
					p.exprLev++
					p.next()
					// Collect the remaining comma-separated type arguments.
					for p.tok != token.RBRACK && p.tok != token.EOF {
						list = append(list, p.parseType())
						if !p.atComma("type argument list", token.RBRACK) {
							break
						}
						p.next()
					}
					p.exprLev--
				}
				rbrack := p.expectClosing(token.RBRACK, "type argument list")
				typ = typeparams.PackIndexExpr(ident, lbrack, list, rbrack)
			}
		case p.tok == token.LPAREN:
			// ordinary method
			// TODO(rfindley) refactor to share code with parseFuncType.
			_, params := p.parseParameters(false)
			results := p.parseResult()
			idents = []*ast.Ident{ident}
			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
		default:
			// embedded type
			typ = x
		}
	} else {
		// embedded, possibly instantiated type
		typ = x
		if p.tok == token.LBRACK {
			// embedded instantiated interface
			typ = p.parseTypeInstance(typ)
		}
	}

	// Comment is added at the callsite: the field below may joined with
	// additional type specs using '|'.
	// TODO(rfindley) this should be refactored.
	// TODO(rfindley) add more tests for comment handling.
	return &ast.Field{Doc: doc, Names: idents, Type: typ}
}
1190
1191func (p *parser) embeddedElem(x ast.Expr) ast.Expr {
1192	if p.trace {
1193		defer un(trace(p, "EmbeddedElem"))
1194	}
1195	if x == nil {
1196		x = p.embeddedTerm()
1197	}
1198	for p.tok == token.OR {
1199		t := new(ast.BinaryExpr)
1200		t.OpPos = p.pos
1201		t.Op = token.OR
1202		p.next()
1203		t.X = x
1204		t.Y = p.embeddedTerm()
1205		x = t
1206	}
1207	return x
1208}
1209
1210func (p *parser) embeddedTerm() ast.Expr {
1211	if p.trace {
1212		defer un(trace(p, "EmbeddedTerm"))
1213	}
1214	if p.tok == token.TILDE {
1215		t := new(ast.UnaryExpr)
1216		t.OpPos = p.pos
1217		t.Op = token.TILDE
1218		p.next()
1219		t.X = p.parseType()
1220		return t
1221	}
1222
1223	t := p.tryIdentOrType()
1224	if t == nil {
1225		pos := p.pos
1226		p.errorExpected(pos, "~ term or type")
1227		p.advance(exprEnd)
1228		return &ast.BadExpr{From: pos, To: p.pos}
1229	}
1230
1231	return t
1232}
1233
// parseInterfaceType parses an interface type: methods, embedded types,
// and type-set elements (unions of terms, possibly with ~).
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)

	var list []*ast.Field

parseElements:
	for {
		switch {
		case p.tok == token.IDENT:
			// Method or embedded type name; parseMethodSpec disambiguates.
			f := p.parseMethodSpec()
			if f.Names == nil {
				// Not a method: the embedded type may be the first term of a union.
				f.Type = p.embeddedElem(f.Type)
			}
			f.Comment = p.expectSemi()
			list = append(list, f)
		case p.tok == token.TILDE:
			// Union element starting with an underlying-type term (~T | ...).
			typ := p.embeddedElem(nil)
			comment := p.expectSemi()
			list = append(list, &ast.Field{Type: typ, Comment: comment})
		default:
			// Any other type expression (e.g. a literal type) may also start
			// a union element; otherwise the element list is done.
			if t := p.tryIdentOrType(); t != nil {
				typ := p.embeddedElem(t)
				comment := p.expectSemi()
				list = append(list, &ast.Field{Type: typ, Comment: comment})
			} else {
				break parseElements
			}
		}
	}

	// TODO(rfindley): the error produced here could be improved, since we could
	// accept an identifier, 'type', or a '}' at this point.
	rbrace := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
1282
1283func (p *parser) parseMapType() *ast.MapType {
1284	if p.trace {
1285		defer un(trace(p, "MapType"))
1286	}
1287
1288	pos := p.expect(token.MAP)
1289	p.expect(token.LBRACK)
1290	key := p.parseType()
1291	p.expect(token.RBRACK)
1292	value := p.parseType()
1293
1294	return &ast.MapType{Map: pos, Key: key, Value: value}
1295}
1296
1297func (p *parser) parseChanType() *ast.ChanType {
1298	if p.trace {
1299		defer un(trace(p, "ChanType"))
1300	}
1301
1302	pos := p.pos
1303	dir := ast.SEND | ast.RECV
1304	var arrow token.Pos
1305	if p.tok == token.CHAN {
1306		p.next()
1307		if p.tok == token.ARROW {
1308			arrow = p.pos
1309			p.next()
1310			dir = ast.SEND
1311		}
1312	} else {
1313		arrow = p.expect(token.ARROW)
1314		p.expect(token.CHAN)
1315		dir = ast.RECV
1316	}
1317	value := p.parseType()
1318
1319	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
1320}
1321
// parseTypeInstance parses the bracketed type-argument list of a generic
// type instantiation typ[A, B, ...]. An empty argument list is reported
// as an error but still yields an IndexExpr (with a BadExpr index) so
// the AST stays well-formed.
func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "TypeInstance"))
	}

	opening := p.expect(token.LBRACK)
	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RBRACK && p.tok != token.EOF {
		list = append(list, p.parseType())
		if !p.atComma("type argument list", token.RBRACK) {
			break
		}
		p.next()
	}
	p.exprLev--

	closing := p.expectClosing(token.RBRACK, "type argument list")

	if len(list) == 0 {
		// typ[] is invalid; keep a placeholder index for tolerance.
		p.errorExpected(closing, "type argument list")
		return &ast.IndexExpr{
			X:      typ,
			Lbrack: opening,
			Index:  &ast.BadExpr{From: opening + 1, To: closing},
			Rbrack: closing,
		}
	}

	return typeparams.PackIndexExpr(typ, opening, list, closing)
}
1353
// tryIdentOrType parses a type if one starts at the current token and
// returns nil otherwise (without reporting an error). This is the
// central type-dispatch used by operand and interface-element parsing.
func (p *parser) tryIdentOrType() ast.Expr {
	defer decNestLev(incNestLev(p))

	switch p.tok {
	case token.IDENT:
		// (Possibly qualified) type name, optionally instantiated.
		typ := p.parseTypeName(nil)
		if p.tok == token.LBRACK {
			typ = p.parseTypeInstance(typ)
		}
		return typ
	case token.LBRACK:
		// Array or slice type.
		lbrack := p.expect(token.LBRACK)
		return p.parseArrayType(lbrack, nil)
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		return p.parseFuncType()
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// Parenthesized type.
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}
1390
1391// ----------------------------------------------------------------------------
1392// Blocks
1393
1394func (p *parser) parseStmtList() (list []ast.Stmt) {
1395	if p.trace {
1396		defer un(trace(p, "StatementList"))
1397	}
1398
1399	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
1400		list = append(list, p.parseStmt())
1401	}
1402
1403	return
1404}
1405
1406func (p *parser) parseBody() *ast.BlockStmt {
1407	if p.trace {
1408		defer un(trace(p, "Body"))
1409	}
1410
1411	lbrace := p.expect(token.LBRACE)
1412	list := p.parseStmtList()
1413	rbrace := p.expect2(token.RBRACE)
1414
1415	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1416}
1417
1418func (p *parser) parseBlockStmt() *ast.BlockStmt {
1419	if p.trace {
1420		defer un(trace(p, "BlockStmt"))
1421	}
1422
1423	lbrace := p.expect(token.LBRACE)
1424	list := p.parseStmtList()
1425	rbrace := p.expect2(token.RBRACE)
1426
1427	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1428}
1429
1430// ----------------------------------------------------------------------------
1431// Expressions
1432
1433func (p *parser) parseFuncTypeOrLit() ast.Expr {
1434	if p.trace {
1435		defer un(trace(p, "FuncTypeOrLit"))
1436	}
1437
1438	typ := p.parseFuncType()
1439	if p.tok != token.LBRACE {
1440		// function type only
1441		return typ
1442	}
1443
1444	p.exprLev++
1445	body := p.parseBody()
1446	p.exprLev--
1447
1448	return &ast.FuncLit{Type: typ, Body: body}
1449}
1450
1451// parseOperand may return an expression or a raw type (incl. array
1452// types of the form [...]T). Callers must verify the result.
1453func (p *parser) parseOperand() ast.Expr {
1454	if p.trace {
1455		defer un(trace(p, "Operand"))
1456	}
1457
1458	switch p.tok {
1459	case token.IDENT:
1460		x := p.parseIdent()
1461		return x
1462
1463	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
1464		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
1465		p.next()
1466		return x
1467
1468	case token.LPAREN:
1469		lparen := p.pos
1470		p.next()
1471		p.exprLev++
1472		x := p.parseRhs() // types may be parenthesized: (some type)
1473		p.exprLev--
1474		rparen := p.expect(token.RPAREN)
1475		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}
1476
1477	case token.FUNC:
1478		return p.parseFuncTypeOrLit()
1479	}
1480
1481	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
1482		// could be type for composite literal or conversion
1483		_, isIdent := typ.(*ast.Ident)
1484		assert(!isIdent, "type cannot be identifier")
1485		return typ
1486	}
1487
1488	// we have an error
1489	pos := p.pos
1490	p.errorExpected(pos, "operand")
1491	p.advance(stmtStart)
1492	return &ast.BadExpr{From: pos, To: p.pos}
1493}
1494
1495func (p *parser) parseSelector(x ast.Expr) ast.Expr {
1496	if p.trace {
1497		defer un(trace(p, "Selector"))
1498	}
1499
1500	sel := p.parseIdent()
1501
1502	return &ast.SelectorExpr{X: x, Sel: sel}
1503}
1504
1505func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
1506	if p.trace {
1507		defer un(trace(p, "TypeAssertion"))
1508	}
1509
1510	lparen := p.expect(token.LPAREN)
1511	var typ ast.Expr
1512	if p.tok == token.TYPE {
1513		// type switch: typ == nil
1514		p.next()
1515	} else {
1516		typ = p.parseType()
1517	}
1518	rparen := p.expect(token.RPAREN)
1519
1520	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
1521}
1522
// parseIndexOrSliceOrInstance parses the bracketed suffix of x, which may
// turn out to be an index expression x[i], a slice expression x[a:b] or
// x[a:b:c], or a generic instantiation x[T1, T2, ...]. The distinction is
// made from the separators seen (':' vs ',') after the first operand.
func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "parseIndexOrSliceOrInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	if p.tok == token.RBRACK {
		// empty index, slice or index expressions are not permitted;
		// accept them for parsing tolerance, but complain
		p.errorExpected(p.pos, "operand")
		rbrack := p.pos
		p.next()
		return &ast.IndexExpr{
			X:      x,
			Lbrack: lbrack,
			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
			Rbrack: rbrack,
		}
	}
	p.exprLev++

	const N = 3 // change the 3 to 2 to disable 3-index slices
	var args []ast.Expr
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		// We can't know if we have an index expression or a type instantiation;
		// so even if we see a (named) type we are not going to be in type context.
		index[0] = p.parseRhs()
	}
	ncolons := 0
	switch p.tok {
	case token.COLON:
		// slice expression
		for p.tok == token.COLON && ncolons < len(colons) {
			colons[ncolons] = p.pos
			ncolons++
			p.next()
			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
				index[ncolons] = p.parseRhs()
			}
		}
	case token.COMMA:
		// instance expression
		args = append(args, index[0])
		for p.tok == token.COMMA {
			p.next()
			if p.tok != token.RBRACK && p.tok != token.EOF {
				args = append(args, p.parseType())
			}
		}
	}

	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of middle and final index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was go.dev/issue/7305).
			if index[1] == nil {
				p.error(colons[0], "middle index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "final index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	if len(args) == 0 {
		// index expression
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
	}

	// instance expression
	return typeparams.PackIndexExpr(x, lbrack, args, rbrack)
}
1606
// parseCallOrConversion parses the parenthesized argument list of a call
// or conversion applied to fun. An ellipsis after the last argument is
// recorded; arguments after it are not parsed (the loop stops once
// ellipsis is set).
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhs()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
1632
1633func (p *parser) parseValue() ast.Expr {
1634	if p.trace {
1635		defer un(trace(p, "Element"))
1636	}
1637
1638	if p.tok == token.LBRACE {
1639		return p.parseLiteralValue(nil)
1640	}
1641
1642	x := p.parseExpr()
1643
1644	return x
1645}
1646
1647func (p *parser) parseElement() ast.Expr {
1648	if p.trace {
1649		defer un(trace(p, "Element"))
1650	}
1651
1652	x := p.parseValue()
1653	if p.tok == token.COLON {
1654		colon := p.pos
1655		p.next()
1656		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
1657	}
1658
1659	return x
1660}
1661
1662func (p *parser) parseElementList() (list []ast.Expr) {
1663	if p.trace {
1664		defer un(trace(p, "ElementList"))
1665	}
1666
1667	for p.tok != token.RBRACE && p.tok != token.EOF {
1668		list = append(list, p.parseElement())
1669		if !p.atComma("composite literal", token.RBRACE) {
1670			break
1671		}
1672		p.next()
1673	}
1674
1675	return
1676}
1677
1678func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
1679	defer decNestLev(incNestLev(p))
1680
1681	if p.trace {
1682		defer un(trace(p, "LiteralValue"))
1683	}
1684
1685	lbrace := p.expect(token.LBRACE)
1686	var elts []ast.Expr
1687	p.exprLev++
1688	if p.tok != token.RBRACE {
1689		elts = p.parseElementList()
1690	}
1691	p.exprLev--
1692	rbrace := p.expectClosing(token.RBRACE, "composite literal")
1693	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
1694}
1695
// parsePrimaryExpr parses a primary expression: an operand followed by
// any number of selector, type-assertion, index/slice/instantiation,
// call, or composite-literal suffixes. If x is non-nil it is used as
// the already-parsed operand.
func (p *parser) parsePrimaryExpr(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}

	if x == nil {
		x = p.parseOperand()
	}
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		switch p.tok {
		case token.PERIOD:
			p.next()
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(x)
			case token.LPAREN:
				x = p.parseTypeAssertion(x)
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
				//                to error recovery sufficient to make the x/tools tests to
				//                pass with the new parsing logic introduced for type
				//                parameters. Remove this once error recovery has been
				//                more generally reconsidered.
				if p.tok != token.RBRACE {
					p.next() // make progress
				}
				// Substitute a blank selector so the AST stays well-formed.
				sel := &ast.Ident{NamePos: pos, Name: "_"}
				x = &ast.SelectorExpr{X: x, Sel: sel}
			}
		case token.LBRACK:
			x = p.parseIndexOrSliceOrInstance(x)
		case token.LPAREN:
			x = p.parseCallOrConversion(x)
		case token.LBRACE:
			// operand may have returned a parenthesized complit
			// type; accept it but complain if we have a complit
			t := ast.Unparen(x)
			// determine if '{' belongs to a composite literal or a block statement
			switch t.(type) {
			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
				if p.exprLev < 0 {
					// control-clause context: '{' starts the statement body
					return x
				}
				// x is possibly a composite literal type
			case *ast.IndexExpr, *ast.IndexListExpr:
				if p.exprLev < 0 {
					return x
				}
				// x is possibly a composite literal type
			case *ast.ArrayType, *ast.StructType, *ast.MapType:
				// x is a composite literal type
			default:
				return x
			}
			if t != x {
				p.error(t.Pos(), "cannot parenthesize type in composite literal")
				// already progressed, no need to advance
			}
			x = p.parseLiteralValue(x)
		default:
			return x
		}
	}
}
1768
// parseUnaryExpr parses a unary expression: a prefix operator applied to
// a unary expression, a receive/channel-type form starting with "<-",
// a pointer/dereference form starting with "*", or a primary expression.
func (p *parser) parseUnaryExpr() ast.Expr {
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}

	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.TILDE:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: x}

	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()

		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//   <- type  => (<-type) must be channel type
		//   <- expr  => <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//   <- (chan type)    =>  (<-chan type)
		//   <- (chan<- type)  =>  (<-chan (<-type))

		x := p.parseUnaryExpr()

		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)

			// re-associate position info and <-
			// Walk down the chain of channel types, pushing the leading
			// arrow inward one level at a time.
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}

			return x
		}

		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: x}

	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{Star: pos, X: x}
	}

	return p.parsePrimaryExpr(nil)
}
1839
1840func (p *parser) tokPrec() (token.Token, int) {
1841	tok := p.tok
1842	if p.inRhs && tok == token.ASSIGN {
1843		tok = token.EQL
1844	}
1845	return tok, tok.Precedence()
1846}
1847
1848// parseBinaryExpr parses a (possibly) binary expression.
1849// If x is non-nil, it is used as the left operand.
1850//
1851// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
// parseBinaryExpr parses a (possibly) binary expression using precedence
// climbing: it keeps consuming operators whose precedence is at least
// prec1, recursing with a higher threshold for right operands so that
// equal-precedence operators associate to the left.
// If x is non-nil, it is used as the left operand.
//
// TODO(rfindley): parseBinaryExpr has become overloaded. Consider refactoring.
func (p *parser) parseBinaryExpr(x ast.Expr, prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}

	if x == nil {
		x = p.parseUnaryExpr()
	}
	// We track the nesting here rather than at the entry for the function,
	// since it can iteratively produce a nested output, and we want to
	// limit how deep a structure we generate.
	var n int
	defer func() { p.nestLev -= n }()
	for n = 1; ; n++ {
		incNestLev(p)
		op, oprec := p.tokPrec()
		if oprec < prec1 {
			return x
		}
		pos := p.expect(op)
		y := p.parseBinaryExpr(nil, oprec+1)
		x = &ast.BinaryExpr{X: x, OpPos: pos, Op: op, Y: y}
	}
}
1876
1877// The result may be a type or even a raw type ([...]int).
1878func (p *parser) parseExpr() ast.Expr {
1879	if p.trace {
1880		defer un(trace(p, "Expression"))
1881	}
1882
1883	return p.parseBinaryExpr(nil, token.LowestPrec+1)
1884}
1885
1886func (p *parser) parseRhs() ast.Expr {
1887	old := p.inRhs
1888	p.inRhs = true
1889	x := p.parseExpr()
1890	p.inRhs = old
1891	return x
1892}
1893
1894// ----------------------------------------------------------------------------
1895// Statements
1896
// Parsing modes for parseSimpleStmt.
const (
	basic = iota // ordinary simple statement only
	labelOk      // a labeled statement is additionally permitted
	rangeOk      // a range clause is additionally permitted
)
1903
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}

	x := p.parseList(false)

	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		// "range" only follows '=' or ':=' and only when the caller allows it.
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			pos := p.pos
			p.next()
			// Encode the range clause as a unary RANGE expression on the RHS.
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseList(true)
		}
		return &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}, isRange
	}

	// All remaining forms take exactly one expression.
	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}

	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error
		// reported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false

	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false

	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}

	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
1980
1981func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
1982	x := p.parseRhs() // could be a conversion: (some type)(x)
1983	if t := ast.Unparen(x); t != x {
1984		p.error(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", callType))
1985		x = t
1986	}
1987	if call, isCall := x.(*ast.CallExpr); isCall {
1988		return call
1989	}
1990	if _, isBad := x.(*ast.BadExpr); !isBad {
1991		// only report error if it's a new one
1992		p.error(p.safePos(x.End()), fmt.Sprintf("expression in %s must be function call", callType))
1993	}
1994	return nil
1995}
1996
1997func (p *parser) parseGoStmt() ast.Stmt {
1998	if p.trace {
1999		defer un(trace(p, "GoStmt"))
2000	}
2001
2002	pos := p.expect(token.GO)
2003	call := p.parseCallExpr("go")
2004	p.expectSemi()
2005	if call == nil {
2006		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
2007	}
2008
2009	return &ast.GoStmt{Go: pos, Call: call}
2010}
2011
2012func (p *parser) parseDeferStmt() ast.Stmt {
2013	if p.trace {
2014		defer un(trace(p, "DeferStmt"))
2015	}
2016
2017	pos := p.expect(token.DEFER)
2018	call := p.parseCallExpr("defer")
2019	p.expectSemi()
2020	if call == nil {
2021		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
2022	}
2023
2024	return &ast.DeferStmt{Defer: pos, Call: call}
2025}
2026
2027func (p *parser) parseReturnStmt() *ast.ReturnStmt {
2028	if p.trace {
2029		defer un(trace(p, "ReturnStmt"))
2030	}
2031
2032	pos := p.pos
2033	p.expect(token.RETURN)
2034	var x []ast.Expr
2035	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
2036		x = p.parseList(true)
2037	}
2038	p.expectSemi()
2039
2040	return &ast.ReturnStmt{Return: pos, Results: x}
2041}
2042
2043func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
2044	if p.trace {
2045		defer un(trace(p, "BranchStmt"))
2046	}
2047
2048	pos := p.expect(tok)
2049	var label *ast.Ident
2050	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
2051		label = p.parseIdent()
2052	}
2053	p.expectSemi()
2054
2055	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
2056}
2057
2058func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
2059	if s == nil {
2060		return nil
2061	}
2062	if es, isExpr := s.(*ast.ExprStmt); isExpr {
2063		return es.X
2064	}
2065	found := "simple statement"
2066	if _, isAss := s.(*ast.AssignStmt); isAss {
2067		found = "assignment"
2068	}
2069	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
2070	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
2071}
2072
// parseIfHeader is an adjusted version of parser.header
// in cmd/compile/internal/syntax/parser.go, which has
// been tuned for better error handling.
//
// It parses the optional init statement and the condition of an if
// statement, up to (but not including) the opening '{' of the body.
// cond is always non-nil on return (a BadExpr when missing).
func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
	if p.tok == token.LBRACE {
		p.error(p.pos, "missing condition in if statement")
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
		return
	}
	// p.tok != token.LBRACE

	// Disable composite literals in the header ('{' must start the body).
	prevLev := p.exprLev
	p.exprLev = -1

	if p.tok != token.SEMICOLON {
		// accept potential variable declaration but complain
		if p.tok == token.VAR {
			p.next()
			p.error(p.pos, "var declaration not allowed in if initializer")
		}
		init, _ = p.parseSimpleStmt(basic)
	}

	var condStmt ast.Stmt
	// Record the separating semicolon (explicit ";" or inserted "\n") so
	// a missing condition can be reported at the right place below.
	var semi struct {
		pos token.Pos
		lit string // ";" or "\n"; valid if pos.IsValid()
	}
	if p.tok != token.LBRACE {
		if p.tok == token.SEMICOLON {
			semi.pos = p.pos
			semi.lit = p.lit
			p.next()
		} else {
			p.expect(token.SEMICOLON)
		}
		if p.tok != token.LBRACE {
			condStmt, _ = p.parseSimpleStmt(basic)
		}
	} else {
		// No semicolon: what we parsed as init was actually the condition.
		condStmt = init
		init = nil
	}

	if condStmt != nil {
		cond = p.makeExpr(condStmt, "boolean expression")
	} else if semi.pos.IsValid() {
		if semi.lit == "\n" {
			p.error(semi.pos, "unexpected newline, expecting { after if clause")
		} else {
			p.error(semi.pos, "missing condition in if statement")
		}
	}

	// make sure we have a valid AST
	if cond == nil {
		cond = &ast.BadExpr{From: p.pos, To: p.pos}
	}

	p.exprLev = prevLev
	return
}
2135
2136func (p *parser) parseIfStmt() *ast.IfStmt {
2137	defer decNestLev(incNestLev(p))
2138
2139	if p.trace {
2140		defer un(trace(p, "IfStmt"))
2141	}
2142
2143	pos := p.expect(token.IF)
2144
2145	init, cond := p.parseIfHeader()
2146	body := p.parseBlockStmt()
2147
2148	var else_ ast.Stmt
2149	if p.tok == token.ELSE {
2150		p.next()
2151		switch p.tok {
2152		case token.IF:
2153			else_ = p.parseIfStmt()
2154		case token.LBRACE:
2155			else_ = p.parseBlockStmt()
2156			p.expectSemi()
2157		default:
2158			p.errorExpected(p.pos, "if statement or block")
2159			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
2160		}
2161	} else {
2162		p.expectSemi()
2163	}
2164
2165	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
2166}
2167
2168func (p *parser) parseCaseClause() *ast.CaseClause {
2169	if p.trace {
2170		defer un(trace(p, "CaseClause"))
2171	}
2172
2173	pos := p.pos
2174	var list []ast.Expr
2175	if p.tok == token.CASE {
2176		p.next()
2177		list = p.parseList(true)
2178	} else {
2179		p.expect(token.DEFAULT)
2180	}
2181
2182	colon := p.expect(token.COLON)
2183	body := p.parseStmtList()
2184
2185	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
2186}
2187
2188func isTypeSwitchAssert(x ast.Expr) bool {
2189	a, ok := x.(*ast.TypeAssertExpr)
2190	return ok && a.Type == nil
2191}
2192
2193func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
2194	switch t := s.(type) {
2195	case *ast.ExprStmt:
2196		// x.(type)
2197		return isTypeSwitchAssert(t.X)
2198	case *ast.AssignStmt:
2199		// v := x.(type)
2200		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
2201			switch t.Tok {
2202			case token.ASSIGN:
2203				// permit v = x.(type) but complain
2204				p.error(t.TokPos, "expected ':=', found '='")
2205				fallthrough
2206			case token.DEFINE:
2207				return true
2208			}
2209		}
2210	}
2211	return false
2212}
2213
// parseSwitchStmt parses an expression switch or a type switch. The
// header may contain an optional init statement (s1) and an optional
// tag expression or type switch guard (s2); which of the two parsed
// simple statements plays which role depends on the semicolon seen.
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}

	pos := p.expect(token.SWITCH)

	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		// Disable composite literals in the header ('{' must open the body).
		prevLev := p.exprLev
		p.exprLev = -1
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			p.next()
			// The first statement was the init; the tag (if any) follows.
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	typeSwitch := p.isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause())
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}

	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}
2267
// parseCommClause parses a single communication clause of a select
// statement ("case <send or receive stmt>:" or "default:") together with
// its statement list. For a default clause, the returned clause's Comm
// is nil.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}

	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		// Parse an expression list first; whether it is the channel of a
		// send, the lhs of a receive assignment, or a bare receive is
		// decided by the token that follows.
		lhs := p.parseList(false)
		if p.tok == token.ARROW {
			// SendStmt: lhs <- rhs
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment: v [, ok] = <-ch or v [, ok] := <-ch
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				comm = &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
2319
2320func (p *parser) parseSelectStmt() *ast.SelectStmt {
2321	if p.trace {
2322		defer un(trace(p, "SelectStmt"))
2323	}
2324
2325	pos := p.expect(token.SELECT)
2326	lbrace := p.expect(token.LBRACE)
2327	var list []ast.Stmt
2328	for p.tok == token.CASE || p.tok == token.DEFAULT {
2329		list = append(list, p.parseCommClause())
2330	}
2331	rbrace := p.expect(token.RBRACE)
2332	p.expectSemi()
2333	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
2334
2335	return &ast.SelectStmt{Select: pos, Body: body}
2336}
2337
// parseForStmt parses a for statement in any of its forms. It returns an
// *ast.RangeStmt for "for ... range x" loops, an *ast.ForStmt otherwise,
// and an *ast.BadStmt if a range clause has more than two lhs expressions.
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}

	pos := p.expect(token.FOR)

	// Parse the loop header: s1 = init statement, s2 = condition (or the
	// range clause), s3 = post statement. All are optional.
	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // negative exprLev while parsing the header (restored below)
		if p.tok != token.SEMICOLON {
			if p.tok == token.RANGE {
				// "for range x" (nil lhs in assignment)
				pos := p.pos
				p.next()
				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
				s2 = &ast.AssignStmt{Rhs: y}
				isRange = true
			} else {
				s2, isRange = p.parseSimpleStmt(rangeOk)
			}
		}
		if !isRange && p.tok == token.SEMICOLON {
			// Three-clause form: what we parsed first was the init statement.
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	body := p.parseBlockStmt()
	p.expectSemi()

	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs: at most "key, value" may appear before = or :=
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 0:
			// nothing to do ("for range x" form)
		case 1:
			key = as.Lhs[0]
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		default:
			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			Range:  as.Rhs[0].Pos(),
			X:      x,
			Body:   body,
		}
	}

	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2, "boolean or range expression"),
		Post: s3,
		Body: body,
	}
}
2419
// parseStmt parses a single statement, dispatching on the current token.
// If no statement can start at the current token, an error is reported,
// the parser advances to the next statement start, and an *ast.BadStmt
// is returned.
func (p *parser) parseStmt() (s ast.Stmt) {
	// NOTE(review): incNestLev/decNestLev are defined elsewhere in this
	// file; presumably they bound the statement nesting depth.
	defer decNestLev(incNestLev(p))

	if p.trace {
		defer un(trace(p, "Statement"))
	}

	switch p.tok {
	case token.CONST, token.TYPE, token.VAR:
		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
	case
		// tokens that may start an expression
		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
		token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
		s, _ = p.parseSimpleStmt(labelOk)
		// because of the required look-ahead, labeled statements are
		// parsed by parseSimpleStmt - don't expect a semicolon after
		// them
		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
			p.expectSemi()
		}
	case token.GO:
		s = p.parseGoStmt()
	case token.DEFER:
		s = p.parseDeferStmt()
	case token.RETURN:
		s = p.parseReturnStmt()
	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
		s = p.parseBranchStmt(p.tok)
	case token.LBRACE:
		s = p.parseBlockStmt()
		p.expectSemi()
	case token.IF:
		s = p.parseIfStmt()
	case token.SWITCH:
		s = p.parseSwitchStmt()
	case token.SELECT:
		s = p.parseSelectStmt()
	case token.FOR:
		s = p.parseForStmt()
	case token.SEMICOLON:
		// Is it ever possible to have an implicit semicolon
		// producing an empty statement in a valid program?
		// (handle correctly anyway)
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
		p.next()
	case token.RBRACE:
		// a semicolon may be omitted before a closing "}"
		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
	default:
		// no statement found
		pos := p.pos
		p.errorExpected(pos, "statement")
		p.advance(stmtStart)
		s = &ast.BadStmt{From: pos, To: p.pos}
	}

	return
}
2480
2481// ----------------------------------------------------------------------------
2482// Declarations
2483
// A parseSpecFunction parses a single declaration spec (import, const,
// type, or var, selected by keyword). doc is the spec's lead comment and
// iota is the spec's index within a parenthesized group (see parseGenDecl).
type parseSpecFunction func(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec
2485
// parseImportSpec parses a single import spec: an optional local package
// name ("." or an identifier) followed by the quoted import path. The
// keyword and iota arguments are unused; they exist so the function
// matches the parseSpecFunction signature. The resulting spec is also
// recorded in p.imports.
func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "ImportSpec"))
	}

	// Optional local name: "." for dot imports, or a renaming identifier.
	var ident *ast.Ident
	switch p.tok {
	case token.IDENT:
		ident = p.parseIdent()
	case token.PERIOD:
		ident = &ast.Ident{NamePos: p.pos, Name: "."}
		p.next()
	}

	pos := p.pos
	var path string
	if p.tok == token.STRING {
		path = p.lit
		p.next()
	} else if p.tok.IsLiteral() {
		// Some other literal was used as the path: complain, but consume
		// the token so parsing can continue.
		p.error(pos, "import path must be a string")
		p.next()
	} else {
		p.error(pos, "missing import path")
		p.advance(exprEnd)
	}
	comment := p.expectSemi()

	// collect imports
	spec := &ast.ImportSpec{
		Doc:     doc,
		Name:    ident,
		Path:    &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
		Comment: comment,
	}
	p.imports = append(p.imports, spec)

	return spec
}
2525
// parseValueSpec parses a single const or var spec (keyword selects
// which): an identifier list followed by an optional type and optional
// "=" initialization expressions. The iota argument is part of the
// parseSpecFunction signature and is not used here.
func (p *parser) parseValueSpec(doc *ast.CommentGroup, keyword token.Token, iota int) ast.Spec {
	if p.trace {
		defer un(trace(p, keyword.String()+"Spec"))
	}

	idents := p.parseIdentList()
	var typ ast.Expr
	var values []ast.Expr
	switch keyword {
	case token.CONST:
		// always permit optional type and initialization for more tolerant parsing
		if p.tok != token.EOF && p.tok != token.SEMICOLON && p.tok != token.RPAREN {
			typ = p.tryIdentOrType()
			if p.tok == token.ASSIGN {
				p.next()
				values = p.parseList(true)
			}
		}
	case token.VAR:
		// A var spec has a type, an initialization, or both.
		if p.tok != token.ASSIGN {
			typ = p.parseType()
		}
		if p.tok == token.ASSIGN {
			p.next()
			values = p.parseList(true)
		}
	default:
		panic("unreachable")
	}
	comment := p.expectSemi()

	spec := &ast.ValueSpec{
		Doc:     doc,
		Names:   idents,
		Type:    typ,
		Values:  values,
		Comment: comment,
	}
	return spec
}
2566
// parseGenericType finishes parsing a generic type declaration whose type
// parameter list was detected by parseTypeSpec. openPos is the position of
// the opening "[", and name0/typ0 are the already-parsed first type
// parameter name and (possibly nil) type. The type parameter list, an
// optional "=" (alias), and the declared type are stored in spec.
func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, typ0 ast.Expr) {
	if p.trace {
		defer un(trace(p, "parseGenericType"))
	}

	list := p.parseParameterList(name0, typ0, token.RBRACK)
	closePos := p.expect(token.RBRACK)
	spec.TypeParams = &ast.FieldList{Opening: openPos, List: list, Closing: closePos}
	// Let the type checker decide whether to accept type parameters on aliases:
	// see go.dev/issue/46477.
	if p.tok == token.ASSIGN {
		// type alias
		spec.Assign = p.pos
		p.next()
	}
	spec.Type = p.parseType()
}
2584
// parseTypeSpec parses a single type spec: a name, optionally followed by
// a type parameter list, then an optional "=" (alias declaration) and the
// type itself. Distinguishing "Name[...]" as an array/slice type versus a
// type parameter list requires the look-ahead analysis described below.
// The keyword and iota arguments are unused (parseSpecFunction signature).
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}

	name := p.parseIdent()
	spec := &ast.TypeSpec{Doc: doc, Name: name}

	if p.tok == token.LBRACK {
		// spec.Name "[" ...
		// array/slice type or type parameter list
		lbrack := p.pos
		p.next()
		if p.tok == token.IDENT {
			// We may have an array type or a type parameter list.
			// In either case we expect an expression x (which may
			// just be a name, or a more complex expression) which
			// we can analyze further.
			//
			// A type parameter list may have a type bound starting
			// with a "[" as in: P []E. In that case, simply parsing
			// an expression would lead to an error: P[] is invalid.
			// But since index or slice expressions are never constant
			// and thus invalid array length expressions, if the name
			// is followed by "[" it must be the start of an array or
			// slice constraint. Only if we don't see a "[" do we
			// need to parse a full expression. Notably, name <- x
			// is not a concern because name <- x is a statement and
			// not an expression.
			var x ast.Expr = p.parseIdent()
			if p.tok != token.LBRACK {
				// To parse the expression starting with name, expand
				// the call sequence we would get by passing in name
				// to parser.expr, and pass in name to parsePrimaryExpr.
				p.exprLev++
				lhs := p.parsePrimaryExpr(x)
				x = p.parseBinaryExpr(lhs, token.LowestPrec+1)
				p.exprLev--
			}
			// Analyze expression x. If we can split x into a type parameter
			// name, possibly followed by a type parameter type, we consider
			// this the start of a type parameter list, with some caveats:
			// a single name followed by "]" tilts the decision towards an
			// array declaration; a type parameter type that could also be
			// an ordinary expression but which is followed by a comma tilts
			// the decision towards a type parameter list.
			if pname, ptype := extractName(x, p.tok == token.COMMA); pname != nil && (ptype != nil || p.tok != token.RBRACK) {
				// spec.Name "[" pname ...
				// spec.Name "[" pname ptype ...
				// spec.Name "[" pname ptype "," ...
				p.parseGenericType(spec, lbrack, pname, ptype) // ptype may be nil
			} else {
				// spec.Name "[" pname "]" ...
				// spec.Name "[" x ...
				spec.Type = p.parseArrayType(lbrack, x)
			}
		} else {
			// array type
			spec.Type = p.parseArrayType(lbrack, nil)
		}
	} else {
		// no type parameters
		if p.tok == token.ASSIGN {
			// type alias
			spec.Assign = p.pos
			p.next()
		}
		spec.Type = p.parseType()
	}

	spec.Comment = p.expectSemi()

	return spec
}
2659
2660// extractName splits the expression x into (name, expr) if syntactically
2661// x can be written as name expr. The split only happens if expr is a type
2662// element (per the isTypeElem predicate) or if force is set.
2663// If x is just a name, the result is (name, nil). If the split succeeds,
2664// the result is (name, expr). Otherwise the result is (nil, x).
2665// Examples:
2666//
2667//	x           force    name    expr
2668//	------------------------------------
2669//	P*[]int     T/F      P       *[]int
2670//	P*E         T        P       *E
2671//	P*E         F        nil     P*E
2672//	P([]int)    T/F      P       []int
2673//	P(E)        T        P       E
2674//	P(E)        F        nil     P(E)
2675//	P*E|F|~G    T/F      P       *E|F|~G
2676//	P*E|F|G     T        P       *E|F|G
2677//	P*E|F|G     F        nil     P*E|F|G
2678func extractName(x ast.Expr, force bool) (*ast.Ident, ast.Expr) {
2679	switch x := x.(type) {
2680	case *ast.Ident:
2681		return x, nil
2682	case *ast.BinaryExpr:
2683		switch x.Op {
2684		case token.MUL:
2685			if name, _ := x.X.(*ast.Ident); name != nil && (force || isTypeElem(x.Y)) {
2686				// x = name *x.Y
2687				return name, &ast.StarExpr{Star: x.OpPos, X: x.Y}
2688			}
2689		case token.OR:
2690			if name, lhs := extractName(x.X, force || isTypeElem(x.Y)); name != nil && lhs != nil {
2691				// x = name lhs|x.Y
2692				op := *x
2693				op.X = lhs
2694				return name, &op
2695			}
2696		}
2697	case *ast.CallExpr:
2698		if name, _ := x.Fun.(*ast.Ident); name != nil {
2699			if len(x.Args) == 1 && x.Ellipsis == token.NoPos && (force || isTypeElem(x.Args[0])) {
2700				// x = name "(" x.ArgList[0] ")"
2701				return name, x.Args[0]
2702			}
2703		}
2704	}
2705	return nil, x
2706}
2707
2708// isTypeElem reports whether x is a (possibly parenthesized) type element expression.
2709// The result is false if x could be a type element OR an ordinary (value) expression.
2710func isTypeElem(x ast.Expr) bool {
2711	switch x := x.(type) {
2712	case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
2713		return true
2714	case *ast.BinaryExpr:
2715		return isTypeElem(x.X) || isTypeElem(x.Y)
2716	case *ast.UnaryExpr:
2717		return x.Op == token.TILDE
2718	case *ast.ParenExpr:
2719		return isTypeElem(x.X)
2720	}
2721	return false
2722}
2723
2724func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
2725	if p.trace {
2726		defer un(trace(p, "GenDecl("+keyword.String()+")"))
2727	}
2728
2729	doc := p.leadComment
2730	pos := p.expect(keyword)
2731	var lparen, rparen token.Pos
2732	var list []ast.Spec
2733	if p.tok == token.LPAREN {
2734		lparen = p.pos
2735		p.next()
2736		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
2737			list = append(list, f(p.leadComment, keyword, iota))
2738		}
2739		rparen = p.expect(token.RPAREN)
2740		p.expectSemi()
2741	} else {
2742		list = append(list, f(nil, keyword, 0))
2743	}
2744
2745	return &ast.GenDecl{
2746		Doc:    doc,
2747		TokPos: pos,
2748		Tok:    keyword,
2749		Lparen: lparen,
2750		Specs:  list,
2751		Rparen: rparen,
2752	}
2753}
2754
// parseFuncDecl parses a function or method declaration. A receiver, if
// present, is parsed as an ordinary parameter list (and may therefore
// contain more than one entry; see the package comment). Type parameters
// on methods are parsed for better error recovery but rejected with an
// error. The body may be absent.
func (p *parser) parseFuncDecl() *ast.FuncDecl {
	if p.trace {
		defer un(trace(p, "FunctionDecl"))
	}

	doc := p.leadComment
	pos := p.expect(token.FUNC)

	var recv *ast.FieldList
	if p.tok == token.LPAREN {
		// Method declaration: parse the receiver parameter list.
		_, recv = p.parseParameters(false)
	}

	ident := p.parseIdent()

	tparams, params := p.parseParameters(true)
	if recv != nil && tparams != nil {
		// Method declarations do not have type parameters. We parse them for a
		// better error message and improved error recovery.
		p.error(tparams.Opening, "method must have no type parameters")
		tparams = nil
	}
	results := p.parseResult()

	var body *ast.BlockStmt
	switch p.tok {
	case token.LBRACE:
		body = p.parseBody()
		p.expectSemi()
	case token.SEMICOLON:
		p.next()
		if p.tok == token.LBRACE {
			// opening { of function declaration on next line
			p.error(p.pos, "unexpected semicolon or newline before {")
			body = p.parseBody()
			p.expectSemi()
		}
	default:
		// Declaration without a body.
		p.expectSemi()
	}

	decl := &ast.FuncDecl{
		Doc:  doc,
		Recv: recv,
		Name: ident,
		Type: &ast.FuncType{
			Func:       pos,
			TypeParams: tparams,
			Params:     params,
			Results:    results,
		},
		Body: body,
	}
	return decl
}
2810
2811func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
2812	if p.trace {
2813		defer un(trace(p, "Declaration"))
2814	}
2815
2816	var f parseSpecFunction
2817	switch p.tok {
2818	case token.IMPORT:
2819		f = p.parseImportSpec
2820
2821	case token.CONST, token.VAR:
2822		f = p.parseValueSpec
2823
2824	case token.TYPE:
2825		f = p.parseTypeSpec
2826
2827	case token.FUNC:
2828		return p.parseFuncDecl()
2829
2830	default:
2831		pos := p.pos
2832		p.errorExpected(pos, "declaration")
2833		p.advance(sync)
2834		return &ast.BadDecl{From: pos, To: p.pos}
2835	}
2836
2837	return p.parseGenDecl(p.tok, f)
2838}
2839
2840// ----------------------------------------------------------------------------
2841// Source files
2842
// parseFile parses a complete Go source file: the package clause, the
// import declarations, and the remaining top-level declarations (subject
// to the PackageClauseOnly and ImportsOnly modes). It returns nil if
// errors occurred scanning the first token or parsing the package clause.
// Unless SkipObjectResolution is set, identifiers are resolved afterwards.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()

	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}

		if p.mode&ImportsOnly == 0 {
			// rest of package body
			prev := token.IMPORT
			for p.tok != token.EOF {
				// Continue to accept import declarations for error tolerance, but complain.
				if p.tok == token.IMPORT && prev != token.IMPORT {
					p.error(p.pos, "imports must appear before other declarations")
				}
				prev = p.tok

				decls = append(decls, p.parseDecl(declStart))
			}
		}
	}

	f := &ast.File{
		Doc:       doc,
		Package:   pos,
		Name:      ident,
		Decls:     decls,
		FileStart: token.Pos(p.file.Base()),
		FileEnd:   token.Pos(p.file.Base() + p.file.Size()),
		Imports:   p.imports,
		Comments:  p.comments,
		GoVersion: p.goVersion,
	}
	var declErr func(token.Pos, string)
	if p.mode&DeclarationErrors != 0 {
		declErr = p.error
	}
	if p.mode&SkipObjectResolution == 0 {
		// Resolve identifiers to their declarations (ast.Object graph).
		resolveFile(f, p.file, declErr)
	}

	return f
}
2914