...

Source file src/cuelang.org/go/cue/parser/parser.go

Documentation: cuelang.org/go/cue/parser

     1  // Copyright 2018 The CUE Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package parser
    16  
    17  import (
    18  	"fmt"
    19  	"strings"
    20  	"unicode"
    21  
    22  	"cuelang.org/go/cue/ast"
    23  	"cuelang.org/go/cue/errors"
    24  	"cuelang.org/go/cue/literal"
    25  	"cuelang.org/go/cue/scanner"
    26  	"cuelang.org/go/cue/token"
    27  	"cuelang.org/go/internal"
    28  	"cuelang.org/go/internal/astinternal"
    29  )
    30  
// debugStr renders an AST node as a debug string; aliased from
// astinternal to keep call sites in this package short.
var debugStr = astinternal.DebugStr
    32  
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File     // file info; all positions are resolved against this
	errors  errors.Error    // accumulated parse errors
	scanner scanner.Scanner // token source

	// Tracing/debugging
	mode      mode // parsing mode
	trace     bool // == (mode & Trace != 0)
	panicking bool // set if we are bailing out due to too many errors.
	indent    int  // indentation used for tracing output

	// Comments
	leadComment *ast.CommentGroup // doc comment pending attachment to the next node
	comments    *commentState     // current node's comment-collection state (stack via parent links)

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to syncXXX functions
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of calls to syncXXX without progress

	// Non-syntactic parser control
	exprLev int // < 0: in control clause, >= 0: in expression

	imports []*ast.ImportSpec // list of imports

	// version is the configured language version; 0 means "use the
	// default" (see assertV0).
	version int
}
    68  
    69  func (p *parser) init(filename string, src []byte, mode []Option) {
    70  	for _, f := range mode {
    71  		f(p)
    72  	}
    73  	p.file = token.NewFile(filename, -1, len(src))
    74  
    75  	var m scanner.Mode
    76  	if p.mode&parseCommentsMode != 0 {
    77  		m = scanner.ScanComments
    78  	}
    79  	eh := func(pos token.Pos, msg string, args []interface{}) {
    80  		p.errors = errors.Append(p.errors, errors.Newf(pos, msg, args...))
    81  	}
    82  	p.scanner.Init(p.file, src, eh, m)
    83  
    84  	p.trace = p.mode&traceMode != 0 // for convenience (p.trace is used frequently)
    85  
    86  	p.comments = &commentState{pos: -1}
    87  
    88  	p.next()
    89  }
    90  
// commentState tracks comment groups collected while parsing a single
// node (or a list of productions). States form a stack linked through
// parent; see openComments/openList and closeNode/closeList.
type commentState struct {
	parent *commentState       // enclosing state
	pos    int8                // current comment position within the production
	groups []*ast.CommentGroup // comments collected so far, tagged with their position

	// lists are not attached to nodes themselves. Enclosed expressions may
	// miss a comment due to commas and line termination. closeLists ensures
	// that comments will be passed to someone.
	isList    int      // >0 while treating a production list as one position; counts nesting
	lastChild ast.Node // most recently closed child node (candidate comment owner)
	lastPos   int8     // comment position recorded when lastChild was closed
}
   103  
// openComments reserves the next doc comment for the caller and flushes
// comments pending on an enclosing list state: comments collected so far
// are either attached to the list's last closed child or handed to the
// new child state.
func (p *parser) openComments() *commentState {
	child := &commentState{
		parent: p.comments,
	}
	if c := p.comments; c != nil && c.isList > 0 {
		if c.lastChild != nil {
			// Attach pending comments to the previously closed child,
			// keeping position-0 (leading) groups first, then the
			// child's own comments, then the remaining groups retagged
			// with the child's last position.
			var groups []*ast.CommentGroup
			for _, cg := range c.groups {
				if cg.Position == 0 {
					groups = append(groups, cg)
				}
			}
			groups = append(groups, c.lastChild.Comments()...)
			for _, cg := range c.groups {
				if cg.Position != 0 {
					cg.Position = c.lastPos
					groups = append(groups, cg)
				}
			}
			ast.SetComments(c.lastChild, groups)
			c.groups = nil
		} else {
			c.lastChild = nil
			// attach before next
			for _, cg := range c.groups {
				cg.Position = 0
			}
			child.groups = c.groups
			c.groups = nil
		}
	}
	// Hand the pending lead (doc) comment to the new node's state.
	if p.leadComment != nil {
		child.groups = append(child.groups, p.leadComment)
		p.leadComment = nil
	}
	p.comments = child
	return child
}
   143  
   144  // openList is used to treat a list of comments as a single comment
   145  // position in a production.
   146  func (p *parser) openList() {
   147  	if p.comments.isList > 0 {
   148  		p.comments.isList++
   149  		return
   150  	}
   151  	c := &commentState{
   152  		parent: p.comments,
   153  		isList: 1,
   154  	}
   155  	p.comments = c
   156  }
   157  
   158  func (c *commentState) add(g *ast.CommentGroup) {
   159  	g.Position = c.pos
   160  	c.groups = append(c.groups, g)
   161  }
   162  
// closeList ends one nesting level opened by openList. When the
// outermost level closes, pending comments are attached to the list's
// last child (if any) or propagated to the parent state. Unbalanced
// calls are a parser bug and panic (once).
func (p *parser) closeList() {
	c := p.comments
	if c.lastChild != nil {
		// Give trailing comments to the last closed child at the
		// position it was closed with.
		for _, cg := range c.groups {
			cg.Position = c.lastPos
			c.lastChild.AddComment(cg)
		}
		c.groups = nil
	}
	switch c.isList--; {
	case c.isList < 0:
		// More closeList than openList calls: internal error.
		if !p.panicking {
			err := errors.Newf(p.pos, "unmatched close list")
			p.errors = errors.Append(p.errors, err)
			p.panicking = true
			panic(err)
		}
	case c.isList == 0:
		// Outermost list closed: hand leftovers to the parent and
		// advance its comment position.
		parent := c.parent
		if len(c.groups) > 0 {
			parent.groups = append(parent.groups, c.groups...)
		}
		parent.pos++
		p.comments = parent
	}
}
   189  
   190  func (c *commentState) closeNode(p *parser, n ast.Node) ast.Node {
   191  	if p.comments != c {
   192  		if !p.panicking {
   193  			err := errors.Newf(p.pos, "unmatched comments")
   194  			p.errors = errors.Append(p.errors, err)
   195  			p.panicking = true
   196  			panic(err)
   197  		}
   198  		return n
   199  	}
   200  	p.comments = c.parent
   201  	if c.parent != nil {
   202  		c.parent.lastChild = n
   203  		c.parent.lastPos = c.pos
   204  		c.parent.pos++
   205  	}
   206  	for _, cg := range c.groups {
   207  		if n != nil {
   208  			if cg != nil {
   209  				n.AddComment(cg)
   210  			}
   211  		}
   212  	}
   213  	c.groups = nil
   214  	return n
   215  }
   216  
// closeExpr is closeNode specialized to return the argument as an
// ast.Expr, for use in expression-producing productions.
func (c *commentState) closeExpr(p *parser, n ast.Expr) ast.Expr {
	c.closeNode(p, n)
	return n
}
   221  
// closeClause is closeNode specialized to return the argument as an
// ast.Clause, for use in comprehension-clause productions.
func (c *commentState) closeClause(p *parser, n ast.Clause) ast.Clause {
	c.closeNode(p, n)
	return n
}
   226  
   227  // ----------------------------------------------------------------------------
   228  // Parsing support
   229  
   230  func (p *parser) printTrace(a ...interface{}) {
   231  	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
   232  	const n = len(dots)
   233  	pos := p.file.Position(p.pos)
   234  	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
   235  	i := 2 * p.indent
   236  	for i > n {
   237  		fmt.Print(dots)
   238  		i -= n
   239  	}
   240  	// i <= n
   241  	fmt.Print(dots[0:i])
   242  	fmt.Println(a...)
   243  }
   244  
// trace prints an opening trace line for msg and increases the
// indentation level; it returns p so it composes with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
   250  
// un closes the trace scope opened by trace.
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
   256  
   257  // Advance to the next
   258  func (p *parser) next0() {
   259  	// Because of one-token look-ahead, print the previous token
   260  	// when tracing as it provides a more readable output. The
   261  	// very first token (!p.pos.IsValid()) is not initialized
   262  	// (it is ILLEGAL), so don't print it .
   263  	if p.trace && p.pos.IsValid() {
   264  		s := p.tok.String()
   265  		switch {
   266  		case p.tok.IsLiteral():
   267  			p.printTrace(s, p.lit)
   268  		case p.tok.IsOperator(), p.tok.IsKeyword():
   269  			p.printTrace("\"" + s + "\"")
   270  		default:
   271  			p.printTrace(s)
   272  		}
   273  	}
   274  
   275  	p.pos, p.tok, p.lit = p.scanner.Scan()
   276  }
   277  
   278  // Consume a comment and return it and the line on which it ends.
   279  func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
   280  	// /*-style comments may end on a different line than where they start.
   281  	// Scan the comment for '\n' chars and adjust endline accordingly.
   282  	endline = p.file.Line(p.pos)
   283  	if p.lit[1] == '*' {
   284  		p.assertV0(p.pos, 0, 10, "block quotes")
   285  
   286  		// don't use range here - no need to decode Unicode code points
   287  		for i := 0; i < len(p.lit); i++ {
   288  			if p.lit[i] == '\n' {
   289  				endline++
   290  			}
   291  		}
   292  	}
   293  
   294  	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
   295  	p.next0()
   296  
   297  	return
   298  }
   299  
   300  // Consume a group of adjacent comments, add it to the parser's
   301  // comments list, and return it together with the line at which
   302  // the last comment in the group ends. A non-comment token or n
   303  // empty lines terminate a comment group.
   304  func (p *parser) consumeCommentGroup(prevLine, n int) (comments *ast.CommentGroup, endline int) {
   305  	var list []*ast.Comment
   306  	var rel token.RelPos
   307  	endline = p.file.Line(p.pos)
   308  	switch endline - prevLine {
   309  	case 0:
   310  		rel = token.Blank
   311  	case 1:
   312  		rel = token.Newline
   313  	default:
   314  		rel = token.NewSection
   315  	}
   316  	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
   317  		var comment *ast.Comment
   318  		comment, endline = p.consumeComment()
   319  		list = append(list, comment)
   320  	}
   321  
   322  	cg := &ast.CommentGroup{List: list}
   323  	ast.SetRelPos(cg, rel)
   324  	comments = cg
   325  	return
   326  }
   327  
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
func (p *parser) next() {
	// A leadComment may not be consumed if it leads an inner token of a node.
	if p.leadComment != nil {
		p.comments.add(p.leadComment)
	}
	p.leadComment = nil
	prev := p.pos
	p.next0()
	p.comments.pos++

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		currentLine := p.file.Line(p.pos)
		prevLine := p.file.Line(prev)
		if prevLine == currentLine {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(prevLine, 0)
			if p.file.Line(p.pos) != endline {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				comment.Line = true
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			// A previously collected group is no longer a candidate
			// lead comment; record it at the current position.
			if comment != nil {
				p.comments.add(comment)
			}
			comment, endline = p.consumeCommentGroup(prevLine, 1)
			// NOTE(review): prevLine is set to the line *before* the
			// group just consumed (currentLine is only updated after)
			// — looks intentional for RelPos computation, but confirm.
			prevLine = currentLine
			currentLine = p.file.Line(p.pos)

		}

		if endline+1 == p.file.Line(p.pos) && p.tok != token.EOF {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			comment.Doc = true
			p.leadComment = comment
		} else {
			p.comments.add(comment)
		}
	}
}
   391  
   392  // assertV0 indicates the last version at which a certain feature was
   393  // supported.
   394  func (p *parser) assertV0(pos token.Pos, minor, patch int, name string) {
   395  	v := internal.Version(minor, patch)
   396  	base := p.version
   397  	if base == 0 {
   398  		base = internal.APIVersionSupported
   399  	}
   400  	if base > v {
   401  		p.errors = errors.Append(p.errors,
   402  			errors.Wrapf(&DeprecationError{v}, pos,
   403  				"use of deprecated %s (deprecated as of v0.%d.%d)", name, minor, patch+1))
   404  	}
   405  }
   406  
   407  func (p *parser) errf(pos token.Pos, msg string, args ...interface{}) {
   408  	// ePos := p.file.Position(pos)
   409  	ePos := pos
   410  
   411  	// If AllErrors is not set, discard errors reported on the same line
   412  	// as the last recorded error and stop parsing if there are more than
   413  	// 10 errors.
   414  	if p.mode&allErrorsMode == 0 {
   415  		errors := errors.Errors(p.errors)
   416  		n := len(errors)
   417  		if n > 0 && errors[n-1].Position().Line() == ePos.Line() {
   418  			return // discard - likely a spurious error
   419  		}
   420  		if n > 10 {
   421  			p.panicking = true
   422  			panic("too many errors")
   423  		}
   424  	}
   425  
   426  	p.errors = errors.Append(p.errors, errors.Newf(ePos, msg, args...))
   427  }
   428  
   429  func (p *parser) errorExpected(pos token.Pos, obj string) {
   430  	if pos != p.pos {
   431  		p.errf(pos, "expected %s", obj)
   432  		return
   433  	}
   434  	// the error happened at the current position;
   435  	// make the error message more specific
   436  	if p.tok == token.COMMA && p.lit == "\n" {
   437  		p.errf(pos, "expected %s, found newline", obj)
   438  		return
   439  	}
   440  
   441  	if p.tok.IsLiteral() {
   442  		p.errf(pos, "expected %s, found '%s' %s", obj, p.tok, p.lit)
   443  	} else {
   444  		p.errf(pos, "expected %s, found '%s'", obj, p.tok)
   445  	}
   446  }
   447  
   448  func (p *parser) expect(tok token.Token) token.Pos {
   449  	pos := p.pos
   450  	if p.tok != tok {
   451  		p.errorExpected(pos, "'"+tok.String()+"'")
   452  	}
   453  	p.next() // make progress
   454  	return pos
   455  }
   456  
   457  // expectClosing is like expect but provides a better error message
   458  // for the common case of a missing comma before a newline.
   459  func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
   460  	if p.tok != tok && p.tok == token.COMMA && p.lit == "\n" {
   461  		p.errf(p.pos, "missing ',' before newline in %s", context)
   462  		p.next()
   463  	}
   464  	return p.expect(tok)
   465  }
   466  
   467  func (p *parser) expectComma() {
   468  	// semicolon is optional before a closing ')', ']', '}', or newline
   469  	if p.tok != token.RPAREN && p.tok != token.RBRACE && p.tok != token.EOF {
   470  		switch p.tok {
   471  		case token.COMMA:
   472  			p.next()
   473  		default:
   474  			p.errorExpected(p.pos, "','")
   475  			syncExpr(p)
   476  		}
   477  	}
   478  }
   479  
   480  func (p *parser) atComma(context string, follow ...token.Token) bool {
   481  	if p.tok == token.COMMA {
   482  		return true
   483  	}
   484  	for _, t := range follow {
   485  		if p.tok == t {
   486  			return false
   487  		}
   488  	}
   489  	// TODO: find a way to detect crossing lines now we don't have a semi.
   490  	if p.lit == "\n" {
   491  		p.errf(p.pos, "missing ',' before newline")
   492  	} else {
   493  		p.errf(p.pos, "missing ',' in %s", context)
   494  	}
   495  	return true // "insert" comma and continue
   496  }
   497  
// syncExpr advances to the next field in a field list.
// Used for synchronization after an error.
func syncExpr(p *parser) {
	for {
		switch p.tok {
		case token.COMMA:
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 sync calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call syncStmt and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.syncPos.Before(p.pos) {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		case token.EOF:
			return
		}
		// Not at a sync point (or a stuck one): skip one token.
		p.next()
	}
}
   531  
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	// Offset panics on out-of-range positions; the deferred recover
	// converts that panic into returning the EOF position instead.
	defer func() {
		if recover() != nil {
			res = p.file.Pos(p.file.Base()+p.file.Size(), pos.RelPos()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
   550  
   551  // ----------------------------------------------------------------------------
   552  // Identifiers
   553  
   554  func (p *parser) parseIdent() *ast.Ident {
   555  	c := p.openComments()
   556  	pos := p.pos
   557  	name := "_"
   558  	if p.tok == token.IDENT {
   559  		name = p.lit
   560  		p.next()
   561  	} else {
   562  		p.expect(token.IDENT) // use expect() error handling
   563  	}
   564  	ident := &ast.Ident{NamePos: pos, Name: name}
   565  	c.closeNode(p, ident)
   566  	return ident
   567  }
   568  
   569  func (p *parser) parseKeyIdent() *ast.Ident {
   570  	c := p.openComments()
   571  	pos := p.pos
   572  	name := p.lit
   573  	p.next()
   574  	ident := &ast.Ident{NamePos: pos, Name: name}
   575  	c.closeNode(p, ident)
   576  	return ident
   577  }
   578  
   579  // ----------------------------------------------------------------------------
   580  // Expressions
   581  
// parseOperand returns an expression.
// Callers must verify the result.
func (p *parser) parseOperand() (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		return p.parseIdent()

	case token.LBRACE:
		return p.parseStruct()

	case token.LBRACK:
		return p.parseList()

	case token.FUNC:
		// 'func' is only a keyword when function parsing is enabled;
		// otherwise it is treated as a plain identifier.
		if p.mode&parseFuncsMode != 0 {
			return p.parseFunc()
		} else {
			return p.parseKeyIdent()
		}

	case token.BOTTOM:
		c := p.openComments()
		x := &ast.BottomLit{Bottom: p.pos}
		p.next()
		return c.closeExpr(p, x)

	case token.NULL, token.TRUE, token.FALSE, token.INT, token.FLOAT, token.STRING:
		c := p.openComments()
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return c.closeExpr(p, x)

	case token.INTERPOLATION:
		return p.parseInterpolation()

	case token.LPAREN:
		c := p.openComments()
		defer func() { c.closeNode(p, expr) }()
		lparen := p.pos
		p.next()
		p.exprLev++
		p.openList()
		x := p.parseRHS() // types may be parenthesized: (some type)
		p.closeList()
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{
			Lparen: lparen,
			X:      x,
			Rparen: rparen}

	default:
		// Remaining keywords are accepted as identifiers here; see
		// parseLabel for how keyword labels are validated.
		if p.tok.IsKeyword() {
			return p.parseKeyIdent()
		}
	}

	// we have an error: emit a BadExpr spanning the skipped tokens
	// and resynchronize.
	c := p.openComments()
	pos := p.pos
	p.errorExpected(pos, "operand")
	syncExpr(p)
	return c.closeExpr(p, &ast.BadExpr{From: pos, To: p.pos})
}
   650  
// parseIndexOrSlice parses, after operand x, either an index
// expression x[i] or a slice expression x[lo:hi], distinguished by the
// presence of at least one ':'.
func (p *parser) parseIndexOrSlice(x ast.Expr) (expr ast.Expr) {
	if p.trace {
		defer un(trace(p, "IndexOrSlice"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()
	// Comments collected from here on are recorded at position 1.
	c.pos = 1

	const N = 2 // max number of index expressions (low, high)
	lbrack := p.expect(token.LBRACK)

	p.exprLev++
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	if p.tok != token.COLON {
		index[0] = p.parseRHS()
	}
	nColons := 0
	for p.tok == token.COLON && nColons < len(colons) {
		colons[nColons] = p.pos
		nColons++
		p.next()
		// Each slice bound is optional: absent before ':' / ']' / EOF.
		if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
			index[nColons] = p.parseRHS()
		}
	}
	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if nColons > 0 {
		return &ast.SliceExpr{
			X:      x,
			Lbrack: lbrack,
			Low:    index[0],
			High:   index[1],
			Rbrack: rbrack}
	}

	return &ast.IndexExpr{
		X:      x,
		Lbrack: lbrack,
		Index:  index[0],
		Rbrack: rbrack}
}
   696  
// parseCallOrConversion parses, after operand fun, a parenthesized,
// comma-separated argument list, producing a CallExpr.
func (p *parser) parseCallOrConversion(fun ast.Expr) (expr *ast.CallExpr) {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	// Treat the whole argument list as a single comment position.
	p.openList()
	defer p.closeList()

	lparen := p.expect(token.LPAREN)

	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RPAREN && p.tok != token.EOF {
		list = append(list, p.parseRHS()) // builtins may expect a type: make(some type, ...)
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{
		Fun:    fun,
		Lparen: lparen,
		Args:   list,
		Rparen: rparen}
}
   727  
   728  // TODO: inline this function in parseFieldList once we no longer user comment
   729  // position information in parsing.
   730  func (p *parser) consumeDeclComma() {
   731  	if p.atComma("struct literal", token.RBRACE, token.EOF) {
   732  		p.next()
   733  	}
   734  }
   735  
// parseFieldList parses the declarations of a struct body up to (but
// not including) the closing '}' or EOF: attributes, ellipses, and
// fields/embeddings.
func (p *parser) parseFieldList() (list []ast.Decl) {
	if p.trace {
		defer un(trace(p, "FieldList"))
	}
	p.openList()
	defer p.closeList()

	for p.tok != token.RBRACE && p.tok != token.EOF {
		switch p.tok {
		case token.ATTRIBUTE:
			list = append(list, p.parseAttribute())
			p.consumeDeclComma()

		case token.ELLIPSIS:
			c := p.openComments()
			ellipsis := &ast.Ellipsis{Ellipsis: p.pos}
			p.next()
			c.closeNode(p, ellipsis)
			list = append(list, ellipsis)
			p.consumeDeclComma()

		default:
			// parseField consumes its own trailing comma.
			list = append(list, p.parseField())
		}

		// TODO: handle next comma here, after disallowing non-colon separator
		// and we have eliminated the need comment positions.
	}

	return
}
   767  
// parseLetDecl parses a let clause "let X = expr". If 'let' is not
// followed by an identifier, it instead returns (nil, ident) with an
// Ident named "let" so the caller can treat 'let' as a regular label.
func (p *parser) parseLetDecl() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()

	letPos := p.expect(token.LET)
	if p.tok != token.IDENT {
		// NOTE(review): ident is nil here, so closeNode gets a nil
		// node and the returned Ident carries no comments — confirm
		// this is intended.
		c.closeNode(p, ident)
		return nil, &ast.Ident{
			NamePos: letPos,
			Name:    "let",
		}
	}
	defer func() { c.closeNode(p, decl) }()

	ident = p.parseIdent()
	assign := p.expect(token.BIND)
	expr := p.parseRHS()

	p.consumeDeclComma()

	return &ast.LetClause{
		Let:   letPos,
		Ident: ident,
		Equal: assign,
		Expr:  expr,
	}, nil
}
   798  
// parseComprehension parses a comprehension such as "for x in e {...}"
// or "if cond {...}". If the clauses do not form a comprehension, it
// returns (nil, ident) with an Ident for the keyword so the caller can
// reinterpret it as a label.
func (p *parser) parseComprehension() (decl ast.Decl, ident *ast.Ident) {
	if p.trace {
		defer un(trace(p, "Comprehension"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	tok := p.tok
	pos := p.pos
	clauses, fc := p.parseComprehensionClauses(true)
	if fc != nil {
		// Not a comprehension after all: hand back the keyword as an
		// identifier, closed against the inner comment state fc.
		ident = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, ident)
		return nil, ident
	}

	sc := p.openComments()
	expr := p.parseStruct()
	sc.closeExpr(p, expr)

	if p.atComma("struct literal", token.RBRACE) { // TODO: may be EOF
		p.next()
	}

	return &ast.Comprehension{
		Clauses: clauses,
		Value:   expr,
	}, nil
}
   832  
// parseField parses a single struct declaration: a field (possibly
// with shorthand nested labels a: b: v), an embedding, an old-style
// alias, or a declaration produced while parsing the label (let
// clause, comprehension).
func (p *parser) parseField() (decl ast.Decl) {
	if p.trace {
		defer un(trace(p, "Field"))
	}

	c := p.openComments()
	defer func() { c.closeNode(p, decl) }()

	pos := p.pos

	// this is the outermost field; m walks inward as shorthand labels
	// introduce nested fields below.
	this := &ast.Field{Label: nil}
	m := this

	tok := p.tok

	label, expr, decl, ok := p.parseLabel(false)
	if decl != nil {
		// parseLabel consumed a complete declaration (e.g. let or
		// comprehension); return it directly.
		return decl
	}
	m.Label = label

	if !ok {
		// No label: this production is an embedding or an old-style
		// alias.
		if expr == nil {
			expr = p.parseRHS()
		}
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			p.consumeDeclComma()
			return a
		}
		e := &ast.EmbedDecl{Expr: expr}
		p.consumeDeclComma()
		return e
	}

	// Optional '?' or required '!' constraint marker after the label.
	switch p.tok {
	case token.OPTION, token.NOT:
		m.Optional = p.pos
		m.Constraint = p.tok
		p.next()
	}

	// TODO: consider disallowing comprehensions with more than one label.
	// This can be a bit awkward in some cases, but it would naturally
	// enforce the proper style that a comprehension be defined in the
	// smallest possible scope.
	// allowComprehension = false

	switch p.tok {
	case token.COLON:
	case token.COMMA:
		p.expectComma() // sync parser.
		fallthrough

	case token.RBRACE, token.EOF:
		// Label not followed by ':': reinterpret as alias or
		// embedding where the leading token allows it.
		if a, ok := expr.(*ast.Alias); ok {
			p.assertV0(a.Pos(), 1, 3, `old-style alias; use "let X = expr" instead`)
			return a
		}
		switch tok {
		case token.IDENT, token.LBRACK, token.LPAREN,
			token.STRING, token.INTERPOLATION,
			token.NULL, token.TRUE, token.FALSE,
			token.FOR, token.IF, token.LET, token.IN:
			return &ast.EmbedDecl{Expr: expr}
		}
		fallthrough

	default:
		p.errorExpected(p.pos, "label or ':'")
		return &ast.BadDecl{From: pos, To: p.pos}
	}

	m.TokenPos = p.pos
	m.Token = p.tok
	if p.tok != token.COLON {
		p.errorExpected(pos, "':'")
	}
	p.next() // :

	// Parse the value, expanding shorthand label chains (a: b: v)
	// into nested single-field structs.
	for {
		if l, ok := m.Label.(*ast.ListLit); ok && len(l.Elts) != 1 {
			p.errf(l.Pos(), "square bracket must have exactly one element")
		}

		label, expr, _, ok := p.parseLabel(true)
		if !ok || (p.tok != token.COLON && p.tok != token.OPTION && p.tok != token.NOT) {
			// Not another label: this is the field's value.
			if expr == nil {
				expr = p.parseRHS()
			}
			m.Value = expr
			break
		}
		field := &ast.Field{Label: label}
		m.Value = &ast.StructLit{Elts: []ast.Decl{field}}
		m = field

		switch p.tok {
		case token.OPTION, token.NOT:
			m.Optional = p.pos
			m.Constraint = p.tok
			p.next()
		}

		m.TokenPos = p.pos
		m.Token = p.tok
		if p.tok != token.COLON {
			if p.tok.IsLiteral() {
				p.errf(p.pos, "expected ':'; found %s", p.lit)
			} else {
				p.errf(p.pos, "expected ':'; found %s", p.tok)
			}
			break
		}
		p.next()
	}

	// Attributes attach to the innermost field of the chain.
	if attrs := p.parseAttributes(); attrs != nil {
		m.Attrs = attrs
	}

	p.consumeDeclComma()

	return this
}
   958  
   959  func (p *parser) parseAttributes() (attrs []*ast.Attribute) {
   960  	p.openList()
   961  	for p.tok == token.ATTRIBUTE {
   962  		attrs = append(attrs, p.parseAttribute())
   963  	}
   964  	p.closeList()
   965  	return attrs
   966  }
   967  
   968  func (p *parser) parseAttribute() *ast.Attribute {
   969  	c := p.openComments()
   970  	a := &ast.Attribute{At: p.pos, Text: p.lit}
   971  	p.next()
   972  	c.closeNode(p, a)
   973  	return a
   974  }
   975  
// parseLabel parses the label position of a field. It returns, in order:
// the recognized label (if any); the expression parsed at this position
// (so callers can fall back to treating it as an embedding or value);
// a complete declaration when the position turned out to hold one (let
// clause or comprehension); and ok reporting whether label is usable.
// rhs indicates the position is right of a ':' (nested shorthand
// label), where comprehensions and let clauses are not recognized.
func (p *parser) parseLabel(rhs bool) (label ast.Label, expr ast.Expr, decl ast.Decl, ok bool) {
	tok := p.tok
	switch tok {

	case token.FOR, token.IF:
		if rhs {
			expr = p.parseExpr()
			break
		}
		// Try a comprehension; if it fails, the keyword is used as an
		// identifier (returned via ident).
		comp, ident := p.parseComprehension()
		if comp != nil {
			return nil, nil, comp, false
		}
		expr = ident

	case token.LET:
		// Try a let clause; if 'let' is not followed by an
		// identifier, it is used as a label instead.
		let, ident := p.parseLetDecl()
		if let != nil {
			return nil, nil, let, false
		}
		expr = ident

	case token.IDENT, token.STRING, token.INTERPOLATION, token.LPAREN,
		token.NULL, token.TRUE, token.FALSE, token.IN, token.FUNC:
		expr = p.parseExpr()

	case token.LBRACK:
		expr = p.parseRHS()
		switch x := expr.(type) {
		case *ast.ListLit:
			// Note: caller must verify this list is suitable as a label.
			label, ok = x, true
		}
	}

	// Decide whether the parsed expression can serve as a label.
	switch x := expr.(type) {
	case *ast.BasicLit:
		switch x.Kind {
		case token.STRING, token.NULL, token.TRUE, token.FALSE, token.FUNC:
			// Keywords that represent operands.

			// Allowing keywords to be used as a labels should not interfere with
			// generating good errors: any keyword can only appear on the RHS of a
			// field (after a ':'), whereas labels always appear on the LHS.

			label, ok = x, true
		}

	case *ast.Ident:
		if strings.HasPrefix(x.Name, "__") {
			p.errf(x.NamePos, "identifiers starting with '__' are reserved")
		}

		// An identifier may be followed by '=', forming an alias.
		expr = p.parseAlias(x)
		if a, ok := expr.(*ast.Alias); ok {
			if _, ok = a.Expr.(ast.Label); !ok {
				break
			}
			label = a
		} else {
			label = x
		}
		ok = true

	case ast.Label:
		label, ok = x, true
	}
	return label, expr, nil, ok
}
  1045  
  1046  func (p *parser) parseStruct() (expr ast.Expr) {
  1047  	lbrace := p.expect(token.LBRACE)
  1048  
  1049  	if p.trace {
  1050  		defer un(trace(p, "StructLit"))
  1051  	}
  1052  
  1053  	elts := p.parseStructBody()
  1054  	rbrace := p.expectClosing(token.RBRACE, "struct literal")
  1055  	return &ast.StructLit{
  1056  		Lbrace: lbrace,
  1057  		Elts:   elts,
  1058  		Rbrace: rbrace,
  1059  	}
  1060  }
  1061  
  1062  func (p *parser) parseStructBody() []ast.Decl {
  1063  	if p.trace {
  1064  		defer un(trace(p, "StructBody"))
  1065  	}
  1066  
  1067  	p.exprLev++
  1068  	var elts []ast.Decl
  1069  
  1070  	// TODO: consider "stealing" non-lead comments.
  1071  	// for _, cg := range p.comments.groups {
  1072  	// 	if cg != nil {
  1073  	// 		elts = append(elts, cg)
  1074  	// 	}
  1075  	// }
  1076  	// p.comments.groups = p.comments.groups[:0]
  1077  
  1078  	if p.tok != token.RBRACE {
  1079  		elts = p.parseFieldList()
  1080  	}
  1081  	p.exprLev--
  1082  
  1083  	return elts
  1084  }
  1085  
// parseComprehensionClauses parses either new-style (first==true)
// or old-style (first==false).
// Should we now disallow keywords as identifiers? If not, we need to
// return a list of discovered labels as the alternative.
//
// When first is true and the token following "for"/"if" indicates that the
// keyword was actually used as an identifier (':' etc.), it bails out,
// returning nil clauses plus the already-opened comment state so the caller
// can attach it to the identifier instead.
func (p *parser) parseComprehensionClauses(first bool) (clauses []ast.Clause, c *commentState) {
	// TODO: reuse Template spec, which is possible if it doesn't check the
	// first is an identifier.

	for {
		switch p.tok {
		case token.FOR:
			c := p.openComments()
			forPos := p.expect(token.FOR)
			if first {
				// "for" followed by one of these tokens is really a label
				// or lone identifier, not a comprehension.
				switch p.tok {
				case token.COLON, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					return nil, c
				}
			}

			// Either "for value in src" or "for key, value in src".
			var key, value *ast.Ident
			var colon token.Pos
			value = p.parseIdent()
			if p.tok == token.COMMA {
				colon = p.expect(token.COMMA)
				key = value
				value = p.parseIdent()
			}
			// comment attachment position hint — see commentState; TODO confirm
			c.pos = 4
			// params := p.parseParams(nil, ARROW)
			clauses = append(clauses, c.closeClause(p, &ast.ForClause{
				For:    forPos,
				Key:    key,
				Colon:  colon,
				Value:  value,
				In:     p.expect(token.IN),
				Source: p.parseRHS(),
			}))

		case token.IF:
			c := p.openComments()
			ifPos := p.expect(token.IF)
			if first {
				// Same bail-out as for "for": "if" used as an identifier.
				switch p.tok {
				case token.COLON, token.BIND, token.OPTION,
					token.COMMA, token.EOF:
					return nil, c
				}
			}

			clauses = append(clauses, c.closeClause(p, &ast.IfClause{
				If:        ifPos,
				Condition: p.parseRHS(),
			}))

		case token.LET:
			c := p.openComments()
			letPos := p.expect(token.LET)

			ident := p.parseIdent()
			assign := p.expect(token.BIND)
			expr := p.parseRHS()

			clauses = append(clauses, c.closeClause(p, &ast.LetClause{
				Let:   letPos,
				Ident: ident,
				Equal: assign,
				Expr:  expr,
			}))

		default:
			// No more clauses.
			return clauses, nil
		}
		// Clauses may optionally be separated by commas.
		if p.tok == token.COMMA {
			p.next()
		}

		first = false
	}
}
  1167  
  1168  func (p *parser) parseFunc() (expr ast.Expr) {
  1169  	if p.trace {
  1170  		defer un(trace(p, "Func"))
  1171  	}
  1172  	tok := p.tok
  1173  	pos := p.pos
  1174  	fun := p.expect(token.FUNC)
  1175  
  1176  	// "func" might be used as an identifier, in which case bail out early.
  1177  	switch p.tok {
  1178  	case token.COLON, token.BIND, token.OPTION,
  1179  		token.COMMA, token.EOF:
  1180  
  1181  		return &ast.Ident{
  1182  			NamePos: pos,
  1183  			Name:    tok.String(),
  1184  		}
  1185  	}
  1186  
  1187  	p.expect(token.LPAREN)
  1188  	args := p.parseFuncArgs()
  1189  	p.expectClosing(token.RPAREN, "argument type list")
  1190  
  1191  	p.expect(token.COLON)
  1192  	ret := p.parseExpr()
  1193  
  1194  	return &ast.Func{
  1195  		Func: fun,
  1196  		Args: args,
  1197  		Ret:  ret,
  1198  	}
  1199  }
  1200  
  1201  func (p *parser) parseFuncArgs() (list []ast.Expr) {
  1202  	if p.trace {
  1203  		defer un(trace(p, "FuncArgs"))
  1204  	}
  1205  	p.openList()
  1206  	defer p.closeList()
  1207  
  1208  	for p.tok != token.RPAREN && p.tok != token.EOF {
  1209  		list = append(list, p.parseFuncArg())
  1210  		if p.tok != token.RPAREN {
  1211  			p.expectComma()
  1212  		}
  1213  	}
  1214  
  1215  	return list
  1216  }
  1217  
  1218  func (p *parser) parseFuncArg() (expr ast.Expr) {
  1219  	if p.trace {
  1220  		defer un(trace(p, "FuncArg"))
  1221  	}
  1222  	return p.parseExpr()
  1223  }
  1224  
  1225  func (p *parser) parseList() (expr ast.Expr) {
  1226  	lbrack := p.expect(token.LBRACK)
  1227  
  1228  	if p.trace {
  1229  		defer un(trace(p, "ListLiteral"))
  1230  	}
  1231  
  1232  	elts := p.parseListElements()
  1233  
  1234  	if p.tok == token.ELLIPSIS {
  1235  		ellipsis := &ast.Ellipsis{
  1236  			Ellipsis: p.pos,
  1237  		}
  1238  		elts = append(elts, ellipsis)
  1239  		p.next()
  1240  		if p.tok != token.COMMA && p.tok != token.RBRACK {
  1241  			ellipsis.Type = p.parseRHS()
  1242  		}
  1243  		if p.atComma("list literal", token.RBRACK) {
  1244  			p.next()
  1245  		}
  1246  	}
  1247  
  1248  	rbrack := p.expectClosing(token.RBRACK, "list literal")
  1249  	return &ast.ListLit{
  1250  		Lbrack: lbrack,
  1251  		Elts:   elts,
  1252  		Rbrack: rbrack}
  1253  }
  1254  
  1255  func (p *parser) parseListElements() (list []ast.Expr) {
  1256  	if p.trace {
  1257  		defer un(trace(p, "ListElements"))
  1258  	}
  1259  	p.openList()
  1260  	defer p.closeList()
  1261  
  1262  	for p.tok != token.RBRACK && p.tok != token.ELLIPSIS && p.tok != token.EOF {
  1263  		expr, ok := p.parseListElement()
  1264  		list = append(list, expr)
  1265  		if !ok {
  1266  			break
  1267  		}
  1268  	}
  1269  
  1270  	return
  1271  }
  1272  
// parseListElement parses a single list element: either a comprehension
// ("for"/"if" clauses followed by a struct) or an ordinary expression with
// an optional alias. The boolean result reports whether the caller should
// keep parsing further elements.
func (p *parser) parseListElement() (expr ast.Expr, ok bool) {
	if p.trace {
		defer un(trace(p, "ListElement"))
	}
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	switch p.tok {
	case token.FOR, token.IF:
		tok := p.tok
		pos := p.pos
		// Try a comprehension; nil clauses means the keyword was really
		// used as an identifier (fc carries its opened comment state).
		clauses, fc := p.parseComprehensionClauses(true)
		if clauses != nil {
			sc := p.openComments()
			expr := p.parseStruct()
			sc.closeExpr(p, expr)

			if p.atComma("list literal", token.RBRACK) { // TODO: may be EOF
				p.next()
			}

			return &ast.Comprehension{
				Clauses: clauses,
				Value:   expr,
			}, true
		}

		// The keyword was an identifier after all.
		expr = &ast.Ident{
			NamePos: pos,
			Name:    tok.String(),
		}
		fc.closeNode(p, expr)

	default:
		expr = p.parseUnaryExpr()
	}

	expr = p.parseBinaryExprTail(token.LowestPrec+1, expr)
	expr = p.parseAlias(expr)

	// Enforce there is an explicit comma. We could also allow the
	// omission of commas in lists, but this gives rise to some ambiguities
	// with list comprehensions.
	//
	// p.lit != "," distinguishes a scanner-inserted implicit comma
	// (whose literal is the newline) from one typed by the user.
	if p.tok == token.COMMA && p.lit != "," {
		p.next()
		// Allow missing comma for last element, though, to be compliant
		// with JSON.
		if p.tok == token.RBRACK || p.tok == token.FOR || p.tok == token.IF {
			return expr, false
		}
		p.errf(p.pos, "missing ',' before newline in list literal")
	} else if !p.atComma("list literal", token.RBRACK, token.FOR, token.IF) {
		return expr, false
	}
	p.next()

	return expr, true
}
  1331  
  1332  // parseAlias turns an expression into an alias.
  1333  func (p *parser) parseAlias(lhs ast.Expr) (expr ast.Expr) {
  1334  	if p.tok != token.BIND {
  1335  		return lhs
  1336  	}
  1337  	pos := p.pos
  1338  	p.next()
  1339  	expr = p.parseRHS()
  1340  	if expr == nil {
  1341  		panic("empty return")
  1342  	}
  1343  	switch x := lhs.(type) {
  1344  	case *ast.Ident:
  1345  		return &ast.Alias{Ident: x, Equal: pos, Expr: expr}
  1346  	}
  1347  	p.errf(p.pos, "expected identifier for alias")
  1348  	return expr
  1349  }
  1350  
  1351  // checkExpr checks that x is an expression (and not a type).
  1352  func (p *parser) checkExpr(x ast.Expr) ast.Expr {
  1353  	switch unparen(x).(type) {
  1354  	case *ast.BadExpr:
  1355  	case *ast.BottomLit:
  1356  	case *ast.Ident:
  1357  	case *ast.BasicLit:
  1358  	case *ast.Interpolation:
  1359  	case *ast.Func:
  1360  	case *ast.StructLit:
  1361  	case *ast.ListLit:
  1362  	case *ast.ParenExpr:
  1363  		panic("unreachable")
  1364  	case *ast.SelectorExpr:
  1365  	case *ast.IndexExpr:
  1366  	case *ast.SliceExpr:
  1367  	case *ast.CallExpr:
  1368  	case *ast.UnaryExpr:
  1369  	case *ast.BinaryExpr:
  1370  	default:
  1371  		// all other nodes are not proper expressions
  1372  		p.errorExpected(x.Pos(), "expression")
  1373  		x = &ast.BadExpr{
  1374  			From: x.Pos(), To: p.safePos(x.End()),
  1375  		}
  1376  	}
  1377  	return x
  1378  }
  1379  
  1380  // If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
  1381  func unparen(x ast.Expr) ast.Expr {
  1382  	if p, isParen := x.(*ast.ParenExpr); isParen {
  1383  		x = unparen(p.X)
  1384  	}
  1385  	return x
  1386  }
  1387  
  1388  // If lhs is set and the result is an identifier, it is not resolved.
  1389  func (p *parser) parsePrimaryExpr() ast.Expr {
  1390  	if p.trace {
  1391  		defer un(trace(p, "PrimaryExpr"))
  1392  	}
  1393  
  1394  	return p.parsePrimaryExprTail(p.parseOperand())
  1395  }
  1396  
// parsePrimaryExprTail repeatedly extends operand with selector ('.'),
// index/slice ('['), and call ('(') suffixes until none follows.
func (p *parser) parsePrimaryExprTail(operand ast.Expr) ast.Expr {
	x := operand
L:
	for {
		switch p.tok {
		case token.PERIOD:
			c := p.openComments()
			c.pos = 1
			p.next()
			switch p.tok {
			case token.IDENT:
				x = &ast.SelectorExpr{
					X:   p.checkExpr(x),
					Sel: p.parseIdent(),
				}
			case token.STRING:
				// Only simple double-quoted strings may act as selectors;
				// `""` starts a multiline string and falls through to the
				// error path below.
				if strings.HasPrefix(p.lit, `"`) && !strings.HasPrefix(p.lit, `""`) {
					str := &ast.BasicLit{
						ValuePos: p.pos,
						Kind:     token.STRING,
						Value:    p.lit,
					}
					p.next()
					x = &ast.SelectorExpr{
						X:   p.checkExpr(x),
						Sel: str,
					}
					break
				}
				fallthrough
			default:
				// Recover with a placeholder "_" selector so parsing can
				// continue past the bad token.
				pos := p.pos
				p.errorExpected(pos, "selector")
				p.next() // make progress
				x = &ast.SelectorExpr{X: x, Sel: &ast.Ident{NamePos: pos, Name: "_"}}
			}
			c.closeNode(p, x)
		case token.LBRACK:
			x = p.parseIndexOrSlice(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExpr(x))
		default:
			break L
		}
	}

	return x
}
  1445  
  1446  // If lhs is set and the result is an identifier, it is not resolved.
  1447  func (p *parser) parseUnaryExpr() ast.Expr {
  1448  	if p.trace {
  1449  		defer un(trace(p, "UnaryExpr"))
  1450  	}
  1451  
  1452  	switch p.tok {
  1453  	case token.ADD, token.SUB, token.NOT, token.MUL,
  1454  		token.LSS, token.LEQ, token.GEQ, token.GTR,
  1455  		token.NEQ, token.MAT, token.NMAT:
  1456  		pos, op := p.pos, p.tok
  1457  		c := p.openComments()
  1458  		p.next()
  1459  		return c.closeExpr(p, &ast.UnaryExpr{
  1460  			OpPos: pos,
  1461  			Op:    op,
  1462  			X:     p.checkExpr(p.parseUnaryExpr()),
  1463  		})
  1464  	}
  1465  
  1466  	return p.parsePrimaryExpr()
  1467  }
  1468  
  1469  func (p *parser) tokPrec() (token.Token, int) {
  1470  	tok := p.tok
  1471  	if tok == token.IDENT {
  1472  		switch p.lit {
  1473  		case "quo":
  1474  			return token.IQUO, 7
  1475  		case "rem":
  1476  			return token.IREM, 7
  1477  		case "div":
  1478  			return token.IDIV, 7
  1479  		case "mod":
  1480  			return token.IMOD, 7
  1481  		default:
  1482  			return tok, 0
  1483  		}
  1484  	}
  1485  	return tok, tok.Precedence()
  1486  }
  1487  
  1488  // If lhs is set and the result is an identifier, it is not resolved.
  1489  func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
  1490  	if p.trace {
  1491  		defer un(trace(p, "BinaryExpr"))
  1492  	}
  1493  	p.openList()
  1494  	defer p.closeList()
  1495  
  1496  	return p.parseBinaryExprTail(prec1, p.parseUnaryExpr())
  1497  }
  1498  
  1499  func (p *parser) parseBinaryExprTail(prec1 int, x ast.Expr) ast.Expr {
  1500  	for {
  1501  		op, prec := p.tokPrec()
  1502  		if prec < prec1 {
  1503  			return x
  1504  		}
  1505  		c := p.openComments()
  1506  		c.pos = 1
  1507  		pos := p.expect(p.tok)
  1508  		x = c.closeExpr(p, &ast.BinaryExpr{
  1509  			X:     p.checkExpr(x),
  1510  			OpPos: pos,
  1511  			Op:    op,
  1512  			// Treat nested expressions as RHS.
  1513  			Y: p.checkExpr(p.parseBinaryExpr(prec + 1))})
  1514  	}
  1515  }
  1516  
// parseInterpolation parses a string with \( ... ) interpolations. The
// result alternates string fragments (*ast.BasicLit) with the interpolated
// expressions; it always starts and ends with a fragment.
func (p *parser) parseInterpolation() (expr ast.Expr) {
	c := p.openComments()
	defer func() { c.closeNode(p, expr) }()

	p.openList()
	defer p.closeList()

	// cc tracks comments for the current string fragment; it is re-opened
	// after each interpolated expression.
	cc := p.openComments()

	lit := p.lit
	pos := p.pos
	p.next()
	last := &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: lit}
	exprs := []ast.Expr{last}

	// Each LPAREN marks the start of an interpolated expression.
	for p.tok == token.LPAREN {
		c.pos = 1
		p.expect(token.LPAREN)
		cc.closeExpr(p, last)

		exprs = append(exprs, p.parseRHS())

		cc = p.openComments()
		if p.tok != token.RPAREN {
			p.errf(p.pos, "expected ')' for string interpolation")
		}
		// Hand control back to the scanner to continue the enclosing
		// string literal after the ')'.
		lit = p.scanner.ResumeInterpolation()
		pos = p.pos
		p.next()
		last = &ast.BasicLit{
			ValuePos: pos,
			Kind:     token.STRING,
			Value:    lit,
		}
		exprs = append(exprs, last)
	}
	cc.closeExpr(p, last)
	return &ast.Interpolation{Elts: exprs}
}
  1556  
  1557  // Callers must check the result (using checkExpr), depending on context.
  1558  func (p *parser) parseExpr() (expr ast.Expr) {
  1559  	if p.trace {
  1560  		defer un(trace(p, "Expression"))
  1561  	}
  1562  
  1563  	c := p.openComments()
  1564  	defer func() { c.closeExpr(p, expr) }()
  1565  
  1566  	return p.parseBinaryExpr(token.LowestPrec + 1)
  1567  }
  1568  
  1569  func (p *parser) parseRHS() ast.Expr {
  1570  	x := p.checkExpr(p.parseExpr())
  1571  	return x
  1572  }
  1573  
  1574  // ----------------------------------------------------------------------------
  1575  // Declarations
  1576  
  1577  func isValidImport(lit string) bool {
  1578  	const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
  1579  	s, _ := literal.Unquote(lit) // go/scanner returns a legal string literal
  1580  	if p := strings.LastIndexByte(s, ':'); p >= 0 {
  1581  		s = s[:p]
  1582  	}
  1583  	for _, r := range s {
  1584  		if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
  1585  			return false
  1586  		}
  1587  	}
  1588  	return s != ""
  1589  }
  1590  
  1591  func (p *parser) parseImportSpec(_ int) *ast.ImportSpec {
  1592  	if p.trace {
  1593  		defer un(trace(p, "ImportSpec"))
  1594  	}
  1595  
  1596  	c := p.openComments()
  1597  
  1598  	var ident *ast.Ident
  1599  	if p.tok == token.IDENT {
  1600  		ident = p.parseIdent()
  1601  	}
  1602  
  1603  	pos := p.pos
  1604  	var path string
  1605  	if p.tok == token.STRING {
  1606  		path = p.lit
  1607  		if !isValidImport(path) {
  1608  			p.errf(pos, "invalid import path: %s", path)
  1609  		}
  1610  		p.next()
  1611  		p.expectComma() // call before accessing p.linecomment
  1612  	} else {
  1613  		p.expect(token.STRING) // use expect() error handling
  1614  		if p.tok == token.COMMA {
  1615  			p.expectComma() // call before accessing p.linecomment
  1616  		}
  1617  	}
  1618  	// collect imports
  1619  	spec := &ast.ImportSpec{
  1620  		Name: ident,
  1621  		Path: &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
  1622  	}
  1623  	c.closeNode(p, spec)
  1624  	p.imports = append(p.imports, spec)
  1625  
  1626  	return spec
  1627  }
  1628  
  1629  func (p *parser) parseImports() *ast.ImportDecl {
  1630  	if p.trace {
  1631  		defer un(trace(p, "Imports"))
  1632  	}
  1633  	c := p.openComments()
  1634  
  1635  	ident := p.parseIdent()
  1636  	var lparen, rparen token.Pos
  1637  	var list []*ast.ImportSpec
  1638  	if p.tok == token.LPAREN {
  1639  		lparen = p.pos
  1640  		p.next()
  1641  		p.openList()
  1642  		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
  1643  			list = append(list, p.parseImportSpec(iota))
  1644  		}
  1645  		p.closeList()
  1646  		rparen = p.expect(token.RPAREN)
  1647  		p.expectComma()
  1648  	} else {
  1649  		list = append(list, p.parseImportSpec(0))
  1650  	}
  1651  
  1652  	d := &ast.ImportDecl{
  1653  		Import: ident.Pos(),
  1654  		Lparen: lparen,
  1655  		Specs:  list,
  1656  		Rparen: rparen,
  1657  	}
  1658  	c.closeNode(p, d)
  1659  	return d
  1660  }
  1661  
  1662  // ----------------------------------------------------------------------------
  1663  // Source files
  1664  
// parseFile parses an entire CUE file: optional leading attributes, an
// optional package clause, import declarations, and the remaining
// top-level declarations, subject to the parser's mode flags.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	c := p.comments

	// Don't bother parsing the rest if we had errors scanning the first
	// token. Likely not a CUE source file at all.
	if p.errors != nil {
		return nil
	}
	p.openList()

	var decls []ast.Decl

	// File-level attributes may precede the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	// The package clause is not a declaration: it does not appear in any
	// scope.
	if p.tok == token.IDENT && p.lit == "package" {
		c := p.openComments()

		pos := p.pos
		var name *ast.Ident
		p.expect(token.IDENT)
		name = p.parseIdent()
		if name.Name == "_" && p.mode&declarationErrorsMode != 0 {
			p.errf(p.pos, "invalid package name _")
		}

		pkg := &ast.Package{
			PackagePos: pos,
			Name:       name,
		}
		decls = append(decls, pkg)
		p.expectComma()
		c.closeNode(p, pkg)
	}

	// Attributes may also follow the package clause.
	for p.tok == token.ATTRIBUTE {
		decls = append(decls, p.parseAttribute())
		p.consumeDeclComma()
	}

	if p.mode&packageClauseOnlyMode == 0 {
		// import decls
		for p.tok == token.IDENT && p.lit == "import" {
			decls = append(decls, p.parseImports())
		}

		if p.mode&importsOnlyMode == 0 {
			// rest of package decls
			// TODO: loop and allow multiple expressions.
			decls = append(decls, p.parseFieldList()...)
			p.expect(token.EOF)
		}
	}
	p.closeList()

	f := &ast.File{
		Imports: p.imports,
		Decls:   decls,
	}
	c.closeNode(p, f)
	return f
}
  1735  

View as plain text