path: root/src/go/printer
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:23:18 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-16 19:23:18 +0000
commit     43a123c1ae6613b3efeed291fa552ecd909d3acf (patch)
tree       fd92518b7024bc74031f78a1cf9e454b65e73665 /src/go/printer
parent     Initial commit. (diff)
Adding upstream version 1.20.14.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/go/printer')
-rw-r--r--  src/go/printer/comment.go | 155
-rw-r--r--  src/go/printer/example_test.go | 67
-rw-r--r--  src/go/printer/gobuild.go | 170
-rw-r--r--  src/go/printer/nodes.go | 2001
-rw-r--r--  src/go/printer/performance_test.go | 91
-rw-r--r--  src/go/printer/printer.go | 1436
-rw-r--r--  src/go/printer/printer_test.go | 827
-rw-r--r--  src/go/printer/testdata/alignment.golden | 172
-rw-r--r--  src/go/printer/testdata/alignment.input | 179
-rw-r--r--  src/go/printer/testdata/comments.golden | 774
-rw-r--r--  src/go/printer/testdata/comments.input | 773
-rw-r--r--  src/go/printer/testdata/comments.x | 55
-rw-r--r--  src/go/printer/testdata/comments2.golden | 163
-rw-r--r--  src/go/printer/testdata/comments2.input | 168
-rw-r--r--  src/go/printer/testdata/complit.input | 65
-rw-r--r--  src/go/printer/testdata/complit.x | 62
-rw-r--r--  src/go/printer/testdata/declarations.golden | 1008
-rw-r--r--  src/go/printer/testdata/declarations.input | 1021
-rw-r--r--  src/go/printer/testdata/doc.golden | 21
-rw-r--r--  src/go/printer/testdata/doc.input | 20
-rw-r--r--  src/go/printer/testdata/empty.golden | 5
-rw-r--r--  src/go/printer/testdata/empty.input | 5
-rw-r--r--  src/go/printer/testdata/expressions.golden | 743
-rw-r--r--  src/go/printer/testdata/expressions.input | 771
-rw-r--r--  src/go/printer/testdata/expressions.raw | 743
-rw-r--r--  src/go/printer/testdata/generics.golden | 109
-rw-r--r--  src/go/printer/testdata/generics.input | 105
-rw-r--r--  src/go/printer/testdata/go2numbers.golden | 186
-rw-r--r--  src/go/printer/testdata/go2numbers.input | 186
-rw-r--r--  src/go/printer/testdata/go2numbers.norm | 186
-rw-r--r--  src/go/printer/testdata/gobuild1.golden | 6
-rw-r--r--  src/go/printer/testdata/gobuild1.input | 7
-rw-r--r--  src/go/printer/testdata/gobuild2.golden | 8
-rw-r--r--  src/go/printer/testdata/gobuild2.input | 9
-rw-r--r--  src/go/printer/testdata/gobuild3.golden | 10
-rw-r--r--  src/go/printer/testdata/gobuild3.input | 11
-rw-r--r--  src/go/printer/testdata/gobuild4.golden | 6
-rw-r--r--  src/go/printer/testdata/gobuild4.input | 5
-rw-r--r--  src/go/printer/testdata/gobuild5.golden | 4
-rw-r--r--  src/go/printer/testdata/gobuild5.input | 4
-rw-r--r--  src/go/printer/testdata/gobuild6.golden | 5
-rw-r--r--  src/go/printer/testdata/gobuild6.input | 4
-rw-r--r--  src/go/printer/testdata/gobuild7.golden | 11
-rw-r--r--  src/go/printer/testdata/gobuild7.input | 11
-rw-r--r--  src/go/printer/testdata/linebreaks.golden | 295
-rw-r--r--  src/go/printer/testdata/linebreaks.input | 291
-rw-r--r--  src/go/printer/testdata/parser.go | 2148
-rw-r--r--  src/go/printer/testdata/slow.golden | 85
-rw-r--r--  src/go/printer/testdata/slow.input | 85
-rw-r--r--  src/go/printer/testdata/statements.golden | 644
-rw-r--r--  src/go/printer/testdata/statements.input | 555
51 files changed, 16471 insertions, 0 deletions
diff --git a/src/go/printer/comment.go b/src/go/printer/comment.go
new file mode 100644
index 0000000..9012714
--- /dev/null
+++ b/src/go/printer/comment.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer
+
+import (
+ "go/ast"
+ "go/doc/comment"
+ "strings"
+)
+
+// formatDocComment reformats the doc comment list,
+// returning the canonical formatting.
+func formatDocComment(list []*ast.Comment) []*ast.Comment {
+ // Extract comment text (removing comment markers).
+ var kind, text string
+ var directives []*ast.Comment
+ if len(list) == 1 && strings.HasPrefix(list[0].Text, "/*") {
+ kind = "/*"
+ text = list[0].Text
+ if !strings.Contains(text, "\n") || allStars(text) {
+ // Single-line /* .. */ comment in doc comment position,
+ // or multiline old-style comment like
+ // /*
+ // * Comment
+ // * text here.
+ // */
+ // Should not happen, since it will not work well as a
+ // doc comment, but if it does, just ignore:
+ // reformatting it will only make the situation worse.
+ return list
+ }
+ text = text[2 : len(text)-2] // cut /* and */
+ } else if strings.HasPrefix(list[0].Text, "//") {
+ kind = "//"
+ var b strings.Builder
+ for _, c := range list {
+ after, found := strings.CutPrefix(c.Text, "//")
+ if !found {
+ return list
+ }
+ // Accumulate //go:build etc lines separately.
+ if isDirective(after) {
+ directives = append(directives, c)
+ continue
+ }
+ b.WriteString(strings.TrimPrefix(after, " "))
+ b.WriteString("\n")
+ }
+ text = b.String()
+ } else {
+ // Not sure what this is, so leave alone.
+ return list
+ }
+
+ if text == "" {
+ return list
+ }
+
+ // Parse comment and reformat as text.
+ var p comment.Parser
+ d := p.Parse(text)
+
+ var pr comment.Printer
+ text = string(pr.Comment(d))
+
+ // For /* */ comment, return one big comment with text inside.
+ slash := list[0].Slash
+ if kind == "/*" {
+ c := &ast.Comment{
+ Slash: slash,
+ Text: "/*\n" + text + "*/",
+ }
+ return []*ast.Comment{c}
+ }
+
+ // For // comment, return sequence of // lines.
+ var out []*ast.Comment
+ for text != "" {
+ var line string
+ line, text, _ = strings.Cut(text, "\n")
+ if line == "" {
+ line = "//"
+ } else if strings.HasPrefix(line, "\t") {
+ line = "//" + line
+ } else {
+ line = "// " + line
+ }
+ out = append(out, &ast.Comment{
+ Slash: slash,
+ Text: line,
+ })
+ }
+ if len(directives) > 0 {
+ out = append(out, &ast.Comment{
+ Slash: slash,
+ Text: "//",
+ })
+ for _, c := range directives {
+ out = append(out, &ast.Comment{
+ Slash: slash,
+ Text: c.Text,
+ })
+ }
+ }
+ return out
+}
+
+// isDirective reports whether c is a comment directive.
+// See go.dev/issue/37974.
+// This code is also in go/ast.
+func isDirective(c string) bool {
+ // "//line " is a line directive.
+ // "//extern " is for gccgo.
+ // "//export " is for cgo.
+ // (The // has been removed.)
+ if strings.HasPrefix(c, "line ") || strings.HasPrefix(c, "extern ") || strings.HasPrefix(c, "export ") {
+ return true
+ }
+
+ // "//[a-z0-9]+:[a-z0-9]"
+ // (The // has been removed.)
+ colon := strings.Index(c, ":")
+ if colon <= 0 || colon+1 >= len(c) {
+ return false
+ }
+ for i := 0; i <= colon+1; i++ {
+ if i == colon {
+ continue
+ }
+ b := c[i]
+ if !('a' <= b && b <= 'z' || '0' <= b && b <= '9') {
+ return false
+ }
+ }
+ return true
+}
+
+// allStars reports whether text is the interior of an
+// old-style /* */ comment with a star at the start of each line.
+func allStars(text string) bool {
+ for i := 0; i < len(text); i++ {
+ if text[i] == '\n' {
+ j := i + 1
+ for j < len(text) && (text[j] == ' ' || text[j] == '\t') {
+ j++
+ }
+ if j < len(text) && text[j] != '*' {
+ return false
+ }
+ }
+ }
+ return true
+}
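
The actual reformatting in formatDocComment above is delegated to the go/doc/comment package. As a minimal standalone sketch of that round trip (hypothetical example, not part of this patch):

    package main

    import (
        "fmt"
        "go/doc/comment"
    )

    func main() {
        // Parse raw doc-comment text (comment markers already stripped),
        // then print it back in canonical form, as formatDocComment does.
        var p comment.Parser
        d := p.Parse("Deprecated: use NewThing instead.\nSee https://go.dev/doc/comment for details.\n")

        var pr comment.Printer
        fmt.Printf("%s", pr.Comment(d))
    }
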
diff --git a/src/go/printer/example_test.go b/src/go/printer/example_test.go
new file mode 100644
index 0000000..f7d72d1
--- /dev/null
+++ b/src/go/printer/example_test.go
@@ -0,0 +1,67 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer_test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "strings"
+)
+
+func parseFunc(filename, functionname string) (fun *ast.FuncDecl, fset *token.FileSet) {
+ fset = token.NewFileSet()
+ if file, err := parser.ParseFile(fset, filename, nil, 0); err == nil {
+ for _, d := range file.Decls {
+ if f, ok := d.(*ast.FuncDecl); ok && f.Name.Name == functionname {
+ fun = f
+ return
+ }
+ }
+ }
+ panic("function not found")
+}
+
+func printSelf() {
+ // Parse source file and extract the AST without comments for
+ // this function, with position information referring to the
+ // file set fset.
+ funcAST, fset := parseFunc("example_test.go", "printSelf")
+
+ // Print the function body into buffer buf.
+ // The file set is provided to the printer so that it knows
+ // about the original source formatting and can add additional
+ // line breaks where they were present in the source.
+ var buf bytes.Buffer
+ printer.Fprint(&buf, fset, funcAST.Body)
+
+ // Remove braces {} enclosing the function body, unindent,
+ // and trim leading and trailing white space.
+ s := buf.String()
+ s = s[1 : len(s)-1]
+ s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
+
+ // Print the cleaned-up body text to stdout.
+ fmt.Println(s)
+}
+
+func ExampleFprint() {
+ printSelf()
+
+ // Output:
+ // funcAST, fset := parseFunc("example_test.go", "printSelf")
+ //
+ // var buf bytes.Buffer
+ // printer.Fprint(&buf, fset, funcAST.Body)
+ //
+ // s := buf.String()
+ // s = s[1 : len(s)-1]
+ // s = strings.TrimSpace(strings.ReplaceAll(s, "\n\t", "\n"))
+ //
+ // fmt.Println(s)
+}
diff --git a/src/go/printer/gobuild.go b/src/go/printer/gobuild.go
new file mode 100644
index 0000000..f00492d
--- /dev/null
+++ b/src/go/printer/gobuild.go
@@ -0,0 +1,170 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer
+
+import (
+ "go/build/constraint"
+ "sort"
+ "text/tabwriter"
+)
+
+func (p *printer) fixGoBuildLines() {
+ if len(p.goBuild)+len(p.plusBuild) == 0 {
+ return
+ }
+
+ // Find latest possible placement of //go:build and // +build comments.
+ // That's just after the last blank line before we find a non-comment.
+ // (We'll add another blank line after our comment block.)
+ // When we start dropping // +build comments, we can skip over /* */ comments too.
+ // Note that we are processing tabwriter input, so every comment
+ // begins and ends with a tabwriter.Escape byte.
+ // And some newlines have turned into \f bytes.
+ insert := 0
+ for pos := 0; ; {
+ // Skip leading space at beginning of line.
+ blank := true
+ for pos < len(p.output) && (p.output[pos] == ' ' || p.output[pos] == '\t') {
+ pos++
+ }
+ // Skip over // comment if any.
+ if pos+3 < len(p.output) && p.output[pos] == tabwriter.Escape && p.output[pos+1] == '/' && p.output[pos+2] == '/' {
+ blank = false
+ for pos < len(p.output) && !isNL(p.output[pos]) {
+ pos++
+ }
+ }
+ // Skip over \n at end of line.
+ if pos >= len(p.output) || !isNL(p.output[pos]) {
+ break
+ }
+ pos++
+
+ if blank {
+ insert = pos
+ }
+ }
+
+ // If there is a //go:build comment before the place we identified,
+ // use that point instead. (Earlier in the file is always fine.)
+ if len(p.goBuild) > 0 && p.goBuild[0] < insert {
+ insert = p.goBuild[0]
+ } else if len(p.plusBuild) > 0 && p.plusBuild[0] < insert {
+ insert = p.plusBuild[0]
+ }
+
+ var x constraint.Expr
+ switch len(p.goBuild) {
+ case 0:
+ // Synthesize //go:build expression from // +build lines.
+ for _, pos := range p.plusBuild {
+ y, err := constraint.Parse(p.commentTextAt(pos))
+ if err != nil {
+ x = nil
+ break
+ }
+ if x == nil {
+ x = y
+ } else {
+ x = &constraint.AndExpr{X: x, Y: y}
+ }
+ }
+ case 1:
+ // Parse //go:build expression.
+ x, _ = constraint.Parse(p.commentTextAt(p.goBuild[0]))
+ }
+
+ var block []byte
+ if x == nil {
+ // Don't have a valid //go:build expression to treat as truth.
+ // Bring all the lines together but leave them alone.
+ // Note that these are already tabwriter-escaped.
+ for _, pos := range p.goBuild {
+ block = append(block, p.lineAt(pos)...)
+ }
+ for _, pos := range p.plusBuild {
+ block = append(block, p.lineAt(pos)...)
+ }
+ } else {
+ block = append(block, tabwriter.Escape)
+ block = append(block, "//go:build "...)
+ block = append(block, x.String()...)
+ block = append(block, tabwriter.Escape, '\n')
+ if len(p.plusBuild) > 0 {
+ lines, err := constraint.PlusBuildLines(x)
+ if err != nil {
+ lines = []string{"// +build error: " + err.Error()}
+ }
+ for _, line := range lines {
+ block = append(block, tabwriter.Escape)
+ block = append(block, line...)
+ block = append(block, tabwriter.Escape, '\n')
+ }
+ }
+ }
+ block = append(block, '\n')
+
+ // Build sorted list of lines to delete from remainder of output.
+ toDelete := append(p.goBuild, p.plusBuild...)
+ sort.Ints(toDelete)
+
+ // Collect output after insertion point, with lines deleted, into after.
+ var after []byte
+ start := insert
+ for _, end := range toDelete {
+ if end < start {
+ continue
+ }
+ after = appendLines(after, p.output[start:end])
+ start = end + len(p.lineAt(end))
+ }
+ after = appendLines(after, p.output[start:])
+ if n := len(after); n >= 2 && isNL(after[n-1]) && isNL(after[n-2]) {
+ after = after[:n-1]
+ }
+
+ p.output = p.output[:insert]
+ p.output = append(p.output, block...)
+ p.output = append(p.output, after...)
+}
+
+// appendLines is like append(x, y...)
+// but it avoids creating doubled blank lines,
+// which would not be gofmt-standard output.
+// It assumes that only whole blocks of lines are being appended,
+// not line fragments.
+func appendLines(x, y []byte) []byte {
+ if len(y) > 0 && isNL(y[0]) && // y starts in blank line
+ (len(x) == 0 || len(x) >= 2 && isNL(x[len(x)-1]) && isNL(x[len(x)-2])) { // x is empty or ends in blank line
+ y = y[1:] // delete y's leading blank line
+ }
+ return append(x, y...)
+}
+
+func (p *printer) lineAt(start int) []byte {
+ pos := start
+ for pos < len(p.output) && !isNL(p.output[pos]) {
+ pos++
+ }
+ if pos < len(p.output) {
+ pos++
+ }
+ return p.output[start:pos]
+}
+
+func (p *printer) commentTextAt(start int) string {
+ if start < len(p.output) && p.output[start] == tabwriter.Escape {
+ start++
+ }
+ pos := start
+ for pos < len(p.output) && p.output[pos] != tabwriter.Escape && !isNL(p.output[pos]) {
+ pos++
+ }
+ return string(p.output[start:pos])
+}
+
+func isNL(b byte) bool {
+ return b == '\n' || b == '\f'
+}
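
fixGoBuildLines relies on go/build/constraint both to parse build expressions and to regenerate the legacy // +build lines. A short usage sketch of that API (hypothetical example, not part of this patch):

    package main

    import (
        "fmt"
        "go/build/constraint"
    )

    func main() {
        // Parse a //go:build line, as fixGoBuildLines does with the text
        // returned by commentTextAt.
        expr, err := constraint.Parse("//go:build linux && (amd64 || arm64)")
        if err != nil {
            panic(err)
        }
        fmt.Println(expr.String())

        // Derive the equivalent legacy // +build lines.
        lines, err := constraint.PlusBuildLines(expr)
        if err != nil {
            panic(err)
        }
        for _, line := range lines {
            fmt.Println(line)
        }
    }
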
diff --git a/src/go/printer/nodes.go b/src/go/printer/nodes.go
new file mode 100644
index 0000000..e41ffc1
--- /dev/null
+++ b/src/go/printer/nodes.go
@@ -0,0 +1,2001 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of AST nodes; specifically
+// expressions, statements, declarations, and files. It uses
+// the print functionality implemented in printer.go.
+
+package printer
+
+import (
+ "go/ast"
+ "go/token"
+ "math"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Formatting issues:
+// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration)
+// when the comment spans multiple lines; if such a comment is just two lines, formatting is
+// not idempotent
+// - formatting of expression lists
+// - should use blank instead of tab to separate one-line function bodies from
+// the function header unless there is a group of consecutive one-liners
+
+// ----------------------------------------------------------------------------
+// Common AST nodes.
+
+// Print as many newlines as necessary (but at least min newlines) to get to
+// the current line. ws is printed before the first line break. If newSection
+// is set, the first line break is printed as formfeed. Returns 0 if no line
+// breaks were printed, returns 1 if there was exactly one newline printed,
+// and returns a value > 1 if there was a formfeed or more than one newline
+// printed.
+//
+// TODO(gri): linebreak may add too many lines if the next statement at "line"
+// is preceded by comments because the computation of n assumes
+// the current position before the comment and the target position
+// after the comment. Thus, after interspersing such comments, the
+// space taken up by them is not considered to reduce the number of
+// linebreaks. At the moment there is no easy way to know about
+// future (not yet interspersed) comments in this function.
+func (p *printer) linebreak(line, min int, ws whiteSpace, newSection bool) (nbreaks int) {
+ n := nlimit(line - p.pos.Line)
+ if n < min {
+ n = min
+ }
+ if n > 0 {
+ p.print(ws)
+ if newSection {
+ p.print(formfeed)
+ n--
+ nbreaks = 2
+ }
+ nbreaks += n
+ for ; n > 0; n-- {
+ p.print(newline)
+ }
+ }
+ return
+}
+
+// setComment sets g as the next comment if g != nil and if node comments
+// are enabled - this mode is used when printing source code fragments such
+// as exports only. It assumes that there is no pending comment in p.comments
+// and at most one pending comment in the p.comment cache.
+func (p *printer) setComment(g *ast.CommentGroup) {
+ if g == nil || !p.useNodeComments {
+ return
+ }
+ if p.comments == nil {
+ // initialize p.comments lazily
+ p.comments = make([]*ast.CommentGroup, 1)
+ } else if p.cindex < len(p.comments) {
+ // for some reason there are pending comments; this
+ // should never happen - handle gracefully and flush
+ // all comments up to g, ignore anything after that
+ p.flush(p.posFor(g.List[0].Pos()), token.ILLEGAL)
+ p.comments = p.comments[0:1]
+ // in debug mode, report error
+ p.internalError("setComment found pending comments")
+ }
+ p.comments[0] = g
+ p.cindex = 0
+ // don't overwrite any pending comment in the p.comment cache
+ // (there may be a pending comment when a line comment is
+ // immediately followed by a lead comment with no other
+ // tokens between)
+ if p.commentOffset == infinity {
+ p.nextComment() // get comment ready for use
+ }
+}
+
+type exprListMode uint
+
+const (
+ commaTerm exprListMode = 1 << iota // list is optionally terminated by a comma
+ noIndent // no extra indentation in multi-line lists
+)
+
+// If indent is set, a multi-line identifier list is indented after the
+// first linebreak encountered.
+func (p *printer) identList(list []*ast.Ident, indent bool) {
+ // convert into an expression list so we can re-use exprList formatting
+ xlist := make([]ast.Expr, len(list))
+ for i, x := range list {
+ xlist[i] = x
+ }
+ var mode exprListMode
+ if !indent {
+ mode = noIndent
+ }
+ p.exprList(token.NoPos, xlist, 1, mode, token.NoPos, false)
+}
+
+const filteredMsg = "contains filtered or unexported fields"
+
+// Print a list of expressions. If the list spans multiple
+// source lines, the original line breaks are respected between
+// expressions.
+//
+// TODO(gri) Consider rewriting this to be independent of []ast.Expr
+// so that we can use the algorithm for any kind of list
+//
+// (e.g., pass list via a channel over which to range).
+func (p *printer) exprList(prev0 token.Pos, list []ast.Expr, depth int, mode exprListMode, next0 token.Pos, isIncomplete bool) {
+ if len(list) == 0 {
+ if isIncomplete {
+ prev := p.posFor(prev0)
+ next := p.posFor(next0)
+ if prev.IsValid() && prev.Line == next.Line {
+ p.print("/* " + filteredMsg + " */")
+ } else {
+ p.print(newline)
+ p.print(indent, "// "+filteredMsg, unindent, newline)
+ }
+ }
+ return
+ }
+
+ prev := p.posFor(prev0)
+ next := p.posFor(next0)
+ line := p.lineFor(list[0].Pos())
+ endLine := p.lineFor(list[len(list)-1].End())
+
+ if prev.IsValid() && prev.Line == line && line == endLine {
+ // all list entries on a single line
+ for i, x := range list {
+ if i > 0 {
+ // use position of expression following the comma as
+ // comma position for correct comment placement
+ p.setPos(x.Pos())
+ p.print(token.COMMA, blank)
+ }
+ p.expr0(x, depth)
+ }
+ if isIncomplete {
+ p.print(token.COMMA, blank, "/* "+filteredMsg+" */")
+ }
+ return
+ }
+
+ // list entries span multiple lines;
+ // use source code positions to guide line breaks
+
+ // Don't add extra indentation if noIndent is set;
+ // i.e., pretend that the first line is already indented.
+ ws := ignore
+ if mode&noIndent == 0 {
+ ws = indent
+ }
+
+ // The first linebreak is always a formfeed since this section must not
+ // depend on any previous formatting.
+ prevBreak := -1 // index of last expression that was followed by a linebreak
+ if prev.IsValid() && prev.Line < line && p.linebreak(line, 0, ws, true) > 0 {
+ ws = ignore
+ prevBreak = 0
+ }
+
+ // initialize expression/key size: a zero value indicates expr/key doesn't fit on a single line
+ size := 0
+
+ // We use the ratio between the geometric mean of the previous key sizes and
+ // the current size to determine if there should be a break in the alignment.
+ // To compute the geometric mean we accumulate the ln(size) values (lnsum)
+ // and the number of sizes included (count).
+ lnsum := 0.0
+ count := 0
+
+ // print all list elements
+ prevLine := prev.Line
+ for i, x := range list {
+ line = p.lineFor(x.Pos())
+
+ // Determine if the next linebreak, if any, needs to use formfeed:
+ // in general, use the entire node size to make the decision; for
+ // key:value expressions, use the key size.
+ // TODO(gri) for a better result, should probably incorporate both
+ // the key and the node size into the decision process
+ useFF := true
+
+ // Determine element size: All bets are off if we don't have
+ // position information for the previous and next token (likely
+ // generated code - simply ignore the size in this case by setting
+ // it to 0).
+ prevSize := size
+ const infinity = 1e6 // larger than any source line
+ size = p.nodeSize(x, infinity)
+ pair, isPair := x.(*ast.KeyValueExpr)
+ if size <= infinity && prev.IsValid() && next.IsValid() {
+ // x fits on a single line
+ if isPair {
+ size = p.nodeSize(pair.Key, infinity) // size <= infinity
+ }
+ } else {
+ // size too large or we don't have good layout information
+ size = 0
+ }
+
+ // If the previous line and the current line had single-
+ // line-expressions and the key sizes are small or the
+ // ratio between the current key and the geometric mean
+ // of the previous key sizes does not exceed a threshold,
+ // align columns and do not use formfeed.
+ if prevSize > 0 && size > 0 {
+ const smallSize = 40
+ if count == 0 || prevSize <= smallSize && size <= smallSize {
+ useFF = false
+ } else {
+ const r = 2.5 // threshold
+ geomean := math.Exp(lnsum / float64(count)) // count > 0
+ ratio := float64(size) / geomean
+ useFF = r*ratio <= 1 || r <= ratio
+ }
+ }
+
+ needsLinebreak := 0 < prevLine && prevLine < line
+ if i > 0 {
+ // Use position of expression following the comma as
+ // comma position for correct comment placement, but
+ // only if the expression is on the same line.
+ if !needsLinebreak {
+ p.setPos(x.Pos())
+ }
+ p.print(token.COMMA)
+ needsBlank := true
+ if needsLinebreak {
+ // Lines are broken using newlines so comments remain aligned
+ // unless useFF is set or there are multiple expressions on
+ // the same line in which case formfeed is used.
+ nbreaks := p.linebreak(line, 0, ws, useFF || prevBreak+1 < i)
+ if nbreaks > 0 {
+ ws = ignore
+ prevBreak = i
+ needsBlank = false // we got a line break instead
+ }
+ // If there was a new section or more than one new line
+ // (which means that the tabwriter will implicitly break
+ // the section), reset the geomean variables since we are
+ // starting a new group of elements with the next element.
+ if nbreaks > 1 {
+ lnsum = 0
+ count = 0
+ }
+ }
+ if needsBlank {
+ p.print(blank)
+ }
+ }
+
+ if len(list) > 1 && isPair && size > 0 && needsLinebreak {
+ // We have a key:value expression that fits onto one line
+ // and it's not on the same line as the prior expression:
+ // Use a column for the key such that consecutive entries
+ // can align if possible.
+ // (needsLinebreak is set if we started a new line before)
+ p.expr(pair.Key)
+ p.setPos(pair.Colon)
+ p.print(token.COLON, vtab)
+ p.expr(pair.Value)
+ } else {
+ p.expr0(x, depth)
+ }
+
+ if size > 0 {
+ lnsum += math.Log(float64(size))
+ count++
+ }
+
+ prevLine = line
+ }
+
+ if mode&commaTerm != 0 && next.IsValid() && p.pos.Line < next.Line {
+ // Print a terminating comma if the next token is on a new line.
+ p.print(token.COMMA)
+ if isIncomplete {
+ p.print(newline)
+ p.print("// " + filteredMsg)
+ }
+ if ws == ignore && mode&noIndent == 0 {
+ // unindent if we indented
+ p.print(unindent)
+ }
+ p.print(formfeed) // terminating comma needs a line break to look good
+ return
+ }
+
+ if isIncomplete {
+ p.print(token.COMMA, newline)
+ p.print("// "+filteredMsg, newline)
+ }
+
+ if ws == ignore && mode&noIndent == 0 {
+ // unindent if we indented
+ p.print(unindent)
+ }
+}
+
+type paramMode int
+
+const (
+ funcParam paramMode = iota
+ funcTParam
+ typeTParam
+)
+
+func (p *printer) parameters(fields *ast.FieldList, mode paramMode) {
+ openTok, closeTok := token.LPAREN, token.RPAREN
+ if mode != funcParam {
+ openTok, closeTok = token.LBRACK, token.RBRACK
+ }
+ p.setPos(fields.Opening)
+ p.print(openTok)
+ if len(fields.List) > 0 {
+ prevLine := p.lineFor(fields.Opening)
+ ws := indent
+ for i, par := range fields.List {
+ // determine par begin and end line (may be different
+ // if there are multiple parameter names for this par
+ // or the type is on a separate line)
+ parLineBeg := p.lineFor(par.Pos())
+ parLineEnd := p.lineFor(par.End())
+ // separating "," if needed
+ needsLinebreak := 0 < prevLine && prevLine < parLineBeg
+ if i > 0 {
+ // use position of parameter following the comma as
+ // comma position for correct comma placement, but
+ // only if the next parameter is on the same line
+ if !needsLinebreak {
+ p.setPos(par.Pos())
+ }
+ p.print(token.COMMA)
+ }
+ // separator if needed (linebreak or blank)
+ if needsLinebreak && p.linebreak(parLineBeg, 0, ws, true) > 0 {
+ // break line if the opening "(" or previous parameter ended on a different line
+ ws = ignore
+ } else if i > 0 {
+ p.print(blank)
+ }
+ // parameter names
+ if len(par.Names) > 0 {
+ // Very subtle: If we indented before (ws == ignore), identList
+ // won't indent again. If we didn't (ws == indent), identList will
+ // indent if the identList spans multiple lines, and it will outdent
+ // again at the end (and still ws == indent). Thus, a subsequent indent
+ // by a linebreak call after a type, or in the next multi-line identList
+ // will do the right thing.
+ p.identList(par.Names, ws == indent)
+ p.print(blank)
+ }
+ // parameter type
+ p.expr(stripParensAlways(par.Type))
+ prevLine = parLineEnd
+ }
+
+ // if the closing ")" is on a separate line from the last parameter,
+ // print an additional "," and line break
+ if closing := p.lineFor(fields.Closing); 0 < prevLine && prevLine < closing {
+ p.print(token.COMMA)
+ p.linebreak(closing, 0, ignore, true)
+ } else if mode == typeTParam && fields.NumFields() == 1 && combinesWithName(fields.List[0].Type) {
+ // A type parameter list [P T] where the name P and the type expression T syntactically
+ // combine to another valid (value) expression requires a trailing comma, as in [P *T,]
+ // (or an enclosing interface as in [P interface(*T)]), so that the type parameter list
+ // is not parsed as an array length [P*T].
+ p.print(token.COMMA)
+ }
+
+ // unindent if we indented
+ if ws == ignore {
+ p.print(unindent)
+ }
+ }
+
+ p.setPos(fields.Closing)
+ p.print(closeTok)
+}
+
+// combinesWithName reports whether a name followed by the expression x
+// syntactically combines to another valid (value) expression. For instance
+// using *T for x, "name *T" syntactically appears as the expression x*T.
+// On the other hand, using P|Q or *P|~Q for x, "name P|Q" or "name *P|~Q"
+// cannot be combined into a valid (value) expression.
+func combinesWithName(x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.StarExpr:
+ // name *x.X combines to name*x.X if x.X is not a type element
+ return !isTypeElem(x.X)
+ case *ast.BinaryExpr:
+ return combinesWithName(x.X) && !isTypeElem(x.Y)
+ case *ast.ParenExpr:
+ // name(x) combines but we are making sure at
+ // the call site that x is never parenthesized.
+ panic("unexpected parenthesized expression")
+ }
+ return false
+}
+
+// isTypeElem reports whether x is a (possibly parenthesized) type element expression.
+// The result is false if x could be a type element OR an ordinary (value) expression.
+func isTypeElem(x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
+ return true
+ case *ast.UnaryExpr:
+ return x.Op == token.TILDE
+ case *ast.BinaryExpr:
+ return isTypeElem(x.X) || isTypeElem(x.Y)
+ case *ast.ParenExpr:
+ return isTypeElem(x.X)
+ }
+ return false
+}
+
+func (p *printer) signature(sig *ast.FuncType) {
+ if sig.TypeParams != nil {
+ p.parameters(sig.TypeParams, funcTParam)
+ }
+ if sig.Params != nil {
+ p.parameters(sig.Params, funcParam)
+ } else {
+ p.print(token.LPAREN, token.RPAREN)
+ }
+ res := sig.Results
+ n := res.NumFields()
+ if n > 0 {
+ // res != nil
+ p.print(blank)
+ if n == 1 && res.List[0].Names == nil {
+ // single anonymous res; no ()'s
+ p.expr(stripParensAlways(res.List[0].Type))
+ return
+ }
+ p.parameters(res, funcParam)
+ }
+}
+
+func identListSize(list []*ast.Ident, maxSize int) (size int) {
+ for i, x := range list {
+ if i > 0 {
+ size += len(", ")
+ }
+ size += utf8.RuneCountInString(x.Name)
+ if size >= maxSize {
+ break
+ }
+ }
+ return
+}
+
+func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
+ if len(list) != 1 {
+ return false // allow only one field
+ }
+ f := list[0]
+ if f.Tag != nil || f.Comment != nil {
+ return false // don't allow tags or comments
+ }
+ // only name(s) and type
+ const maxSize = 30 // adjust as appropriate, this is an approximate value
+ namesSize := identListSize(f.Names, maxSize)
+ if namesSize > 0 {
+ namesSize = 1 // blank between names and types
+ }
+ typeSize := p.nodeSize(f.Type, maxSize)
+ return namesSize+typeSize <= maxSize
+}
+
+func (p *printer) setLineComment(text string) {
+ p.setComment(&ast.CommentGroup{List: []*ast.Comment{{Slash: token.NoPos, Text: text}}})
+}
+
+func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
+ lbrace := fields.Opening
+ list := fields.List
+ rbrace := fields.Closing
+ hasComments := isIncomplete || p.commentBefore(p.posFor(rbrace))
+ srcIsOneLine := lbrace.IsValid() && rbrace.IsValid() && p.lineFor(lbrace) == p.lineFor(rbrace)
+
+ if !hasComments && srcIsOneLine {
+ // possibly a one-line struct/interface
+ if len(list) == 0 {
+ // no blank between keyword and {} in this case
+ p.setPos(lbrace)
+ p.print(token.LBRACE)
+ p.setPos(rbrace)
+ p.print(token.RBRACE)
+ return
+ } else if p.isOneLineFieldList(list) {
+ // small enough - print on one line
+ // (don't use identList and ignore source line breaks)
+ p.setPos(lbrace)
+ p.print(token.LBRACE, blank)
+ f := list[0]
+ if isStruct {
+ for i, x := range f.Names {
+ if i > 0 {
+ // no comments so no need for comma position
+ p.print(token.COMMA, blank)
+ }
+ p.expr(x)
+ }
+ if len(f.Names) > 0 {
+ p.print(blank)
+ }
+ p.expr(f.Type)
+ } else { // interface
+ if len(f.Names) > 0 {
+ name := f.Names[0] // method name
+ p.expr(name)
+ p.signature(f.Type.(*ast.FuncType)) // don't print "func"
+ } else {
+ // embedded interface
+ p.expr(f.Type)
+ }
+ }
+ p.print(blank)
+ p.setPos(rbrace)
+ p.print(token.RBRACE)
+ return
+ }
+ }
+ // hasComments || !srcIsOneLine
+
+ p.print(blank)
+ p.setPos(lbrace)
+ p.print(token.LBRACE, indent)
+ if hasComments || len(list) > 0 {
+ p.print(formfeed)
+ }
+
+ if isStruct {
+
+ sep := vtab
+ if len(list) == 1 {
+ sep = blank
+ }
+ var line int
+ for i, f := range list {
+ if i > 0 {
+ p.linebreak(p.lineFor(f.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ extraTabs := 0
+ p.setComment(f.Doc)
+ p.recordLine(&line)
+ if len(f.Names) > 0 {
+ // named fields
+ p.identList(f.Names, false)
+ p.print(sep)
+ p.expr(f.Type)
+ extraTabs = 1
+ } else {
+ // anonymous field
+ p.expr(f.Type)
+ extraTabs = 2
+ }
+ if f.Tag != nil {
+ if len(f.Names) > 0 && sep == vtab {
+ p.print(sep)
+ }
+ p.print(sep)
+ p.expr(f.Tag)
+ extraTabs = 0
+ }
+ if f.Comment != nil {
+ for ; extraTabs > 0; extraTabs-- {
+ p.print(sep)
+ }
+ p.setComment(f.Comment)
+ }
+ }
+ if isIncomplete {
+ if len(list) > 0 {
+ p.print(formfeed)
+ }
+ p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.setLineComment("// " + filteredMsg)
+ }
+
+ } else { // interface
+
+ var line int
+ var prev *ast.Ident // previous "type" identifier
+ for i, f := range list {
+ var name *ast.Ident // first name, or nil
+ if len(f.Names) > 0 {
+ name = f.Names[0]
+ }
+ if i > 0 {
+ // don't do a line break (min == 0) if we are printing a list of types
+ // TODO(gri) this doesn't work quite right if the list of types is
+ // spread across multiple lines
+ min := 1
+ if prev != nil && name == prev {
+ min = 0
+ }
+ p.linebreak(p.lineFor(f.Pos()), min, ignore, p.linesFrom(line) > 0)
+ }
+ p.setComment(f.Doc)
+ p.recordLine(&line)
+ if name != nil {
+ // method
+ p.expr(name)
+ p.signature(f.Type.(*ast.FuncType)) // don't print "func"
+ prev = nil
+ } else {
+ // embedded interface
+ p.expr(f.Type)
+ prev = nil
+ }
+ p.setComment(f.Comment)
+ }
+ if isIncomplete {
+ if len(list) > 0 {
+ p.print(formfeed)
+ }
+ p.flush(p.posFor(rbrace), token.RBRACE) // make sure we don't lose the last line comment
+ p.setLineComment("// contains filtered or unexported methods")
+ }
+
+ }
+ p.print(unindent, formfeed)
+ p.setPos(rbrace)
+ p.print(token.RBRACE)
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func walkBinary(e *ast.BinaryExpr) (has4, has5 bool, maxProblem int) {
+ switch e.Op.Precedence() {
+ case 4:
+ has4 = true
+ case 5:
+ has5 = true
+ }
+
+ switch l := e.X.(type) {
+ case *ast.BinaryExpr:
+ if l.Op.Precedence() < e.Op.Precedence() {
+ // parens will be inserted.
+ // pretend this is an *ast.ParenExpr and do nothing.
+ break
+ }
+ h4, h5, mp := walkBinary(l)
+ has4 = has4 || h4
+ has5 = has5 || h5
+ if maxProblem < mp {
+ maxProblem = mp
+ }
+ }
+
+ switch r := e.Y.(type) {
+ case *ast.BinaryExpr:
+ if r.Op.Precedence() <= e.Op.Precedence() {
+ // parens will be inserted.
+ // pretend this is an *ast.ParenExpr and do nothing.
+ break
+ }
+ h4, h5, mp := walkBinary(r)
+ has4 = has4 || h4
+ has5 = has5 || h5
+ if maxProblem < mp {
+ maxProblem = mp
+ }
+
+ case *ast.StarExpr:
+ if e.Op == token.QUO { // `*/`
+ maxProblem = 5
+ }
+
+ case *ast.UnaryExpr:
+ switch e.Op.String() + r.Op.String() {
+ case "/*", "&&", "&^":
+ maxProblem = 5
+ case "++", "--":
+ if maxProblem < 4 {
+ maxProblem = 4
+ }
+ }
+ }
+ return
+}
+
+func cutoff(e *ast.BinaryExpr, depth int) int {
+ has4, has5, maxProblem := walkBinary(e)
+ if maxProblem > 0 {
+ return maxProblem + 1
+ }
+ if has4 && has5 {
+ if depth == 1 {
+ return 5
+ }
+ return 4
+ }
+ if depth == 1 {
+ return 6
+ }
+ return 4
+}
+
+func diffPrec(expr ast.Expr, prec int) int {
+ x, ok := expr.(*ast.BinaryExpr)
+ if !ok || prec != x.Op.Precedence() {
+ return 1
+ }
+ return 0
+}
+
+func reduceDepth(depth int) int {
+ depth--
+ if depth < 1 {
+ depth = 1
+ }
+ return depth
+}
+
+// Format the binary expression: decide the cutoff and then format.
+// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
+// (Algorithm suggestion by Russ Cox.)
+//
+// The precedences are:
+//
+// 5 * / % << >> & &^
+// 4 + - | ^
+// 3 == != < <= > >=
+// 2 &&
+// 1 ||
+//
+// The only decision is whether there will be spaces around levels 4 and 5.
+// There are never spaces at level 6 (unary), and always spaces at levels 3 and below.
+//
+// To choose the cutoff, look at the whole expression but excluding primary
+// expressions (function calls, parenthesized exprs), and apply these rules:
+//
+// 1. If there is a binary operator with a right side unary operand
+// that would clash without a space, the cutoff must be (in order):
+//
+// /* 6
+// && 6
+// &^ 6
+// ++ 5
+// -- 5
+//
+// (Comparison operators always have spaces around them.)
+//
+// 2. If there is a mix of level 5 and level 4 operators, then the cutoff
+// is 5 (use spaces to distinguish precedence) in Normal mode
+// and 4 (never use spaces) in Compact mode.
+//
+// 3. If there are no level 4 operators or no level 5 operators, then the
+// cutoff is 6 (always use spaces) in Normal mode
+// and 4 (never use spaces) in Compact mode.
+func (p *printer) binaryExpr(x *ast.BinaryExpr, prec1, cutoff, depth int) {
+ prec := x.Op.Precedence()
+ if prec < prec1 {
+ // parenthesis needed
+ // Note: The parser inserts an ast.ParenExpr node; thus this case
+ // can only occur if the AST is created in a different way.
+ p.print(token.LPAREN)
+ p.expr0(x, reduceDepth(depth)) // parentheses undo one level of depth
+ p.print(token.RPAREN)
+ return
+ }
+
+ printBlank := prec < cutoff
+
+ ws := indent
+ p.expr1(x.X, prec, depth+diffPrec(x.X, prec))
+ if printBlank {
+ p.print(blank)
+ }
+ xline := p.pos.Line // before the operator (it may be on the next line!)
+ yline := p.lineFor(x.Y.Pos())
+ p.setPos(x.OpPos)
+ p.print(x.Op)
+ if xline != yline && xline > 0 && yline > 0 {
+ // at least one line break, but respect an extra empty line
+ // in the source
+ if p.linebreak(yline, 1, ws, true) > 0 {
+ ws = ignore
+ printBlank = false // no blank after line break
+ }
+ }
+ if printBlank {
+ p.print(blank)
+ }
+ p.expr1(x.Y, prec+1, depth+1)
+ if ws == ignore {
+ p.print(unindent)
+ }
+}
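
Aside, not part of nodes.go: the cutoff rules above are what give gofmt its mixed-precedence spacing (tight at level 5, spaced at level 4). A small demonstration through go/format, which uses this printer:

    package main

    import (
        "fmt"
        "go/format"
    )

    func main() {
        // With both level-4 (+, -) and level-5 (*, /) operators present and
        // depth == 1, the cutoff is 5: level-5 operators print without blanks
        // and level-4 operators with blanks, yielding "a*b + c/d - 1".
        src := []byte("package p\n\nvar x = a * b + c / d - 1\n")
        out, err := format.Source(src)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }
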
+
+func isBinary(expr ast.Expr) bool {
+ _, ok := expr.(*ast.BinaryExpr)
+ return ok
+}
+
+func (p *printer) expr1(expr ast.Expr, prec1, depth int) {
+ p.setPos(expr.Pos())
+
+ switch x := expr.(type) {
+ case *ast.BadExpr:
+ p.print("BadExpr")
+
+ case *ast.Ident:
+ p.print(x)
+
+ case *ast.BinaryExpr:
+ if depth < 1 {
+ p.internalError("depth < 1:", depth)
+ depth = 1
+ }
+ p.binaryExpr(x, prec1, cutoff(x, depth), depth)
+
+ case *ast.KeyValueExpr:
+ p.expr(x.Key)
+ p.setPos(x.Colon)
+ p.print(token.COLON, blank)
+ p.expr(x.Value)
+
+ case *ast.StarExpr:
+ const prec = token.UnaryPrec
+ if prec < prec1 {
+ // parenthesis needed
+ p.print(token.LPAREN)
+ p.print(token.MUL)
+ p.expr(x.X)
+ p.print(token.RPAREN)
+ } else {
+ // no parenthesis needed
+ p.print(token.MUL)
+ p.expr(x.X)
+ }
+
+ case *ast.UnaryExpr:
+ const prec = token.UnaryPrec
+ if prec < prec1 {
+ // parenthesis needed
+ p.print(token.LPAREN)
+ p.expr(x)
+ p.print(token.RPAREN)
+ } else {
+ // no parenthesis needed
+ p.print(x.Op)
+ if x.Op == token.RANGE {
+ // TODO(gri) Remove this code if it cannot be reached.
+ p.print(blank)
+ }
+ p.expr1(x.X, prec, depth)
+ }
+
+ case *ast.BasicLit:
+ if p.Config.Mode&normalizeNumbers != 0 {
+ x = normalizedNumber(x)
+ }
+ p.print(x)
+
+ case *ast.FuncLit:
+ p.setPos(x.Type.Pos())
+ p.print(token.FUNC)
+ // See the comment in funcDecl about how the header size is computed.
+ startCol := p.out.Column - len("func")
+ p.signature(x.Type)
+ p.funcBody(p.distanceFrom(x.Type.Pos(), startCol), blank, x.Body)
+
+ case *ast.ParenExpr:
+ if _, hasParens := x.X.(*ast.ParenExpr); hasParens {
+ // don't print parentheses around an already parenthesized expression
+ // TODO(gri) consider making this more general and incorporate precedence levels
+ p.expr0(x.X, depth)
+ } else {
+ p.print(token.LPAREN)
+ p.expr0(x.X, reduceDepth(depth)) // parentheses undo one level of depth
+ p.setPos(x.Rparen)
+ p.print(token.RPAREN)
+ }
+
+ case *ast.SelectorExpr:
+ p.selectorExpr(x, depth, false)
+
+ case *ast.TypeAssertExpr:
+ p.expr1(x.X, token.HighestPrec, depth)
+ p.print(token.PERIOD)
+ p.setPos(x.Lparen)
+ p.print(token.LPAREN)
+ if x.Type != nil {
+ p.expr(x.Type)
+ } else {
+ p.print(token.TYPE)
+ }
+ p.setPos(x.Rparen)
+ p.print(token.RPAREN)
+
+ case *ast.IndexExpr:
+ // TODO(gri): should treat [] like parentheses and undo one level of depth
+ p.expr1(x.X, token.HighestPrec, 1)
+ p.setPos(x.Lbrack)
+ p.print(token.LBRACK)
+ p.expr0(x.Index, depth+1)
+ p.setPos(x.Rbrack)
+ p.print(token.RBRACK)
+
+ case *ast.IndexListExpr:
+ // TODO(gri): as for IndexExpr, should treat [] like parentheses and undo
+ // one level of depth
+ p.expr1(x.X, token.HighestPrec, 1)
+ p.setPos(x.Lbrack)
+ p.print(token.LBRACK)
+ p.exprList(x.Lbrack, x.Indices, depth+1, commaTerm, x.Rbrack, false)
+ p.setPos(x.Rbrack)
+ p.print(token.RBRACK)
+
+ case *ast.SliceExpr:
+ // TODO(gri): should treat [] like parentheses and undo one level of depth
+ p.expr1(x.X, token.HighestPrec, 1)
+ p.setPos(x.Lbrack)
+ p.print(token.LBRACK)
+ indices := []ast.Expr{x.Low, x.High}
+ if x.Max != nil {
+ indices = append(indices, x.Max)
+ }
+ // determine if we need extra blanks around ':'
+ var needsBlanks bool
+ if depth <= 1 {
+ var indexCount int
+ var hasBinaries bool
+ for _, x := range indices {
+ if x != nil {
+ indexCount++
+ if isBinary(x) {
+ hasBinaries = true
+ }
+ }
+ }
+ if indexCount > 1 && hasBinaries {
+ needsBlanks = true
+ }
+ }
+ for i, x := range indices {
+ if i > 0 {
+ if indices[i-1] != nil && needsBlanks {
+ p.print(blank)
+ }
+ p.print(token.COLON)
+ if x != nil && needsBlanks {
+ p.print(blank)
+ }
+ }
+ if x != nil {
+ p.expr0(x, depth+1)
+ }
+ }
+ p.setPos(x.Rbrack)
+ p.print(token.RBRACK)
+
+ case *ast.CallExpr:
+ if len(x.Args) > 1 {
+ depth++
+ }
+ var wasIndented bool
+ if _, ok := x.Fun.(*ast.FuncType); ok {
+ // conversions to literal function types require parentheses around the type
+ p.print(token.LPAREN)
+ wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+ p.print(token.RPAREN)
+ } else {
+ wasIndented = p.possibleSelectorExpr(x.Fun, token.HighestPrec, depth)
+ }
+ p.setPos(x.Lparen)
+ p.print(token.LPAREN)
+ if x.Ellipsis.IsValid() {
+ p.exprList(x.Lparen, x.Args, depth, 0, x.Ellipsis, false)
+ p.setPos(x.Ellipsis)
+ p.print(token.ELLIPSIS)
+ if x.Rparen.IsValid() && p.lineFor(x.Ellipsis) < p.lineFor(x.Rparen) {
+ p.print(token.COMMA, formfeed)
+ }
+ } else {
+ p.exprList(x.Lparen, x.Args, depth, commaTerm, x.Rparen, false)
+ }
+ p.setPos(x.Rparen)
+ p.print(token.RPAREN)
+ if wasIndented {
+ p.print(unindent)
+ }
+
+ case *ast.CompositeLit:
+ // composite literal elements that are composite literals themselves may have the type omitted
+ if x.Type != nil {
+ p.expr1(x.Type, token.HighestPrec, depth)
+ }
+ p.level++
+ p.setPos(x.Lbrace)
+ p.print(token.LBRACE)
+ p.exprList(x.Lbrace, x.Elts, 1, commaTerm, x.Rbrace, x.Incomplete)
+ // do not insert extra line break following a /*-style comment
+ // before the closing '}' as it might break the code if there
+ // is no trailing ','
+ mode := noExtraLinebreak
+ // do not insert extra blank following a /*-style comment
+ // before the closing '}' unless the literal is empty
+ if len(x.Elts) > 0 {
+ mode |= noExtraBlank
+ }
+ // need the initial indent to print lone comments with
+ // the proper level of indentation
+ p.print(indent, unindent, mode)
+ p.setPos(x.Rbrace)
+ p.print(token.RBRACE, mode)
+ p.level--
+
+ case *ast.Ellipsis:
+ p.print(token.ELLIPSIS)
+ if x.Elt != nil {
+ p.expr(x.Elt)
+ }
+
+ case *ast.ArrayType:
+ p.print(token.LBRACK)
+ if x.Len != nil {
+ p.expr(x.Len)
+ }
+ p.print(token.RBRACK)
+ p.expr(x.Elt)
+
+ case *ast.StructType:
+ p.print(token.STRUCT)
+ p.fieldList(x.Fields, true, x.Incomplete)
+
+ case *ast.FuncType:
+ p.print(token.FUNC)
+ p.signature(x)
+
+ case *ast.InterfaceType:
+ p.print(token.INTERFACE)
+ p.fieldList(x.Methods, false, x.Incomplete)
+
+ case *ast.MapType:
+ p.print(token.MAP, token.LBRACK)
+ p.expr(x.Key)
+ p.print(token.RBRACK)
+ p.expr(x.Value)
+
+ case *ast.ChanType:
+ switch x.Dir {
+ case ast.SEND | ast.RECV:
+ p.print(token.CHAN)
+ case ast.RECV:
+ p.print(token.ARROW, token.CHAN) // x.Arrow and x.Pos() are the same
+ case ast.SEND:
+ p.print(token.CHAN)
+ p.setPos(x.Arrow)
+ p.print(token.ARROW)
+ }
+ p.print(blank)
+ p.expr(x.Value)
+
+ default:
+ panic("unreachable")
+ }
+}
+
+// normalizedNumber rewrites base prefixes and exponents
+// of numbers to use lower-case letters (0X123 to 0x123 and 1.2E3 to 1.2e3),
+// and removes leading 0's from integer imaginary literals (0765i to 765i).
+// It leaves hexadecimal digits alone.
+//
+// normalizedNumber doesn't modify the ast.BasicLit value lit points to.
+// If lit is not a number or a number in canonical format already,
+// lit is returned as is. Otherwise a new ast.BasicLit is created.
+func normalizedNumber(lit *ast.BasicLit) *ast.BasicLit {
+ if lit.Kind != token.INT && lit.Kind != token.FLOAT && lit.Kind != token.IMAG {
+ return lit // not a number - nothing to do
+ }
+ if len(lit.Value) < 2 {
+ return lit // only one digit (common case) - nothing to do
+ }
+ // len(lit.Value) >= 2
+
+ // We ignore lit.Kind because for lit.Kind == token.IMAG the literal may be an integer
+ // or floating-point value, decimal or not. Instead, just consider the literal pattern.
+ x := lit.Value
+ switch x[:2] {
+ default:
+ // 0-prefix octal, decimal int, or float (possibly with 'i' suffix)
+ if i := strings.LastIndexByte(x, 'E'); i >= 0 {
+ x = x[:i] + "e" + x[i+1:]
+ break
+ }
+ // remove leading 0's from integer (but not floating-point) imaginary literals
+ if x[len(x)-1] == 'i' && !strings.ContainsAny(x, ".e") {
+ x = strings.TrimLeft(x, "0_")
+ if x == "i" {
+ x = "0i"
+ }
+ }
+ case "0X":
+ x = "0x" + x[2:]
+ // possibly a hexadecimal float
+ if i := strings.LastIndexByte(x, 'P'); i >= 0 {
+ x = x[:i] + "p" + x[i+1:]
+ }
+ case "0x":
+ // possibly a hexadecimal float
+ i := strings.LastIndexByte(x, 'P')
+ if i == -1 {
+ return lit // nothing to do
+ }
+ x = x[:i] + "p" + x[i+1:]
+ case "0O":
+ x = "0o" + x[2:]
+ case "0o":
+ return lit // nothing to do
+ case "0B":
+ x = "0b" + x[2:]
+ case "0b":
+ return lit // nothing to do
+ }
+
+ return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: lit.Kind, Value: x}
+}
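
Aside, not part of nodes.go: normalizedNumber only runs when the printer's normalizeNumbers mode is set, which gofmt and go/format enable. A brief demonstration (hypothetical example):

    package main

    import (
        "fmt"
        "go/format"
    )

    func main() {
        // Base prefixes and exponents become lower case, hexadecimal digits
        // stay as written, and leading zeros are dropped from integer
        // imaginary literals (0XABCDEF -> 0xABCDEF, 1.2E3 -> 1.2e3, 0765i -> 765i).
        src := []byte("package p\n\nconst (\n\ta = 0XABCDEF\n\tb = 1.2E3\n\tc = 0765i\n)\n")
        out, err := format.Source(src)
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
    }
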
+
+func (p *printer) possibleSelectorExpr(expr ast.Expr, prec1, depth int) bool {
+ if x, ok := expr.(*ast.SelectorExpr); ok {
+ return p.selectorExpr(x, depth, true)
+ }
+ p.expr1(expr, prec1, depth)
+ return false
+}
+
+// selectorExpr handles an *ast.SelectorExpr node and reports whether x spans
+// multiple lines.
+func (p *printer) selectorExpr(x *ast.SelectorExpr, depth int, isMethod bool) bool {
+ p.expr1(x.X, token.HighestPrec, depth)
+ p.print(token.PERIOD)
+ if line := p.lineFor(x.Sel.Pos()); p.pos.IsValid() && p.pos.Line < line {
+ p.print(indent, newline)
+ p.setPos(x.Sel.Pos())
+ p.print(x.Sel)
+ if !isMethod {
+ p.print(unindent)
+ }
+ return true
+ }
+ p.setPos(x.Sel.Pos())
+ p.print(x.Sel)
+ return false
+}
+
+func (p *printer) expr0(x ast.Expr, depth int) {
+ p.expr1(x, token.LowestPrec, depth)
+}
+
+func (p *printer) expr(x ast.Expr) {
+ const depth = 1
+ p.expr1(x, token.LowestPrec, depth)
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// Print the statement list indented, but without a newline after the last statement.
+// Extra line breaks between statements in the source are respected but at most one
+// empty line is printed between statements.
+func (p *printer) stmtList(list []ast.Stmt, nindent int, nextIsRBrace bool) {
+ if nindent > 0 {
+ p.print(indent)
+ }
+ var line int
+ i := 0
+ for _, s := range list {
+ // ignore empty statements (was issue 3466)
+ if _, isEmpty := s.(*ast.EmptyStmt); !isEmpty {
+ // nindent == 0 only for lists of switch/select case clauses;
+ // in those cases each clause is a new section
+ if len(p.output) > 0 {
+ // only print line break if we are not at the beginning of the output
+ // (i.e., we are not printing only a partial program)
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, i == 0 || nindent == 0 || p.linesFrom(line) > 0)
+ }
+ p.recordLine(&line)
+ p.stmt(s, nextIsRBrace && i == len(list)-1)
+ // labeled statements put labels on a separate line, but here
+ // we only care about the start line of the actual statement
+ // without label - correct line for each label
+ for t := s; ; {
+ lt, _ := t.(*ast.LabeledStmt)
+ if lt == nil {
+ break
+ }
+ line++
+ t = lt.Stmt
+ }
+ i++
+ }
+ }
+ if nindent > 0 {
+ p.print(unindent)
+ }
+}
+
+// block prints an *ast.BlockStmt; it always spans at least two lines.
+func (p *printer) block(b *ast.BlockStmt, nindent int) {
+ p.setPos(b.Lbrace)
+ p.print(token.LBRACE)
+ p.stmtList(b.List, nindent, true)
+ p.linebreak(p.lineFor(b.Rbrace), 1, ignore, true)
+ p.setPos(b.Rbrace)
+ p.print(token.RBRACE)
+}
+
+func isTypeName(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.Ident:
+ return true
+ case *ast.SelectorExpr:
+ return isTypeName(t.X)
+ }
+ return false
+}
+
+func stripParens(x ast.Expr) ast.Expr {
+ if px, strip := x.(*ast.ParenExpr); strip {
+ // parentheses must not be stripped if there are any
+ // unparenthesized composite literals starting with
+ // a type name
+ ast.Inspect(px.X, func(node ast.Node) bool {
+ switch x := node.(type) {
+ case *ast.ParenExpr:
+ // parentheses protect enclosed composite literals
+ return false
+ case *ast.CompositeLit:
+ if isTypeName(x.Type) {
+ strip = false // do not strip parentheses
+ }
+ return false
+ }
+ // in all other cases, keep inspecting
+ return true
+ })
+ if strip {
+ return stripParens(px.X)
+ }
+ }
+ return x
+}
+
+func stripParensAlways(x ast.Expr) ast.Expr {
+ if x, ok := x.(*ast.ParenExpr); ok {
+ return stripParensAlways(x.X)
+ }
+ return x
+}
+
+func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
+ p.print(blank)
+ needsBlank := false
+ if init == nil && post == nil {
+ // no semicolons required
+ if expr != nil {
+ p.expr(stripParens(expr))
+ needsBlank = true
+ }
+ } else {
+ // all semicolons required
+ // (they are not separators, print them explicitly)
+ if init != nil {
+ p.stmt(init, false)
+ }
+ p.print(token.SEMICOLON, blank)
+ if expr != nil {
+ p.expr(stripParens(expr))
+ needsBlank = true
+ }
+ if isForStmt {
+ p.print(token.SEMICOLON, blank)
+ needsBlank = false
+ if post != nil {
+ p.stmt(post, false)
+ needsBlank = true
+ }
+ }
+ }
+ if needsBlank {
+ p.print(blank)
+ }
+}
+
+// indentList reports whether an expression list would look better if it
+// were indented wholesale (starting with the very first element, rather
+// than starting at the first line break).
+func (p *printer) indentList(list []ast.Expr) bool {
+ // Heuristic: indentList reports whether there are more than one multi-
+ // line element in the list, or if there is any element that is not
+ // starting on the same line as the previous one ends.
+ if len(list) >= 2 {
+ var b = p.lineFor(list[0].Pos())
+ var e = p.lineFor(list[len(list)-1].End())
+ if 0 < b && b < e {
+ // list spans multiple lines
+ n := 0 // multi-line element count
+ line := b
+ for _, x := range list {
+ xb := p.lineFor(x.Pos())
+ xe := p.lineFor(x.End())
+ if line < xb {
+ // x is not starting on the same
+ // line as the previous one ended
+ return true
+ }
+ if xb < xe {
+ // x is a multi-line element
+ n++
+ }
+ line = xe
+ }
+ return n > 1
+ }
+ }
+ return false
+}
+
+func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool) {
+ p.setPos(stmt.Pos())
+
+ switch s := stmt.(type) {
+ case *ast.BadStmt:
+ p.print("BadStmt")
+
+ case *ast.DeclStmt:
+ p.decl(s.Decl)
+
+ case *ast.EmptyStmt:
+ // nothing to do
+
+ case *ast.LabeledStmt:
+ // a "correcting" unindent immediately following a line break
+ // is applied before the line break if there is no comment
+ // between (see writeWhitespace)
+ p.print(unindent)
+ p.expr(s.Label)
+ p.setPos(s.Colon)
+ p.print(token.COLON, indent)
+ if e, isEmpty := s.Stmt.(*ast.EmptyStmt); isEmpty {
+ if !nextIsRBrace {
+ p.print(newline)
+ p.setPos(e.Pos())
+ p.print(token.SEMICOLON)
+ break
+ }
+ } else {
+ p.linebreak(p.lineFor(s.Stmt.Pos()), 1, ignore, true)
+ }
+ p.stmt(s.Stmt, nextIsRBrace)
+
+ case *ast.ExprStmt:
+ const depth = 1
+ p.expr0(s.X, depth)
+
+ case *ast.SendStmt:
+ const depth = 1
+ p.expr0(s.Chan, depth)
+ p.print(blank)
+ p.setPos(s.Arrow)
+ p.print(token.ARROW, blank)
+ p.expr0(s.Value, depth)
+
+ case *ast.IncDecStmt:
+ const depth = 1
+ p.expr0(s.X, depth+1)
+ p.setPos(s.TokPos)
+ p.print(s.Tok)
+
+ case *ast.AssignStmt:
+ var depth = 1
+ if len(s.Lhs) > 1 && len(s.Rhs) > 1 {
+ depth++
+ }
+ p.exprList(s.Pos(), s.Lhs, depth, 0, s.TokPos, false)
+ p.print(blank)
+ p.setPos(s.TokPos)
+ p.print(s.Tok, blank)
+ p.exprList(s.TokPos, s.Rhs, depth, 0, token.NoPos, false)
+
+ case *ast.GoStmt:
+ p.print(token.GO, blank)
+ p.expr(s.Call)
+
+ case *ast.DeferStmt:
+ p.print(token.DEFER, blank)
+ p.expr(s.Call)
+
+ case *ast.ReturnStmt:
+ p.print(token.RETURN)
+ if s.Results != nil {
+ p.print(blank)
+ // Use indentList heuristic to make corner cases look
+ // better (issue 1207). A more systematic approach would
+ // always indent, but this would cause significant
+ // reformatting of the code base and not necessarily
+ // lead to more nicely formatted code in general.
+ if p.indentList(s.Results) {
+ p.print(indent)
+ // Use NoPos so that a newline never goes before
+ // the results (see issue #32854).
+ p.exprList(token.NoPos, s.Results, 1, noIndent, token.NoPos, false)
+ p.print(unindent)
+ } else {
+ p.exprList(token.NoPos, s.Results, 1, 0, token.NoPos, false)
+ }
+ }
+
+ case *ast.BranchStmt:
+ p.print(s.Tok)
+ if s.Label != nil {
+ p.print(blank)
+ p.expr(s.Label)
+ }
+
+ case *ast.BlockStmt:
+ p.block(s, 1)
+
+ case *ast.IfStmt:
+ p.print(token.IF)
+ p.controlClause(false, s.Init, s.Cond, nil)
+ p.block(s.Body, 1)
+ if s.Else != nil {
+ p.print(blank, token.ELSE, blank)
+ switch s.Else.(type) {
+ case *ast.BlockStmt, *ast.IfStmt:
+ p.stmt(s.Else, nextIsRBrace)
+ default:
+ // This can only happen with an incorrectly
+ // constructed AST. Permit it but print so
+ // that it can be parsed without errors.
+ p.print(token.LBRACE, indent, formfeed)
+ p.stmt(s.Else, true)
+ p.print(unindent, formfeed, token.RBRACE)
+ }
+ }
+
+ case *ast.CaseClause:
+ if s.List != nil {
+ p.print(token.CASE, blank)
+ p.exprList(s.Pos(), s.List, 1, 0, s.Colon, false)
+ } else {
+ p.print(token.DEFAULT)
+ }
+ p.setPos(s.Colon)
+ p.print(token.COLON)
+ p.stmtList(s.Body, 1, nextIsRBrace)
+
+ case *ast.SwitchStmt:
+ p.print(token.SWITCH)
+ p.controlClause(false, s.Init, s.Tag, nil)
+ p.block(s.Body, 0)
+
+ case *ast.TypeSwitchStmt:
+ p.print(token.SWITCH)
+ if s.Init != nil {
+ p.print(blank)
+ p.stmt(s.Init, false)
+ p.print(token.SEMICOLON)
+ }
+ p.print(blank)
+ p.stmt(s.Assign, false)
+ p.print(blank)
+ p.block(s.Body, 0)
+
+ case *ast.CommClause:
+ if s.Comm != nil {
+ p.print(token.CASE, blank)
+ p.stmt(s.Comm, false)
+ } else {
+ p.print(token.DEFAULT)
+ }
+ p.setPos(s.Colon)
+ p.print(token.COLON)
+ p.stmtList(s.Body, 1, nextIsRBrace)
+
+ case *ast.SelectStmt:
+ p.print(token.SELECT, blank)
+ body := s.Body
+ if len(body.List) == 0 && !p.commentBefore(p.posFor(body.Rbrace)) {
+ // print empty select statement w/o comments on one line
+ p.setPos(body.Lbrace)
+ p.print(token.LBRACE)
+ p.setPos(body.Rbrace)
+ p.print(token.RBRACE)
+ } else {
+ p.block(body, 0)
+ }
+
+ case *ast.ForStmt:
+ p.print(token.FOR)
+ p.controlClause(true, s.Init, s.Cond, s.Post)
+ p.block(s.Body, 1)
+
+ case *ast.RangeStmt:
+ p.print(token.FOR, blank)
+ if s.Key != nil {
+ p.expr(s.Key)
+ if s.Value != nil {
+ // use position of value following the comma as
+ // comma position for correct comment placement
+ p.setPos(s.Value.Pos())
+ p.print(token.COMMA, blank)
+ p.expr(s.Value)
+ }
+ p.print(blank)
+ p.setPos(s.TokPos)
+ p.print(s.Tok, blank)
+ }
+ p.print(token.RANGE, blank)
+ p.expr(stripParens(s.X))
+ p.print(blank)
+ p.block(s.Body, 1)
+
+ default:
+ panic("unreachable")
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// The keepTypeColumn function determines if the type column of a series of
+// consecutive const or var declarations must be kept, or if initialization
+// values (V) can be placed in the type column (T) instead. The i'th entry
+// in the result slice is true if the type column in spec[i] must be kept.
+//
+// For example, the declaration:
+//
+// const (
+// foobar int = 42 // comment
+// x = 7 // comment
+// foo
+// bar = 991
+// )
+//
+// leads to the type/values matrix below. A run of value columns (V) can
+// be moved into the type column if there is no type for any of the values
+// in that column (we only move entire columns so that they align properly).
+//
+// matrix        formatted     result
+//               matrix
+// T  V    ->    T  V     ->   true      there is a T and so the type
+// -  V          -  V          true      column must be kept
+// -  -          -  -          false
+// -  V          V  -          false     V is moved into T column
+func keepTypeColumn(specs []ast.Spec) []bool {
+ m := make([]bool, len(specs))
+
+ populate := func(i, j int, keepType bool) {
+ if keepType {
+ for ; i < j; i++ {
+ m[i] = true
+ }
+ }
+ }
+
+ i0 := -1 // if i0 >= 0 we are in a run and i0 is the start of the run
+ var keepType bool
+ for i, s := range specs {
+ t := s.(*ast.ValueSpec)
+ if t.Values != nil {
+ if i0 < 0 {
+ // start of a run of ValueSpecs with non-nil Values
+ i0 = i
+ keepType = false
+ }
+ } else {
+ if i0 >= 0 {
+ // end of a run
+ populate(i0, i, keepType)
+ i0 = -1
+ }
+ }
+ if t.Type != nil {
+ keepType = true
+ }
+ }
+ if i0 >= 0 {
+ // end of a run
+ populate(i0, len(specs), keepType)
+ }
+
+ return m
+}
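The matrix above can be checked mechanically. Below is a small standalone sketch (not part of this package; toySpec and keepRuns are made-up names) that mirrors the run-based decision on a simplified representation. Running it on the four specs of the example declaration prints [true true false false], matching the result column above.

package main

import "fmt"

// toySpec captures the only two facts the decision depends on:
// whether a spec has an explicit type and whether it has values.
type toySpec struct{ hasType, hasValues bool }

// keepRuns mirrors the logic above: specs with values form runs, and a
// whole run keeps its type column if any spec in the run has a type.
func keepRuns(specs []toySpec) []bool {
	m := make([]bool, len(specs))
	i0, keep := -1, false // i0 >= 0 while inside a run of specs with values
	for i, s := range specs {
		if s.hasValues {
			if i0 < 0 {
				i0, keep = i, false // start of a new run
			}
		} else if i0 >= 0 {
			for j := i0; j < i; j++ { // end of a run: record the decision
				m[j] = keep
			}
			i0 = -1
		}
		if s.hasType {
			keep = true
		}
	}
	if i0 >= 0 {
		for j := i0; j < len(specs); j++ {
			m[j] = keep
		}
	}
	return m
}

func main() {
	// foobar int = 42; x = 7; foo; bar = 991  (the example above)
	fmt.Println(keepRuns([]toySpec{
		{true, true}, {false, true}, {false, false}, {false, true},
	})) // [true true false false]
}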
+
+func (p *printer) valueSpec(s *ast.ValueSpec, keepType bool) {
+ p.setComment(s.Doc)
+ p.identList(s.Names, false) // always present
+ extraTabs := 3
+ if s.Type != nil || keepType {
+ p.print(vtab)
+ extraTabs--
+ }
+ if s.Type != nil {
+ p.expr(s.Type)
+ }
+ if s.Values != nil {
+ p.print(vtab, token.ASSIGN, blank)
+ p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false)
+ extraTabs--
+ }
+ if s.Comment != nil {
+ for ; extraTabs > 0; extraTabs-- {
+ p.print(vtab)
+ }
+ p.setComment(s.Comment)
+ }
+}
+
+func sanitizeImportPath(lit *ast.BasicLit) *ast.BasicLit {
+ // Note: An unmodified AST generated by go/parser will already
+ // contain a backquoted or double-quoted path string that does
+ // not contain any invalid characters, and most of the work
+ // here is not needed. However, a modified or generated AST
+ // may possibly contain non-canonical paths. Do the work in
+ // all cases since it's not too hard and not speed-critical.
+
+ // if we don't have a proper string, be conservative and return whatever we have
+ if lit.Kind != token.STRING {
+ return lit
+ }
+ s, err := strconv.Unquote(lit.Value)
+ if err != nil {
+ return lit
+ }
+
+ // if the string is an invalid path, return whatever we have
+ //
+ // spec: "Implementation restriction: A compiler may restrict
+ // ImportPaths to non-empty strings using only characters belonging
+ // to Unicode's L, M, N, P, and S general categories (the Graphic
+ // characters without spaces) and may also exclude the characters
+ // !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character
+ // U+FFFD."
+ if s == "" {
+ return lit
+ }
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return lit
+ }
+ }
+
+ // otherwise, return the double-quoted path
+ s = strconv.Quote(s)
+ if s == lit.Value {
+ return lit // nothing wrong with lit
+ }
+ return &ast.BasicLit{ValuePos: lit.ValuePos, Kind: token.STRING, Value: s}
+}
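For illustration, the canonicalization above amounts to a strconv.Unquote/strconv.Quote round trip: a backquoted (raw string) import path comes back in double-quoted form. A minimal standalone sketch (it does not call the unexported helper above):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// A backquoted import path as it might appear in a generated AST.
	raw := "`go/printer`"

	s, err := strconv.Unquote(raw) // -> go/printer
	if err != nil {
		panic(err)
	}
	fmt.Println(strconv.Quote(s)) // "go/printer" (double-quoted form)
}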
+
+// The parameter n is the number of specs in the group. If doIndent is set,
+// multi-line identifier lists in the spec are indented when the first
+// linebreak is encountered.
+func (p *printer) spec(spec ast.Spec, n int, doIndent bool) {
+ switch s := spec.(type) {
+ case *ast.ImportSpec:
+ p.setComment(s.Doc)
+ if s.Name != nil {
+ p.expr(s.Name)
+ p.print(blank)
+ }
+ p.expr(sanitizeImportPath(s.Path))
+ p.setComment(s.Comment)
+ p.setPos(s.EndPos)
+
+ case *ast.ValueSpec:
+ if n != 1 {
+ p.internalError("expected n = 1; got", n)
+ }
+ p.setComment(s.Doc)
+ p.identList(s.Names, doIndent) // always present
+ if s.Type != nil {
+ p.print(blank)
+ p.expr(s.Type)
+ }
+ if s.Values != nil {
+ p.print(blank, token.ASSIGN, blank)
+ p.exprList(token.NoPos, s.Values, 1, 0, token.NoPos, false)
+ }
+ p.setComment(s.Comment)
+
+ case *ast.TypeSpec:
+ p.setComment(s.Doc)
+ p.expr(s.Name)
+ if s.TypeParams != nil {
+ p.parameters(s.TypeParams, typeTParam)
+ }
+ if n == 1 {
+ p.print(blank)
+ } else {
+ p.print(vtab)
+ }
+ if s.Assign.IsValid() {
+ p.print(token.ASSIGN, blank)
+ }
+ p.expr(s.Type)
+ p.setComment(s.Comment)
+
+ default:
+ panic("unreachable")
+ }
+}
+
+func (p *printer) genDecl(d *ast.GenDecl) {
+ p.setComment(d.Doc)
+ p.setPos(d.Pos())
+ p.print(d.Tok, blank)
+
+ if d.Lparen.IsValid() || len(d.Specs) > 1 {
+ // group of parenthesized declarations
+ p.setPos(d.Lparen)
+ p.print(token.LPAREN)
+ if n := len(d.Specs); n > 0 {
+ p.print(indent, formfeed)
+ if n > 1 && (d.Tok == token.CONST || d.Tok == token.VAR) {
+ // two or more grouped const/var declarations:
+ // determine if the type column must be kept
+ keepType := keepTypeColumn(d.Specs)
+ var line int
+ for i, s := range d.Specs {
+ if i > 0 {
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ p.recordLine(&line)
+ p.valueSpec(s.(*ast.ValueSpec), keepType[i])
+ }
+ } else {
+ var line int
+ for i, s := range d.Specs {
+ if i > 0 {
+ p.linebreak(p.lineFor(s.Pos()), 1, ignore, p.linesFrom(line) > 0)
+ }
+ p.recordLine(&line)
+ p.spec(s, n, false)
+ }
+ }
+ p.print(unindent, formfeed)
+ }
+ p.setPos(d.Rparen)
+ p.print(token.RPAREN)
+
+ } else if len(d.Specs) > 0 {
+ // single declaration
+ p.spec(d.Specs[0], 1, true)
+ }
+}
+
+// sizeCounter is an io.Writer which counts the number of bytes written,
+// as well as whether a newline character was seen.
+type sizeCounter struct {
+ hasNewline bool
+ size int
+}
+
+func (c *sizeCounter) Write(p []byte) (int, error) {
+ if !c.hasNewline {
+ for _, b := range p {
+ if b == '\n' || b == '\f' {
+ c.hasNewline = true
+ break
+ }
+ }
+ }
+ c.size += len(p)
+ return len(p), nil
+}
+
+// nodeSize determines the size of n in chars after formatting.
+// The result is <= maxSize if the node fits on one line with at
+// most maxSize chars and the formatted output doesn't contain
+// any control chars. Otherwise, the result is > maxSize.
+func (p *printer) nodeSize(n ast.Node, maxSize int) (size int) {
+ // nodeSize invokes the printer, which may invoke nodeSize
+ // recursively. For deep composite literal nests, this can
+ // lead to an exponential algorithm. Remember previous
+ // results to prune the recursion (was issue 1628).
+ if size, found := p.nodeSizes[n]; found {
+ return size
+ }
+
+ size = maxSize + 1 // assume n doesn't fit
+ p.nodeSizes[n] = size
+
+ // nodeSize computation must be independent of particular
+ // style so that we always get the same decision; print
+ // in RawFormat
+ cfg := Config{Mode: RawFormat}
+ var counter sizeCounter
+ if err := cfg.fprint(&counter, p.fset, n, p.nodeSizes); err != nil {
+ return
+ }
+ if counter.size <= maxSize && !counter.hasNewline {
+ // n fits in a single line
+ size = counter.size
+ p.nodeSizes[n] = size
+ }
+ return
+}
+
+// numLines returns the number of lines spanned by node n in the original source.
+func (p *printer) numLines(n ast.Node) int {
+ if from := n.Pos(); from.IsValid() {
+ if to := n.End(); to.IsValid() {
+ return p.lineFor(to) - p.lineFor(from) + 1
+ }
+ }
+ return infinity
+}
+
+// bodySize is like nodeSize but it is specialized for *ast.BlockStmt's.
+func (p *printer) bodySize(b *ast.BlockStmt, maxSize int) int {
+ pos1 := b.Pos()
+ pos2 := b.Rbrace
+ if pos1.IsValid() && pos2.IsValid() && p.lineFor(pos1) != p.lineFor(pos2) {
+ // opening and closing brace are on different lines - don't make it a one-liner
+ return maxSize + 1
+ }
+ if len(b.List) > 5 {
+ // too many statements - don't make it a one-liner
+ return maxSize + 1
+ }
+ // otherwise, estimate body size
+ bodySize := p.commentSizeBefore(p.posFor(pos2))
+ for i, s := range b.List {
+ if bodySize > maxSize {
+ break // no need to continue
+ }
+ if i > 0 {
+ bodySize += 2 // space for a semicolon and blank
+ }
+ bodySize += p.nodeSize(s, maxSize)
+ }
+ return bodySize
+}
+
+// funcBody prints a function body following a function header of given headerSize.
+// If the header's and block's size are "small enough" and the block is "simple enough",
+// the block is printed on the current line, without line breaks, spaced from the header
+// by sep. Otherwise the block's opening "{" is printed on the current line, followed by
+// lines for the block's statements and its closing "}".
+func (p *printer) funcBody(headerSize int, sep whiteSpace, b *ast.BlockStmt) {
+ if b == nil {
+ return
+ }
+
+ // save/restore composite literal nesting level
+ defer func(level int) {
+ p.level = level
+ }(p.level)
+ p.level = 0
+
+ const maxSize = 100
+ if headerSize+p.bodySize(b, maxSize) <= maxSize {
+ p.print(sep)
+ p.setPos(b.Lbrace)
+ p.print(token.LBRACE)
+ if len(b.List) > 0 {
+ p.print(blank)
+ for i, s := range b.List {
+ if i > 0 {
+ p.print(token.SEMICOLON, blank)
+ }
+ p.stmt(s, i == len(b.List)-1)
+ }
+ p.print(blank)
+ }
+ p.print(noExtraLinebreak)
+ p.setPos(b.Rbrace)
+ p.print(token.RBRACE, noExtraLinebreak)
+ return
+ }
+
+ if sep != ignore {
+ p.print(blank) // always use blank
+ }
+ p.block(b, 1)
+}
+
+// distanceFrom returns the column difference between p.out (the current output
+// position) and startOutCol. If the start position is on a different line from
+// the current position (or either is unknown), the result is infinity.
+func (p *printer) distanceFrom(startPos token.Pos, startOutCol int) int {
+ if startPos.IsValid() && p.pos.IsValid() && p.posFor(startPos).Line == p.pos.Line {
+ return p.out.Column - startOutCol
+ }
+ return infinity
+}
+
+func (p *printer) funcDecl(d *ast.FuncDecl) {
+ p.setComment(d.Doc)
+ p.setPos(d.Pos())
+ p.print(token.FUNC, blank)
+ // We have to save startCol only after emitting FUNC; otherwise it can be on a
+ // different line (all whitespace preceding the FUNC is emitted only when the
+ // FUNC is emitted).
+ startCol := p.out.Column - len("func ")
+ if d.Recv != nil {
+ p.parameters(d.Recv, funcParam) // method: print receiver
+ p.print(blank)
+ }
+ p.expr(d.Name)
+ p.signature(d.Type)
+ p.funcBody(p.distanceFrom(d.Pos(), startCol), vtab, d.Body)
+}
+
+func (p *printer) decl(decl ast.Decl) {
+ switch d := decl.(type) {
+ case *ast.BadDecl:
+ p.setPos(d.Pos())
+ p.print("BadDecl")
+ case *ast.GenDecl:
+ p.genDecl(d)
+ case *ast.FuncDecl:
+ p.funcDecl(d)
+ default:
+ panic("unreachable")
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Files
+
+func declToken(decl ast.Decl) (tok token.Token) {
+ tok = token.ILLEGAL
+ switch d := decl.(type) {
+ case *ast.GenDecl:
+ tok = d.Tok
+ case *ast.FuncDecl:
+ tok = token.FUNC
+ }
+ return
+}
+
+func (p *printer) declList(list []ast.Decl) {
+ tok := token.ILLEGAL
+ for _, d := range list {
+ prev := tok
+ tok = declToken(d)
+ // If the declaration token changed (e.g., from CONST to TYPE)
+ // or the next declaration has documentation associated with it,
+ // print an empty line between top-level declarations.
+ // (because p.linebreak is called with the position of d, which
+ // is past any documentation, the minimum requirement is satisfied
+ // even w/o the extra getDoc(d) nil-check - leave it in case the
+ // linebreak logic improves - there's already a TODO).
+ if len(p.output) > 0 {
+ // only print line break if we are not at the beginning of the output
+ // (i.e., we are not printing only a partial program)
+ min := 1
+ if prev != tok || getDoc(d) != nil {
+ min = 2
+ }
+ // start a new section if the next declaration is a function
+ // that spans multiple lines (see also issue #19544)
+ p.linebreak(p.lineFor(d.Pos()), min, ignore, tok == token.FUNC && p.numLines(d) > 1)
+ }
+ p.decl(d)
+ }
+}
+
+func (p *printer) file(src *ast.File) {
+ p.setComment(src.Doc)
+ p.setPos(src.Pos())
+ p.print(token.PACKAGE, blank)
+ p.expr(src.Name)
+ p.declList(src.Decls)
+ p.print(newline)
+}
diff --git a/src/go/printer/performance_test.go b/src/go/printer/performance_test.go
new file mode 100644
index 0000000..c58f6d4
--- /dev/null
+++ b/src/go/printer/performance_test.go
@@ -0,0 +1,91 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a simple printer performance benchmark:
+// go test -bench=BenchmarkPrint
+
+package printer
+
+import (
+ "bytes"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "io"
+ "log"
+ "os"
+ "testing"
+)
+
+var (
+ fileNode *ast.File
+ fileSize int64
+
+ declNode ast.Decl
+ declSize int64
+)
+
+func testprint(out io.Writer, node ast.Node) {
+ if err := (&Config{TabIndent | UseSpaces | normalizeNumbers, 8, 0}).Fprint(out, fset, node); err != nil {
+ log.Fatalf("print error: %s", err)
+ }
+}
+
+// cannot initialize in init because (printer) Fprint launches goroutines.
+func initialize() {
+ const filename = "testdata/parser.go"
+
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ log.Fatalf("%s", err)
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+ if err != nil {
+ log.Fatalf("%s", err)
+ }
+
+ var buf bytes.Buffer
+ testprint(&buf, file)
+ if !bytes.Equal(buf.Bytes(), src) {
+ log.Fatalf("print error: %s not idempotent", filename)
+ }
+
+ fileNode = file
+ fileSize = int64(len(src))
+
+ for _, decl := range file.Decls {
+ // The first global variable, which is pretty short:
+ //
+ // var unresolved = new(ast.Object)
+ if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.VAR {
+ declNode = decl
+ declSize = int64(fset.Position(decl.End()).Offset - fset.Position(decl.Pos()).Offset)
+ break
+ }
+
+ }
+}
+
+func BenchmarkPrintFile(b *testing.B) {
+ if fileNode == nil {
+ initialize()
+ }
+ b.ReportAllocs()
+ b.SetBytes(fileSize)
+ for i := 0; i < b.N; i++ {
+ testprint(io.Discard, fileNode)
+ }
+}
+
+func BenchmarkPrintDecl(b *testing.B) {
+ if declNode == nil {
+ initialize()
+ }
+ b.ReportAllocs()
+ b.SetBytes(declSize)
+ for i := 0; i < b.N; i++ {
+ testprint(io.Discard, declNode)
+ }
+}
diff --git a/src/go/printer/printer.go b/src/go/printer/printer.go
new file mode 100644
index 0000000..46131c6
--- /dev/null
+++ b/src/go/printer/printer.go
@@ -0,0 +1,1436 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package printer implements printing of AST nodes.
+package printer
+
+import (
+ "fmt"
+ "go/ast"
+ "go/build/constraint"
+ "go/token"
+ "io"
+ "os"
+ "strings"
+ "sync"
+ "text/tabwriter"
+ "unicode"
+)
+
+const (
+ maxNewlines = 2 // max. number of newlines between source text
+ debug = false // enable for debugging
+ infinity = 1 << 30
+)
+
+type whiteSpace byte
+
+const (
+ ignore = whiteSpace(0)
+ blank = whiteSpace(' ')
+ vtab = whiteSpace('\v')
+ newline = whiteSpace('\n')
+ formfeed = whiteSpace('\f')
+ indent = whiteSpace('>')
+ unindent = whiteSpace('<')
+)
+
+// A pmode value represents the current printer mode.
+type pmode int
+
+const (
+ noExtraBlank pmode = 1 << iota // disables extra blank after /*-style comment
+ noExtraLinebreak // disables extra line break after /*-style comment
+)
+
+type commentInfo struct {
+ cindex int // current comment index
+ comment *ast.CommentGroup // = printer.comments[cindex]; or nil
+ commentOffset int // = printer.posFor(printer.comments[cindex].List[0].Pos()).Offset; or infinity
+ commentNewline bool // true if the comment group contains newlines
+}
+
+type printer struct {
+ // Configuration (does not change after initialization)
+ Config
+ fset *token.FileSet
+
+ // Current state
+ output []byte // raw printer result
+ indent int // current indentation
+ level int // level == 0: outside composite literal; level > 0: inside composite literal
+ mode pmode // current printer mode
+ endAlignment bool // if set, terminate alignment immediately
+ impliedSemi bool // if set, a linebreak implies a semicolon
+ lastTok token.Token // last token printed (token.ILLEGAL if it's whitespace)
+ prevOpen token.Token // previous non-brace "open" token (, [, or token.ILLEGAL
+ wsbuf []whiteSpace // delayed white space
+ goBuild []int // start index of all //go:build comments in output
+ plusBuild []int // start index of all // +build comments in output
+
+ // Positions
+ // The out position differs from the pos position when the result
+ // formatting differs from the source formatting (in the amount of
+ // white space). If there's a difference and SourcePos is set in
+ // ConfigMode, //line directives are used in the output to restore
+ // original source positions for a reader.
+ pos token.Position // current position in AST (source) space
+ out token.Position // current position in output space
+ last token.Position // value of pos after calling writeString
+ linePtr *int // if set, record out.Line for the next token in *linePtr
+ sourcePosErr error // if non-nil, the first error emitting a //line directive
+
+ // The list of all source comments, in order of appearance.
+ comments []*ast.CommentGroup // may be nil
+ useNodeComments bool // if not set, ignore lead and line comments of nodes
+
+ // Information about p.comments[p.cindex]; set up by nextComment.
+ commentInfo
+
+ // Cache of already computed node sizes.
+ nodeSizes map[ast.Node]int
+
+ // Cache of most recently computed line position.
+ cachedPos token.Pos
+ cachedLine int // line corresponding to cachedPos
+}
+
+func (p *printer) internalError(msg ...any) {
+ if debug {
+ fmt.Print(p.pos.String() + ": ")
+ fmt.Println(msg...)
+ panic("go/printer")
+ }
+}
+
+// commentsHaveNewline reports whether a list of comments belonging to
+// an *ast.CommentGroup contains newlines. Because the position information
+// may only be partially correct, we also have to read the comment text.
+func (p *printer) commentsHaveNewline(list []*ast.Comment) bool {
+ // len(list) > 0
+ line := p.lineFor(list[0].Pos())
+ for i, c := range list {
+ if i > 0 && p.lineFor(list[i].Pos()) != line {
+ // not all comments on the same line
+ return true
+ }
+ if t := c.Text; len(t) >= 2 && (t[1] == '/' || strings.Contains(t, "\n")) {
+ return true
+ }
+ }
+ _ = line
+ return false
+}
+
+func (p *printer) nextComment() {
+ for p.cindex < len(p.comments) {
+ c := p.comments[p.cindex]
+ p.cindex++
+ if list := c.List; len(list) > 0 {
+ p.comment = c
+ p.commentOffset = p.posFor(list[0].Pos()).Offset
+ p.commentNewline = p.commentsHaveNewline(list)
+ return
+ }
+ // we should not reach here (correct ASTs don't have empty
+ // ast.CommentGroup nodes), but be conservative and try again
+ }
+ // no more comments
+ p.commentOffset = infinity
+}
+
+// commentBefore reports whether the current comment group occurs
+// before the next position in the source code and printing it does
+// not introduce implicit semicolons.
+func (p *printer) commentBefore(next token.Position) bool {
+ return p.commentOffset < next.Offset && (!p.impliedSemi || !p.commentNewline)
+}
+
+// commentSizeBefore returns the estimated size of the
+// comments on the same line before the next position.
+func (p *printer) commentSizeBefore(next token.Position) int {
+ // save/restore current p.commentInfo (p.nextComment() modifies it)
+ defer func(info commentInfo) {
+ p.commentInfo = info
+ }(p.commentInfo)
+
+ size := 0
+ for p.commentBefore(next) {
+ for _, c := range p.comment.List {
+ size += len(c.Text)
+ }
+ p.nextComment()
+ }
+ return size
+}
+
+// recordLine records the output line number for the next non-whitespace
+// token in *linePtr. It is used to compute an accurate line number for a
+// formatted construct, independent of pending (not yet emitted) whitespace
+// or comments.
+func (p *printer) recordLine(linePtr *int) {
+ p.linePtr = linePtr
+}
+
+// linesFrom returns the number of output lines between the current
+// output line and the line argument, ignoring any pending (not yet
+// emitted) whitespace or comments. It is used to compute an accurate
+// size (in number of lines) for a formatted construct.
+func (p *printer) linesFrom(line int) int {
+ return p.out.Line - line
+}
+
+func (p *printer) posFor(pos token.Pos) token.Position {
+ // not used frequently enough to cache entire token.Position
+ return p.fset.PositionFor(pos, false /* absolute position */)
+}
+
+func (p *printer) lineFor(pos token.Pos) int {
+ if pos != p.cachedPos {
+ p.cachedPos = pos
+ p.cachedLine = p.fset.PositionFor(pos, false /* absolute position */).Line
+ }
+ return p.cachedLine
+}
+
+// writeLineDirective writes a //line directive if necessary.
+func (p *printer) writeLineDirective(pos token.Position) {
+ if pos.IsValid() && (p.out.Line != pos.Line || p.out.Filename != pos.Filename) {
+ if strings.ContainsAny(pos.Filename, "\r\n") {
+ if p.sourcePosErr == nil {
+ p.sourcePosErr = fmt.Errorf("go/printer: source filename contains unexpected newline character: %q", pos.Filename)
+ }
+ return
+ }
+
+ p.output = append(p.output, tabwriter.Escape) // protect '\n' in //line from tabwriter interpretation
+ p.output = append(p.output, fmt.Sprintf("//line %s:%d\n", pos.Filename, pos.Line)...)
+ p.output = append(p.output, tabwriter.Escape)
+ // p.out must match the //line directive
+ p.out.Filename = pos.Filename
+ p.out.Line = pos.Line
+ }
+}
+
+// writeIndent writes indentation.
+func (p *printer) writeIndent() {
+ // use "hard" htabs - indentation columns
+ // must not be discarded by the tabwriter
+ n := p.Config.Indent + p.indent // include base indentation
+ for i := 0; i < n; i++ {
+ p.output = append(p.output, '\t')
+ }
+
+ // update positions
+ p.pos.Offset += n
+ p.pos.Column += n
+ p.out.Column += n
+}
+
+// writeByte writes ch n times to p.output and updates p.pos.
+// Only used to write formatting (white space) characters.
+func (p *printer) writeByte(ch byte, n int) {
+ if p.endAlignment {
+ // Ignore any alignment control character;
+ // and at the end of the line, break with
+ // a formfeed to indicate termination of
+ // existing columns.
+ switch ch {
+ case '\t', '\v':
+ ch = ' '
+ case '\n', '\f':
+ ch = '\f'
+ p.endAlignment = false
+ }
+ }
+
+ if p.out.Column == 1 {
+ // no need to write line directives before white space
+ p.writeIndent()
+ }
+
+ for i := 0; i < n; i++ {
+ p.output = append(p.output, ch)
+ }
+
+ // update positions
+ p.pos.Offset += n
+ if ch == '\n' || ch == '\f' {
+ p.pos.Line += n
+ p.out.Line += n
+ p.pos.Column = 1
+ p.out.Column = 1
+ return
+ }
+ p.pos.Column += n
+ p.out.Column += n
+}
+
+// writeString writes the string s to p.output and updates p.pos, p.out,
+// and p.last. If isLit is set, s is escaped w/ tabwriter.Escape characters
+// to protect s from being interpreted by the tabwriter.
+//
+// Note: writeString is only used to write Go tokens, literals, and
+// comments, all of which must be written literally. Thus, it is correct
+// to always set isLit = true. However, setting it explicitly only when
+// needed (i.e., when we don't know that s contains no tabs or line breaks)
+// avoids processing extra escape characters and reduces run time of the
+// printer benchmark by up to 10%.
+func (p *printer) writeString(pos token.Position, s string, isLit bool) {
+ if p.out.Column == 1 {
+ if p.Config.Mode&SourcePos != 0 {
+ p.writeLineDirective(pos)
+ }
+ p.writeIndent()
+ }
+
+ if pos.IsValid() {
+ // update p.pos (if pos is invalid, continue with existing p.pos)
+ // Note: Must do this after handling line beginnings because
+ // writeIndent updates p.pos if there's indentation, but p.pos
+ // is the position of s.
+ p.pos = pos
+ }
+
+ if isLit {
+ // Protect s such that it passes through the tabwriter
+ // unchanged. Note that valid Go programs cannot contain
+ // tabwriter.Escape bytes since they do not appear in legal
+ // UTF-8 sequences.
+ p.output = append(p.output, tabwriter.Escape)
+ }
+
+ if debug {
+ p.output = append(p.output, fmt.Sprintf("/*%s*/", pos)...) // do not update p.pos!
+ }
+ p.output = append(p.output, s...)
+
+ // update positions
+ nlines := 0
+ var li int // index of last newline; valid if nlines > 0
+ for i := 0; i < len(s); i++ {
+ // Raw string literals may contain any character except back quote (`).
+ if ch := s[i]; ch == '\n' || ch == '\f' {
+ // account for line break
+ nlines++
+ li = i
+ // A line break inside a literal will break whatever column
+ // formatting is in place; ignore any further alignment through
+ // the end of the line.
+ p.endAlignment = true
+ }
+ }
+ p.pos.Offset += len(s)
+ if nlines > 0 {
+ p.pos.Line += nlines
+ p.out.Line += nlines
+ c := len(s) - li
+ p.pos.Column = c
+ p.out.Column = c
+ } else {
+ p.pos.Column += len(s)
+ p.out.Column += len(s)
+ }
+
+ if isLit {
+ p.output = append(p.output, tabwriter.Escape)
+ }
+
+ p.last = p.pos
+}
+
+// writeCommentPrefix writes the whitespace before a comment.
+// If there is any pending whitespace, it consumes as much of
+// it as is likely to help position the comment nicely.
+// pos is the comment position, next the position of the item
+// after all pending comments, prev is the previous comment in
+// a group of comments (or nil), and tok is the next token.
+func (p *printer) writeCommentPrefix(pos, next token.Position, prev *ast.Comment, tok token.Token) {
+ if len(p.output) == 0 {
+ // the comment is the first item to be printed - don't write any whitespace
+ return
+ }
+
+ if pos.IsValid() && pos.Filename != p.last.Filename {
+ // comment in a different file - separate with newlines
+ p.writeByte('\f', maxNewlines)
+ return
+ }
+
+ if pos.Line == p.last.Line && (prev == nil || prev.Text[1] != '/') {
+ // comment on the same line as last item:
+ // separate with at least one separator
+ hasSep := false
+ if prev == nil {
+ // first comment of a comment group
+ j := 0
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank:
+ // ignore any blanks before a comment
+ p.wsbuf[i] = ignore
+ continue
+ case vtab:
+ // respect existing tabs - important
+ // for proper formatting of commented structs
+ hasSep = true
+ continue
+ case indent:
+ // apply pending indentation
+ continue
+ }
+ j = i
+ break
+ }
+ p.writeWhitespace(j)
+ }
+ // make sure there is at least one separator
+ if !hasSep {
+ sep := byte('\t')
+ if pos.Line == next.Line {
+ // next item is on the same line as the comment
+ // (which must be a /*-style comment): separate
+ // with a blank instead of a tab
+ sep = ' '
+ }
+ p.writeByte(sep, 1)
+ }
+
+ } else {
+ // comment on a different line:
+ // separate with at least one line break
+ droppedLinebreak := false
+ j := 0
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank, vtab:
+ // ignore any horizontal whitespace before line breaks
+ p.wsbuf[i] = ignore
+ continue
+ case indent:
+ // apply pending indentation
+ continue
+ case unindent:
+ // if this is not the last unindent, apply it
+ // as it is (likely) belonging to the last
+ // construct (e.g., a multi-line expression list)
+ // and is not part of closing a block
+ if i+1 < len(p.wsbuf) && p.wsbuf[i+1] == unindent {
+ continue
+ }
+ // if the next token is not a closing }, apply the unindent
+ // if it appears that the comment is aligned with the
+ // token; otherwise assume the unindent is part of a
+ // closing block and stop (this scenario appears with
+ // comments before a case label where the comments
+ // apply to the next case instead of the current one)
+ if tok != token.RBRACE && pos.Column == next.Column {
+ continue
+ }
+ case newline, formfeed:
+ p.wsbuf[i] = ignore
+ droppedLinebreak = prev == nil // record only if first comment of a group
+ }
+ j = i
+ break
+ }
+ p.writeWhitespace(j)
+
+ // determine number of linebreaks before the comment
+ n := 0
+ if pos.IsValid() && p.last.IsValid() {
+ n = pos.Line - p.last.Line
+ if n < 0 { // should never happen
+ n = 0
+ }
+ }
+
+ // at the package scope level only (p.indent == 0),
+ // add an extra newline if we dropped one before:
+ // this preserves a blank line before documentation
+ // comments at the package scope level (issue 2570)
+ if p.indent == 0 && droppedLinebreak {
+ n++
+ }
+
+ // make sure there is at least one line break
+ // if the previous comment was a line comment
+ if n == 0 && prev != nil && prev.Text[1] == '/' {
+ n = 1
+ }
+
+ if n > 0 {
+ // use formfeeds to break columns before a comment;
+ // this is analogous to using formfeeds to separate
+ // individual lines of /*-style comments
+ p.writeByte('\f', nlimit(n))
+ }
+ }
+}
+
+// isBlank reports whether s contains only white space
+// (only tabs and blanks can appear in the printer's context).
+func isBlank(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] > ' ' {
+ return false
+ }
+ }
+ return true
+}
+
+// commonPrefix returns the common prefix of a and b.
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
+ i++
+ }
+ return a[0:i]
+}
+
+// trimRight returns s with trailing whitespace removed.
+func trimRight(s string) string {
+ return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// stripCommonPrefix removes a common prefix from /*-style comment lines (unless no
+// comment line is indented, all but the first line have some form of space prefix).
+// The prefix is computed using heuristics such that it is likely that the comment
+// contents are nicely laid out after re-printing each line using the printer's
+// current indentation.
+func stripCommonPrefix(lines []string) {
+ if len(lines) <= 1 {
+ return // at most one line - nothing to do
+ }
+ // len(lines) > 1
+
+ // The heuristic in this function tries to handle a few
+ // common patterns of /*-style comments: Comments where
+ // the opening /* and closing */ are aligned and the
+ // rest of the comment text is aligned and indented with
+ // blanks or tabs, cases with a vertical "line of stars"
+ // on the left, and cases where the closing */ is on the
+ // same line as the last comment text.
+
+ // Compute maximum common white prefix of all but the first,
+ // last, and blank lines, and replace blank lines with empty
+ // lines (the first line starts with /* and has no prefix).
+ // In cases where only the first and last lines are not blank,
+ // such as two-line comments, or comments where all inner lines
+ // are blank, consider the last line for the prefix computation
+ // since otherwise the prefix would be empty.
+ //
+ // Note that the first and last line are never empty (they
+ // contain the opening /* and closing */ respectively) and
+ // thus they can be ignored by the blank line check.
+ prefix := ""
+ prefixSet := false
+ if len(lines) > 2 {
+ for i, line := range lines[1 : len(lines)-1] {
+ if isBlank(line) {
+ lines[1+i] = "" // range starts with lines[1]
+ } else {
+ if !prefixSet {
+ prefix = line
+ prefixSet = true
+ }
+ prefix = commonPrefix(prefix, line)
+ }
+
+ }
+ }
+ // If we don't have a prefix yet, consider the last line.
+ if !prefixSet {
+ line := lines[len(lines)-1]
+ prefix = commonPrefix(line, line)
+ }
+
+ /*
+ * Check for vertical "line of stars" and correct prefix accordingly.
+ */
+ lineOfStars := false
+ if p, _, ok := strings.Cut(prefix, "*"); ok {
+ // remove trailing blank from prefix so stars remain aligned
+ prefix = strings.TrimSuffix(p, " ")
+ lineOfStars = true
+ } else {
+ // No line of stars present.
+ // Determine the white space on the first line after the /*
+ // and before the beginning of the comment text, assume two
+ // blanks instead of the /* unless the first character after
+ // the /* is a tab. If the first comment line is empty but
+ // for the opening /*, assume up to 3 blanks or a tab. This
+ // whitespace may be found as suffix in the common prefix.
+ first := lines[0]
+ if isBlank(first[2:]) {
+ // no comment text on the first line:
+ // reduce prefix by up to 3 blanks or a tab
+ // if present - this keeps comment text indented
+ // relative to the /* and */'s if it was indented
+ // in the first place
+ i := len(prefix)
+ for n := 0; n < 3 && i > 0 && prefix[i-1] == ' '; n++ {
+ i--
+ }
+ if i == len(prefix) && i > 0 && prefix[i-1] == '\t' {
+ i--
+ }
+ prefix = prefix[0:i]
+ } else {
+ // comment text on the first line
+ suffix := make([]byte, len(first))
+ n := 2 // start after opening /*
+ for n < len(first) && first[n] <= ' ' {
+ suffix[n] = first[n]
+ n++
+ }
+ if n > 2 && suffix[2] == '\t' {
+ // assume the '\t' compensates for the /*
+ suffix = suffix[2:n]
+ } else {
+ // otherwise assume two blanks
+ suffix[0], suffix[1] = ' ', ' '
+ suffix = suffix[0:n]
+ }
+ // Shorten the computed common prefix by the length of
+ // suffix, if it is found as suffix of the prefix.
+ prefix = strings.TrimSuffix(prefix, string(suffix))
+ }
+ }
+
+ // Handle last line: If it only contains a closing */, align it
+ // with the opening /*, otherwise align the text with the other
+ // lines.
+ last := lines[len(lines)-1]
+ closing := "*/"
+ before, _, _ := strings.Cut(last, closing) // closing always present
+ if isBlank(before) {
+ // last line only contains closing */
+ if lineOfStars {
+ closing = " */" // add blank to align final star
+ }
+ lines[len(lines)-1] = prefix + closing
+ } else {
+ // last line contains more comment text - assume
+ // it is aligned like the other lines and include
+ // in prefix computation
+ prefix = commonPrefix(prefix, last)
+ }
+
+ // Remove the common prefix from all but the first and empty lines.
+ for i, line := range lines {
+ if i > 0 && line != "" {
+ lines[i] = line[len(prefix):]
+ }
+ }
+}
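The three comment shapes this heuristic targets look roughly as follows; this is an illustrative, compilable snippet only, not output of the printer:

package main

// Shape 1: opening and closing markers aligned, text indented with blanks.
/*
   Text indented relative to the aligned opening
   and closing markers.
*/

// Shape 2: a vertical "line of stars" on the left.
/*
 * Every line starts with an aligned star;
 * the closing marker gets an extra blank so its star lines up too.
 */

// Shape 3: the closing marker shares a line with the last comment text.
/* One-paragraph comment whose text
   ends on the same line as the closing marker. */

func main() {}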
+
+func (p *printer) writeComment(comment *ast.Comment) {
+ text := comment.Text
+ pos := p.posFor(comment.Pos())
+
+ const linePrefix = "//line "
+ if strings.HasPrefix(text, linePrefix) && (!pos.IsValid() || pos.Column == 1) {
+ // Possibly a //-style line directive.
+ // Suspend indentation temporarily to keep line directive valid.
+ defer func(indent int) { p.indent = indent }(p.indent)
+ p.indent = 0
+ }
+
+ // shortcut common case of //-style comments
+ if text[1] == '/' {
+ if constraint.IsGoBuild(text) {
+ p.goBuild = append(p.goBuild, len(p.output))
+ } else if constraint.IsPlusBuild(text) {
+ p.plusBuild = append(p.plusBuild, len(p.output))
+ }
+ p.writeString(pos, trimRight(text), true)
+ return
+ }
+
+ // for /*-style comments, print line by line and let the
+ // write function take care of the proper indentation
+ lines := strings.Split(text, "\n")
+
+ // The comment started in the first column but is going
+ // to be indented. For an idempotent result, add indentation
+ // to all lines such that they look like they were indented
+ // before - this will make sure the common prefix computation
+ // is the same independent of how many times formatting is
+ // applied (was issue 1835).
+ if pos.IsValid() && pos.Column == 1 && p.indent > 0 {
+ for i, line := range lines[1:] {
+ lines[1+i] = " " + line
+ }
+ }
+
+ stripCommonPrefix(lines)
+
+ // write comment lines, separated by formfeed,
+ // without a line break after the last line
+ for i, line := range lines {
+ if i > 0 {
+ p.writeByte('\f', 1)
+ pos = p.pos
+ }
+ if len(line) > 0 {
+ p.writeString(pos, trimRight(line), true)
+ }
+ }
+}
+
+// writeCommentSuffix writes a line break after a comment if indicated
+// and processes any leftover indentation information. If a line break
+// is needed, the kind of break (newline vs formfeed) depends on the
+// pending whitespace. The writeCommentSuffix result indicates if a
+// newline was written or if a formfeed was dropped from the whitespace
+// buffer.
+func (p *printer) writeCommentSuffix(needsLinebreak bool) (wroteNewline, droppedFF bool) {
+ for i, ch := range p.wsbuf {
+ switch ch {
+ case blank, vtab:
+ // ignore trailing whitespace
+ p.wsbuf[i] = ignore
+ case indent, unindent:
+ // don't lose indentation information
+ case newline, formfeed:
+ // if we need a line break, keep exactly one
+ // but remember if we dropped any formfeeds
+ if needsLinebreak {
+ needsLinebreak = false
+ wroteNewline = true
+ } else {
+ if ch == formfeed {
+ droppedFF = true
+ }
+ p.wsbuf[i] = ignore
+ }
+ }
+ }
+ p.writeWhitespace(len(p.wsbuf))
+
+ // make sure we have a line break
+ if needsLinebreak {
+ p.writeByte('\n', 1)
+ wroteNewline = true
+ }
+
+ return
+}
+
+// containsLinebreak reports whether the whitespace buffer contains any line breaks.
+func (p *printer) containsLinebreak() bool {
+ for _, ch := range p.wsbuf {
+ if ch == newline || ch == formfeed {
+ return true
+ }
+ }
+ return false
+}
+
+// intersperseComments consumes all comments that appear before the next token
+// tok and prints them together with the buffered whitespace (i.e., the whitespace
+// that needs to be written before the next token). A heuristic is used to mix
+// the comments and whitespace. The intersperseComments result indicates if a
+// newline was written or if a formfeed was dropped from the whitespace buffer.
+func (p *printer) intersperseComments(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
+ var last *ast.Comment
+ for p.commentBefore(next) {
+ list := p.comment.List
+ changed := false
+ if p.lastTok != token.IMPORT && // do not rewrite cgo's import "C" comments
+ p.posFor(p.comment.Pos()).Column == 1 &&
+ p.posFor(p.comment.End()+1) == next {
+ // Unindented comment abutting next token position:
+ // a top-level doc comment.
+ list = formatDocComment(list)
+ changed = true
+
+ if len(p.comment.List) > 0 && len(list) == 0 {
+ // The doc comment was removed entirely.
+ // Keep preceding whitespace.
+ p.writeCommentPrefix(p.posFor(p.comment.Pos()), next, last, tok)
+ // Change print state to continue at next.
+ p.pos = next
+ p.last = next
+ // There can't be any more comments.
+ p.nextComment()
+ return p.writeCommentSuffix(false)
+ }
+ }
+ for _, c := range list {
+ p.writeCommentPrefix(p.posFor(c.Pos()), next, last, tok)
+ p.writeComment(c)
+ last = c
+ }
+ // In case list was rewritten, change print state to where
+ // the original list would have ended.
+ if len(p.comment.List) > 0 && changed {
+ last = p.comment.List[len(p.comment.List)-1]
+ p.pos = p.posFor(last.End())
+ p.last = p.pos
+ }
+ p.nextComment()
+ }
+
+ if last != nil {
+ // If the last comment is a /*-style comment and the next item
+ // follows on the same line but is not a comma, and not a "closing"
+ // token immediately following its corresponding "opening" token,
+ // add an extra separator unless explicitly disabled. Use a blank
+ // as separator unless we have pending linebreaks, they are not
+ // disabled, and we are outside a composite literal, in which case
+ // we want a linebreak (issue 15137).
+ // TODO(gri) This has become overly complicated. We should be able
+ // to track whether we're inside an expression or statement and
+ // use that information to decide more directly.
+ needsLinebreak := false
+ if p.mode&noExtraBlank == 0 &&
+ last.Text[1] == '*' && p.lineFor(last.Pos()) == next.Line &&
+ tok != token.COMMA &&
+ (tok != token.RPAREN || p.prevOpen == token.LPAREN) &&
+ (tok != token.RBRACK || p.prevOpen == token.LBRACK) {
+ if p.containsLinebreak() && p.mode&noExtraLinebreak == 0 && p.level == 0 {
+ needsLinebreak = true
+ } else {
+ p.writeByte(' ', 1)
+ }
+ }
+ // Ensure that there is a line break after a //-style comment,
+ // before EOF, and before a closing '}' unless explicitly disabled.
+ if last.Text[1] == '/' ||
+ tok == token.EOF ||
+ tok == token.RBRACE && p.mode&noExtraLinebreak == 0 {
+ needsLinebreak = true
+ }
+ return p.writeCommentSuffix(needsLinebreak)
+ }
+
+ // no comment was written - we should never reach here since
+ // intersperseComments should not be called in that case
+ p.internalError("intersperseComments called without pending comments")
+ return
+}
+
+// writeWhitespace writes the first n whitespace entries.
+func (p *printer) writeWhitespace(n int) {
+ // write entries
+ for i := 0; i < n; i++ {
+ switch ch := p.wsbuf[i]; ch {
+ case ignore:
+ // ignore!
+ case indent:
+ p.indent++
+ case unindent:
+ p.indent--
+ if p.indent < 0 {
+ p.internalError("negative indentation:", p.indent)
+ p.indent = 0
+ }
+ case newline, formfeed:
+ // A line break immediately followed by a "correcting"
+ // unindent is swapped with the unindent - this permits
+ // proper label positioning. If a comment is between
+ // the line break and the label, the unindent is not
+ // part of the comment whitespace prefix and the comment
+ // will be positioned correctly indented.
+ if i+1 < n && p.wsbuf[i+1] == unindent {
+ // Use a formfeed to terminate the current section.
+ // Otherwise, a long label name on the next line leading
+ // to a wide column may increase the indentation column
+ // of lines before the label; effectively leading to wrong
+ // indentation.
+ p.wsbuf[i], p.wsbuf[i+1] = unindent, formfeed
+ i-- // do it again
+ continue
+ }
+ fallthrough
+ default:
+ p.writeByte(byte(ch), 1)
+ }
+ }
+
+ // shift remaining entries down
+ l := copy(p.wsbuf, p.wsbuf[n:])
+ p.wsbuf = p.wsbuf[:l]
+}
+
+// ----------------------------------------------------------------------------
+// Printing interface
+
+// nlimit limits n to maxNewlines.
+func nlimit(n int) int {
+ if n > maxNewlines {
+ n = maxNewlines
+ }
+ return n
+}
+
+func mayCombine(prev token.Token, next byte) (b bool) {
+ switch prev {
+ case token.INT:
+ b = next == '.' // 1.
+ case token.ADD:
+ b = next == '+' // ++
+ case token.SUB:
+ b = next == '-' // --
+ case token.QUO:
+ b = next == '*' // /*
+ case token.LSS:
+ b = next == '-' || next == '<' // <- or <<
+ case token.AND:
+ b = next == '&' || next == '^' // && or &^
+ }
+ return
+}
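The blank inserted when mayCombine reports true prevents adjacent tokens from fusing into a different token sequence. A standalone check with go/scanner (independent of this package) shows, for example, that "x + +y" and "x++y" scan differently:

package main

import (
	"fmt"
	"go/scanner"
	"go/token"
)

// scanTokens returns the token kinds go/scanner produces for src.
func scanTokens(src string) []token.Token {
	fset := token.NewFileSet()
	file := fset.AddFile("example.go", fset.Base(), len(src))

	var s scanner.Scanner
	s.Init(file, []byte(src), nil, 0)

	var toks []token.Token
	for {
		_, tok, _ := s.Scan()
		if tok == token.EOF {
			break
		}
		toks = append(toks, tok)
	}
	return toks
}

func main() {
	fmt.Println(scanTokens("x + +y")) // IDENT, +, +, IDENT (plus an inserted ;)
	fmt.Println(scanTokens("x++y"))   // IDENT, ++, IDENT (plus an inserted ;)
}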
+
+func (p *printer) setPos(pos token.Pos) {
+ if pos.IsValid() {
+ p.pos = p.posFor(pos) // accurate position of next item
+ }
+}
+
+// print prints a list of "items" (roughly corresponding to syntactic
+// tokens, but also including whitespace and formatting information).
+// It is the only print function that should be called directly from
+// any of the AST printing functions in nodes.go.
+//
+// Whitespace is accumulated until a non-whitespace token appears. Any
+// comments that need to appear before that token are printed first,
+// taking into account the amount and structure of any pending white-
+// space for best comment placement. Then, any leftover whitespace is
+// printed, followed by the actual token.
+func (p *printer) print(args ...any) {
+ for _, arg := range args {
+ // information about the current arg
+ var data string
+ var isLit bool
+ var impliedSemi bool // value for p.impliedSemi after this arg
+
+ // record previous opening token, if any
+ switch p.lastTok {
+ case token.ILLEGAL:
+ // ignore (white space)
+ case token.LPAREN, token.LBRACK:
+ p.prevOpen = p.lastTok
+ default:
+ // other tokens followed any opening token
+ p.prevOpen = token.ILLEGAL
+ }
+
+ switch x := arg.(type) {
+ case pmode:
+ // toggle printer mode
+ p.mode ^= x
+ continue
+
+ case whiteSpace:
+ if x == ignore {
+ // don't add ignore's to the buffer; they
+ // may screw up "correcting" unindents (see
+ // LabeledStmt)
+ continue
+ }
+ i := len(p.wsbuf)
+ if i == cap(p.wsbuf) {
+ // Whitespace sequences are very short so this should
+ // never happen. Handle gracefully (but possibly with
+ // bad comment placement) if it does happen.
+ p.writeWhitespace(i)
+ i = 0
+ }
+ p.wsbuf = p.wsbuf[0 : i+1]
+ p.wsbuf[i] = x
+ if x == newline || x == formfeed {
+ // newlines affect the current state (p.impliedSemi)
+ // and not the state after printing arg (impliedSemi)
+ // because comments can be interspersed before the arg
+ // in this case
+ p.impliedSemi = false
+ }
+ p.lastTok = token.ILLEGAL
+ continue
+
+ case *ast.Ident:
+ data = x.Name
+ impliedSemi = true
+ p.lastTok = token.IDENT
+
+ case *ast.BasicLit:
+ data = x.Value
+ isLit = true
+ impliedSemi = true
+ p.lastTok = x.Kind
+
+ case token.Token:
+ s := x.String()
+ if mayCombine(p.lastTok, s[0]) {
+ // the previous and the current token must be
+ // separated by a blank otherwise they combine
+ // into a different incorrect token sequence
+ // (except for token.INT followed by a '.' this
+ // should never happen because it is taken care
+ // of via binary expression formatting)
+ if len(p.wsbuf) != 0 {
+ p.internalError("whitespace buffer not empty")
+ }
+ p.wsbuf = p.wsbuf[0:1]
+ p.wsbuf[0] = ' '
+ }
+ data = s
+ // some keywords followed by a newline imply a semicolon
+ switch x {
+ case token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN,
+ token.INC, token.DEC, token.RPAREN, token.RBRACK, token.RBRACE:
+ impliedSemi = true
+ }
+ p.lastTok = x
+
+ case string:
+ // incorrect AST - print error message
+ data = x
+ isLit = true
+ impliedSemi = true
+ p.lastTok = token.STRING
+
+ default:
+ fmt.Fprintf(os.Stderr, "print: unsupported argument %v (%T)\n", arg, arg)
+ panic("go/printer type")
+ }
+ // data != ""
+
+ next := p.pos // estimated/accurate position of next item
+ wroteNewline, droppedFF := p.flush(next, p.lastTok)
+
+ // intersperse extra newlines if present in the source and
+ // if they don't cause extra semicolons (don't do this in
+ // flush as it will cause extra newlines at the end of a file)
+ if !p.impliedSemi {
+ n := nlimit(next.Line - p.pos.Line)
+ // don't exceed maxNewlines if we already wrote one
+ if wroteNewline && n == maxNewlines {
+ n = maxNewlines - 1
+ }
+ if n > 0 {
+ ch := byte('\n')
+ if droppedFF {
+ ch = '\f' // use formfeed since we dropped one before
+ }
+ p.writeByte(ch, n)
+ impliedSemi = false
+ }
+ }
+
+ // the next token starts now - record its line number if requested
+ if p.linePtr != nil {
+ *p.linePtr = p.out.Line
+ p.linePtr = nil
+ }
+
+ p.writeString(next, data, isLit)
+ p.impliedSemi = impliedSemi
+ }
+}
+
+// flush prints any pending comments and whitespace occurring textually
+// before the position of the next token tok. The flush result indicates
+// if a newline was written or if a formfeed was dropped from the whitespace
+// buffer.
+func (p *printer) flush(next token.Position, tok token.Token) (wroteNewline, droppedFF bool) {
+ if p.commentBefore(next) {
+ // if there are comments before the next item, intersperse them
+ wroteNewline, droppedFF = p.intersperseComments(next, tok)
+ } else {
+ // otherwise, write any leftover whitespace
+ p.writeWhitespace(len(p.wsbuf))
+ }
+ return
+}
+
+// getDoc returns the ast.CommentGroup associated with n, if any.
+func getDoc(n ast.Node) *ast.CommentGroup {
+ switch n := n.(type) {
+ case *ast.Field:
+ return n.Doc
+ case *ast.ImportSpec:
+ return n.Doc
+ case *ast.ValueSpec:
+ return n.Doc
+ case *ast.TypeSpec:
+ return n.Doc
+ case *ast.GenDecl:
+ return n.Doc
+ case *ast.FuncDecl:
+ return n.Doc
+ case *ast.File:
+ return n.Doc
+ }
+ return nil
+}
+
+func getLastComment(n ast.Node) *ast.CommentGroup {
+ switch n := n.(type) {
+ case *ast.Field:
+ return n.Comment
+ case *ast.ImportSpec:
+ return n.Comment
+ case *ast.ValueSpec:
+ return n.Comment
+ case *ast.TypeSpec:
+ return n.Comment
+ case *ast.GenDecl:
+ if len(n.Specs) > 0 {
+ return getLastComment(n.Specs[len(n.Specs)-1])
+ }
+ case *ast.File:
+ if len(n.Comments) > 0 {
+ return n.Comments[len(n.Comments)-1]
+ }
+ }
+ return nil
+}
+
+func (p *printer) printNode(node any) error {
+ // unpack *CommentedNode, if any
+ var comments []*ast.CommentGroup
+ if cnode, ok := node.(*CommentedNode); ok {
+ node = cnode.Node
+ comments = cnode.Comments
+ }
+
+ if comments != nil {
+ // commented node - restrict comment list to relevant range
+ n, ok := node.(ast.Node)
+ if !ok {
+ goto unsupported
+ }
+ beg := n.Pos()
+ end := n.End()
+ // if the node has associated documentation,
+ // include that commentgroup in the range
+ // (the comment list is sorted in the order
+ // of the comment appearance in the source code)
+ if doc := getDoc(n); doc != nil {
+ beg = doc.Pos()
+ }
+ if com := getLastComment(n); com != nil {
+ if e := com.End(); e > end {
+ end = e
+ }
+ }
+ // token.Pos values are global offsets, we can
+ // compare them directly
+ i := 0
+ for i < len(comments) && comments[i].End() < beg {
+ i++
+ }
+ j := i
+ for j < len(comments) && comments[j].Pos() < end {
+ j++
+ }
+ if i < j {
+ p.comments = comments[i:j]
+ }
+ } else if n, ok := node.(*ast.File); ok {
+ // use ast.File comments, if any
+ p.comments = n.Comments
+ }
+
+ // if there are no comments, use node comments
+ p.useNodeComments = p.comments == nil
+
+ // get comments ready for use
+ p.nextComment()
+
+ p.print(pmode(0))
+
+ // format node
+ switch n := node.(type) {
+ case ast.Expr:
+ p.expr(n)
+ case ast.Stmt:
+ // A labeled statement will un-indent to position the label.
+ // Set p.indent to 1 so we don't get indent "underflow".
+ if _, ok := n.(*ast.LabeledStmt); ok {
+ p.indent = 1
+ }
+ p.stmt(n, false)
+ case ast.Decl:
+ p.decl(n)
+ case ast.Spec:
+ p.spec(n, 1, false)
+ case []ast.Stmt:
+ // A labeled statement will un-indent to position the label.
+ // Set p.indent to 1 so we don't get indent "underflow".
+ for _, s := range n {
+ if _, ok := s.(*ast.LabeledStmt); ok {
+ p.indent = 1
+ }
+ }
+ p.stmtList(n, 0, false)
+ case []ast.Decl:
+ p.declList(n)
+ case *ast.File:
+ p.file(n)
+ default:
+ goto unsupported
+ }
+
+ return p.sourcePosErr
+
+unsupported:
+ return fmt.Errorf("go/printer: unsupported node type %T", node)
+}
+
+// ----------------------------------------------------------------------------
+// Trimmer
+
+// A trimmer is an io.Writer filter for stripping tabwriter.Escape
+// characters, trailing blanks and tabs, and for converting formfeed
+// and vtab characters into newlines and htabs (in case no tabwriter
+// is used). Text bracketed by tabwriter.Escape characters is passed
+// through unchanged.
+type trimmer struct {
+ output io.Writer
+ state int
+ space []byte
+}
+
+// trimmer is implemented as a state machine.
+// It can be in one of the following states:
+const (
+ inSpace = iota // inside space
+ inEscape // inside text bracketed by tabwriter.Escapes
+ inText // inside text
+)
+
+func (p *trimmer) resetSpace() {
+ p.state = inSpace
+ p.space = p.space[0:0]
+}
+
+// Design note: It is tempting to eliminate extra blanks occurring in
+// whitespace in this function as it could simplify some
+// of the blanks logic in the node printing functions.
+// However, this would mess up any formatting done by
+// the tabwriter.
+
+var aNewline = []byte("\n")
+
+func (p *trimmer) Write(data []byte) (n int, err error) {
+ // invariants:
+ // p.state == inSpace:
+ // p.space is unwritten
+ // p.state == inEscape, inText:
+ // data[m:n] is unwritten
+ m := 0
+ var b byte
+ for n, b = range data {
+ if b == '\v' {
+ b = '\t' // convert to htab
+ }
+ switch p.state {
+ case inSpace:
+ switch b {
+ case '\t', ' ':
+ p.space = append(p.space, b)
+ case '\n', '\f':
+ p.resetSpace() // discard trailing space
+ _, err = p.output.Write(aNewline)
+ case tabwriter.Escape:
+ _, err = p.output.Write(p.space)
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
+ default:
+ _, err = p.output.Write(p.space)
+ p.state = inText
+ m = n
+ }
+ case inEscape:
+ if b == tabwriter.Escape {
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ }
+ case inText:
+ switch b {
+ case '\t', ' ':
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ p.space = append(p.space, b)
+ case '\n', '\f':
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ if err == nil {
+ _, err = p.output.Write(aNewline)
+ }
+ case tabwriter.Escape:
+ _, err = p.output.Write(data[m:n])
+ p.state = inEscape
+ m = n + 1 // +1: skip tabwriter.Escape
+ }
+ default:
+ panic("unreachable")
+ }
+ if err != nil {
+ return
+ }
+ }
+ n = len(data)
+
+ switch p.state {
+ case inEscape, inText:
+ _, err = p.output.Write(data[m:n])
+ p.resetSpace()
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Public interface
+
+// A Mode value is a set of flags (or 0). They control printing.
+type Mode uint
+
+const (
+ RawFormat Mode = 1 << iota // do not use a tabwriter; if set, UseSpaces is ignored
+ TabIndent // use tabs for indentation independent of UseSpaces
+ UseSpaces // use spaces instead of tabs for alignment
+ SourcePos // emit //line directives to preserve original source positions
+)
+
+// The mode below is not included in printer's public API because
+// editing code text is deemed out of scope. Because this mode is
+// unexported, it's also possible to modify or remove it based on
+// the evolving needs of go/format and cmd/gofmt without breaking
+// users. See discussion in CL 240683.
+const (
+ // normalizeNumbers means to canonicalize number
+ // literal prefixes and exponents while printing.
+ //
+ // This value is known in and used by go/format and cmd/gofmt.
+ // It is currently more convenient and performant for those
+ // packages to apply number normalization during printing,
+ // rather than by modifying the AST in advance.
+ normalizeNumbers Mode = 1 << 30
+)
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ Mode Mode // default: 0
+ Tabwidth int // default: 8
+ Indent int // default: 0 (all code is indented at least by this much)
+}
+
+var printerPool = sync.Pool{
+ New: func() any {
+ return &printer{
+ // Whitespace sequences are short.
+ wsbuf: make([]whiteSpace, 0, 16),
+ // We start the printer with a 16K output buffer, which is currently
+ // larger than about 80% of Go files in the standard library.
+ output: make([]byte, 0, 16<<10),
+ }
+ },
+}
+
+func newPrinter(cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) *printer {
+ p := printerPool.Get().(*printer)
+ *p = printer{
+ Config: *cfg,
+ fset: fset,
+ pos: token.Position{Line: 1, Column: 1},
+ out: token.Position{Line: 1, Column: 1},
+ wsbuf: p.wsbuf[:0],
+ nodeSizes: nodeSizes,
+ cachedPos: -1,
+ output: p.output[:0],
+ }
+ return p
+}
+
+func (p *printer) free() {
+ // Hard limit on buffer size; see https://golang.org/issue/23199.
+ if cap(p.output) > 64<<10 {
+ return
+ }
+
+ printerPool.Put(p)
+}
+
+// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
+func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node any, nodeSizes map[ast.Node]int) (err error) {
+ // print node
+ p := newPrinter(cfg, fset, nodeSizes)
+ defer p.free()
+ if err = p.printNode(node); err != nil {
+ return
+ }
+ // print outstanding comments
+ p.impliedSemi = false // EOF acts like a newline
+ p.flush(token.Position{Offset: infinity, Line: infinity}, token.EOF)
+
+ // output is buffered in p.output now.
+ // fix //go:build and // +build comments if needed.
+ p.fixGoBuildLines()
+
+ // redirect output through a trimmer to eliminate trailing whitespace
+ // (Input to a tabwriter must be untrimmed since trailing tabs provide
+ // formatting information. The tabwriter could provide trimming
+ // functionality but no tabwriter is used when RawFormat is set.)
+ output = &trimmer{output: output}
+
+ // redirect output through a tabwriter if necessary
+ if cfg.Mode&RawFormat == 0 {
+ minwidth := cfg.Tabwidth
+
+ padchar := byte('\t')
+ if cfg.Mode&UseSpaces != 0 {
+ padchar = ' '
+ }
+
+ twmode := tabwriter.DiscardEmptyColumns
+ if cfg.Mode&TabIndent != 0 {
+ minwidth = 0
+ twmode |= tabwriter.TabIndent
+ }
+
+ output = tabwriter.NewWriter(output, minwidth, cfg.Tabwidth, 1, padchar, twmode)
+ }
+
+ // write printer result via tabwriter/trimmer to output
+ if _, err = output.Write(p.output); err != nil {
+ return
+ }
+
+ // flush tabwriter, if any
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return
+}
+
+// A CommentedNode bundles an AST node and corresponding comments.
+// It may be provided as argument to any of the Fprint functions.
+type CommentedNode struct {
+ Node any // *ast.File, or ast.Expr, ast.Decl, ast.Spec, or ast.Stmt
+ Comments []*ast.CommentGroup
+}
+
+// Fprint "pretty-prints" an AST node to output for a given configuration cfg.
+// Position information is interpreted relative to the file set fset.
+// The node type must be *ast.File, *CommentedNode, []ast.Decl, []ast.Stmt,
+// or assignment-compatible to ast.Expr, ast.Decl, ast.Spec, or ast.Stmt.
+func (cfg *Config) Fprint(output io.Writer, fset *token.FileSet, node any) error {
+ return cfg.fprint(output, fset, node, make(map[ast.Node]int))
+}
+
+// Fprint "pretty-prints" an AST node to output.
+// It calls Config.Fprint with default settings.
+// Note that gofmt uses tabs for indentation but spaces for alignment;
+// use format.Node (package go/format) for output that matches gofmt.
+func Fprint(output io.Writer, fset *token.FileSet, node any) error {
+ return (&Config{Tabwidth: 8}).Fprint(output, fset, node)
+}
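A minimal end-to-end usage sketch (the file name and source text are made up): parse a snippet with go/parser and pretty-print it with an explicit Config. For output that matches gofmt exactly, format.Node from go/format is the intended entry point, as noted above.

package main

import (
	"go/parser"
	"go/printer"
	"go/token"
	"log"
	"os"
)

func main() {
	const src = `package main
const answer = 42
func main() { println(answer) }
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "example.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}

	// Tabs for indentation, spaces for alignment - the gofmt-like setup.
	cfg := printer.Config{Mode: printer.UseSpaces | printer.TabIndent, Tabwidth: 8}
	if err := cfg.Fprint(os.Stdout, fset, f); err != nil {
		log.Fatal(err)
	}
	// The printed declarations end up separated by blank lines because the
	// declaration token changes between them (see declList in nodes.go).
}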
diff --git a/src/go/printer/printer_test.go b/src/go/printer/printer_test.go
new file mode 100644
index 0000000..3a8ce60
--- /dev/null
+++ b/src/go/printer/printer_test.go
@@ -0,0 +1,827 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package printer
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "internal/diff"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+)
+
+const (
+ dataDir = "testdata"
+ tabwidth = 8
+)
+
+var update = flag.Bool("update", false, "update golden files")
+
+var fset = token.NewFileSet()
+
+type checkMode uint
+
+const (
+ export checkMode = 1 << iota
+ rawFormat
+ normNumber
+ idempotent
+ allowTypeParams
+)
+
+// format parses src, prints the corresponding AST, verifies the resulting
+// src is syntactically correct, and returns the resulting src or an error
+// if any.
+func format(src []byte, mode checkMode) ([]byte, error) {
+ // parse src
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ return nil, fmt.Errorf("parse: %s\n%s", err, src)
+ }
+
+ // filter exports if necessary
+ if mode&export != 0 {
+ ast.FileExports(f) // ignore result
+ f.Comments = nil // don't print comments that are not in AST
+ }
+
+ // determine printer configuration
+ cfg := Config{Tabwidth: tabwidth}
+ if mode&rawFormat != 0 {
+ cfg.Mode |= RawFormat
+ }
+ if mode&normNumber != 0 {
+ cfg.Mode |= normalizeNumbers
+ }
+
+ // print AST
+ var buf bytes.Buffer
+ if err := cfg.Fprint(&buf, fset, f); err != nil {
+ return nil, fmt.Errorf("print: %s", err)
+ }
+
+ // make sure formatted output is syntactically correct
+ res := buf.Bytes()
+ if _, err := parser.ParseFile(fset, "", res, parser.ParseComments); err != nil {
+ return nil, fmt.Errorf("re-parse: %s\n%s", err, buf.Bytes())
+ }
+
+ return res, nil
+}
+
+// lineAt returns the line in text starting at offset offs.
+func lineAt(text []byte, offs int) []byte {
+ i := offs
+ for i < len(text) && text[i] != '\n' {
+ i++
+ }
+ return text[offs:i]
+}
+
+// checkEqual compares a and b.
+func checkEqual(aname, bname string, a, b []byte) error {
+ if bytes.Equal(a, b) {
+ return nil
+ }
+ return errors.New(string(diff.Diff(aname, a, bname, b)))
+}
+
+func runcheck(t *testing.T, source, golden string, mode checkMode) {
+ src, err := os.ReadFile(source)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ res, err := format(src, mode)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // update golden files if necessary
+ if *update {
+ if err := os.WriteFile(golden, res, 0644); err != nil {
+ t.Error(err)
+ }
+ return
+ }
+
+ // get golden
+ gld, err := os.ReadFile(golden)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ // formatted source and golden must be the same
+ if err := checkEqual(source, golden, res, gld); err != nil {
+ t.Error(err)
+ return
+ }
+
+ if mode&idempotent != 0 {
+ // formatting golden must be idempotent
+ // (This is very difficult to achieve in general and for now
+ // it is only checked for files explicitly marked as such.)
+ res, err = format(gld, mode)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if err := checkEqual(golden, fmt.Sprintf("format(%s)", golden), gld, res); err != nil {
+ t.Errorf("golden is not idempotent: %s", err)
+ }
+ }
+}
+
+func check(t *testing.T, source, golden string, mode checkMode) {
+ // run the test
+ cc := make(chan int, 1)
+ go func() {
+ runcheck(t, source, golden, mode)
+ cc <- 0
+ }()
+
+ // wait with timeout
+ select {
+	case <-time.After(10 * time.Second): // a generous safety margin, even for very slow machines
+		// test ran past the timeout
+ t.Errorf("%s: running too slowly", source)
+ case <-cc:
+ // test finished within allotted time margin
+ }
+}
+
+type entry struct {
+ source, golden string
+ mode checkMode
+}
+
+// Use go test -update to create/update the respective golden files.
+var data = []entry{
+ {"empty.input", "empty.golden", idempotent},
+ {"comments.input", "comments.golden", 0},
+ {"comments.input", "comments.x", export},
+ {"comments2.input", "comments2.golden", idempotent},
+ {"alignment.input", "alignment.golden", idempotent},
+ {"linebreaks.input", "linebreaks.golden", idempotent},
+ {"expressions.input", "expressions.golden", idempotent},
+ {"expressions.input", "expressions.raw", rawFormat | idempotent},
+ {"declarations.input", "declarations.golden", 0},
+ {"statements.input", "statements.golden", 0},
+ {"slow.input", "slow.golden", idempotent},
+ {"complit.input", "complit.x", export},
+ {"go2numbers.input", "go2numbers.golden", idempotent},
+ {"go2numbers.input", "go2numbers.norm", normNumber | idempotent},
+ {"generics.input", "generics.golden", idempotent | allowTypeParams},
+ {"gobuild1.input", "gobuild1.golden", idempotent},
+ {"gobuild2.input", "gobuild2.golden", idempotent},
+ {"gobuild3.input", "gobuild3.golden", idempotent},
+ {"gobuild4.input", "gobuild4.golden", idempotent},
+ {"gobuild5.input", "gobuild5.golden", idempotent},
+ {"gobuild6.input", "gobuild6.golden", idempotent},
+ {"gobuild7.input", "gobuild7.golden", idempotent},
+}
+
+func TestFiles(t *testing.T) {
+ t.Parallel()
+ for _, e := range data {
+ source := filepath.Join(dataDir, e.source)
+ golden := filepath.Join(dataDir, e.golden)
+ mode := e.mode
+ t.Run(e.source, func(t *testing.T) {
+ t.Parallel()
+ check(t, source, golden, mode)
+ // TODO(gri) check that golden is idempotent
+ //check(t, golden, golden, e.mode)
+ })
+ }
+}
+
+// TestLineComments, using a simple test case, checks that consecutive line
+// comments are properly terminated with a newline even if the AST position
+// information is incorrect.
+func TestLineComments(t *testing.T) {
+ const src = `// comment 1
+ // comment 2
+ // comment 3
+ package main
+ `
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ fset = token.NewFileSet() // use the wrong file set
+ Fprint(&buf, fset, f)
+
+ nlines := 0
+ for _, ch := range buf.Bytes() {
+ if ch == '\n' {
+ nlines++
+ }
+ }
+
+ const expected = 3
+ if nlines < expected {
+ t.Errorf("got %d, expected %d\n", nlines, expected)
+ t.Errorf("result:\n%s", buf.Bytes())
+ }
+}
+
+// Verify that the printer can be invoked during initialization.
+func init() {
+ const name = "foobar"
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, &ast.Ident{Name: name}); err != nil {
+ panic(err) // error in test
+ }
+ // in debug mode, the result contains additional information;
+ // ignore it
+ if s := buf.String(); !debug && s != name {
+ panic("got " + s + ", want " + name)
+ }
+}
+
+// Verify that the printer doesn't crash if the AST contains BadXXX nodes.
+func TestBadNodes(t *testing.T) {
+ const src = "package p\n("
+ const res = "package p\nBadDecl\n"
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err == nil {
+ t.Error("expected illegal program") // error in test
+ }
+ var buf bytes.Buffer
+ Fprint(&buf, fset, f)
+ if buf.String() != res {
+ t.Errorf("got %q, expected %q", buf.String(), res)
+ }
+}
+
+// testComment verifies that f can be parsed again after printing it
+// with its first comment set to comment at any possible source offset.
+func testComment(t *testing.T, f *ast.File, srclen int, comment *ast.Comment) {
+ f.Comments[0].List[0] = comment
+ var buf bytes.Buffer
+ for offs := 0; offs <= srclen; offs++ {
+ buf.Reset()
+ // Printing f should result in a correct program no
+ // matter what the (incorrect) comment position is.
+ if err := Fprint(&buf, fset, f); err != nil {
+ t.Error(err)
+ }
+ if _, err := parser.ParseFile(fset, "", buf.Bytes(), 0); err != nil {
+ t.Fatalf("incorrect program for pos = %d:\n%s", comment.Slash, buf.String())
+ }
+ // Position information is just an offset.
+ // Move comment one byte down in the source.
+ comment.Slash++
+ }
+}
+
+// Verify that the printer produces a correct program
+// even if the position information of comments introducing newlines
+// is incorrect.
+func TestBadComments(t *testing.T) {
+ t.Parallel()
+ const src = `
+// first comment - text and position changed by test
+package p
+import "fmt"
+const pi = 3.14 // rough circle
+var (
+ x, y, z int = 1, 2, 3
+ u, v float64
+)
+func fibo(n int) {
+ if n < 2 {
+ return n /* seed values */
+ }
+ return fibo(n-1) + fibo(n-2)
+}
+`
+
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Error(err) // error in test
+ }
+
+ comment := f.Comments[0].List[0]
+ pos := comment.Pos()
+ if fset.PositionFor(pos, false /* absolute position */).Offset != 1 {
+ t.Error("expected offset 1") // error in test
+ }
+
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "//-style comment"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment */"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style \n comment */"})
+ testComment(t, f, len(src), &ast.Comment{Slash: pos, Text: "/*-style comment \n\n\n */"})
+}
+
+type visitor chan *ast.Ident
+
+func (v visitor) Visit(n ast.Node) (w ast.Visitor) {
+ if ident, ok := n.(*ast.Ident); ok {
+ v <- ident
+ }
+ return v
+}
+
+// idents is an iterator that returns all idents in f via the result channel.
+func idents(f *ast.File) <-chan *ast.Ident {
+ v := make(visitor)
+ go func() {
+ ast.Walk(v, f)
+ close(v)
+ }()
+ return v
+}
+
+// identCount returns the number of identifiers found in f.
+func identCount(f *ast.File) int {
+ n := 0
+ for range idents(f) {
+ n++
+ }
+ return n
+}
+
+// Verify that the SourcePos mode emits correct //line directives
+// by testing that position information for matching identifiers
+// is maintained.
+func TestSourcePos(t *testing.T) {
+ const src = `
+package p
+import ( "go/printer"; "math" )
+const pi = 3.14; var x = 0
+type t struct{ x, y, z int; u, v, w float32 }
+func (t *t) foo(a, b, c int) int {
+ return a*t.x + b*t.y +
+ // two extra lines here
+ // ...
+ c*t.z
+}
+`
+
+ // parse original
+ f1, err := parser.ParseFile(fset, "src", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // pretty-print original
+ var buf bytes.Buffer
+ err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // parse pretty printed original
+ // (//line directives must be interpreted even w/o parser.ParseComments set)
+ f2, err := parser.ParseFile(fset, "", buf.Bytes(), 0)
+ if err != nil {
+ t.Fatalf("%s\n%s", err, buf.Bytes())
+ }
+
+ // At this point the position information of identifiers in f2 should
+ // match the position information of corresponding identifiers in f1.
+
+ // number of identifiers must be > 0 (test should run) and must match
+ n1 := identCount(f1)
+ n2 := identCount(f2)
+ if n1 == 0 {
+ t.Fatal("got no idents")
+ }
+ if n2 != n1 {
+ t.Errorf("got %d idents; want %d", n2, n1)
+ }
+
+ // verify that all identifiers have correct line information
+ i2range := idents(f2)
+ for i1 := range idents(f1) {
+ i2 := <-i2range
+
+ if i2.Name != i1.Name {
+ t.Errorf("got ident %s; want %s", i2.Name, i1.Name)
+ }
+
+ // here we care about the relative (line-directive adjusted) positions
+ l1 := fset.Position(i1.Pos()).Line
+ l2 := fset.Position(i2.Pos()).Line
+ if l2 != l1 {
+ t.Errorf("got line %d; want %d for %s", l2, l1, i1.Name)
+ }
+ }
+
+ if t.Failed() {
+ t.Logf("\n%s", buf.Bytes())
+ }
+}
+
+// Verify that the SourcePos mode doesn't emit unnecessary //line directives
+// before empty lines.
+func TestIssue5945(t *testing.T) {
+ const orig = `
+package p // line 2
+func f() {} // line 3
+
+var x, y, z int
+
+
+func g() { // line 8
+}
+`
+
+ const want = `//line src.go:2
+package p
+
+//line src.go:3
+func f() {}
+
+var x, y, z int
+
+//line src.go:8
+func g() {
+}
+`
+
+ // parse original
+ f1, err := parser.ParseFile(fset, "src.go", orig, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // pretty-print original
+ var buf bytes.Buffer
+ err = (&Config{Mode: UseSpaces | SourcePos, Tabwidth: 8}).Fprint(&buf, fset, f1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got := buf.String()
+
+ // compare original with desired output
+ if got != want {
+ t.Errorf("got:\n%s\nwant:\n%s\n", got, want)
+ }
+}
+
+var decls = []string{
+ `import "fmt"`,
+ "const pi = 3.1415\nconst e = 2.71828\n\nvar x = pi",
+ "func sum(x, y int) int\t{ return x + y }",
+}
+
+func TestDeclLists(t *testing.T) {
+ for _, src := range decls {
+ file, err := parser.ParseFile(fset, "", "package p;"+src, parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, file.Decls) // only print declarations
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ out := buf.String()
+ if out != src {
+ t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+ }
+ }
+}
+
+var stmts = []string{
+ "i := 0",
+ "select {}\nvar a, b = 1, 2\nreturn a + b",
+ "go f()\ndefer func() {}()",
+}
+
+func TestStmtLists(t *testing.T) {
+ for _, src := range stmts {
+ file, err := parser.ParseFile(fset, "", "package p; func _() {"+src+"}", parser.ParseComments)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, file.Decls[0].(*ast.FuncDecl).Body.List) // only print statements
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ out := buf.String()
+ if out != src {
+ t.Errorf("\ngot : %q\nwant: %q\n", out, src)
+ }
+ }
+}
+
+func TestBaseIndent(t *testing.T) {
+ t.Parallel()
+	// The testfile must not contain multi-line raw strings since those
+	// are not indented (because their values must not change) and would
+	// make this test fail.
+ const filename = "printer.go"
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ panic(err) // error in test
+ }
+
+ for indent := 0; indent < 4; indent++ {
+ indent := indent
+ t.Run(fmt.Sprint(indent), func(t *testing.T) {
+ t.Parallel()
+ var buf bytes.Buffer
+ (&Config{Tabwidth: tabwidth, Indent: indent}).Fprint(&buf, fset, file)
+ // all code must be indented by at least 'indent' tabs
+ lines := bytes.Split(buf.Bytes(), []byte{'\n'})
+ for i, line := range lines {
+ if len(line) == 0 {
+ continue // empty lines don't have indentation
+ }
+ n := 0
+ for j, b := range line {
+ if b != '\t' {
+ // end of indentation
+ n = j
+ break
+ }
+ }
+ if n < indent {
+ t.Errorf("line %d: got only %d tabs; want at least %d: %q", i, n, indent, line)
+ }
+ }
+ })
+ }
+}
+
+// TestFuncType tests that an ast.FuncType with a nil Params field
+// can be printed (per go/ast specification). Test case for issue 3870.
+func TestFuncType(t *testing.T) {
+ src := &ast.File{
+ Name: &ast.Ident{Name: "p"},
+ Decls: []ast.Decl{
+ &ast.FuncDecl{
+ Name: &ast.Ident{Name: "f"},
+ Type: &ast.FuncType{},
+ },
+ },
+ }
+
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, src); err != nil {
+ t.Fatal(err)
+ }
+ got := buf.String()
+
+ const want = `package p
+
+func f()
+`
+
+ if got != want {
+ t.Fatalf("got:\n%s\nwant:\n%s\n", got, want)
+ }
+}
+
+type limitWriter struct {
+ remaining int
+ errCount int
+}
+
+func (l *limitWriter) Write(buf []byte) (n int, err error) {
+ n = len(buf)
+ if n >= l.remaining {
+ n = l.remaining
+ err = io.EOF
+ l.errCount++
+ }
+ l.remaining -= n
+ return n, err
+}
+
+// Test whether the printer stops writing after the first error
+func TestWriteErrors(t *testing.T) {
+ t.Parallel()
+ const filename = "printer.go"
+ src, err := os.ReadFile(filename)
+ if err != nil {
+ panic(err) // error in test
+ }
+ file, err := parser.ParseFile(fset, filename, src, 0)
+ if err != nil {
+ panic(err) // error in test
+ }
+ for i := 0; i < 20; i++ {
+ lw := &limitWriter{remaining: i}
+ err := (&Config{Mode: RawFormat}).Fprint(lw, fset, file)
+ if lw.errCount > 1 {
+ t.Fatal("Writes continued after first error returned")
+ }
+		// We expect errCount to be 1 iff err is set
+ if (lw.errCount != 0) != (err != nil) {
+ t.Fatal("Expected err when errCount != 0")
+ }
+ }
+}
+
+// TestX is a skeleton test that can be filled in for debugging one-off cases.
+// Do not remove.
+func TestX(t *testing.T) {
+ const src = `
+package p
+func _() {}
+`
+ _, err := format([]byte(src), 0)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestCommentedNode(t *testing.T) {
+ const (
+ input = `package main
+
+func foo() {
+ // comment inside func
+}
+
+// leading comment
+type bar int // comment2
+
+`
+
+ foo = `func foo() {
+ // comment inside func
+}`
+
+ bar = `// leading comment
+type bar int // comment2
+`
+ )
+
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "input.go", input, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+
+ err = Fprint(&buf, fset, &CommentedNode{Node: f.Decls[0], Comments: f.Comments})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if buf.String() != foo {
+ t.Errorf("got %q, want %q", buf.String(), foo)
+ }
+
+ buf.Reset()
+
+ err = Fprint(&buf, fset, &CommentedNode{Node: f.Decls[1], Comments: f.Comments})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if buf.String() != bar {
+ t.Errorf("got %q, want %q", buf.String(), bar)
+ }
+}
+
+func TestIssue11151(t *testing.T) {
+ const src = "package p\t/*\r/1\r*\r/2*\r\r\r\r/3*\r\r+\r\r/4*/\n"
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf bytes.Buffer
+ Fprint(&buf, fset, f)
+ got := buf.String()
+ const want = "package p\t/*/1*\r/2*\r/3*+/4*/\n" // \r following opening /* should be stripped
+ if got != want {
+ t.Errorf("\ngot : %q\nwant: %q", got, want)
+ }
+
+ // the resulting program must be valid
+ _, err = parser.ParseFile(fset, "", got, 0)
+ if err != nil {
+ t.Errorf("%v\norig: %q\ngot : %q", err, src, got)
+ }
+}
+
+// If a declaration has multiple specifications, a parenthesized
+// declaration must be printed even if Lparen is token.NoPos.
+func TestParenthesizedDecl(t *testing.T) {
+ // a package with multiple specs in a single declaration
+ const src = "package p; var ( a float64; b int )"
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // print the original package
+ var buf bytes.Buffer
+ err = Fprint(&buf, fset, f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ original := buf.String()
+
+ // now remove parentheses from the declaration
+ for i := 0; i != len(f.Decls); i++ {
+ f.Decls[i].(*ast.GenDecl).Lparen = token.NoPos
+ }
+ buf.Reset()
+ err = Fprint(&buf, fset, f)
+ if err != nil {
+ t.Fatal(err)
+ }
+ noparen := buf.String()
+
+ if noparen != original {
+ t.Errorf("got %q, want %q", noparen, original)
+ }
+}
+
+// Verify that we don't print a newline between "return" and its results, as
+// that would incorrectly cause a naked return.
+func TestIssue32854(t *testing.T) {
+ src := `package foo
+
+func f() {
+ return Composite{
+ call(),
+ }
+}`
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", src, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ // Replace the result with call(), which is on the next line.
+ fd := file.Decls[0].(*ast.FuncDecl)
+ ret := fd.Body.List[0].(*ast.ReturnStmt)
+ ret.Results[0] = ret.Results[0].(*ast.CompositeLit).Elts[0]
+
+ var buf bytes.Buffer
+ if err := Fprint(&buf, fset, ret); err != nil {
+ t.Fatal(err)
+ }
+ want := "return call()"
+ if got := buf.String(); got != want {
+ t.Fatalf("got %q, want %q", got, want)
+ }
+}
+
+func TestSourcePosNewline(t *testing.T) {
+ // We don't provide a syntax for escaping or unescaping characters in line
+ // directives (see https://go.dev/issue/24183#issuecomment-372449628).
+ // As a result, we cannot write a line directive with the correct path for a
+ // filename containing newlines. We should return an error rather than
+ // silently dropping or mangling it.
+
+ fname := "foo\nbar/bar.go"
+ src := `package bar`
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors|parser.SkipObjectResolution)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cfg := &Config{
+ Mode: SourcePos, // emit line comments
+ Tabwidth: 8,
+ }
+ var buf bytes.Buffer
+ if err := cfg.Fprint(&buf, fset, f); err == nil {
+ t.Errorf("Fprint did not error for source file path containing newline")
+ }
+ if buf.Len() != 0 {
+ t.Errorf("unexpected Fprint output:\n%s", buf.Bytes())
+ }
+}
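
As background for the SourcePos tests above (TestSourcePos, TestIssue5945, TestSourcePosNewline), a minimal sketch of using SourcePos mode outside the test suite; the filename and source text are illustrative assumptions.

    package main

    import (
    	"bytes"
    	"fmt"
    	"go/parser"
    	"go/printer"
    	"go/token"
    )

    func main() {
    	const src = "package p\n\nfunc f() {}\n"
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "src.go", src, 0)
    	if err != nil {
    		panic(err)
    	}

    	// SourcePos makes the printer emit //line directives so positions in
    	// the printed output map back to the original file.
    	var buf bytes.Buffer
    	cfg := &printer.Config{Mode: printer.UseSpaces | printer.SourcePos, Tabwidth: 8}
    	if err := cfg.Fprint(&buf, fset, f); err != nil {
    		panic(err)
    	}
    	fmt.Print(buf.String()) // output contains lines such as: //line src.go:3
    }
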
diff --git a/src/go/printer/testdata/alignment.golden b/src/go/printer/testdata/alignment.golden
new file mode 100644
index 0000000..96086ed
--- /dev/null
+++ b/src/go/printer/testdata/alignment.golden
@@ -0,0 +1,172 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package alignment
+
+// ----------------------------------------------------------------------------
+// Examples from issue #7335.
+
+func main() {
+ z := MyStruct{
+ Foo: "foo",
+ Bar: "bar",
+ Name: "name",
+ LongName: "longname",
+ Baz: "baz",
+ }
+ y := MyStruct{
+ Foo: "foo",
+ Bar: "bar",
+ NameXX: "name",
+ LongNameXXXXXXXXXXXXX: "longname",
+ Baz: "baz",
+ }
+ z := MyStruct{
+ Foo: "foo",
+ Bar: "bar",
+ Name: "name",
+ LongNameXXXXXXXXXXXXX: "longname",
+ Baz: "baz",
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #10392.
+
+var kcfg = KubeletConfig{
+ Address: s.Address,
+ AllowPrivileged: s.AllowPrivileged,
+ HostNetworkSources: hostNetworkSources,
+ HostnameOverride: s.HostnameOverride,
+ RootDirectory: s.RootDirectory,
+ ConfigFile: s.Config,
+ ManifestURL: s.ManifestURL,
+ FileCheckFrequency: s.FileCheckFrequency,
+ HTTPCheckFrequency: s.HTTPCheckFrequency,
+ PodInfraContainerImage: s.PodInfraContainerImage,
+ SyncFrequency: s.SyncFrequency,
+ RegistryPullQPS: s.RegistryPullQPS,
+ RegistryBurst: s.RegistryBurst,
+ MinimumGCAge: s.MinimumGCAge,
+ MaxPerPodContainerCount: s.MaxPerPodContainerCount,
+ MaxContainerCount: s.MaxContainerCount,
+ ClusterDomain: s.ClusterDomain,
+ ClusterDNS: s.ClusterDNS,
+ Runonce: s.RunOnce,
+ Port: s.Port,
+ ReadOnlyPort: s.ReadOnlyPort,
+ CadvisorInterface: cadvisorInterface,
+ EnableServer: s.EnableServer,
+ EnableDebuggingHandlers: s.EnableDebuggingHandlers,
+ DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
+ KubeClient: client,
+ MasterServiceNamespace: s.MasterServiceNamespace,
+ VolumePlugins: ProbeVolumePlugins(),
+ NetworkPlugins: ProbeNetworkPlugins(),
+ NetworkPluginName: s.NetworkPluginName,
+ StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
+ TLSOptions: tlsOptions,
+ ImageGCPolicy: imageGCPolicy, imageGCPolicy,
+ Cloud: cloud,
+ NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
+}
+
+var a = A{
+ Long: 1,
+ LongLong: 1,
+ LongLongLong: 1,
+ LongLongLongLong: 1,
+ LongLongLongLongLong: 1,
+ LongLongLongLongLongLong: 1,
+ LongLongLongLongLongLongLong: 1,
+ LongLongLongLongLongLongLongLong: 1,
+ Short: 1,
+ LongLongLongLongLongLongLongLongLong: 3,
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #22852.
+
+var fmtMap = map[string]string{
+ "1": "123",
+ "12": "123",
+ "123": "123",
+ "1234": "123",
+ "12345": "123",
+ "123456": "123",
+ "12345678901234567890123456789": "123",
+ "abcde": "123",
+ "123456789012345678901234567890": "123",
+ "1234567": "123",
+ "abcdefghijklmnopqrstuvwxyzabcd": "123",
+ "abcd": "123",
+}
+
+type Fmt struct {
+ abcdefghijklmnopqrstuvwx string
+ abcdefghijklmnopqrstuvwxy string
+ abcdefghijklmnopqrstuvwxyz string
+ abcdefghijklmnopqrstuvwxyza string
+ abcdefghijklmnopqrstuvwxyzab string
+ abcdefghijklmnopqrstuvwxyzabc string
+ abcde string
+ abcdefghijklmnopqrstuvwxyzabcde string
+ abcdefg string
+}
+
+func main() {
+ _ := Fmt{
+ abcdefghijklmnopqrstuvwx: "foo",
+ abcdefghijklmnopqrstuvwxyza: "foo",
+ abcdefghijklmnopqrstuvwxyzab: "foo",
+ abcdefghijklmnopqrstuvwxyzabc: "foo",
+ abcde: "foo",
+ abcdefghijklmnopqrstuvwxyzabcde: "foo",
+ abcdefg: "foo",
+ abcdefghijklmnopqrstuvwxy: "foo",
+ abcdefghijklmnopqrstuvwxyz: "foo",
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26352.
+
+var _ = map[int]string{
+ 1: "",
+
+ 12345678901234567890123456789: "",
+ 12345678901234567890123456789012345678: "",
+}
+
+func f() {
+ _ = map[int]string{
+ 1: "",
+
+ 12345678901234567: "",
+ 12345678901234567890123456789012345678901: "",
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26930.
+
+var _ = S{
+ F1: []string{},
+ F2____: []string{},
+}
+
+var _ = S{
+ F1: []string{},
+ F2____: []string{},
+}
+
+var _ = S{
+ F1____: []string{},
+ F2: []string{},
+}
+
+var _ = S{
+ F1____: []string{},
+ F2: []string{},
+}
diff --git a/src/go/printer/testdata/alignment.input b/src/go/printer/testdata/alignment.input
new file mode 100644
index 0000000..323d268
--- /dev/null
+++ b/src/go/printer/testdata/alignment.input
@@ -0,0 +1,179 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package alignment
+
+// ----------------------------------------------------------------------------
+// Examples from issue #7335.
+
+func main() {
+ z := MyStruct{
+ Foo: "foo",
+ Bar: "bar",
+ Name: "name",
+ LongName: "longname",
+ Baz: "baz",
+ }
+ y := MyStruct{
+ Foo: "foo",
+ Bar: "bar",
+ NameXX: "name",
+ LongNameXXXXXXXXXXXXX: "longname",
+ Baz: "baz",
+ }
+ z := MyStruct{
+ Foo: "foo",
+ Bar: "bar",
+ Name: "name",
+ LongNameXXXXXXXXXXXXX: "longname",
+ Baz: "baz",
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #10392.
+
+var kcfg = KubeletConfig{
+ Address: s.Address,
+ AllowPrivileged: s.AllowPrivileged,
+ HostNetworkSources: hostNetworkSources,
+ HostnameOverride: s.HostnameOverride,
+ RootDirectory: s.RootDirectory,
+ ConfigFile: s.Config,
+ ManifestURL: s.ManifestURL,
+ FileCheckFrequency: s.FileCheckFrequency,
+ HTTPCheckFrequency: s.HTTPCheckFrequency,
+ PodInfraContainerImage: s.PodInfraContainerImage,
+ SyncFrequency: s.SyncFrequency,
+ RegistryPullQPS: s.RegistryPullQPS,
+ RegistryBurst: s.RegistryBurst,
+ MinimumGCAge: s.MinimumGCAge,
+ MaxPerPodContainerCount: s.MaxPerPodContainerCount,
+ MaxContainerCount: s.MaxContainerCount,
+ ClusterDomain: s.ClusterDomain,
+ ClusterDNS: s.ClusterDNS,
+ Runonce: s.RunOnce,
+ Port: s.Port,
+ ReadOnlyPort: s.ReadOnlyPort,
+ CadvisorInterface: cadvisorInterface,
+ EnableServer: s.EnableServer,
+ EnableDebuggingHandlers: s.EnableDebuggingHandlers,
+ DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
+ KubeClient: client,
+ MasterServiceNamespace: s.MasterServiceNamespace,
+ VolumePlugins: ProbeVolumePlugins(),
+ NetworkPlugins: ProbeNetworkPlugins(),
+ NetworkPluginName: s.NetworkPluginName,
+ StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
+ TLSOptions: tlsOptions,
+ ImageGCPolicy: imageGCPolicy,imageGCPolicy,
+ Cloud: cloud,
+ NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
+}
+
+var a = A{
+ Long: 1,
+ LongLong: 1,
+ LongLongLong: 1,
+ LongLongLongLong: 1,
+ LongLongLongLongLong: 1,
+ LongLongLongLongLongLong: 1,
+ LongLongLongLongLongLongLong: 1,
+ LongLongLongLongLongLongLongLong: 1,
+ Short: 1,
+ LongLongLongLongLongLongLongLongLong: 3,
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #22852.
+
+var fmtMap = map[string]string{
+ "1": "123",
+ "12": "123",
+ "123": "123",
+ "1234": "123",
+ "12345": "123",
+ "123456": "123",
+ "12345678901234567890123456789": "123",
+ "abcde": "123",
+ "123456789012345678901234567890": "123",
+ "1234567": "123",
+ "abcdefghijklmnopqrstuvwxyzabcd": "123",
+ "abcd": "123",
+}
+
+type Fmt struct {
+ abcdefghijklmnopqrstuvwx string
+ abcdefghijklmnopqrstuvwxy string
+ abcdefghijklmnopqrstuvwxyz string
+ abcdefghijklmnopqrstuvwxyza string
+ abcdefghijklmnopqrstuvwxyzab string
+ abcdefghijklmnopqrstuvwxyzabc string
+ abcde string
+ abcdefghijklmnopqrstuvwxyzabcde string
+ abcdefg string
+}
+
+func main() {
+ _ := Fmt{
+ abcdefghijklmnopqrstuvwx: "foo",
+ abcdefghijklmnopqrstuvwxyza: "foo",
+ abcdefghijklmnopqrstuvwxyzab: "foo",
+ abcdefghijklmnopqrstuvwxyzabc: "foo",
+ abcde: "foo",
+ abcdefghijklmnopqrstuvwxyzabcde: "foo",
+ abcdefg: "foo",
+ abcdefghijklmnopqrstuvwxy: "foo",
+ abcdefghijklmnopqrstuvwxyz: "foo",
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26352.
+
+var _ = map[int]string{
+ 1: "",
+
+ 12345678901234567890123456789: "",
+ 12345678901234567890123456789012345678: "",
+}
+
+func f() {
+ _ = map[int]string{
+ 1: "",
+
+ 12345678901234567: "",
+ 12345678901234567890123456789012345678901: "",
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Examples from issue #26930.
+
+var _ = S{
+ F1: []string{
+ },
+ F2____: []string{},
+}
+
+var _ = S{
+ F1: []string{
+
+
+ },
+ F2____: []string{},
+}
+
+var _ = S{
+ F1____: []string{
+ },
+ F2: []string{},
+}
+
+var _ = S{
+ F1____: []string{
+
+ },
+ F2: []string{},
+}
diff --git a/src/go/printer/testdata/comments.golden b/src/go/printer/testdata/comments.golden
new file mode 100644
index 0000000..1e5d17b
--- /dev/null
+++ b/src/go/printer/testdata/comments.golden
@@ -0,0 +1,774 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+package main
+
+import "fmt" // fmt
+
+const c0 = 0 // zero
+const (
+ c1 = iota // c1
+ c2 // c2
+)
+
+// Alignment of comments in declarations>
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota + 10
+ _ // comments
+
+ _ = 10 // comment
+ _ T = 20 // comment
+)
+
+const (
+ _____ = iota // foo
+ _ // bar
+ _ = 0 // bal
+ _ // bat
+)
+
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota + 10
+ _ // comment
+ _ = 10
+ _ = 20 // comment
+ _ T = 0 // comment
+)
+
+// The SZ struct; it is empty.
+type SZ struct{}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+ int
+ x, y, z int // 3 unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+ S0
+ A, B, C float // 3 exported fields
+ D, b, c int // 2 unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+ S1
+ A, B, C float // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface{}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+ f(x int) int // unexported method
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+ I0
+ F(x float) float // exported methods
+ g(x int) int // unexported method
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+ I0
+ F(x float) float // exported method
+ G(x float) float // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+ // lead comment for F1
+ F1 int // line comment for F1
+ // lead comment for F2
+ F2 int // line comment for F2
+ f3 int // f3 is not exported
+}
+
+// Here is a comment.
+// Here is an accidentally unindented line.
+// More comment.
+//
+//dir:ect ive
+type directiveCheck struct{}
+
+// This comment group should be separated
+// with a newline from the next comment
+// group.
+
+// This comment should NOT be associated with the next declaration.
+
+var x int // x
+var ()
+
+// This comment SHOULD be associated with f0.
+func f0() {
+ const pi = 3.14 // pi
+ var s1 struct{} /* an empty struct */ /* foo */
+ // a struct constructor
+ // --------------------
+ var s2 struct{} = struct{}{}
+ x := pi
+}
+
+// This comment should be associated with f1, with one blank line before the comment.
+func f1() {
+ f0()
+ /* 1 */
+ // 2
+ /* 3 */
+ /* 4 */
+ f0()
+}
+
+func _() {
+ // this comment should be properly indented
+}
+
+func _(x int) int {
+ if x < 0 { // the tab printed before this comment's // must not affect the remaining lines
+ return -x // this statement should be properly indented
+ }
+ if x < 0 { /* the tab printed before this comment's /* must not affect the remaining lines */
+ return -x // this statement should be properly indented
+ }
+ return x
+}
+
+func typeswitch(x interface{}) {
+ switch v := x.(type) {
+ case bool, int, float:
+ case string:
+ default:
+ }
+
+ switch x.(type) {
+ }
+
+ switch v0, ok := x.(int); v := x.(type) {
+ }
+
+ switch v0, ok := x.(int); x.(type) {
+ case byte: // this comment should be on the same line as the keyword
+ // this comment should be normally indented
+ _ = 0
+ case bool, int, float:
+ // this comment should be indented
+ case string:
+ default:
+ // this comment should be indented
+ }
+ // this comment should not be indented
+}
+
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+ _ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // should be aligned with f()
+ f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // may not be aligned with f() (source is not aligned)
+ f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+// Issue 9751.
+func _() {
+ /*a string
+
+ b string*/
+
+ /*A string
+
+
+
+ Z string*/
+
+ /*a string
+
+ b string
+
+ c string*/
+
+ {
+ /*a string
+ b string*/
+
+ /*a string
+
+ b string*/
+
+ /*a string
+
+ b string
+
+ c string*/
+ }
+
+ {
+ /*a string
+ b string*/
+
+ /*a string
+
+ b string*/
+
+ /*a string
+
+ b string
+
+ c string*/
+ }
+
+ /*
+ */
+
+ /*
+
+ */
+
+ /*
+
+ * line
+
+ */
+}
+
+/*
+ * line
+ * of
+ * stars
+ */
+
+/* another line
+ * of
+ * stars */
+
+/* and another line
+ * of
+ * stars */
+
+/* a line of
+ * stars */
+
+/* and another line of
+ * stars */
+
+/* a line of stars
+ */
+
+/* and another line of
+ */
+
+/* a line of stars
+ */
+
+/* and another line of
+ */
+
+/*
+aligned in middle
+here
+ not here
+*/
+
+/*
+blank line in middle:
+
+with no leading spaces on blank line.
+*/
+
+/*
+ aligned in middle
+ here
+ not here
+*/
+
+/*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+*/
+
+func _() {
+ /*
+ * line
+ * of
+ * stars
+ */
+
+ /*
+ aligned in middle
+ here
+ not here
+ */
+
+ /*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+ */
+}
+
+// Some interesting interspersed comments.
+// See below for more common cases.
+func _( /* this */ x /* is */ /* an */ int) {
+}
+
+func _( /* no params - extra blank before and after comment */ ) {}
+func _(a, b int /* params - no extra blank after comment */) {}
+
+func _() { f( /* no args - extra blank before and after comment */ ) }
+func _() { f(a, b /* args - no extra blank after comment */) }
+
+func _() {
+ f( /* no args - extra blank before and after comment */ )
+ f(a, b /* args - no extra blank after comment */)
+}
+
+func ( /* comment1 */ T /* comment2 */) _() {}
+
+func _() { /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
+func _() { x := 0; /* comment */ y = x /* comment */ }
+
+func _() {
+ _ = 0
+ /* closing curly brace should be on new line */
+}
+
+func _() {
+ _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+ var a = []int{1, 2 /*jasldf*/}
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2}/*jasldf
+ */
+
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2}// jasldf
+
+ _ = a
+}
+
+// Test cases from issues 11274, 15137:
+// Semicolon must not be lost when multiple statements are on the same line with a comment.
+func _() {
+ x := 0 /**/
+ y := 1
+}
+
+func _() {
+ f()
+ f()
+ f() /* comment */
+ f()
+ f() /* comment */
+ f()
+ f() /* a */ /* b */
+ f()
+ f() /* a */ /* b */
+ f()
+ f() /* a */ /* b */
+ f()
+}
+
+func _() {
+ f() /* a */ /* b */
+}
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
+func _() {
+ _ = T{
+ 1, // comment after comma
+ 2, /* comment after comma */
+ 3, // comment after comma
+ }
+ _ = T{
+ 1, // comment after comma
+ 2, /* comment after comma */
+ 3, // comment after comma
+ }
+ _ = T{
+ /* comment before literal */ 1,
+ 2, /* comment before comma - ok to move after comma */
+ 3, /* comment before comma - ok to move after comma */
+ }
+
+ for i = 0; // comment after semicolon
+ i < 9; /* comment after semicolon */
+ i++ { // comment after opening curly brace
+ }
+
+ // TODO(gri) the last comment in this example should be aligned */
+ for i = 0; // comment after semicolon
+ i < 9; /* comment before semicolon - ok to move after semicolon */
+ i++ /* comment before opening curly brace */ {
+ }
+}
+
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x /* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+ f(x /* comment */, y)
+ f(x, /* comment */
+ y)
+ f(
+ x, /* comment */
+ )
+}
+
+func g(
+ x int, /* comment */
+) {
+}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+func _() {
+ for a /* comment */, b := range x {
+ }
+}
+
+//extern foo
+func foo() {}
+
+//export bar
+func bar() {}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//
+//line foo:1
+func _() {
+ _ = 0
+ // The following is a legal line directive. It must not be indented:
+//line foo:2
+ _ = 1
+
+ // The following is not a legal line directive (it doesn't start in column 1):
+ //line foo:2
+ _ = 2
+
+ // The following is not a legal line directive (missing colon):
+//line foo -3
+ _ = 3
+}
+
+// Line comments with tabs
+func _() {
+ var finput *bufio.Reader // input file
+ var stderr *bufio.Writer
+ var ftable *bufio.Writer // y.go file
+ var foutput *bufio.Writer // y.output file
+
+ var oflag string // -o [y.go] - y.go file
+ var vflag string // -v [y.output] - y.output file
+ var lflag bool // -l - disable line directives
+}
+
+// Trailing white space in comments should be trimmed
+func _() {
+ // This comment has 4 blanks following that should be trimmed:
+ /* Each line of this comment has blanks or tabs following that should be trimmed:
+ line 2:
+ line 3:
+ */
+}
+
+var _ = []T{ /* lone comment */ }
+
+var _ = []T{
+ /* lone comment */
+}
+
+var _ = []T{
+ // lone comments
+ // in composite lit
+}
+
+var _ = [][]T{
+ {
+ // lone comments
+ // in composite lit
+ },
+}
+
+// TODO: gofmt doesn't add these tabs; make it so that these golden
+// tests run the printer in a way that it's exactly like gofmt.
+
+var _ = []T{ // lone comment
+}
+
+var _ = []T{ // lone comments
+ // in composite lit
+}
+
+func _() {}
+
+func _() {}
+
+/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/src/go/printer/testdata/comments.input b/src/go/printer/testdata/comments.input
new file mode 100644
index 0000000..40aa55b
--- /dev/null
+++ b/src/go/printer/testdata/comments.input
@@ -0,0 +1,773 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+import "fmt" // fmt
+
+const c0 = 0 // zero
+const (
+ c1 = iota // c1
+ c2 // c2
+)
+
+// Alignment of comments in declarations>
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota+10
+ _ // comments
+
+ _ = 10 // comment
+ _ T = 20 // comment
+)
+
+const (
+ _____ = iota // foo
+ _ // bar
+ _ = 0 // bal
+ _ // bat
+)
+
+const (
+ _ T = iota // comment
+ _ // comment
+ _ // comment
+ _ = iota + 10
+ _ // comment
+ _ = 10
+ _ = 20 // comment
+ _ T = 0 // comment
+)
+
+// The SZ struct; it is empty.
+type SZ struct {}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+ int
+ x, y, z int // 3 unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+ S0
+ A, B, C float // 3 exported fields
+ D, b, c int // 2 unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+ S1
+ A, B, C float // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface {}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+ f(x int) int // unexported method
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+ I0
+ F(x float) float // exported methods
+ g(x int) int // unexported method
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+ I0
+ F(x float) float // exported method
+ G(x float) float // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+ // lead comment for F1
+ F1 int // line comment for F1
+ // lead comment for F2
+ F2 int // line comment for F2
+ f3 int // f3 is not exported
+}
+
+// Here is a comment.
+//Here is an accidentally unindented line.
+//dir:ect ive
+// More comment.
+type directiveCheck struct{}
+
+// This comment group should be separated
+// with a newline from the next comment
+// group.
+
+// This comment should NOT be associated with the next declaration.
+
+var x int // x
+var ()
+
+
+// This comment SHOULD be associated with f0.
+func f0() {
+ const pi = 3.14 // pi
+ var s1 struct {} /* an empty struct */ /* foo */
+ // a struct constructor
+ // --------------------
+ var s2 struct {} = struct {}{}
+ x := pi
+}
+//
+// This comment should be associated with f1, with one blank line before the comment.
+//
+func f1() {
+ f0()
+ /* 1 */
+ // 2
+ /* 3 */
+ /* 4 */
+ f0()
+}
+
+
+func _() {
+// this comment should be properly indented
+}
+
+
+func _(x int) int {
+ if x < 0 { // the tab printed before this comment's // must not affect the remaining lines
+ return -x // this statement should be properly indented
+ }
+ if x < 0 { /* the tab printed before this comment's /* must not affect the remaining lines */
+ return -x // this statement should be properly indented
+ }
+ return x
+}
+
+
+func typeswitch(x interface{}) {
+ switch v := x.(type) {
+ case bool, int, float:
+ case string:
+ default:
+ }
+
+ switch x.(type) {
+ }
+
+ switch v0, ok := x.(int); v := x.(type) {
+ }
+
+ switch v0, ok := x.(int); x.(type) {
+ case byte: // this comment should be on the same line as the keyword
+ // this comment should be normally indented
+ _ = 0
+ case bool, int, float:
+ // this comment should be indented
+ case string:
+ default:
+ // this comment should be indented
+ }
+ // this comment should not be indented
+}
+
+//
+// Indentation of comments after possibly indented multi-line constructs
+// (test cases for issue 3147).
+//
+
+func _() {
+ s := 1 +
+ 2
+// should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+// should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+}
+
+func _() {
+ s := 1 +
+ 2 // comment
+
+ // should be indented like s
+ _ = 0
+}
+
+func _() {
+ s := 1 +
+ 2
+
+ // should be indented like s
+ _ = 0
+}
+
+// Test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // should be aligned with f()
+ f()
+}
+
+// Modified test case from issue 3147.
+func f() {
+ templateText := "a" + // A
+ "b" + // B
+ "c" // C
+
+ // may not be aligned with f() (source is not aligned)
+ f()
+}
+
+//
+// Test cases for alignment of lines in general comments.
+//
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ aligned line */
+}
+
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /* freestanding comment
+ aligned line */
+}
+
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line
+ */
+}
+
+func _() {
+ /*
+ freestanding comment
+ aligned line */
+}
+
+// Issue 9751.
+func _() {
+ /*a string
+
+ b string*/
+
+ /*A string
+
+
+
+ Z string*/
+
+ /*a string
+
+ b string
+
+ c string*/
+
+ {
+ /*a string
+b string*/
+
+ /*a string
+
+b string*/
+
+ /*a string
+
+b string
+
+c string*/
+ }
+
+ {
+ /*a string
+ b string*/
+
+ /*a string
+
+ b string*/
+
+ /*a string
+
+ b string
+
+ c string*/
+ }
+
+ /*
+ */
+
+ /*
+
+ */
+
+ /*
+
+ * line
+
+ */
+}
+
+/*
+ * line
+ * of
+ * stars
+ */
+
+/* another line
+ * of
+ * stars */
+
+/* and another line
+ * of
+ * stars */
+
+/* a line of
+ * stars */
+
+/* and another line of
+ * stars */
+
+/* a line of stars
+*/
+
+/* and another line of
+*/
+
+/* a line of stars
+ */
+
+/* and another line of
+ */
+
+/*
+aligned in middle
+here
+ not here
+*/
+
+/*
+blank line in middle:
+
+with no leading spaces on blank line.
+*/
+
+/*
+ aligned in middle
+ here
+ not here
+*/
+
+/*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+*/
+
+func _() {
+ /*
+ * line
+ * of
+ * stars
+ */
+
+ /*
+ aligned in middle
+ here
+ not here
+ */
+
+ /*
+ blank line in middle:
+
+ with no leading spaces on blank line.
+*/
+}
+
+
+// Some interesting interspersed comments.
+// See below for more common cases.
+func _(/* this */x/* is *//* an */ int) {
+}
+
+func _(/* no params - extra blank before and after comment */) {}
+func _(a, b int /* params - no extra blank after comment */) {}
+
+func _() { f(/* no args - extra blank before and after comment */) }
+func _() { f(a, b /* args - no extra blank after comment */) }
+
+func _() {
+ f(/* no args - extra blank before and after comment */)
+ f(a, b /* args - no extra blank after comment */)
+}
+
+func (/* comment1 */ T /* comment2 */) _() {}
+
+func _() { /* "short-ish one-line functions with comments are formatted as multi-line functions */ }
+func _() { x := 0; /* comment */ y = x /* comment */ }
+
+func _() {
+ _ = 0
+ /* closing curly brace should be on new line */ }
+
+func _() {
+ _ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */}
+}
+
+// Test cases from issue 1542:
+// Comments must not be placed before commas and cause invalid programs.
+func _() {
+ var a = []int{1, 2, /*jasldf*/
+ }
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2, /*jasldf
+ */
+ }
+ _ = a
+}
+
+func _() {
+ var a = []int{1, 2, // jasldf
+ }
+ _ = a
+}
+
+// Test cases from issues 11274, 15137:
+// Semicolon must not be lost when multiple statements are on the same line with a comment.
+func _() {
+ x := 0 /**/; y := 1
+}
+
+func _() {
+ f(); f()
+ f(); /* comment */ f()
+ f() /* comment */; f()
+ f(); /* a */ /* b */ f()
+ f() /* a */ /* b */; f()
+ f() /* a */; /* b */ f()
+}
+
+func _() {
+ f() /* a */ /* b */ }
+
+// Comments immediately adjacent to punctuation followed by a newline
+// remain after the punctuation (looks better and permits alignment of
+// comments).
+func _() {
+ _ = T{
+ 1, // comment after comma
+ 2, /* comment after comma */
+ 3 , // comment after comma
+ }
+ _ = T{
+ 1 ,// comment after comma
+ 2 ,/* comment after comma */
+ 3,// comment after comma
+ }
+ _ = T{
+ /* comment before literal */1,
+ 2/* comment before comma - ok to move after comma */,
+ 3 /* comment before comma - ok to move after comma */ ,
+ }
+
+ for
+ i=0;// comment after semicolon
+ i<9;/* comment after semicolon */
+ i++{// comment after opening curly brace
+ }
+
+ // TODO(gri) the last comment in this example should be aligned */
+ for
+ i=0;// comment after semicolon
+ i<9/* comment before semicolon - ok to move after semicolon */;
+ i++ /* comment before opening curly brace */ {
+ }
+}
+
+// If there is no newline following punctuation, commas move before the punctuation.
+// This way, commas interspersed in lists stay with the respective expression.
+func f(x/* comment */, y int, z int /* comment */, u, v, w int /* comment */) {
+ f(x /* comment */, y)
+ f(x /* comment */,
+ y)
+ f(
+ x /* comment */,
+ )
+}
+
+func g(
+ x int /* comment */,
+) {}
+
+type _ struct {
+ a, b /* comment */, c int
+}
+
+type _ struct { a, b /* comment */, c int }
+
+func _() {
+ for a /* comment */, b := range x {
+ }
+}
+
+//extern foo
+func foo() {}
+
+//export bar
+func bar() {}
+
+// Print line directives correctly.
+
+// The following is a legal line directive.
+//line foo:1
+func _() {
+ _ = 0
+// The following is a legal line directive. It must not be indented:
+//line foo:2
+ _ = 1
+
+// The following is not a legal line directive (it doesn't start in column 1):
+ //line foo:2
+ _ = 2
+
+// The following is not a legal line directive (missing colon):
+//line foo -3
+ _ = 3
+}
+
+// Line comments with tabs
+func _() {
+var finput *bufio.Reader // input file
+var stderr *bufio.Writer
+var ftable *bufio.Writer // y.go file
+var foutput *bufio.Writer // y.output file
+
+var oflag string // -o [y.go] - y.go file
+var vflag string // -v [y.output] - y.output file
+var lflag bool // -l - disable line directives
+}
+
+// Trailing white space in comments should be trimmed
+func _() {
+// This comment has 4 blanks following that should be trimmed:
+/* Each line of this comment has blanks or tabs following that should be trimmed:
+ line 2:
+ line 3:
+*/
+}
+
+var _ = []T{/* lone comment */}
+
+var _ = []T{
+/* lone comment */
+}
+
+var _ = []T{
+// lone comments
+// in composite lit
+}
+
+var _ = [][]T{
+ {
+ // lone comments
+ // in composite lit
+ },
+}
+
+// TODO: gofmt doesn't add these tabs; make it so that these golden
+// tests run the printer in a way that it's exactly like gofmt.
+
+var _ = []T{// lone comment
+}
+
+var _ = []T{// lone comments
+// in composite lit
+}
+
+func _() {}
+
+//
+func _() {}
+
+/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
diff --git a/src/go/printer/testdata/comments.x b/src/go/printer/testdata/comments.x
new file mode 100644
index 0000000..5d088ab
--- /dev/null
+++ b/src/go/printer/testdata/comments.x
@@ -0,0 +1,55 @@
+// This is a package for testing comment placement by go/printer.
+package main
+
+// The SZ struct; it is empty.
+type SZ struct{}
+
+// The S0 struct; no field is exported.
+type S0 struct {
+ // contains filtered or unexported fields
+}
+
+// The S1 struct; some fields are not exported.
+type S1 struct {
+ S0
+ A, B, C float // 3 exported fields
+ D int // 2 unexported fields
+ // contains filtered or unexported fields
+}
+
+// The S2 struct; all fields are exported.
+type S2 struct {
+ S1
+ A, B, C float // 3 exported fields
+}
+
+// The IZ interface; it is empty.
+type SZ interface{}
+
+// The I0 interface; no method is exported.
+type I0 interface {
+ // contains filtered or unexported methods
+}
+
+// The I1 interface; some methods are not exported.
+type I1 interface {
+ I0
+ F(x float) float // exported methods
+ // contains filtered or unexported methods
+}
+
+// The I2 interface; all methods are exported.
+type I2 interface {
+ I0
+ F(x float) float // exported method
+ G(x float) float // exported method
+}
+
+// The S3 struct; all comments except for the last one must appear in the export.
+type S3 struct {
+ // lead comment for F1
+ F1 int // line comment for F1
+ // lead comment for F2
+ F2 int // line comment for F2
+ // contains filtered or unexported fields
+}
diff --git a/src/go/printer/testdata/comments2.golden b/src/go/printer/testdata/comments2.golden
new file mode 100644
index 0000000..83213d1
--- /dev/null
+++ b/src/go/printer/testdata/comments2.golden
@@ -0,0 +1,163 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+ c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+ c1d
+*/
+/*
+c1e
+foo */
+/*
+ c1f
+ foo */
+
+func f() {
+ /*
+ c2a
+ */
+ /*
+ c2b
+ */
+ /* foo
+ c2c
+ */
+ /* foo
+ c2d
+ */
+ /*
+ c2e
+ foo */
+ /*
+ c2f
+ foo */
+}
+
+func g() {
+ /*
+ c3a
+ */
+ /*
+ c3b
+ */
+ /* foo
+ c3c
+ */
+ /* foo
+ c3d
+ */
+ /*
+ c3e
+ foo */
+ /*
+ c3f
+ foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+ /*
+ prints test 5 times
+ */
+ for i := 0; i < 5; i++ {
+ println("test")
+ }
+}
+
+func issue5623() {
+L:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LLLLLLL:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LL:
+LLLLL:
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+
+ // test case from issue
+label:
+ mask := uint64(1)<<c - 1 // Allocation mask
+ used := atomic.LoadUint64(&h.used) // Current allocations
+}
+
+// Test cases for issue 18782
+var _ = [][]int{
+ /* a, b, c, d, e */
+ /* a */ {0, 0, 0, 0, 0},
+ /* b */ {0, 5, 4, 4, 4},
+ /* c */ {0, 4, 5, 4, 4},
+ /* d */ {0, 4, 4, 5, 4},
+ /* e */ {0, 4, 4, 4, 5},
+}
+
+var _ = T{ /* a */ 0}
+
+var _ = T{ /* a */ /* b */ 0}
+
+var _ = T{ /* a */ /* b */
+ /* c */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+ /* c */
+ /* d */ 0,
+}
+
+var _ = T{
+ /* a */
+ /* b */ 0,
+}
+
+var _ = T{ /* a */ {}}
+
+var _ = T{ /* a */ /* b */ {}}
+
+var _ = T{ /* a */ /* b */
+ /* c */ {},
+}
+
+var _ = T{ /* a */ /* b */
+ /* c */
+ /* d */ {},
+}
+
+var _ = T{
+ /* a */
+ /* b */ {},
+}
+
+var _ = []T{
+ func() {
+ var _ = [][]int{
+ /* a, b, c, d, e */
+ /* a */ {0, 0, 0, 0, 0},
+ /* b */ {0, 5, 4, 4, 4},
+ /* c */ {0, 4, 5, 4, 4},
+ /* d */ {0, 4, 4, 5, 4},
+ /* e */ {0, 4, 4, 4, 5},
+ }
+ },
+}
diff --git a/src/go/printer/testdata/comments2.input b/src/go/printer/testdata/comments2.input
new file mode 100644
index 0000000..8d38c41
--- /dev/null
+++ b/src/go/printer/testdata/comments2.input
@@ -0,0 +1,168 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a package for testing comment placement by go/printer.
+//
+package main
+
+// Test cases for idempotent comment formatting (was issue 1835).
+/*
+c1a
+*/
+/*
+ c1b
+*/
+/* foo
+c1c
+*/
+/* foo
+ c1d
+*/
+/*
+c1e
+foo */
+/*
+ c1f
+ foo */
+
+func f() {
+/*
+c2a
+*/
+/*
+ c2b
+*/
+/* foo
+c2c
+*/
+/* foo
+ c2d
+*/
+/*
+c2e
+foo */
+/*
+ c2f
+ foo */
+}
+
+func g() {
+/*
+c3a
+*/
+/*
+ c3b
+*/
+/* foo
+c3c
+*/
+/* foo
+ c3d
+*/
+/*
+c3e
+foo */
+/*
+ c3f
+ foo */
+}
+
+// Test case taken literally from issue 1835.
+func main() {
+/*
+prints test 5 times
+*/
+ for i := 0; i < 5; i++ {
+ println("test")
+ }
+}
+
+func issue5623() {
+L:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LLLLLLL:
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+
+LL:
+LLLLL:
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx /* comment */
+ _ = yyyyyyyyyyyyyyyy /* comment - should be aligned */
+
+ _ = xxxxxxxxxxxxxxxxxxxxxxxxxxxx // comment
+ _ = yyyyyyyyyyyyyyyy // comment - should be aligned
+
+// test case from issue
+label:
+ mask := uint64(1)<<c - 1 // Allocation mask
+ used := atomic.LoadUint64(&h.used) // Current allocations
+}
+
+// Test cases for issue 18782
+var _ = [][]int{
+ /* a, b, c, d, e */
+ /* a */ {0, 0, 0, 0, 0},
+ /* b */ {0, 5, 4, 4, 4},
+ /* c */ {0, 4, 5, 4, 4},
+ /* d */ {0, 4, 4, 5, 4},
+ /* e */ {0, 4, 4, 4, 5},
+}
+
+var _ = T{ /* a */ 0,
+}
+
+var _ = T{ /* a */ /* b */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+ /* c */ 0,
+}
+
+var _ = T{ /* a */ /* b */
+ /* c */
+ /* d */ 0,
+}
+
+var _ = T{
+ /* a */
+ /* b */ 0,
+}
+
+var _ = T{ /* a */ {},
+}
+
+var _ = T{ /* a */ /* b */ {},
+}
+
+var _ = T{ /* a */ /* b */
+ /* c */ {},
+}
+
+var _ = T{ /* a */ /* b */
+ /* c */
+ /* d */ {},
+}
+
+var _ = T{
+ /* a */
+ /* b */ {},
+}
+
+var _ = []T{
+ func() {
+ var _ = [][]int{
+ /* a, b, c, d, e */
+ /* a */ {0, 0, 0, 0, 0},
+ /* b */ {0, 5, 4, 4, 4},
+ /* c */ {0, 4, 5, 4, 4},
+ /* d */ {0, 4, 4, 5, 4},
+ /* e */ {0, 4, 4, 4, 5},
+ }
+ },
+}
diff --git a/src/go/printer/testdata/complit.input b/src/go/printer/testdata/complit.input
new file mode 100644
index 0000000..82806a4
--- /dev/null
+++ b/src/go/printer/testdata/complit.input
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package complit
+
+var (
+ // Multi-line declarations
+ V1 = T{
+ F1: "hello",
+ f2: 1,
+ }
+ V2 = T{
+ f2: 1,
+ F1: "hello",
+ }
+ V3 = T{
+ F1: "hello",
+ F2: T2{
+ A: "world",
+ b: "hidden",
+ },
+ f3: T2{
+ A: "world",
+ },
+ }
+ V4 = T{
+ f2: 1,
+ }
+
+ // Single-line declarations
+ V5 = T{F1: "hello", f2: 1}
+ V6 = T{f2: 1, F1: "hello"}
+ V7 = T{f2: 1}
+
+ // Mixed-mode declarations
+ V8 = T{
+ F1: "hello", f2: 1,
+ F3: "world",
+ f4: 2}
+ V9 = T{
+ f2: 1, F1: "hello",}
+ V10 = T{
+ F1: "hello", f2: 1,
+ f3: 2,
+ F4: "world", f5: 3,
+ }
+
+ // Other miscellaneous declarations
+ V11 = T{
+ t{
+ A: "world",
+ b: "hidden",
+ },
+ f2: t{
+ A: "world",
+ b: "hidden",
+ },
+ }
+ V12 = T{
+ F1: make(chan int),
+ f2: []int{},
+ F3: make(map[int]string), f4: 1,
+ }
+)
\ No newline at end of file
diff --git a/src/go/printer/testdata/complit.x b/src/go/printer/testdata/complit.x
new file mode 100644
index 0000000..458ac61
--- /dev/null
+++ b/src/go/printer/testdata/complit.x
@@ -0,0 +1,62 @@
+package complit
+
+var (
+ // Multi-line declarations
+ V1 = T{
+ F1: "hello",
+ // contains filtered or unexported fields
+ }
+ V2 = T{
+
+ F1: "hello",
+ // contains filtered or unexported fields
+ }
+ V3 = T{
+ F1: "hello",
+ F2: T2{
+ A: "world",
+ // contains filtered or unexported fields
+ },
+ // contains filtered or unexported fields
+ }
+ V4 = T{
+ // contains filtered or unexported fields
+ }
+
+ // Single-line declarations
+ V5 = T{F1: "hello", /* contains filtered or unexported fields */}
+ V6 = T{F1: "hello", /* contains filtered or unexported fields */}
+ V7 = T{/* contains filtered or unexported fields */}
+
+ // Mixed-mode declarations
+ V8 = T{
+ F1: "hello",
+ F3: "world",
+ // contains filtered or unexported fields
+ }
+ V9 = T{
+ F1: "hello",
+ // contains filtered or unexported fields
+ }
+ V10 = T{
+ F1: "hello",
+
+ F4: "world",
+ // contains filtered or unexported fields
+ }
+
+ // Other miscellaneous declarations
+ V11 = T{
+ t{
+ A: "world",
+ // contains filtered or unexported fields
+ },
+ // contains filtered or unexported fields
+ }
+ V12 = T{
+ F1: make(chan int),
+
+ F3: make(map[int]string),
+ // contains filtered or unexported fields
+ }
+)
diff --git a/src/go/printer/testdata/declarations.golden b/src/go/printer/testdata/declarations.golden
new file mode 100644
index 0000000..fe0f783
--- /dev/null
+++ b/src/go/printer/testdata/declarations.golden
@@ -0,0 +1,1008 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "io"
+
+import (
+ _ "io"
+)
+
+import _ "io"
+
+import (
+ "io"
+ "io"
+ "io"
+)
+
+import (
+ "io"
+ aLongRename "io"
+
+ b "io"
+)
+
+import (
+ "unrenamed"
+ renamed "renameMe"
+ . "io"
+ _ "io"
+ "io"
+ . "os"
+)
+
+// no newlines between consecutive single imports, but
+// respect extra line breaks in the source (at most one empty line)
+import _ "io"
+import _ "io"
+import _ "io"
+
+import _ "os"
+import _ "os"
+import _ "os"
+
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+
+import "foo" // a comment
+import "bar" // a comment
+
+import (
+ _ "foo"
+ // a comment
+ "bar"
+ "foo" // a comment
+ "bar" // a comment
+)
+
+// comments + renames
+import (
+ "unrenamed" // a comment
+ renamed "renameMe"
+ . "io" /* a comment */
+ _ "io/ioutil" // a comment
+ "io" // testing alignment
+ . "os"
+ // a comment
+)
+
+// a case that caused problems in the past (comment placement)
+import (
+ . "fmt"
+ "io"
+ "malloc" // for the malloc count test only
+ "math"
+ "strings"
+ "testing"
+)
+
+// more import examples
+import (
+ "xxx"
+ "much_longer_name" // comment
+ "short_name" // comment
+)
+
+import (
+ _ "xxx"
+ "much_longer_name" // comment
+)
+
+import (
+ mymath "math"
+ "/foo/bar/long_package_path" // a comment
+)
+
+import (
+ "package_a" // comment
+ "package_b"
+ my_better_c "package_c" // comment
+ "package_d" // comment
+ my_e "package_e" // comment
+
+ "package_a" // comment
+ "package_bb"
+ "package_ccc" // comment
+ "package_dddd" // comment
+)
+
+// print import paths as double-quoted strings
+// (we would like more test cases but the go/parser
+// already excludes most incorrect paths, and we don't
+// bother setting up test-ASTs manually)
+import (
+ "fmt"
+ "math"
+)
+
+// at least one empty line between declarations of different kind
+import _ "io"
+
+var _ int
+
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+// T3 comment
+type T2 struct {
+} // should be a two-line struct
+
+// printing of constant literals
+const (
+ _ = "foobar"
+ _ = "a۰۱۸"
+ _ = "foo६४"
+ _ = "bar9876"
+ _ = 0
+ _ = 1
+ _ = 123456789012345678890
+ _ = 01234567
+ _ = 0xcafebabe
+ _ = 0.
+ _ = .0
+ _ = 3.14159265
+ _ = 1e0
+ _ = 1e+100
+ _ = 1e-100
+ _ = 2.71828e-1000
+ _ = 0i
+ _ = 1i
+ _ = 012345678901234567889i
+ _ = 123456789012345678890i
+ _ = 0.i
+ _ = .0i
+ _ = 3.14159265i
+ _ = 1e0i
+ _ = 1e+100i
+ _ = 1e-100i
+ _ = 2.71828e-1000i
+ _ = 'a'
+ _ = '\000'
+ _ = '\xFF'
+ _ = '\uff16'
+ _ = '\U0000ff16'
+ _ = `foobar`
+ _ = `foo
+---
+---
+bar`
+)
+
+func _() {
+ type _ int
+ type _ *int
+ type _ []int
+ type _ map[string]int
+ type _ chan int
+ type _ func() int
+
+ var _ int
+ var _ *int
+ var _ []int
+ var _ map[string]int
+ var _ chan int
+ var _ func() int
+
+ type _ struct{}
+ type _ *struct{}
+ type _ []struct{}
+ type _ map[string]struct{}
+ type _ chan struct{}
+ type _ func() struct{}
+
+ type _ interface{}
+ type _ *interface{}
+ type _ []interface{}
+ type _ map[string]interface{}
+ type _ chan interface{}
+ type _ func() interface{}
+
+ var _ struct{}
+ var _ *struct{}
+ var _ []struct{}
+ var _ map[string]struct{}
+ var _ chan struct{}
+ var _ func() struct{}
+
+ var _ interface{}
+ var _ *interface{}
+ var _ []interface{}
+ var _ map[string]interface{}
+ var _ chan interface{}
+ var _ func() interface{}
+}
+
+// don't lose blank lines in grouped declarations
+const (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ = iota
+ _
+
+ // a comment
+ _
+
+ _
+)
+
+type (
+ _ int
+ _ struct{}
+
+ _ interface{}
+
+ // a comment
+ _ map[string]int
+)
+
+var (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ bool
+
+ // a comment
+ _ bool
+)
+
+// don't lose blank lines in this struct
+type _ struct {
+ String struct {
+ Str, Len int
+ }
+ Slice struct {
+ Array, Len, Cap int
+ }
+ Eface struct {
+ Typ, Ptr int
+ }
+
+ UncommonType struct {
+ Name, PkgPath int
+ }
+ CommonType struct {
+ Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
+ }
+ Type struct {
+ Typ, Ptr int
+ }
+ StructField struct {
+ Name, PkgPath, Typ, Tag, Offset int
+ }
+ StructType struct {
+ Fields int
+ }
+ PtrType struct {
+ Elem int
+ }
+ SliceType struct {
+ Elem int
+ }
+ ArrayType struct {
+ Elem, Len int
+ }
+
+ Stktop struct {
+ Stackguard, Stackbase, Gobuf int
+ }
+ Gobuf struct {
+ Sp, Pc, G int
+ }
+ G struct {
+ Stackbase, Sched, Status, Alllink int
+ }
+}
+
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{}
+type _ struct {
+}
+
+type _ interface{}
+type _ interface {
+}
+
+// no tabs for single or ungrouped decls
+func _() {
+ const xxxxxx = 0
+ type x int
+ var xxx int
+ var yyyy float = 3.14
+ var zzzzz = "bar"
+
+ const (
+ xxxxxx = 0
+ )
+ type (
+ x int
+ )
+ var (
+ xxx int
+ )
+ var (
+ yyyy float = 3.14
+ )
+ var (
+ zzzzz = "bar"
+ )
+}
+
+// tabs for multiple or grouped decls
+func _() {
+ // no entry has a type
+ const (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // some entries have a type
+ const (
+ xxxxxx = 1
+ x = 2
+ xxx = 3
+ yyyyyyyy float = iota
+ yyyy = "bar"
+ yyy
+ yy = 2
+ )
+}
+
+func _() {
+ // no entry has a type
+ var (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // no entry has a value
+ var (
+ _ int
+ _ float
+ _ string
+
+ _ int // comment
+ _ float // comment
+ _ string // comment
+ )
+ // some entries have a type
+ var (
+ xxxxxx int
+ x float
+ xxx string
+ yyyyyyyy int = 1234
+ y float = 3.14
+ yyyy = "bar"
+ yyy string = "foo"
+ )
+ // mixed entries - all comments should be aligned
+ var (
+ a, b, c int
+ x = 10
+ d int // comment
+ y = 20 // comment
+ f, ff, fff, ffff int = 0, 1, 2, 3 // comment
+ )
+ // respect original line breaks
+ var _ = []T{
+ T{0x20, "Telugu"},
+ }
+ var _ = []T{
+ // respect original line breaks
+ T{0x20, "Telugu"},
+ }
+}
+
+// use the formatted output rather than the input to decide when to align
+// (was issue 4505)
+const (
+ short = 2 * (1 + 2)
+ aMuchLongerName = 3
+)
+
+var (
+ short = X{}
+ aMuchLongerName = X{}
+
+ x1 = X{} // foo
+ x2 = X{} // foo
+)
+
+func _() {
+ type (
+ xxxxxx int
+ x float
+ xxx string
+ xxxxx []x
+ xx struct{}
+ xxxxxxx struct {
+ _, _ int
+ _ float
+ }
+ xxxx chan<- string
+ )
+}
+
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+ umax uint = ^uint(0) // maximum value for a uint
+ bpu = 1 << (5 + umax>>63) // bits per uint
+ foo
+ bar = -1
+)
+
+// typical enum
+const (
+ a MyType = iota
+ abcd
+ b
+ c
+ def
+)
+
+// excerpt from godoc.go
+var (
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
+// formatting of structs
+type _ struct{}
+
+type _ struct { /* this comment should be visible */
+}
+
+type _ struct {
+ // this comment should be visible and properly indented
+}
+
+type _ struct { // this comment must not change indentation
+ f int
+ f, ff, fff, ffff int
+}
+
+type _ struct {
+ string
+}
+
+type _ struct {
+ string // comment
+}
+
+type _ struct {
+ string "tag"
+}
+
+type _ struct {
+ string "tag" // comment
+}
+
+type _ struct {
+ f int
+}
+
+type _ struct {
+ f int // comment
+}
+
+type _ struct {
+ f int "tag"
+}
+
+type _ struct {
+ f int "tag" // comment
+}
+
+type _ struct {
+ bool
+ a, b, c int
+ int "tag"
+ ES // comment
+ float "tag" // comment
+ f int // comment
+ f, ff, fff, ffff int // comment
+ g float "tag"
+ h float "tag" // comment
+}
+
+type _ struct {
+ a, b,
+ c, d int // this line should be indented
+ u, v, w, x float // this line should be indented
+ p, q,
+ r, s float // this line should be indented
+}
+
+// difficult cases
+type _ struct {
+ bool // comment
+ text []byte // comment
+}
+
+// formatting of interfaces
+type EI interface{}
+
+type _ interface {
+ EI
+}
+
+type _ interface {
+ f()
+ fffff()
+}
+
+type _ interface {
+ EI
+ f()
+ fffffg()
+}
+
+type _ interface { // this comment must not change indentation
+ EI // here's a comment
+ f() // no blank between identifier and ()
+ fffff() // no blank between identifier and ()
+ gggggggggggg(x, y, z int) // hurray
+}
+
+// formatting of variable declarations
+func _() {
+ type day struct {
+ n int
+ short, long string
+ }
+ var (
+ Sunday = day{0, "SUN", "Sunday"}
+ Monday = day{1, "MON", "Monday"}
+ Tuesday = day{2, "TUE", "Tuesday"}
+ Wednesday = day{3, "WED", "Wednesday"}
+ Thursday = day{4, "THU", "Thursday"}
+ Friday = day{5, "FRI", "Friday"}
+ Saturday = day{6, "SAT", "Saturday"}
+ )
+}
+
+// formatting of multi-line variable declarations
+var a1, b1, c1 int // all on one line
+
+var a2, b2,
+ c2 int // this line should be indented
+
+var (
+ a3, b3,
+ c3, d3 int // this line should be indented
+ a4, b4, c4 int // this line should be indented
+)
+
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+ minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+ "minimum time window between two refreshes for a given user.")
+ x = flag.Int64("refresh_user_rollout_percent", 100,
+ "temporary flag to ramp up the refresh user rpc")
+ aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
+
+func _() {
+ var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
+ Headers: map[string]string{},
+ Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
+ 0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
+ 0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
+ 0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
+ },
+ }
+}
+
+func _() {
+ var Universe = Scope{
+ Names: map[string]*Ident{
+ // basic types
+ "bool": nil,
+ "byte": nil,
+ "int8": nil,
+ "int16": nil,
+ "int32": nil,
+ "int64": nil,
+ "uint8": nil,
+ "uint16": nil,
+ "uint32": nil,
+ "uint64": nil,
+ "float32": nil,
+ "float64": nil,
+ "string": nil,
+
+ // convenience types
+ "int": nil,
+ "uint": nil,
+ "uintptr": nil,
+ "float": nil,
+
+ // constants
+ "false": nil,
+ "true": nil,
+ "iota": nil,
+ "nil": nil,
+
+ // functions
+ "cap": nil,
+ "len": nil,
+ "new": nil,
+ "make": nil,
+ "panic": nil,
+ "panicln": nil,
+ "print": nil,
+ "println": nil,
+ },
+ }
+}
+
+// alignment of map composite entries
+var _ = map[int]int{
+ // small key sizes: always align even if size ratios are large
+ a: a,
+ abcdefghabcdefgh: a,
+ ab: a,
+ abc: a,
+ abcdefgabcdefg: a,
+ abcd: a,
+ abcde: a,
+ abcdef: a,
+
+ // mixed key sizes: align when key sizes change within accepted ratio
+ abcdefgh: a,
+ abcdefghabcdefg: a,
+ abcdefghij: a,
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // outlier - do not align with previous line
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // align with previous line
+
+ ab: a, // do not align with previous line
+ abcde: a, // align with previous line
+}
+
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+ a: x,
+ b: y,
+ cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+ a: x,
+ b: y,
+ ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+ aaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// not aligned
+var _ = T4{
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// no alignment of map composite entries if they are not the first entry on a line
+var _ = T{0: 0} // not aligned
+var _ = T{0: 0, // not aligned
+ 1: 1, // aligned
+ 22: 22, // aligned
+ 333: 333, 1234: 12, 12345: 0, // first on line aligned
+}
+
+// test cases form issue 8685
+// not aligned
+var _ = map[int]string{1: "spring", 2: "summer",
+ 3: "autumn", 4: "winter"}
+
+// not aligned
+var _ = map[string]string{"a": "spring", "b": "summer",
+ "c": "autumn", "d": "winter"}
+
+// aligned
+var _ = map[string]string{"a": "spring",
+ "b": "summer",
+ "c": "autumn",
+ "d": "winter"}
+
+func _() {
+ var _ = T{
+ a, // must introduce trailing comma
+ }
+}
+
+// formatting of function results
+func _() func() {}
+func _() func(int) { return nil }
+func _() func(int) int { return nil }
+func _() func(int) func(int) func() { return nil }
+
+// formatting of consecutive single-line functions
+func _() {}
+func _() {}
+func _() {}
+
+func _() {} // an empty line before this function
+func _() {}
+func _() {}
+
+func _() { f(1, 2, 3) }
+func _(x int) int { y := x; return y + 1 }
+func _() int { type T struct{}; var x T; return x }
+
+// these must remain multi-line since they are multi-line in the source
+func _() {
+ f(1, 2, 3)
+}
+func _(x int) int {
+ y := x
+ return y + 1
+}
+func _() int {
+ type T struct{}
+ var x T
+ return x
+}
+
+// making function declarations safe for new semicolon rules
+func _() { /* single-line function because of "short-ish" comment */ }
+func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */
+}
+
+func _() {
+ /* multi-line func because block is on multiple lines */
+}
+
+// test case for issue #19544
+func _() {}
+func _longer_name_() { // this comment must not force the {} from above to alignment
+ // multiple lines
+}
+
+// ellipsis parameters
+func _(...int)
+func _(...*int)
+func _(...[]int)
+func _(...struct{})
+func _(bool, ...interface{})
+func _(bool, ...func())
+func _(bool, ...func(...int))
+func _(bool, ...map[string]int)
+func _(bool, ...chan int)
+
+func _(b bool, x ...int)
+func _(b bool, x ...*int)
+func _(b bool, x ...[]int)
+func _(b bool, x ...struct{})
+func _(x ...interface{})
+func _(x ...func())
+func _(x ...func(...int))
+func _(x ...map[string]int)
+func _(x ...chan int)
+
+// these parameter lists must remain multi-line since they are multi-line in the source
+func _(bool,
+ int) {
+}
+func _(x bool,
+ y int) {
+}
+func _(x,
+ y bool) {
+}
+func _(bool, // comment
+ int) {
+}
+func _(x bool, // comment
+ y int) {
+}
+func _(x, // comment
+ y bool) {
+}
+func _(bool, // comment
+ // comment
+ int) {
+}
+func _(x bool, // comment
+ // comment
+ y int) {
+}
+func _(x, // comment
+ // comment
+ y bool) {
+}
+func _(bool,
+ // comment
+ int) {
+}
+func _(x bool,
+ // comment
+ y int) {
+}
+func _(x,
+ // comment
+ y bool) {
+}
+func _(x, // comment
+ y, // comment
+ z bool) {
+}
+func _(x, // comment
+ y, // comment
+ z bool) {
+}
+func _(x int, // comment
+ y float, // comment
+ z bool) {
+}
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+ a, b, c int,
+) {
+}
+
+func MultiLineSignature1(
+ a, b, c int,
+ u, v, w float,
+) {
+}
+
+func MultiLineSignature2(
+ a, b,
+ c int,
+) {
+}
+
+func MultiLineSignature3(
+ a, b,
+ c int, u, v,
+ w float,
+ x ...int) {
+}
+
+func MultiLineSignature4(
+ a, b, c int,
+ u, v,
+ w float,
+ x ...int) {
+}
+
+func MultiLineSignature5(
+ a, b, c int,
+ u, v, w float,
+ p, q,
+ r string,
+ x ...int) {
+}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+ MultiLineSignature0(
+ a, b, c int,
+ )
+
+ MultiLineSignature1(
+ a, b, c int,
+ u, v, w float,
+ )
+
+ MultiLineSignature2(
+ a, b,
+ c int,
+ )
+
+ MultiLineSignature3(
+ a, b,
+ c int, u, v,
+ w float,
+ x ...int)
+
+ MultiLineSignature4(
+ a, b, c int,
+ u, v,
+ w float,
+ x ...int)
+
+ MultiLineSignature5(
+ a, b, c int,
+ u, v, w float,
+ p, q,
+ r string,
+ x ...int)
+}
+
+// omit superfluous parentheses in parameter lists
+func _(int)
+func _(int)
+func _(x int)
+func _(x int)
+func _(x, y int)
+func _(x, y int)
+
+func _() int
+func _() int
+func _() int
+
+func _() (x int)
+func _() (x int)
+func _() (x int)
+
+// special cases: some channel types require parentheses
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+func _(x chan (<-chan int))
+
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+func _(x chan<- (chan int))
+
+// don't introduce comma after last parameter if the closing ) is on the same line
+// even if the parameter type itself is multi-line (test cases from issue 4533)
+func _(...interface{})
+func _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func (t *T) _(...interface{})
+func (t *T) _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func _(interface{})
+func _(interface {
+ m()
+}) // no extra comma between } and )
+
+func _(struct{})
+func _(struct {
+ x int
+ y int
+}) // no extra comma between } and )
+
+// alias declarations
+
+type c0 struct{}
+type c1 = C
+type c2 = struct{ x int }
+type c3 = p.C
+type (
+ s struct{}
+ a = A
+ b = A
+ c = foo
+ d = interface{}
+ ddd = p.Foo
+)
diff --git a/src/go/printer/testdata/declarations.input b/src/go/printer/testdata/declarations.input
new file mode 100644
index 0000000..f34395b
--- /dev/null
+++ b/src/go/printer/testdata/declarations.input
@@ -0,0 +1,1021 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package imports
+
+import "io"
+
+import (
+ _ "io"
+)
+
+import _ "io"
+
+import (
+ "io"
+ "io"
+ "io"
+)
+
+import (
+ "io"
+ aLongRename "io"
+
+ b "io"
+)
+
+import (
+ "unrenamed"
+ renamed "renameMe"
+ . "io"
+ _ "io"
+ "io"
+ . "os"
+)
+
+// no newlines between consecutive single imports, but
+// respect extra line breaks in the source (at most one empty line)
+import _ "io"
+import _ "io"
+import _ "io"
+
+import _ "os"
+import _ "os"
+import _ "os"
+
+
+import _ "fmt"
+import _ "fmt"
+import _ "fmt"
+
+import "foo" // a comment
+import "bar" // a comment
+
+import (
+ _ "foo"
+ // a comment
+ "bar"
+ "foo" // a comment
+ "bar" // a comment
+)
+
+// comments + renames
+import (
+ "unrenamed" // a comment
+ renamed "renameMe"
+ . "io" /* a comment */
+ _ "io/ioutil" // a comment
+ "io" // testing alignment
+ . "os"
+ // a comment
+)
+
+// a case that caused problems in the past (comment placement)
+import (
+ . "fmt"
+ "io"
+ "malloc" // for the malloc count test only
+ "math"
+ "strings"
+ "testing"
+)
+
+// more import examples
+import (
+ "xxx"
+ "much_longer_name" // comment
+ "short_name" // comment
+)
+
+import (
+ _ "xxx"
+ "much_longer_name" // comment
+)
+
+import (
+ mymath "math"
+ "/foo/bar/long_package_path" // a comment
+)
+
+import (
+ "package_a" // comment
+ "package_b"
+ my_better_c "package_c" // comment
+ "package_d" // comment
+ my_e "package_e" // comment
+
+ "package_a" // comment
+ "package_bb"
+ "package_ccc" // comment
+ "package_dddd" // comment
+)
+
+// print import paths as double-quoted strings
+// (we would like more test cases but the go/parser
+// already excludes most incorrect paths, and we don't
+// bother setting up test-ASTs manually)
+import (
+ `fmt`
+ "math"
+)
+
+// at least one empty line between declarations of different kind
+import _ "io"
+var _ int
+
+// at least one empty line between declarations of the same kind
+// if there is associated documentation (was issue 2570)
+type T1 struct{}
+// T2 comment
+type T2 struct {
+} // should be a two-line struct
+
+
+// T3 comment
+type T2 struct {
+
+
+} // should be a two-line struct
+
+
+// printing of constant literals
+const (
+ _ = "foobar"
+ _ = "a۰۱۸"
+ _ = "foo६४"
+ _ = "bar9876"
+ _ = 0
+ _ = 1
+ _ = 123456789012345678890
+ _ = 01234567
+ _ = 0xcafebabe
+ _ = 0.
+ _ = .0
+ _ = 3.14159265
+ _ = 1e0
+ _ = 1e+100
+ _ = 1e-100
+ _ = 2.71828e-1000
+ _ = 0i
+ _ = 1i
+ _ = 012345678901234567889i
+ _ = 123456789012345678890i
+ _ = 0.i
+ _ = .0i
+ _ = 3.14159265i
+ _ = 1e0i
+ _ = 1e+100i
+ _ = 1e-100i
+ _ = 2.71828e-1000i
+ _ = 'a'
+ _ = '\000'
+ _ = '\xFF'
+ _ = '\uff16'
+ _ = '\U0000ff16'
+ _ = `foobar`
+ _ = `foo
+---
+---
+bar`
+)
+
+
+func _() {
+ type _ int
+ type _ *int
+ type _ []int
+ type _ map[string]int
+ type _ chan int
+ type _ func() int
+
+ var _ int
+ var _ *int
+ var _ []int
+ var _ map[string]int
+ var _ chan int
+ var _ func() int
+
+ type _ struct{}
+ type _ *struct{}
+ type _ []struct{}
+ type _ map[string]struct{}
+ type _ chan struct{}
+ type _ func() struct{}
+
+ type _ interface{}
+ type _ *interface{}
+ type _ []interface{}
+ type _ map[string]interface{}
+ type _ chan interface{}
+ type _ func() interface{}
+
+ var _ struct{}
+ var _ *struct{}
+ var _ []struct{}
+ var _ map[string]struct{}
+ var _ chan struct{}
+ var _ func() struct{}
+
+ var _ interface{}
+ var _ *interface{}
+ var _ []interface{}
+ var _ map[string]interface{}
+ var _ chan interface{}
+ var _ func() interface{}
+}
+
+
+// don't lose blank lines in grouped declarations
+const (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ = iota
+ _
+
+ // a comment
+ _
+
+ _
+)
+
+
+type (
+ _ int
+ _ struct {}
+
+ _ interface{}
+
+ // a comment
+ _ map[string]int
+)
+
+
+var (
+ _ int = 0
+ _ float = 1
+
+ _ string = "foo"
+
+ _ bool
+
+ // a comment
+ _ bool
+)
+
+
+// don't lose blank lines in this struct
+type _ struct {
+ String struct {
+ Str, Len int
+ }
+ Slice struct {
+ Array, Len, Cap int
+ }
+ Eface struct {
+ Typ, Ptr int
+ }
+
+ UncommonType struct {
+ Name, PkgPath int
+ }
+ CommonType struct {
+ Size, Hash, Alg, Align, FieldAlign, String, UncommonType int
+ }
+ Type struct {
+ Typ, Ptr int
+ }
+ StructField struct {
+ Name, PkgPath, Typ, Tag, Offset int
+ }
+ StructType struct {
+ Fields int
+ }
+ PtrType struct {
+ Elem int
+ }
+ SliceType struct {
+ Elem int
+ }
+ ArrayType struct {
+ Elem, Len int
+ }
+
+ Stktop struct {
+ Stackguard, Stackbase, Gobuf int
+ }
+ Gobuf struct {
+ Sp, Pc, G int
+ }
+ G struct {
+ Stackbase, Sched, Status, Alllink int
+ }
+}
+
+
+// no blank lines in empty structs and interfaces, but leave 1- or 2-line layout alone
+type _ struct{ }
+type _ struct {
+
+}
+
+type _ interface{ }
+type _ interface {
+
+}
+
+
+// no tabs for single or ungrouped decls
+func _() {
+ const xxxxxx = 0
+ type x int
+ var xxx int
+ var yyyy float = 3.14
+ var zzzzz = "bar"
+
+ const (
+ xxxxxx = 0
+ )
+ type (
+ x int
+ )
+ var (
+ xxx int
+ )
+ var (
+ yyyy float = 3.14
+ )
+ var (
+ zzzzz = "bar"
+ )
+}
+
+// tabs for multiple or grouped decls
+func _() {
+ // no entry has a type
+ const (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // some entries have a type
+ const (
+ xxxxxx = 1
+ x = 2
+ xxx = 3
+ yyyyyyyy float = iota
+ yyyy = "bar"
+ yyy
+ yy = 2
+ )
+}
+
+func _() {
+ // no entry has a type
+ var (
+ zzzzzz = 1
+ z = 2
+ zzz = 3
+ )
+ // no entry has a value
+ var (
+ _ int
+ _ float
+ _ string
+
+ _ int // comment
+ _ float // comment
+ _ string // comment
+ )
+ // some entries have a type
+ var (
+ xxxxxx int
+ x float
+ xxx string
+ yyyyyyyy int = 1234
+ y float = 3.14
+ yyyy = "bar"
+ yyy string = "foo"
+ )
+ // mixed entries - all comments should be aligned
+ var (
+ a, b, c int
+ x = 10
+ d int // comment
+ y = 20 // comment
+ f, ff, fff, ffff int = 0, 1, 2, 3 // comment
+ )
+ // respect original line breaks
+ var _ = []T {
+ T{0x20, "Telugu"},
+ }
+ var _ = []T {
+ // respect original line breaks
+ T{0x20, "Telugu"},
+ }
+}
+
+// use the formatted output rather than the input to decide when to align
+// (was issue 4505)
+const (
+ short = 2 * (
+ 1 + 2)
+ aMuchLongerName = 3
+)
+
+var (
+ short = X{
+ }
+ aMuchLongerName = X{}
+
+ x1 = X{} // foo
+ x2 = X{
+ } // foo
+)
+
+func _() {
+ type (
+ xxxxxx int
+ x float
+ xxx string
+ xxxxx []x
+ xx struct{}
+ xxxxxxx struct {
+ _, _ int
+ _ float
+ }
+ xxxx chan<- string
+ )
+}
+
+// alignment of "=" in consecutive lines (extended example from issue 1414)
+const (
+ umax uint = ^uint(0) // maximum value for a uint
+ bpu = 1 << (5 + umax>>63) // bits per uint
+ foo
+ bar = -1
+)
+
+// typical enum
+const (
+ a MyType = iota
+ abcd
+ b
+ c
+ def
+)
+
+// excerpt from godoc.go
+var (
+ goroot = flag.String("goroot", runtime.GOROOT(), "Go root directory")
+ testDir = flag.String("testdir", "", "Go root subdirectory - for testing only (faster startups)")
+ pkgPath = flag.String("path", "", "additional package directories (colon-separated)")
+ filter = flag.String("filter", "", "filter file containing permitted package directory paths")
+ filterMin = flag.Int("filter_minutes", 0, "filter file update interval in minutes; disabled if <= 0")
+ filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
+)
+
+
+// formatting of structs
+type _ struct{}
+
+type _ struct{ /* this comment should be visible */ }
+
+type _ struct{
+ // this comment should be visible and properly indented
+}
+
+type _ struct { // this comment must not change indentation
+ f int
+ f, ff, fff, ffff int
+}
+
+type _ struct {
+ string
+}
+
+type _ struct {
+ string // comment
+}
+
+type _ struct {
+ string "tag"
+}
+
+type _ struct {
+ string "tag" // comment
+}
+
+type _ struct {
+ f int
+}
+
+type _ struct {
+ f int // comment
+}
+
+type _ struct {
+ f int "tag"
+}
+
+type _ struct {
+ f int "tag" // comment
+}
+
+type _ struct {
+ bool
+ a, b, c int
+ int "tag"
+ ES // comment
+ float "tag" // comment
+ f int // comment
+ f, ff, fff, ffff int // comment
+ g float "tag"
+ h float "tag" // comment
+}
+
+type _ struct { a, b,
+c, d int // this line should be indented
+u, v, w, x float // this line should be indented
+p, q,
+r, s float // this line should be indented
+}
+
+
+// difficult cases
+type _ struct {
+ bool // comment
+ text []byte // comment
+}
+
+
+// formatting of interfaces
+type EI interface{}
+
+type _ interface {
+ EI
+}
+
+type _ interface {
+ f()
+ fffff()
+}
+
+type _ interface {
+ EI
+ f()
+ fffffg()
+}
+
+type _ interface { // this comment must not change indentation
+ EI // here's a comment
+ f() // no blank between identifier and ()
+ fffff() // no blank between identifier and ()
+ gggggggggggg(x, y, z int) () // hurray
+}
+
+
+// formatting of variable declarations
+func _() {
+ type day struct { n int; short, long string }
+ var (
+ Sunday = day{ 0, "SUN", "Sunday" }
+ Monday = day{ 1, "MON", "Monday" }
+ Tuesday = day{ 2, "TUE", "Tuesday" }
+ Wednesday = day{ 3, "WED", "Wednesday" }
+ Thursday = day{ 4, "THU", "Thursday" }
+ Friday = day{ 5, "FRI", "Friday" }
+ Saturday = day{ 6, "SAT", "Saturday" }
+ )
+}
+
+
+// formatting of multi-line variable declarations
+var a1, b1, c1 int // all on one line
+
+var a2, b2,
+c2 int // this line should be indented
+
+var (a3, b3,
+c3, d3 int // this line should be indented
+a4, b4, c4 int // this line should be indented
+)
+
+// Test case from issue 3304: multi-line declarations must end
+// a formatting section and not influence indentation of the
+// next line.
+var (
+ minRefreshTimeSec = flag.Int64("min_refresh_time_sec", 604800,
+ "minimum time window between two refreshes for a given user.")
+ x = flag.Int64("refresh_user_rollout_percent", 100,
+ "temporary flag to ramp up the refresh user rpc")
+ aVeryLongVariableName = stats.GetVarInt("refresh-user-count")
+)
+
+func _() {
+ var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
+ Headers: map[string]string{},
+ Bytes: []uint8{0x30, 0x82, 0x1, 0x3a, 0x2, 0x1, 0x0, 0x2,
+ 0x41, 0x0, 0xb2, 0x99, 0xf, 0x49, 0xc4, 0x7d, 0xfa, 0x8c,
+ 0xd4, 0x0, 0xae, 0x6a, 0x4d, 0x1b, 0x8a, 0x3b, 0x6a, 0x13,
+ 0x64, 0x2b, 0x23, 0xf2, 0x8b, 0x0, 0x3b, 0xfb, 0x97, 0x79,
+ },
+ }
+}
+
+
+func _() {
+ var Universe = Scope {
+ Names: map[string]*Ident {
+ // basic types
+ "bool": nil,
+ "byte": nil,
+ "int8": nil,
+ "int16": nil,
+ "int32": nil,
+ "int64": nil,
+ "uint8": nil,
+ "uint16": nil,
+ "uint32": nil,
+ "uint64": nil,
+ "float32": nil,
+ "float64": nil,
+ "string": nil,
+
+ // convenience types
+ "int": nil,
+ "uint": nil,
+ "uintptr": nil,
+ "float": nil,
+
+ // constants
+ "false": nil,
+ "true": nil,
+ "iota": nil,
+ "nil": nil,
+
+ // functions
+ "cap": nil,
+ "len": nil,
+ "new": nil,
+ "make": nil,
+ "panic": nil,
+ "panicln": nil,
+ "print": nil,
+ "println": nil,
+ },
+ }
+}
+
+
+// alignment of map composite entries
+var _ = map[int]int{
+ // small key sizes: always align even if size ratios are large
+ a: a,
+ abcdefghabcdefgh: a,
+ ab: a,
+ abc: a,
+ abcdefgabcdefg: a,
+ abcd: a,
+ abcde: a,
+ abcdef: a,
+
+ // mixed key sizes: align when key sizes change within accepted ratio
+ abcdefgh: a,
+ abcdefghabcdefg: a,
+ abcdefghij: a,
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // outlier - do not align with previous line
+ abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij: a, // align with previous line
+
+ ab: a, // do not align with previous line
+ abcde: a, // align with previous line
+}
+
+// alignment of map composite entries: test cases from issue 3965
+// aligned
+var _ = T1{
+ a: x,
+ b: y,
+ cccccccccccccccccccc: z,
+}
+
+// not aligned
+var _ = T2{
+ a: x,
+ b: y,
+ ccccccccccccccccccccc: z,
+}
+
+// aligned
+var _ = T3{
+ aaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+// not aligned
+var _ = T4{
+ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: x,
+ b: y,
+ c: z,
+}
+
+
+// no alignment of map composite entries if they are not the first entry on a line
+var _ = T{0: 0} // not aligned
+var _ = T{0: 0, // not aligned
+ 1: 1, // aligned
+ 22: 22, // aligned
+ 333: 333, 1234: 12, 12345: 0, // first on line aligned
+}
+
+
+// test cases form issue 8685
+// not aligned
+var _ = map[int]string{1: "spring", 2: "summer",
+ 3: "autumn", 4: "winter"}
+
+// not aligned
+var _ = map[string]string{"a": "spring", "b": "summer",
+ "c": "autumn", "d": "winter"}
+
+// aligned
+var _ = map[string]string{"a": "spring",
+"b": "summer",
+ "c": "autumn",
+"d": "winter"}
+
+
+func _() {
+ var _ = T{
+ a, // must introduce trailing comma
+ }
+}
+
+
+// formatting of function results
+func _() func() {}
+func _() func(int) { return nil }
+func _() func(int) int { return nil }
+func _() func(int) func(int) func() { return nil }
+
+
+// formatting of consecutive single-line functions
+func _() {}
+func _() {}
+func _() {}
+
+func _() {} // an empty line before this function
+func _() {}
+func _() {}
+
+func _() { f(1, 2, 3) }
+func _(x int) int { y := x; return y+1 }
+func _() int { type T struct{}; var x T; return x }
+
+// these must remain multi-line since they are multi-line in the source
+func _() {
+ f(1, 2, 3)
+}
+func _(x int) int {
+ y := x; return y+1
+}
+func _() int {
+ type T struct{}; var x T; return x
+}
+
+
+// making function declarations safe for new semicolon rules
+func _() { /* single-line function because of "short-ish" comment */ }
+func _() { /* multi-line function because of "long-ish" comment - much more comment text is following here */ /* and more */ }
+
+func _() {
+/* multi-line func because block is on multiple lines */ }
+
+// test case for issue #19544
+func _() {}
+func _longer_name_() { // this comment must not force the {} from above to alignment
+ // multiple lines
+}
+
+// ellipsis parameters
+func _(...int)
+func _(...*int)
+func _(...[]int)
+func _(...struct{})
+func _(bool, ...interface{})
+func _(bool, ...func())
+func _(bool, ...func(...int))
+func _(bool, ...map[string]int)
+func _(bool, ...chan int)
+
+func _(b bool, x ...int)
+func _(b bool, x ...*int)
+func _(b bool, x ...[]int)
+func _(b bool, x ...struct{})
+func _(x ...interface{})
+func _(x ...func())
+func _(x ...func(...int))
+func _(x ...map[string]int)
+func _(x ...chan int)
+
+
+// these parameter lists must remain multi-line since they are multi-line in the source
+func _(bool,
+int) {
+}
+func _(x bool,
+y int) {
+}
+func _(x,
+y bool) {
+}
+func _(bool, // comment
+int) {
+}
+func _(x bool, // comment
+y int) {
+}
+func _(x, // comment
+y bool) {
+}
+func _(bool, // comment
+// comment
+int) {
+}
+func _(x bool, // comment
+// comment
+y int) {
+}
+func _(x, // comment
+// comment
+y bool) {
+}
+func _(bool,
+// comment
+int) {
+}
+func _(x bool,
+// comment
+y int) {
+}
+func _(x,
+// comment
+y bool) {
+}
+func _(x, // comment
+y,// comment
+z bool) {
+}
+func _(x, // comment
+ y,// comment
+ z bool) {
+}
+func _(x int, // comment
+ y float, // comment
+ z bool) {
+}
+
+
+// properly indent multi-line signatures
+func ManageStatus(in <-chan *Status, req <-chan Request,
+stat chan<- *TargetInfo,
+TargetHistorySize int) {
+}
+
+func MultiLineSignature0(
+a, b, c int,
+) {}
+
+func MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+) {}
+
+func MultiLineSignature2(
+a, b,
+c int,
+) {}
+
+func MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+ x ...int) {}
+
+func MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+ x ...int) {}
+
+func MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+ x ...int) {}
+
+// make sure it also works for methods in interfaces
+type _ interface {
+MultiLineSignature0(
+a, b, c int,
+)
+
+MultiLineSignature1(
+a, b, c int,
+u, v, w float,
+)
+
+MultiLineSignature2(
+a, b,
+c int,
+)
+
+MultiLineSignature3(
+a, b,
+c int, u, v,
+w float,
+ x ...int)
+
+MultiLineSignature4(
+a, b, c int,
+u, v,
+w float,
+ x ...int)
+
+MultiLineSignature5(
+a, b, c int,
+u, v, w float,
+p, q,
+r string,
+ x ...int)
+}
+
+// omit superfluous parentheses in parameter lists
+func _((int))
+func _((((((int))))))
+func _(x (int))
+func _(x (((((int))))))
+func _(x, y (int))
+func _(x, y (((((int))))))
+
+func _() (int)
+func _() ((int))
+func _() ((((((int))))))
+
+func _() (x int)
+func _() (x (int))
+func _() (x (((((int))))))
+
+// special cases: some channel types require parentheses
+func _(x chan(<-chan int))
+func _(x (chan(<-chan int)))
+func _(x ((((chan(<-chan int))))))
+
+func _(x chan<-(chan int))
+func _(x (chan<-(chan int)))
+func _(x ((((chan<-(chan int))))))
+
+// don't introduce comma after last parameter if the closing ) is on the same line
+// even if the parameter type itself is multi-line (test cases from issue 4533)
+func _(...interface{})
+func _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func (t *T) _(...interface{})
+func (t *T) _(...interface {
+ m()
+ n()
+}) // no extra comma between } and )
+
+func _(interface{})
+func _(interface {
+ m()
+}) // no extra comma between } and )
+
+func _(struct{})
+func _(struct {
+ x int
+ y int
+}) // no extra comma between } and )
+
+// alias declarations
+
+type c0 struct{}
+type c1 = C
+type c2 = struct{ x int}
+type c3 = p.C
+type (
+ s struct{}
+ a = A
+ b = A
+ c = foo
+ d = interface{}
+ ddd = p.Foo
+)
diff --git a/src/go/printer/testdata/doc.golden b/src/go/printer/testdata/doc.golden
new file mode 100644
index 0000000..7ac241a
--- /dev/null
+++ b/src/go/printer/testdata/doc.golden
@@ -0,0 +1,21 @@
+package p
+
+/*
+Doc comment.
+
+ - List1.
+
+ - List2.
+*/
+var X int
+
+/* erroneous doc comment */
+var Y int
+
+/*
+ * Another erroneous
+ * doc comment.
+ */
+var Z int
+
+
diff --git a/src/go/printer/testdata/doc.input b/src/go/printer/testdata/doc.input
new file mode 100644
index 0000000..5c057ed
--- /dev/null
+++ b/src/go/printer/testdata/doc.input
@@ -0,0 +1,20 @@
+package p
+
+/*
+Doc comment.
+ - List1.
+
+ - List2.
+*/
+var X int
+
+/* erroneous doc comment */
+var Y int
+
+/*
+ * Another erroneous
+ * doc comment.
+ */
+var Z int
+
+
diff --git a/src/go/printer/testdata/empty.golden b/src/go/printer/testdata/empty.golden
new file mode 100644
index 0000000..a055f47
--- /dev/null
+++ b/src/go/printer/testdata/empty.golden
@@ -0,0 +1,5 @@
+// a comment at the beginning of the file
+
+package empty
+
+// a comment at the end of the file
diff --git a/src/go/printer/testdata/empty.input b/src/go/printer/testdata/empty.input
new file mode 100644
index 0000000..a055f47
--- /dev/null
+++ b/src/go/printer/testdata/empty.input
@@ -0,0 +1,5 @@
+// a comment at the beginning of the file
+
+package empty
+
+// a comment at the end of the file
diff --git a/src/go/printer/testdata/expressions.golden b/src/go/printer/testdata/expressions.golden
new file mode 100644
index 0000000..16a68c7
--- /dev/null
+++ b/src/go/printer/testdata/expressions.golden
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+ x, y, z int
+}
+
+var (
+ a, b, c, d, e int
+ under_bar int
+ longIdentifier1, longIdentifier2, longIdentifier3 int
+ t0, t1, t2 T
+ s string
+ p *int
+)
+
+func _() {
+ // no spaces around simple or parenthesized expressions
+ _ = (a + 0)
+ _ = a + b
+ _ = a + b + c
+ _ = a + b - c
+ _ = a - b - c
+ _ = a + (b * c)
+ _ = a + (b / c)
+ _ = a - (b % c)
+ _ = 1 + a
+ _ = a + 1
+ _ = a + b + 1
+ _ = s[a]
+ _ = s[a:]
+ _ = s[:b]
+ _ = s[1:2]
+ _ = s[a:b]
+ _ = s[0:len(s)]
+ _ = s[0] << 1
+ _ = (s[0] << 1) & 0xf
+ _ = s[0]<<2 | s[1]>>4
+ _ = "foo" + s
+ _ = s + "foo"
+ _ = 'a' + 'b'
+ _ = len(s) / 2
+ _ = len(t0.x) / a
+
+ // spaces around expressions of different precedence or expressions containing spaces
+ _ = a + -b
+ _ = a - ^b
+ _ = a / *p
+ _ = a + b*c
+ _ = 1 + b*c
+ _ = a + 2*c
+ _ = a + c*2
+ _ = 1 + 2*3
+ _ = s[1 : 2*3]
+ _ = s[a : b-c]
+ _ = s[0:]
+ _ = s[a+b]
+ _ = s[:b-c]
+ _ = s[a+b:]
+ _ = a[a<<b+1]
+ _ = a[a<<b+1:]
+ _ = s[a+b : len(s)]
+ _ = s[len(s):-a]
+ _ = s[a : len(s)+1]
+ _ = s[a:len(s)+1] + s
+
+ // spaces around operators with equal or lower precedence than comparisons
+ _ = a == b
+ _ = a != b
+ _ = a > b
+ _ = a >= b
+ _ = a < b
+ _ = a <= b
+ _ = a < b && c > d
+ _ = a < b || c > d
+
+ // spaces around "long" operands
+ _ = a + longIdentifier1
+ _ = longIdentifier1 + a
+ _ = longIdentifier1 + longIdentifier2*longIdentifier3
+ _ = s + "a longer string"
+
+ // some selected cases
+ _ = a + t0.x
+ _ = a + t0.x + t1.x*t2.x
+ _ = a + b + c + d + e + 2*3
+ _ = a + b + c + 2*3 + d + e
+ _ = (a + b + c) * 2
+ _ = a - b + c - d + (a + b + c) + d&e
+ _ = under_bar - 1
+ _ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
+ _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+ // test case for issue 8021
+ // want:
+ // ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+ _ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
+ a + b
+ a + b + c
+ a + (b * c)
+ a + (b / c)
+ 1 + a
+ a + 1
+ s[a]
+ x << 1
+ (s[0] << 1) & 0xf
+ "foo" + s
+ x == y
+ x < y || z > 42
+}
+
+// slice expressions with cap
+func _() {
+ _ = x[a:b:c]
+ _ = x[a : b : c+d]
+ _ = x[a : b+d : c]
+ _ = x[a : b+d : c+d]
+ _ = x[a+d : b : c]
+ _ = x[a+d : b : c+d]
+ _ = x[a+d : b+d : c]
+ _ = x[a+d : b+d : c+d]
+
+ _ = x[:b:c]
+ _ = x[: b : c+d]
+ _ = x[: b+d : c]
+ _ = x[: b+d : c+d]
+}
+
+func issue22111() {
+ _ = x[:]
+
+ _ = x[:b]
+ _ = x[:b+1]
+
+ _ = x[a:]
+ _ = x[a+1:]
+
+ _ = x[a:b]
+ _ = x[a+1 : b]
+ _ = x[a : b+1]
+ _ = x[a+1 : b+1]
+
+ _ = x[:b:c]
+ _ = x[: b+1 : c]
+ _ = x[: b : c+1]
+ _ = x[: b+1 : c+1]
+
+ _ = x[a:b:c]
+ _ = x[a+1 : b : c]
+ _ = x[a : b+1 : c]
+ _ = x[a+1 : b+1 : c]
+ _ = x[a : b : c+1]
+ _ = x[a+1 : b : c+1]
+ _ = x[a : b+1 : c+1]
+ _ = x[a+1 : b+1 : c+1]
+}
+
+func _() {
+ _ = a + b
+ _ = a + b + c
+ _ = a + b*c
+ _ = a + (b * c)
+ _ = (a + b) * c
+ _ = a + (b * c * d)
+ _ = a + (b*c + d)
+
+ _ = 1 << x
+ _ = -1 << x
+ _ = 1<<x - 1
+ _ = -1<<x - 1
+
+ _ = f(a + b)
+ _ = f(a + b + c)
+ _ = f(a + b*c)
+ _ = f(a + (b * c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize - 1
+
+ buf = make(x, 2*cap(b.buf)+n)
+
+ dst[i*3+2] = dbuf[0] << 2
+ dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+ b.buf = b.buf[0 : b.off+m+n]
+ b.buf = b.buf[0 : b.off+m*n]
+ f(b.buf[0 : b.off+m+n])
+
+ signed += ' ' * 8
+ tw.octal(header[148:155], chksum)
+
+ _ = x > 0 && i >= 0
+
+ x1, x0 := x>>w2, x&m2
+ z0 = t1<<w2 + t0
+ z1 = (t1 + t0>>w2) >> w2
+ q1, r1 := x1/d1, x1%d1
+ r1 = r1*b2 | x0>>w2
+ x1 = (x1 << z) | (x0 >> (uint(w) - z))
+ x1 = x1<<z | x0>>(uint(w)-z)
+
+ _ = buf[0 : len(buf)+1]
+ _ = buf[0 : n+1]
+
+ a, b = b, a
+ a = b + c
+ a = b*c + d
+ _ = a*b + c
+ _ = a - b - c
+ _ = a - (b - c)
+ _ = a - b*c
+ _ = a - (b * c)
+ _ = a * b / c
+ _ = a / *b
+ _ = x[a|^b]
+ _ = x[a / *b]
+ _ = a & ^b
+ _ = a + +b
+ _ = a - -b
+ _ = x[a*-b]
+ _ = x[a + +b]
+ _ = x ^ y ^ z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName) * 2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func f(x int, args ...int) {
+ f(0, args...)
+ f(1, args)
+ f(2, args[0])
+
+ // make sure syntactically legal code remains syntactically legal
+ f(3, 42 ...) // a blank must remain between 42 and ...
+ f(4, 42....)
+ f(5, 42....)
+ f(6, 42.0...)
+ f(7, 42.0...)
+ f(8, .42...)
+ f(9, .42...)
+ f(10, 42e0...)
+ f(11, 42e0...)
+
+ _ = 42 .x // a blank must remain between 42 and .x
+ _ = 42..x
+ _ = 42..x
+ _ = 42.0.x
+ _ = 42.0.x
+ _ = .42.x
+ _ = .42.x
+ _ = 42e0.x
+ _ = 42e0.x
+
+ // a blank must remain between the binary operator and the 2nd operand
+ _ = x / *y
+ _ = x < -1
+ _ = x < <-1
+ _ = x + +1
+ _ = x - -1
+ _ = x & &x
+ _ = x & ^x
+
+ _ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
+}
+
+func _() {
+ _ = T{}
+ _ = struct{}{}
+ _ = [10]T{}
+ _ = [...]T{}
+ _ = []T{}
+ _ = map[int]T{}
+}
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+ _ = struct{}{}
+ _ = struct{ x int }{0}
+ _ = struct{ x, y, z int }{0, 1, 2}
+ _ = struct{ int }{0}
+ _ = struct{ s struct{ int } }{struct{ int }{0}}
+
+ _ = (interface{})(nil)
+ _ = (interface{ String() string })(nil)
+ _ = (interface {
+ String() string
+ })(nil)
+ _ = (interface{ fmt.Stringer })(nil)
+ _ = (interface {
+ fmt.Stringer
+ })(nil)
+}
+
+func _() {
+ // do not modify literals
+ _ = "tab1 tab2 tab3 end" // string contains 3 tabs
+ _ = "tab1 tab2 tab3 end" // same string with 3 blanks - may be unaligned because editors see tabs in strings
+ _ = "" // this comment should be aligned with the one on the previous line
+ _ = ``
+ _ = `
+`
+ _ = `foo
+ bar`
+ _ = `three spaces before the end of the line starting here:
+they must not be removed`
+}
+
+func _() {
+ // smart handling of indentation for multi-line raw strings
+ var _ = ``
+ var _ = `foo`
+ var _ = `foo
+bar`
+
+ var _ = ``
+ var _ = `foo`
+ var _ =
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = // comment
+ ``
+ var _ = // comment
+ `foo`
+ var _ = // comment
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = /* comment */ ``
+ var _ = /* comment */ `foo`
+ var _ = /* comment */ `foo
+bar`
+
+ var _ = /* comment */
+ ``
+ var _ = /* comment */
+ `foo`
+ var _ = /* comment */
+ // the next line should remain indented
+ `foo
+bar`
+
+ var board = []int(
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+ var state = S{
+ "foo",
+ // the next line should remain indented
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+ "bar",
+ }
+}
+
+func _() {
+ // one-line function literals (body is on a single line)
+ _ = func() {}
+ _ = func() int { return 0 }
+ _ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
+
+ // multi-line function literals (body is not on one line)
+ _ = func() {
+ }
+ _ = func() int {
+ return 0
+ }
+ _ = func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ }
+
+ f(func() {
+ })
+ f(func() int {
+ return 0
+ })
+ f(func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ })
+}
+
+func _() {
+ _ = [][]int{
+ []int{1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ {1, 2},
+ {1, 2, 3},
+ }
+ _ = [][]int{{1}, {1, 2}, {1, 2, 3}}
+}
+
+// various multi-line expressions
+func _() {
+ // do not add extra indentation to multi-line string lists
+ _ = "foo" + "bar"
+ _ = "foo" +
+ "bar" +
+ "bah"
+ _ = []string{
+ "abc" +
+ "def",
+ "foo" +
+ "bar",
+ }
+}
+
+const _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+func _() {
+ _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+ _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+ _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+}
+
+func _() {
+ // respect source lines in multi-line expressions
+ _ = a +
+ b +
+ c
+ _ = a < b ||
+ b < a
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+}
+
+// Alignment after overlong lines
+const (
+ _ = "991"
+ _ = "2432902008176640000" // 20!
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+)
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+ _ = a + // comment
+ b + // comment
+ c
+ _ = "a" +
+ "b" + // comment
+ "c"
+ _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
+}
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+ f(1,
+ 2,
+ 3)
+ f(1,
+ 2,
+ 3,
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+}
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+ draw.Yellow, // yellow
+ draw.Cyan, // cyan
+ draw.Green, // lime green
+ draw.GreyBlue, // slate
+ draw.Red, /* red */
+ draw.GreyGreen, /* olive green */
+ draw.Blue, /* blue */
+ draw.Color(0xFF55AAFF), /* pink */
+ draw.Color(0xFFAAFFFF), /* lavender */
+ draw.Color(0xBB005DFF), /* maroon */
+}
+
+func same(t, u *Time) bool {
+ // respect source lines in multi-line expressions
+ return t.Year == u.Year &&
+ t.Month == u.Month &&
+ t.Day == u.Day &&
+ t.Hour == u.Hour &&
+ t.Minute == u.Minute &&
+ t.Second == u.Second &&
+ t.Weekday == u.Weekday &&
+ t.ZoneOffset == u.ZoneOffset &&
+ t.Zone == u.Zone
+}
+
+func (p *parser) charClass() {
+ // respect source lines in multi-line expressions
+ if cc.negate && len(cc.ranges) == 2 &&
+ cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+ nl := new(_NotNl)
+ p.re.add(nl)
+ }
+}
+
+func addState(s []state, inst instr, match []int) {
+ // handle comments correctly in multi-line expressions
+ for i := 0; i < l; i++ {
+ if s[i].inst.index() == index && // same instruction
+ s[i].match[0] < pos { // earlier match already going; leftmost wins
+ return s
+ }
+ }
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+ _ = new(T).
+ foo(1).
+ foo(2).
+ foo(3)
+
+ _ = new(T).
+ foo(1).
+ foo(2). // inline comments
+ foo(3)
+
+ _ = new(T).foo(1).foo(2).foo(3)
+
+ // handle multiline argument list correctly
+ _ = new(T).
+ foo(
+ 1).
+ foo(2)
+
+ _ = new(T).foo(
+ 1).foo(2)
+
+ _ = Array[3+
+ 4]
+
+ _ = Method(1, 2,
+ 3)
+
+ _ = new(T).
+ foo().
+ bar().(*Type)
+
+ _ = new(T).
+ foo().
+ bar().(*Type).
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()["idx"]
+
+ _ = new(T).
+ foo().
+ bar()["idx"].
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()[1:2]
+
+ _ = new(T).
+ foo().
+ bar()[1:2].
+ baz()
+
+ _ = new(T).
+ Field.
+ Array[3+
+ 4].
+ Table["foo"].
+ Blob.(*Type).
+ Slices[1:4].
+ Method(1, 2,
+ 3).
+ Thingy
+
+ _ = a.b.c
+ _ = a.
+ b.
+ c
+ _ = a.b().c
+ _ = a.
+ b().
+ c
+ _ = a.b[0].c
+ _ = a.
+ b[0].
+ c
+ _ = a.b[0:].c
+ _ = a.
+ b[0:].
+ c
+ _ = a.b.(T).c
+ _ = a.
+ b.(T).
+ c
+}
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+}
+
+func _() {
+ _ = f().
+ f(func() {
+ f()
+ }).
+ f(map[int]int{
+ 1: 2,
+ 3: 4,
+ })
+
+ _ = f().
+ f(
+ func() {
+ f()
+ },
+ )
+}
diff --git a/src/go/printer/testdata/expressions.input b/src/go/printer/testdata/expressions.input
new file mode 100644
index 0000000..8c523b6
--- /dev/null
+++ b/src/go/printer/testdata/expressions.input
@@ -0,0 +1,771 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+ x, y, z int
+}
+
+var (
+ a, b, c, d, e int
+ under_bar int
+ longIdentifier1, longIdentifier2, longIdentifier3 int
+ t0, t1, t2 T
+ s string
+ p *int
+)
+
+
+func _() {
+ // no spaces around simple or parenthesized expressions
+ _ = (a+0)
+ _ = a+b
+ _ = a+b+c
+ _ = a+b-c
+ _ = a-b-c
+ _ = a+(b*c)
+ _ = a+(b/c)
+ _ = a-(b%c)
+ _ = 1+a
+ _ = a+1
+ _ = a+b+1
+ _ = s[a]
+ _ = s[a:]
+ _ = s[:b]
+ _ = s[1:2]
+ _ = s[a:b]
+ _ = s[0:len(s)]
+ _ = s[0]<<1
+ _ = (s[0]<<1)&0xf
+ _ = s[0] << 2 | s[1] >> 4
+ _ = "foo"+s
+ _ = s+"foo"
+ _ = 'a'+'b'
+ _ = len(s)/2
+ _ = len(t0.x)/a
+
+ // spaces around expressions of different precedence or expressions containing spaces
+ _ = a + -b
+ _ = a - ^b
+ _ = a / *p
+ _ = a + b*c
+ _ = 1 + b*c
+ _ = a + 2*c
+ _ = a + c*2
+ _ = 1 + 2*3
+ _ = s[1 : 2*3]
+ _ = s[a : b-c]
+ _ = s[0:]
+ _ = s[a+b]
+ _ = s[: b-c]
+ _ = s[a+b :]
+ _ = a[a<<b+1]
+ _ = a[a<<b+1 :]
+ _ = s[a+b : len(s)]
+ _ = s[len(s) : -a]
+ _ = s[a : len(s)+1]
+ _ = s[a : len(s)+1]+s
+
+ // spaces around operators with equal or lower precedence than comparisons
+ _ = a == b
+ _ = a != b
+ _ = a > b
+ _ = a >= b
+ _ = a < b
+ _ = a <= b
+ _ = a < b && c > d
+ _ = a < b || c > d
+
+ // spaces around "long" operands
+ _ = a + longIdentifier1
+ _ = longIdentifier1 + a
+ _ = longIdentifier1 + longIdentifier2 * longIdentifier3
+ _ = s + "a longer string"
+
+ // some selected cases
+ _ = a + t0.x
+ _ = a + t0.x + t1.x * t2.x
+ _ = a + b + c + d + e + 2*3
+ _ = a + b + c + 2*3 + d + e
+ _ = (a+b+c)*2
+ _ = a - b + c - d + (a+b+c) + d&e
+ _ = under_bar-1
+ _ = Open(dpath + "/file", O_WRONLY | O_CREAT, 0666)
+ _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+ // test case for issue 8021
+ // want:
+ // ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+ _ = ([]bool{})[([]int{})[((1) + (((((1) + (((((((1) * (((((1) + (1))) + (1))))) + (1))) * (1))))) + (1))))]]
+
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
+ a+b
+ a+b+c
+ a+(b*c)
+ a+(b/c)
+ 1+a
+ a+1
+ s[a]
+ x<<1
+ (s[0]<<1)&0xf
+ "foo"+s
+ x == y
+ x < y || z > 42
+}
+
+
+// slice expressions with cap
+func _() {
+ _ = x[a:b:c]
+ _ = x[a:b:c+d]
+ _ = x[a:b+d:c]
+ _ = x[a:b+d:c+d]
+ _ = x[a+d:b:c]
+ _ = x[a+d:b:c+d]
+ _ = x[a+d:b+d:c]
+ _ = x[a+d:b+d:c+d]
+
+ _ = x[:b:c]
+ _ = x[:b:c+d]
+ _ = x[:b+d:c]
+ _ = x[:b+d:c+d]
+}
+
+func issue22111() {
+ _ = x[:]
+
+ _ = x[:b]
+ _ = x[:b+1]
+
+ _ = x[a:]
+ _ = x[a+1:]
+
+ _ = x[a:b]
+ _ = x[a+1:b]
+ _ = x[a:b+1]
+ _ = x[a+1:b+1]
+
+ _ = x[:b:c]
+ _ = x[:b+1:c]
+ _ = x[:b:c+1]
+ _ = x[:b+1:c+1]
+
+ _ = x[a:b:c]
+ _ = x[a+1:b:c]
+ _ = x[a:b+1:c]
+ _ = x[a+1:b+1:c]
+ _ = x[a:b:c+1]
+ _ = x[a+1:b:c+1]
+ _ = x[a:b+1:c+1]
+ _ = x[a+1:b+1:c+1]
+}
+
+func _() {
+ _ = a+b
+ _ = a+b+c
+ _ = a+b*c
+ _ = a+(b*c)
+ _ = (a+b)*c
+ _ = a+(b*c*d)
+ _ = a+(b*c+d)
+
+ _ = 1<<x
+ _ = -1<<x
+ _ = 1<<x-1
+ _ = -1<<x-1
+
+ _ = f(a+b)
+ _ = f(a+b+c)
+ _ = f(a+b*c)
+ _ = f(a+(b*c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize-1
+
+ buf = make(x, 2*cap(b.buf) + n)
+
+ dst[i*3+2] = dbuf[0]<<2
+ dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+ b.buf = b.buf[0:b.off+m+n]
+ b.buf = b.buf[0:b.off+m*n]
+ f(b.buf[0:b.off+m+n])
+
+ signed += ' '*8
+ tw.octal(header[148:155], chksum)
+
+ _ = x > 0 && i >= 0
+
+ x1, x0 := x>>w2, x&m2
+ z0 = t1<<w2+t0
+ z1 = (t1+t0>>w2)>>w2
+ q1, r1 := x1/d1, x1%d1
+ r1 = r1*b2 | x0>>w2
+ x1 = (x1<<z)|(x0>>(uint(w)-z))
+ x1 = x1<<z | x0>>(uint(w)-z)
+
+ _ = buf[0:len(buf)+1]
+ _ = buf[0:n+1]
+
+ a,b = b,a
+ a = b+c
+ a = b*c+d
+ _ = a*b+c
+ _ = a-b-c
+ _ = a-(b-c)
+ _ = a-b*c
+ _ = a-(b*c)
+ _ = a*b/c
+ _ = a/ *b
+ _ = x[a|^b]
+ _ = x[a/ *b]
+ _ = a& ^b
+ _ = a+ +b
+ _ = a- -b
+ _ = x[a*-b]
+ _ = x[a+ +b]
+ _ = x^y^z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName)*2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+
+func f(x int, args ...int) {
+ f(0, args...)
+ f(1, args)
+ f(2, args[0])
+
+ // make sure syntactically legal code remains syntactically legal
+ f(3, 42 ...) // a blank must remain between 42 and ...
+ f(4, 42. ...)
+ f(5, 42....)
+ f(6, 42.0 ...)
+ f(7, 42.0...)
+ f(8, .42 ...)
+ f(9, .42...)
+ f(10, 42e0 ...)
+ f(11, 42e0...)
+
+ _ = 42 .x // a blank must remain between 42 and .x
+ _ = 42. .x
+ _ = 42..x
+ _ = 42.0 .x
+ _ = 42.0.x
+ _ = .42 .x
+ _ = .42.x
+ _ = 42e0 .x
+ _ = 42e0.x
+
+ // a blank must remain between the binary operator and the 2nd operand
+ _ = x/ *y
+ _ = x< -1
+ _ = x< <-1
+ _ = x+ +1
+ _ = x- -1
+ _ = x& &x
+ _ = x& ^x
+
+ _ = f(x/ *y, x< -1, x< <-1, x+ +1, x- -1, x& &x, x& ^x)
+}
+
+
+func _() {
+ _ = T{}
+ _ = struct{}{}
+ _ = [10]T{}
+ _ = [...]T{}
+ _ = []T{}
+ _ = map[int]T{}
+}
+
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+ _ = struct{}{}
+ _ = struct{ x int }{0}
+ _ = struct{ x, y, z int }{0, 1, 2}
+ _ = struct{ int }{0}
+ _ = struct{ s struct { int } }{struct{ int}{0} }
+
+ _ = (interface{})(nil)
+ _ = (interface{String() string})(nil)
+ _ = (interface{
+ String() string
+ })(nil)
+ _ = (interface{fmt.Stringer})(nil)
+ _ = (interface{
+ fmt.Stringer
+ })(nil)
+}
+
+func _() {
+ // do not modify literals
+ _ = "tab1 tab2 tab3 end" // string contains 3 tabs
+ _ = "tab1 tab2 tab3 end" // same string with 3 blanks - may be unaligned because editors see tabs in strings
+ _ = "" // this comment should be aligned with the one on the previous line
+ _ = ``
+ _ = `
+`
+_ = `foo
+ bar`
+ _ = `three spaces before the end of the line starting here:
+they must not be removed`
+}
+
+
+func _() {
+ // smart handling of indentation for multi-line raw strings
+ var _ = ``
+ var _ = `foo`
+ var _ = `foo
+bar`
+
+
+var _ =
+ ``
+var _ =
+ `foo`
+var _ =
+ // the next line should remain indented
+ `foo
+bar`
+
+
+ var _ = // comment
+ ``
+ var _ = // comment
+ `foo`
+ var _ = // comment
+ // the next line should remain indented
+ `foo
+bar`
+
+
+var _ = /* comment */ ``
+var _ = /* comment */ `foo`
+var _ = /* comment */ `foo
+bar`
+
+
+ var _ = /* comment */
+ ``
+ var _ = /* comment */
+ `foo`
+ var _ = /* comment */
+ // the next line should remain indented
+ `foo
+bar`
+
+
+var board = []int(
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+
+ var state = S{
+ "foo",
+ // the next line should remain indented
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+ "bar",
+ }
+}
+
+
+func _() {
+ // one-line function literals (body is on a single line)
+ _ = func() {}
+ _ = func() int { return 0 }
+ _ = func(x, y int) bool { m := (x+y)/2; return m < 0 }
+
+ // multi-line function literals (body is not on one line)
+ _ = func() {
+ }
+ _ = func() int {
+ return 0
+ }
+ _ = func(x, y int) bool {
+ m := (x+y)/2; return x < y }
+
+ f(func() {
+ })
+ f(func() int {
+ return 0
+ })
+ f(func(x, y int) bool {
+ m := (x+y)/2; return x < y })
+}
+
+
+func _() {
+ _ = [][]int {
+ []int{1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int {
+ {1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int {
+ {1},
+ {1, 2},
+ {1, 2, 3},
+ }
+ _ = [][]int {{1}, {1, 2}, {1, 2, 3}}
+}
+
+
+// various multi-line expressions
+func _() {
+ // do not add extra indentation to multi-line string lists
+ _ = "foo" + "bar"
+ _ = "foo" +
+ "bar" +
+ "bah"
+ _ = []string {
+ "abc" +
+ "def",
+ "foo" +
+ "bar",
+ }
+}
+
+
+const _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+
+const _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+
+const _ = `datafmt "datafmt";` +
+`default = "%v";` +
+`array = *;` +
+`datafmt.T3 = s {" " a a / ","};`
+
+
+func _() {
+ _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+ _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+ _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+}
+
+
+func _() {
+ // respect source lines in multi-line expressions
+ _ = a+
+ b+
+ c
+ _ = a < b ||
+ b < a
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+}
+
+
+// Alignment after overlong lines
+const (
+ _ = "991"
+ _ = "2432902008176640000" // 20!
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+)
+
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+ _ = a + // comment
+ b + // comment
+ c
+ _ = "a" +
+ "b" + // comment
+ "c"
+ _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
+}
+
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+ f(1,
+ 2,
+ 3)
+ f(1,
+ 2,
+ 3,
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+ f(1,
+ 2,
+ 3)// comment
+ f(1,
+ 2,
+ 3,// comment
+ )
+}
+
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+ draw.Yellow, // yellow
+ draw.Cyan, // cyan
+ draw.Green, // lime green
+ draw.GreyBlue, // slate
+ draw.Red, /* red */
+ draw.GreyGreen, /* olive green */
+ draw.Blue, /* blue */
+ draw.Color(0xFF55AAFF), /* pink */
+ draw.Color(0xFFAAFFFF), /* lavender */
+ draw.Color(0xBB005DFF), /* maroon */
+}
+
+
+func same(t, u *Time) bool {
+ // respect source lines in multi-line expressions
+ return t.Year == u.Year &&
+ t.Month == u.Month &&
+ t.Day == u.Day &&
+ t.Hour == u.Hour &&
+ t.Minute == u.Minute &&
+ t.Second == u.Second &&
+ t.Weekday == u.Weekday &&
+ t.ZoneOffset == u.ZoneOffset &&
+ t.Zone == u.Zone
+}
+
+
+func (p *parser) charClass() {
+ // respect source lines in multi-line expressions
+ if cc.negate && len(cc.ranges) == 2 &&
+ cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+ nl := new(_NotNl)
+ p.re.add(nl)
+ }
+}
+
+
+func addState(s []state, inst instr, match []int) {
+ // handle comments correctly in multi-line expressions
+ for i := 0; i < l; i++ {
+ if s[i].inst.index() == index && // same instruction
+ s[i].match[0] < pos { // earlier match already going; leftmost wins
+ return s
+ }
+ }
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+ _ = new(T).
+ foo(1).
+ foo(2).
+ foo(3)
+
+ _ = new(T).
+ foo(1).
+ foo(2). // inline comments
+ foo(3)
+
+ _ = new(T).foo(1).foo(2).foo(3)
+
+ // handle multiline argument list correctly
+ _ = new(T).
+ foo(
+ 1).
+ foo(2)
+
+ _ = new(T).foo(
+ 1).foo(2)
+
+ _ = Array[3 +
+4]
+
+ _ = Method(1, 2,
+ 3)
+
+ _ = new(T).
+ foo().
+ bar() . (*Type)
+
+ _ = new(T).
+foo().
+bar().(*Type).
+baz()
+
+ _ = new(T).
+ foo().
+ bar()["idx"]
+
+ _ = new(T).
+ foo().
+ bar()["idx"] .
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()[1:2]
+
+ _ = new(T).
+ foo().
+ bar()[1:2].
+ baz()
+
+ _ = new(T).
+ Field.
+ Array[3+
+ 4].
+ Table ["foo"].
+ Blob. (*Type).
+ Slices[1:4].
+ Method(1, 2,
+ 3).
+ Thingy
+
+ _ = a.b.c
+ _ = a.
+ b.
+ c
+ _ = a.b().c
+ _ = a.
+ b().
+ c
+ _ = a.b[0].c
+ _ = a.
+ b[0].
+ c
+ _ = a.b[0:].c
+ _ = a.
+ b[0:].
+ c
+ _ = a.b.(T).c
+ _ = a.
+ b.
+ (T).
+ c
+}
+
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = func()()(nil)
+ _ = func(x int)(float)(nil)
+ _ = func() func() func()()(nil)
+
+ _ = (func()())(nil)
+ _ = (func(x int)(float))(nil)
+ _ = (func() func() func()())(nil)
+}
+
+func _() {
+ _ = f().
+ f(func() {
+ f()
+ }).
+ f(map[int]int{
+ 1: 2,
+ 3: 4,
+})
+
+ _ = f().
+ f(
+ func() {
+ f()
+ },
+ )
+}
diff --git a/src/go/printer/testdata/expressions.raw b/src/go/printer/testdata/expressions.raw
new file mode 100644
index 0000000..058fded
--- /dev/null
+++ b/src/go/printer/testdata/expressions.raw
@@ -0,0 +1,743 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expressions
+
+type T struct {
+ x, y, z int
+}
+
+var (
+ a, b, c, d, e int
+ under_bar int
+ longIdentifier1, longIdentifier2, longIdentifier3 int
+ t0, t1, t2 T
+ s string
+ p *int
+)
+
+func _() {
+ // no spaces around simple or parenthesized expressions
+ _ = (a + 0)
+ _ = a + b
+ _ = a + b + c
+ _ = a + b - c
+ _ = a - b - c
+ _ = a + (b * c)
+ _ = a + (b / c)
+ _ = a - (b % c)
+ _ = 1 + a
+ _ = a + 1
+ _ = a + b + 1
+ _ = s[a]
+ _ = s[a:]
+ _ = s[:b]
+ _ = s[1:2]
+ _ = s[a:b]
+ _ = s[0:len(s)]
+ _ = s[0] << 1
+ _ = (s[0] << 1) & 0xf
+ _ = s[0]<<2 | s[1]>>4
+ _ = "foo" + s
+ _ = s + "foo"
+ _ = 'a' + 'b'
+ _ = len(s) / 2
+ _ = len(t0.x) / a
+
+ // spaces around expressions of different precedence or expressions containing spaces
+ _ = a + -b
+ _ = a - ^b
+ _ = a / *p
+ _ = a + b*c
+ _ = 1 + b*c
+ _ = a + 2*c
+ _ = a + c*2
+ _ = 1 + 2*3
+ _ = s[1 : 2*3]
+ _ = s[a : b-c]
+ _ = s[0:]
+ _ = s[a+b]
+ _ = s[:b-c]
+ _ = s[a+b:]
+ _ = a[a<<b+1]
+ _ = a[a<<b+1:]
+ _ = s[a+b : len(s)]
+ _ = s[len(s):-a]
+ _ = s[a : len(s)+1]
+ _ = s[a:len(s)+1] + s
+
+ // spaces around operators with equal or lower precedence than comparisons
+ _ = a == b
+ _ = a != b
+ _ = a > b
+ _ = a >= b
+ _ = a < b
+ _ = a <= b
+ _ = a < b && c > d
+ _ = a < b || c > d
+
+ // spaces around "long" operands
+ _ = a + longIdentifier1
+ _ = longIdentifier1 + a
+ _ = longIdentifier1 + longIdentifier2*longIdentifier3
+ _ = s + "a longer string"
+
+ // some selected cases
+ _ = a + t0.x
+ _ = a + t0.x + t1.x*t2.x
+ _ = a + b + c + d + e + 2*3
+ _ = a + b + c + 2*3 + d + e
+ _ = (a + b + c) * 2
+ _ = a - b + c - d + (a + b + c) + d&e
+ _ = under_bar - 1
+ _ = Open(dpath+"/file", O_WRONLY|O_CREAT, 0666)
+ _ = int(c0&_Mask4)<<18 | int(c1&_Maskx)<<12 | int(c2&_Maskx)<<6 | int(c3&_Maskx)
+
+ // test case for issue 8021
+ // want:
+ // ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+ _ = ([]bool{})[([]int{})[((1)+(((1)+((((1)*(((1)+(1))+(1)))+(1))*(1)))+(1)))]]
+
+ // the parser does not restrict expressions that may appear as statements
+ true
+ 42
+ "foo"
+ x
+ (x)
+ a + b
+ a + b + c
+ a + (b * c)
+ a + (b / c)
+ 1 + a
+ a + 1
+ s[a]
+ x << 1
+ (s[0] << 1) & 0xf
+ "foo" + s
+ x == y
+ x < y || z > 42
+}
+
+// slice expressions with cap
+func _() {
+ _ = x[a:b:c]
+ _ = x[a : b : c+d]
+ _ = x[a : b+d : c]
+ _ = x[a : b+d : c+d]
+ _ = x[a+d : b : c]
+ _ = x[a+d : b : c+d]
+ _ = x[a+d : b+d : c]
+ _ = x[a+d : b+d : c+d]
+
+ _ = x[:b:c]
+ _ = x[: b : c+d]
+ _ = x[: b+d : c]
+ _ = x[: b+d : c+d]
+}
+
+func issue22111() {
+ _ = x[:]
+
+ _ = x[:b]
+ _ = x[:b+1]
+
+ _ = x[a:]
+ _ = x[a+1:]
+
+ _ = x[a:b]
+ _ = x[a+1 : b]
+ _ = x[a : b+1]
+ _ = x[a+1 : b+1]
+
+ _ = x[:b:c]
+ _ = x[: b+1 : c]
+ _ = x[: b : c+1]
+ _ = x[: b+1 : c+1]
+
+ _ = x[a:b:c]
+ _ = x[a+1 : b : c]
+ _ = x[a : b+1 : c]
+ _ = x[a+1 : b+1 : c]
+ _ = x[a : b : c+1]
+ _ = x[a+1 : b : c+1]
+ _ = x[a : b+1 : c+1]
+ _ = x[a+1 : b+1 : c+1]
+}
+
+func _() {
+ _ = a + b
+ _ = a + b + c
+ _ = a + b*c
+ _ = a + (b * c)
+ _ = (a + b) * c
+ _ = a + (b * c * d)
+ _ = a + (b*c + d)
+
+ _ = 1 << x
+ _ = -1 << x
+ _ = 1<<x - 1
+ _ = -1<<x - 1
+
+ _ = f(a + b)
+ _ = f(a + b + c)
+ _ = f(a + b*c)
+ _ = f(a + (b * c))
+ _ = f(1<<x-1, 1<<x-2)
+
+ _ = 1<<d.logWindowSize - 1
+
+ buf = make(x, 2*cap(b.buf)+n)
+
+ dst[i*3+2] = dbuf[0] << 2
+ dst[i*3+2] = dbuf[0]<<2 | dbuf[1]>>4
+
+ b.buf = b.buf[0 : b.off+m+n]
+ b.buf = b.buf[0 : b.off+m*n]
+ f(b.buf[0 : b.off+m+n])
+
+ signed += ' ' * 8
+ tw.octal(header[148:155], chksum)
+
+ _ = x > 0 && i >= 0
+
+ x1, x0 := x>>w2, x&m2
+ z0 = t1<<w2 + t0
+ z1 = (t1 + t0>>w2) >> w2
+ q1, r1 := x1/d1, x1%d1
+ r1 = r1*b2 | x0>>w2
+ x1 = (x1 << z) | (x0 >> (uint(w) - z))
+ x1 = x1<<z | x0>>(uint(w)-z)
+
+ _ = buf[0 : len(buf)+1]
+ _ = buf[0 : n+1]
+
+ a, b = b, a
+ a = b + c
+ a = b*c + d
+ _ = a*b + c
+ _ = a - b - c
+ _ = a - (b - c)
+ _ = a - b*c
+ _ = a - (b * c)
+ _ = a * b / c
+ _ = a / *b
+ _ = x[a|^b]
+ _ = x[a / *b]
+ _ = a & ^b
+ _ = a + +b
+ _ = a - -b
+ _ = x[a*-b]
+ _ = x[a + +b]
+ _ = x ^ y ^ z
+ _ = b[a>>24] ^ b[(a>>16)&0xFF] ^ b[(a>>8)&0xFF] ^ b[a&0xFF]
+ _ = len(longVariableName) * 2
+
+ _ = token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func f(x int, args ...int) {
+ f(0, args...)
+ f(1, args)
+ f(2, args[0])
+
+ // make sure syntactically legal code remains syntactically legal
+ f(3, 42 ...) // a blank must remain between 42 and ...
+ f(4, 42....)
+ f(5, 42....)
+ f(6, 42.0...)
+ f(7, 42.0...)
+ f(8, .42...)
+ f(9, .42...)
+ f(10, 42e0...)
+ f(11, 42e0...)
+
+ _ = 42 .x // a blank must remain between 42 and .x
+ _ = 42..x
+ _ = 42..x
+ _ = 42.0.x
+ _ = 42.0.x
+ _ = .42.x
+ _ = .42.x
+ _ = 42e0.x
+ _ = 42e0.x
+
+ // a blank must remain between the binary operator and the 2nd operand
+ _ = x / *y
+ _ = x < -1
+ _ = x < <-1
+ _ = x + +1
+ _ = x - -1
+ _ = x & &x
+ _ = x & ^x
+
+ _ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
+}
+
+func _() {
+ _ = T{}
+ _ = struct{}{}
+ _ = [10]T{}
+ _ = [...]T{}
+ _ = []T{}
+ _ = map[int]T{}
+}
+
+// one-line structs/interfaces in composite literals (up to a threshold)
+func _() {
+ _ = struct{}{}
+ _ = struct{ x int }{0}
+ _ = struct{ x, y, z int }{0, 1, 2}
+ _ = struct{ int }{0}
+ _ = struct{ s struct{ int } }{struct{ int }{0}}
+
+ _ = (interface{})(nil)
+ _ = (interface{ String() string })(nil)
+ _ = (interface {
+ String() string
+ })(nil)
+ _ = (interface{ fmt.Stringer })(nil)
+ _ = (interface {
+ fmt.Stringer
+ })(nil)
+}
+
+func _() {
+ // do not modify literals
+ _ = "tab1 tab2 tab3 end" // string contains 3 tabs
+ _ = "tab1 tab2 tab3 end" // same string with 3 blanks - may be unaligned because editors see tabs in strings
+ _ = "" // this comment should be aligned with the one on the previous line
+ _ = ``
+ _ = `
+`
+ _ = `foo
+ bar`
+ _ = `three spaces before the end of the line starting here:
+they must not be removed`
+}
+
+func _() {
+ // smart handling of indentation for multi-line raw strings
+ var _ = ``
+ var _ = `foo`
+ var _ = `foo
+bar`
+
+ var _ = ``
+ var _ = `foo`
+ var _ =
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = // comment
+ ``
+ var _ = // comment
+ `foo`
+ var _ = // comment
+ // the next line should remain indented
+ `foo
+bar`
+
+ var _ = /* comment */ ``
+ var _ = /* comment */ `foo`
+ var _ = /* comment */ `foo
+bar`
+
+ var _ = /* comment */
+ ``
+ var _ = /* comment */
+ `foo`
+ var _ = /* comment */
+ // the next line should remain indented
+ `foo
+bar`
+
+ var board = []int(
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`)
+
+ var state = S{
+ "foo",
+ // the next line should remain indented
+ `...........
+...........
+....●●●....
+....●●●....
+..●●●●●●●..
+..●●●○●●●..
+..●●●●●●●..
+....●●●....
+....●●●....
+...........
+...........
+`,
+ "bar",
+ }
+}
+
+func _() {
+ // one-line function literals (body is on a single line)
+ _ = func() {}
+ _ = func() int { return 0 }
+ _ = func(x, y int) bool { m := (x + y) / 2; return m < 0 }
+
+ // multi-line function literals (body is not on one line)
+ _ = func() {
+ }
+ _ = func() int {
+ return 0
+ }
+ _ = func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ }
+
+ f(func() {
+ })
+ f(func() int {
+ return 0
+ })
+ f(func(x, y int) bool {
+ m := (x + y) / 2
+ return x < y
+ })
+}
+
+func _() {
+ _ = [][]int{
+ []int{1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ []int{1, 2},
+ []int{1, 2, 3},
+ }
+ _ = [][]int{
+ {1},
+ {1, 2},
+ {1, 2, 3},
+ }
+ _ = [][]int{{1}, {1, 2}, {1, 2, 3}}
+}
+
+// various multi-line expressions
+func _() {
+ // do not add extra indentation to multi-line string lists
+ _ = "foo" + "bar"
+ _ = "foo" +
+ "bar" +
+ "bah"
+ _ = []string{
+ "abc" +
+ "def",
+ "foo" +
+ "bar",
+ }
+}
+
+const _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+const _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+func _() {
+ _ = F1 +
+ `string = "%s";` +
+ `ptr = *;` +
+ `datafmt.T2 = s ["-" p "-"];`
+
+ _ =
+ `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+
+ _ = `datafmt "datafmt";` +
+ `default = "%v";` +
+ `array = *;` +
+ `datafmt.T3 = s {" " a a / ","};`
+}
+
+func _() {
+ // respect source lines in multi-line expressions
+ _ = a +
+ b +
+ c
+ _ = a < b ||
+ b < a
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+}
+
+// Alignment after overlong lines
+const (
+ _ = "991"
+ _ = "2432902008176640000" // 20!
+ _ = "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000" // 100!
+ _ = "170141183460469231731687303715884105727" // prime
+)
+
+// Correct placement of operators and comments in multi-line expressions
+func _() {
+ _ = a + // comment
+ b + // comment
+ c
+ _ = "a" +
+ "b" + // comment
+ "c"
+ _ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
+}
+
+// Correct placement of terminating comma/closing parentheses in multi-line calls.
+func _() {
+ f(1,
+ 2,
+ 3)
+ f(1,
+ 2,
+ 3,
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+ f(1,
+ 2,
+ 3) // comment
+ f(1,
+ 2,
+ 3, // comment
+ )
+}
+
+// Align comments in multi-line lists of single-line expressions.
+var txpix = [NCOL]draw.Color{
+ draw.Yellow, // yellow
+ draw.Cyan, // cyan
+ draw.Green, // lime green
+ draw.GreyBlue, // slate
+ draw.Red, /* red */
+ draw.GreyGreen, /* olive green */
+ draw.Blue, /* blue */
+ draw.Color(0xFF55AAFF), /* pink */
+ draw.Color(0xFFAAFFFF), /* lavender */
+ draw.Color(0xBB005DFF), /* maroon */
+}
+
+func same(t, u *Time) bool {
+ // respect source lines in multi-line expressions
+ return t.Year == u.Year &&
+ t.Month == u.Month &&
+ t.Day == u.Day &&
+ t.Hour == u.Hour &&
+ t.Minute == u.Minute &&
+ t.Second == u.Second &&
+ t.Weekday == u.Weekday &&
+ t.ZoneOffset == u.ZoneOffset &&
+ t.Zone == u.Zone
+}
+
+func (p *parser) charClass() {
+ // respect source lines in multi-line expressions
+ if cc.negate && len(cc.ranges) == 2 &&
+ cc.ranges[0] == '\n' && cc.ranges[1] == '\n' {
+ nl := new(_NotNl)
+ p.re.add(nl)
+ }
+}
+
+func addState(s []state, inst instr, match []int) {
+ // handle comments correctly in multi-line expressions
+ for i := 0; i < l; i++ {
+ if s[i].inst.index() == index && // same instruction
+ s[i].match[0] < pos { // earlier match already going; leftmost wins
+ return s
+ }
+ }
+}
+
+func (self *T) foo(x int) *T { return self }
+
+func _() { module.Func1().Func2() }
+
+func _() {
+ _ = new(T).
+ foo(1).
+ foo(2).
+ foo(3)
+
+ _ = new(T).
+ foo(1).
+ foo(2). // inline comments
+ foo(3)
+
+ _ = new(T).foo(1).foo(2).foo(3)
+
+ // handle multiline argument list correctly
+ _ = new(T).
+ foo(
+ 1).
+ foo(2)
+
+ _ = new(T).foo(
+ 1).foo(2)
+
+ _ = Array[3+
+ 4]
+
+ _ = Method(1, 2,
+ 3)
+
+ _ = new(T).
+ foo().
+ bar().(*Type)
+
+ _ = new(T).
+ foo().
+ bar().(*Type).
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()["idx"]
+
+ _ = new(T).
+ foo().
+ bar()["idx"].
+ baz()
+
+ _ = new(T).
+ foo().
+ bar()[1:2]
+
+ _ = new(T).
+ foo().
+ bar()[1:2].
+ baz()
+
+ _ = new(T).
+ Field.
+ Array[3+
+ 4].
+ Table["foo"].
+ Blob.(*Type).
+ Slices[1:4].
+ Method(1, 2,
+ 3).
+ Thingy
+
+ _ = a.b.c
+ _ = a.
+ b.
+ c
+ _ = a.b().c
+ _ = a.
+ b().
+ c
+ _ = a.b[0].c
+ _ = a.
+ b[0].
+ c
+ _ = a.b[0:].c
+ _ = a.
+ b[0:].
+ c
+ _ = a.b.(T).c
+ _ = a.
+ b.(T).
+ c
+}
+
+// Don't introduce extra newlines in strangely formatted expression lists.
+func f() {
+ // os.Open parameters should remain on two lines
+ if writer, err = os.Open(outfile, s.O_WRONLY|os.O_CREATE|
+ os.O_TRUNC, 0666); err != nil {
+ log.Fatal(err)
+ }
+}
+
+// Handle multi-line argument lists ending in ... correctly.
+// Was issue 3130.
+func _() {
+ _ = append(s, a...)
+ _ = append(
+ s, a...)
+ _ = append(s,
+ a...)
+ _ = append(
+ s,
+ a...)
+ _ = append(s, a...,
+ )
+ _ = append(s,
+ a...,
+ )
+ _ = append(
+ s,
+ a...,
+ )
+}
+
+// Literal function types in conversions must be parenthesized;
+// for now go/parser accepts the unparenthesized form where it
+// is non-ambiguous.
+func _() {
+ // these conversions should be rewritten to look
+ // the same as the parenthesized conversions below
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+
+ _ = (func())(nil)
+ _ = (func(x int) float)(nil)
+ _ = (func() func() func())(nil)
+}
+
+func _() {
+ _ = f().
+ f(func() {
+ f()
+ }).
+ f(map[int]int{
+ 1: 2,
+ 3: 4,
+ })
+
+ _ = f().
+ f(
+ func() {
+ f()
+ },
+ )
+}
diff --git a/src/go/printer/testdata/generics.golden b/src/go/printer/testdata/generics.golden
new file mode 100644
index 0000000..7ddf20b
--- /dev/null
+++ b/src/go/printer/testdata/generics.golden
@@ -0,0 +1,109 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generics
+
+func _[A, B any](a A, b B) int {}
+func _[T any](x, y T) T
+
+type T[P any] struct{}
+type T[P1, P2, P3 any] struct{}
+
+type T[P C] struct{}
+type T[P1, P2, P3 C] struct{}
+
+type T[P C[P]] struct{}
+type T[P1, P2, P3 C[P1, P2, P3]] struct{}
+
+func f[P any](x P)
+func f[P1, P2, P3 any](x1 P1, x2 P2, x3 P3) struct{}
+
+func f[P interface{}](x P)
+func f[P1, P2, P3 interface {
+ m1(P1)
+ ~P2 | ~P3
+}](x1 P1, x2 P2, x3 P3) struct{}
+func f[P any](T1[P], T2[P]) T3[P]
+
+func (x T[P]) m()
+func (T[P]) m(x T[P]) P
+
+func _() {
+ type _ []T[P]
+ var _ []T[P]
+ _ = []T[P]{}
+}
+
+// type constraint literals with elided interfaces
+func _[P ~int, Q int | string]() {}
+func _[P struct{ f int }, Q *P]() {}
+
+// various potentially ambiguous type parameter lists (issue #49482)
+type _[P *T,] struct{}
+type _[P T | T] struct{}
+type _[P T | T | T | T] struct{}
+type _[P *T, _ any] struct{}
+type _[P *T,] struct{}
+type _[P *T, _ any] struct{}
+type _[P T] struct{}
+type _[P T, _ any] struct{}
+
+type _[P *struct{}] struct{}
+type _ [P(*struct{})]struct{}
+type _[P []int] struct{}
+
+// a type literal in an |-expression indicates a type parameter list (blank after type parameter list and type)
+type _[P *[]int] struct{}
+type _[P *T | T, Q T] struct{}
+type _[P *[]T | T] struct{}
+type _[P *T | T | T | T | ~T] struct{}
+type _[P *T | T | T | ~T | T] struct{}
+type _[P *T | T | struct{} | T] struct{}
+type _[P <-chan int] struct{}
+type _[P *T | struct{} | T] struct{}
+
+// a trailing comma always indicates a (possibly invalid) type parameter list (blank after type parameter list and type)
+type _[P *T,] struct{}
+type _[P *T | T,] struct{}
+type _[P *T | <-T | T,] struct{}
+
+// slice/array type declarations (no blank between array length and element type)
+type _ []byte
+type _ [n]byte
+type _ [P(T)]byte
+type _ [P((T))]byte
+type _ [P * *T]byte
+type _ [P * T]byte
+type _ [P(*T)]byte
+type _ [P(**T)]byte
+type _ [P*T - T]byte
+type _ [P*T - T]byte
+type _ [P*T | T]byte
+type _ [P*T | <-T | T]byte
+
+// equivalent test cases for potentially ambiguous type parameter lists, except
+// for function declarations there is no ambiguity (issue #51548)
+func _[P *T]() {}
+func _[P *T, _ any]() {}
+func _[P *T]() {}
+func _[P *T, _ any]() {}
+func _[P T]() {}
+func _[P T, _ any]() {}
+
+func _[P *struct{}]() {}
+func _[P *struct{}]() {}
+func _[P []int]() {}
+
+func _[P T]() {}
+func _[P T]() {}
+func _[P **T]() {}
+func _[P *T]() {}
+func _[P *T]() {}
+func _[P **T]() {}
+func _[P *T]() {}
+
+func _[
+ P *T,
+]() {
+}
diff --git a/src/go/printer/testdata/generics.input b/src/go/printer/testdata/generics.input
new file mode 100644
index 0000000..4940f93
--- /dev/null
+++ b/src/go/printer/testdata/generics.input
@@ -0,0 +1,105 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package generics
+
+func _[A, B any](a A, b B) int {}
+func _[T any](x, y T) T
+
+type T[P any] struct{}
+type T[P1, P2, P3 any] struct{}
+
+type T[P C] struct{}
+type T[P1, P2, P3 C] struct{}
+
+type T[P C[P]] struct{}
+type T[P1, P2, P3 C[P1, P2, P3]] struct{}
+
+func f[P any](x P)
+func f[P1, P2, P3 any](x1 P1, x2 P2, x3 P3) struct{}
+
+func f[P interface{}](x P)
+func f[P1, P2, P3 interface{ m1(P1); ~P2|~P3 }](x1 P1, x2 P2, x3 P3) struct{}
+func f[P any](T1[P], T2[P]) T3[P]
+
+func (x T[P]) m()
+func ((T[P])) m(x T[P]) P
+
+func _() {
+ type _ []T[P]
+ var _ []T[P]
+ _ = []T[P]{}
+}
+
+// type constraint literals with elided interfaces
+func _[P ~int, Q int | string]() {}
+func _[P struct{f int}, Q *P]() {}
+
+// various potentially ambiguous type parameter lists (issue #49482)
+type _[P *T,] struct{}
+type _[P T | T] struct{}
+type _[P T | T | T | T] struct{}
+type _[P *T, _ any] struct{}
+type _[P (*T),] struct{}
+type _[P (*T), _ any] struct{}
+type _[P (T),] struct{}
+type _[P (T), _ any] struct{}
+
+type _[P *struct{}] struct{}
+type _[P (*struct{})] struct{}
+type _[P ([]int)] struct{}
+
+// a type literal in an |-expression indicates a type parameter list (blank after type parameter list and type)
+type _[P *[]int] struct{}
+type _[P *T | T, Q T] struct{}
+type _[P *[]T | T] struct{}
+type _[P *T | T | T | T | ~T] struct{}
+type _[P *T | T | T | ~T | T] struct{}
+type _[P *T | T | struct{} | T] struct{}
+type _[P <-chan int] struct{}
+type _[P *T | struct{} | T] struct{}
+
+// a trailing comma always indicates a (possibly invalid) type parameter list (blank after type parameter list and type)
+type _[P *T,] struct{}
+type _[P *T | T,] struct{}
+type _[P *T | <-T | T,] struct{}
+
+// slice/array type declarations (no blank between array length and element type)
+type _ []byte
+type _ [n]byte
+type _ [P(T)]byte
+type _ [P((T))]byte
+type _ [P * *T]byte
+type _ [P * T]byte
+type _ [P(*T)]byte
+type _ [P(**T)]byte
+type _ [P * T - T]byte
+type _ [P * T - T]byte
+type _ [P * T | T]byte
+type _ [P * T | <-T | T]byte
+
+// equivalent test cases for potentially ambiguous type parameter lists, except
+// for function declarations there is no ambiguity (issue #51548)
+func _[P *T,]() {}
+func _[P *T, _ any]() {}
+func _[P (*T),]() {}
+func _[P (*T), _ any]() {}
+func _[P (T),]() {}
+func _[P (T), _ any]() {}
+
+func _[P *struct{}] () {}
+func _[P (*struct{})] () {}
+func _[P ([]int)] () {}
+
+func _ [P(T)]() {}
+func _ [P((T))]() {}
+func _ [P * *T]() {}
+func _ [P * T]() {}
+func _ [P(*T)]() {}
+func _ [P(**T)]() {}
+func _ [P * T]() {}
+
+func _[
+ P *T,
+]() {}
diff --git a/src/go/printer/testdata/go2numbers.golden b/src/go/printer/testdata/go2numbers.golden
new file mode 100644
index 0000000..3c12049
--- /dev/null
+++ b/src/go/printer/testdata/go2numbers.golden
@@ -0,0 +1,186 @@
+package p
+
+const (
+ // 0-octals
+ _ = 0
+ _ = 0123
+ _ = 0123456
+
+ _ = 0_123
+ _ = 0123_456
+
+ // decimals
+ _ = 1
+ _ = 1234
+ _ = 1234567
+
+ _ = 1_234
+ _ = 1_234_567
+
+ // hexadecimals
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xcafef00d
+
+ _ = 0X0
+ _ = 0X1234
+ _ = 0XCAFEf00d
+
+ _ = 0X_0
+ _ = 0X_1234
+ _ = 0X_CAFE_f00d
+
+ // octals
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0O0
+ _ = 0O1234
+ _ = 0O01234567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ _ = 0O_0
+ _ = 0O_1234
+ _ = 0O0123_4567
+
+ // binaries
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0B0
+ _ = 0B1011
+ _ = 0B00101101
+
+ _ = 0b_0
+ _ = 0b10_11
+ _ = 0b_0010_1101
+
+ // decimal floats
+ _ = 0.
+ _ = 123.
+ _ = 0123.
+
+ _ = .0
+ _ = .123
+ _ = .0123
+
+ _ = 0e0
+ _ = 123e+0
+ _ = 0123E-1
+
+ _ = 0e-0
+ _ = 123E+0
+ _ = 0123E123
+
+ _ = 0.e+1
+ _ = 123.E-10
+ _ = 0123.e123
+
+ _ = .0e-1
+ _ = .123E+10
+ _ = .0123E123
+
+ _ = 0.0
+ _ = 123.123
+ _ = 0123.0123
+
+ _ = 0.0e1
+ _ = 123.123E-10
+ _ = 0123.0123e+456
+
+ _ = 1_2_3.
+ _ = 0_123.
+
+ _ = 0_0e0
+ _ = 1_2_3e0
+ _ = 0_123e0
+
+ _ = 0e-0_0
+ _ = 1_2_3E+0
+ _ = 0123E1_2_3
+
+ _ = 0.e+1
+ _ = 123.E-1_0
+ _ = 01_23.e123
+
+ _ = .0e-1
+ _ = .123E+10
+ _ = .0123E123
+
+ _ = 1_2_3.123
+ _ = 0123.01_23
+
+ // hexadecimal floats
+ _ = 0x0.p+0
+ _ = 0Xdeadcafe.p-10
+ _ = 0x1234.P123
+
+ _ = 0x.1p-0
+ _ = 0X.deadcafep2
+ _ = 0x.1234P+10
+
+ _ = 0x0p0
+ _ = 0Xdeadcafep+1
+ _ = 0x1234P-10
+
+ _ = 0x0.0p0
+ _ = 0Xdead.cafep+1
+ _ = 0x12.34P-10
+
+ _ = 0Xdead_cafep+1
+ _ = 0x_1234P-10
+
+ _ = 0X_dead_cafe.p-10
+ _ = 0x12_34.P1_2_3
+ _ = 0X1_2_3_4.P-1_2_3
+
+ // imaginaries
+ _ = 0i
+ _ = 00i
+ _ = 08i
+ _ = 0000000000i
+ _ = 0123i
+ _ = 0000000123i
+ _ = 0000056789i
+ _ = 1234i
+ _ = 1234567i
+
+ _ = 0i
+ _ = 0_0i
+ _ = 0_8i
+ _ = 0_000_000_000i
+ _ = 0_123i
+ _ = 0_000_000_123i
+ _ = 0_000_056_789i
+ _ = 1_234i
+ _ = 1_234_567i
+
+ _ = 0.i
+ _ = 123.i
+ _ = 0123.i
+ _ = 000123.i
+
+ _ = 0e0i
+ _ = 123e0i
+ _ = 0123E0i
+ _ = 000123E0i
+
+ _ = 0.e+1i
+ _ = 123.E-1_0i
+ _ = 01_23.e123i
+ _ = 00_01_23.e123i
+
+ _ = 0b1010i
+ _ = 0B1010i
+ _ = 0o660i
+ _ = 0O660i
+ _ = 0xabcDEFi
+ _ = 0XabcDEFi
+ _ = 0xabcDEFP0i
+ _ = 0XabcDEFp0i
+)
diff --git a/src/go/printer/testdata/go2numbers.input b/src/go/printer/testdata/go2numbers.input
new file mode 100644
index 0000000..f3e7828
--- /dev/null
+++ b/src/go/printer/testdata/go2numbers.input
@@ -0,0 +1,186 @@
+package p
+
+const (
+ // 0-octals
+ _ = 0
+ _ = 0123
+ _ = 0123456
+
+ _ = 0_123
+ _ = 0123_456
+
+ // decimals
+ _ = 1
+ _ = 1234
+ _ = 1234567
+
+ _ = 1_234
+ _ = 1_234_567
+
+ // hexadecimals
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xcafef00d
+
+ _ = 0X0
+ _ = 0X1234
+ _ = 0XCAFEf00d
+
+ _ = 0X_0
+ _ = 0X_1234
+ _ = 0X_CAFE_f00d
+
+ // octals
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0O0
+ _ = 0O1234
+ _ = 0O01234567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ _ = 0O_0
+ _ = 0O_1234
+ _ = 0O0123_4567
+
+ // binaries
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0B0
+ _ = 0B1011
+ _ = 0B00101101
+
+ _ = 0b_0
+ _ = 0b10_11
+ _ = 0b_0010_1101
+
+ // decimal floats
+ _ = 0.
+ _ = 123.
+ _ = 0123.
+
+ _ = .0
+ _ = .123
+ _ = .0123
+
+ _ = 0e0
+ _ = 123e+0
+ _ = 0123E-1
+
+ _ = 0e-0
+ _ = 123E+0
+ _ = 0123E123
+
+ _ = 0.e+1
+ _ = 123.E-10
+ _ = 0123.e123
+
+ _ = .0e-1
+ _ = .123E+10
+ _ = .0123E123
+
+ _ = 0.0
+ _ = 123.123
+ _ = 0123.0123
+
+ _ = 0.0e1
+ _ = 123.123E-10
+ _ = 0123.0123e+456
+
+ _ = 1_2_3.
+ _ = 0_123.
+
+ _ = 0_0e0
+ _ = 1_2_3e0
+ _ = 0_123e0
+
+ _ = 0e-0_0
+ _ = 1_2_3E+0
+ _ = 0123E1_2_3
+
+ _ = 0.e+1
+ _ = 123.E-1_0
+ _ = 01_23.e123
+
+ _ = .0e-1
+ _ = .123E+10
+ _ = .0123E123
+
+ _ = 1_2_3.123
+ _ = 0123.01_23
+
+ // hexadecimal floats
+ _ = 0x0.p+0
+ _ = 0Xdeadcafe.p-10
+ _ = 0x1234.P123
+
+ _ = 0x.1p-0
+ _ = 0X.deadcafep2
+ _ = 0x.1234P+10
+
+ _ = 0x0p0
+ _ = 0Xdeadcafep+1
+ _ = 0x1234P-10
+
+ _ = 0x0.0p0
+ _ = 0Xdead.cafep+1
+ _ = 0x12.34P-10
+
+ _ = 0Xdead_cafep+1
+ _ = 0x_1234P-10
+
+ _ = 0X_dead_cafe.p-10
+ _ = 0x12_34.P1_2_3
+ _ = 0X1_2_3_4.P-1_2_3
+
+ // imaginaries
+ _ = 0i
+ _ = 00i
+ _ = 08i
+ _ = 0000000000i
+ _ = 0123i
+ _ = 0000000123i
+ _ = 0000056789i
+ _ = 1234i
+ _ = 1234567i
+
+ _ = 0i
+ _ = 0_0i
+ _ = 0_8i
+ _ = 0_000_000_000i
+ _ = 0_123i
+ _ = 0_000_000_123i
+ _ = 0_000_056_789i
+ _ = 1_234i
+ _ = 1_234_567i
+
+ _ = 0.i
+ _ = 123.i
+ _ = 0123.i
+ _ = 000123.i
+
+ _ = 0e0i
+ _ = 123e0i
+ _ = 0123E0i
+ _ = 000123E0i
+
+ _ = 0.e+1i
+ _ = 123.E-1_0i
+ _ = 01_23.e123i
+ _ = 00_01_23.e123i
+
+ _ = 0b1010i
+ _ = 0B1010i
+ _ = 0o660i
+ _ = 0O660i
+ _ = 0xabcDEFi
+ _ = 0XabcDEFi
+ _ = 0xabcDEFP0i
+ _ = 0XabcDEFp0i
+)
diff --git a/src/go/printer/testdata/go2numbers.norm b/src/go/printer/testdata/go2numbers.norm
new file mode 100644
index 0000000..855f0fc
--- /dev/null
+++ b/src/go/printer/testdata/go2numbers.norm
@@ -0,0 +1,186 @@
+package p
+
+const (
+ // 0-octals
+ _ = 0
+ _ = 0123
+ _ = 0123456
+
+ _ = 0_123
+ _ = 0123_456
+
+ // decimals
+ _ = 1
+ _ = 1234
+ _ = 1234567
+
+ _ = 1_234
+ _ = 1_234_567
+
+ // hexadecimals
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xcafef00d
+
+ _ = 0x0
+ _ = 0x1234
+ _ = 0xCAFEf00d
+
+ _ = 0x_0
+ _ = 0x_1234
+ _ = 0x_CAFE_f00d
+
+ // octals
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0o0
+ _ = 0o1234
+ _ = 0o01234567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ _ = 0o_0
+ _ = 0o_1234
+ _ = 0o0123_4567
+
+ // binaries
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0b0
+ _ = 0b1011
+ _ = 0b00101101
+
+ _ = 0b_0
+ _ = 0b10_11
+ _ = 0b_0010_1101
+
+ // decimal floats
+ _ = 0.
+ _ = 123.
+ _ = 0123.
+
+ _ = .0
+ _ = .123
+ _ = .0123
+
+ _ = 0e0
+ _ = 123e+0
+ _ = 0123e-1
+
+ _ = 0e-0
+ _ = 123e+0
+ _ = 0123e123
+
+ _ = 0.e+1
+ _ = 123.e-10
+ _ = 0123.e123
+
+ _ = .0e-1
+ _ = .123e+10
+ _ = .0123e123
+
+ _ = 0.0
+ _ = 123.123
+ _ = 0123.0123
+
+ _ = 0.0e1
+ _ = 123.123e-10
+ _ = 0123.0123e+456
+
+ _ = 1_2_3.
+ _ = 0_123.
+
+ _ = 0_0e0
+ _ = 1_2_3e0
+ _ = 0_123e0
+
+ _ = 0e-0_0
+ _ = 1_2_3e+0
+ _ = 0123e1_2_3
+
+ _ = 0.e+1
+ _ = 123.e-1_0
+ _ = 01_23.e123
+
+ _ = .0e-1
+ _ = .123e+10
+ _ = .0123e123
+
+ _ = 1_2_3.123
+ _ = 0123.01_23
+
+ // hexadecimal floats
+ _ = 0x0.p+0
+ _ = 0xdeadcafe.p-10
+ _ = 0x1234.p123
+
+ _ = 0x.1p-0
+ _ = 0x.deadcafep2
+ _ = 0x.1234p+10
+
+ _ = 0x0p0
+ _ = 0xdeadcafep+1
+ _ = 0x1234p-10
+
+ _ = 0x0.0p0
+ _ = 0xdead.cafep+1
+ _ = 0x12.34p-10
+
+ _ = 0xdead_cafep+1
+ _ = 0x_1234p-10
+
+ _ = 0x_dead_cafe.p-10
+ _ = 0x12_34.p1_2_3
+ _ = 0x1_2_3_4.p-1_2_3
+
+ // imaginaries
+ _ = 0i
+ _ = 0i
+ _ = 8i
+ _ = 0i
+ _ = 123i
+ _ = 123i
+ _ = 56789i
+ _ = 1234i
+ _ = 1234567i
+
+ _ = 0i
+ _ = 0i
+ _ = 8i
+ _ = 0i
+ _ = 123i
+ _ = 123i
+ _ = 56_789i
+ _ = 1_234i
+ _ = 1_234_567i
+
+ _ = 0.i
+ _ = 123.i
+ _ = 0123.i
+ _ = 000123.i
+
+ _ = 0e0i
+ _ = 123e0i
+ _ = 0123e0i
+ _ = 000123e0i
+
+ _ = 0.e+1i
+ _ = 123.e-1_0i
+ _ = 01_23.e123i
+ _ = 00_01_23.e123i
+
+ _ = 0b1010i
+ _ = 0b1010i
+ _ = 0o660i
+ _ = 0o660i
+ _ = 0xabcDEFi
+ _ = 0xabcDEFi
+ _ = 0xabcDEFp0i
+ _ = 0xabcDEFp0i
+)
diff --git a/src/go/printer/testdata/gobuild1.golden b/src/go/printer/testdata/gobuild1.golden
new file mode 100644
index 0000000..649da40
--- /dev/null
+++ b/src/go/printer/testdata/gobuild1.golden
@@ -0,0 +1,6 @@
+//go:build x
+// +build x
+
+package p
+
+func f()
diff --git a/src/go/printer/testdata/gobuild1.input b/src/go/printer/testdata/gobuild1.input
new file mode 100644
index 0000000..6538ee6
--- /dev/null
+++ b/src/go/printer/testdata/gobuild1.input
@@ -0,0 +1,7 @@
+package p
+
+//go:build x
+
+func f()
+
+// +build y
diff --git a/src/go/printer/testdata/gobuild2.golden b/src/go/printer/testdata/gobuild2.golden
new file mode 100644
index 0000000..c46fd34
--- /dev/null
+++ b/src/go/printer/testdata/gobuild2.golden
@@ -0,0 +1,8 @@
+//go:build x
+// +build x
+
+// other comment
+
+package p
+
+func f()
diff --git a/src/go/printer/testdata/gobuild2.input b/src/go/printer/testdata/gobuild2.input
new file mode 100644
index 0000000..f0f772a
--- /dev/null
+++ b/src/go/printer/testdata/gobuild2.input
@@ -0,0 +1,9 @@
+// +build y
+
+// other comment
+
+package p
+
+func f()
+
+//go:build x
diff --git a/src/go/printer/testdata/gobuild3.golden b/src/go/printer/testdata/gobuild3.golden
new file mode 100644
index 0000000..db92c57
--- /dev/null
+++ b/src/go/printer/testdata/gobuild3.golden
@@ -0,0 +1,10 @@
+// other comment
+
+//go:build x
+// +build x
+
+// yet another comment
+
+package p
+
+func f()
diff --git a/src/go/printer/testdata/gobuild3.input b/src/go/printer/testdata/gobuild3.input
new file mode 100644
index 0000000..d0c97b2
--- /dev/null
+++ b/src/go/printer/testdata/gobuild3.input
@@ -0,0 +1,11 @@
+// other comment
+
+// +build y
+
+// yet another comment
+
+package p
+
+//go:build x
+
+func f()
diff --git a/src/go/printer/testdata/gobuild4.golden b/src/go/printer/testdata/gobuild4.golden
new file mode 100644
index 0000000..b16477f
--- /dev/null
+++ b/src/go/printer/testdata/gobuild4.golden
@@ -0,0 +1,6 @@
+//go:build (x || y) && z
+// +build x y
+// +build z
+
+// doc comment
+package p
diff --git a/src/go/printer/testdata/gobuild4.input b/src/go/printer/testdata/gobuild4.input
new file mode 100644
index 0000000..29d5a0a
--- /dev/null
+++ b/src/go/printer/testdata/gobuild4.input
@@ -0,0 +1,5 @@
+// doc comment
+package p
+
+// +build x y
+// +build z
diff --git a/src/go/printer/testdata/gobuild5.golden b/src/go/printer/testdata/gobuild5.golden
new file mode 100644
index 0000000..2808a53
--- /dev/null
+++ b/src/go/printer/testdata/gobuild5.golden
@@ -0,0 +1,4 @@
+//go:build !(x || y) && z
+// +build !x,!y,z
+
+package p
diff --git a/src/go/printer/testdata/gobuild5.input b/src/go/printer/testdata/gobuild5.input
new file mode 100644
index 0000000..ec5815c
--- /dev/null
+++ b/src/go/printer/testdata/gobuild5.input
@@ -0,0 +1,4 @@
+//go:build !(x || y) && z
+// +build something else
+
+package p
diff --git a/src/go/printer/testdata/gobuild6.golden b/src/go/printer/testdata/gobuild6.golden
new file mode 100644
index 0000000..abb1e2a
--- /dev/null
+++ b/src/go/printer/testdata/gobuild6.golden
@@ -0,0 +1,5 @@
+//go:build !(x || y) && z
+
+// no +build line
+
+package p
diff --git a/src/go/printer/testdata/gobuild6.input b/src/go/printer/testdata/gobuild6.input
new file mode 100644
index 0000000..1621897
--- /dev/null
+++ b/src/go/printer/testdata/gobuild6.input
@@ -0,0 +1,4 @@
+//go:build !(x || y) && z
+// no +build line
+
+package p
diff --git a/src/go/printer/testdata/gobuild7.golden b/src/go/printer/testdata/gobuild7.golden
new file mode 100644
index 0000000..bf41dd4
--- /dev/null
+++ b/src/go/printer/testdata/gobuild7.golden
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support.
+
+//go:build !go1.16
+// +build !go1.16
+
+// Package buildtag defines an Analyzer that checks build tags.
+package buildtag
diff --git a/src/go/printer/testdata/gobuild7.input b/src/go/printer/testdata/gobuild7.input
new file mode 100644
index 0000000..bf41dd4
--- /dev/null
+++ b/src/go/printer/testdata/gobuild7.input
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO(rsc): Delete this file once Go 1.17 comes out and we can retire Go 1.15 support.
+
+//go:build !go1.16
+// +build !go1.16
+
+// Package buildtag defines an Analyzer that checks build tags.
+package buildtag
diff --git a/src/go/printer/testdata/linebreaks.golden b/src/go/printer/testdata/linebreaks.golden
new file mode 100644
index 0000000..17d2b5c
--- /dev/null
+++ b/src/go/printer/testdata/linebreaks.golden
@@ -0,0 +1,295 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linebreaks
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ &writerTest{
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1246508266,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ &writerTestEntry{
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1245217492,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ &writerTest{
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ Mtime: 1254699560,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // no contents
+ },
+ },
+ },
+}
+
+type untarTest struct {
+ file string
+ headers []*Header
+}
+
+var untarTests = []*untarTest{
+ &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244428340,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244436044,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/star.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ },
+ },
+}
+
+var facts = map[int]string{
+ 0: "1",
+ 1: "1",
+ 2: "2",
+ 10: "3628800",
+ 20: "2432902008176640000",
+ 100: "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000",
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr,
+ // TODO(gri): the 2nd string of this string list should not be indented
+ "usage: godoc package [name ...]\n"+
+ " godoc -http=:6060\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file, os.O_RDONLY, 0444)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(hdr, header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
+ }
+ f.Close()
+ }
+}
+
+// Respect line breaks in function calls.
+func _() {
+ f(x)
+ f(x,
+ x)
+ f(x,
+ x,
+ )
+ f(
+ x,
+ x)
+ f(
+ x,
+ x,
+ )
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+ y T) {
+}
+func _(x T,
+ y T,
+) {
+}
+func _(
+ x T,
+ y T) {
+}
+func _(
+ x T,
+ y T,
+) {
+}
+
+// Example from issue #2597.
+func ManageStatus0(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func ManageStatus1(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int,
+) {
+}
+
+// Example from issue #9064.
+func (y *y) xerrors() error {
+ _ = "xerror.test" //TODO-
+ _ = []byte(`
+foo bar foo bar foo bar
+`) //TODO-
+}
+
+func _() {
+ _ = "abc" // foo
+ _ = `abc_0123456789_` // foo
+}
+
+func _() {
+ _ = "abc" // foo
+ _ = `abc
+0123456789
+` // foo
+}
+
+// There should be exactly one linebreak after this comment.
diff --git a/src/go/printer/testdata/linebreaks.input b/src/go/printer/testdata/linebreaks.input
new file mode 100644
index 0000000..9e714f3
--- /dev/null
+++ b/src/go/printer/testdata/linebreaks.input
@@ -0,0 +1,291 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linebreaks
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+type writerTestEntry struct {
+ header *Header
+ contents string
+}
+
+type writerTest struct {
+ file string // filename of expected output
+ entries []*writerTestEntry
+}
+
+var writerTests = []*writerTest{
+ &writerTest{
+ file: "testdata/writer.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1246508266,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Kilts",
+ },
+ &writerTestEntry{
+ header: &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1245217492,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ contents: "Google.com\n",
+ },
+ },
+ },
+ // The truncated test file was produced using these commands:
+ // dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
+ // tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
+ &writerTest{
+ file: "testdata/writer-big.tar",
+ entries: []*writerTestEntry{
+ &writerTestEntry{
+ header: &Header{
+ Name: "tmp/16gig.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 16 << 30,
+ Mtime: 1254699560,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ // no contents
+ },
+ },
+ },
+}
+
+type untarTest struct {
+ file string
+ headers []*Header
+}
+
+var untarTests = []*untarTest{
+ &untarTest{
+ file: "testdata/gnu.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244428340,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244436044,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/star.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0640,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244592783,
+ Typeflag: '0',
+ Uname: "dsymonds",
+ Gname: "eng",
+ Atime: 1244592783,
+ Ctime: 1244592783,
+ },
+ },
+ },
+ &untarTest{
+ file: "testdata/v7.tar",
+ headers: []*Header{
+ &Header{
+ Name: "small.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 5,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ &Header{
+ Name: "small2.txt",
+ Mode: 0444,
+ Uid: 73025,
+ Gid: 5000,
+ Size: 11,
+ Mtime: 1244593104,
+ Typeflag: '\x00',
+ },
+ },
+ },
+}
+
+var facts = map[int] string {
+ 0: "1",
+ 1: "1",
+ 2: "2",
+ 10: "3628800",
+ 20: "2432902008176640000",
+ 100: "933262154439441526816992388562667004907159682643816214685929" +
+ "638952175999932299156089414639761565182862536979208272237582" +
+ "51185210916864000000000000000000000000",
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr,
+ // TODO(gri): the 2nd string of this string list should not be indented
+ "usage: godoc package [name ...]\n" +
+ " godoc -http=:6060\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+func TestReader(t *testing.T) {
+testLoop:
+ for i, test := range untarTests {
+ f, err := os.Open(test.file, os.O_RDONLY, 0444)
+ if err != nil {
+ t.Errorf("test %d: Unexpected error: %v", i, err)
+ continue
+ }
+ tr := NewReader(f)
+ for j, header := range test.headers {
+ hdr, err := tr.Next()
+ if err != nil || hdr == nil {
+ t.Errorf("test %d, entry %d: Didn't get entry: %v", i, j, err)
+ f.Close()
+ continue testLoop
+ }
+ if !reflect.DeepEqual(hdr, header) {
+ t.Errorf("test %d, entry %d: Incorrect header:\nhave %+v\nwant %+v",
+ i, j, *hdr, *header)
+ }
+ }
+ hdr, err := tr.Next()
+ if hdr != nil || err != nil {
+ t.Errorf("test %d: Unexpected entry or error: hdr=%v err=%v", i, err)
+ }
+ f.Close()
+ }
+}
+
+// Respect line breaks in function calls.
+func _() {
+ f(x)
+ f(x,
+ x)
+ f(x,
+ x,
+ )
+ f(
+ x,
+ x)
+ f(
+ x,
+ x,
+ )
+}
+
+// Respect line breaks in function declarations.
+func _(x T) {}
+func _(x T,
+ y T) {}
+func _(x T,
+ y T,
+) {}
+func _(
+ x T,
+ y T) {}
+func _(
+ x T,
+ y T,
+) {}
+
+// Example from issue #2597.
+func ManageStatus0(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int) {
+}
+
+func ManageStatus1(
+ in <-chan *Status,
+ req <-chan Request,
+ stat chan<- *TargetInfo,
+ TargetHistorySize int,
+) {
+}
+
+// Example from issue #9064.
+func (y *y) xerrors() error {
+ _ = "xerror.test" //TODO-
+ _ = []byte(`
+foo bar foo bar foo bar
+`) //TODO-
+}
+
+func _() {
+ _ = "abc" // foo
+ _ = `abc_0123456789_` // foo
+}
+
+func _() {
+ _ = "abc" // foo
+ _ = `abc
+0123456789
+` // foo
+}
+
+// There should be exactly one linebreak after this comment.
diff --git a/src/go/printer/testdata/parser.go b/src/go/printer/testdata/parser.go
new file mode 100644
index 0000000..bb06c8d
--- /dev/null
+++ b/src/go/printer/testdata/parser.go
@@ -0,0 +1,2148 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package parser implements a parser for Go source files. Input may be
+// provided in a variety of forms (see the various Parse* functions); the
+// output is an abstract syntax tree (AST) representing the Go source. The
+// parser is invoked through one of the Parse* functions.
+
+package parser
+
+import (
+ "fmt"
+ "go/ast"
+ "go/scanner"
+ "go/token"
+)
+
+// The mode parameter to the Parse* functions is a set of flags (or 0).
+// They control the amount of source code parsed and other optional
+// parser functionality.
+const (
+ PackageClauseOnly uint = 1 << iota // parsing stops after package clause
+ ImportsOnly // parsing stops after import declarations
+ ParseComments // parse comments and add them to AST
+ Trace // print a trace of parsed productions
+ DeclarationErrors // report declaration errors
+)
+
+// The parser structure holds the parser's internal state.
+type parser struct {
+ file *token.File
+ scanner.ErrorVector
+ scanner scanner.Scanner
+
+ // Tracing/debugging
+ mode uint // parsing mode
+ trace bool // == (mode & Trace != 0)
+ indent uint // indentation used for tracing output
+
+ // Comments
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ // Next token
+ pos token.Pos // token position
+ tok token.Token // one token look-ahead
+ lit string // token literal
+
+ // Non-syntactic parser control
+ exprLev int // < 0: in control clause, >= 0: in expression
+
+ // Ordinary identifier scopes
+ pkgScope *ast.Scope // pkgScope.Outer == nil
+ topScope *ast.Scope // top-most scope; may be pkgScope
+ unresolved []*ast.Ident // unresolved identifiers
+ imports []*ast.ImportSpec // list of imports
+
+ // Label scope
+ // (maintained by open/close LabelScope)
+ labelScope *ast.Scope // label scope for current function
+ targetStack [][]*ast.Ident // stack of unresolved labels
+}
+
+// scannerMode returns the scanner mode bits given the parser's mode bits.
+func scannerMode(mode uint) uint {
+ var m uint = scanner.InsertSemis
+ if mode&ParseComments != 0 {
+ m |= scanner.ScanComments
+ }
+ return m
+}
+
+func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
+ p.file = fset.AddFile(filename, fset.Base(), len(src))
+ p.scanner.Init(p.file, src, p, scannerMode(mode))
+
+ p.mode = mode
+ p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
+
+ p.next()
+
+ // set up the pkgScope here (as opposed to in parseFile) because
+ // there are other parser entry points (ParseExpr, etc.)
+ p.openScope()
+ p.pkgScope = p.topScope
+
+ // for the same reason, set up a label scope
+ p.openLabelScope()
+}
+
+// ----------------------------------------------------------------------------
+// Scoping support
+
+func (p *parser) openScope() {
+ p.topScope = ast.NewScope(p.topScope)
+}
+
+func (p *parser) closeScope() {
+ p.topScope = p.topScope.Outer
+}
+
+func (p *parser) openLabelScope() {
+ p.labelScope = ast.NewScope(p.labelScope)
+ p.targetStack = append(p.targetStack, nil)
+}
+
+func (p *parser) closeLabelScope() {
+ // resolve labels
+ n := len(p.targetStack) - 1
+ scope := p.labelScope
+ for _, ident := range p.targetStack[n] {
+ ident.Obj = scope.Lookup(ident.Name)
+ if ident.Obj == nil && p.mode&DeclarationErrors != 0 {
+ p.error(ident.Pos(), fmt.Sprintf("label %s undefined", ident.Name))
+ }
+ }
+ // pop label scope
+ p.targetStack = p.targetStack[0:n]
+ p.labelScope = p.labelScope.Outer
+}
+
+func (p *parser) declare(decl any, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(kind, ident.Name)
+ // remember the corresponding declaration for redeclaration
+ // errors and global variable resolution/typechecking phase
+ obj.Decl = decl
+ if alt := scope.Insert(obj); alt != nil && p.mode&DeclarationErrors != 0 {
+ prevDecl := ""
+ if pos := alt.Pos(); pos.IsValid() {
+ prevDecl = fmt.Sprintf("\n\tprevious declaration at %s", p.file.Position(pos))
+ }
+ p.error(ident.Pos(), fmt.Sprintf("%s redeclared in this block%s", ident.Name, prevDecl))
+ }
+ ident.Obj = obj
+ }
+ }
+}
+
+func (p *parser) shortVarDecl(idents []*ast.Ident) {
+ // Go spec: A short variable declaration may redeclare variables
+ // provided they were originally declared in the same block with
+ // the same type, and at least one of the non-blank variables is new.
+ n := 0 // number of new variables
+ for _, ident := range idents {
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name != "_" {
+ obj := ast.NewObj(ast.Var, ident.Name)
+ // short var declarations cannot have redeclaration errors
+ // and are not global => no need to remember the respective
+ // declaration
+ alt := p.topScope.Insert(obj)
+ if alt == nil {
+ n++ // new declaration
+ alt = obj
+ }
+ ident.Obj = alt
+ }
+ }
+ if n == 0 && p.mode&DeclarationErrors != 0 {
+ p.error(idents[0].Pos(), "no new variables on left side of :=")
+ }
+}
+
+// The unresolved object is a sentinel to mark identifiers that have been added
+// to the list of unresolved identifiers. The sentinel is only used for verifying
+// internal consistency.
+var unresolved = new(ast.Object)
+
+func (p *parser) resolve(x ast.Expr) {
+ // nothing to do if x is not an identifier or the blank identifier
+ ident, _ := x.(*ast.Ident)
+ if ident == nil {
+ return
+ }
+ assert(ident.Obj == nil, "identifier already declared or resolved")
+ if ident.Name == "_" {
+ return
+ }
+ // try to resolve the identifier
+ for s := p.topScope; s != nil; s = s.Outer {
+ if obj := s.Lookup(ident.Name); obj != nil {
+ ident.Obj = obj
+ return
+ }
+ }
+ // all local scopes are known, so any unresolved identifier
+ // must be found either in the file scope, package scope
+ // (perhaps in another file), or universe scope --- collect
+ // them so that they can be resolved later
+ ident.Obj = unresolved
+ p.unresolved = append(p.unresolved, ident)
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *parser) printTrace(a ...any) {
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " +
+ ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = uint(len(dots))
+ pos := p.file.Position(p.pos)
+ fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
+ i := 2 * p.indent
+ for ; i > n; i -= n {
+ fmt.Print(dots)
+ }
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *parser, msg string) *parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."));
+func un(p *parser) {
+ p.indent--
+ p.printTrace(")")
+}
+
+// Advance to the next token.
+func (p *parser) next0() {
+ // Because of one-token look-ahead, print the previous token
+ // when tracing as it provides a more readable output. The
+ // very first token (!p.pos.IsValid()) is not initialized
+ // (it is token.ILLEGAL), so don't print it.
+ if p.trace && p.pos.IsValid() {
+ s := p.tok.String()
+ switch {
+ case p.tok.IsLiteral():
+ p.printTrace(s, p.lit)
+ case p.tok.IsOperator(), p.tok.IsKeyword():
+ p.printTrace("\"" + s + "\"")
+ default:
+ p.printTrace(s)
+ }
+ }
+
+ p.pos, p.tok, p.lit = p.scanner.Scan()
+}
+
+// Consume a comment and return it and the line on which it ends.
+func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
+ // /*-style comments may end on a different line than where they start.
+ // Scan the comment for '\n' chars and adjust endline accordingly.
+ endline = p.file.Line(p.pos)
+ if p.lit[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.lit); i++ {
+ if p.lit[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{p.pos, p.lit}
+ p.next0()
+
+ return
+}
+
+// Consume a group of adjacent comments, add it to the parser's
+// comments list, and return it together with the line at which
+// the last comment in the group ends. An empty line or non-comment
+// token terminates a comment group.
+func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.file.Line(p.pos)
+ for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// Advance to the next non-comment token. In the process, collect
+// any comment groups encountered, and remember the last lead and
+// line comments.
+//
+// A lead comment is a comment group that starts and ends in a
+// line without any other tokens and that is followed by a non-comment
+// token on the line immediately after the comment group.
+//
+// A line comment is a comment group that follows a non-comment
+// token on the same line, and that has no tokens after it on the line
+// where it ends.
+//
+// Lead and line comments may be considered documentation that is
+// stored in the AST.
+func (p *parser) next() {
+ p.leadComment = nil
+ p.lineComment = nil
+ line := p.file.Line(p.pos) // current line
+ p.next0()
+
+ if p.tok == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ if p.file.Line(p.pos) == line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup()
+ if p.file.Line(p.pos) != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok == token.COMMENT {
+ comment, endline = p.consumeCommentGroup()
+ }
+
+ if endline+1 == p.file.Line(p.pos) {
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+}
+
+func (p *parser) error(pos token.Pos, msg string) {
+ p.Error(p.file.Position(pos), msg)
+}
+
+func (p *parser) errorExpected(pos token.Pos, msg string) {
+ msg = "expected " + msg
+ if pos == p.pos {
+ // the error happened at the current position;
+ // make the error message more specific
+ if p.tok == token.SEMICOLON && p.lit[0] == '\n' {
+ msg += ", found newline"
+ } else {
+ msg += ", found '" + p.tok.String() + "'"
+ if p.tok.IsLiteral() {
+ msg += " " + p.lit
+ }
+ }
+ }
+ p.error(pos, msg)
+}
+
+func (p *parser) expect(tok token.Token) token.Pos {
+ pos := p.pos
+ if p.tok != tok {
+ p.errorExpected(pos, "'"+tok.String()+"'")
+ }
+ p.next() // make progress
+ return pos
+}
+
+func (p *parser) expectSemi() {
+ if p.tok != token.RPAREN && p.tok != token.RBRACE {
+ p.expect(token.SEMICOLON)
+ }
+}
+
+func assert(cond bool, msg string) {
+ if !cond {
+ panic("go/parser internal error: " + msg)
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Identifiers
+
+func (p *parser) parseIdent() *ast.Ident {
+ pos := p.pos
+ name := "_"
+ if p.tok == token.IDENT {
+ name = p.lit
+ p.next()
+ } else {
+ p.expect(token.IDENT) // use expect() error handling
+ }
+ return &ast.Ident{pos, name, nil}
+}
+
+func (p *parser) parseIdentList() (list []*ast.Ident) {
+ if p.trace {
+ defer un(trace(p, "IdentList"))
+ }
+
+ list = append(list, p.parseIdent())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseIdent())
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+// If lhs is set, result list elements which are identifiers are not resolved.
+func (p *parser) parseExprList(lhs bool) (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ExpressionList"))
+ }
+
+ list = append(list, p.parseExpr(lhs))
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseExpr(lhs))
+ }
+
+ return
+}
+
+func (p *parser) parseLhsList() []ast.Expr {
+ list := p.parseExprList(true)
+ switch p.tok {
+ case token.DEFINE:
+ // lhs of a short variable declaration
+ p.shortVarDecl(p.makeIdentList(list))
+ case token.COLON:
+ // lhs of a label declaration or a communication clause of a select
+ // statement (parseLhsList is not called when parsing the case clause
+ // of a switch statement):
+ // - labels are declared by the caller of parseLhsList
+ // - for communication clauses, if there is a stand-alone identifier
+ // followed by a colon, we have a syntax error; there is no need
+ // to resolve the identifier in that case
+ default:
+ // identifiers must be declared elsewhere
+ for _, x := range list {
+ p.resolve(x)
+ }
+ }
+ return list
+}
+
+func (p *parser) parseRhsList() []ast.Expr {
+ return p.parseExprList(false)
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) parseType() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Type"))
+ }
+
+ typ := p.tryType()
+
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ return &ast.BadExpr{pos, p.pos}
+ }
+
+ return typ
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) parseTypeName() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeName"))
+ }
+
+ ident := p.parseIdent()
+ // don't resolve ident yet - it may be a parameter or field name
+
+ if p.tok == token.PERIOD {
+ // ident is a package name
+ p.next()
+ p.resolve(ident)
+ sel := p.parseIdent()
+ return &ast.SelectorExpr{ident, sel}
+ }
+
+ return ident
+}
+
+func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "ArrayType"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ var len ast.Expr
+ if ellipsisOk && p.tok == token.ELLIPSIS {
+ len = &ast.Ellipsis{p.pos, nil}
+ p.next()
+ } else if p.tok != token.RBRACK {
+ len = p.parseRhs()
+ }
+ p.expect(token.RBRACK)
+ elt := p.parseType()
+
+ return &ast.ArrayType{lbrack, len, elt}
+}
+
+func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
+ idents := make([]*ast.Ident, len(list))
+ for i, x := range list {
+ ident, isIdent := x.(*ast.Ident)
+ if !isIdent {
+ pos := x.(ast.Expr).Pos()
+ p.errorExpected(pos, "identifier")
+ ident = &ast.Ident{pos, "_", nil}
+ }
+ idents[i] = ident
+ }
+ return idents
+}
+
+func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "FieldDecl"))
+ }
+
+ doc := p.leadComment
+
+ // fields
+ list, typ := p.parseVarList(false)
+
+ // optional tag
+ var tag *ast.BasicLit
+ if p.tok == token.STRING {
+ tag = &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ }
+
+ // analyze case
+ var idents []*ast.Ident
+ if typ != nil {
+ // IdentifierList Type
+ idents = p.makeIdentList(list)
+ } else {
+ // ["*"] TypeName (AnonymousField)
+ typ = list[0] // we always have at least one element
+ p.resolve(typ)
+ if n := len(list); n > 1 || !isTypeName(deref(typ)) {
+ pos := typ.Pos()
+ p.errorExpected(pos, "anonymous field")
+ typ = &ast.BadExpr{pos, list[n-1].End()}
+ }
+ }
+
+ p.expectSemi() // call before accessing p.lineComment
+
+ field := &ast.Field{doc, idents, typ, tag, p.lineComment}
+ p.declare(field, scope, ast.Var, idents...)
+
+ return field
+}
+
+func (p *parser) parseStructType() *ast.StructType {
+ if p.trace {
+ defer un(trace(p, "StructType"))
+ }
+
+ pos := p.expect(token.STRUCT)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // struct scope
+ var list []*ast.Field
+ for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
+ // a field declaration cannot start with a '(' but we accept
+ // it here for more robust parsing and better error messages
+ // (parseFieldDecl will check and complain if necessary)
+ list = append(list, p.parseFieldDecl(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ // TODO(gri): store struct scope in AST
+ return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+func (p *parser) parsePointerType() *ast.StarExpr {
+ if p.trace {
+ defer un(trace(p, "PointerType"))
+ }
+
+ star := p.expect(token.MUL)
+ base := p.parseType()
+
+ return &ast.StarExpr{star, base}
+}
+
+func (p *parser) tryVarType(isParam bool) ast.Expr {
+ if isParam && p.tok == token.ELLIPSIS {
+ pos := p.pos
+ p.next()
+ typ := p.tryIdentOrType(isParam) // don't use parseType so we can provide better error message
+ if typ == nil {
+ p.error(pos, "'...' parameter is missing type")
+ typ = &ast.BadExpr{pos, p.pos}
+ }
+ if p.tok != token.RPAREN {
+ p.error(pos, "can use '...' with last parameter type only")
+ }
+ return &ast.Ellipsis{pos, typ}
+ }
+ return p.tryIdentOrType(false)
+}
+
+func (p *parser) parseVarType(isParam bool) ast.Expr {
+ typ := p.tryVarType(isParam)
+ if typ == nil {
+ pos := p.pos
+ p.errorExpected(pos, "type")
+ p.next() // make progress
+ typ = &ast.BadExpr{pos, p.pos}
+ }
+ return typ
+}
+
+func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "VarList"))
+ }
+
+ // a list of identifiers looks like a list of type names
+ for {
+ // parseVarType accepts any type (including parenthesized ones)
+ // even though the syntax does not permit them here: we
+ // accept them all for more robust parsing and complain
+ // afterwards
+ list = append(list, p.parseVarType(isParam))
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ // if we had a list of identifiers, it must be followed by a type
+ typ = p.tryVarType(isParam)
+ if typ != nil {
+ p.resolve(typ)
+ }
+
+ return
+}
+
+func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
+ if p.trace {
+ defer un(trace(p, "ParameterList"))
+ }
+
+ list, typ := p.parseVarList(ellipsisOk)
+ if typ != nil {
+ // IdentifierList Type
+ idents := p.makeIdentList(list)
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
+ if p.tok == token.COMMA {
+ p.next()
+ }
+
+ for p.tok != token.RPAREN && p.tok != token.EOF {
+ idents := p.parseIdentList()
+ typ := p.parseVarType(ellipsisOk)
+ field := &ast.Field{nil, idents, typ, nil, nil}
+ params = append(params, field)
+ // Go spec: The scope of an identifier denoting a function
+ // parameter or result variable is the function body.
+ p.declare(field, scope, ast.Var, idents...)
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ } else {
+ // Type { "," Type } (anonymous parameters)
+ params = make([]*ast.Field, len(list))
+ for i, x := range list {
+ p.resolve(x)
+ params[i] = &ast.Field{Type: x}
+ }
+ }
+
+ return
+}
+
+func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Parameters"))
+ }
+
+ var params []*ast.Field
+ lparen := p.expect(token.LPAREN)
+ if p.tok != token.RPAREN {
+ params = p.parseParameterList(scope, ellipsisOk)
+ }
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.FieldList{lparen, params, rparen}
+}
+
+func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Result"))
+ }
+
+ if p.tok == token.LPAREN {
+ return p.parseParameters(scope, false)
+ }
+
+ typ := p.tryType()
+ if typ != nil {
+ list := make([]*ast.Field, 1)
+ list[0] = &ast.Field{Type: typ}
+ return &ast.FieldList{List: list}
+ }
+
+ return nil
+}
+
+func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
+ if p.trace {
+ defer un(trace(p, "Signature"))
+ }
+
+ params = p.parseParameters(scope, true)
+ results = p.parseResult(scope)
+
+ return
+}
+
+func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
+ if p.trace {
+ defer un(trace(p, "FuncType"))
+ }
+
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+ params, results := p.parseSignature(scope)
+
+ return &ast.FuncType{pos, params, results}, scope
+}
+
+func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
+ if p.trace {
+ defer un(trace(p, "MethodSpec"))
+ }
+
+ doc := p.leadComment
+ var idents []*ast.Ident
+ var typ ast.Expr
+ x := p.parseTypeName()
+ if ident, isIdent := x.(*ast.Ident); isIdent && p.tok == token.LPAREN {
+ // method
+ idents = []*ast.Ident{ident}
+ scope := ast.NewScope(nil) // method scope
+ params, results := p.parseSignature(scope)
+ typ = &ast.FuncType{token.NoPos, params, results}
+ } else {
+ // embedded interface
+ typ = x
+ }
+ p.expectSemi() // call before accessing p.lineComment
+
+ spec := &ast.Field{doc, idents, typ, nil, p.lineComment}
+ p.declare(spec, scope, ast.Fun, idents...)
+
+ return spec
+}
+
+func (p *parser) parseInterfaceType() *ast.InterfaceType {
+ if p.trace {
+ defer un(trace(p, "InterfaceType"))
+ }
+
+ pos := p.expect(token.INTERFACE)
+ lbrace := p.expect(token.LBRACE)
+ scope := ast.NewScope(nil) // interface scope
+ var list []*ast.Field
+ for p.tok == token.IDENT {
+ list = append(list, p.parseMethodSpec(scope))
+ }
+ rbrace := p.expect(token.RBRACE)
+
+ // TODO(gri): store interface scope in AST
+ return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
+}
+
+func (p *parser) parseMapType() *ast.MapType {
+ if p.trace {
+ defer un(trace(p, "MapType"))
+ }
+
+ pos := p.expect(token.MAP)
+ p.expect(token.LBRACK)
+ key := p.parseType()
+ p.expect(token.RBRACK)
+ value := p.parseType()
+
+ return &ast.MapType{pos, key, value}
+}
+
+func (p *parser) parseChanType() *ast.ChanType {
+ if p.trace {
+ defer un(trace(p, "ChanType"))
+ }
+
+ pos := p.pos
+ dir := ast.SEND | ast.RECV
+ if p.tok == token.CHAN {
+ p.next()
+ if p.tok == token.ARROW {
+ p.next()
+ dir = ast.SEND
+ }
+ } else {
+ p.expect(token.ARROW)
+ p.expect(token.CHAN)
+ dir = ast.RECV
+ }
+ value := p.parseType()
+
+ return &ast.ChanType{pos, dir, value}
+}
+
+// If the result is an identifier, it is not resolved.
+func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
+ switch p.tok {
+ case token.IDENT:
+ return p.parseTypeName()
+ case token.LBRACK:
+ return p.parseArrayType(ellipsisOk)
+ case token.STRUCT:
+ return p.parseStructType()
+ case token.MUL:
+ return p.parsePointerType()
+ case token.FUNC:
+ typ, _ := p.parseFuncType()
+ return typ
+ case token.INTERFACE:
+ return p.parseInterfaceType()
+ case token.MAP:
+ return p.parseMapType()
+ case token.CHAN, token.ARROW:
+ return p.parseChanType()
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ typ := p.parseType()
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{lparen, typ, rparen}
+ }
+
+ // no type found
+ return nil
+}
+
+func (p *parser) tryType() ast.Expr {
+ typ := p.tryIdentOrType(false)
+ if typ != nil {
+ p.resolve(typ)
+ }
+ return typ
+}
+
+// ----------------------------------------------------------------------------
+// Blocks
+
+func (p *parser) parseStmtList() (list []ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "StatementList"))
+ }
+
+ for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseStmt())
+ }
+
+ return
+}
+
+func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "Body"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.topScope = scope // open function scope
+ p.openLabelScope()
+ list := p.parseStmtList()
+ p.closeLabelScope()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+func (p *parser) parseBlockStmt() *ast.BlockStmt {
+ if p.trace {
+ defer un(trace(p, "BlockStmt"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ p.openScope()
+ list := p.parseStmtList()
+ p.closeScope()
+ rbrace := p.expect(token.RBRACE)
+
+ return &ast.BlockStmt{lbrace, list, rbrace}
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) parseFuncTypeOrLit() ast.Expr {
+ if p.trace {
+ defer un(trace(p, "FuncTypeOrLit"))
+ }
+
+ typ, scope := p.parseFuncType()
+ if p.tok != token.LBRACE {
+ // function type only
+ return typ
+ }
+
+ p.exprLev++
+ body := p.parseBody(scope)
+ p.exprLev--
+
+ return &ast.FuncLit{typ, body}
+}
+
+// parseOperand may return an expression or a raw type (incl. array
+// types of the form [...]T). Callers must verify the result.
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseOperand(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Operand"))
+ }
+
+ switch p.tok {
+ case token.IDENT:
+ x := p.parseIdent()
+ if !lhs {
+ p.resolve(x)
+ }
+ return x
+
+ case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
+ x := &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ return x
+
+ case token.LPAREN:
+ lparen := p.pos
+ p.next()
+ p.exprLev++
+ x := p.parseRhs()
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+ return &ast.ParenExpr{lparen, x, rparen}
+
+ case token.FUNC:
+ return p.parseFuncTypeOrLit()
+
+ default:
+ if typ := p.tryIdentOrType(true); typ != nil {
+ // could be type for composite literal or conversion
+ _, isIdent := typ.(*ast.Ident)
+ assert(!isIdent, "type cannot be identifier")
+ return typ
+ }
+ }
+
+ pos := p.pos
+ p.errorExpected(pos, "operand")
+ p.next() // make progress
+ return &ast.BadExpr{pos, p.pos}
+}
+
+func (p *parser) parseSelector(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Selector"))
+ }
+
+ sel := p.parseIdent()
+
+ return &ast.SelectorExpr{x, sel}
+}
+
+func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "TypeAssertion"))
+ }
+
+ p.expect(token.LPAREN)
+ var typ ast.Expr
+ if p.tok == token.TYPE {
+ // type switch: typ == nil
+ p.next()
+ } else {
+ typ = p.parseType()
+ }
+ p.expect(token.RPAREN)
+
+ return &ast.TypeAssertExpr{x, typ}
+}
+
+func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "IndexOrSlice"))
+ }
+
+ lbrack := p.expect(token.LBRACK)
+ p.exprLev++
+ var low, high ast.Expr
+ isSlice := false
+ if p.tok != token.COLON {
+ low = p.parseRhs()
+ }
+ if p.tok == token.COLON {
+ isSlice = true
+ p.next()
+ if p.tok != token.RBRACK {
+ high = p.parseRhs()
+ }
+ }
+ p.exprLev--
+ rbrack := p.expect(token.RBRACK)
+
+ if isSlice {
+ return &ast.SliceExpr{x, lbrack, low, high, rbrack}
+ }
+ return &ast.IndexExpr{x, lbrack, low, rbrack}
+}
+
+func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
+ if p.trace {
+ defer un(trace(p, "CallOrConversion"))
+ }
+
+ lparen := p.expect(token.LPAREN)
+ p.exprLev++
+ var list []ast.Expr
+ var ellipsis token.Pos
+ for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
+ list = append(list, p.parseRhs())
+ if p.tok == token.ELLIPSIS {
+ ellipsis = p.pos
+ p.next()
+ }
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+ p.exprLev--
+ rparen := p.expect(token.RPAREN)
+
+ return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
+}
+
+func (p *parser) parseElement(keyOk bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Element"))
+ }
+
+ if p.tok == token.LBRACE {
+ return p.parseLiteralValue(nil)
+ }
+
+ x := p.parseExpr(keyOk) // don't resolve if map key
+ if keyOk {
+ if p.tok == token.COLON {
+ colon := p.pos
+ p.next()
+ return &ast.KeyValueExpr{x, colon, p.parseElement(false)}
+ }
+ p.resolve(x) // not a map key
+ }
+
+ return x
+}
+
+func (p *parser) parseElementList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "ElementList"))
+ }
+
+ for p.tok != token.RBRACE && p.tok != token.EOF {
+ list = append(list, p.parseElement(true))
+ if p.tok != token.COMMA {
+ break
+ }
+ p.next()
+ }
+
+ return
+}
+
+func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "LiteralValue"))
+ }
+
+ lbrace := p.expect(token.LBRACE)
+ var elts []ast.Expr
+ p.exprLev++
+ if p.tok != token.RBRACE {
+ elts = p.parseElementList()
+ }
+ p.exprLev--
+ rbrace := p.expect(token.RBRACE)
+ return &ast.CompositeLit{typ, lbrace, elts, rbrace}
+}
+
+// checkExpr checks that x is an expression (and not a type).
+func (p *parser) checkExpr(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.BasicLit:
+ case *ast.FuncLit:
+ case *ast.CompositeLit:
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.SelectorExpr:
+ case *ast.IndexExpr:
+ case *ast.SliceExpr:
+ case *ast.TypeAssertExpr:
+ if t.Type == nil {
+ // the form X.(type) is only allowed in type switch expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.CallExpr:
+ case *ast.StarExpr:
+ case *ast.UnaryExpr:
+ if t.Op == token.RANGE {
+ // the range operator is only allowed at the top of a for statement
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.BinaryExpr:
+ default:
+ // all other nodes are not proper expressions
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ return x
+}
+
+// isTypeName reports whether x is a (qualified) TypeName.
+func isTypeName(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ default:
+ return false // all other nodes are not type names
+ }
+ return true
+}
+
+// isLiteralType reports whether x is a legal composite literal type.
+func isLiteralType(x ast.Expr) bool {
+ switch t := x.(type) {
+ case *ast.BadExpr:
+ case *ast.Ident:
+ case *ast.SelectorExpr:
+ _, isIdent := t.X.(*ast.Ident)
+ return isIdent
+ case *ast.ArrayType:
+ case *ast.StructType:
+ case *ast.MapType:
+ default:
+ return false // all other nodes are not legal composite literal types
+ }
+ return true
+}
+
+// If x is of the form *T, deref returns T, otherwise it returns x.
+func deref(x ast.Expr) ast.Expr {
+ if p, isPtr := x.(*ast.StarExpr); isPtr {
+ x = p.X
+ }
+ return x
+}
+
+// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
+func unparen(x ast.Expr) ast.Expr {
+ if p, isParen := x.(*ast.ParenExpr); isParen {
+ x = unparen(p.X)
+ }
+ return x
+}
+
+// checkExprOrType checks that x is an expression or a type
+// (and not a raw type such as [...]T).
+func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
+ switch t := unparen(x).(type) {
+ case *ast.ParenExpr:
+ panic("unreachable")
+ case *ast.UnaryExpr:
+ if t.Op == token.RANGE {
+ // the range operator is only allowed at the top of a for statement
+ p.errorExpected(x.Pos(), "expression")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ case *ast.ArrayType:
+ if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
+ p.error(len.Pos(), "expected array length, found '...'")
+ x = &ast.BadExpr{x.Pos(), x.End()}
+ }
+ }
+
+ // all other nodes are expressions or types
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "PrimaryExpr"))
+ }
+
+ x := p.parseOperand(lhs)
+L:
+ for {
+ switch p.tok {
+ case token.PERIOD:
+ p.next()
+ if lhs {
+ p.resolve(x)
+ }
+ switch p.tok {
+ case token.IDENT:
+ x = p.parseSelector(p.checkExpr(x))
+ case token.LPAREN:
+ x = p.parseTypeAssertion(p.checkExpr(x))
+ default:
+ pos := p.pos
+ p.next() // make progress
+ p.errorExpected(pos, "selector or type assertion")
+ x = &ast.BadExpr{pos, p.pos}
+ }
+ case token.LBRACK:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseIndexOrSlice(p.checkExpr(x))
+ case token.LPAREN:
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseCallOrConversion(p.checkExprOrType(x))
+ case token.LBRACE:
+ if isLiteralType(x) && (p.exprLev >= 0 || !isTypeName(x)) {
+ if lhs {
+ p.resolve(x)
+ }
+ x = p.parseLiteralValue(x)
+ } else {
+ break L
+ }
+ default:
+ break L
+ }
+ lhs = false // no need to try to resolve again
+ }
+
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "UnaryExpr"))
+ }
+
+ switch p.tok {
+ case token.ADD, token.SUB, token.NOT, token.XOR, token.AND, token.RANGE:
+ pos, op := p.pos, p.tok
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{pos, op, p.checkExpr(x)}
+
+ case token.ARROW:
+ // channel type or receive expression
+ pos := p.pos
+ p.next()
+ if p.tok == token.CHAN {
+ p.next()
+ value := p.parseType()
+ return &ast.ChanType{pos, ast.RECV, value}
+ }
+
+ x := p.parseUnaryExpr(false)
+ return &ast.UnaryExpr{pos, token.ARROW, p.checkExpr(x)}
+
+ case token.MUL:
+ // pointer type or unary "*" expression
+ pos := p.pos
+ p.next()
+ x := p.parseUnaryExpr(false)
+ return &ast.StarExpr{pos, p.checkExprOrType(x)}
+ }
+
+ return p.parsePrimaryExpr(lhs)
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "BinaryExpr"))
+ }
+
+ x := p.parseUnaryExpr(lhs)
+ for prec := p.tok.Precedence(); prec >= prec1; prec-- {
+ for p.tok.Precedence() == prec {
+ pos, op := p.pos, p.tok
+ p.next()
+ if lhs {
+ p.resolve(x)
+ lhs = false
+ }
+ y := p.parseBinaryExpr(false, prec+1)
+ x = &ast.BinaryExpr{p.checkExpr(x), pos, op, p.checkExpr(y)}
+ }
+ }
+
+ return x
+}
+
+// If lhs is set and the result is an identifier, it is not resolved.
+// TODO(gri): parseExpr may return a type or even a raw type ([...]int) -
+// should reject when a type/raw type is obviously not allowed
+func (p *parser) parseExpr(lhs bool) ast.Expr {
+ if p.trace {
+ defer un(trace(p, "Expression"))
+ }
+
+ return p.parseBinaryExpr(lhs, token.LowestPrec+1)
+}
+
+func (p *parser) parseRhs() ast.Expr {
+ return p.parseExpr(false)
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SimpleStmt"))
+ }
+
+ x := p.parseLhsList()
+
+ switch p.tok {
+ case
+ token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
+ token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
+ token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
+ token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
+ // assignment statement
+ pos, tok := p.pos, p.tok
+ p.next()
+ y := p.parseRhsList()
+ return &ast.AssignStmt{x, pos, tok, y}
+ }
+
+ if len(x) > 1 {
+ p.errorExpected(x[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+
+ switch p.tok {
+ case token.COLON:
+ // labeled statement
+ colon := p.pos
+ p.next()
+ if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
+ // Go spec: The scope of a label is the body of the function
+ // in which it is declared and excludes the body of any nested
+ // function.
+ stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
+ p.declare(stmt, p.labelScope, ast.Lbl, label)
+ return stmt
+ }
+ p.error(x[0].Pos(), "illegal label declaration")
+ return &ast.BadStmt{x[0].Pos(), colon + 1}
+
+ case token.ARROW:
+ // send statement
+ arrow := p.pos
+ p.next() // consume "<-"
+ y := p.parseRhs()
+ return &ast.SendStmt{x[0], arrow, y}
+
+ case token.INC, token.DEC:
+ // increment or decrement
+ s := &ast.IncDecStmt{x[0], p.pos, p.tok}
+ p.next() // consume "++" or "--"
+ return s
+ }
+
+ // expression
+ return &ast.ExprStmt{x[0]}
+}
+
+func (p *parser) parseCallExpr() *ast.CallExpr {
+ x := p.parseRhs()
+ if call, isCall := x.(*ast.CallExpr); isCall {
+ return call
+ }
+ p.errorExpected(x.Pos(), "function/method call")
+ return nil
+}
+
+func (p *parser) parseGoStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "GoStmt"))
+ }
+
+ pos := p.expect(token.GO)
+ call := p.parseCallExpr()
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{pos, pos + 2} // len("go")
+ }
+
+ return &ast.GoStmt{pos, call}
+}
+
+func (p *parser) parseDeferStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "DeferStmt"))
+ }
+
+ pos := p.expect(token.DEFER)
+ call := p.parseCallExpr()
+ p.expectSemi()
+ if call == nil {
+ return &ast.BadStmt{pos, pos + 5} // len("defer")
+ }
+
+ return &ast.DeferStmt{pos, call}
+}
+
+func (p *parser) parseReturnStmt() *ast.ReturnStmt {
+ if p.trace {
+ defer un(trace(p, "ReturnStmt"))
+ }
+
+ pos := p.pos
+ p.expect(token.RETURN)
+ var x []ast.Expr
+ if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
+ x = p.parseRhsList()
+ }
+ p.expectSemi()
+
+ return &ast.ReturnStmt{pos, x}
+}
+
+func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
+ if p.trace {
+ defer un(trace(p, "BranchStmt"))
+ }
+
+ pos := p.expect(tok)
+ var label *ast.Ident
+ if tok != token.FALLTHROUGH && p.tok == token.IDENT {
+ label = p.parseIdent()
+ // add to list of unresolved targets
+ n := len(p.targetStack) - 1
+ p.targetStack[n] = append(p.targetStack[n], label)
+ }
+ p.expectSemi()
+
+ return &ast.BranchStmt{pos, tok, label}
+}
+
+func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
+ if s == nil {
+ return nil
+ }
+ if es, isExpr := s.(*ast.ExprStmt); isExpr {
+ return p.checkExpr(es.X)
+ }
+ p.error(s.Pos(), "expected condition, found simple statement")
+ return &ast.BadExpr{s.Pos(), s.End()}
+}
+
+func (p *parser) parseIfStmt() *ast.IfStmt {
+ if p.trace {
+ defer un(trace(p, "IfStmt"))
+ }
+
+ pos := p.expect(token.IF)
+ p.openScope()
+ defer p.closeScope()
+
+ var s ast.Stmt
+ var x ast.Expr
+ {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ s = p.parseSimpleStmt(false)
+ if p.tok == token.SEMICOLON {
+ p.next()
+ x = p.parseRhs()
+ } else {
+ x = p.makeExpr(s)
+ s = nil
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ var else_ ast.Stmt
+ if p.tok == token.ELSE {
+ p.next()
+ else_ = p.parseStmt()
+ } else {
+ p.expectSemi()
+ }
+
+ return &ast.IfStmt{pos, s, x, body, else_}
+}
+
+func (p *parser) parseTypeList() (list []ast.Expr) {
+ if p.trace {
+ defer un(trace(p, "TypeList"))
+ }
+
+ list = append(list, p.parseType())
+ for p.tok == token.COMMA {
+ p.next()
+ list = append(list, p.parseType())
+ }
+
+ return
+}
+
+func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
+ if p.trace {
+ defer un(trace(p, "CaseClause"))
+ }
+
+ pos := p.pos
+ var list []ast.Expr
+ if p.tok == token.CASE {
+ p.next()
+ if exprSwitch {
+ list = p.parseRhsList()
+ } else {
+ list = p.parseTypeList()
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ p.openScope()
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CaseClause{pos, list, colon, body}
+}
+
+func isExprSwitch(s ast.Stmt) bool {
+ if s == nil {
+ return true
+ }
+ if e, ok := s.(*ast.ExprStmt); ok {
+ if a, ok := e.X.(*ast.TypeAssertExpr); ok {
+ return a.Type != nil // regular type assertion
+ }
+ return true
+ }
+ return false
+}
+
+func (p *parser) parseSwitchStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "SwitchStmt"))
+ }
+
+ pos := p.expect(token.SWITCH)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.LBRACE {
+ s2 = p.parseSimpleStmt(false)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ exprSwitch := isExprSwitch(s2)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCaseClause(exprSwitch))
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ if exprSwitch {
+ return &ast.SwitchStmt{pos, s1, p.makeExpr(s2), body}
+ }
+ // type switch
+ // TODO(gri): do all the checks!
+ return &ast.TypeSwitchStmt{pos, s1, s2, body}
+}
+
+func (p *parser) parseCommClause() *ast.CommClause {
+ if p.trace {
+ defer un(trace(p, "CommClause"))
+ }
+
+ p.openScope()
+ pos := p.pos
+ var comm ast.Stmt
+ if p.tok == token.CASE {
+ p.next()
+ lhs := p.parseLhsList()
+ if p.tok == token.ARROW {
+ // SendStmt
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ arrow := p.pos
+ p.next()
+ rhs := p.parseRhs()
+ comm = &ast.SendStmt{lhs[0], arrow, rhs}
+ } else {
+ // RecvStmt
+ pos := p.pos
+ tok := p.tok
+ var rhs ast.Expr
+ if tok == token.ASSIGN || tok == token.DEFINE {
+ // RecvStmt with assignment
+ if len(lhs) > 2 {
+ p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
+ // continue with first two expressions
+ lhs = lhs[0:2]
+ }
+ p.next()
+ rhs = p.parseRhs()
+ } else {
+ // rhs must be single receive operation
+ if len(lhs) > 1 {
+ p.errorExpected(lhs[0].Pos(), "1 expression")
+ // continue with first expression
+ }
+ rhs = lhs[0]
+ lhs = nil // there is no lhs
+ }
+ if x, isUnary := rhs.(*ast.UnaryExpr); !isUnary || x.Op != token.ARROW {
+ p.errorExpected(rhs.Pos(), "send or receive operation")
+ rhs = &ast.BadExpr{rhs.Pos(), rhs.End()}
+ }
+ if lhs != nil {
+ comm = &ast.AssignStmt{lhs, pos, tok, []ast.Expr{rhs}}
+ } else {
+ comm = &ast.ExprStmt{rhs}
+ }
+ }
+ } else {
+ p.expect(token.DEFAULT)
+ }
+
+ colon := p.expect(token.COLON)
+ body := p.parseStmtList()
+ p.closeScope()
+
+ return &ast.CommClause{pos, comm, colon, body}
+}
+
+func (p *parser) parseSelectStmt() *ast.SelectStmt {
+ if p.trace {
+ defer un(trace(p, "SelectStmt"))
+ }
+
+ pos := p.expect(token.SELECT)
+ lbrace := p.expect(token.LBRACE)
+ var list []ast.Stmt
+ for p.tok == token.CASE || p.tok == token.DEFAULT {
+ list = append(list, p.parseCommClause())
+ }
+ rbrace := p.expect(token.RBRACE)
+ p.expectSemi()
+ body := &ast.BlockStmt{lbrace, list, rbrace}
+
+ return &ast.SelectStmt{pos, body}
+}
+
+func (p *parser) parseForStmt() ast.Stmt {
+ if p.trace {
+ defer un(trace(p, "ForStmt"))
+ }
+
+ pos := p.expect(token.FOR)
+ p.openScope()
+ defer p.closeScope()
+
+ var s1, s2, s3 ast.Stmt
+ if p.tok != token.LBRACE {
+ prevLev := p.exprLev
+ p.exprLev = -1
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ if p.tok == token.SEMICOLON {
+ p.next()
+ s1 = s2
+ s2 = nil
+ if p.tok != token.SEMICOLON {
+ s2 = p.parseSimpleStmt(false)
+ }
+ p.expectSemi()
+ if p.tok != token.LBRACE {
+ s3 = p.parseSimpleStmt(false)
+ }
+ }
+ p.exprLev = prevLev
+ }
+
+ body := p.parseBlockStmt()
+ p.expectSemi()
+
+ if as, isAssign := s2.(*ast.AssignStmt); isAssign {
+ // possibly a for statement with a range clause; check assignment operator
+ if as.Tok != token.ASSIGN && as.Tok != token.DEFINE {
+ p.errorExpected(as.TokPos, "'=' or ':='")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ // check lhs
+ var key, value ast.Expr
+ switch len(as.Lhs) {
+ case 2:
+ key, value = as.Lhs[0], as.Lhs[1]
+ case 1:
+ key = as.Lhs[0]
+ default:
+ p.errorExpected(as.Lhs[0].Pos(), "1 or 2 expressions")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ // check rhs
+ if len(as.Rhs) != 1 {
+ p.errorExpected(as.Rhs[0].Pos(), "1 expression")
+ return &ast.BadStmt{pos, body.End()}
+ }
+ if rhs, isUnary := as.Rhs[0].(*ast.UnaryExpr); isUnary && rhs.Op == token.RANGE {
+ // rhs is range expression
+ // (any short variable declaration was handled by parseSimpleStmt above)
+ return &ast.RangeStmt{pos, key, value, as.TokPos, as.Tok, rhs.X, body}
+ }
+ p.errorExpected(s2.Pos(), "range clause")
+ return &ast.BadStmt{pos, body.End()}
+ }
+
+ // regular for statement
+ return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
+}
+
+func (p *parser) parseStmt() (s ast.Stmt) {
+ if p.trace {
+ defer un(trace(p, "Statement"))
+ }
+
+ switch p.tok {
+ case token.CONST, token.TYPE, token.VAR:
+ s = &ast.DeclStmt{p.parseDecl()}
+ case
+ // tokens that may start a top-level expression
+ token.IDENT, token.INT, token.FLOAT, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operand
+ token.LBRACK, token.STRUCT, // composite type
+ token.MUL, token.AND, token.ARROW, token.ADD, token.SUB, token.XOR: // unary operators
+ s = p.parseSimpleStmt(true)
+ // because of the required look-ahead, labeled statements are
+ // parsed by parseSimpleStmt - don't expect a semicolon after
+ // them
+ if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
+ p.expectSemi()
+ }
+ case token.GO:
+ s = p.parseGoStmt()
+ case token.DEFER:
+ s = p.parseDeferStmt()
+ case token.RETURN:
+ s = p.parseReturnStmt()
+ case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
+ s = p.parseBranchStmt(p.tok)
+ case token.LBRACE:
+ s = p.parseBlockStmt()
+ p.expectSemi()
+ case token.IF:
+ s = p.parseIfStmt()
+ case token.SWITCH:
+ s = p.parseSwitchStmt()
+ case token.SELECT:
+ s = p.parseSelectStmt()
+ case token.FOR:
+ s = p.parseForStmt()
+ case token.SEMICOLON:
+ s = &ast.EmptyStmt{p.pos}
+ p.next()
+ case token.RBRACE:
+ // a semicolon may be omitted before a closing "}"
+ s = &ast.EmptyStmt{p.pos}
+ default:
+ // no statement found
+ pos := p.pos
+ p.errorExpected(pos, "statement")
+ p.next() // make progress
+ s = &ast.BadStmt{pos, p.pos}
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
+
+func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ImportSpec"))
+ }
+
+ var ident *ast.Ident
+ switch p.tok {
+ case token.PERIOD:
+ ident = &ast.Ident{p.pos, ".", nil}
+ p.next()
+ case token.IDENT:
+ ident = p.parseIdent()
+ }
+
+ var path *ast.BasicLit
+ if p.tok == token.STRING {
+ path = &ast.BasicLit{p.pos, p.tok, p.lit}
+ p.next()
+ } else {
+ p.expect(token.STRING) // use expect() error handling
+ }
+ p.expectSemi() // call before accessing p.lineComment
+
+ // collect imports
+ spec := &ast.ImportSpec{doc, ident, path, p.lineComment}
+ p.imports = append(p.imports, spec)
+
+ return spec
+}
+
+func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "ConstSpec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ if typ != nil || p.tok == token.ASSIGN || iota == 0 {
+ p.expect(token.ASSIGN)
+ values = p.parseRhsList()
+ }
+ p.expectSemi() // call before accessing p.lineComment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Con, idents...)
+
+ return spec
+}
+
+func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "TypeSpec"))
+ }
+
+ ident := p.parseIdent()
+
+ // Go spec: The scope of a type identifier declared inside a function begins
+ // at the identifier in the TypeSpec and ends at the end of the innermost
+ // containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.TypeSpec{doc, ident, nil, nil}
+ p.declare(spec, p.topScope, ast.Typ, ident)
+
+ spec.Type = p.parseType()
+ p.expectSemi() // call before accessing p.lineComment
+ spec.Comment = p.lineComment
+
+ return spec
+}
+
+func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
+ if p.trace {
+ defer un(trace(p, "VarSpec"))
+ }
+
+ idents := p.parseIdentList()
+ typ := p.tryType()
+ var values []ast.Expr
+ if typ == nil || p.tok == token.ASSIGN {
+ p.expect(token.ASSIGN)
+ values = p.parseRhsList()
+ }
+ p.expectSemi() // call before accessing p.lineComment
+
+ // Go spec: The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec and ends at
+ // the end of the innermost containing block.
+ // (Global identifiers are resolved in a separate phase after parsing.)
+ spec := &ast.ValueSpec{doc, idents, typ, values, p.lineComment}
+ p.declare(spec, p.topScope, ast.Var, idents...)
+
+ return spec
+}
+
+func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
+ if p.trace {
+ defer un(trace(p, "GenDecl("+keyword.String()+")"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(keyword)
+ var lparen, rparen token.Pos
+ var list []ast.Spec
+ if p.tok == token.LPAREN {
+ lparen = p.pos
+ p.next()
+ for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
+ list = append(list, f(p, p.leadComment, iota))
+ }
+ rparen = p.expect(token.RPAREN)
+ p.expectSemi()
+ } else {
+ list = append(list, f(p, nil, 0))
+ }
+
+ return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
+}
+
+func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
+ if p.trace {
+ defer un(trace(p, "Receiver"))
+ }
+
+ pos := p.pos
+ par := p.parseParameters(scope, false)
+
+ // must have exactly one receiver
+ if par.NumFields() != 1 {
+ p.errorExpected(pos, "exactly one receiver")
+ // TODO determine a better range for BadExpr below
+ par.List = []*ast.Field{{Type: &ast.BadExpr{pos, pos}}}
+ return par
+ }
+
+ // recv type must be of the form ["*"] identifier
+ recv := par.List[0]
+ base := deref(recv.Type)
+ if _, isIdent := base.(*ast.Ident); !isIdent {
+ p.errorExpected(base.Pos(), "(unqualified) identifier")
+ par.List = []*ast.Field{{Type: &ast.BadExpr{recv.Pos(), recv.End()}}}
+ }
+
+ return par
+}
+
+func (p *parser) parseFuncDecl() *ast.FuncDecl {
+ if p.trace {
+ defer un(trace(p, "FunctionDecl"))
+ }
+
+ doc := p.leadComment
+ pos := p.expect(token.FUNC)
+ scope := ast.NewScope(p.topScope) // function scope
+
+ var recv *ast.FieldList
+ if p.tok == token.LPAREN {
+ recv = p.parseReceiver(scope)
+ }
+
+ ident := p.parseIdent()
+
+ params, results := p.parseSignature(scope)
+
+ var body *ast.BlockStmt
+ if p.tok == token.LBRACE {
+ body = p.parseBody(scope)
+ }
+ p.expectSemi()
+
+ decl := &ast.FuncDecl{doc, recv, ident, &ast.FuncType{pos, params, results}, body}
+ if recv == nil {
+ // Go spec: The scope of an identifier denoting a constant, type,
+ // variable, or function (but not method) declared at top level
+ // (outside any function) is the package block.
+ //
+ // init() functions cannot be referred to and there may
+ // be more than one - don't put them in the pkgScope
+ if ident.Name != "init" {
+ p.declare(decl, p.pkgScope, ast.Fun, ident)
+ }
+ }
+
+ return decl
+}
+
+func (p *parser) parseDecl() ast.Decl {
+ if p.trace {
+ defer un(trace(p, "Declaration"))
+ }
+
+ var f parseSpecFunction
+ switch p.tok {
+ case token.CONST:
+ f = parseConstSpec
+
+ case token.TYPE:
+ f = parseTypeSpec
+
+ case token.VAR:
+ f = parseVarSpec
+
+ case token.FUNC:
+ return p.parseFuncDecl()
+
+ default:
+ pos := p.pos
+ p.errorExpected(pos, "declaration")
+ p.next() // make progress
+ decl := &ast.BadDecl{pos, p.pos}
+ return decl
+ }
+
+ return p.parseGenDecl(p.tok, f)
+}
+
+func (p *parser) parseDeclList() (list []ast.Decl) {
+ if p.trace {
+ defer un(trace(p, "DeclList"))
+ }
+
+ for p.tok != token.EOF {
+ list = append(list, p.parseDecl())
+ }
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Source files
+
+func (p *parser) parseFile() *ast.File {
+ if p.trace {
+ defer un(trace(p, "File"))
+ }
+
+ // package clause
+ doc := p.leadComment
+ pos := p.expect(token.PACKAGE)
+ // Go spec: The package clause is not a declaration;
+ // the package name does not appear in any scope.
+ ident := p.parseIdent()
+ if ident.Name == "_" {
+ p.error(p.pos, "invalid package name _")
+ }
+ p.expectSemi()
+
+ var decls []ast.Decl
+
+ // Don't bother parsing the rest if we had errors already.
+ // Likely not a Go source file at all.
+
+ if p.ErrorCount() == 0 && p.mode&PackageClauseOnly == 0 {
+ // import decls
+ for p.tok == token.IMPORT {
+ decls = append(decls, p.parseGenDecl(token.IMPORT, parseImportSpec))
+ }
+
+ if p.mode&ImportsOnly == 0 {
+ // rest of package body
+ for p.tok != token.EOF {
+ decls = append(decls, p.parseDecl())
+ }
+ }
+ }
+
+ assert(p.topScope == p.pkgScope, "imbalanced scopes")
+
+ // resolve global identifiers within the same file
+ i := 0
+ for _, ident := range p.unresolved {
+ // i <= index for current ident
+ assert(ident.Obj == unresolved, "object already resolved")
+ ident.Obj = p.pkgScope.Lookup(ident.Name) // also removes unresolved sentinel
+ if ident.Obj == nil {
+ p.unresolved[i] = ident
+ i++
+ }
+ }
+
+ // TODO(gri): store p.imports in AST
+ return &ast.File{doc, pos, ident, decls, p.pkgScope, p.imports, p.unresolved[0:i], p.comments}
+}
diff --git a/src/go/printer/testdata/slow.golden b/src/go/printer/testdata/slow.golden
new file mode 100644
index 0000000..43a15cb
--- /dev/null
+++ b/src/go/printer/testdata/slow.golden
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+ "testing"
+ "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+ if err := deepequal.Check(nil, nil); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
+
+type Foo struct {
+ bar *Bar
+ bang *Bar
+}
+
+type Bar struct {
+ baz *Baz
+ foo []*Foo
+}
+
+type Baz struct {
+ entries map[int]interface{}
+ whatever string
+}
+
+func newFoo() *Foo {
+ return &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 42: &Foo{},
+ 21: &Bar{},
+ 11: &Baz{whatever: "it's just a test"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 43: &Foo{},
+ 22: &Bar{},
+ 13: &Baz{whatever: "this is nuts"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 61: &Foo{},
+ 71: &Bar{},
+ 11: &Baz{whatever: "no, it's Go"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 0: &Foo{},
+ -2: &Bar{},
+ -11: &Baz{whatever: "we need to go deeper"}}}},
+ bang: &Bar{foo: []*Foo{
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ -2: &Foo{},
+ -5: &Bar{},
+ -7: &Baz{whatever: "are you serious?"}}}},
+ bang: &Bar{foo: []*Foo{}}},
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ -100: &Foo{},
+ 50: &Bar{},
+ 20: &Baz{whatever: "na, not really ..."}}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}},
+ &Foo{bar: &Bar{baz: &Baz{
+ entries: map[int]interface{}{
+ 2: &Foo{},
+ 1: &Bar{},
+ -1: &Baz{whatever: "... it's just a test."}}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+ a := newFoo()
+ b := newFoo()
+
+ if err := deepequal.Check(a, b); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
diff --git a/src/go/printer/testdata/slow.input b/src/go/printer/testdata/slow.input
new file mode 100644
index 0000000..0e5a23d
--- /dev/null
+++ b/src/go/printer/testdata/slow.input
@@ -0,0 +1,85 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deepequal_test
+
+import (
+ "testing"
+ "google3/spam/archer/frontend/deepequal"
+)
+
+func TestTwoNilValues(t *testing.T) {
+ if err := deepequal.Check(nil, nil); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
+
+type Foo struct {
+ bar *Bar
+ bang *Bar
+}
+
+type Bar struct {
+ baz *Baz
+ foo []*Foo
+}
+
+type Baz struct {
+ entries map[int]interface{}
+ whatever string
+}
+
+func newFoo() (*Foo) {
+return &Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+42: &Foo{},
+21: &Bar{},
+11: &Baz{ whatever: "it's just a test" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+43: &Foo{},
+22: &Bar{},
+13: &Baz{ whatever: "this is nuts" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+61: &Foo{},
+71: &Bar{},
+11: &Baz{ whatever: "no, it's Go" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+0: &Foo{},
+-2: &Bar{},
+-11: &Baz{ whatever: "we need to go deeper" }}}},
+ bang: &Bar{foo: []*Foo{
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-2: &Foo{},
+-5: &Bar{},
+-7: &Baz{ whatever: "are you serious?" }}}},
+ bang: &Bar{foo: []*Foo{}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+-100: &Foo{},
+50: &Bar{},
+20: &Baz{ whatever: "na, not really ..." }}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}},
+&Foo{bar: &Bar{ baz: &Baz{
+entries: map[int]interface{}{
+2: &Foo{},
+1: &Bar{},
+-1: &Baz{ whatever: "... it's just a test." }}}},
+ bang: &Bar{foo: []*Foo{}}}}}}}}}
+}
+
+func TestElaborate(t *testing.T) {
+ a := newFoo()
+ b := newFoo()
+
+ if err := deepequal.Check(a, b); err != nil {
+ t.Errorf("expected nil, saw %v", err)
+ }
+}
diff --git a/src/go/printer/testdata/statements.golden b/src/go/printer/testdata/statements.golden
new file mode 100644
index 0000000..4b13460
--- /dev/null
+++ b/src/go/printer/testdata/statements.golden
@@ -0,0 +1,644 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+var expr bool
+
+func use(x interface{}) {}
+
+// Formatting of multi-line return statements.
+func _f() {
+ return
+ return x, y, z
+ return T{}
+ return T{1, 2, 3},
+ x, y, z
+ return T{1, 2, 3},
+ x, y,
+ z
+ return T{1,
+ 2,
+ 3}
+ return T{1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ 3}
+ return T{
+ 1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ T{1, 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2,
+ 3},
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ },
+ nil
+ return x + y +
+ z
+ return func() {}
+ return func() {
+ _ = 0
+ }, T{
+ 1, 2,
+ }
+ return func() {
+ _ = 0
+ }
+ return func() T {
+ return T{
+ 1, 2,
+ }
+ }
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ nil
+}
+
+func G() (*T, *T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ &T{
+ X: 3,
+ Y: 4,
+ },
+ nil
+}
+
+func _() interface{} {
+ return &fileStat{
+ name: basename(file.name),
+ size: mkSize(d.FileSizeHigh, d.FileSizeLow),
+ modTime: mkModTime(d.LastWriteTime),
+ mode: mkMode(d.FileAttributes),
+ sys: mkSysFromFI(&d),
+ }, nil
+}
+
+// Formatting of if-statement headers.
+func _() {
+ if true {
+ }
+ if true {
+ } // no semicolon printed
+ if expr {
+ }
+ if expr {
+ } // no semicolon printed
+ if expr {
+ } // no parens printed
+ if expr {
+ } // no semicolon and parens printed
+ if x := expr; true {
+ use(x)
+ }
+ if x := expr; expr {
+ use(x)
+ }
+}
+
+// Formatting of switch-statement headers.
+func _() {
+ switch {
+ }
+ switch {
+ } // no semicolon printed
+ switch expr {
+ }
+ switch expr {
+ } // no semicolon printed
+ switch expr {
+ } // no parens printed
+ switch expr {
+ } // no semicolon and parens printed
+ switch x := expr; {
+ default:
+ use(
+ x)
+ }
+ switch x := expr; expr {
+ default:
+ use(x)
+ }
+}
+
+// Formatting of switch statement bodies.
+func _() {
+ switch {
+ }
+
+ switch x := 0; x {
+ case 1:
+ use(x)
+ use(x) // followed by an empty line
+
+ case 2: // followed by an empty line
+
+ use(x) // followed by an empty line
+
+ case 3: // no empty lines
+ use(x)
+ use(x)
+ }
+
+ switch x {
+ case 0:
+ use(x)
+ case 1: // this comment should have no effect on the previous or next line
+ use(x)
+ }
+
+ switch x := 0; x {
+ case 1:
+ x = 0
+ // this comment should be indented
+ case 2:
+ x = 0
+ // this comment should not be indented, it is aligned with the next case
+ case 3:
+ x = 0
+ /* indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 4:
+ x = 0
+ /* not indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 5:
+ }
+}
+
+// Formatting of selected select statements.
+func _() {
+ select {}
+ select { /* this comment should not be tab-aligned because the closing } is on the same line */
+ }
+ select { /* this comment should be tab-aligned */
+ }
+ select { // this comment should be tab-aligned
+ }
+ select {
+ case <-c:
+ }
+}
+
+// Formatting of for-statement headers for single-line for-loops.
+func _() {
+ for {
+ }
+ for expr {
+ }
+ for expr {
+ } // no parens printed
+ for {
+ } // no semicolons printed
+ for x := expr; ; {
+ use(x)
+ }
+ for expr {
+ } // no semicolons printed
+ for expr {
+ } // no semicolons and parens printed
+ for ; ; expr = false {
+ }
+ for x := expr; expr; {
+ use(x)
+ }
+ for x := expr; ; expr = false {
+ use(x)
+ }
+ for ; expr; expr = false {
+ }
+ for x := expr; expr; expr = false {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ } // no parens printed
+}
+
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+ for {
+ }
+ for expr {
+ }
+ for expr {
+ } // no parens printed
+ for {
+ } // no semicolons printed
+ for x := expr; ; {
+ use(x)
+ }
+ for expr {
+ } // no semicolons printed
+ for expr {
+ } // no semicolons and parens printed
+ for ; ; expr = false {
+ }
+ for x := expr; expr; {
+ use(x)
+ }
+ for x := expr; ; expr = false {
+ use(x)
+ }
+ for ; expr; expr = false {
+ }
+ for x := expr; expr; expr = false {
+ use(x)
+ }
+ for range []int{} {
+ println("foo")
+ }
+ for x := range []int{} {
+ use(x)
+ }
+ for x := range []int{} {
+ use(x)
+ } // no parens printed
+}
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+ if cond {
+ }
+ if cond {
+ } // multiple lines
+ if cond {
+ } else {
+ } // else clause always requires multiple lines
+
+ for {
+ }
+ for i := 0; i < len(a); 1++ {
+ }
+ for i := 0; i < len(a); 1++ {
+ a[i] = i
+ }
+ for i := 0; i < len(a); 1++ {
+ a[i] = i
+ } // multiple lines
+
+ for range a {
+ }
+ for _ = range a {
+ }
+ for _, _ = range a {
+ }
+ for i := range a {
+ }
+ for i := range a {
+ a[i] = i
+ }
+ for i := range a {
+ a[i] = i
+ } // multiple lines
+
+ go func() {
+ for {
+ a <- <-b
+ }
+ }()
+ defer func() {
+ if x := recover(); x != nil {
+ err = fmt.Sprintf("error: %s", x.msg)
+ }
+ }()
+}
+
+// Don't remove mandatory parentheses around composite literals in control clauses.
+func _() {
+ // strip parentheses - no composite literals or composite literals don't start with a type name
+ if x {
+ }
+ if x {
+ }
+ if []T{} {
+ }
+ if []T{} {
+ }
+ if []T{} {
+ }
+
+ for x {
+ }
+ for x {
+ }
+ for []T{} {
+ }
+ for []T{} {
+ }
+ for []T{} {
+ }
+
+ switch x {
+ }
+ switch x {
+ }
+ switch []T{} {
+ }
+ switch []T{} {
+ }
+
+ for _ = range []T{T{42}} {
+ }
+
+ // leave parentheses - composite literals start with a type name
+ if (T{}) {
+ }
+ if (T{}) {
+ }
+ if (T{}) {
+ }
+
+ for (T{}) {
+ }
+ for (T{}) {
+ }
+ for (T{}) {
+ }
+
+ switch (T{}) {
+ }
+ switch (T{}) {
+ }
+
+ for _ = range (T1{T{42}}) {
+ }
+
+ if x == (T{42}[0]) {
+ }
+ if (x == T{42}[0]) {
+ }
+ if x == (T{42}[0]) {
+ }
+ if x == (T{42}[0]) {
+ }
+ if x == (T{42}[0]) {
+ }
+ if x == a+b*(T{42}[0]) {
+ }
+ if (x == a+b*T{42}[0]) {
+ }
+ if x == a+b*(T{42}[0]) {
+ }
+ if x == a+(b*(T{42}[0])) {
+ }
+ if x == a+b*(T{42}[0]) {
+ }
+ if (a + b*(T{42}[0])) == x {
+ }
+ if (a + b*(T{42}[0])) == x {
+ }
+
+ if struct{ x bool }{false}.x {
+ }
+ if (struct{ x bool }{false}.x) == false {
+ }
+ if struct{ x bool }{false}.x == false {
+ }
+}
+
+// Extra empty lines inside functions. Do respect source code line
+// breaks between statement boundaries but print at most one empty
+// line at a time.
+func _() {
+
+ const _ = 0
+
+ const _ = 1
+ type _ int
+ type _ float
+
+ var _ = 0
+ var x = 1
+
+ // Each use(x) call below should have at most one empty line before and after.
+ // Known bug: The first use call may have more than one empty line before
+ // (see go/printer/nodes.go, func linebreak).
+
+ use(x)
+
+ if x < x {
+
+ use(x)
+
+ } else {
+
+ use(x)
+
+ }
+}
+
+// Formatting around labels.
+func _() {
+L:
+}
+
+func _() {
+ // this comment should be indented
+L: // no semicolon needed
+}
+
+func _() {
+ switch 0 {
+ case 0:
+ L0:
+ ; // semicolon required
+ case 1:
+ L1:
+ ; // semicolon required
+ default:
+ L2: // no semicolon needed
+ }
+}
+
+func _() {
+ f()
+L1:
+ f()
+L2:
+ ;
+L3:
+}
+
+func _() {
+ // this comment should be indented
+L:
+}
+
+func _() {
+L:
+ _ = 0
+}
+
+func _() {
+ // this comment should be indented
+L:
+ _ = 0
+}
+
+func _() {
+ for {
+ L1:
+ _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+func _() {
+ // this comment should be indented
+ for {
+ L1:
+ _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+func _() {
+ if true {
+ _ = 0
+ }
+ _ = 0 // the indentation here should not be affected by the long label name
+AnOverlongLabel:
+ _ = 0
+
+ if true {
+ _ = 0
+ }
+ _ = 0
+
+L:
+ _ = 0
+}
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+ MoreCode()
+}
+
+func _() {
+ for {
+ goto L
+ }
+L: // A comment on the same line as the label, followed by a single empty line.
+ // Known bug: There may be more than one empty line before MoreCode()
+ // (see go/printer/nodes.go, func linebreak).
+
+ MoreCode()
+}
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+func _() {
+ for {
+ goto AVeryLongLabelThatShouldNotAffectFormatting
+ }
+AVeryLongLabelThatShouldNotAffectFormatting:
+ // There should be a single empty line after this comment.
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+// Formatting of empty statements.
+func _() {
+
+}
+
+func _() {
+}
+
+func _() {
+}
+
+func _() {
+ f()
+}
+
+func _() {
+L:
+ ;
+}
+
+func _() {
+L:
+ ;
+ f()
+}
diff --git a/src/go/printer/testdata/statements.input b/src/go/printer/testdata/statements.input
new file mode 100644
index 0000000..cade157
--- /dev/null
+++ b/src/go/printer/testdata/statements.input
@@ -0,0 +1,555 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package statements
+
+var expr bool
+
+func use(x interface{}) {}
+
+// Formatting of multi-line return statements.
+func _f() {
+ return
+ return x, y, z
+ return T{}
+ return T{1, 2, 3},
+ x, y, z
+ return T{1, 2, 3},
+ x, y,
+ z
+ return T{1,
+ 2,
+ 3}
+ return T{1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ 3}
+ return T{
+ 1,
+ 2,
+ 3,
+ }
+ return T{
+ 1,
+ T{1, 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2, 3},
+ 3,
+ }
+ return T{
+ 1,
+ T{1,
+ 2,
+ 3},
+ 3,
+ }
+ return T{
+ 1,
+ 2,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ }, nil
+ return T{
+ 1,
+ 2,
+ },
+ nil
+ return T{
+ 1,
+ 2,
+ },
+ T{
+ x: 3,
+ y: 4,
+ },
+ nil
+ return x + y +
+ z
+ return func() {}
+ return func() {
+ _ = 0
+ }, T{
+ 1, 2,
+ }
+ return func() {
+ _ = 0
+ }
+ return func() T {
+ return T {
+ 1, 2,
+ }
+ }
+}
+
+// Formatting of multi-line returns: test cases from issue 1207.
+func F() (*T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ nil
+}
+
+func G() (*T, *T, os.Error) {
+ return &T{
+ X: 1,
+ Y: 2,
+ },
+ &T{
+ X: 3,
+ Y: 4,
+ },
+ nil
+}
+
+func _() interface{} {
+ return &fileStat{
+ name: basename(file.name),
+ size: mkSize(d.FileSizeHigh, d.FileSizeLow),
+ modTime: mkModTime(d.LastWriteTime),
+ mode: mkMode(d.FileAttributes),
+ sys: mkSysFromFI(&d),
+ }, nil
+}
+
+// Formatting of if-statement headers.
+func _() {
+ if true {}
+ if; true {} // no semicolon printed
+ if expr{}
+ if;expr{} // no semicolon printed
+ if (expr){} // no parens printed
+ if;((expr)){} // no semicolon and parens printed
+ if x:=expr;true{
+ use(x)}
+ if x:=expr; expr {use(x)}
+}
+
+
+// Formatting of switch-statement headers.
+func _() {
+ switch {}
+ switch;{} // no semicolon printed
+ switch expr {}
+ switch;expr{} // no semicolon printed
+ switch (expr) {} // no parens printed
+ switch;((expr)){} // no semicolon and parens printed
+ switch x := expr; { default:use(
+x)
+ }
+ switch x := expr; expr {default:use(x)}
+}
+
+
+// Formatting of switch statement bodies.
+func _() {
+ switch {
+ }
+
+ switch x := 0; x {
+ case 1:
+ use(x)
+ use(x) // followed by an empty line
+
+ case 2: // followed by an empty line
+
+ use(x) // followed by an empty line
+
+ case 3: // no empty lines
+ use(x)
+ use(x)
+ }
+
+ switch x {
+ case 0:
+ use(x)
+ case 1: // this comment should have no effect on the previous or next line
+ use(x)
+ }
+
+ switch x := 0; x {
+ case 1:
+ x = 0
+ // this comment should be indented
+ case 2:
+ x = 0
+ // this comment should not be indented, it is aligned with the next case
+ case 3:
+ x = 0
+ /* indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 4:
+ x = 0
+ /* not indented comment
+ aligned
+ aligned
+ */
+ // bla
+ /* and more */
+ case 5:
+ }
+}
+
+
+// Formatting of selected select statements.
+func _() {
+ select {
+ }
+ select { /* this comment should not be tab-aligned because the closing } is on the same line */ }
+ select { /* this comment should be tab-aligned */
+ }
+ select { // this comment should be tab-aligned
+ }
+ select { case <-c: }
+}
+
+
+// Formatting of for-statement headers for single-line for-loops.
+func _() {
+ for{}
+ for expr {}
+ for (expr) {} // no parens printed
+ for;;{} // no semicolons printed
+ for x :=expr;; {use( x)}
+ for; expr;{} // no semicolons printed
+ for; ((expr));{} // no semicolons and parens printed
+ for; ; expr = false {}
+ for x :=expr; expr; {use(x)}
+ for x := expr;; expr=false {use(x)}
+ for;expr;expr =false {}
+ for x := expr;expr;expr = false { use(x) }
+ for x := range []int{} { use(x) }
+ for x := range (([]int{})) { use(x) } // no parens printed
+}
+
+
+// Formatting of for-statement headers for multi-line for-loops.
+func _() {
+ for{
+ }
+ for expr {
+ }
+ for (expr) {
+ } // no parens printed
+ for;;{
+ } // no semicolons printed
+ for x :=expr;; {use( x)
+ }
+ for; expr;{
+ } // no semicolons printed
+ for; ((expr));{
+ } // no semicolons and parens printed
+ for; ; expr = false {
+ }
+ for x :=expr; expr; {use(x)
+ }
+ for x := expr;; expr=false {use(x)
+ }
+ for;expr;expr =false {
+ }
+ for x := expr;expr;expr = false {
+ use(x)
+ }
+ for range []int{} {
+ println("foo")}
+ for x := range []int{} {
+ use(x) }
+ for x := range (([]int{})) {
+ use(x) } // no parens printed
+}
+
+
+// Formatting of selected short single- and multi-line statements.
+func _() {
+ if cond {}
+ if cond {
+ } // multiple lines
+ if cond {} else {} // else clause always requires multiple lines
+
+ for {}
+ for i := 0; i < len(a); 1++ {}
+ for i := 0; i < len(a); 1++ { a[i] = i }
+ for i := 0; i < len(a); 1++ { a[i] = i
+ } // multiple lines
+
+ for range a{}
+ for _ = range a{}
+ for _, _ = range a{}
+ for i := range a {}
+ for i := range a { a[i] = i }
+ for i := range a { a[i] = i
+ } // multiple lines
+
+ go func() { for { a <- <-b } }()
+ defer func() { if x := recover(); x != nil { err = fmt.Sprintf("error: %s", x.msg) } }()
+}
+
+
+// Don't remove mandatory parentheses around composite literals in control clauses.
+func _() {
+ // strip parentheses - no composite literals or composite literals don't start with a type name
+ if (x) {}
+ if (((x))) {}
+ if ([]T{}) {}
+ if (([]T{})) {}
+ if ; (((([]T{})))) {}
+
+ for (x) {}
+ for (((x))) {}
+ for ([]T{}) {}
+ for (([]T{})) {}
+ for ; (((([]T{})))) ; {}
+
+ switch (x) {}
+ switch (((x))) {}
+ switch ([]T{}) {}
+ switch ; (((([]T{})))) {}
+
+ for _ = range ((([]T{T{42}}))) {}
+
+ // leave parentheses - composite literals start with a type name
+ if (T{}) {}
+ if ((T{})) {}
+ if ; ((((T{})))) {}
+
+ for (T{}) {}
+ for ((T{})) {}
+ for ; ((((T{})))) ; {}
+
+ switch (T{}) {}
+ switch ; ((((T{})))) {}
+
+ for _ = range (((T1{T{42}}))) {}
+
+ if x == (T{42}[0]) {}
+ if (x == T{42}[0]) {}
+ if (x == (T{42}[0])) {}
+ if (x == (((T{42}[0])))) {}
+ if (((x == (T{42}[0])))) {}
+ if x == a + b*(T{42}[0]) {}
+ if (x == a + b*T{42}[0]) {}
+ if (x == a + b*(T{42}[0])) {}
+ if (x == a + ((b * (T{42}[0])))) {}
+ if (((x == a + b * (T{42}[0])))) {}
+ if (((a + b * (T{42}[0])) == x)) {}
+ if (((a + b * (T{42}[0])))) == x {}
+
+ if (struct{x bool}{false}.x) {}
+ if (struct{x bool}{false}.x) == false {}
+ if (struct{x bool}{false}.x == false) {}
+}
+
+
+// Extra empty lines inside functions. Do respect source code line
+// breaks between statement boundaries but print at most one empty
+// line at a time.
+func _() {
+
+ const _ = 0
+
+ const _ = 1
+ type _ int
+ type _ float
+
+ var _ = 0
+ var x = 1
+
+ // Each use(x) call below should have at most one empty line before and after.
+ // Known bug: The first use call may have more than one empty line before
+ // (see go/printer/nodes.go, func linebreak).
+
+
+
+ use(x)
+
+ if x < x {
+
+ use(x)
+
+ } else {
+
+ use(x)
+
+ }
+}
+
+
+// Formatting around labels.
+func _() {
+ L:
+}
+
+
+func _() {
+ // this comment should be indented
+ L: ; // no semicolon needed
+}
+
+
+func _() {
+ switch 0 {
+ case 0:
+ L0: ; // semicolon required
+ case 1:
+ L1: ; // semicolon required
+ default:
+ L2: ; // no semicolon needed
+ }
+}
+
+
+func _() {
+ f()
+L1:
+ f()
+L2:
+ ;
+L3:
+}
+
+
+func _() {
+ // this comment should be indented
+ L:
+}
+
+
+func _() {
+ L: _ = 0
+}
+
+
+func _() {
+ // this comment should be indented
+ L: _ = 0
+}
+
+
+func _() {
+ for {
+ L1: _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+
+func _() {
+ // this comment should be indented
+ for {
+ L1: _ = 0
+ L2:
+ _ = 0
+ }
+}
+
+
+func _() {
+ if true {
+ _ = 0
+ }
+ _ = 0 // the indentation here should not be affected by the long label name
+AnOverlongLabel:
+ _ = 0
+
+ if true {
+ _ = 0
+ }
+ _ = 0
+
+L: _ = 0
+}
+
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+ MoreCode()
+}
+
+
+func _() {
+ for {
+ goto L
+ }
+L: // A comment on the same line as the label, followed by a single empty line.
+ // Known bug: There may be more than one empty line before MoreCode()
+ // (see go/printer/nodes.go, func linebreak).
+
+
+
+
+ MoreCode()
+}
+
+
+func _() {
+ for {
+ goto L
+ }
+L:
+
+
+
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+
+func _() {
+ for {
+ goto AVeryLongLabelThatShouldNotAffectFormatting
+ }
+AVeryLongLabelThatShouldNotAffectFormatting:
+ // There should be a single empty line after this comment.
+
+ // There should be a single empty line before this comment.
+ MoreCode()
+}
+
+
+// Formatting of empty statements.
+func _() {
+ ;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {;;;;;;;;;;;;;;;;;;;;;;;;;}
+
+func _() {
+f();;;;;;;;;;;;;;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+}
+
+func _() {
+L:;;;;;;;;;;;;
+ f()
+}