Merge pull request #12685 from eparis/goimports

Remove dependency on external goimports binary
Brendan Burns 2015-08-14 20:29:00 -07:00
commit 9cc5156b69
26 changed files with 12599 additions and 52 deletions


@@ -8,7 +8,6 @@ matrix:
install:
- go get github.com/tools/godep
- go get golang.org/x/tools/cmd/goimports
- ./hack/travis/install-etcd.sh
- ./hack/build-go.sh
- export PATH=$GOPATH/bin:./third_party/etcd:$PATH
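With the .travis.yml change above, the `go get golang.org/x/tools/cmd/goimports` step is gone: the imports package is vendored below, so goimports-style fixing can run in-process rather than by shelling out to an external binary. A minimal sketch of driving the vendored package directly, using the same Options fields its own tests exercise; the sample source and error handling are illustrative, not taken from the Kubernetes hack scripts.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	// A fragment missing its "fmt" import; Process parses it, fixes the
	// imports, and returns gofmt-formatted output.
	src := []byte("package demo\n\nfunc Hello() { fmt.Println(\"hi\") }\n")

	opt := &imports.Options{TabWidth: 8, TabIndent: true, Comments: true, Fragment: true}

	out, err := imports.Process("demo.go", src, opt)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out) // output now begins with: import "fmt"
}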

Godeps/Godeps.json generated (8 changes)

@@ -551,6 +551,14 @@
"ImportPath": "golang.org/x/oauth2",
"Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9"
},
{
"ImportPath": "golang.org/x/tools/go/ast/astutil",
"Rev": "4f50f44d7a3206e9e28b984e023efce2a4a75369"
},
{
"ImportPath": "golang.org/x/tools/imports",
"Rev": "4f50f44d7a3206e9e28b984e023efce2a4a75369"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "0c2979aeaa5b573e60d3ddffe5ce8dca8df309bd"


@@ -0,0 +1,625 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
// This file defines utilities for working with source positions.
import (
"fmt"
"go/ast"
"go/token"
"sort"
)
// PathEnclosingInterval returns the node that encloses the source
// interval [start, end), and all its ancestors up to the AST root.
//
// The definition of "enclosing" used by this function considers
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
// z := x + y // add them
// <-A->
// <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
// This behaviour makes user interfaces more tolerant of imperfect
// input.
//
// This function treats tokens as nodes, though they are not included
// in the result. e.g. PathEnclosingInterval("+") returns the
// enclosing ast.BinaryExpr("x + y").
//
// If start==end, the 1-char interval following start is used instead.
//
// The 'exact' result is true if the interval contains only path[0]
// and perhaps some adjacent whitespace. It is false if the interval
// overlaps multiple children of path[0], or if it contains only
// interior whitespace of path[0].
// In this example:
//
// z := x + y // add them
// <--C--> <---E-->
// ^
// D
//
// intervals C, D and E are inexact. C is contained by the
// z-assignment statement, because it spans three of its children (:=,
// x, +). So too is the 1-char interval D, because it contains only
// interior whitespace of the assignment. E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
// Precondition: [start, end) both lie within the same file as root.
// TODO(adonovan): return (nil, false) in this case and remove precond.
// Requires FileSet; see loader.tokenFileContainsPos.
//
// Postcondition: path is never nil; it always contains at least 'root'.
//
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
var visit func(node ast.Node) bool
visit = func(node ast.Node) bool {
path = append(path, node)
nodePos := node.Pos()
nodeEnd := node.End()
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
// Intersect [start, end) with interval of node.
if start < nodePos {
start = nodePos
}
if end > nodeEnd {
end = nodeEnd
}
// Find sole child that contains [start, end).
children := childrenOf(node)
l := len(children)
for i, child := range children {
// [childPos, childEnd) is unaugmented interval of child.
childPos := child.Pos()
childEnd := child.End()
// [augPos, augEnd) is whitespace-augmented interval of child.
augPos := childPos
augEnd := childEnd
if i > 0 {
augPos = children[i-1].End() // start of preceding whitespace
}
if i < l-1 {
nextChildPos := children[i+1].Pos()
// Does [start, end) lie between child and next child?
if start >= augEnd && end <= nextChildPos {
return false // inexact match
}
augEnd = nextChildPos // end of following whitespace
}
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
// i, augPos, augEnd, start, end) // debugging
// Does augmented child strictly contain [start, end)?
if augPos <= start && end <= augEnd {
_, isToken := child.(tokenNode)
return isToken || visit(child)
}
// Does [start, end) overlap multiple children?
// i.e. left-augmented child contains start
// but LR-augmented child does not contain end.
if start < childEnd && end > augEnd {
break
}
}
// No single child contained [start, end),
// so node is the result. Is it exact?
// (It's tempting to put this condition before the
// child loop, but it gives the wrong result in the
// case where a node (e.g. ExprStmt) and its sole
// child have equal intervals.)
if start == nodePos && end == nodeEnd {
return true // exact match
}
return false // inexact: overlaps multiple children
}
if start > end {
start, end = end, start
}
if start < root.End() && end > root.Pos() {
if start == end {
end = start + 1 // empty interval => interval of size 1
}
exact = visit(root)
// Reverse the path:
for i, l := 0, len(path); i < l/2; i++ {
path[i], path[l-1-i] = path[l-1-i], path[i]
}
} else {
// Selection lies within whitespace preceding the
// first (or following the last) declaration in the file.
// The result nonetheless always includes the ast.File.
path = append(path, root)
}
return
}
// tokenNode is a dummy implementation of ast.Node for a single token.
// Instances are used transiently by PathEnclosingInterval but never
// escape this package.
//
type tokenNode struct {
pos token.Pos
end token.Pos
}
func (n tokenNode) Pos() token.Pos {
return n.pos
}
func (n tokenNode) End() token.Pos {
return n.end
}
func tok(pos token.Pos, len int) ast.Node {
return tokenNode{pos, pos + token.Pos(len)}
}
// childrenOf returns the direct non-nil children of ast.Node n.
// It may include fake ast.Node implementations for bare tokens.
// It is not safe to call (e.g.) ast.Walk on such nodes.
//
func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
// First add nodes for all true subtrees.
ast.Inspect(n, func(node ast.Node) bool {
if node == n { // push n
return true // recur
}
if node != nil { // push child
children = append(children, node)
}
return false // no recursion
})
// Then add fake Nodes for bare tokens.
switch n := n.(type) {
case *ast.ArrayType:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Elt.End(), len("]")))
case *ast.AssignStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.BasicLit:
children = append(children,
tok(n.ValuePos, len(n.Value)))
case *ast.BinaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.BlockStmt:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("}")))
case *ast.BranchStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.CallExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
if n.Ellipsis != 0 {
children = append(children, tok(n.Ellipsis, len("...")))
}
case *ast.CaseClause:
if n.List == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.ChanType:
switch n.Dir {
case ast.RECV:
children = append(children, tok(n.Begin, len("<-chan")))
case ast.SEND:
children = append(children, tok(n.Begin, len("chan<-")))
case ast.RECV | ast.SEND:
children = append(children, tok(n.Begin, len("chan")))
}
case *ast.CommClause:
if n.Comm == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.Comment:
// nop
case *ast.CommentGroup:
// nop
case *ast.CompositeLit:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("{")))
case *ast.DeclStmt:
// nop
case *ast.DeferStmt:
children = append(children,
tok(n.Defer, len("defer")))
case *ast.Ellipsis:
children = append(children,
tok(n.Ellipsis, len("...")))
case *ast.EmptyStmt:
// nop
case *ast.ExprStmt:
// nop
case *ast.Field:
// TODO(adonovan): Field.{Doc,Comment,Tag}?
case *ast.FieldList:
children = append(children,
tok(n.Opening, len("(")),
tok(n.Closing, len(")")))
case *ast.File:
// TODO test: Doc
children = append(children,
tok(n.Package, len("package")))
case *ast.ForStmt:
children = append(children,
tok(n.For, len("for")))
case *ast.FuncDecl:
// TODO(adonovan): FuncDecl.Comment?
// Uniquely, FuncDecl breaks the invariant that
// preorder traversal yields tokens in lexical order:
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
if n.Recv != nil {
children = append(children, n.Recv)
}
children = append(children, n.Name)
if n.Type.Params != nil {
children = append(children, n.Type.Params)
}
if n.Type.Results != nil {
children = append(children, n.Type.Results)
}
if n.Body != nil {
children = append(children, n.Body)
}
case *ast.FuncLit:
// nop
case *ast.FuncType:
if n.Func != 0 {
children = append(children,
tok(n.Func, len("func")))
}
case *ast.GenDecl:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
if n.Lparen != 0 {
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
}
case *ast.GoStmt:
children = append(children,
tok(n.Go, len("go")))
case *ast.Ident:
children = append(children,
tok(n.NamePos, len(n.Name)))
case *ast.IfStmt:
children = append(children,
tok(n.If, len("if")))
case *ast.ImportSpec:
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
case *ast.IncDecStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.IndexExpr:
children = append(children,
tok(n.Lbrack, len("{")),
tok(n.Rbrack, len("}")))
case *ast.InterfaceType:
children = append(children,
tok(n.Interface, len("interface")))
case *ast.KeyValueExpr:
children = append(children,
tok(n.Colon, len(":")))
case *ast.LabeledStmt:
children = append(children,
tok(n.Colon, len(":")))
case *ast.MapType:
children = append(children,
tok(n.Map, len("map")))
case *ast.ParenExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.RangeStmt:
children = append(children,
tok(n.For, len("for")),
tok(n.TokPos, len(n.Tok.String())))
case *ast.ReturnStmt:
children = append(children,
tok(n.Return, len("return")))
case *ast.SelectStmt:
children = append(children,
tok(n.Select, len("select")))
case *ast.SelectorExpr:
// nop
case *ast.SendStmt:
children = append(children,
tok(n.Arrow, len("<-")))
case *ast.SliceExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.StarExpr:
children = append(children, tok(n.Star, len("*")))
case *ast.StructType:
children = append(children, tok(n.Struct, len("struct")))
case *ast.SwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.TypeAssertExpr:
children = append(children,
tok(n.Lparen-1, len(".")),
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.TypeSpec:
// TODO(adonovan): TypeSpec.{Doc,Comment}?
case *ast.TypeSwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.UnaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.ValueSpec:
// TODO(adonovan): ValueSpec.{Doc,Comment}?
default:
// Includes *ast.BadDecl, *ast.BadExpr, *ast.BadStmt.
panic(fmt.Sprintf("unexpected node type %T", n))
}
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
// the switch above so we can make interleaved callbacks for
// both Nodes and Tokens in the right order and avoid the need
// to sort.
sort.Sort(byPos(children))
return children
}
type byPos []ast.Node
func (sl byPos) Len() int {
return len(sl)
}
func (sl byPos) Less(i, j int) bool {
return sl[i].Pos() < sl[j].Pos()
}
func (sl byPos) Swap(i, j int) {
sl[i], sl[j] = sl[j], sl[i]
}
// NodeDescription returns a description of the concrete type of n suitable
// for a user interface.
//
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
//
func NodeDescription(n ast.Node) string {
switch n := n.(type) {
case *ast.ArrayType:
return "array type"
case *ast.AssignStmt:
return "assignment"
case *ast.BadDecl:
return "bad declaration"
case *ast.BadExpr:
return "bad expression"
case *ast.BadStmt:
return "bad statement"
case *ast.BasicLit:
return "basic literal"
case *ast.BinaryExpr:
return fmt.Sprintf("binary %s operation", n.Op)
case *ast.BlockStmt:
return "block"
case *ast.BranchStmt:
switch n.Tok {
case token.BREAK:
return "break statement"
case token.CONTINUE:
return "continue statement"
case token.GOTO:
return "goto statement"
case token.FALLTHROUGH:
return "fall-through statement"
}
case *ast.CallExpr:
return "function call (or conversion)"
case *ast.CaseClause:
return "case clause"
case *ast.ChanType:
return "channel type"
case *ast.CommClause:
return "communication clause"
case *ast.Comment:
return "comment"
case *ast.CommentGroup:
return "comment group"
case *ast.CompositeLit:
return "composite literal"
case *ast.DeclStmt:
return NodeDescription(n.Decl) + " statement"
case *ast.DeferStmt:
return "defer statement"
case *ast.Ellipsis:
return "ellipsis"
case *ast.EmptyStmt:
return "empty statement"
case *ast.ExprStmt:
return "expression statement"
case *ast.Field:
// Can be any of these:
// struct {x, y int} -- struct field(s)
// struct {T} -- anon struct field
// interface {I} -- interface embedding
// interface {f()} -- interface method
// func (A) func(B) C -- receiver, param(s), result(s)
return "field/method/parameter"
case *ast.FieldList:
return "field/method/parameter list"
case *ast.File:
return "source file"
case *ast.ForStmt:
return "for loop"
case *ast.FuncDecl:
return "function declaration"
case *ast.FuncLit:
return "function literal"
case *ast.FuncType:
return "function type"
case *ast.GenDecl:
switch n.Tok {
case token.IMPORT:
return "import declaration"
case token.CONST:
return "constant declaration"
case token.TYPE:
return "type declaration"
case token.VAR:
return "variable declaration"
}
case *ast.GoStmt:
return "go statement"
case *ast.Ident:
return "identifier"
case *ast.IfStmt:
return "if statement"
case *ast.ImportSpec:
return "import specification"
case *ast.IncDecStmt:
if n.Tok == token.INC {
return "increment statement"
}
return "decrement statement"
case *ast.IndexExpr:
return "index expression"
case *ast.InterfaceType:
return "interface type"
case *ast.KeyValueExpr:
return "key/value association"
case *ast.LabeledStmt:
return "statement label"
case *ast.MapType:
return "map type"
case *ast.Package:
return "package"
case *ast.ParenExpr:
return "parenthesized " + NodeDescription(n.X)
case *ast.RangeStmt:
return "range loop"
case *ast.ReturnStmt:
return "return statement"
case *ast.SelectStmt:
return "select statement"
case *ast.SelectorExpr:
return "selector"
case *ast.SendStmt:
return "channel send"
case *ast.SliceExpr:
return "slice expression"
case *ast.StarExpr:
return "*-operation" // load/store expr or pointer type
case *ast.StructType:
return "struct type"
case *ast.SwitchStmt:
return "switch statement"
case *ast.TypeAssertExpr:
return "type assertion"
case *ast.TypeSpec:
return "type specification"
case *ast.TypeSwitchStmt:
return "type switch"
case *ast.UnaryExpr:
return fmt.Sprintf("unary %s operation", n.Op)
case *ast.ValueSpec:
return "value specification"
}
panic(fmt.Sprintf("unexpected node type: %T", n))
}
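A short sketch of calling PathEnclosingInterval as documented above: parse a file, map a substring to token positions (the same technique findInterval in the test file below uses), and walk the resulting path. The source snippet and offsets are illustrative.

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"strings"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = "package p\n\nfunc f() int { return 1 + 2 }\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	// Convert the byte offsets of "1 + 2" into token.Pos values.
	off := strings.Index(src, "1 + 2")
	tf := fset.File(f.Package)
	start, end := tf.Pos(off), tf.Pos(off+len("1 + 2"))

	path, exact := astutil.PathEnclosingInterval(f, start, end)
	for _, n := range path {
		fmt.Printf("%T\n", n) // *ast.BinaryExpr, *ast.ReturnStmt, ..., *ast.File
	}
	fmt.Println("exact:", exact) // true: the interval covers exactly the BinaryExpr
}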


@@ -0,0 +1,195 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil_test
// This file defines tests of PathEnclosingInterval.
// TODO(adonovan): exhaustive tests that run over the whole input
// tree, not just handcrafted examples.
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/token"
"strings"
"testing"
"golang.org/x/tools/go/ast/astutil"
)
// pathToString returns a string containing the concrete types of the
// nodes in path.
func pathToString(path []ast.Node) string {
var buf bytes.Buffer
fmt.Fprint(&buf, "[")
for i, n := range path {
if i > 0 {
fmt.Fprint(&buf, " ")
}
fmt.Fprint(&buf, strings.TrimPrefix(fmt.Sprintf("%T", n), "*ast."))
}
fmt.Fprint(&buf, "]")
return buf.String()
}
// findInterval parses input and returns the [start, end) positions of
// the first occurrence of substr in input. f==nil indicates failure;
// an error has already been reported in that case.
//
func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) {
f, err := parser.ParseFile(fset, "<input>", input, 0)
if err != nil {
t.Errorf("parse error: %s", err)
return
}
i := strings.Index(input, substr)
if i < 0 {
t.Errorf("%q is not a substring of input", substr)
f = nil
return
}
filePos := fset.File(f.Package)
return f, filePos.Pos(i), filePos.Pos(i + len(substr))
}
// Common input for following tests.
const input = `
// Hello.
package main
import "fmt"
func f() {}
func main() {
z := (x + y) // add them
f() // NB: ExprStmt and its CallExpr have same Pos/End
}
`
func TestPathEnclosingInterval_Exact(t *testing.T) {
// For the exact tests, we check that a substring is mapped to
// the canonical string for the node it denotes.
tests := []struct {
substr string // first occurrence of this string indicates interval
node string // complete text of expected containing node
}{
{"package",
input[11 : len(input)-1]},
{"\npack",
input[11 : len(input)-1]},
{"main",
"main"},
{"import",
"import \"fmt\""},
{"\"fmt\"",
"\"fmt\""},
{"\nfunc f() {}\n",
"func f() {}"},
{"x ",
"x"},
{" y",
"y"},
{"z",
"z"},
{" + ",
"x + y"},
{" :=",
"z := (x + y)"},
{"x + y",
"x + y"},
{"(x + y)",
"(x + y)"},
{" (x + y) ",
"(x + y)"},
{" (x + y) // add",
"(x + y)"},
{"func",
"func f() {}"},
{"func f() {}",
"func f() {}"},
{"\nfun",
"func f() {}"},
{" f",
"f"},
}
for _, test := range tests {
f, start, end := findInterval(t, new(token.FileSet), input, test.substr)
if f == nil {
continue
}
path, exact := astutil.PathEnclosingInterval(f, start, end)
if !exact {
t.Errorf("PathEnclosingInterval(%q) not exact", test.substr)
continue
}
if len(path) == 0 {
if test.node != "" {
t.Errorf("PathEnclosingInterval(%q).path: got [], want %q",
test.substr, test.node)
}
continue
}
if got := input[path[0].Pos():path[0].End()]; got != test.node {
t.Errorf("PathEnclosingInterval(%q): got %q, want %q (path was %s)",
test.substr, got, test.node, pathToString(path))
continue
}
}
}
func TestPathEnclosingInterval_Paths(t *testing.T) {
// For these tests, we check only the path of the enclosing
// node, but not its complete text because it's often quite
// large when !exact.
tests := []struct {
substr string // first occurrence of this string indicates interval
path string // the pathToString(),exact of the expected path
}{
{"// add",
"[BlockStmt FuncDecl File],false"},
{"(x + y",
"[ParenExpr AssignStmt BlockStmt FuncDecl File],false"},
{"x +",
"[BinaryExpr ParenExpr AssignStmt BlockStmt FuncDecl File],false"},
{"z := (x",
"[AssignStmt BlockStmt FuncDecl File],false"},
{"func f",
"[FuncDecl File],false"},
{"func f()",
"[FuncDecl File],false"},
{" f()",
"[FuncDecl File],false"},
{"() {}",
"[FuncDecl File],false"},
{"// Hello",
"[File],false"},
{" f",
"[Ident FuncDecl File],true"},
{"func ",
"[FuncDecl File],true"},
{"mai",
"[Ident File],true"},
{"f() // NB",
"[CallExpr ExprStmt BlockStmt FuncDecl File],true"},
}
for _, test := range tests {
f, start, end := findInterval(t, new(token.FileSet), input, test.substr)
if f == nil {
continue
}
path, exact := astutil.PathEnclosingInterval(f, start, end)
if got := fmt.Sprintf("%s,%v", pathToString(path), exact); got != test.path {
t.Errorf("PathEnclosingInterval(%q): got %q, want %q",
test.substr, got, test.path)
continue
}
}
}


@@ -0,0 +1,359 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package astutil contains common utilities for working with the Go AST.
package astutil
import (
"fmt"
"go/ast"
"go/token"
"strconv"
"strings"
)
// AddImport adds the import path to the file f, if absent.
func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
return AddNamedImport(fset, f, "", ipath)
}
// AddNamedImport adds the import path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
// AddNamedImport(fset, f, "pathpkg", "path")
// adds
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
if imports(f, ipath) {
return false
}
newImport := &ast.ImportSpec{
Path: &ast.BasicLit{
Kind: token.STRING,
Value: strconv.Quote(ipath),
},
}
if name != "" {
newImport.Name = &ast.Ident{Name: name}
}
// Find an import decl to add to.
// The goal is to find an existing import
// whose import path has the longest shared
// prefix with ipath.
var (
bestMatch = -1 // length of longest shared prefix
lastImport = -1 // index in f.Decls of the file's final import decl
impDecl *ast.GenDecl // import decl containing the best match
impIndex = -1 // spec index in impDecl containing the best match
)
for i, decl := range f.Decls {
gen, ok := decl.(*ast.GenDecl)
if ok && gen.Tok == token.IMPORT {
lastImport = i
// Do not add to import "C", to avoid disrupting the
// association with its doc comment, breaking cgo.
if declImports(gen, "C") {
continue
}
// Match an empty import decl if that's all that is available.
if len(gen.Specs) == 0 && bestMatch == -1 {
impDecl = gen
}
// Compute longest shared prefix with imports in this group.
for j, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
n := matchLen(importPath(impspec), ipath)
if n > bestMatch {
bestMatch = n
impDecl = gen
impIndex = j
}
}
}
}
// If no import decl found, add one after the last import.
if impDecl == nil {
impDecl = &ast.GenDecl{
Tok: token.IMPORT,
}
if lastImport >= 0 {
impDecl.TokPos = f.Decls[lastImport].End()
} else {
// There are no existing imports.
// Our new import goes after the package declaration and after
// the comment, if any, that starts on the same line as the
// package declaration.
impDecl.TokPos = f.Package
file := fset.File(f.Package)
pkgLine := file.Line(f.Package)
for _, c := range f.Comments {
if file.Line(c.Pos()) > pkgLine {
break
}
impDecl.TokPos = c.End()
}
}
f.Decls = append(f.Decls, nil)
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
f.Decls[lastImport+1] = impDecl
}
// Insert new import at insertAt.
insertAt := 0
if impIndex >= 0 {
// insert after the found import
insertAt = impIndex + 1
}
impDecl.Specs = append(impDecl.Specs, nil)
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
impDecl.Specs[insertAt] = newImport
pos := impDecl.Pos()
if insertAt > 0 {
// Assign same position as the previous import,
// so that the sorter sees it as being in the same block.
pos = impDecl.Specs[insertAt-1].Pos()
}
if newImport.Name != nil {
newImport.Name.NamePos = pos
}
newImport.Path.ValuePos = pos
newImport.EndPos = pos
// Clean up parens. impDecl contains at least one spec.
if len(impDecl.Specs) == 1 {
// Remove unneeded parens.
impDecl.Lparen = token.NoPos
} else if !impDecl.Lparen.IsValid() {
// impDecl needs parens added.
impDecl.Lparen = impDecl.Specs[0].Pos()
}
f.Imports = append(f.Imports, newImport)
return true
}
// DeleteImport deletes the import path from the file f, if present.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
var delspecs []*ast.ImportSpec
// Find the import nodes that import path, if any.
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT {
continue
}
for j := 0; j < len(gen.Specs); j++ {
spec := gen.Specs[j]
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) != path {
continue
}
// We found an import spec that imports path.
// Delete it.
delspecs = append(delspecs, impspec)
deleted = true
copy(gen.Specs[j:], gen.Specs[j+1:])
gen.Specs = gen.Specs[:len(gen.Specs)-1]
// If this was the last import spec in this decl,
// delete the decl, too.
if len(gen.Specs) == 0 {
copy(f.Decls[i:], f.Decls[i+1:])
f.Decls = f.Decls[:len(f.Decls)-1]
i--
break
} else if len(gen.Specs) == 1 {
gen.Lparen = token.NoPos // drop parens
}
if j > 0 {
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
line := fset.Position(impspec.Path.ValuePos).Line
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
if line-lastLine > 1 {
// There was a blank line immediately preceding the deleted import,
// so there's no need to close the hole.
// Do nothing.
} else {
// There was no blank line. Close the hole.
fset.File(gen.Rparen).MergeLine(line)
}
}
j--
}
}
// Delete them from f.Imports.
for i := 0; i < len(f.Imports); i++ {
imp := f.Imports[i]
for j, del := range delspecs {
if imp == del {
copy(f.Imports[i:], f.Imports[i+1:])
f.Imports = f.Imports[:len(f.Imports)-1]
copy(delspecs[j:], delspecs[j+1:])
delspecs = delspecs[:len(delspecs)-1]
i--
break
}
}
}
if len(delspecs) > 0 {
panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
}
return
}
// RewriteImport rewrites any import of path oldPath to path newPath.
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
for _, imp := range f.Imports {
if importPath(imp) == oldPath {
rewrote = true
// record old End, because the default is to compute
// it using the length of imp.Path.Value.
imp.EndPos = imp.End()
imp.Path.Value = strconv.Quote(newPath)
}
}
return
}
// UsesImport reports whether a given import is used.
func UsesImport(f *ast.File, path string) (used bool) {
spec := importSpec(f, path)
if spec == nil {
return
}
name := spec.Name.String()
switch name {
case "<nil>":
// If the package name is not explicitly specified,
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
case "_", ".":
// Not sure if this import is used - err on the side of caution.
return true
}
ast.Walk(visitFn(func(n ast.Node) {
sel, ok := n.(*ast.SelectorExpr)
if ok && isTopName(sel.X, name) {
used = true
}
}), f)
return
}
type visitFn func(node ast.Node)
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
fn(node)
return fn
}
// imports returns true if f imports path.
func imports(f *ast.File, path string) bool {
return importSpec(f, path) != nil
}
// importSpec returns the import spec if f imports path,
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
for _, s := range f.Imports {
if importPath(s) == path {
return s
}
}
return nil
}
// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
t, err := strconv.Unquote(s.Path.Value)
if err == nil {
return t
}
return ""
}
// declImports reports whether gen contains an import of path.
func declImports(gen *ast.GenDecl, path string) bool {
if gen.Tok != token.IMPORT {
return false
}
for _, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) == path {
return true
}
}
return false
}
// matchLen returns the length of the longest path segment prefix shared by x and y.
func matchLen(x, y string) int {
n := 0
for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
if x[i] == '/' {
n++
}
}
return n
}
// isTopName returns true if n is a top-level unresolved identifier with the given name.
func isTopName(n ast.Expr, name string) bool {
id, ok := n.(*ast.Ident)
return ok && id.Name == name && id.Obj == nil
}
// Imports returns the file imports grouped by paragraph.
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
var groups [][]*ast.ImportSpec
for _, decl := range f.Decls {
genDecl, ok := decl.(*ast.GenDecl)
if !ok || genDecl.Tok != token.IMPORT {
break
}
group := []*ast.ImportSpec{}
var lastLine int
for _, spec := range genDecl.Specs {
importSpec := spec.(*ast.ImportSpec)
pos := importSpec.Path.ValuePos
line := fset.Position(pos).Line
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
groups = append(groups, group)
group = []*ast.ImportSpec{}
}
group = append(group, importSpec)
lastLine = line
}
groups = append(groups, group)
}
return groups
}
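A brief sketch exercising AddImport, AddNamedImport, and DeleteImport from the file above and printing the rewritten AST with go/format; the input source is illustrative.

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	const src = "package p\n\nimport (\n\t\"io\"\n\t\"os\"\n)\n\nvar _ = os.Args\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	astutil.AddImport(fset, f, "fmt")
	astutil.AddNamedImport(fset, f, "pathpkg", "path") // import pathpkg "path"
	astutil.DeleteImport(fset, f, "io")                // "io" is unused in src

	var buf bytes.Buffer
	if err := format.Node(&buf, fset, f); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}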


@@ -0,0 +1,946 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
import (
"bytes"
"go/ast"
"go/format"
"go/parser"
"go/token"
"reflect"
"strconv"
"testing"
)
var fset = token.NewFileSet()
func parse(t *testing.T, name, in string) *ast.File {
file, err := parser.ParseFile(fset, name, in, parser.ParseComments)
if err != nil {
t.Fatalf("%s parse: %v", name, err)
}
return file
}
func print(t *testing.T, name string, f *ast.File) string {
var buf bytes.Buffer
if err := format.Node(&buf, fset, f); err != nil {
t.Fatalf("%s gofmt: %v", name, err)
}
return string(buf.Bytes())
}
type test struct {
name string
renamedPkg string
pkg string
in string
out string
broken bool // known broken
}
var addTests = []test{
{
name: "leave os alone",
pkg: "os",
in: `package main
import (
"os"
)
`,
out: `package main
import (
"os"
)
`,
},
{
name: "import.1",
pkg: "os",
in: `package main
`,
out: `package main
import "os"
`,
},
{
name: "import.2",
pkg: "os",
in: `package main
// Comment
import "C"
`,
out: `package main
// Comment
import "C"
import "os"
`,
},
{
name: "import.3",
pkg: "os",
in: `package main
// Comment
import "C"
import (
"io"
"utf8"
)
`,
out: `package main
// Comment
import "C"
import (
"io"
"os"
"utf8"
)
`,
},
{
name: "import.17",
pkg: "x/y/z",
in: `package main
// Comment
import "C"
import (
"a"
"b"
"x/w"
"d/f"
)
`,
out: `package main
// Comment
import "C"
import (
"a"
"b"
"x/w"
"x/y/z"
"d/f"
)
`,
},
{
name: "import into singular group",
pkg: "bytes",
in: `package main
import "os"
`,
out: `package main
import (
"bytes"
"os"
)
`,
},
{
name: "import into singular group with comment",
pkg: "bytes",
in: `package main
import /* why */ /* comment here? */ "os"
`,
out: `package main
import /* why */ /* comment here? */ (
"bytes"
"os"
)
`,
},
{
name: "import into group with leading comment",
pkg: "strings",
in: `package main
import (
// comment before bytes
"bytes"
"os"
)
`,
out: `package main
import (
// comment before bytes
"bytes"
"os"
"strings"
)
`,
},
{
name: "",
renamedPkg: "fmtpkg",
pkg: "fmt",
in: `package main
import "os"
`,
out: `package main
import (
fmtpkg "fmt"
"os"
)
`,
},
{
name: "struct comment",
pkg: "time",
in: `package main
// This is a comment before a struct.
type T struct {
t time.Time
}
`,
out: `package main
import "time"
// This is a comment before a struct.
type T struct {
t time.Time
}
`,
},
{
name: "issue 8729 import C",
pkg: "time",
in: `package main
import "C"
// comment
type T time.Time
`,
out: `package main
import "C"
import "time"
// comment
type T time.Time
`,
},
{
name: "issue 8729 empty import",
pkg: "time",
in: `package main
import ()
// comment
type T time.Time
`,
out: `package main
import "time"
// comment
type T time.Time
`,
},
{
name: "issue 8729 comment on package line",
pkg: "time",
in: `package main // comment
type T time.Time
`,
out: `package main // comment
import "time"
type T time.Time
`,
},
{
name: "issue 8729 comment after package",
pkg: "time",
in: `package main
// comment
type T time.Time
`,
out: `package main
import "time"
// comment
type T time.Time
`,
},
{
name: "issue 8729 comment before and on package line",
pkg: "time",
in: `// comment before
package main // comment on
type T time.Time
`,
out: `// comment before
package main // comment on
import "time"
type T time.Time
`,
},
// Issue 9961: Match prefixes using path segments rather than bytes
{
name: "issue 9961",
pkg: "regexp",
in: `package main
import (
"flag"
"testing"
"rsc.io/p"
)
`,
out: `package main
import (
"flag"
"regexp"
"testing"
"rsc.io/p"
)
`,
},
}
func TestAddImport(t *testing.T) {
for _, test := range addTests {
file := parse(t, test.name, test.in)
var before bytes.Buffer
ast.Fprint(&before, fset, file, nil)
AddNamedImport(fset, file, test.renamedPkg, test.pkg)
if got := print(t, test.name, file); got != test.out {
if test.broken {
t.Logf("%s is known broken:\ngot: %s\nwant: %s", test.name, got, test.out)
} else {
t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out)
}
var after bytes.Buffer
ast.Fprint(&after, fset, file, nil)
t.Logf("AST before:\n%s\nAST after:\n%s\n", before.String(), after.String())
}
}
}
func TestDoubleAddImport(t *testing.T) {
file := parse(t, "doubleimport", "package main\n")
AddImport(fset, file, "os")
AddImport(fset, file, "bytes")
want := `package main
import (
"bytes"
"os"
)
`
if got := print(t, "doubleimport", file); got != want {
t.Errorf("got: %s\nwant: %s", got, want)
}
}
func TestDoubleAddNamedImport(t *testing.T) {
file := parse(t, "doublenamedimport", "package main\n")
AddNamedImport(fset, file, "o", "os")
AddNamedImport(fset, file, "i", "io")
want := `package main
import (
i "io"
o "os"
)
`
if got := print(t, "doublenamedimport", file); got != want {
t.Errorf("got: %s\nwant: %s", got, want)
}
}
// Part of issue 8729.
func TestDoubleAddImportWithDeclComment(t *testing.T) {
file := parse(t, "doubleimport", `package main
import (
)
// comment
type I int
`)
// The AddImport order here matters.
AddImport(fset, file, "golang.org/x/tools/go/ast/astutil")
AddImport(fset, file, "os")
want := `package main
import (
"golang.org/x/tools/go/ast/astutil"
"os"
)
// comment
type I int
`
if got := print(t, "doubleimport_with_decl_comment", file); got != want {
t.Errorf("got: %s\nwant: %s", got, want)
}
}
var deleteTests = []test{
{
name: "import.4",
pkg: "os",
in: `package main
import (
"os"
)
`,
out: `package main
`,
},
{
name: "import.5",
pkg: "os",
in: `package main
// Comment
import "C"
import "os"
`,
out: `package main
// Comment
import "C"
`,
},
{
name: "import.6",
pkg: "os",
in: `package main
// Comment
import "C"
import (
"io"
"os"
"utf8"
)
`,
out: `package main
// Comment
import "C"
import (
"io"
"utf8"
)
`,
},
{
name: "import.7",
pkg: "io",
in: `package main
import (
"io" // a
"os" // b
"utf8" // c
)
`,
out: `package main
import (
// a
"os" // b
"utf8" // c
)
`,
},
{
name: "import.8",
pkg: "os",
in: `package main
import (
"io" // a
"os" // b
"utf8" // c
)
`,
out: `package main
import (
"io" // a
// b
"utf8" // c
)
`,
},
{
name: "import.9",
pkg: "utf8",
in: `package main
import (
"io" // a
"os" // b
"utf8" // c
)
`,
out: `package main
import (
"io" // a
"os" // b
// c
)
`,
},
{
name: "import.10",
pkg: "io",
in: `package main
import (
"io"
"os"
"utf8"
)
`,
out: `package main
import (
"os"
"utf8"
)
`,
},
{
name: "import.11",
pkg: "os",
in: `package main
import (
"io"
"os"
"utf8"
)
`,
out: `package main
import (
"io"
"utf8"
)
`,
},
{
name: "import.12",
pkg: "utf8",
in: `package main
import (
"io"
"os"
"utf8"
)
`,
out: `package main
import (
"io"
"os"
)
`,
},
{
name: "handle.raw.quote.imports",
pkg: "os",
in: "package main\n\nimport `os`",
out: `package main
`,
},
{
name: "import.13",
pkg: "io",
in: `package main
import (
"fmt"
"io"
"os"
"utf8"
"go/format"
)
`,
out: `package main
import (
"fmt"
"os"
"utf8"
"go/format"
)
`,
},
{
name: "import.14",
pkg: "io",
in: `package main
import (
"fmt" // a
"io" // b
"os" // c
"utf8" // d
"go/format" // e
)
`,
out: `package main
import (
"fmt" // a
// b
"os" // c
"utf8" // d
"go/format" // e
)
`,
},
{
name: "import.15",
pkg: "double",
in: `package main
import (
"double"
"double"
)
`,
out: `package main
`,
},
{
name: "import.16",
pkg: "bubble",
in: `package main
import (
"toil"
"bubble"
"bubble"
"trouble"
)
`,
out: `package main
import (
"toil"
"trouble"
)
`,
},
{
name: "import.17",
pkg: "quad",
in: `package main
import (
"quad"
"quad"
)
import (
"quad"
"quad"
)
`,
out: `package main
`,
},
}
func TestDeleteImport(t *testing.T) {
for _, test := range deleteTests {
file := parse(t, test.name, test.in)
DeleteImport(fset, file, test.pkg)
if got := print(t, test.name, file); got != test.out {
t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out)
}
}
}
type rewriteTest struct {
name string
srcPkg string
dstPkg string
in string
out string
}
var rewriteTests = []rewriteTest{
{
name: "import.13",
srcPkg: "utf8",
dstPkg: "encoding/utf8",
in: `package main
import (
"io"
"os"
"utf8" // thanks ken
)
`,
out: `package main
import (
"encoding/utf8" // thanks ken
"io"
"os"
)
`,
},
{
name: "import.14",
srcPkg: "asn1",
dstPkg: "encoding/asn1",
in: `package main
import (
"asn1"
"crypto"
"crypto/rsa"
_ "crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
"time"
)
var x = 1
`,
out: `package main
import (
"crypto"
"crypto/rsa"
_ "crypto/sha1"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"time"
)
var x = 1
`,
},
{
name: "import.15",
srcPkg: "url",
dstPkg: "net/url",
in: `package main
import (
"bufio"
"net"
"path"
"url"
)
var x = 1 // comment on x, not on url
`,
out: `package main
import (
"bufio"
"net"
"net/url"
"path"
)
var x = 1 // comment on x, not on url
`,
},
{
name: "import.16",
srcPkg: "http",
dstPkg: "net/http",
in: `package main
import (
"flag"
"http"
"log"
"text/template"
)
var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18
`,
out: `package main
import (
"flag"
"log"
"net/http"
"text/template"
)
var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18
`,
},
}
func TestRewriteImport(t *testing.T) {
for _, test := range rewriteTests {
file := parse(t, test.name, test.in)
RewriteImport(fset, file, test.srcPkg, test.dstPkg)
if got := print(t, test.name, file); got != test.out {
t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out)
}
}
}
var importsTests = []struct {
name string
in string
want [][]string
}{
{
name: "no packages",
in: `package foo
`,
want: nil,
},
{
name: "one group",
in: `package foo
import (
"fmt"
"testing"
)
`,
want: [][]string{{"fmt", "testing"}},
},
{
name: "four groups",
in: `package foo
import "C"
import (
"fmt"
"testing"
"appengine"
"myproject/mylib1"
"myproject/mylib2"
)
`,
want: [][]string{
{"C"},
{"fmt", "testing"},
{"appengine"},
{"myproject/mylib1", "myproject/mylib2"},
},
},
{
name: "multiple factored groups",
in: `package foo
import (
"fmt"
"testing"
"appengine"
)
import (
"reflect"
"bytes"
)
`,
want: [][]string{
{"fmt", "testing"},
{"appengine"},
{"reflect"},
{"bytes"},
},
},
}
func unquote(s string) string {
res, err := strconv.Unquote(s)
if err != nil {
return "could_not_unquote"
}
return res
}
func TestImports(t *testing.T) {
fset := token.NewFileSet()
for _, test := range importsTests {
f, err := parser.ParseFile(fset, "test.go", test.in, 0)
if err != nil {
t.Errorf("%s: %v", test.name, err)
continue
}
var got [][]string
for _, group := range Imports(fset, f) {
var b []string
for _, spec := range group {
b = append(b, unquote(spec.Path.Value))
}
got = append(got, b)
}
if !reflect.DeepEqual(got, test.want) {
t.Errorf("Imports(%s)=%v, want %v", test.name, got, test.want)
}
}
}


@@ -0,0 +1,14 @@
package astutil
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
return e
}
e = p.X
}
}
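A tiny illustrative call to Unparen above; the expression literal is arbitrary.

package main

import (
	"fmt"
	"go/parser"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	expr, err := parser.ParseExpr("((x + y))")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", astutil.Unparen(expr)) // *ast.BinaryExpr, parens stripped
}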

Godeps/_workspace/src/golang.org/x/tools/imports/fix.go generated vendored (387 changes)

@@ -0,0 +1,387 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"os"
"path"
"path/filepath"
"strings"
"sync"
"golang.org/x/tools/go/ast/astutil"
)
// importToGroup is a list of functions which map from an import path to
// a group number.
var importToGroup = []func(importPath string) (num int, ok bool){
func(importPath string) (num int, ok bool) {
if strings.HasPrefix(importPath, "appengine") {
return 2, true
}
return
},
func(importPath string) (num int, ok bool) {
if strings.Contains(importPath, ".") {
return 1, true
}
return
},
}
func importGroup(importPath string) int {
for _, fn := range importToGroup {
if n, ok := fn(importPath); ok {
return n
}
}
return 0
}
func fixImports(fset *token.FileSet, f *ast.File) (added []string, err error) {
// refs are a set of possible package references currently unsatisfied by imports.
// first key: either base package (e.g. "fmt") or renamed package
// second key: referenced package symbol (e.g. "Println")
refs := make(map[string]map[string]bool)
// decls are the current package imports. key is base package or renamed package.
decls := make(map[string]*ast.ImportSpec)
// collect potential uses of packages.
var visitor visitFn
visitor = visitFn(func(node ast.Node) ast.Visitor {
if node == nil {
return visitor
}
switch v := node.(type) {
case *ast.ImportSpec:
if v.Name != nil {
decls[v.Name.Name] = v
} else {
local := importPathToName(strings.Trim(v.Path.Value, `"`))
decls[local] = v
}
case *ast.SelectorExpr:
xident, ok := v.X.(*ast.Ident)
if !ok {
break
}
if xident.Obj != nil {
// if the parser can resolve it, it's not a package ref
break
}
pkgName := xident.Name
if refs[pkgName] == nil {
refs[pkgName] = make(map[string]bool)
}
if decls[pkgName] == nil {
refs[pkgName][v.Sel.Name] = true
}
}
return visitor
})
ast.Walk(visitor, f)
// Nil out any unused ImportSpecs, to be removed in following passes
unusedImport := map[string]bool{}
for pkg, is := range decls {
if refs[pkg] == nil && pkg != "_" && pkg != "." {
unusedImport[strings.Trim(is.Path.Value, `"`)] = true
}
}
for ipath := range unusedImport {
if ipath == "C" {
// Don't remove cgo stuff.
continue
}
astutil.DeleteImport(fset, f, ipath)
}
// Search for imports matching potential package references.
searches := 0
type result struct {
ipath string
name string
err error
}
results := make(chan result)
for pkgName, symbols := range refs {
if len(symbols) == 0 {
continue // skip over packages already imported
}
go func(pkgName string, symbols map[string]bool) {
ipath, rename, err := findImport(pkgName, symbols)
r := result{ipath: ipath, err: err}
if rename {
r.name = pkgName
}
results <- r
}(pkgName, symbols)
searches++
}
for i := 0; i < searches; i++ {
result := <-results
if result.err != nil {
return nil, result.err
}
if result.ipath != "" {
if result.name != "" {
astutil.AddNamedImport(fset, f, result.name, result.ipath)
} else {
astutil.AddImport(fset, f, result.ipath)
}
added = append(added, result.ipath)
}
}
return added, nil
}
// importPathToName returns the package name for the given import path.
var importPathToName = importPathToNameGoPath
// importPathToNameBasic assumes the package name is the base of import path.
func importPathToNameBasic(importPath string) (packageName string) {
return path.Base(importPath)
}
// importPathToNameGoPath finds out the actual package name, as declared in its .go files.
// If there's a problem, it falls back to using importPathToNameBasic.
func importPathToNameGoPath(importPath string) (packageName string) {
if buildPkg, err := build.Import(importPath, "", 0); err == nil {
return buildPkg.Name
} else {
return importPathToNameBasic(importPath)
}
}
type pkg struct {
importpath string // full pkg import path, e.g. "net/http"
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
}
var pkgIndexOnce sync.Once
var pkgIndex struct {
sync.Mutex
m map[string][]pkg // shortname => []pkg, e.g "http" => "net/http"
}
// gate is a semaphore for limiting concurrency.
type gate chan struct{}
func (g gate) enter() { g <- struct{}{} }
func (g gate) leave() { <-g }
// fsgate protects the OS & filesystem from too much concurrency.
// Too much disk I/O -> too many threads -> swapping and bad scheduling.
var fsgate = make(gate, 8)
func loadPkgIndex() {
pkgIndex.Lock()
pkgIndex.m = make(map[string][]pkg)
pkgIndex.Unlock()
var wg sync.WaitGroup
for _, path := range build.Default.SrcDirs() {
fsgate.enter()
f, err := os.Open(path)
if err != nil {
fsgate.leave()
fmt.Fprint(os.Stderr, err)
continue
}
children, err := f.Readdir(-1)
f.Close()
fsgate.leave()
if err != nil {
fmt.Fprint(os.Stderr, err)
continue
}
for _, child := range children {
if child.IsDir() {
wg.Add(1)
go func(path, name string) {
defer wg.Done()
loadPkg(&wg, path, name)
}(path, child.Name())
}
}
}
wg.Wait()
}
func loadPkg(wg *sync.WaitGroup, root, pkgrelpath string) {
importpath := filepath.ToSlash(pkgrelpath)
dir := filepath.Join(root, importpath)
fsgate.enter()
defer fsgate.leave()
pkgDir, err := os.Open(dir)
if err != nil {
return
}
children, err := pkgDir.Readdir(-1)
pkgDir.Close()
if err != nil {
return
}
// hasGo tracks whether a directory actually appears to be a
// Go source code directory. If $GOPATH == $HOME, and
// $HOME/src has lots of other large non-Go projects in it,
// then the calls to importPathToName below can be expensive.
hasGo := false
for _, child := range children {
// Avoid .foo, _foo, and testdata directory trees.
name := child.Name()
if name == "" || name[0] == '.' || name[0] == '_' || name == "testdata" {
continue
}
if strings.HasSuffix(name, ".go") {
hasGo = true
}
if child.IsDir() {
wg.Add(1)
go func(root, name string) {
defer wg.Done()
loadPkg(wg, root, name)
}(root, filepath.Join(importpath, name))
}
}
if hasGo {
shortName := importPathToName(importpath)
pkgIndex.Lock()
pkgIndex.m[shortName] = append(pkgIndex.m[shortName], pkg{
importpath: importpath,
dir: dir,
})
pkgIndex.Unlock()
}
}
// loadExports returns a list of exports for a package.
var loadExports = loadExportsGoPath
func loadExportsGoPath(dir string) map[string]bool {
exports := make(map[string]bool)
buildPkg, err := build.ImportDir(dir, 0)
if err != nil {
if strings.Contains(err.Error(), "no buildable Go source files in") {
return nil
}
fmt.Fprintf(os.Stderr, "could not import %q: %v\n", dir, err)
return nil
}
fset := token.NewFileSet()
for _, files := range [...][]string{buildPkg.GoFiles, buildPkg.CgoFiles} {
for _, file := range files {
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
if err != nil {
fmt.Fprintf(os.Stderr, "could not parse %q: %v\n", file, err)
continue
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports[name] = true
}
}
}
}
return exports
}
// findImport searches for a package with the given symbols.
// If no package is found, findImport returns "".
// Declared as a variable rather than a function so goimports can be easily
// extended by adding a file with an init function.
var findImport = findImportGoPath
func findImportGoPath(pkgName string, symbols map[string]bool) (string, bool, error) {
// Fast path for the standard library.
// In the common case we hopefully never have to scan the GOPATH, which can
// be slow with moving disks.
if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok {
return pkg, rename, nil
}
// TODO(sameer): look at the import lines for other Go files in the
// local directory, since the user is likely to import the same packages
// in the current Go file. Return rename=true when the other Go files
// use a renamed package that's also used in the current file.
pkgIndexOnce.Do(loadPkgIndex)
// Collect exports for packages with matching names.
var wg sync.WaitGroup
var pkgsMu sync.Mutex // guards pkgs
// full importpath => exported symbol => True
// e.g. "net/http" => "Client" => True
pkgs := make(map[string]map[string]bool)
pkgIndex.Lock()
for _, pkg := range pkgIndex.m[pkgName] {
wg.Add(1)
go func(importpath, dir string) {
defer wg.Done()
exports := loadExports(dir)
if exports != nil {
pkgsMu.Lock()
pkgs[importpath] = exports
pkgsMu.Unlock()
}
}(pkg.importpath, pkg.dir)
}
pkgIndex.Unlock()
wg.Wait()
// Filter out packages missing required exported symbols.
for symbol := range symbols {
for importpath, exports := range pkgs {
if !exports[symbol] {
delete(pkgs, importpath)
}
}
}
if len(pkgs) == 0 {
return "", false, nil
}
// If there are multiple candidate packages, the shortest one wins.
// This is a heuristic to prefer the standard library (e.g. "bytes")
// over e.g. "github.com/foo/bar/bytes".
shortest := ""
for importPath := range pkgs {
if shortest == "" || len(importPath) < len(shortest) {
shortest = importPath
}
}
return shortest, false, nil
}
type visitFn func(node ast.Node) ast.Visitor
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
return fn(node)
}
func findImportStdlib(shortPkg string, symbols map[string]bool) (importPath string, rename, ok bool) {
for symbol := range symbols {
path := stdlib[shortPkg+"."+symbol]
if path == "" {
return "", false, false
}
if importPath != "" && importPath != path {
// Ambiguous. Symbols pointed to different things.
return "", false, false
}
importPath = path
}
return importPath, false, importPath != ""
}
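The fsgate comments above describe a buffered-channel semaphore that caps filesystem concurrency while the GOPATH index is built. A standalone sketch of the same gate pattern; the worker body is illustrative and stands in for the os.Open/Readdir calls in loadPkgIndex.

package main

import (
	"fmt"
	"sync"
)

// gate is a counting semaphore: enter blocks once the channel is full.
type gate chan struct{}

func (g gate) enter() { g <- struct{}{} }
func (g gate) leave() { <-g }

func main() {
	fsgate := make(gate, 8) // at most 8 goroutines touch the disk at once
	var wg sync.WaitGroup
	for i := 0; i < 32; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			fsgate.enter()
			defer fsgate.leave()
			fmt.Println("scanning directory", i) // stands in for os.Open + Readdir
		}(i)
	}
	wg.Wait()
}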


@@ -0,0 +1,862 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import (
"flag"
"go/build"
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
)
var only = flag.String("only", "", "If non-empty, the fix test to run")
var tests = []struct {
name string
in, out string
}{
// Adding an import to an existing parenthesized import
{
name: "factored_imports_add",
in: `package foo
import (
"fmt"
)
func bar() {
var b bytes.Buffer
fmt.Println(b.String())
}
`,
out: `package foo
import (
"bytes"
"fmt"
)
func bar() {
var b bytes.Buffer
fmt.Println(b.String())
}
`,
},
// Adding an import to an existing parenthesized import,
// verifying it goes into the first section.
{
name: "factored_imports_add_first_sec",
in: `package foo
import (
"fmt"
"appengine"
)
func bar() {
var b bytes.Buffer
_ = appengine.IsDevServer
fmt.Println(b.String())
}
`,
out: `package foo
import (
"bytes"
"fmt"
"appengine"
)
func bar() {
var b bytes.Buffer
_ = appengine.IsDevServer
fmt.Println(b.String())
}
`,
},
// Adding an import to an existing parenthesized import,
// verifying it goes into the first section. (test 2)
{
name: "factored_imports_add_first_sec_2",
in: `package foo
import (
"fmt"
"appengine"
)
func bar() {
_ = math.NaN
_ = fmt.Sprintf
_ = appengine.IsDevServer
}
`,
out: `package foo
import (
"fmt"
"math"
"appengine"
)
func bar() {
_ = math.NaN
_ = fmt.Sprintf
_ = appengine.IsDevServer
}
`,
},
// Adding a new import line, without parens
{
name: "add_import_section",
in: `package foo
func bar() {
var b bytes.Buffer
}
`,
out: `package foo
import "bytes"
func bar() {
var b bytes.Buffer
}
`,
},
// Adding two new imports, which should make a parenthesized import decl.
{
name: "add_import_paren_section",
in: `package foo
func bar() {
_, _ := bytes.Buffer, zip.NewReader
}
`,
out: `package foo
import (
"archive/zip"
"bytes"
)
func bar() {
_, _ := bytes.Buffer, zip.NewReader
}
`,
},
// Make sure we don't add things twice
{
name: "no_double_add",
in: `package foo
func bar() {
_, _ := bytes.Buffer, bytes.NewReader
}
`,
out: `package foo
import "bytes"
func bar() {
_, _ := bytes.Buffer, bytes.NewReader
}
`,
},
// Remove unused imports, 1 of a factored block
{
name: "remove_unused_1_of_2",
in: `package foo
import (
"bytes"
"fmt"
)
func bar() {
_, _ := bytes.Buffer, bytes.NewReader
}
`,
out: `package foo
import "bytes"
func bar() {
_, _ := bytes.Buffer, bytes.NewReader
}
`,
},
// Remove unused imports, 2 of 2
{
name: "remove_unused_2_of_2",
in: `package foo
import (
"bytes"
"fmt"
)
func bar() {
}
`,
out: `package foo
func bar() {
}
`,
},
// Remove unused imports, 1 of 1
{
name: "remove_unused_1_of_1",
in: `package foo
import "fmt"
func bar() {
}
`,
out: `package foo
func bar() {
}
`,
},
// Don't remove empty imports.
{
name: "dont_remove_empty_imports",
in: `package foo
import (
_ "image/png"
_ "image/jpeg"
)
`,
out: `package foo
import (
_ "image/jpeg"
_ "image/png"
)
`,
},
// Don't remove dot imports.
{
name: "dont_remove_dot_imports",
in: `package foo
import (
. "foo"
. "bar"
)
`,
out: `package foo
import (
. "bar"
. "foo"
)
`,
},
// Skip refs the parser can resolve.
{
name: "skip_resolved_refs",
in: `package foo
func f() {
type t struct{ Println func(string) }
fmt := t{Println: func(string) {}}
fmt.Println("foo")
}
`,
out: `package foo
func f() {
type t struct{ Println func(string) }
fmt := t{Println: func(string) {}}
fmt.Println("foo")
}
`,
},
// Do not add a package we already have a resolution for.
{
name: "skip_template",
in: `package foo
import "html/template"
func f() { t = template.New("sometemplate") }
`,
out: `package foo
import "html/template"
func f() { t = template.New("sometemplate") }
`,
},
// Don't touch cgo
{
name: "cgo",
in: `package foo
/*
#include <foo.h>
*/
import "C"
`,
out: `package foo
/*
#include <foo.h>
*/
import "C"
`,
},
// Put some things in their own section
{
name: "make_sections",
in: `package foo
import (
"os"
)
func foo () {
_, _ = os.Args, fmt.Println
_, _ = appengine.FooSomething, user.Current
}
`,
out: `package foo
import (
"fmt"
"os"
"appengine"
"appengine/user"
)
func foo() {
_, _ = os.Args, fmt.Println
_, _ = appengine.FooSomething, user.Current
}
`,
},
// Delete existing empty import block
{
name: "delete_empty_import_block",
in: `package foo
import ()
`,
out: `package foo
`,
},
// Use existing empty import block
{
name: "use_empty_import_block",
in: `package foo
import ()
func f() {
_ = fmt.Println
}
`,
out: `package foo
import "fmt"
func f() {
_ = fmt.Println
}
`,
},
// Blank line before adding new section.
{
name: "blank_line_before_new_group",
in: `package foo
import (
"fmt"
"net"
)
func f() {
_ = net.Dial
_ = fmt.Printf
_ = snappy.Foo
}
`,
out: `package foo
import (
"fmt"
"net"
"code.google.com/p/snappy-go/snappy"
)
func f() {
_ = net.Dial
_ = fmt.Printf
_ = snappy.Foo
}
`,
},
// Blank line between standard library and third-party stuff.
{
name: "blank_line_separating_std_and_third_party",
in: `package foo
import (
"code.google.com/p/snappy-go/snappy"
"fmt"
"net"
)
func f() {
_ = net.Dial
_ = fmt.Printf
_ = snappy.Foo
}
`,
out: `package foo
import (
"fmt"
"net"
"code.google.com/p/snappy-go/snappy"
)
func f() {
_ = net.Dial
_ = fmt.Printf
_ = snappy.Foo
}
`,
},
// golang.org/issue/6884
{
name: "issue 6884",
in: `package main
// A comment
func main() {
fmt.Println("Hello, world")
}
`,
out: `package main
import "fmt"
// A comment
func main() {
fmt.Println("Hello, world")
}
`,
},
// golang.org/issue/7132
{
name: "issue 7132",
in: `package main
import (
"fmt"
"gu"
"github.com/foo/bar"
)
var (
a = bar.a
b = gu.a
c = fmt.Printf
)
`,
out: `package main
import (
"fmt"
"gu"
"github.com/foo/bar"
)
var (
a = bar.a
b = gu.a
c = fmt.Printf
)
`,
},
{
name: "renamed package",
in: `package main
var _ = str.HasPrefix
`,
out: `package main
import str "strings"
var _ = str.HasPrefix
`,
},
{
name: "fragment with main",
in: `func main(){fmt.Println("Hello, world")}`,
out: `package main
import "fmt"
func main() { fmt.Println("Hello, world") }
`,
},
{
name: "fragment without main",
in: `func notmain(){fmt.Println("Hello, world")}`,
out: `import "fmt"
func notmain() { fmt.Println("Hello, world") }`,
},
// Remove first import within in a 2nd/3rd/4th/etc. section.
// golang.org/issue/7679
{
name: "issue 7679",
in: `package main
import (
"fmt"
"github.com/foo/bar"
"github.com/foo/qux"
)
func main() {
var _ = fmt.Println
//var _ = bar.A
var _ = qux.B
}
`,
out: `package main
import (
"fmt"
"github.com/foo/qux"
)
func main() {
var _ = fmt.Println
//var _ = bar.A
var _ = qux.B
}
`,
},
// Blank line can be added before all types of import declarations.
// golang.org/issue/7866
{
name: "issue 7866",
in: `package main
import (
"fmt"
renamed_bar "github.com/foo/bar"
. "github.com/foo/baz"
"io"
_ "github.com/foo/qux"
"strings"
)
func main() {
_, _, _, _, _ = fmt.Errorf, io.Copy, strings.Contains, renamed_bar.A, B
}
`,
out: `package main
import (
"fmt"
renamed_bar "github.com/foo/bar"
"io"
. "github.com/foo/baz"
"strings"
_ "github.com/foo/qux"
)
func main() {
_, _, _, _, _ = fmt.Errorf, io.Copy, strings.Contains, renamed_bar.A, B
}
`,
},
// Non-idempotent comment formatting
// golang.org/issue/8035
{
name: "issue 8035",
in: `package main
import (
"fmt" // A
"go/ast" // B
_ "launchpad.net/gocheck" // C
)
func main() { _, _ = fmt.Print, ast.Walk }
`,
out: `package main
import (
"fmt" // A
"go/ast" // B
_ "launchpad.net/gocheck" // C
)
func main() { _, _ = fmt.Print, ast.Walk }
`,
},
// Failure to delete all duplicate imports
// golang.org/issue/8459
{
name: "issue 8459",
in: `package main
import (
"fmt"
"log"
"log"
"math"
)
func main() { fmt.Println("pi:", math.Pi) }
`,
out: `package main
import (
"fmt"
"math"
)
func main() { fmt.Println("pi:", math.Pi) }
`,
},
// Too aggressive prefix matching
// golang.org/issue/9961
{
name: "issue 9961",
in: `package p
import (
"zip"
"rsc.io/p"
)
var (
_ = fmt.Print
_ = zip.Store
_ p.P
_ = regexp.Compile
)
`,
out: `package p
import (
"fmt"
"regexp"
"zip"
"rsc.io/p"
)
var (
_ = fmt.Print
_ = zip.Store
_ p.P
_ = regexp.Compile
)
`,
},
// Unused named import is mistaken for unnamed import
// golang.org/issue/8149
{
name: "issue 8149",
in: `package main
import foo "fmt"
func main() { fmt.Println() }
`,
out: `package main
import "fmt"
func main() { fmt.Println() }
`,
},
}
func TestFixImports(t *testing.T) {
simplePkgs := map[string]string{
"appengine": "appengine",
"bytes": "bytes",
"fmt": "fmt",
"math": "math",
"os": "os",
"p": "rsc.io/p",
"regexp": "regexp",
"snappy": "code.google.com/p/snappy-go/snappy",
"str": "strings",
"user": "appengine/user",
"zip": "archive/zip",
}
findImport = func(pkgName string, symbols map[string]bool) (string, bool, error) {
return simplePkgs[pkgName], pkgName == "str", nil
}
options := &Options{
TabWidth: 8,
TabIndent: true,
Comments: true,
Fragment: true,
}
for _, tt := range tests {
if *only != "" && tt.name != *only {
continue
}
buf, err := Process(tt.name+".go", []byte(tt.in), options)
if err != nil {
t.Errorf("error on %q: %v", tt.name, err)
continue
}
if got := string(buf); got != tt.out {
t.Errorf("results diff on %q\nGOT:\n%s\nWANT:\n%s\n", tt.name, got, tt.out)
}
}
}
func TestFindImportGoPath(t *testing.T) {
goroot, err := ioutil.TempDir("", "goimports-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(goroot)
pkgIndexOnce = sync.Once{}
origStdlib := stdlib
defer func() {
stdlib = origStdlib
}()
stdlib = nil
// Test against imaginary bits/bytes package in std lib
bytesDir := filepath.Join(goroot, "src", "pkg", "bits", "bytes")
for _, tag := range build.Default.ReleaseTags {
// Go 1.4 rearranged the GOROOT tree to remove the "pkg" path component.
if tag == "go1.4" {
bytesDir = filepath.Join(goroot, "src", "bits", "bytes")
}
}
if err := os.MkdirAll(bytesDir, 0755); err != nil {
t.Fatal(err)
}
bytesSrcPath := filepath.Join(bytesDir, "bytes.go")
bytesPkgPath := "bits/bytes"
bytesSrc := []byte(`package bytes
type Buffer2 struct {}
`)
if err := ioutil.WriteFile(bytesSrcPath, bytesSrc, 0775); err != nil {
t.Fatal(err)
}
oldGOROOT := build.Default.GOROOT
oldGOPATH := build.Default.GOPATH
build.Default.GOROOT = goroot
build.Default.GOPATH = ""
defer func() {
build.Default.GOROOT = oldGOROOT
build.Default.GOPATH = oldGOPATH
}()
got, rename, err := findImportGoPath("bytes", map[string]bool{"Buffer2": true})
if err != nil {
t.Fatal(err)
}
if got != bytesPkgPath || rename {
t.Errorf(`findImportGoPath("bytes", Buffer2 ...)=%q, %t, want "%s", false`, got, rename, bytesPkgPath)
}
got, rename, err = findImportGoPath("bytes", map[string]bool{"Missing": true})
if err != nil {
t.Fatal(err)
}
if got != "" || rename {
t.Errorf(`findImportGoPath("bytes", Missing ...)=%q, %t, want "", false`, got, rename)
}
}
func TestFindImportStdlib(t *testing.T) {
tests := []struct {
pkg string
symbols []string
want string
}{
{"http", []string{"Get"}, "net/http"},
{"http", []string{"Get", "Post"}, "net/http"},
{"http", []string{"Get", "Foo"}, ""},
{"bytes", []string{"Buffer"}, "bytes"},
{"ioutil", []string{"Discard"}, "io/ioutil"},
}
for _, tt := range tests {
got, rename, ok := findImportStdlib(tt.pkg, strSet(tt.symbols))
if (got != "") != ok {
t.Error("findImportStdlib return value inconsistent")
}
if got != tt.want || rename {
t.Errorf("findImportStdlib(%q, %q) = %q, %t; want %q, false", tt.pkg, tt.symbols, got, rename, tt.want)
}
}
}
func strSet(ss []string) map[string]bool {
m := make(map[string]bool)
for _, s := range ss {
m[s] = true
}
return m
}


@ -0,0 +1,279 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package imports implements a Go pretty-printer (like package "go/format")
// that also adds or removes import statements as necessary.
package imports
import (
"bufio"
"bytes"
"fmt"
"go/ast"
"go/format"
"go/parser"
"go/printer"
"go/token"
"io"
"regexp"
"strconv"
"strings"
"golang.org/x/tools/go/ast/astutil"
)
// Options specifies options for processing files.
type Options struct {
Fragment bool // Accept fragment of a source file (no package statement)
AllErrors bool // Report all errors (not just the first 10 on different lines)
Comments bool // Print comments (true if nil *Options provided)
TabIndent bool // Use tabs for indent (true if nil *Options provided)
TabWidth int // Tab width (8 if nil *Options provided)
}
// Process formats and adjusts imports for the provided file.
// If opt is nil the defaults are used.
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
if opt == nil {
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
}
fileSet := token.NewFileSet()
file, adjust, err := parse(fileSet, filename, src, opt)
if err != nil {
return nil, err
}
_, err = fixImports(fileSet, file)
if err != nil {
return nil, err
}
sortImports(fileSet, file)
imps := astutil.Imports(fileSet, file)
var spacesBefore []string // import paths we need spaces before
for _, impSection := range imps {
// Within each block of contiguous imports, see if any
// import lines are in different group numbers. If so,
// we'll need to put a space between them so it's
// compatible with gofmt.
lastGroup := -1
for _, importSpec := range impSection {
importPath, _ := strconv.Unquote(importSpec.Path.Value)
groupNum := importGroup(importPath)
if groupNum != lastGroup && lastGroup != -1 {
spacesBefore = append(spacesBefore, importPath)
}
lastGroup = groupNum
}
}
printerMode := printer.UseSpaces
if opt.TabIndent {
printerMode |= printer.TabIndent
}
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
var buf bytes.Buffer
err = printConfig.Fprint(&buf, fileSet, file)
if err != nil {
return nil, err
}
out := buf.Bytes()
if adjust != nil {
out = adjust(src, out)
}
if len(spacesBefore) > 0 {
out = addImportSpaces(bytes.NewReader(out), spacesBefore)
}
out, err = format.Source(out)
if err != nil {
return nil, err
}
return out, nil
}
// parse parses src, which was read from filename,
// as a Go source file or statement list.
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
parserMode := parser.Mode(0)
if opt.Comments {
parserMode |= parser.ParseComments
}
if opt.AllErrors {
parserMode |= parser.AllErrors
}
// Try as whole source file.
file, err := parser.ParseFile(fset, filename, src, parserMode)
if err == nil {
return file, nil, nil
}
// If the error is that the source file didn't begin with a
// package line and we accept fragmented input, fall through to
// try as a source fragment. Stop and return on any other error.
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
return nil, nil, err
}
// If this is a declaration list, make it a source file
// by inserting a package clause.
// Insert using a ;, not a newline, so that the line numbers
// in psrc match the ones in src.
psrc := append([]byte("package main;"), src...)
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
if err == nil {
// If a main function exists, we will assume this is a main
// package and leave the file.
if containsMainFunc(file) {
return file, nil, nil
}
adjust := func(orig, src []byte) []byte {
// Remove the package clause.
// Gofmt has turned the ; into a \n.
src = src[len("package main\n"):]
return matchSpace(orig, src)
}
return file, adjust, nil
}
// If the error is that the source file didn't begin with a
// declaration, fall through to try as a statement list.
// Stop and return on any other error.
if !strings.Contains(err.Error(), "expected declaration") {
return nil, nil, err
}
// If this is a statement list, make it a source file
// by inserting a package clause and turning the list
// into a function body. This handles expressions too.
// Insert using a ;, not a newline, so that the line numbers
// in fsrc match the ones in src.
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
if err == nil {
adjust := func(orig, src []byte) []byte {
// Remove the wrapping.
// Gofmt has turned the ; into a \n\n.
src = src[len("package p\n\nfunc _() {"):]
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
return matchSpace(orig, src)
}
return file, adjust, nil
}
// Failed, and out of options.
return nil, nil, err
}
// containsMainFunc checks if a file contains a function declaration with the
// function signature 'func main()'
func containsMainFunc(file *ast.File) bool {
for _, decl := range file.Decls {
if f, ok := decl.(*ast.FuncDecl); ok {
if f.Name.Name != "main" {
continue
}
if len(f.Type.Params.List) != 0 {
continue
}
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
continue
}
return true
}
}
return false
}
func cutSpace(b []byte) (before, middle, after []byte) {
i := 0
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
i++
}
j := len(b)
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
j--
}
if i <= j {
return b[:i], b[i:j], b[j:]
}
return nil, nil, b[j:]
}
// matchSpace reformats src to use the same space context as orig.
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
// 2) matchSpace copies the indentation of the first non-blank line in orig
// to every non-blank line in src.
// 3) matchSpace copies the trailing space from orig and uses it in place
// of src's trailing space.
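// For example (illustrative): with orig = "\n\tfoo()\n" and src = "foo()\n",
// matchSpace returns "\n\tfoo()\n": the leading blank line and tab indent of
// orig are reapplied to src, and orig's trailing newline is preserved.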
func matchSpace(orig []byte, src []byte) []byte {
before, _, after := cutSpace(orig)
i := bytes.LastIndex(before, []byte{'\n'})
before, indent := before[:i+1], before[i+1:]
_, src, _ = cutSpace(src)
var b bytes.Buffer
b.Write(before)
for len(src) > 0 {
line := src
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, src = line[:i+1], line[i+1:]
} else {
src = nil
}
if len(line) > 0 && line[0] != '\n' { // not blank
b.Write(indent)
}
b.Write(line)
}
b.Write(after)
return b.Bytes()
}
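// impLine matches a single import line inside an import block and captures
// the quoted import path; an optional name (including "_" or ".") before the
// path is allowed. For example, a line like "\t_ \"github.com/foo/qux\""
// yields "github.com/foo/qux" as the submatch.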
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
func addImportSpaces(r io.Reader, breaks []string) []byte {
var out bytes.Buffer
sc := bufio.NewScanner(r)
inImports := false
done := false
for sc.Scan() {
s := sc.Text()
if !inImports && !done && strings.HasPrefix(s, "import") {
inImports = true
}
if inImports && (strings.HasPrefix(s, "var") ||
strings.HasPrefix(s, "func") ||
strings.HasPrefix(s, "const") ||
strings.HasPrefix(s, "type")) {
done = true
inImports = false
}
if inImports && len(breaks) > 0 {
if m := impLine.FindStringSubmatch(s); m != nil {
if m[1] == string(breaks[0]) {
out.WriteByte('\n')
breaks = breaks[1:]
}
}
}
fmt.Fprintln(&out, s)
}
return out.Bytes()
}
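A short usage sketch of the Process entry point defined above, in the same spirit as the conversion and deep-copy generators later in this change, which pipe a generated buffer through imports.Process; the file name and source fragment here are illustrative.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	// A fragment with no package clause and no imports; Fragment must be
	// set so parse falls back to the fragment-handling paths above.
	src := []byte(`func hello() { fmt.Println(strings.ToUpper("hi")) }`)
	out, err := imports.Process("hello.go", src, &imports.Options{
		Fragment:  true,
		Comments:  true,
		TabIndent: true,
		TabWidth:  8,
	})
	if err != nil {
		log.Fatal(err)
	}
	// out now carries imports for "fmt" and "strings" plus gofmt formatting.
	fmt.Printf("%s", out)
}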


@ -0,0 +1,173 @@
// +build ignore
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Command mkindex creates the file "pkgindex.go" containing an index of the Go
// standard library. The file is intended to be built as part of the imports
// package, so that the package may be used in environments where a GOROOT is
// not available (such as App Engine).
package main
import (
"bytes"
"fmt"
"go/ast"
"go/build"
"go/format"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"strings"
)
var (
pkgIndex = make(map[string][]pkg)
exports = make(map[string]map[string]bool)
)
func main() {
// Don't use GOPATH.
ctx := build.Default
ctx.GOPATH = ""
// Populate pkgIndex global from GOROOT.
for _, path := range ctx.SrcDirs() {
f, err := os.Open(path)
if err != nil {
log.Print(err)
continue
}
children, err := f.Readdir(-1)
f.Close()
if err != nil {
log.Print(err)
continue
}
for _, child := range children {
if child.IsDir() {
loadPkg(path, child.Name())
}
}
}
// Populate exports global.
for _, ps := range pkgIndex {
for _, p := range ps {
e := loadExports(p.dir)
if e != nil {
exports[p.dir] = e
}
}
}
// Construct source file.
var buf bytes.Buffer
fmt.Fprint(&buf, pkgIndexHead)
fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex)
fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports)
src := buf.Bytes()
// Replace main.pkg type name with pkg.
src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1)
// Replace actual GOROOT with "/go".
src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1)
// Add some line wrapping.
src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1)
src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1)
var err error
src, err = format.Source(src)
if err != nil {
log.Fatal(err)
}
// Write out source file.
err = ioutil.WriteFile("pkgindex.go", src, 0644)
if err != nil {
log.Fatal(err)
}
}
const pkgIndexHead = `package imports
func init() {
pkgIndexOnce.Do(func() {
pkgIndex.m = pkgIndexMaster
})
loadExports = func(dir string) map[string]bool {
return exportsMaster[dir]
}
}
`
type pkg struct {
importpath string // full pkg import path, e.g. "net/http"
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
}
var fset = token.NewFileSet()
func loadPkg(root, importpath string) {
shortName := path.Base(importpath)
if shortName == "testdata" {
return
}
dir := filepath.Join(root, importpath)
pkgIndex[shortName] = append(pkgIndex[shortName], pkg{
importpath: importpath,
dir: dir,
})
pkgDir, err := os.Open(dir)
if err != nil {
return
}
children, err := pkgDir.Readdir(-1)
pkgDir.Close()
if err != nil {
return
}
for _, child := range children {
name := child.Name()
if name == "" {
continue
}
if c := name[0]; c == '.' || ('0' <= c && c <= '9') {
continue
}
if child.IsDir() {
loadPkg(root, filepath.Join(importpath, name))
}
}
}
func loadExports(dir string) map[string]bool {
exports := make(map[string]bool)
buildPkg, err := build.ImportDir(dir, 0)
if err != nil {
if strings.Contains(err.Error(), "no buildable Go source files in") {
return nil
}
log.Printf("could not import %q: %v", dir, err)
return nil
}
for _, file := range buildPkg.GoFiles {
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
if err != nil {
log.Printf("could not parse %q: %v", file, err)
continue
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports[name] = true
}
}
}
return exports
}
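The index built by loadPkg above keys packages by their base name, which is why the value is a slice: several packages can share a short name. A self-contained sketch of that structure (entries and paths are illustrative):

package main

import "fmt"

// pkg mirrors the entry type used by mkindex: the full import path and the
// directory the package was found in.
type pkg struct {
	importpath string
	dir        string
}

func main() {
	pkgIndex := map[string][]pkg{
		"bytes": {{importpath: "bytes", dir: "/go/src/bytes"}},
		"template": {
			{importpath: "html/template", dir: "/go/src/html/template"},
			{importpath: "text/template", dir: "/go/src/text/template"},
		},
	}
	// "template" is ambiguous, so goimports has two candidates to choose from.
	for _, p := range pkgIndex["template"] {
		fmt.Println(p.importpath, p.dir)
	}
}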


@ -0,0 +1,90 @@
// +build ignore
// mkstdlib generates the zstdlib.go file, containing the Go standard
// library API symbols. It's baked into the binary to avoid scanning
// GOPATH in the common case.
package main
import (
"bufio"
"bytes"
"fmt"
"go/format"
"io"
"log"
"os"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
)
func mustOpen(name string) io.Reader {
f, err := os.Open(name)
if err != nil {
log.Fatal(err)
}
return f
}
func api(base string) string {
return filepath.Join(os.Getenv("GOROOT"), "api", base)
}
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
func main() {
var buf bytes.Buffer
outf := func(format string, args ...interface{}) {
fmt.Fprintf(&buf, format, args...)
}
outf("// AUTO-GENERATED BY mkstdlib.go\n\n")
outf("package imports\n")
outf("var stdlib = map[string]string{\n")
f := io.MultiReader(
mustOpen(api("go1.txt")),
mustOpen(api("go1.1.txt")),
mustOpen(api("go1.2.txt")),
)
sc := bufio.NewScanner(f)
fullImport := map[string]string{} // "zip.NewReader" => "archive/zip"
ambiguous := map[string]bool{}
var keys []string
for sc.Scan() {
l := sc.Text()
has := func(v string) bool { return strings.Contains(l, v) }
if has("struct, ") || has("interface, ") || has(", method (") {
continue
}
if m := sym.FindStringSubmatch(l); m != nil {
full := m[1]
key := path.Base(full) + "." + m[2]
if exist, ok := fullImport[key]; ok {
if exist != full {
ambiguous[key] = true
}
} else {
fullImport[key] = full
keys = append(keys, key)
}
}
}
if err := sc.Err(); err != nil {
log.Fatal(err)
}
sort.Strings(keys)
for _, key := range keys {
if ambiguous[key] {
outf("\t// %q is ambiguous\n", key)
} else {
outf("\t%q: %q,\n", key, fullImport[key])
}
}
outf("}\n")
fmtbuf, err := format.Source(buf.Bytes())
if err != nil {
log.Fatal(err)
}
os.Stdout.Write(fmtbuf)
}
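To make the sym regexp above concrete, here is a small standalone sketch; the sample line follows the shape of the GOROOT api/go1.txt entries read by main, and the derived key matches the "zip.NewReader" => "archive/zip" convention noted next to fullImport.

package main

import (
	"fmt"
	"path"
	"regexp"
)

var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)

func main() {
	// Illustrative api/go1.txt-style line.
	l := `pkg archive/zip, func NewReader(io.ReaderAt, int64) (*Reader, error)`
	m := sym.FindStringSubmatch(l)
	fmt.Println(m[1], m[2])                   // archive/zip NewReader
	fmt.Println(path.Base(m[1]) + "." + m[2]) // zip.NewReader, the key used by mkstdlib
}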


@ -0,0 +1,214 @@
// +build go1.2
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hacked up copy of go/ast/import.go
package imports
import (
"go/ast"
"go/token"
"sort"
"strconv"
)
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
func sortImports(fset *token.FileSet, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
if !ok || d.Tok != token.IMPORT {
// Not an import declaration, so we're done.
// Imports are always first.
break
}
if len(d.Specs) == 0 {
// Empty import block, remove it.
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
}
if !d.Lparen.IsValid() {
// Not a block: sorted by default.
continue
}
// Identify and sort runs of specs on successive lines.
i := 0
specs := d.Specs[:0]
for j, s := range d.Specs {
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
// j begins a new run. End this one.
specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
i = j
}
}
specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
d.Specs = specs
// Deduping can leave a blank line before the rparen; clean that up.
if len(d.Specs) > 0 {
lastSpec := d.Specs[len(d.Specs)-1]
lastLine := fset.Position(lastSpec.Pos()).Line
if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
fset.File(d.Rparen).MergeLine(rParenLine - 1)
}
}
}
}
func importPath(s ast.Spec) string {
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
if err == nil {
return t
}
return ""
}
func importName(s ast.Spec) string {
n := s.(*ast.ImportSpec).Name
if n == nil {
return ""
}
return n.Name
}
func importComment(s ast.Spec) string {
c := s.(*ast.ImportSpec).Comment
if c == nil {
return ""
}
return c.Text()
}
// collapse indicates whether prev may be removed, leaving only next.
func collapse(prev, next ast.Spec) bool {
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
return false
}
return prev.(*ast.ImportSpec).Comment == nil
}
type posSpan struct {
Start token.Pos
End token.Pos
}
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
// Can't short-circuit here even if specs are already sorted,
// since they might yet need deduplication.
// A lone import, however, may be safely ignored.
if len(specs) <= 1 {
return specs
}
// Record positions for specs.
pos := make([]posSpan, len(specs))
for i, s := range specs {
pos[i] = posSpan{s.Pos(), s.End()}
}
// Identify comments in this range.
// Any comment from pos[0].Start to the final line counts.
lastLine := fset.Position(pos[len(pos)-1].End).Line
cstart := len(f.Comments)
cend := len(f.Comments)
for i, g := range f.Comments {
if g.Pos() < pos[0].Start {
continue
}
if i < cstart {
cstart = i
}
if fset.Position(g.End()).Line > lastLine {
cend = i
break
}
}
comments := f.Comments[cstart:cend]
// Assign each comment to the import spec preceding it.
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
specIndex := 0
for _, g := range comments {
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
specIndex++
}
s := specs[specIndex].(*ast.ImportSpec)
importComment[s] = append(importComment[s], g)
}
// Sort the import specs by import path.
// Remove duplicates, when possible without data loss.
// Reassign the import paths to have the same position sequence.
// Reassign each comment to abut the end of its spec.
// Sort the comments by new position.
sort.Sort(byImportSpec(specs))
// Dedup. Thanks to our sorting, we can just consider
// adjacent pairs of imports.
deduped := specs[:0]
for i, s := range specs {
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
deduped = append(deduped, s)
} else {
p := s.Pos()
fset.File(p).MergeLine(fset.Position(p).Line)
}
}
specs = deduped
// Fix up comment positions
for i, s := range specs {
s := s.(*ast.ImportSpec)
if s.Name != nil {
s.Name.NamePos = pos[i].Start
}
s.Path.ValuePos = pos[i].Start
s.EndPos = pos[i].End
for _, g := range importComment[s] {
for _, c := range g.List {
c.Slash = pos[i].End
}
}
}
sort.Sort(byCommentPos(comments))
return specs
}
type byImportSpec []ast.Spec // slice of *ast.ImportSpec
func (x byImportSpec) Len() int { return len(x) }
func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byImportSpec) Less(i, j int) bool {
ipath := importPath(x[i])
jpath := importPath(x[j])
igroup := importGroup(ipath)
jgroup := importGroup(jpath)
if igroup != jgroup {
return igroup < jgroup
}
if ipath != jpath {
return ipath < jpath
}
iname := importName(x[i])
jname := importName(x[j])
if iname != jname {
return iname < jname
}
return importComment(x[i]) < importComment(x[j])
}
type byCommentPos []*ast.CommentGroup
func (x byCommentPos) Len() int { return len(x) }
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
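As an illustration of the dedup path above (collapse allows dropping the earlier spec when path and name match and it carries no comment), a block like

import (
	"fmt"
	"strings"
	"strings"
)

comes out of sortImports as

import (
	"fmt"
	"strings"
)

The duplicated-import test case (issue 8459) earlier in this change exercises the same behaviour together with unused-import removal.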

View File

@ -0,0 +1,14 @@
// +build !go1.2
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package imports
import "go/ast"
// Go 1.1 users don't get fancy package grouping.
// But this is still gofmt-compliant:
var sortImports = ast.SortImports

File diff suppressed because it is too large


@ -17,6 +17,7 @@ limitations under the License.
package main
import (
"bytes"
"fmt"
"io"
"os"
@ -33,6 +34,7 @@ import (
"github.com/golang/glog"
flag "github.com/spf13/pflag"
"golang.org/x/tools/imports"
)
const pkgBase = "k8s.io/kubernetes/pkg"
@ -58,9 +60,16 @@ func main() {
funcOut = file
}
data := new(bytes.Buffer)
group, version := path.Split(*groupVersion)
group = strings.TrimRight(group, "/")
_, err := data.WriteString(fmt.Sprintf("package %v\n", version))
if err != nil {
glog.Fatalf("error writing package line: %v", err)
}
versionPath := path.Join(pkgBase, group, version)
generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), versionPath)
apiShort := generator.AddImport(path.Join(pkgBase, "api"))
@ -76,13 +85,21 @@ func main() {
}
}
generator.RepackImports(util.NewStringSet())
if err := generator.WriteImports(funcOut); err != nil {
if err := generator.WriteImports(data); err != nil {
glog.Fatalf("error while writing imports: %v", err)
}
if err := generator.WriteConversionFunctions(funcOut); err != nil {
if err := generator.WriteConversionFunctions(data); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil {
if err := generator.RegisterConversionFunctions(data, fmt.Sprintf("%s.Scheme", apiShort)); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
b, err := imports.Process("", data.Bytes(), nil)
if err != nil {
glog.Fatalf("error while update imports: %v", err)
}
if _, err := funcOut.Write(b); err != nil {
glog.Fatalf("error while writing out the resulting file: %v", err)
}
}


@ -17,6 +17,8 @@ limitations under the License.
package main
import (
"bytes"
"fmt"
"io"
"os"
"path"
@ -32,6 +34,7 @@ import (
"github.com/golang/glog"
flag "github.com/spf13/pflag"
"golang.org/x/tools/imports"
)
const pkgBase = "k8s.io/kubernetes/pkg"
@ -58,12 +61,24 @@ func main() {
funcOut = file
}
data := new(bytes.Buffer)
group, version := path.Split(*groupVersion)
group = strings.TrimRight(group, "/")
registerTo := "api.Scheme"
if *groupVersion == "api/" {
registerTo = "Scheme"
}
pkgname := group
if len(version) != 0 {
pkgname = version
}
_, err := data.WriteString(fmt.Sprintf("package %s\n", pkgname))
if err != nil {
glog.Fatalf("error writing package line: %v", err)
}
versionPath := path.Join(pkgBase, group, version)
generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, util.NewStringSet("k8s.io/kubernetes"))
generator.AddImport(path.Join(pkgBase, "api"))
@ -86,13 +101,20 @@ func main() {
}
}
generator.RepackImports()
if err := generator.WriteImports(funcOut); err != nil {
if err := generator.WriteImports(data); err != nil {
glog.Fatalf("error while writing imports: %v", err)
}
if err := generator.WriteDeepCopyFunctions(funcOut); err != nil {
if err := generator.WriteDeepCopyFunctions(data); err != nil {
glog.Fatalf("error while writing deep copy functions: %v", err)
}
if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil {
if err := generator.RegisterDeepCopyFunctions(data, registerTo); err != nil {
glog.Fatalf("error while registering deep copy functions: %v", err)
}
b, err := imports.Process("", data.Bytes(), nil)
if err != nil {
glog.Fatalf("error while update imports: %v", err)
}
if _, err := funcOut.Write(b); err != nil {
glog.Fatalf("error while writing out the resulting file: %v", err)
}
}


@ -33,26 +33,15 @@ function generate_version() {
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > "$TMPFILE"
cat >> "$TMPFILE" <<EOF
package ${version##*/}
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-conversions.sh
// AUTO-GENERATED FUNCTIONS START HERE
EOF
"${genconversion}" -v "${version}" -f - >> "$TMPFILE"
cat >> "$TMPFILE" <<EOF
// AUTO-GENERATED FUNCTIONS END HERE
EOF
goimports -w "$TMPFILE"
mv "$TMPFILE" "pkg/${version}/conversion_generated.go"
}
if ! which goimports >/dev/null; then
echo "goimports not in path, run go get golang.org/x/tools/cmd/goimports"
exit 1
fi
DEFAULT_VERSIONS="api/v1 expapi/v1"
VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS}
for ver in $VERSIONS; do


@ -36,27 +36,14 @@ function generate_version() {
echo "Generating for ${version}"
# version is group/version, so use the version number as the package name unless
# this is an internal version, in which case use the group name.
pkgname=${version##*/}
if [[ -z $pkgname ]]; then
pkgname=${version%/*}
fi
sed 's/YEAR/2015/' hack/boilerplate/boilerplate.go.txt > $TMPFILE
cat >> $TMPFILE <<EOF
package $pkgname
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY \$KUBEROOT/hack/update-generated-deep-copies.sh.
// AUTO-GENERATED FUNCTIONS START HERE
EOF
"${gendeepcopy}" -v "${version}" -f - -o "${version}=" >> "$TMPFILE"
cat >> "$TMPFILE" <<EOF
// AUTO-GENERATED FUNCTIONS END HERE
EOF
goimports -w "$TMPFILE"
mv "$TMPFILE" `result_file_name ${version}`
}


@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package api
// AUTO-GENERATED FUNCTIONS START HERE
import (
time "time"
@ -2358,5 +2359,3 @@ func init() {
panic(err)
}
}
// AUTO-GENERATED FUNCTIONS END HERE


@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh
package v1
// AUTO-GENERATED FUNCTIONS START HERE
import (
reflect "reflect"
@ -5105,5 +5106,3 @@ func init() {
panic(err)
}
}
// AUTO-GENERATED FUNCTIONS END HERE


@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package v1
// AUTO-GENERATED FUNCTIONS START HERE
import (
time "time"
@ -2360,5 +2361,3 @@ func init() {
panic(err)
}
}
// AUTO-GENERATED FUNCTIONS END HERE


@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package expapi
// AUTO-GENERATED FUNCTIONS START HERE
import (
time "time"
@ -232,5 +233,3 @@ func init() {
panic(err)
}
}
// AUTO-GENERATED FUNCTIONS END HERE


@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-conversions.sh
package v1
// AUTO-GENERATED FUNCTIONS START HERE
import (
reflect "reflect"
@ -444,5 +445,3 @@ func init() {
panic(err)
}
}
// AUTO-GENERATED FUNCTIONS END HERE


@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package v1
// AUTO-GENERATED FUNCTIONS START HERE
import (
time "time"
@ -233,5 +234,3 @@ func init() {
panic(err)
}
}
// AUTO-GENERATED FUNCTIONS END HERE


@ -23,7 +23,6 @@ before_install:
install:
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/goimports
- go get github.com/mattn/goveralls
- go get github.com/tools/godep
- ./hack/build-go.sh