// Package checker defines the implementation of the checker commands.
// The same code drives the multi-analysis driver, the single-analysis
// driver that is conventionally provided for convenience along with
// each analysis package, and the test driver.
package checker

import (
	"bytes"
	"encoding/gob"
	"encoding/json"
	"flag"
	"fmt"
	"go/token"
	"go/types"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"runtime"
	"runtime/pprof"
	"runtime/trace"
	"sort"
	"strings"
	"sync"
	"time"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/internal/unitchecker"
	"golang.org/x/tools/go/packages"
)

var (
	// JSON causes diagnostics to be emitted in JSON form.
	JSON = false

	// Debug is a set of single-letter flags:
	//
	//	f	show [f]acts as they are created
	//	p	disable [p]arallel execution of analyzers
	//	s	do additional [s]anity checks on fact types and serialization
	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
	//	v	show [v]erbose logging
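	//
	// For example, Debug="tp" (set via -debug=tp) shows per-analysis
	// timings with parallel execution disabled.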
	//
	Debug = ""

	Context = -1 // if >=0, display offending line plus this many lines of context

	// Log files for optional performance tracing.
	CPUProfile, MemProfile, Trace string
)

// RegisterFlags registers command-line flags used by the analysis driver.
func RegisterFlags() {
	flag.BoolVar(&JSON, "json", JSON, "emit JSON output")
	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)
	flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`)

	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
	flag.StringVar(&Trace, "trace", "", "write trace log to this file")
}

// Run loads the packages specified by args using go/packages,
// then applies the specified analyzers to them.
// Analysis flags must already have been set.
// It provides most of the logic for the main functions of both the
// singlechecker and the multi-analysis commands.
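//
// An illustrative sketch of a single-analyzer command (the singlechecker
// and multichecker packages provide this wiring for real drivers;
// myanalyzer is a hypothetical analysis package):
//
//	func main() {
//		checker.RegisterFlags()
//		flag.Parse()
//		if err := checker.Run(flag.Args(), []*analysis.Analyzer{myanalyzer.Analyzer}); err != nil {
//			log.Fatal(err)
//		}
//	}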
func Run(args []string, analyzers []*analysis.Analyzer) error {
	if CPUProfile != "" {
		f, err := os.Create(CPUProfile)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		// NB: profile won't be written in case of error.
		defer pprof.StopCPUProfile()
	}

	if Trace != "" {
		f, err := os.Create(Trace)
		if err != nil {
			log.Fatal(err)
		}
		if err := trace.Start(f); err != nil {
			log.Fatal(err)
		}
		// NB: trace log won't be written in case of error.
		defer func() {
			trace.Stop()
			log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
		}()
	}

	if MemProfile != "" {
		f, err := os.Create(MemProfile)
		if err != nil {
			log.Fatal(err)
		}
		// NB: memprofile won't be written in case of error.
		defer func() {
			runtime.GC() // get up-to-date statistics
			if err := pprof.WriteHeapProfile(f); err != nil {
				log.Fatalf("Writing memory profile: %v", err)
			}
			f.Close()
		}()
	}

	// The undocumented protocol used by 'go vet'
	// is that a vet-like tool must support:
	//
	//      -flags          describe flags in JSON
	//      -V=full         describe executable for build caching
	//      foo.cfg         perform separate modular analysis on the single
	//                      unit described by a JSON config file foo.cfg.
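	//
	// For example, 'go vet' typically invokes the tool once with -V=full
	// to obtain a stamp for the build cache, then once per package with
	// the path of a generated .cfg file as the sole argument.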
	if len(args) == 1 && strings.HasSuffix(args[0], ".cfg") {
		unitchecker.Main(args[0], analyzers)
		panic("unreachable")
	}

	// Load the packages.
	if dbg('v') {
		log.SetPrefix("")
		log.SetFlags(log.Lmicroseconds) // display timing
		log.Printf("load %s", args)
	}

	// Optimization: if the selected analyzers don't produce/consume
	// facts, we need source only for the initial packages.
	allSyntax := needFacts(analyzers)
	initial, err := load(args, allSyntax)
	if err != nil {
		return err
	}

	roots := analyze(initial, analyzers)

	// Print the results.
	printDiagnostics(roots)

	return nil
}

// load loads the initial packages.
func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
	mode := packages.LoadSyntax
	if allSyntax {
		mode = packages.LoadAllSyntax
	}
	conf := packages.Config{
		Mode:  mode,
		Tests: true,
	}
	initial, err := packages.Load(&conf, patterns...)
	if err == nil {
		if n := packages.PrintErrors(initial); n > 1 {
			err = fmt.Errorf("%d errors during loading", n)
		} else if n == 1 {
			err = fmt.Errorf("error during loading")
		}
	}
	return initial, err
}

// TestAnalyzer applies an analysis to a set of packages (and their
// dependencies if necessary) and returns the results.
//
// Facts about pkg are returned in a map keyed by object; package facts
// have a nil key.
//
// This entry point is used only by analysistest.
func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
	var results []*TestAnalyzerResult
	for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
		facts := make(map[types.Object][]analysis.Fact)
		for key, fact := range act.objectFacts {
			if key.obj.Pkg() == act.pass.Pkg {
				facts[key.obj] = append(facts[key.obj], fact)
			}
		}
		for key, fact := range act.packageFacts {
			if key.pkg == act.pass.Pkg {
				facts[nil] = append(facts[nil], fact)
			}
		}

		results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
	}
	return results
}

// A TestAnalyzerResult records the outcome of running a single analyzer
// on a single package, for use by TestAnalyzer.
type TestAnalyzerResult struct {
	Pass        *analysis.Pass
	Diagnostics []analysis.Diagnostic
	Facts       map[types.Object][]analysis.Fact
	Result      interface{}
	Err         error
}

func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
	// Construct the action graph.
	if dbg('v') {
		log.Printf("building graph of analysis passes")
	}

	// Each graph node (action) is one unit of analysis.
	// Edges express package-to-package (vertical) dependencies,
	// and analysis-to-analysis (horizontal) dependencies.
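	//
	// For example, if analyzer A requires analyzer B, A declares fact
	// types, and package p imports package q, the graph contains the
	// nodes (A,p), (B,p), (A,q), and (B,q), horizontal edges
	// (A,p)->(B,p) and (A,q)->(B,q), and a vertical edge (A,p)->(A,q).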
	type key struct {
		*analysis.Analyzer
		*packages.Package
	}
	actions := make(map[key]*action)

	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
		k := key{a, pkg}
		act, ok := actions[k]
		if !ok {
			act = &action{a: a, pkg: pkg}

			// Add a dependency on each required analyzer.
			for _, req := range a.Requires {
				act.deps = append(act.deps, mkAction(req, pkg))
			}

			// An analysis that consumes/produces facts
			// must run on the package's dependencies too.
			if len(a.FactTypes) > 0 {
				paths := make([]string, 0, len(pkg.Imports))
				for path := range pkg.Imports {
					paths = append(paths, path)
				}
				sort.Strings(paths) // for determinism
				for _, path := range paths {
					dep := mkAction(a, pkg.Imports[path])
					act.deps = append(act.deps, dep)
				}
			}

			actions[k] = act
		}
		return act
	}

	// Build nodes for initial packages.
	var roots []*action
	for _, a := range analyzers {
		for _, pkg := range pkgs {
			root := mkAction(a, pkg)
			root.isroot = true
			roots = append(roots, root)
		}
	}

	// Execute the graph in parallel.
	execAll(roots)

	return roots
}

// printDiagnostics prints the diagnostics for the root packages in either
// plain text or JSON format. JSON format also includes errors for any
// dependencies.
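//
// Illustrative shape of the JSON output (package and analyzer names are
// made up):
//
//	{
//		"example.com/p": {
//			"findcall": [
//				{"category": "findcall", "posn": "p.go:3:9", "message": "call of findme"}
//			]
//		}
//	}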
func printDiagnostics(roots []*action) {
	// Print the output.
	//
	// Print diagnostics only for root packages,
	// but errors for all packages.
	printed := make(map[*action]bool)
	var print func(*action)
	var visitAll func(actions []*action)
	visitAll = func(actions []*action) {
		for _, act := range actions {
			if !printed[act] {
				printed[act] = true
				visitAll(act.deps)
				print(act)
			}
		}
	}

	if JSON {
		tree := make(map[string]map[string]interface{}) // ID -> analysis -> result

		print = func(act *action) {
			m, existing := tree[act.pkg.ID]
			if !existing {
				m = make(map[string]interface{})
				// Insert m into tree later iff non-empty.
			}
			if act.err != nil {
				type jsonError struct {
					Err string `json:"error"`
				}
				m[act.a.Name] = jsonError{act.err.Error()}
			} else if act.isroot {
				type jsonDiagnostic struct {
					Category string `json:"category,omitempty"`
					Posn     string `json:"posn"`
					Message  string `json:"message"`
				}
				var diagnostics []jsonDiagnostic
				for _, f := range act.diagnostics {
					diagnostics = append(diagnostics, jsonDiagnostic{
						Category: f.Category,
						Posn:     act.pkg.Fset.Position(f.Pos).String(),
						Message:  f.Message,
					})
				}
				if diagnostics != nil {
					m[act.a.Name] = diagnostics
				}
			}
			if !existing && len(m) > 0 {
				tree[act.pkg.ID] = m
			}
		}
		visitAll(roots)

		data, err := json.MarshalIndent(tree, "", "\t")
		if err != nil {
			log.Panicf("internal error: JSON marshalling failed: %v", err)
		}
		os.Stdout.Write(data)
		fmt.Println()
	} else {
		// plain text output

		// De-duplicate diagnostics by position (not token.Pos) to
		// avoid double-reporting in source files that belong to
		// multiple packages, such as foo and foo.test.
		type key struct {
			token.Position
			*analysis.Analyzer
			message string
		}
		seen := make(map[key]bool)

		print = func(act *action) {
			if act.err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
				return
			}
			if act.isroot {
				for _, f := range act.diagnostics {
					// We don't display a.Name/f.Category
					// as most users don't care.

					posn := act.pkg.Fset.Position(f.Pos)

					k := key{posn, act.a, f.Message}
					if seen[k] {
						continue // duplicate
					}
					seen[k] = true

					fmt.Fprintf(os.Stderr, "%s: %s\n", posn, f.Message)

					// -c=N: show offending line plus N lines of context.
					if Context >= 0 {
						data, _ := ioutil.ReadFile(posn.Filename)
						lines := strings.Split(string(data), "\n")
						for i := posn.Line - Context; i <= posn.Line+Context; i++ {
							if 1 <= i && i <= len(lines) {
								fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
							}
						}
					}
				}
			}
		}
		visitAll(roots)
	}

	// Print timing info.
	if dbg('t') {
		if !dbg('p') {
			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
		}
		var all []*action
		var total time.Duration
		for act := range printed {
			all = append(all, act)
			total += act.duration
		}
		sort.Slice(all, func(i, j int) bool {
			return all[i].duration > all[j].duration
		})

		// Print actions accounting for 90% of the total.
		var sum time.Duration
		for _, act := range all {
			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
			sum += act.duration
			if sum >= total*9/10 {
				break
			}
		}
	}
}

// needFacts reports whether any analysis required by the specified set
// needs facts.  If so, we must load the entire program from source.
func needFacts(analyzers []*analysis.Analyzer) bool {
	seen := make(map[*analysis.Analyzer]bool)
	var q []*analysis.Analyzer // for BFS
	q = append(q, analyzers...)
	for len(q) > 0 {
		a := q[0]
		q = q[1:]
		if !seen[a] {
			seen[a] = true
			if len(a.FactTypes) > 0 {
				return true
			}
			q = append(q, a.Requires...)
		}
	}
	return false
}

// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type action struct {
	once         sync.Once
	a            *analysis.Analyzer
	pkg          *packages.Package
	pass         *analysis.Pass
	isroot       bool
	deps         []*action
	objectFacts  map[objectFactKey]analysis.Fact
	packageFacts map[packageFactKey]analysis.Fact
	inputs       map[*analysis.Analyzer]interface{}
	result       interface{}
	diagnostics  []analysis.Diagnostic
	err          error
	duration     time.Duration
}

type objectFactKey struct {
	obj types.Object
	typ reflect.Type
}

type packageFactKey struct {
	pkg *types.Package
	typ reflect.Type
}

func (act *action) String() string {
	return fmt.Sprintf("%s@%s", act.a, act.pkg)
}

func execAll(actions []*action) {
	sequential := dbg('p')
	var wg sync.WaitGroup
	for _, act := range actions {
		wg.Add(1)
		work := func(act *action) {
			act.exec()
			wg.Done()
		}
		if sequential {
			work(act)
		} else {
			go work(act)
		}
	}
	wg.Wait()
}

func (act *action) exec() { act.once.Do(act.execOnce) }

func (act *action) execOnce() {
	// Analyze dependencies.
	execAll(act.deps)

	// TODO(adonovan): uncomment this during profiling.
	// It won't build pre-go1.11 but conditional compilation
	// using build tags isn't warranted.
	//
	// ctx, task := trace.NewTask(context.Background(), "exec")
	// trace.Log(ctx, "pass", act.String())
	// defer task.End()

	// Record time spent in this node but not its dependencies.
	// In parallel mode, due to GC/scheduler contention, the
	// time is 5x higher than in sequential mode, even with a
	// semaphore limiting the number of threads here.
	// So use -debug=tp.
	if dbg('t') {
		t0 := time.Now()
		defer func() { act.duration = time.Since(t0) }()
	}

	// Report an error if any dependency failed.
	var failed []string
	for _, dep := range act.deps {
		if dep.err != nil {
			failed = append(failed, dep.String())
		}
	}
	if failed != nil {
		sort.Strings(failed)
		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
		return
	}

	// Plumb the output values of the dependencies
	// into the inputs of this action.  Also facts.
	inputs := make(map[*analysis.Analyzer]interface{})
	act.objectFacts = make(map[objectFactKey]analysis.Fact)
	act.packageFacts = make(map[packageFactKey]analysis.Fact)
	for _, dep := range act.deps {
		if dep.pkg == act.pkg {
			// Same package, different analysis (horizontal edge):
			// in-memory outputs of prerequisite analyzers
			// become inputs to this analysis pass.
			inputs[dep.a] = dep.result

		} else if dep.a == act.a { // (always true)
			// Same analysis, different package (vertical edge):
			// serialized facts produced by prerequisite analysis
			// become available to this analysis pass.
			inheritFacts(act, dep)
		}
	}

	// Run the analysis.
	pass := &analysis.Pass{
		Analyzer:          act.a,
		Fset:              act.pkg.Fset,
		Files:             act.pkg.Syntax,
		OtherFiles:        act.pkg.OtherFiles,
		Pkg:               act.pkg.Types,
		TypesInfo:         act.pkg.TypesInfo,
		ResultOf:          inputs,
		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
		ImportObjectFact:  act.importObjectFact,
		ExportObjectFact:  act.exportObjectFact,
		ImportPackageFact: act.importPackageFact,
		ExportPackageFact: act.exportPackageFact,
	}
	act.pass = pass

	var err error
	if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
		err = fmt.Errorf("analysis skipped due to errors in package")
	} else {
		act.result, err = pass.Analyzer.Run(pass)
		if err == nil {
			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
				err = fmt.Errorf(
					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
					pass.Pkg.Path(), pass.Analyzer, got, want)
			}
		}
	}
	act.err = err

	// disallow calls after Run
	pass.ExportObjectFact = nil
	pass.ExportPackageFact = nil
}

// inheritFacts populates act.facts with
// those it obtains from its dependency, dep.
func inheritFacts(act, dep *action) {
	serialize := dbg('s')

	for key, fact := range dep.objectFacts {
		// Filter out facts related to objects
		// that are irrelevant downstream
		// (equivalently: not in the compiler export data).
		if !exportedFrom(key.obj, dep.pkg.Types) {
			if false {
				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
			}
			continue
		}

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces.
		if serialize {
			var err error
			fact, err = codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
		}
		act.objectFacts[key] = fact
	}

	for key, fact := range dep.packageFacts {
		// TODO: filter out facts that belong to
		// packages not mentioned in the export data
		// to prevent side channels.

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces
		// and is deterministic.
		if serialize {
			var err error
			fact, err = codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
		}
		act.packageFacts[key] = fact
	}
}

// codeFact encodes then decodes a fact,
// just to exercise that logic.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
	// We encode facts one at a time.
	// A real modular driver would emit all facts
	// into one encoder to improve gob efficiency.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
		return nil, err
	}

	// Encode it twice and assert that we get the same bits.
	// This helps detect nondeterministic Gob encoding (e.g. of maps).
	var buf2 bytes.Buffer
	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
		return nil, err
	}
	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
	}

	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
		return nil, err
	}
	return new, nil
}

// exportedFrom reports whether obj may be visible to a package that imports pkg.
// This includes not just the exported members of pkg, but also unexported
// constants, types, fields, and methods, perhaps belonging to other packages,
// that find their way into the API.
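// For example, every method, struct field, constant, and type name is
// treated as potentially visible, even if its own name is unexported.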
// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
//
// TODO(adonovan): do more accurate filtering by walking the type graph.
func exportedFrom(obj types.Object, pkg *types.Package) bool {
	switch obj := obj.(type) {
	case *types.Func:
		return obj.Exported() && obj.Pkg() == pkg ||
			obj.Type().(*types.Signature).Recv() != nil
	case *types.Var:
		return obj.Exported() && obj.Pkg() == pkg ||
			obj.IsField()
	case *types.TypeName, *types.Const:
		return true
	}
	return false // Nil, Builtin, Label, or PkgName
}

// importObjectFact implements Pass.ImportObjectFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importObjectFact copies the fact value to *ptr.
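//
// Illustrative use from within an analyzer's run function, where
// isNoCopy is a hypothetical Fact type:
//
//	var f isNoCopy
//	if pass.ImportObjectFact(obj, &f) {
//		// obj has the fact; f now holds its value.
//	}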
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
	if obj == nil {
		panic("nil object")
	}
	key := objectFactKey{obj, factType(ptr)}
	if v, ok := act.objectFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportObjectFact implements Pass.ExportObjectFact.
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if act.pass.ExportObjectFact == nil {
		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
	}

	if obj.Pkg() != act.pkg.Types {
		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
			act.a, act.pkg, obj, fact)
	}

	key := objectFactKey{obj, factType(fact)}
	act.objectFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		objstr := types.ObjectString(obj, (*types.Package).Name)
		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
	}
}

// importPackageFact implements Pass.ImportPackageFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importPackageFact copies the fact value to *ptr.
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
	if pkg == nil {
		panic("nil package")
	}
	key := packageFactKey{pkg, factType(ptr)}
	if v, ok := act.packageFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportPackageFact implements Pass.ExportPackageFact.
func (act *action) exportPackageFact(fact analysis.Fact) {
	if act.pass.ExportPackageFact == nil {
		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
	}

	key := packageFactKey{act.pass.Pkg, factType(fact)}
	act.packageFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
			act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
	}
}

func factType(fact analysis.Fact) reflect.Type {
	t := reflect.TypeOf(fact)
	if t.Kind() != reflect.Ptr {
		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
	}
	return t
}

func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }
