// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package checker defines the implementation of the checker commands.
// The same code drives the multi-analysis driver, the single-analysis
// driver that is conventionally provided for convenience along with
// each analysis package, and the test driver.
package checker

import (
	"bytes"
	"encoding/gob"
	"errors"
	"flag"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"go/types"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"runtime"
	"runtime/pprof"
	"runtime/trace"
	"sort"
	"strings"
	"sync"
	"time"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/internal/analysisflags"
	"golang.org/x/tools/go/packages"
)

var (
	// Debug is a set of single-letter flags:
	//
	//	f	show [f]acts as they are created
	//	p	disable [p]arallel execution of analyzers
	//	s	do additional [s]anity checks on fact types and serialization
	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
	//	v	show [v]erbose logging
	//
	Debug = ""

	// Log files for optional performance tracing.
	CPUProfile, MemProfile, Trace string

	// IncludeTests indicates whether test files should be analyzed too.
	IncludeTests = true

	// Fix determines whether to apply all suggested fixes.
	Fix bool
)

// RegisterFlags registers command-line flags used by the analysis driver.
func RegisterFlags() {
	// When adding flags here, remember to update
	// the list of suppressed flags in analysisflags.

	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)

	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
	flag.StringVar(&Trace, "trace", "", "write trace log to this file")
	flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too")

	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
}
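
// As an illustration of how RegisterFlags and Run fit together, a minimal
// driver built on this package (which is roughly what the singlechecker
// and multichecker packages do) might look like the sketch below. Note
// that, being internal, this package can be imported only from within
// golang.org/x/tools itself; the "noop" analyzer is a placeholder
// invented for this example.
//
//	package main
//
//	import (
//		"flag"
//		"os"
//
//		"golang.org/x/tools/go/analysis"
//		"golang.org/x/tools/go/analysis/internal/checker"
//	)
//
//	var noop = &analysis.Analyzer{
//		Name: "noop",
//		Doc:  "no-op analyzer used only for illustration",
//		Run:  func(*analysis.Pass) (interface{}, error) { return nil, nil },
//	}
//
//	func main() {
//		checker.RegisterFlags()
//		flag.Parse()
//		os.Exit(checker.Run(flag.Args(), []*analysis.Analyzer{noop}))
//	}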

// Run loads the packages specified by args using go/packages,
// then applies the specified analyzers to them.
// Analysis flags must already have been set.
// It provides most of the logic for the main functions of both the
// singlechecker and the multi-analysis commands.
// It returns the appropriate exit code.
func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
	if CPUProfile != "" {
		f, err := os.Create(CPUProfile)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		// NB: profile won't be written in case of error.
		defer pprof.StopCPUProfile()
	}

	if Trace != "" {
		f, err := os.Create(Trace)
		if err != nil {
			log.Fatal(err)
		}
		if err := trace.Start(f); err != nil {
			log.Fatal(err)
		}
		// NB: trace log won't be written in case of error.
		defer func() {
			trace.Stop()
			log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
		}()
	}

	if MemProfile != "" {
		f, err := os.Create(MemProfile)
		if err != nil {
			log.Fatal(err)
		}
		// NB: memprofile won't be written in case of error.
		defer func() {
			runtime.GC() // get up-to-date statistics
			if err := pprof.WriteHeapProfile(f); err != nil {
				log.Fatalf("Writing memory profile: %v", err)
			}
			f.Close()
		}()
	}

	// Load the packages.
	if dbg('v') {
		log.SetPrefix("")
		log.SetFlags(log.Lmicroseconds) // display timing
		log.Printf("load %s", args)
	}

	// Optimization: if the selected analyzers don't produce/consume
	// facts, we need source only for the initial packages.
	allSyntax := needFacts(analyzers)
	initial, err := load(args, allSyntax)
	if err != nil {
		if _, ok := err.(typeParseError); !ok {
			// Fail when some of the errors are not
			// related to parsing or typing.
			log.Print(err)
			return 1
		}
		// TODO: filter analyzers based on RunDespiteError?
	}

	// Print the results.
	roots := analyze(initial, analyzers)

	if Fix {
		if err := applyFixes(roots); err != nil {
			// Fail if any of the fixes could not be applied.
			log.Print(err)
			return 1
		}
	}
	return printDiagnostics(roots)
}

// typeParseError represents a package load error
// that is related to typing and parsing.
type typeParseError struct {
	error
}

// load loads the initial packages. If all loading issues are related to
// typing and parsing, the returned error is of type typeParseError.
func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
	mode := packages.LoadSyntax
	if allSyntax {
		mode = packages.LoadAllSyntax
	}
	conf := packages.Config{
		Mode:  mode,
		Tests: IncludeTests,
	}
	initial, err := packages.Load(&conf, patterns...)
	if err == nil {
		if len(initial) == 0 {
			err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
		} else {
			err = loadingError(initial)
		}
	}
	return initial, err
}

// loadingError checks for issues during the loading of initial
// packages. It returns nil if there are no issues. It returns an error
// of type typeParseError if all errors, including those in
// dependencies, are related to typing or parsing. Otherwise,
// a plain error is returned with an appropriate message.
func loadingError(initial []*packages.Package) error {
	var err error
	if n := packages.PrintErrors(initial); n > 1 {
		err = fmt.Errorf("%d errors during loading", n)
	} else if n == 1 {
		err = errors.New("error during loading")
	} else {
		// no errors
		return nil
	}
	all := true
	packages.Visit(initial, nil, func(pkg *packages.Package) {
		for _, err := range pkg.Errors {
			typeOrParse := err.Kind == packages.TypeError || err.Kind == packages.ParseError
			all = all && typeOrParse
		}
	})
	if all {
		return typeParseError{err}
	}
	return err
}

// TestAnalyzer applies an analysis to a set of packages (and their
// dependencies if necessary) and returns the results.
//
// Facts about pkg are returned in a map keyed by object; package facts
// have a nil key.
//
// This entry point is used only by analysistest.
func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
	var results []*TestAnalyzerResult
	for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
		facts := make(map[types.Object][]analysis.Fact)
		for key, fact := range act.objectFacts {
			if key.obj.Pkg() == act.pass.Pkg {
				facts[key.obj] = append(facts[key.obj], fact)
			}
		}
		for key, fact := range act.packageFacts {
			if key.pkg == act.pass.Pkg {
				facts[nil] = append(facts[nil], fact)
			}
		}

		results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
	}
	return results
}

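// A TestAnalyzerResult holds the outcome of applying one analyzer to
// one package: the pass, the diagnostics and facts it produced, the
// analyzer's result value, and any error. Like TestAnalyzer, it is
// used only by analysistest.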
type TestAnalyzerResult struct {
	Pass        *analysis.Pass
	Diagnostics []analysis.Diagnostic
	Facts       map[types.Object][]analysis.Fact
	Result      interface{}
	Err         error
}

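// analyze builds the graph of actions (one per analyzer/package pair)
// for the given packages and analyzers, executes it, and returns the
// root actions corresponding to the initial packages.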
func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
	// Construct the action graph.
	if dbg('v') {
		log.Printf("building graph of analysis passes")
	}

	// Each graph node (action) is one unit of analysis.
	// Edges express package-to-package (vertical) dependencies,
	// and analysis-to-analysis (horizontal) dependencies.
	type key struct {
		*analysis.Analyzer
		*packages.Package
	}
	actions := make(map[key]*action)

	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
		k := key{a, pkg}
		act, ok := actions[k]
		if !ok {
			act = &action{a: a, pkg: pkg}

			// Add a dependency on each required analyzer.
			for _, req := range a.Requires {
				act.deps = append(act.deps, mkAction(req, pkg))
			}

			// An analysis that consumes/produces facts
			// must run on the package's dependencies too.
			if len(a.FactTypes) > 0 {
				paths := make([]string, 0, len(pkg.Imports))
				for path := range pkg.Imports {
					paths = append(paths, path)
				}
				sort.Strings(paths) // for determinism
				for _, path := range paths {
					dep := mkAction(a, pkg.Imports[path])
					act.deps = append(act.deps, dep)
				}
			}

			actions[k] = act
		}
		return act
	}

	// Build nodes for initial packages.
	var roots []*action
	for _, a := range analyzers {
		for _, pkg := range pkgs {
			root := mkAction(a, pkg)
			root.isroot = true
			roots = append(roots, root)
		}
	}

	// Execute the graph in parallel.
	execAll(roots)

	return roots
}

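// applyFixes applies the suggested fixes attached to the diagnostics
// of the given actions (and their dependencies), rewriting the
// affected source files in place. It reports an error if two fixes
// produce overlapping, non-equivalent edits.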
func applyFixes(roots []*action) error {
	visited := make(map[*action]bool)
	var apply func(*action) error
	var visitAll func(actions []*action) error
	visitAll = func(actions []*action) error {
		for _, act := range actions {
			if !visited[act] {
				visited[act] = true
				if err := visitAll(act.deps); err != nil {
					return err
				}
				if err := apply(act); err != nil {
					return err
				}
			}
		}
		return nil
	}

	// TODO(matloob): Is this tree business too complicated? (After all this is Go!)
	// Just create a set (map) of edits, sort by pos and call it a day?

	// An offsetedit is a TextEdit using byte offsets instead of token.Pos.
	type offsetedit struct {
		start, end int
		newText    []byte
	}
	type node struct {
		edit        offsetedit
		left, right *node
	}

	// Edits x and y are equivalent.
	equiv := func(x, y offsetedit) bool {
		return x.start == y.start && x.end == y.end && bytes.Equal(x.newText, y.newText)
	}

	var insert func(tree **node, edit offsetedit) error
	insert = func(treeptr **node, edit offsetedit) error {
		if *treeptr == nil {
			*treeptr = &node{edit, nil, nil}
			return nil
		}
		tree := *treeptr
		if edit.end <= tree.edit.start {
			return insert(&tree.left, edit)
		} else if edit.start >= tree.edit.end {
			return insert(&tree.right, edit)
		}
		if equiv(edit, tree.edit) { // equivalent edits?
			// We skip over equivalent edits without considering them
			// an error. This handles identical edits coming from the
			// multiple ways of loading a package into a
			// *packages.Package for testing, e.g. packages "p" and "p [p.test]".
			return nil
		}

		// Overlapping text edit.
		return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)",
			edit.start, edit.end, tree.edit.start, tree.edit.end)
	}

	editsForFile := make(map[*token.File]*node)

	apply = func(act *action) error {
		for _, diag := range act.diagnostics {
			for _, sf := range diag.SuggestedFixes {
				for _, edit := range sf.TextEdits {
					// Validate the edit.
					if edit.Pos > edit.End {
						return fmt.Errorf(
							"diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)",
							act.a.Name, edit.Pos, edit.End)
					}
					file, endfile := act.pkg.Fset.File(edit.Pos), act.pkg.Fset.File(edit.End)
					if file == nil || endfile == nil || file != endfile {
						return fmt.Errorf(
							"diagnostic for analysis %v contains Suggested Fix with malformed edit spanning files %v and %v",
							act.a.Name, file.Name(), endfile.Name())
					}
					start, end := file.Offset(edit.Pos), file.Offset(edit.End)

					// TODO(matloob): Validate that edits do not affect other packages.
					root := editsForFile[file]
					if err := insert(&root, offsetedit{start, end, edit.NewText}); err != nil {
						return err
					}
					editsForFile[file] = root // In case the root changed
				}
			}
		}
		return nil
	}

	if err := visitAll(roots); err != nil {
		return err
	}

	fset := token.NewFileSet() // Shared by parse calls below
	// Now we've got a set of valid edits for each file. Get the new file contents.
	for f, tree := range editsForFile {
		contents, err := ioutil.ReadFile(f.Name())
		if err != nil {
			return err
		}

		cur := 0 // current position in the file

		var out bytes.Buffer

		var recurse func(*node)
		recurse = func(node *node) {
			if node.left != nil {
				recurse(node.left)
			}

			edit := node.edit
			if edit.start > cur {
				out.Write(contents[cur:edit.start])
				out.Write(edit.newText)
			} else if cur == 0 && edit.start == 0 { // edit starts at first character?
				out.Write(edit.newText)
			}
			cur = edit.end

			if node.right != nil {
				recurse(node.right)
			}
		}
		recurse(tree)
		// Write out the rest of the file.
		if cur < len(contents) {
			out.Write(contents[cur:])
		}

		// Try to format the file.
		ff, err := parser.ParseFile(fset, f.Name(), out.Bytes(), parser.ParseComments)
		if err == nil {
			var buf bytes.Buffer
			if err = format.Node(&buf, fset, ff); err == nil {
				out = buf
			}
		}

		if err := ioutil.WriteFile(f.Name(), out.Bytes(), 0644); err != nil {
			return err
		}
	}
	return nil
}

// printDiagnostics prints the diagnostics for the root packages in either
// plain text or JSON format. JSON format also includes errors for any
// dependencies.
//
// It returns the exitcode: in plain mode, 0 for success, 1 for analysis
// errors, and 3 for diagnostics. We avoid 2 since the flag package uses
// it. JSON mode always succeeds at printing errors and diagnostics in a
// structured form to stdout.
func printDiagnostics(roots []*action) (exitcode int) {
	// Print the output.
	//
	// Print diagnostics only for root packages,
	// but errors for all packages.
	printed := make(map[*action]bool)
	var print func(*action)
	var visitAll func(actions []*action)
	visitAll = func(actions []*action) {
		for _, act := range actions {
			if !printed[act] {
				printed[act] = true
				visitAll(act.deps)
				print(act)
			}
		}
	}

	if analysisflags.JSON {
		// JSON output
		tree := make(analysisflags.JSONTree)
		print = func(act *action) {
			var diags []analysis.Diagnostic
			if act.isroot {
				diags = act.diagnostics
			}
			tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err)
		}
		visitAll(roots)
		tree.Print()
	} else {
		// plain text output

		// De-duplicate diagnostics by position (not token.Pos) to
		// avoid double-reporting in source files that belong to
		// multiple packages, such as foo and foo.test.
		type key struct {
			pos token.Position
			end token.Position
			*analysis.Analyzer
			message string
		}
		seen := make(map[key]bool)

		print = func(act *action) {
			if act.err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
				exitcode = 1 // analysis failed, at least partially
				return
			}
			if act.isroot {
				for _, diag := range act.diagnostics {
					// We don't display a.Name/f.Category
					// as most users don't care.

					posn := act.pkg.Fset.Position(diag.Pos)
					end := act.pkg.Fset.Position(diag.End)
					k := key{posn, end, act.a, diag.Message}
					if seen[k] {
						continue // duplicate
					}
					seen[k] = true

					analysisflags.PrintPlain(act.pkg.Fset, diag)
				}
			}
		}
		visitAll(roots)

		if exitcode == 0 && len(seen) > 0 {
			exitcode = 3 // successfully produced diagnostics
		}
	}

	// Print timing info.
	if dbg('t') {
		if !dbg('p') {
			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
		}
		var all []*action
		var total time.Duration
		for act := range printed {
			all = append(all, act)
			total += act.duration
		}
		sort.Slice(all, func(i, j int) bool {
			return all[i].duration > all[j].duration
		})

		// Print actions accounting for 90% of the total.
		var sum time.Duration
		for _, act := range all {
			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
			sum += act.duration
			if sum >= total*9/10 {
				break
			}
		}
	}

	return exitcode
}

// needFacts reports whether any analysis required by the specified set
// needs facts. If so, we must load the entire program from source.
func needFacts(analyzers []*analysis.Analyzer) bool {
	seen := make(map[*analysis.Analyzer]bool)
	var q []*analysis.Analyzer // for BFS
	q = append(q, analyzers...)
	for len(q) > 0 {
		a := q[0]
		q = q[1:]
		if !seen[a] {
			seen[a] = true
			if len(a.FactTypes) > 0 {
				return true
			}
			q = append(q, a.Requires...)
		}
	}
	return false
}

// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type action struct {
	once         sync.Once
	a            *analysis.Analyzer
	pkg          *packages.Package
	pass         *analysis.Pass
	isroot       bool
	deps         []*action
	objectFacts  map[objectFactKey]analysis.Fact
	packageFacts map[packageFactKey]analysis.Fact
	result       interface{}
	diagnostics  []analysis.Diagnostic
	err          error
	duration     time.Duration
}

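// An objectFactKey identifies a fact by the object it describes and the
// fact's concrete type; a packageFactKey does the same for package-level
// facts.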
type objectFactKey struct {
	obj types.Object
	typ reflect.Type
}

type packageFactKey struct {
	pkg *types.Package
	typ reflect.Type
}

func (act *action) String() string {
	return fmt.Sprintf("%s@%s", act.a, act.pkg)
}

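// execAll executes the given actions, in parallel unless the 'p' debug
// flag is set, and waits for all of them to complete.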
func execAll(actions []*action) {
	sequential := dbg('p')
	var wg sync.WaitGroup
	for _, act := range actions {
		wg.Add(1)
		work := func(act *action) {
			act.exec()
			wg.Done()
		}
		if sequential {
			work(act)
		} else {
			go work(act)
		}
	}
	wg.Wait()
}

func (act *action) exec() { act.once.Do(act.execOnce) }

func (act *action) execOnce() {
	// Analyze dependencies.
	execAll(act.deps)

	// TODO(adonovan): uncomment this during profiling.
	// It won't build pre-go1.11 but conditional compilation
	// using build tags isn't warranted.
	//
	// ctx, task := trace.NewTask(context.Background(), "exec")
	// trace.Log(ctx, "pass", act.String())
	// defer task.End()

	// Record time spent in this node but not its dependencies.
	// In parallel mode, due to GC/scheduler contention, the
	// time is 5x higher than in sequential mode, even with a
	// semaphore limiting the number of threads here.
	// So use -debug=tp.
	if dbg('t') {
		t0 := time.Now()
		defer func() { act.duration = time.Since(t0) }()
	}

	// Report an error if any dependency failed.
	var failed []string
	for _, dep := range act.deps {
		if dep.err != nil {
			failed = append(failed, dep.String())
		}
	}
	if failed != nil {
		sort.Strings(failed)
		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
		return
	}

	// Plumb the output values of the dependencies
	// into the inputs of this action. Also facts.
	inputs := make(map[*analysis.Analyzer]interface{})
	act.objectFacts = make(map[objectFactKey]analysis.Fact)
	act.packageFacts = make(map[packageFactKey]analysis.Fact)
	for _, dep := range act.deps {
		if dep.pkg == act.pkg {
			// Same package, different analysis (horizontal edge):
			// in-memory outputs of prerequisite analyzers
			// become inputs to this analysis pass.
			inputs[dep.a] = dep.result

		} else if dep.a == act.a { // (always true)
			// Same analysis, different package (vertical edge):
			// serialized facts produced by prerequisite analysis
			// become available to this analysis pass.
			inheritFacts(act, dep)
		}
	}

	// Run the analysis.
	pass := &analysis.Pass{
		Analyzer:     act.a,
		Fset:         act.pkg.Fset,
		Files:        act.pkg.Syntax,
		OtherFiles:   act.pkg.OtherFiles,
		IgnoredFiles: act.pkg.IgnoredFiles,
		Pkg:          act.pkg.Types,
		TypesInfo:    act.pkg.TypesInfo,
		TypesSizes:   act.pkg.TypesSizes,
		TypeErrors:   act.pkg.TypeErrors,

		ResultOf:          inputs,
		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
		ImportObjectFact:  act.importObjectFact,
		ExportObjectFact:  act.exportObjectFact,
		ImportPackageFact: act.importPackageFact,
		ExportPackageFact: act.exportPackageFact,
		AllObjectFacts:    act.allObjectFacts,
		AllPackageFacts:   act.allPackageFacts,
	}
	act.pass = pass

	var err error
	if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
		err = fmt.Errorf("analysis skipped due to errors in package")
	} else {
		act.result, err = pass.Analyzer.Run(pass)
		if err == nil {
			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
				err = fmt.Errorf(
					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
					pass.Pkg.Path(), pass.Analyzer, got, want)
			}
		}
	}
	act.err = err

	// disallow calls after Run
	pass.ExportObjectFact = nil
	pass.ExportPackageFact = nil
}

// inheritFacts populates act.objectFacts and act.packageFacts with
// facts obtained from its dependency, dep.
func inheritFacts(act, dep *action) {
	serialize := dbg('s')

	for key, fact := range dep.objectFacts {
		// Filter out facts related to objects
		// that are irrelevant downstream
		// (equivalently: not in the compiler export data).
		if !exportedFrom(key.obj, dep.pkg.Types) {
			if false {
				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
			}
			continue
		}

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces.
		if serialize {
			encodedFact, err := codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
			}
			fact = encodedFact
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
		}
		act.objectFacts[key] = fact
	}

	for key, fact := range dep.packageFacts {
		// TODO: filter out facts that belong to
		// packages not mentioned in the export data
		// to prevent side channels.

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces
		// and is deterministic.
		if serialize {
			encodedFact, err := codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
			fact = encodedFact
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
		}
		act.packageFacts[key] = fact
	}
}

// codeFact encodes then decodes a fact,
// just to exercise that logic.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
	// We encode facts one at a time.
	// A real modular driver would emit all facts
	// into one encoder to improve gob efficiency.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
		return nil, err
	}

	// Encode it twice and assert that we get the same bits.
	// This helps detect nondeterministic Gob encoding (e.g. of maps).
	var buf2 bytes.Buffer
	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
		return nil, err
	}
	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
	}

	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
		return nil, err
	}
	return new, nil
}
// exportedFrom reports whether obj may be visible to a package that imports pkg.
// This includes not just the exported members of pkg, but also unexported
// constants, types, fields, and methods, perhaps belonging to other packages,
// that find their way into the API.
// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
//
// TODO(adonovan): do more accurate filtering by walking the type graph.
func exportedFrom(obj types.Object, pkg *types.Package) bool {
	switch obj := obj.(type) {
	case *types.Func:
		return obj.Exported() && obj.Pkg() == pkg ||
			obj.Type().(*types.Signature).Recv() != nil
	case *types.Var:
		if obj.IsField() {
			return true
		}
		// We can't filter more aggressively than this because we need
		// to consider function parameters exported, but have no way
		// of telling apart function parameters from local variables.
		return obj.Pkg() == pkg
	case *types.TypeName, *types.Const:
		return true
	}
	return false // Nil, Builtin, Label, or PkgName
}

// importObjectFact implements Pass.ImportObjectFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importObjectFact copies the fact value to *ptr.
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
	if obj == nil {
		panic("nil object")
	}
	key := objectFactKey{obj, factType(ptr)}
	if v, ok := act.objectFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportObjectFact implements Pass.ExportObjectFact.
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if act.pass.ExportObjectFact == nil {
		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
	}

	if obj.Pkg() != act.pkg.Types {
		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
			act.a, act.pkg, obj, fact)
	}

	key := objectFactKey{obj, factType(fact)}
	act.objectFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		objstr := types.ObjectString(obj, (*types.Package).Name)
		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
	}
}

// allObjectFacts implements Pass.AllObjectFacts.
func (act *action) allObjectFacts() []analysis.ObjectFact {
	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
	for k := range act.objectFacts {
		facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
	}
	return facts
}

// importPackageFact implements Pass.ImportPackageFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importPackageFact copies the fact value to *ptr.
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
	if pkg == nil {
		panic("nil package")
	}
	key := packageFactKey{pkg, factType(ptr)}
	if v, ok := act.packageFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportPackageFact implements Pass.ExportPackageFact.
func (act *action) exportPackageFact(fact analysis.Fact) {
	if act.pass.ExportPackageFact == nil {
		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
	}

	key := packageFactKey{act.pass.Pkg, factType(fact)}
	act.packageFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
			act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
	}
}

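// factType returns the dynamic type of fact, which must be a pointer.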
func factType(fact analysis.Fact) reflect.Type {
	t := reflect.TypeOf(fact)
	if t.Kind() != reflect.Ptr {
		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
	}
	return t
}

// allPackageFacts implements Pass.AllPackageFacts.
func (act *action) allPackageFacts() []analysis.PackageFact {
	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
	for k := range act.packageFacts {
		facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
	}
	return facts
}

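// dbg reports whether the single-letter debug flag b is set in Debug.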
func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }