1// Copyright 2017 Google Inc. All rights reserved.
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15package main
16
17import (
18	"context"
19	"flag"
20	"fmt"
21	"io"
22	"io/ioutil"
23	"os"
24	"path/filepath"
25	"runtime"
26	"strings"
27	"sync"
28	"syscall"
29	"time"
30
31	"android/soong/finder"
32	"android/soong/ui/build"
33	"android/soong/ui/logger"
34	"android/soong/ui/status"
35	"android/soong/ui/terminal"
36	"android/soong/ui/tracer"
37	"android/soong/zip"
38)
39
// Command-line flags.
var (
	numJobs = flag.Int("j", 0, "number of parallel jobs [0=autodetect]")

	keepArtifacts = flag.Bool("keep", false, "keep archives of artifacts")
	incremental   = flag.Bool("incremental", false, "run in incremental mode (saving intermediates)")

	outDir             = flag.String("out", "", "path to store output directories (defaults to tmpdir under $OUT when empty)")
	alternateResultDir = flag.Bool("dist", false, "write select results to $DIST_DIR (or <out>/dist when empty)")

	onlyConfig = flag.Bool("only-config", false, "Only run product config (not Soong or Kati)")
	onlySoong  = flag.Bool("only-soong", false, "Only run product config and Soong (not Kati)")

	buildVariant = flag.String("variant", "eng", "build variant to use")

	skipProducts    = flag.String("skip-products", "", "comma-separated list of products to skip (known failures, etc)")
	includeProducts = flag.String("products", "", "comma-separated list of products to build")

	shardCount = flag.Int("shard-count", 1, "split the products into multiple shards (to spread the build onto multiple machines, etc)")
	shard      = flag.Int("shard", 1, "1-indexed shard to execute")
)

// Number of lines kept from the head and tail of a failing product's log
// when it is summarized for display.
const (
	errorLeadingLines  = 20
	errorTrailingLines = 20
)
61
62func errMsgFromLog(filename string) string {
63	if filename == "" {
64		return ""
65	}
66
67	data, err := ioutil.ReadFile(filename)
68	if err != nil {
69		return ""
70	}
71
72	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
73	if len(lines) > errorLeadingLines+errorTrailingLines+1 {
74		lines[errorLeadingLines] = fmt.Sprintf("... skipping %d lines ...",
75			len(lines)-errorLeadingLines-errorTrailingLines)
76
77		lines = append(lines[:errorLeadingLines+1],
78			lines[len(lines)-errorTrailingLines:]...)
79	}
80	var buf strings.Builder
81	for _, line := range lines {
82		buf.WriteString("> ")
83		buf.WriteString(line)
84		buf.WriteString("\n")
85	}
86	return buf.String()
87}
88
89// TODO(b/70370883): This tool uses a lot of open files -- over the default
90// soft limit of 1024 on some systems. So bump up to the hard limit until I fix
91// the algorithm.
92func setMaxFiles(log logger.Logger) {
93	var limits syscall.Rlimit
94
95	err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limits)
96	if err != nil {
97		log.Println("Failed to get file limit:", err)
98		return
99	}
100
101	log.Verbosef("Current file limits: %d soft, %d hard", limits.Cur, limits.Max)
102	if limits.Cur == limits.Max {
103		return
104	}
105
106	limits.Cur = limits.Max
107	err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limits)
108	if err != nil {
109		log.Println("Failed to increase file limit:", err)
110	}
111}
112
// inList reports whether str is equal to any element of list.
func inList(str string, list []string) bool {
	for i := range list {
		if list[i] == str {
			return true
		}
	}
	return false
}
121
122func copyFile(from, to string) error {
123	fromFile, err := os.Open(from)
124	if err != nil {
125		return err
126	}
127	defer fromFile.Close()
128
129	toFile, err := os.Create(to)
130	if err != nil {
131		return err
132	}
133	defer toFile.Close()
134
135	_, err = io.Copy(toFile, fromFile)
136	return err
137}
138
// mpContext carries the shared state that each per-product build needs:
// the top-level context, logger, status tool, tracer, source finder, and
// build config, plus the root directory for per-product log output.
type mpContext struct {
	Context context.Context
	Logger  logger.Logger
	Status  status.ToolStatus
	Tracer  tracer.Tracer
	Finder  *finder.Finder
	Config  build.Config

	// LogsDir receives one subdirectory of logs per product built.
	LogsDir string
}
149
// main builds every requested product in parallel, each in its own OUT_DIR,
// and exits non-zero if any product fails. Which stages run per product is
// controlled by -only-config/-only-soong; the product set is controlled by
// -products/-skip-products/-shard/-shard-count.
func main() {
	stdio := terminal.StdioImpl{}

	output := terminal.NewStatusOutput(stdio.Stdout(), "", false,
		build.OsEnvironment().IsEnvTrue("ANDROID_QUIET_BUILD"))

	log := logger.New(output)
	defer log.Cleanup()

	flag.Parse()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	trace := tracer.New(log)
	defer trace.Close()

	stat := &status.Status{}
	defer stat.Finish()
	stat.AddOutput(output)

	// failures counts failed actions/error messages so the exit status
	// can be chosen at the end of main.
	var failures failureCount
	stat.AddOutput(&failures)

	build.SetupSignals(log, cancel, func() {
		trace.Close()
		log.Cleanup()
		stat.Finish()
	})

	buildCtx := build.Context{ContextImpl: &build.ContextImpl{
		Context: ctx,
		Logger:  log,
		Tracer:  trace,
		Writer:  output,
		Status:  stat,
	}}

	config := build.NewConfig(buildCtx)
	if *outDir == "" {
		name := "multiproduct"
		if !*incremental {
			// Non-incremental runs get a unique timestamped directory.
			name += "-" + time.Now().Format("20060102150405")
		}

		*outDir = filepath.Join(config.OutDir(), name)

		// Ensure the empty files exist in the output directory
		// containing our output directory too. This is mostly for
		// safety, but also triggers the ninja_build file so that our
		// build servers know that they can parse the output as if it
		// was ninja output.
		build.SetupOutDir(buildCtx, config)

		if err := os.MkdirAll(*outDir, 0777); err != nil {
			log.Fatalf("Failed to create tempdir: %v", err)
		}
	}
	config.Environment().Set("OUT_DIR", *outDir)
	log.Println("Output directory:", *outDir)

	logsDir := filepath.Join(config.OutDir(), "logs")
	os.MkdirAll(logsDir, 0777)

	build.SetupOutDir(buildCtx, config)
	// With -dist, the top-level log and trace also go under DIST_DIR.
	if *alternateResultDir {
		distLogsDir := filepath.Join(config.DistDir(), "logs")
		os.MkdirAll(distLogsDir, 0777)
		log.SetOutput(filepath.Join(distLogsDir, "soong.log"))
		trace.SetOutput(filepath.Join(distLogsDir, "build.trace"))
	} else {
		log.SetOutput(filepath.Join(config.OutDir(), "soong.log"))
		trace.SetOutput(filepath.Join(config.OutDir(), "build.trace"))
	}

	// Autodetect the job count when -j is unset: one build per 4 CPUs,
	// capped at one build per 20GB of RAM, minimum of 1.
	var jobs = *numJobs
	if jobs < 1 {
		jobs = runtime.NumCPU() / 4

		ramGb := int(config.TotalRAM() / 1024 / 1024 / 1024)
		if ramJobs := ramGb / 20; ramGb > 0 && jobs > ramJobs {
			jobs = ramJobs
		}

		if jobs < 1 {
			jobs = 1
		}
	}
	log.Verbosef("Using %d parallel jobs", jobs)

	setMaxFiles(log)

	finder := build.NewSourceFinder(buildCtx, config)
	defer finder.Shutdown()

	build.FindSources(buildCtx, config, finder)

	// Ask product config for the complete list of known products.
	vars, err := build.DumpMakeVars(buildCtx, config, nil, []string{"all_named_products"})
	if err != nil {
		log.Fatal(err)
	}
	var productsList []string
	allProducts := strings.Fields(vars["all_named_products"])

	if *includeProducts != "" {
		// -products: restrict to the named products, failing fast if
		// any of them are unknown.
		missingProducts := []string{}
		for _, product := range strings.Split(*includeProducts, ",") {
			if inList(product, allProducts) {
				productsList = append(productsList, product)
			} else {
				missingProducts = append(missingProducts, product)
			}
		}
		if len(missingProducts) > 0 {
			log.Fatalf("Products don't exist: %s\n", missingProducts)
		}
	} else {
		productsList = allProducts
	}

	// Drop any products named by -skip-products.
	finalProductsList := make([]string, 0, len(productsList))
	skipList := strings.Split(*skipProducts, ",")
	skipProduct := func(p string) bool {
		for _, s := range skipList {
			if p == s {
				return true
			}
		}
		return false
	}
	for _, product := range productsList {
		if !skipProduct(product) {
			finalProductsList = append(finalProductsList, product)
		} else {
			log.Verbose("Skipping: ", product)
		}
	}

	// Validate the sharding flags, then keep only this shard's slice of
	// the product list.
	if *shard < 1 {
		log.Fatalf("--shard value must be >= 1, not %d\n", *shard)
	} else if *shardCount < 1 {
		log.Fatalf("--shard-count value must be >= 1, not %d\n", *shardCount)
	} else if *shard > *shardCount {
		log.Fatalf("--shard (%d) must not be greater than --shard-count (%d)\n", *shard,
			*shardCount)
	} else if *shardCount > 1 {
		finalProductsList = splitList(finalProductsList, *shardCount)[*shard-1]
	}

	log.Verbose("Got product list: ", finalProductsList)

	s := buildCtx.Status.StartTool()
	s.SetTotalActions(len(finalProductsList))

	mpCtx := &mpContext{
		Context: ctx,
		Logger:  log,
		Status:  s,
		Tracer:  trace,

		Finder: finder,
		Config: config,

		LogsDir: logsDir,
	}

	// Feed the products to the workers through a buffered channel, then
	// close it so the workers know when to stop.
	products := make(chan string, len(productsList))
	go func() {
		defer close(products)
		for _, product := range finalProductsList {
			products <- product
		}
	}()

	// Worker pool: each goroutine builds products until the channel is
	// drained and closed (a receive on the closed channel yields "").
	var wg sync.WaitGroup
	for i := 0; i < jobs; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case product := <-products:
					if product == "" {
						return
					}
					buildProduct(mpCtx, product)
				}
			}
		}()
	}
	wg.Wait()

	// With -dist, bundle all per-product logs into DIST_DIR/logs.zip.
	if *alternateResultDir {
		args := zip.ZipArgs{
			FileArgs: []zip.FileArg{
				{GlobDir: logsDir, SourcePrefixToStrip: logsDir},
			},
			OutputFilePath:   filepath.Join(config.DistDir(), "logs.zip"),
			NumParallelJobs:  runtime.NumCPU(),
			CompressionLevel: 5,
		}
		if err := zip.Zip(args); err != nil {
			log.Fatalf("Error zipping logs: %v", err)
		}
	}

	s.Finish()

	// Exit non-zero (via log.Fatal) if any product failed.
	if failures == 1 {
		log.Fatal("1 failure")
	} else if failures > 1 {
		log.Fatalf("%d failures", failures)
	} else {
		fmt.Fprintln(output, "Success")
	}
}
366
// buildProduct runs the selected build stages for a single product in its
// own OUT_DIR (<out>/<product>), logging to <LogsDir>/<product>. A build
// failure is recovered and reported as a failed action carrying a snippet
// of the product's log, so other products keep building.
func buildProduct(mpctx *mpContext, product string) {
	var stdLog string

	outDir := filepath.Join(mpctx.Config.OutDir(), product)
	logsDir := filepath.Join(mpctx.LogsDir, product)

	if err := os.MkdirAll(outDir, 0777); err != nil {
		mpctx.Logger.Fatalf("Error creating out directory: %v", err)
	}
	if err := os.MkdirAll(logsDir, 0777); err != nil {
		mpctx.Logger.Fatalf("Error creating log directory: %v", err)
	}

	// All writer output for this product's build goes to std.log.
	stdLog = filepath.Join(logsDir, "std.log")
	f, err := os.Create(stdLog)
	if err != nil {
		mpctx.Logger.Fatalf("Error creating std.log: %v", err)
	}
	defer f.Close()

	// Per-product logger: writes to std.log, with a copy in soong.log.
	log := logger.New(f)
	defer log.Cleanup()
	log.SetOutput(filepath.Join(logsDir, "soong.log"))

	action := &status.Action{
		Description: product,
		Outputs:     []string{product},
	}
	mpctx.Status.StartAction(action)
	// If the build below panics (e.g. via the per-product log.Fatal),
	// report this action as failed with the tail of its log attached
	// instead of taking down the whole tool.
	defer logger.Recover(func(err error) {
		mpctx.Status.FinishAction(status.ActionResult{
			Action: action,
			Error:  err,
			Output: errMsgFromLog(stdLog),
		})
	})

	ctx := build.Context{ContextImpl: &build.ContextImpl{
		Context: mpctx.Context,
		Logger:  log,
		Tracer:  mpctx.Tracer,
		Writer:  f,
		Thread:  mpctx.Tracer.NewThread(product),
		Status:  &status.Status{},
	}}
	ctx.Status.AddOutput(terminal.NewStatusOutput(ctx.Writer, "", false,
		build.OsEnvironment().IsEnvTrue("ANDROID_QUIET_BUILD")))

	config := build.NewConfig(ctx, flag.Args()...)
	config.Environment().Set("OUT_DIR", outDir)
	// When artifacts aren't kept, request an empty ninja file — the
	// generated outputs won't be archived. (See EMPTY_NINJA_FILE in the
	// build package for the exact effect.)
	if !*keepArtifacts {
		config.Environment().Set("EMPTY_NINJA_FILE", "true")
	}
	build.FindSources(ctx, config, mpctx.Finder)
	config.Lunch(ctx, product, *buildVariant)

	// After the build (even on failure): archive the artifacts when
	// -keep is set, and delete the product's out directory unless
	// running incrementally.
	defer func() {
		if *keepArtifacts {
			args := zip.ZipArgs{
				FileArgs: []zip.FileArg{
					{
						GlobDir:             outDir,
						SourcePrefixToStrip: outDir,
					},
				},
				OutputFilePath:   filepath.Join(mpctx.Config.OutDir(), product+".zip"),
				NumParallelJobs:  runtime.NumCPU(),
				CompressionLevel: 5,
			}
			if err := zip.Zip(args); err != nil {
				log.Fatalf("Error zipping artifacts: %v", err)
			}
		}
		if !*incremental {
			os.RemoveAll(outDir)
		}
	}()

	// Select build stages: product config always; Soong unless
	// -only-config; Kati unless -only-config or -only-soong.
	buildWhat := build.BuildProductConfig
	if !*onlyConfig {
		buildWhat |= build.BuildSoong
		if !*onlySoong {
			buildWhat |= build.BuildKati
		}
	}

	before := time.Now()
	build.Build(ctx, config, buildWhat)

	// Save std_full.log if Kati re-read the makefiles
	if buildWhat&build.BuildKati != 0 {
		if after, err := os.Stat(config.KatiBuildNinjaFile()); err == nil && after.ModTime().After(before) {
			err := copyFile(stdLog, filepath.Join(filepath.Dir(stdLog), "std_full.log"))
			if err != nil {
				log.Fatalf("Error copying log file: %s", err)
			}
		}
	}

	mpctx.Status.FinishAction(status.ActionResult{
		Action: action,
	})
}
470
471type failureCount int
472
473func (f *failureCount) StartAction(action *status.Action, counts status.Counts) {}
474
475func (f *failureCount) FinishAction(result status.ActionResult, counts status.Counts) {
476	if result.Error != nil {
477		*f += 1
478	}
479}
480
481func (f *failureCount) Message(level status.MsgLevel, message string) {
482	if level >= status.ErrorLvl {
483		*f += 1
484	}
485}
486
487func (f *failureCount) Flush() {}
488
489func (f *failureCount) Write(p []byte) (int, error) {
490	// discard writes
491	return len(p), nil
492}
493
// splitList partitions list into shardCount contiguous sub-slices (sharing
// the original backing array). Sizes differ by at most one: the first
// len(list)%shardCount shards each receive one extra element.
func splitList(list []string, shardCount int) (ret [][]string) {
	base := len(list) / shardCount
	remainder := len(list) % shardCount
	start := 0
	for i := 0; i < shardCount; i++ {
		size := base
		if i < remainder {
			size++
		}
		ret = append(ret, list[start:start+size])
		start += size
	}
	return
}
508