go1.5/ 775 0 0 0 12641202400 105005ustar00millermillergo1.5/src/ 775 0 0 0 12641202403 112725ustar00millermillergo1.5/src/cmd/ 775 0 0 0 12641202403 120355ustar00millermillergo1.5/src/cmd/go/ 775 0 0 0 12641202403 124425ustar00millermillergo1.5/src/cmd/go/build.go 640 0 0 302550 12562734702 14145ustar00millermiller// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bufio" "bytes" "container/heap" "debug/elf" "errors" "flag" "fmt" "go/build" "io" "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "regexp" "runtime" "strconv" "strings" "sync" "time" ) var cmdBuild = &Command{ UsageLine: "build [-o output] [-i] [build flags] [packages]", Short: "compile packages and dependencies", Long: ` Build compiles the packages named by the import paths, along with their dependencies, but it does not install the results. If the arguments to build are a list of .go files, build treats them as a list of source files specifying a single package. When compiling a single main package, build writes the resulting executable to an output file named after the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe') or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe'). The '.exe' suffix is added when writing a Windows executable. When compiling multiple packages or a single non-main package, build compiles the packages but discards the resulting object, serving only as a check that the packages can be built. The -o flag, only allowed when compiling a single package, forces build to write the resulting executable or object to the named output file, instead of the default behavior described in the last two paragraphs. The -i flag installs the packages that are dependencies of the target. 
The build flags are shared by the build, clean, get, install, list, run, and test commands: -a force rebuilding of packages that are already up-to-date. In Go releases, does not apply to the standard library. -n print the commands but do not run them. -p n the number of builds that can be run in parallel. The default is the number of CPUs available, except on darwin/arm which defaults to 1. -race enable data race detection. Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64. -v print the names of packages as they are compiled. -work print the name of the temporary work directory and do not delete it when exiting. -x print the commands. -asmflags 'flag list' arguments to pass on each go tool asm invocation. -buildmode mode build mode to use. See 'go help buildmode' for more. -compiler name name of compiler to use, as in runtime.Compiler (gccgo or gc). -gccgoflags 'arg list' arguments to pass on each gccgo compiler/linker invocation. -gcflags 'arg list' arguments to pass on each go tool compile invocation. -installsuffix suffix a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to race or, if set explicitly, has _race appended to it. Using a -buildmode option that requires non-default compile flags has a similar effect. -ldflags 'flag list' arguments to pass on each go tool link invocation. -linkshared link against shared libraries previously created with -buildmode=shared -pkgdir dir install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location. -tags 'tag list' a list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. 
-toolexec 'cmd args' a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run 'cmd args /path/to/asm '. The list flags accept a space-separated list of strings. To embed spaces in an element in the list, surround it with either single or double quotes. For more about specifying packages, see 'go help packages'. For more about where packages and binaries are installed, run 'go help gopath'. For more about calling between Go and C/C++, run 'go help c'. Note: Build adheres to certain conventions such as those described by 'go help gopath'. Not all projects can follow these conventions, however. Installations that have their own conventions or that use a separate software build system may choose to use lower-level invocations such as 'go tool compile' and 'go tool link' to avoid some of the overheads and design decisions of the build tool. See also: go install, go get, go clean. `, } func init() { // break init cycle cmdBuild.Run = runBuild cmdInstall.Run = runInstall cmdBuild.Flag.BoolVar(&buildI, "i", false, "") addBuildFlags(cmdBuild) addBuildFlags(cmdInstall) if buildContext.GOOS == "darwin" { switch buildContext.GOARCH { case "arm", "arm64": // darwin/arm cannot run multiple tests simultaneously. // Parallelism is limited in go_darwin_arm_exec, but // also needs to be limited here so go test std does not // timeout tests that waiting to run. buildP = 1 } } } // Flags set by multiple commands. 
var buildA bool // -a flag var buildN bool // -n flag var buildP = runtime.NumCPU() // -p flag var buildV bool // -v flag var buildX bool // -x flag var buildI bool // -i flag var buildO = cmdBuild.Flag.String("o", "", "output file") var buildWork bool // -work flag var buildAsmflags []string // -asmflags flag var buildGcflags []string // -gcflags flag var buildLdflags []string // -ldflags flag var buildGccgoflags []string // -gccgoflags flag var buildRace bool // -race flag var buildToolExec []string // -toolexec flag var buildBuildmode string // -buildmode flag var buildLinkshared bool // -linkshared flag var buildPkgdir string // -pkgdir flag var buildContext = build.Default var buildToolchain toolchain = noToolchain{} var ldBuildmode string // buildCompiler implements flag.Var. // It implements Set by updating both // buildToolchain and buildContext.Compiler. type buildCompiler struct{} func (c buildCompiler) Set(value string) error { switch value { case "gc": buildToolchain = gcToolchain{} case "gccgo": buildToolchain = gccgoToolchain{} default: return fmt.Errorf("unknown compiler %q", value) } buildContext.Compiler = value return nil } func (c buildCompiler) String() string { return buildContext.Compiler } func init() { switch build.Default.Compiler { case "gc": buildToolchain = gcToolchain{} case "gccgo": buildToolchain = gccgoToolchain{} } } // addBuildFlags adds the flags common to the build, clean, get, // install, list, run, and test commands. 
func addBuildFlags(cmd *Command) {
	cmd.Flag.BoolVar(&buildA, "a", false, "")
	cmd.Flag.BoolVar(&buildN, "n", false, "")
	cmd.Flag.IntVar(&buildP, "p", buildP, "")
	cmd.Flag.BoolVar(&buildV, "v", false, "")
	cmd.Flag.BoolVar(&buildX, "x", false, "")
	cmd.Flag.Var((*stringsFlag)(&buildAsmflags), "asmflags", "")
	cmd.Flag.Var(buildCompiler{}, "compiler", "")
	cmd.Flag.StringVar(&buildBuildmode, "buildmode", "default", "")
	cmd.Flag.Var((*stringsFlag)(&buildGcflags), "gcflags", "")
	cmd.Flag.Var((*stringsFlag)(&buildGccgoflags), "gccgoflags", "")
	cmd.Flag.StringVar(&buildContext.InstallSuffix, "installsuffix", "", "")
	cmd.Flag.Var((*stringsFlag)(&buildLdflags), "ldflags", "")
	cmd.Flag.BoolVar(&buildLinkshared, "linkshared", false, "")
	cmd.Flag.StringVar(&buildPkgdir, "pkgdir", "", "")
	cmd.Flag.BoolVar(&buildRace, "race", false, "")
	cmd.Flag.Var((*stringsFlag)(&buildContext.BuildTags), "tags", "")
	cmd.Flag.Var((*stringsFlag)(&buildToolExec), "toolexec", "")
	cmd.Flag.BoolVar(&buildWork, "work", false, "")
}

// addBuildFlagsNX registers only the -n and -x flags on cmd,
// for commands that print/echo commands but take no other build flags.
func addBuildFlagsNX(cmd *Command) {
	cmd.Flag.BoolVar(&buildN, "n", false, "")
	cmd.Flag.BoolVar(&buildX, "x", false, "")
}

// isSpaceByte reports whether c is an ASCII space character
// (space, tab, newline, or carriage return).
func isSpaceByte(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}

// fileExtSplit expects a filename and returns the name
// and ext (without the dot). If the file has no
// extension, ext will be empty.
func fileExtSplit(file string) (name, ext string) {
	dotExt := filepath.Ext(file)
	name = file[:len(file)-len(dotExt)]
	if dotExt != "" {
		ext = dotExt[1:]
	}
	return
}

// stringsFlag is a flag.Value that collects a space-separated list
// of (possibly quoted) strings, e.g. for -gcflags and -ldflags.
type stringsFlag []string

// Set parses s into fields via splitQuotedFields. A nil result is
// normalized to an empty non-nil slice so that "flag was set" is
// distinguishable from "flag never set".
func (v *stringsFlag) Set(s string) error {
	var err error
	*v, err = splitQuotedFields(s)
	if *v == nil {
		*v = []string{}
	}
	return err
}

// splitQuotedFields splits s into whitespace-separated fields,
// allowing '' or "" around elements.
// Quotes further inside the string do not count.
func splitQuotedFields(s string) ([]string, error) {
	var f []string
	for len(s) > 0 {
		// Skip leading whitespace between fields.
		for len(s) > 0 && isSpaceByte(s[0]) {
			s = s[1:]
		}
		if len(s) == 0 {
			break
		}
		// Accepted quoted string. No unescaping inside.
		if s[0] == '"' || s[0] == '\'' {
			quote := s[0]
			s = s[1:]
			i := 0
			for i < len(s) && s[i] != quote {
				i++
			}
			if i >= len(s) {
				return nil, fmt.Errorf("unterminated %c string", quote)
			}
			f = append(f, s[:i])
			s = s[i+1:]
			continue
		}
		// Unquoted field: runs to the next whitespace byte.
		i := 0
		for i < len(s) && !isSpaceByte(s[i]) {
			i++
		}
		f = append(f, s[:i])
		s = s[i:]
	}
	return f, nil
}

// String implements flag.Value; the value is only used when printing
// flag defaults.
// NOTE(review): upstream go1.5 returns "<stringsFlag>" here; the
// literal may have been lost in text extraction — verify against the
// original file before relying on this value.
func (v *stringsFlag) String() string {
	return ""
}

// pkgsMain returns the packages in pkgs whose package name is "main"
// (i.e. commands).
func pkgsMain(pkgs []*Package) (res []*Package) {
	for _, p := range pkgs {
		if p.Name == "main" {
			res = append(res, p)
		}
	}
	return res
}

// pkgsNotMain returns the packages in pkgs whose package name is not
// "main" (i.e. importable libraries).
func pkgsNotMain(pkgs []*Package) (res []*Package) {
	for _, p := range pkgs {
		if p.Name != "main" {
			res = append(res, p)
		}
	}
	return res
}

// pkgsFilter selects which of the named packages a command operates
// on; buildModeInit replaces it according to -buildmode.
var pkgsFilter = func(pkgs []*Package) []*Package { return pkgs }

// buildModeInit validates -buildmode/-linkshared against the current
// platform and toolchain, and configures the package filter, linker
// build mode (ldBuildmode), code-generation flags, and install suffix
// accordingly. Must run before constructing any build actions.
func buildModeInit() {
	_, gccgo := buildToolchain.(gccgoToolchain)
	var codegenArg string
	platform := goos + "/" + goarch
	switch buildBuildmode {
	case "archive":
		pkgsFilter = pkgsNotMain
	case "c-archive":
		pkgsFilter = func(p []*Package) []*Package {
			if len(p) != 1 || p[0].Name != "main" {
				fatalf("-buildmode=c-archive requires exactly one main package")
			}
			return p
		}
		exeSuffix = ".a"
		ldBuildmode = "c-archive"
	case "c-shared":
		pkgsFilter = pkgsMain
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			switch platform {
			case "linux/amd64":
				codegenArg = "-shared"
			case "linux/arm":
				buildAsmflags = append(buildAsmflags, "-shared")
			case "darwin/amd64":
			case "android/arm":
			default:
				fatalf("-buildmode=c-shared not supported on %s\n", platform)
			}
		}
		ldBuildmode = "c-shared"
	case "default":
		ldBuildmode = "exe"
	case "exe":
		pkgsFilter = pkgsMain
		ldBuildmode = "exe"
	case "shared":
		pkgsFilter = pkgsNotMain
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			switch platform {
			case "linux/amd64":
			default:
				fatalf("-buildmode=shared not supported on %s\n", platform)
			}
			codegenArg = "-dynlink"
		}
		if *buildO != "" {
			fatalf("-buildmode=shared and -o not supported together")
		}
		ldBuildmode = "shared"
	default:
		fatalf("buildmode=%s not supported", buildBuildmode)
	}
	if buildLinkshared {
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			if platform != "linux/amd64" {
				fmt.Fprintf(os.Stderr, "go %s: -linkshared is only supported on linux/amd64\n", flag.Args()[0])
				os.Exit(2)
			}
			codegenArg = "-dynlink"
			// TODO(mwhudson): remove -w when that gets fixed in linker.
			buildLdflags = append(buildLdflags, "-linkshared", "-w")
		}
	}
	if codegenArg != "" {
		if gccgo {
			buildGccgoflags = append(buildGccgoflags, codegenArg)
		} else {
			buildAsmflags = append(buildAsmflags, codegenArg)
			buildGcflags = append(buildGcflags, codegenArg)
		}
		// Keep non-default builds out of the default package
		// directories by deriving an install suffix from the
		// codegen flag (e.g. "dynlink").
		if buildContext.InstallSuffix != "" {
			buildContext.InstallSuffix += "_"
		}
		buildContext.InstallSuffix += codegenArg[1:]
	}
}

// runBuild implements 'go build': it constructs an action graph for
// the named packages (build only, or install of dependencies under
// -i) and executes it.
func runBuild(cmd *Command, args []string) {
	raceInit()
	buildModeInit()
	var b builder
	b.init()

	pkgs := packagesForBuild(args)

	// A single main package with no -o: name the output after the
	// last element of the import path.
	if len(pkgs) == 1 && pkgs[0].Name == "main" && *buildO == "" {
		_, *buildO = path.Split(pkgs[0].ImportPath)
		*buildO += exeSuffix
	}

	// sanity check some often mis-used options
	switch buildContext.Compiler {
	case "gccgo":
		if len(buildGcflags) != 0 {
			fmt.Println("go build: when using gccgo toolchain, please pass compiler flags using -gccgoflags, not -gcflags")
		}
		if len(buildLdflags) != 0 {
			fmt.Println("go build: when using gccgo toolchain, please pass linker flags using -gccgoflags, not -ldflags")
		}
	case "gc":
		if len(buildGccgoflags) != 0 {
			fmt.Println("go build: when using gc toolchain, please pass compile flags using -gcflags, and linker flags using -ldflags")
		}
	}

	depMode := modeBuild
	if buildI {
		depMode = modeInstall
	}

	if *buildO != "" {
		if len(pkgs) > 1 {
			fatalf("go build: cannot use -o with multiple packages")
		} else if len(pkgs) == 0 {
			fatalf("no packages to build")
		}
		p := pkgs[0]
		p.target = *buildO
		p.Stale = true // must build - not up to date
		// modeInstall here "installs" to the -o path rather than
		// the usual package directory.
		a := b.action(modeInstall, depMode, p)
		b.do(a)
		return
	}

	var a *action
	if buildBuildmode == "shared" {
		a = b.libaction(libname(args), pkgsFilter(packages(args)), modeBuild, depMode)
	} else {
		a = &action{}
		for _, p := range pkgsFilter(packages(args)) {
			a.deps = append(a.deps, b.action(modeBuild, depMode, p))
		}
	}
	b.do(a)
}

var cmdInstall = &Command{
	UsageLine: "install [build flags] [packages]",
	Short:     "compile and install packages and dependencies",
	Long: `
Install compiles and installs the packages named by the import paths,
along with their dependencies.

For more about the build flags, see 'go help build'.
For more about specifying packages, see 'go help packages'.

See also: go build, go get, go clean.
	`,
}

// libname returns the filename to use for the shared library when using
// -buildmode=shared. The rules we use are:
// 1) Drop any trailing "/..."s if present
// 2) Change / to -
// 3) Join arguments with ,
// So std -> libstd.so
// a b/... -> liba,b.so
// gopkg.in/tomb.v2 -> libgopkg.in-tomb.v2.so
func libname(args []string) string {
	var libname string
	for _, arg := range args {
		arg = strings.TrimSuffix(arg, "/...")
		arg = strings.Replace(arg, "/", "-", -1)
		if libname == "" {
			libname = arg
		} else {
			libname += "," + arg
		}
	}
	// TODO(mwhudson): Needs to change for platforms that use different naming
	// conventions...
	return "lib" + libname + ".so"
}

// runInstall implements 'go install': it builds and installs the
// named packages, diagnosing packages that have no install location,
// and deferring installation of toolchain binaries to the end of the
// build so they are not overwritten while in use.
func runInstall(cmd *Command, args []string) {
	raceInit()
	buildModeInit()
	pkgs := pkgsFilter(packagesForBuild(args))

	for _, p := range pkgs {
		if p.Target == "" && (!p.Standard || p.ImportPath != "unsafe") {
			switch {
			case p.gobinSubdir:
				errorf("go install: cannot install cross-compiled binaries when GOBIN is set")
			case p.cmdline:
				errorf("go install: no install location for .go files listed on command line (GOBIN not set)")
			case p.ConflictDir != "":
				errorf("go install: no install location for %s: hidden by %s", p.Dir, p.ConflictDir)
			default:
				errorf("go install: no install location for directory %s outside GOPATH\n"+
					"\tFor more details see: go help gopath", p.Dir)
			}
		}
	}
	exitIfErrors()

	var b builder
	b.init()
	var a *action
	if buildBuildmode == "shared" {
		a = b.libaction(libname(args), pkgs, modeInstall, modeInstall)
	} else {
		a = &action{}
		var tools []*action
		for _, p := range pkgs {
			// If p is a tool, delay the installation until the end of the build.
			// This avoids installing assemblers/compilers that are being executed
			// by other steps in the build.
			// cmd/cgo is handled specially in b.action, so that we can
			// both build and use it in the same 'go install'.
			action := b.action(modeInstall, modeInstall, p)
			if goTools[p.ImportPath] == toTool && p.ImportPath != "cmd/cgo" {
				a.deps = append(a.deps, action.deps...)
				action.deps = append(action.deps, a)
				tools = append(tools, action)
				continue
			}
			a.deps = append(a.deps, action)
		}
		if len(tools) > 0 {
			a = &action{
				deps: tools,
			}
		}
	}
	b.do(a)
	exitIfErrors()

	// Success. If this command is 'go install' with no arguments
	// and the current directory (the implicit argument) is a command,
	// remove any leftover command binary from a previous 'go build'.
	// The binary is installed; it's not needed here anymore.
	// And worse it might be a stale copy, which you don't want to find
	// instead of the installed one if $PATH contains dot.
	// One way to view this behavior is that it is as if 'go install' first
	// runs 'go build' and the moves the generated file to the install dir.
	// See issue 9645.
	if len(args) == 0 && len(pkgs) == 1 && pkgs[0].Name == "main" {
		// Compute file 'go build' would have created.
		// If it exists and is an executable file, remove it.
		_, targ := filepath.Split(pkgs[0].ImportPath)
		targ += exeSuffix
		if filepath.Join(pkgs[0].Dir, targ) != pkgs[0].Target {
			// maybe $GOBIN is the current directory
			fi, err := os.Stat(targ)
			if err == nil {
				m := fi.Mode()
				if m.IsRegular() {
					if m&0111 != 0 || goos == "windows" { // windows never sets executable bit
						os.Remove(targ)
					}
				}
			}
		}
	}
}

// Global build parameters (used during package load)
var (
	goarch    string
	goos      string
	exeSuffix string
)

// Capture GOOS/GOARCH from the build context and derive the
// executable suffix for the target OS.
func init() {
	goarch = buildContext.GOARCH
	goos = buildContext.GOOS
	if goos == "windows" {
		exeSuffix = ".exe"
	}
}

// A builder holds global state about a build.
// It does not hold per-package state, because we
// build packages in parallel, and the builder is shared.
type builder struct {
	work        string               // the temporary work directory (ends in filepath.Separator)
	actionCache map[cacheKey]*action // a cache of already-constructed actions
	mkdirCache  map[string]bool      // a cache of created directories
	print       func(args ...interface{}) (int, error)

	output    sync.Mutex
	scriptDir string // current directory in printed script

	exec      sync.Mutex
	readySema chan bool
	ready     actionQueue
}

// An action represents a single action in the action graph.
type action struct {
	p          *Package      // the package this action works on
	deps       []*action     // actions that must happen before this one
	triggers   []*action     // inverse of deps
	cgo        *action       // action for cgo binary if needed
	args       []string      // additional args for runProgram
	testOutput *bytes.Buffer // test output buffer

	f          func(*builder, *action) error // the action itself (nil = no-op)
	ignoreFail bool                          // whether to run f even if dependencies fail

	// Generated files, directories.
	link   bool   // target is executable, not just package
	pkgdir string // the -I or -L argument to use when importing this package
	objdir string // directory for intermediate objects
	objpkg string // the intermediate package .a file created during the action
	target string // goal of the action: the created package or executable

	// Execution state.
	pending  int  // number of deps yet to complete
	priority int  // relative execution priority
	failed   bool // whether the action failed
}

// cacheKey is the key for the action cache.
type cacheKey struct {
	mode  buildMode
	p     *Package
	shlib string
}

// buildMode specifies the build mode:
// are we just building things or also installing the results?
type buildMode int

const (
	modeBuild buildMode = iota
	modeInstall
)

var (
	goroot    = filepath.Clean(runtime.GOROOT())
	gobin     = os.Getenv("GOBIN")
	gorootBin = filepath.Join(goroot, "bin")
	gorootPkg = filepath.Join(goroot, "pkg")
	gorootSrc = filepath.Join(goroot, "src")
)

// init prepares the builder for use: it wires up output printing,
// allocates the caches, and creates the temporary work directory
// (or uses the placeholder "$WORK" under -n).
func (b *builder) init() {
	var err error
	b.print = func(a ...interface{}) (int, error) {
		return fmt.Fprint(os.Stderr, a...)
	}
	b.actionCache = make(map[cacheKey]*action)
	b.mkdirCache = make(map[string]bool)

	if buildN {
		b.work = "$WORK"
	} else {
		b.work, err = ioutil.TempDir("", "go-build")
		if err != nil {
			fatalf("%s", err)
		}
		if buildX || buildWork {
			fmt.Fprintf(os.Stderr, "WORK=%s\n", b.work)
		}
		if !buildWork {
			// Remove the work directory at exit unless -work asked
			// to keep it.
			workdir := b.work
			atexit(func() { os.RemoveAll(workdir) })
		}
	}
}

// goFilesPackage creates a package for building a collection of Go files
// (typically named on the command line). The target is named p.a for
// package p or named after the first Go file for package main.
func goFilesPackage(gofiles []string) *Package {
	// TODO: Remove this restriction.
	for _, f := range gofiles {
		if !strings.HasSuffix(f, ".go") {
			fatalf("named files must be .go files")
		}
	}

	var stk importStack
	ctxt := buildContext
	ctxt.UseAllFiles = true

	// Synthesize fake "directory" that only shows the named files,
	// to make it look like this is a standard package or
	// command directory. So that local imports resolve
	// consistently, the files must all be in the same directory.
	var dirent []os.FileInfo
	var dir string
	for _, file := range gofiles {
		fi, err := os.Stat(file)
		if err != nil {
			fatalf("%s", err)
		}
		if fi.IsDir() {
			fatalf("%s is a directory, should be a Go file", file)
		}
		dir1, _ := filepath.Split(file)
		if dir1 == "" {
			dir1 = "./"
		}
		if dir == "" {
			dir = dir1
		} else if dir != dir1 {
			fatalf("named files must all be in one directory; have %s and %s", dir, dir1)
		}
		dirent = append(dirent, fi)
	}
	ctxt.ReadDir = func(string) ([]os.FileInfo, error) { return dirent, nil }

	var err error
	if dir == "" {
		dir = cwd
	}
	dir, err = filepath.Abs(dir)
	if err != nil {
		fatalf("%s", err)
	}

	// Import errors are deferred to pkg.load, which records them on
	// the package rather than aborting immediately.
	bp, err := ctxt.ImportDir(dir, 0)
	pkg := new(Package)
	pkg.local = true
	pkg.cmdline = true
	pkg.load(&stk, bp, err)
	pkg.localPrefix = dirToImportPath(dir)
	pkg.ImportPath = "command-line-arguments"
	pkg.target = ""

	if pkg.Name == "main" {
		_, elem := filepath.Split(gofiles[0])
		exe := elem[:len(elem)-len(".go")] + exeSuffix
		if *buildO == "" {
			*buildO = exe
		}
		if gobin != "" {
			pkg.target = filepath.Join(gobin, exe)
		}
	}
	pkg.Target = pkg.target
	pkg.Stale = true

	computeStale(pkg)
	return pkg
}

// readpkglist returns the list of packages that were built into the shared library
// at shlibpath. For the native toolchain this list is stored, newline separated, in
// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the
// .go_export section.
func readpkglist(shlibpath string) (pkgs []*Package) { var stk importStack if _, gccgo := buildToolchain.(gccgoToolchain); gccgo { f, _ := elf.Open(shlibpath) sect := f.Section(".go_export") data, _ := sect.Data() scanner := bufio.NewScanner(bytes.NewBuffer(data)) for scanner.Scan() { t := scanner.Text() if strings.HasPrefix(t, "pkgpath ") { t = strings.TrimPrefix(t, "pkgpath ") t = strings.TrimSuffix(t, ";") pkgs = append(pkgs, loadPackage(t, &stk)) } } } else { pkglistbytes, err := readELFNote(shlibpath, "Go\x00\x00", 1) if err != nil { fatalf("readELFNote failed: %v", err) } scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes)) for scanner.Scan() { t := scanner.Text() pkgs = append(pkgs, loadPackage(t, &stk)) } } return } // action returns the action for applying the given operation (mode) to the package. // depMode is the action to use when building dependencies. // action never looks for p in a shared library. func (b *builder) action(mode buildMode, depMode buildMode, p *Package) *action { return b.action1(mode, depMode, p, false) } // action1 returns the action for applying the given operation (mode) to the package. // depMode is the action to use when building dependencies. // action1 will look for p in a shared library if lookshared is true. 
func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, lookshared bool) *action { shlib := "" if lookshared { shlib = p.Shlib } key := cacheKey{mode, p, shlib} a := b.actionCache[key] if a != nil { return a } if shlib != "" { key2 := cacheKey{modeInstall, nil, shlib} a = b.actionCache[key2] if a != nil { b.actionCache[key] = a return a } pkgs := readpkglist(shlib) a = b.libaction(filepath.Base(shlib), pkgs, modeInstall, depMode) b.actionCache[key2] = a b.actionCache[key] = a return a } a = &action{p: p, pkgdir: p.build.PkgRoot} if p.pkgdir != "" { // overrides p.t a.pkgdir = p.pkgdir } b.actionCache[key] = a for _, p1 := range p.imports { ls := buildLinkshared // If p1 is part of the same shared library as p, we need the action // that builds p here, not the shared libary or we get action loops. if p1.Shlib == p.Shlib { ls = false } a.deps = append(a.deps, b.action1(depMode, depMode, p1, ls)) } // If we are not doing a cross-build, then record the binary we'll // generate for cgo as a dependency of the build of any package // using cgo, to make sure we do not overwrite the binary while // a package is using it. If this is a cross-build, then the cgo we // are writing is not the cgo we need to use. if goos == runtime.GOOS && goarch == runtime.GOARCH && !buildRace { if (len(p.CgoFiles) > 0 || p.Standard && p.ImportPath == "runtime/cgo") && !buildLinkshared && buildBuildmode != "shared" { var stk importStack p1 := loadPackage("cmd/cgo", &stk) if p1.Error != nil { fatalf("load cmd/cgo: %v", p1.Error) } a.cgo = b.action(depMode, depMode, p1) a.deps = append(a.deps, a.cgo) } } if p.Standard { switch p.ImportPath { case "builtin", "unsafe": // Fake packages - nothing to build. return a } // gccgo standard library is "fake" too. if _, ok := buildToolchain.(gccgoToolchain); ok { // the target name is needed for cgo. a.target = p.target return a } } if !p.Stale && p.target != "" { // p.Stale==false implies that p.target is up-to-date. 
// Record target name for use by actions depending on this one. a.target = p.target return a } if p.local && p.target == "" { // Imported via local path. No permanent target. mode = modeBuild } work := p.pkgdir if work == "" { work = b.work } a.objdir = filepath.Join(work, a.p.ImportPath, "_obj") + string(filepath.Separator) a.objpkg = buildToolchain.pkgpath(work, a.p) a.link = p.Name == "main" switch mode { case modeInstall: a.f = (*builder).install a.deps = []*action{b.action1(modeBuild, depMode, p, lookshared)} a.target = a.p.target // Install header for cgo in c-archive and c-shared modes. if p.usesCgo() && (buildBuildmode == "c-archive" || buildBuildmode == "c-shared") { ah := &action{ p: a.p, deps: []*action{a.deps[0]}, f: (*builder).installHeader, pkgdir: a.pkgdir, objdir: a.objdir, target: a.target[:len(a.target)-len(filepath.Ext(a.target))] + ".h", } a.deps = append(a.deps, ah) } case modeBuild: a.f = (*builder).build a.target = a.objpkg if a.link { // An executable file. (This is the name of a temporary file.) // Because we run the temporary file in 'go run' and 'go test', // the name will show up in ps listings. If the caller has specified // a name, use that instead of a.out. The binary is generated // in an otherwise empty subdirectory named exe to avoid // naming conflicts. The only possible conflict is if we were // to create a top-level package named exe. name := "a.out" if p.exeName != "" { name = p.exeName } else if goos == "darwin" && buildBuildmode == "c-shared" && p.target != "" { // On OS X, the linker output name gets recorded in the // shared library's LC_ID_DYLIB load command. // The code invoking the linker knows to pass only the final // path element. Arrange that the path element matches what // we'll install it as; otherwise the library is only loadable as "a.out". 
_, name = filepath.Split(p.target) } a.target = a.objdir + filepath.Join("exe", name) + exeSuffix } } return a } func (b *builder) libaction(libname string, pkgs []*Package, mode, depMode buildMode) *action { a := &action{} if mode == modeBuild { a.f = (*builder).linkShared a.target = filepath.Join(b.work, libname) for _, p := range pkgs { if p.target == "" { continue } a.deps = append(a.deps, b.action(depMode, depMode, p)) } } else if mode == modeInstall { // Currently build mode shared forces external linking mode, and // external linking mode forces an import of runtime/cgo. So if it // was not passed on the command line and it is not present in // another shared library, add it here. seencgo := false _, gccgo := buildToolchain.(gccgoToolchain) if !gccgo { for _, p := range pkgs { seencgo = seencgo || (p.Standard && p.ImportPath == "runtime/cgo") } if !seencgo { var stk importStack p := loadPackage("runtime/cgo", &stk) if p.Error != nil { fatalf("load runtime/cgo: %v", p.Error) } computeStale(p) // If runtime/cgo is in another shared library, then that's // also the shared library that contains runtime, so // something will depend on it and so runtime/cgo's staleness // will be checked when processing that library. if p.Shlib == "" || p.Shlib == libname { pkgs = append([]*Package{}, pkgs...) pkgs = append(pkgs, p) } } } // Figure out where the library will go. var libdir string for _, p := range pkgs { plibdir := p.build.PkgTargetRoot if gccgo { plibdir = filepath.Join(plibdir, "shlibs") } if libdir == "" { libdir = plibdir } else if libdir != plibdir { fatalf("multiple roots %s & %s", libdir, plibdir) } } a.target = filepath.Join(libdir, libname) // Now we can check whether we need to rebuild it. 
stale := false var built time.Time if fi, err := os.Stat(a.target); err == nil { built = fi.ModTime() } for _, p := range pkgs { if p.target == "" { continue } stale = stale || p.Stale lstat, err := os.Stat(p.target) if err != nil || lstat.ModTime().After(built) { stale = true } a.deps = append(a.deps, b.action(depMode, depMode, p)) } if stale { a.f = (*builder).install buildAction := b.libaction(libname, pkgs, modeBuild, depMode) a.deps = []*action{buildAction} for _, p := range pkgs { if p.target == "" { continue } shlibnameaction := &action{} shlibnameaction.f = (*builder).installShlibname shlibnameaction.target = p.target[:len(p.target)-2] + ".shlibname" a.deps = append(a.deps, shlibnameaction) shlibnameaction.deps = append(shlibnameaction.deps, buildAction) } } } else { fatalf("unregonized mode %v", mode) } return a } // actionList returns the list of actions in the dag rooted at root // as visited in a depth-first post-order traversal. func actionList(root *action) []*action { seen := map[*action]bool{} all := []*action{} var walk func(*action) walk = func(a *action) { if seen[a] { return } seen[a] = true for _, a1 := range a.deps { walk(a1) } all = append(all, a) } walk(root) return all } // allArchiveActions returns a list of the archive dependencies of root. // This is needed because if package p depends on package q that is in libr.so, the // action graph looks like p->libr.so->q and so just scanning through p's // dependencies does not find the import dir for q. func allArchiveActions(root *action) []*action { seen := map[*action]bool{} r := []*action{} var walk func(*action) walk = func(a *action) { if seen[a] { return } seen[a] = true if strings.HasSuffix(a.target, ".so") || a == root { for _, a1 := range a.deps { walk(a1) } } else if strings.HasSuffix(a.target, ".a") { r = append(r, a) } } walk(root) return r } // do runs the action graph rooted at root. 
func (b *builder) do(root *action) { // Build list of all actions, assigning depth-first post-order priority. // The original implementation here was a true queue // (using a channel) but it had the effect of getting // distracted by low-level leaf actions to the detriment // of completing higher-level actions. The order of // work does not matter much to overall execution time, // but when running "go test std" it is nice to see each test // results as soon as possible. The priorities assigned // ensure that, all else being equal, the execution prefers // to do what it would have done first in a simple depth-first // dependency order traversal. all := actionList(root) for i, a := range all { a.priority = i } b.readySema = make(chan bool, len(all)) // Initialize per-action execution state. for _, a := range all { for _, a1 := range a.deps { a1.triggers = append(a1.triggers, a) } a.pending = len(a.deps) if a.pending == 0 { b.ready.push(a) b.readySema <- true } } // Handle runs a single action and takes care of triggering // any actions that are runnable as a result. handle := func(a *action) { var err error if a.f != nil && (!a.failed || a.ignoreFail) { err = a.f(b, a) } // The actions run in parallel but all the updates to the // shared work state are serialized through b.exec. b.exec.Lock() defer b.exec.Unlock() if err != nil { if err == errPrintedOutput { setExitStatus(2) } else { errorf("%s", err) } a.failed = true } for _, a0 := range a.triggers { if a.failed { a0.failed = true } if a0.pending--; a0.pending == 0 { b.ready.push(a0) b.readySema <- true } } if a == root { close(b.readySema) } } var wg sync.WaitGroup // Kick off goroutines according to parallelism. // If we are using the -n flag (just printing commands) // drop the parallelism to 1, both to make the output // deterministic and because there is no real work anyway. 
par := buildP if buildN { par = 1 } for i := 0; i < par; i++ { wg.Add(1) go func() { defer wg.Done() for { select { case _, ok := <-b.readySema: if !ok { return } // Receiving a value from b.readySema entitles // us to take from the ready queue. b.exec.Lock() a := b.ready.pop() b.exec.Unlock() handle(a) case <-interrupted: setExitStatus(1) return } } }() } wg.Wait() } // hasString reports whether s appears in the list of strings. func hasString(strings []string, s string) bool { for _, t := range strings { if s == t { return true } } return false } // build is the action for building a single package or command. func (b *builder) build(a *action) (err error) { // Return an error if the package has CXX files but it's not using // cgo nor SWIG, since the CXX files can only be processed by cgo // and SWIG. if len(a.p.CXXFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() { return fmt.Errorf("can't build package %s because it contains C++ files (%s) but it's not using cgo nor SWIG", a.p.ImportPath, strings.Join(a.p.CXXFiles, ",")) } // Same as above for Objective-C files if len(a.p.MFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() { return fmt.Errorf("can't build package %s because it contains Objective-C files (%s) but it's not using cgo nor SWIG", a.p.ImportPath, strings.Join(a.p.MFiles, ",")) } defer func() { if err != nil && err != errPrintedOutput { err = fmt.Errorf("go build %s: %v", a.p.ImportPath, err) } }() if buildN { // In -n mode, print a banner between packages. // The banner is five lines so that when changes to // different sections of the bootstrap script have to // be merged, the banners give patch something // to use to find its context. 
fmt.Printf("\n#\n# %s\n#\n\n", a.p.ImportPath) } if buildV { fmt.Fprintf(os.Stderr, "%s\n", a.p.ImportPath) } if a.p.Standard && a.p.ImportPath == "runtime" && buildContext.Compiler == "gc" && (!hasString(a.p.GoFiles, "zgoos_"+buildContext.GOOS+".go") || !hasString(a.p.GoFiles, "zgoarch_"+buildContext.GOARCH+".go")) { return fmt.Errorf("%s/%s must be bootstrapped using make%v", buildContext.GOOS, buildContext.GOARCH, defaultSuffix()) } // Make build directory. obj := a.objdir if err := b.mkdir(obj); err != nil { return err } // make target directory dir, _ := filepath.Split(a.target) if dir != "" { if err := b.mkdir(dir); err != nil { return err } } var gofiles, cgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string gofiles = append(gofiles, a.p.GoFiles...) cgofiles = append(cgofiles, a.p.CgoFiles...) cfiles = append(cfiles, a.p.CFiles...) sfiles = append(sfiles, a.p.SFiles...) cxxfiles = append(cxxfiles, a.p.CXXFiles...) if a.p.usesCgo() || a.p.usesSwig() { if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a.p); err != nil { return } } // Run SWIG on each .swig and .swigcxx file. // Each run will generate two files, a .go file and a .c or .cxx file. // The .go file will use import "C" and is to be processed by cgo. if a.p.usesSwig() { outGo, outC, outCXX, err := b.swig(a.p, obj, pcCFLAGS) if err != nil { return err } cgofiles = append(cgofiles, outGo...) cfiles = append(cfiles, outC...) cxxfiles = append(cxxfiles, outCXX...) } // Run cgo. if a.p.usesCgo() || a.p.usesSwig() { // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc. // There is one exception: runtime/cgo's job is to bridge the // cgo and non-cgo worlds, so it necessarily has files in both. // In that case gcc only gets the gcc_* files. 
var gccfiles []string if a.p.Standard && a.p.ImportPath == "runtime/cgo" { filter := func(files, nongcc, gcc []string) ([]string, []string) { for _, f := range files { if strings.HasPrefix(f, "gcc_") { gcc = append(gcc, f) } else { nongcc = append(nongcc, f) } } return nongcc, gcc } cfiles, gccfiles = filter(cfiles, cfiles[:0], gccfiles) sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles) } else { gccfiles = append(cfiles, sfiles...) cfiles = nil sfiles = nil } cgoExe := tool("cgo") if a.cgo != nil && a.cgo.target != "" { cgoExe = a.cgo.target } outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles) if err != nil { return err } cgoObjects = append(cgoObjects, outObj...) gofiles = append(gofiles, outGo...) } if len(gofiles) == 0 { return &build.NoGoError{Dir: a.p.Dir} } // If we're doing coverage, preprocess the .go files and put them in the work directory if a.p.coverMode != "" { for i, file := range gofiles { var sourceFile string var coverFile string var key string if strings.HasSuffix(file, ".cgo1.go") { // cgo files have absolute paths base := filepath.Base(file) sourceFile = file coverFile = filepath.Join(obj, base) key = strings.TrimSuffix(base, ".cgo1.go") + ".go" } else { sourceFile = filepath.Join(a.p.Dir, file) coverFile = filepath.Join(obj, file) key = file } cover := a.p.coverVars[key] if cover == nil || isTestFile(file) { // Not covering this file. continue } if err := b.cover(a, coverFile, sourceFile, 0666, cover.Var); err != nil { return err } gofiles[i] = coverFile } } // Prepare Go import path list. inc := b.includeArgs("-I", allArchiveActions(a)) // Compile Go. 
ofile, out, err := buildToolchain.gc(b, a.p, a.objpkg, obj, len(sfiles) > 0, inc, gofiles) if len(out) > 0 { b.showOutput(a.p.Dir, a.p.ImportPath, b.processOutput(out)) if err != nil { return errPrintedOutput } } if err != nil { return err } if ofile != a.objpkg { objects = append(objects, ofile) } // Copy .h files named for goos or goarch or goos_goarch // to names using GOOS and GOARCH. // For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h. _goos_goarch := "_" + goos + "_" + goarch _goos := "_" + goos _goarch := "_" + goarch for _, file := range a.p.HFiles { name, ext := fileExtSplit(file) switch { case strings.HasSuffix(name, _goos_goarch): targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0644, true); err != nil { return err } case strings.HasSuffix(name, _goarch): targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0644, true); err != nil { return err } case strings.HasSuffix(name, _goos): targ := file[:len(name)-len(_goos)] + "_GOOS." + ext if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0644, true); err != nil { return err } } } for _, file := range cfiles { out := file[:len(file)-len(".c")] + ".o" if err := buildToolchain.cc(b, a.p, obj, obj+out, file); err != nil { return err } objects = append(objects, out) } // Assemble .s files. for _, file := range sfiles { out := file[:len(file)-len(".s")] + ".o" if err := buildToolchain.asm(b, a.p, obj, obj+out, file); err != nil { return err } objects = append(objects, out) } // NOTE(rsc): On Windows, it is critically important that the // gcc-compiled objects (cgoObjects) be listed after the ordinary // objects in the archive. I do not know why this is. // https://golang.org/issue/2601 objects = append(objects, cgoObjects...) // Add system object files. 
for _, syso := range a.p.SysoFiles { objects = append(objects, filepath.Join(a.p.Dir, syso)) } // Pack into archive in obj directory. // If the Go compiler wrote an archive, we only need to add the // object files for non-Go sources to the archive. // If the Go compiler wrote an archive and the package is entirely // Go sources, there is no pack to execute at all. if len(objects) > 0 { if err := buildToolchain.pack(b, a.p, obj, a.objpkg, objects); err != nil { return err } } // Link if needed. if a.link { // The compiler only cares about direct imports, but the // linker needs the whole dependency tree. all := actionList(a) all = all[:len(all)-1] // drop a if err := buildToolchain.ld(b, a, a.target, all, a.objpkg, objects); err != nil { return err } } return nil } // Calls pkg-config if needed and returns the cflags/ldflags needed to build the package. func (b *builder) getPkgConfigFlags(p *Package) (cflags, ldflags []string, err error) { if pkgs := p.CgoPkgConfig; len(pkgs) > 0 { var out []byte out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--cflags", pkgs) if err != nil { b.showOutput(p.Dir, "pkg-config --cflags "+strings.Join(pkgs, " "), string(out)) b.print(err.Error() + "\n") err = errPrintedOutput return } if len(out) > 0 { cflags = strings.Fields(string(out)) } out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--libs", pkgs) if err != nil { b.showOutput(p.Dir, "pkg-config --libs "+strings.Join(pkgs, " "), string(out)) b.print(err.Error() + "\n") err = errPrintedOutput return } if len(out) > 0 { ldflags = strings.Fields(string(out)) } } return } func (b *builder) installShlibname(a *action) error { a1 := a.deps[0] err := ioutil.WriteFile(a.target, []byte(filepath.Base(a1.target)+"\n"), 0644) if err != nil { return err } if buildX { b.showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.target), a.target) } return nil } func (b *builder) linkShared(a *action) (err error) { allactions := actionList(a) allactions = 
allactions[:len(allactions)-1] return buildToolchain.ldShared(b, a.deps, a.target, allactions) } // install is the action for installing a single package or executable. func (b *builder) install(a *action) (err error) { defer func() { if err != nil && err != errPrintedOutput { err = fmt.Errorf("go install %s: %v", a.p.ImportPath, err) } }() a1 := a.deps[0] perm := os.FileMode(0644) if a1.link { switch buildBuildmode { case "c-archive", "c-shared": default: perm = 0755 } } // make target directory dir, _ := filepath.Split(a.target) if dir != "" { if err := b.mkdir(dir); err != nil { return err } } // remove object dir to keep the amount of // garbage down in a large build. On an operating system // with aggressive buffering, cleaning incrementally like // this keeps the intermediate objects from hitting the disk. if !buildWork { defer os.RemoveAll(a1.objdir) defer os.Remove(a1.target) } return b.moveOrCopyFile(a, a.target, a1.target, perm, false) } // includeArgs returns the -I or -L directory list for access // to the results of the list of actions. func (b *builder) includeArgs(flag string, all []*action) []string { inc := []string{} incMap := map[string]bool{ b.work: true, // handled later gorootPkg: true, "": true, // ignore empty strings } // Look in the temporary space for results of test-specific actions. // This is the $WORK/my/package/_test directory for the // package being built, so there are few of these. for _, a1 := range all { if a1.p == nil { continue } if dir := a1.pkgdir; dir != a1.p.build.PkgRoot && !incMap[dir] { incMap[dir] = true inc = append(inc, flag, dir) } } // Also look in $WORK for any non-test packages that have // been built but not installed. inc = append(inc, flag, b.work) // Finally, look in the installed package directories for each action. 
for _, a1 := range all { if a1.p == nil { continue } if dir := a1.pkgdir; dir == a1.p.build.PkgRoot && !incMap[dir] { incMap[dir] = true inc = append(inc, flag, a1.p.build.PkgTargetRoot) } } return inc } // moveOrCopyFile is like 'mv src dst' or 'cp src dst'. func (b *builder) moveOrCopyFile(a *action, dst, src string, perm os.FileMode, force bool) error { if buildN { b.showcmd("", "mv %s %s", src, dst) return nil } // If we can update the mode and rename to the dst, do it. // Otherwise fall back to standard copy. if err := os.Chmod(src, perm); err == nil { if err := os.Rename(src, dst); err == nil { if buildX { b.showcmd("", "mv %s %s", src, dst) } return nil } } return b.copyFile(a, dst, src, perm, force) } // copyFile is like 'cp src dst'. func (b *builder) copyFile(a *action, dst, src string, perm os.FileMode, force bool) error { if buildN || buildX { b.showcmd("", "cp %s %s", src, dst) if buildN { return nil } } sf, err := os.Open(src) if err != nil { return err } defer sf.Close() // Be careful about removing/overwriting dst. // Do not remove/overwrite if dst exists and is a directory // or a non-object file. if fi, err := os.Stat(dst); err == nil { if fi.IsDir() { return fmt.Errorf("build output %q already exists and is a directory", dst) } if !force && !isObject(dst) { return fmt.Errorf("build output %q already exists and is not an object file", dst) } } // On Windows, remove lingering ~ file from last attempt. if toolIsWindows { if _, err := os.Stat(dst + "~"); err == nil { os.Remove(dst + "~") } } os.Remove(dst) df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) if err != nil && toolIsWindows { // Windows does not allow deletion of a binary file // while it is executing. Try to move it out of the way. // If the move fails, which is likely, we'll try again the // next time we do an install of this binary. 
if err := os.Rename(dst, dst+"~"); err == nil { os.Remove(dst + "~") } df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) } if err != nil { return err } _, err = io.Copy(df, sf) df.Close() if err != nil { os.Remove(dst) return fmt.Errorf("copying %s to %s: %v", src, dst, err) } return nil } // Install the cgo export header file, if there is one. func (b *builder) installHeader(a *action) error { src := a.objdir + "_cgo_install.h" if _, err := os.Stat(src); os.IsNotExist(err) { // If the file does not exist, there are no exported // functions, and we do not install anything. return nil } dir, _ := filepath.Split(a.target) if dir != "" { if err := b.mkdir(dir); err != nil { return err } } return b.moveOrCopyFile(a, a.target, src, 0644, true) } // cover runs, in effect, // go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go func (b *builder) cover(a *action, dst, src string, perm os.FileMode, varName string) error { return b.run(a.objdir, "cover "+a.p.ImportPath, nil, buildToolExec, tool("cover"), "-mode", a.p.coverMode, "-var", varName, "-o", dst, src) } var objectMagic = [][]byte{ {'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive {'\x7F', 'E', 'L', 'F'}, // ELF {0xFE, 0xED, 0xFA, 0xCE}, // Mach-O big-endian 32-bit {0xFE, 0xED, 0xFA, 0xCF}, // Mach-O big-endian 64-bit {0xCE, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 32-bit {0xCF, 0xFA, 0xED, 0xFE}, // Mach-O little-endian 64-bit {0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00}, // PE (Windows) as generated by 6l/8l and gcc {0x00, 0x00, 0x01, 0xEB}, // Plan 9 i386 {0x00, 0x00, 0x8a, 0x97}, // Plan 9 amd64 {0x00, 0x00, 0x06, 0x47}, // Plan 9 arm } func isObject(s string) bool { f, err := os.Open(s) if err != nil { return false } defer f.Close() buf := make([]byte, 64) io.ReadFull(f, buf) for _, magic := range objectMagic { if bytes.HasPrefix(buf, magic) { return true } } return false } // fmtcmd formats a command in the manner of fmt.Sprintf but also: // // If dir is non-empty and the 
script is not in dir right now, // fmtcmd inserts "cd dir\n" before the command. // // fmtcmd replaces the value of b.work with $WORK. // fmtcmd replaces the value of goroot with $GOROOT. // fmtcmd replaces the value of b.gobin with $GOBIN. // // fmtcmd replaces the name of the current directory with dot (.) // but only when it is at the beginning of a space-separated token. // func (b *builder) fmtcmd(dir string, format string, args ...interface{}) string { cmd := fmt.Sprintf(format, args...) if dir != "" && dir != "/" { cmd = strings.Replace(" "+cmd, " "+dir, " .", -1)[1:] if b.scriptDir != dir { b.scriptDir = dir cmd = "cd " + dir + "\n" + cmd } } if b.work != "" { cmd = strings.Replace(cmd, b.work, "$WORK", -1) } return cmd } // showcmd prints the given command to standard output // for the implementation of -n or -x. func (b *builder) showcmd(dir string, format string, args ...interface{}) { b.output.Lock() defer b.output.Unlock() b.print(b.fmtcmd(dir, format, args...) + "\n") } // showOutput prints "# desc" followed by the given output. // The output is expected to contain references to 'dir', usually // the source directory for the package that has failed to build. // showOutput rewrites mentions of dir with a relative path to dir // when the relative path is shorter. This is usually more pleasant. // For example, if fmt doesn't compile and we are in src/html, // the output is // // $ go build // # fmt // ../fmt/print.go:1090: undefined: asdf // $ // // instead of // // $ go build // # fmt // /usr/gopher/go/src/fmt/print.go:1090: undefined: asdf // $ // // showOutput also replaces references to the work directory with $WORK. 
// func (b *builder) showOutput(dir, desc, out string) { prefix := "# " + desc suffix := "\n" + out if reldir := shortPath(dir); reldir != dir { suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1) suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1) } suffix = strings.Replace(suffix, " "+b.work, " $WORK", -1) b.output.Lock() defer b.output.Unlock() b.print(prefix, suffix) } // shortPath returns an absolute or relative name for path, whatever is shorter. func shortPath(path string) string { if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { return rel } return path } // relPaths returns a copy of paths with absolute paths // made relative to the current directory if they would be shorter. func relPaths(paths []string) []string { var out []string pwd, _ := os.Getwd() for _, p := range paths { rel, err := filepath.Rel(pwd, p) if err == nil && len(rel) < len(p) { p = rel } out = append(out, p) } return out } // errPrintedOutput is a special error indicating that a command failed // but that it generated output as well, and that output has already // been printed, so there's no point showing 'exit status 1' or whatever // the wait status was. The main executor, builder.do, knows not to // print this error. var errPrintedOutput = errors.New("already printed output - no need to show error") var cgoLine = regexp.MustCompile(`\[[^\[\]]+\.cgo1\.go:[0-9]+\]`) var cgoTypeSigRe = regexp.MustCompile(`\b_Ctype_\B`) // run runs the command given by cmdline in the directory dir. // If the command fails, run prints information about the failure // and returns a non-nil error. func (b *builder) run(dir string, desc string, env []string, cmdargs ...interface{}) error { out, err := b.runOut(dir, desc, env, cmdargs...) 
if len(out) > 0 { if desc == "" { desc = b.fmtcmd(dir, "%s", strings.Join(stringList(cmdargs...), " ")) } b.showOutput(dir, desc, b.processOutput(out)) if err != nil { err = errPrintedOutput } } return err } // processOutput prepares the output of runOut to be output to the console. func (b *builder) processOutput(out []byte) string { if out[len(out)-1] != '\n' { out = append(out, '\n') } messages := string(out) // Fix up output referring to cgo-generated code to be more readable. // Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19. // Replace *[100]_Ctype_foo with *[100]C.foo. // If we're using -x, assume we're debugging and want the full dump, so disable the rewrite. if !buildX && cgoLine.MatchString(messages) { messages = cgoLine.ReplaceAllString(messages, "") messages = cgoTypeSigRe.ReplaceAllString(messages, "C.") } return messages } // runOut runs the command given by cmdline in the directory dir. // It returns the command output and any errors that occurred. func (b *builder) runOut(dir string, desc string, env []string, cmdargs ...interface{}) ([]byte, error) { cmdline := stringList(cmdargs...) if buildN || buildX { var envcmdline string for i := range env { envcmdline += env[i] envcmdline += " " } envcmdline += joinUnambiguously(cmdline) b.showcmd(dir, "%s", envcmdline) if buildN { return nil, nil } } nbusy := 0 for { var buf bytes.Buffer cmd := exec.Command(cmdline[0], cmdline[1:]...) cmd.Stdout = &buf cmd.Stderr = &buf cmd.Dir = dir cmd.Env = mergeEnvLists(env, envForDir(cmd.Dir, os.Environ())) err := cmd.Run() // cmd.Run will fail on Unix if some other process has the binary // we want to run open for writing. This can happen here because // we build and install the cgo command and then run it. // If another command was kicked off while we were writing the // cgo binary, the child process for that command may be holding // a reference to the fd, keeping us from running exec. // // But, you might reasonably wonder, how can this happen? 
// The cgo fd, like all our fds, is close-on-exec, so that we need // not worry about other processes inheriting the fd accidentally. // The answer is that running a command is fork and exec. // A child forked while the cgo fd is open inherits that fd. // Until the child has called exec, it holds the fd open and the // kernel will not let us run cgo. Even if the child were to close // the fd explicitly, it would still be open from the time of the fork // until the time of the explicit close, and the race would remain. // // On Unix systems, this results in ETXTBSY, which formats // as "text file busy". Rather than hard-code specific error cases, // we just look for that string. If this happens, sleep a little // and try again. We let this happen three times, with increasing // sleep lengths: 100+200+400 ms = 0.7 seconds. // // An alternate solution might be to split the cmd.Run into // separate cmd.Start and cmd.Wait, and then use an RWLock // to make sure that copyFile only executes when no cmd.Start // call is in progress. However, cmd.Start (really syscall.forkExec) // only guarantees that when it returns, the exec is committed to // happen and succeed. It uses a close-on-exec file descriptor // itself to determine this, so we know that when cmd.Start returns, // at least one close-on-exec file descriptor has been closed. // However, we cannot be sure that all of them have been closed, // so the program might still encounter ETXTBSY even with such // an RWLock. The race window would be smaller, perhaps, but not // guaranteed to be gone. // // Sleeping when we observe the race seems to be the most reliable // option we have. // // https://golang.org/issue/3001 // if err != nil && nbusy < 3 && strings.Contains(err.Error(), "text file busy") { time.Sleep(100 * time.Millisecond << uint(nbusy)) nbusy++ continue } // err can be something like 'exit status 1'. // Add information about what program was running. 
// Note that if buf.Bytes() is non-empty, the caller usually // shows buf.Bytes() and does not print err at all, so the // prefix here does not make most output any more verbose. if err != nil { err = errors.New(cmdline[0] + ": " + err.Error()) } return buf.Bytes(), err } } // joinUnambiguously prints the slice, quoting where necessary to make the // output unambiguous. // TODO: See issue 5279. The printing of commands needs a complete redo. func joinUnambiguously(a []string) string { var buf bytes.Buffer for i, s := range a { if i > 0 { buf.WriteByte(' ') } q := strconv.Quote(s) if s == "" || strings.Contains(s, " ") || len(q) > len(s)+2 { buf.WriteString(q) } else { buf.WriteString(s) } } return buf.String() } // mkdir makes the named directory. func (b *builder) mkdir(dir string) error { b.exec.Lock() defer b.exec.Unlock() // We can be a little aggressive about being // sure directories exist. Skip repeated calls. if b.mkdirCache[dir] { return nil } b.mkdirCache[dir] = true if buildN || buildX { b.showcmd("", "mkdir -p %s", dir) if buildN { return nil } } if err := os.MkdirAll(dir, 0777); err != nil { return err } return nil } // mkAbs returns an absolute path corresponding to // evaluating f in the directory dir. // We always pass absolute paths of source files so that // the error messages will include the full path to a file // in need of attention. func mkAbs(dir, f string) string { // Leave absolute paths alone. // Also, during -n mode we use the pseudo-directory $WORK // instead of creating an actual work directory that won't be used. // Leave paths beginning with $WORK alone too. if filepath.IsAbs(f) || strings.HasPrefix(f, "$WORK") { return f } return filepath.Join(dir, f) } type toolchain interface { // gc runs the compiler in a specific directory on a set of files // and returns the name of the generated output file. // The compiler runs in the directory dir. 
gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) // cc runs the toolchain's C compiler in a directory on a C file // to produce an output file. cc(b *builder, p *Package, objdir, ofile, cfile string) error // asm runs the assembler in a specific directory on a specific file // to generate the named output file. asm(b *builder, p *Package, obj, ofile, sfile string) error // pkgpath builds an appropriate path for a temporary package file. pkgpath(basedir string, p *Package) string // pack runs the archive packer in a specific directory to create // an archive from a set of object files. // typically it is run in the object directory. pack(b *builder, p *Package, objDir, afile string, ofiles []string) error // ld runs the linker to create an executable starting at mainpkg. ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error // ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error compiler() string linker() string } type noToolchain struct{} func noCompiler() error { log.Fatalf("unknown compiler %q", buildContext.Compiler) return nil } func (noToolchain) compiler() string { noCompiler() return "" } func (noToolchain) linker() string { noCompiler() return "" } func (noToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) { return "", nil, noCompiler() } func (noToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error { return noCompiler() } func (noToolchain) pkgpath(basedir string, p *Package) string { noCompiler() return "" } func (noToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error { return noCompiler() } func (noToolchain) ld(b *builder, root *action, out 
string, allactions []*action, mainpkg string, ofiles []string) error { return noCompiler() } func (noToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error { return noCompiler() } func (noToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error { return noCompiler() } // The Go toolchain. type gcToolchain struct{} func (gcToolchain) compiler() string { return tool("compile") } func (gcToolchain) linker() string { return tool("link") } func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) { if archive != "" { ofile = archive } else { out := "_go_.o" ofile = obj + out } gcargs := []string{"-p", p.ImportPath} if p.Name == "main" { gcargs[1] = "main" } if p.Standard && p.ImportPath == "runtime" { // runtime compiles with a special gc flag to emit // additional reflect type data. gcargs = append(gcargs, "-+") } // If we're giving the compiler the entire package (no C etc files), tell it that, // so that it can give good error messages about forward declarations. // Exceptions: a few standard packages have forward declarations for // pieces supplied behind-the-scenes by package runtime. 
extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles) if p.Standard { switch p.ImportPath { case "bytes", "net", "os", "runtime/pprof", "sync", "time": extFiles++ } } if extFiles == 0 { gcargs = append(gcargs, "-complete") } if buildContext.InstallSuffix != "" { gcargs = append(gcargs, "-installsuffix", buildContext.InstallSuffix) } if p.buildID != "" { gcargs = append(gcargs, "-buildid", p.buildID) } for _, path := range p.Imports { if i := strings.LastIndex(path, "/vendor/"); i >= 0 { gcargs = append(gcargs, "-importmap", path[i+len("/vendor/"):]+"="+path) } else if strings.HasPrefix(path, "vendor/") { gcargs = append(gcargs, "-importmap", path[len("vendor/"):]+"="+path) } } args := []interface{}{buildToolExec, tool("compile"), "-o", ofile, "-trimpath", b.work, buildGcflags, gcargs, "-D", p.localPrefix, importArgs} if ofile == archive { args = append(args, "-pack") } if asmhdr { args = append(args, "-asmhdr", obj+"go_asm.h") } for _, f := range gofiles { args = append(args, mkAbs(p.Dir, f)) } output, err = b.runOut(p.Dir, p.ImportPath, nil, args...) return ofile, output, err } func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error { // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. inc := filepath.Join(goroot, "pkg", "include") sfile = mkAbs(p.Dir, sfile) args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags, sfile} if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil { return err } return nil } // toolVerify checks that the command line args writes the same output file // if run using newTool instead. // Unused now but kept around for future use. 
func toolVerify(b *builder, p *Package, newTool string, ofile string, args []interface{}) error {
	// Re-run the same command line with the tool swapped out:
	// args[1] is the tool path, args[3] is the output file name,
	// so redirect the new tool's output to ofile+".new" for comparison.
	newArgs := make([]interface{}, len(args))
	copy(newArgs, args)
	newArgs[1] = tool(newTool)
	newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
	if err := b.run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
		return err
	}
	// Compare the two outputs byte for byte; any difference is an error.
	data1, err := ioutil.ReadFile(ofile)
	if err != nil {
		return err
	}
	data2, err := ioutil.ReadFile(ofile + ".new")
	if err != nil {
		return err
	}
	if !bytes.Equal(data1, data2) {
		return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(stringList(args...), " "), strings.Join(stringList(newArgs...), " "))
	}
	// Outputs matched; the .new copy is no longer needed.
	// (On mismatch it is deliberately left behind for inspection.)
	os.Remove(ofile + ".new")
	return nil
}

// pkgpath returns the install path for package p's archive below basedir:
// the slash-separated import path converted to OS form, with ".a" appended.
func (gcToolchain) pkgpath(basedir string, p *Package) string {
	end := filepath.FromSlash(p.ImportPath + ".a")
	return filepath.Join(basedir, end)
}

// pack adds the object files ofiles to the archive afile (paths resolved
// relative to objDir). If the archive already exists, the objects are
// appended in-process by packInternal; otherwise the pack tool is invoked
// to create it.
func (gcToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
	var absOfiles []string
	for _, f := range ofiles {
		absOfiles = append(absOfiles, mkAbs(objDir, f))
	}
	// "c" creates a new archive; switch to "r" (append) when the
	// archive file is already present on disk.
	cmd := "c"
	absAfile := mkAbs(objDir, afile)
	appending := false
	if _, err := os.Stat(absAfile); err == nil {
		appending = true
		cmd = "r"
	}
	cmdline := stringList("pack", cmd, absAfile, absOfiles)
	if appending {
		// Appending is done in-process, so under -n/-x print the
		// equivalent pack command line, marked "# internal".
		if buildN || buildX {
			b.showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
		}
		if buildN {
			return nil
		}
		if err := packInternal(b, absAfile, absOfiles); err != nil {
			b.showOutput(p.Dir, p.ImportPath, err.Error()+"\n")
			return errPrintedOutput
		}
		return nil
	}

	// Need actual pack.
	cmdline[0] = tool("pack")
	return b.run(p.Dir, p.ImportPath, nil, buildToolExec, cmdline)
}

// packInternal appends each file in ofiles to the archive afile,
// writing a fixed-width archive entry header (16-byte name, zeroed
// metadata fields, 0644 mode, size, and the "`\n" terminator) before
// each file's contents, padding odd-sized entries with a NUL byte.
func packInternal(b *builder, afile string, ofiles []string) error {
	dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0)
	if err != nil {
		return err
	}
	defer dst.Close() // only for error returns or panics
	w := bufio.NewWriter(dst)

	for _, ofile := range ofiles {
		src, err := os.Open(ofile)
		if err != nil {
			return err
		}
		fi, err := src.Stat()
		if err != nil {
			src.Close()
			return err
		}
		// Note: Not using %-16.16s format because we care
		// about bytes, not runes.
		name := fi.Name()
		if len(name) > 16 {
			name = name[:16]
		} else {
			name += strings.Repeat(" ", 16-len(name))
		}
		size := fi.Size()
		fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n",
			name, 0, 0, 0, 0644, size)
		n, err := io.Copy(w, src)
		src.Close()
		// Guard against the file changing size between Stat and Copy:
		// the header above already committed to `size` bytes.
		if err == nil && n < size {
			err = io.ErrUnexpectedEOF
		} else if err == nil && n > size {
			err = fmt.Errorf("file larger than size reported by stat")
		}
		if err != nil {
			return fmt.Errorf("copying %s to %s: %v", ofile, afile, err)
		}
		// Entries are aligned to even offsets; pad odd sizes with one NUL.
		if size&1 != 0 {
			w.WriteByte(0)
		}
	}

	if err := w.Flush(); err != nil {
		return err
	}
	// Explicit Close so a close-time write error is reported;
	// the deferred Close above then becomes a harmless no-op error path.
	return dst.Close()
}

// setextld sets the appropriate linker flags for the specified compiler.
func setextld(ldflags []string, compiler []string) []string {
	// compiler[0] is the external linker command; compiler[1:] are its
	// arguments, which must be folded into any -extldflags value.
	for _, f := range ldflags {
		if f == "-extld" || strings.HasPrefix(f, "-extld=") {
			// don't override -extld if supplied
			return ldflags
		}
	}
	ldflags = append(ldflags, "-extld="+compiler[0])
	if len(compiler) > 1 {
		extldflags := false
		add := strings.Join(compiler[1:], " ")
		// Prepend the compiler's own arguments to an existing
		// -extldflags value, whether it was given as a separate
		// argument or in -extldflags= form.
		for i, f := range ldflags {
			if f == "-extldflags" && i+1 < len(ldflags) {
				ldflags[i+1] = add + " " + ldflags[i+1]
				extldflags = true
				break
			} else if strings.HasPrefix(f, "-extldflags=") {
				ldflags[i] = "-extldflags=" + add + " " + ldflags[i][len("-extldflags="):]
				extldflags = true
				break
			}
		}
		if !extldflags {
			ldflags = append(ldflags, "-extldflags="+add)
		}
	}
	return ldflags
}

// ld invokes the Go linker to produce the executable (or other output)
// named out, starting from mainpkg, with -L search paths derived from
// allactions.
func (gcToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
	importArgs := b.includeArgs("-L", allactions)
	// C++ anywhere in the dependency graph changes which external
	// linker is chosen below (CXX instead of CC).
	cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
	for _, a := range allactions {
		if a.p != nil && (len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0) {
			cxx = true
		}
	}
	var ldflags []string
	if buildContext.InstallSuffix != "" {
		ldflags = append(ldflags, "-installsuffix", buildContext.InstallSuffix)
	}
	if root.p.omitDWARF {
		// -w: suppress DWARF generation for this package.
		ldflags = append(ldflags, "-w")
	}

	// If the user has not specified the -extld option, then specify the
	// appropriate linker. In case of C++ code, use the compiler named
	// by the CXX environment variable or defaultCXX if CXX is not set.
	// Else, use the CC environment variable and defaultCC as fallback.
	var compiler []string
	if cxx {
		compiler = envList("CXX", defaultCXX)
	} else {
		compiler = envList("CC", defaultCC)
	}
	ldflags = setextld(ldflags, compiler)
	ldflags = append(ldflags, "-buildmode="+ldBuildmode)
	if root.p.buildID != "" {
		ldflags = append(ldflags, "-buildid="+root.p.buildID)
	}
	// User-supplied -ldflags go last so they can override the defaults above.
	ldflags = append(ldflags, buildLdflags...)

	// On OS X when using external linking to build a shared library,
	// the argument passed here to -o ends up recorded in the final
	// shared library in the LC_ID_DYLIB load command.
	// To avoid putting the temporary output directory name there
	// (and making the resulting shared library useless),
	// run the link in the output directory so that -o can name
	// just the final path element.
	dir := "."
	if goos == "darwin" && buildBuildmode == "c-shared" {
		dir, out = filepath.Split(out)
	}

	return b.run(dir, root.p.ImportPath, nil, buildToolExec, tool("link"), "-o", out, importArgs, ldflags, mainpkg)
}

// ldShared invokes the Go linker in -buildmode=shared to produce the
// shared library out containing the packages built by toplevelactions.
func (gcToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
	importArgs := b.includeArgs("-L", allactions)
	ldflags := []string{"-installsuffix", buildContext.InstallSuffix}
	ldflags = append(ldflags, "-buildmode=shared")
	ldflags = append(ldflags, buildLdflags...)
	// As in ld above: any C++ in the graph selects CXX as the external linker.
	cxx := false
	for _, a := range allactions {
		if a.p != nil && (len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0) {
			cxx = true
		}
	}
	// If the user has not specified the -extld option, then specify the
	// appropriate linker. In case of C++ code, use the compiler named
	// by the CXX environment variable or defaultCXX if CXX is not set.
	// Else, use the CC environment variable and defaultCC as fallback.
	var compiler []string
	if cxx {
		compiler = envList("CXX", defaultCXX)
	} else {
		compiler = envList("CC", defaultCC)
	}
	ldflags = setextld(ldflags, compiler)
	// Pass importpath=archive pairs for each package going into the library.
	for _, d := range toplevelactions {
		if !strings.HasSuffix(d.target, ".a") { // omit unsafe etc and actions for other shared libraries
			continue
		}
		ldflags = append(ldflags, d.p.ImportPath+"="+d.target)
	}
	return b.run(".", out, nil, buildToolExec, tool("link"), "-o", out, importArgs, ldflags)
}

// cc reports an error: the gc toolchain compiles C sources only through cgo.
func (gcToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
	return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(p.Dir, cfile))
}

// The Gccgo toolchain.
// gccgoToolchain implements the builder's toolchain interface
// on top of the gccgo compiler.
type gccgoToolchain struct{}

var gccgoName, gccgoBin string

func init() {
	// $GCCGO may name an alternate gccgo binary; fall back to "gccgo".
	gccgoName = os.Getenv("GCCGO")
	if gccgoName == "" {
		gccgoName = "gccgo"
	}
	// LookPath failure is deliberately ignored here; a missing binary
	// surfaces later when a build actually tries to run it.
	gccgoBin, _ = exec.LookPath(gccgoName)
}

func (gccgoToolchain) compiler() string {
	return gccgoBin
}

func (gccgoToolchain) linker() string {
	return gccgoBin
}

// gc compiles the Go files of package p into a single object file
// (obj/_go_.o) using gccgo, returning the object path and tool output.
func (tools gccgoToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
	out := "_go_.o"
	ofile = obj + out
	gcargs := []string{"-g"}
	gcargs = append(gcargs, b.gccArchArgs()...)
	if pkgpath := gccgoPkgpath(p); pkgpath != "" {
		gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath)
	}
	if p.localPrefix != "" {
		gcargs = append(gcargs, "-fgo-relative-import-path="+p.localPrefix)
	}
	args := stringList(tools.compiler(), importArgs, "-c", gcargs, "-o", ofile, buildGccgoflags)
	for _, f := range gofiles {
		args = append(args, mkAbs(p.Dir, f))
	}

	output, err = b.runOut(p.Dir, p.ImportPath, nil, args)
	return ofile, output, err
}

// asm assembles one .s file with gccgo, defining GOOS/GOARCH/GOPKGPATH
// macros for the assembly source.
func (tools gccgoToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
	sfile = mkAbs(p.Dir, sfile)
	defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
	if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
		defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`)
	}
	defs = tools.maybePIC(defs)
	defs = append(defs, b.gccArchArgs()...)
	return b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-I", obj, "-o", ofile, defs, sfile)
}

// pkgpath maps an import path to gccgo's archive location under basedir,
// prefixing the final element with "lib" (x/y -> x/liby.a).
func (gccgoToolchain) pkgpath(basedir string, p *Package) string {
	end := filepath.FromSlash(p.ImportPath + ".a")
	afile := filepath.Join(basedir, end)
	// add "lib" to the final element
	return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile))
}

// pack archives the object files into afile using the system "ar".
func (gccgoToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
	var absOfiles []string
	for _, f := range ofiles {
		absOfiles = append(absOfiles, mkAbs(objDir, f))
	}
	return b.run(p.Dir, p.ImportPath, nil, "ar", "cru", mkAbs(objDir, afile), absOfiles)
}

// ld links a gccgo program or library. Unlike the gc toolchain, gccgo
// needs every dependency archive and every cgo LDFLAG on the final
// command line, so the action graph is walked to collect them.
func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
	// gccgo needs explicit linking with all package dependencies,
	// and all LDFLAGS from cgo dependencies.
	apackagesSeen := make(map[*Package]bool)
	afiles := []string{}
	shlibs := []string{}
	xfiles := []string{}
	ldflags := b.gccArchArgs()
	cgoldflags := []string{}
	usesCgo := false
	cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
	objc := len(root.p.MFiles) > 0

	actionsSeen := make(map[*action]bool)
	// Make a pre-order depth-first traversal of the action graph, taking note of
	// whether a shared library action has been seen on the way to an action (the
	// construction of the graph means that if any path to a node passes through
	// a shared library action, they all do).
	var walk func(a *action, seenShlib bool)
	walk = func(a *action, seenShlib bool) {
		if actionsSeen[a] {
			return
		}
		actionsSeen[a] = true
		if a.p != nil && !seenShlib {
			if a.p.Standard {
				return
			}
			// We record the target of the first time we see a .a file
			// for a package to make sure that we prefer the 'install'
			// rather than the 'build' location (which may not exist any
			// more). We still need to traverse the dependencies of the
			// build action though so saying
			// if apackagesSeen[a.p] { return }
			// doesn't work.
			if !apackagesSeen[a.p] {
				apackagesSeen[a.p] = true
				if a.p.fake && a.p.external {
					// external _tests, if present must come before
					// internal _tests. Store these on a separate list
					// and place them at the head after this loop.
					xfiles = append(xfiles, a.target)
				} else if a.p.fake {
					// move _test files to the top of the link order
					afiles = append([]string{a.target}, afiles...)
				} else {
					afiles = append(afiles, a.target)
				}
			}
		}
		if strings.HasSuffix(a.target, ".so") {
			shlibs = append(shlibs, a.target)
			seenShlib = true
		}
		for _, a1 := range a.deps {
			walk(a1, seenShlib)
		}
	}
	for _, a1 := range root.deps {
		walk(a1, false)
	}
	afiles = append(xfiles, afiles...)

	for _, a := range allactions {
		// Gather CgoLDFLAGS, but not from standard packages.
		// The go tool can dig up runtime/cgo from GOROOT and
		// think that it should use its CgoLDFLAGS, but gccgo
		// doesn't use runtime/cgo.
		if a.p == nil {
			continue
		}
		if !a.p.Standard {
			cgoldflags = append(cgoldflags, a.p.CgoLDFLAGS...)
		}
		if len(a.p.CgoFiles) > 0 {
			usesCgo = true
		}
		if a.p.usesSwig() {
			usesCgo = true
		}
		if len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0 {
			cxx = true
		}
		if len(a.p.MFiles) > 0 {
			objc = true
		}
	}

	// For c-archive/c-shared all archives must be pulled in whole so that
	// initialization code is retained.
	switch ldBuildmode {
	case "c-archive", "c-shared":
		ldflags = append(ldflags, "-Wl,--whole-archive")
	}

	ldflags = append(ldflags, afiles...)

	switch ldBuildmode {
	case "c-archive", "c-shared":
		ldflags = append(ldflags, "-Wl,--no-whole-archive")
	}

	ldflags = append(ldflags, cgoldflags...)
	ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
	ldflags = append(ldflags, root.p.CgoLDFLAGS...)

	// -Wl,-( ... -Wl,-) groups the libraries so ordering among them
	// does not matter to the linker.
	ldflags = stringList("-Wl,-(", ldflags, "-Wl,-)")

	for _, shlib := range shlibs {
		ldflags = append(
			ldflags,
			"-L"+filepath.Dir(shlib),
			"-Wl,-rpath="+filepath.Dir(shlib),
			"-l"+strings.TrimSuffix(
				strings.TrimPrefix(filepath.Base(shlib), "lib"),
				".so"))
	}

	var realOut string
	switch ldBuildmode {
	case "exe":
		if usesCgo && goos == "linux" {
			ldflags = append(ldflags, "-Wl,-E")
		}

	case "c-archive":
		// Link the Go files into a single .o, and also link
		// in -lgolibbegin.
		//
		// We need to use --whole-archive with -lgolibbegin
		// because it doesn't define any symbols that will
		// cause the contents to be pulled in; it's just
		// initialization code.
		//
		// The user remains responsible for linking against
		// -lgo -lpthread -lm in the final link. We can't use
		// -r to pick them up because we can't combine
		// split-stack and non-split-stack code in a single -r
		// link, and libgo picks up non-split-stack code from
		// libffi.
		ldflags = append(ldflags, "-Wl,-r", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive")

		// We are creating an object file, so we don't want a build ID.
		ldflags = b.disableBuildID(ldflags)

		// Link to a temporary .o first; "ar" packs it into realOut below.
		realOut = out
		out = out + ".o"

	case "c-shared":
		ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc")

	default:
		fatalf("-buildmode=%s not supported for gccgo", ldBuildmode)
	}

	switch ldBuildmode {
	case "exe", "c-shared":
		if cxx {
			ldflags = append(ldflags, "-lstdc++")
		}
		if objc {
			ldflags = append(ldflags, "-lobjc")
		}
	}

	if err := b.run(".", root.p.ImportPath, nil, tools.linker(), "-o", out, ofiles, ldflags, buildGccgoflags); err != nil {
		return err
	}

	switch ldBuildmode {
	case "c-archive":
		if err := b.run(".", root.p.ImportPath, nil, "ar", "rc", realOut, out); err != nil {
			return err
		}
	}
	return nil
}

// ldShared builds a gccgo shared library from the given top-level
// actions, re-exporting everything via --whole-archive.
func (tools gccgoToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
	args := []string{"-o", out, "-shared", "-nostdlib", "-zdefs", "-Wl,--whole-archive"}
	for _, a := range toplevelactions {
		args = append(args, a.target)
	}
	args = append(args, "-Wl,--no-whole-archive", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")
	shlibs := []string{}
	for _, a := range allactions {
		if strings.HasSuffix(a.target, ".so") {
			shlibs = append(shlibs, a.target)
		}
	}
	for _, shlib := range shlibs {
		args = append(
			args,
			"-L"+filepath.Dir(shlib),
			"-Wl,-rpath="+filepath.Dir(shlib),
			"-l"+strings.TrimSuffix(
				strings.TrimPrefix(filepath.Base(shlib), "lib"),
				".so"))
	}
	return b.run(".", out, nil, tools.linker(), args, buildGccgoflags)
}

// cc compiles a single C file with the system C compiler for gccgo builds.
func (tools gccgoToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
	inc := filepath.Join(goroot, "pkg", "include")
	cfile = mkAbs(p.Dir, cfile)
	defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
	defs = append(defs, b.gccArchArgs()...)
	if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
		defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`)
	}
	// Split stacks are only supported on x86 here.
	switch goarch {
	case "386", "amd64":
		defs = append(defs, "-fsplit-stack")
	}
	defs = tools.maybePIC(defs)
	return b.run(p.Dir, p.ImportPath, nil, envList("CC", defaultCC), "-Wall", "-g",
		"-I", objdir, "-I", inc, "-o", ofile, defs, "-c", cfile)
}

// maybePIC adds -fPIC to the list of arguments if needed.
func (tools gccgoToolchain) maybePIC(args []string) []string {
	switch buildBuildmode {
	case "c-shared", "shared":
		args = append(args, "-fPIC")
	}
	return args
}

// gccgoPkgpath returns the -fgo-pkgpath value for p, or "" for commands
// (package main) that are not forced to build as libraries.
func gccgoPkgpath(p *Package) string {
	if p.build.IsCommand() && !p.forceLibrary {
		return ""
	}
	return p.ImportPath
}

// gccgoCleanPkgpath returns p's pkgpath with every character outside
// [A-Za-z0-9] replaced by '_', suitable for use in a C macro value.
func gccgoCleanPkgpath(p *Package) string {
	clean := func(r rune) rune {
		switch {
		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
			'0' <= r && r <= '9':
			return r
		}
		return '_'
	}
	return strings.Map(clean, gccgoPkgpath(p))
}

// libgcc returns the filename for libgcc, as determined by invoking gcc with
// the -print-libgcc-file-name option.
func (b *builder) libgcc(p *Package) (string, error) {
	var buf bytes.Buffer

	gccCmd := b.gccCmd(p.Dir)

	prev := b.print
	if buildN {
		// In -n mode we temporarily swap out the builder's
		// print function to capture the command-line. This
		// let's us assign it to $LIBGCC and produce a valid
		// buildscript for cgo packages.
		b.print = func(a ...interface{}) (int, error) {
			return fmt.Fprint(&buf, a...)
		}
	}
	f, err := b.runOut(p.Dir, p.ImportPath, nil, gccCmd, "-print-libgcc-file-name")
	if err != nil {
		return "", fmt.Errorf("gcc -print-libgcc-file-name: %v (%s)", err, f)
	}
	if buildN {
		// buf.Next(buf.Len()-1) drops the trailing newline of the captured command.
		s := fmt.Sprintf("LIBGCC=$(%s)\n", buf.Next(buf.Len()-1))
		b.print = prev
		b.print(s)
		return "$LIBGCC", nil
	}

	// The compiler might not be able to find libgcc, and in that case,
	// it will simply return "libgcc.a", which is of no use to us.
	if !filepath.IsAbs(string(f)) {
		return "", nil
	}

	return strings.Trim(string(f), "\r\n"), nil
}

// gcc runs the gcc C compiler to create an object from a single C file.
func (b *builder) gcc(p *Package, out string, flags []string, cfile string) error {
	return b.ccompile(p, out, flags, cfile, b.gccCmd(p.Dir))
}

// gxx runs the g++ C++ compiler to create an object from a single C++ file.
func (b *builder) gxx(p *Package, out string, flags []string, cxxfile string) error {
	return b.ccompile(p, out, flags, cxxfile, b.gxxCmd(p.Dir))
}

// ccompile runs the given C or C++ compiler and creates an object from a single source file.
func (b *builder) ccompile(p *Package, out string, flags []string, file string, compiler []string) error {
	file = mkAbs(p.Dir, file)
	return b.run(p.Dir, p.ImportPath, nil, compiler, flags, "-o", out, "-c", file)
}

// gccld runs the gcc linker to create an executable from a set of object files.
func (b *builder) gccld(p *Package, out string, flags []string, obj []string) error {
	var cmd []string
	// Use the C++ driver when any C++ sources are present so the C++
	// runtime is linked in.
	if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
		cmd = b.gxxCmd(p.Dir)
	} else {
		cmd = b.gccCmd(p.Dir)
	}
	return b.run(p.Dir, p.ImportPath, nil, cmd, "-o", out, obj, flags)
}

// gccCmd returns a gcc command line prefix
// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
func (b *builder) gccCmd(objdir string) []string {
	return b.ccompilerCmd("CC", defaultCC, objdir)
}

// gxxCmd returns a g++ command line prefix
// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
func (b *builder) gxxCmd(objdir string) []string {
	return b.ccompilerCmd("CXX", defaultCXX, objdir)
}

// ccompilerCmd returns a command line prefix for the given environment
// variable and using the default command when the variable is empty.
func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
	// NOTE: env.go's mkEnv knows that the first three
	// strings returned are "gcc", "-I", objdir (and cuts them off).

	compiler := envList(envvar, defcmd)
	a := []string{compiler[0], "-I", objdir}
	a = append(a, compiler[1:]...)

	// Definitely want -fPIC but on Windows gcc complains
	// "-fPIC ignored for target (all code is position independent)"
	if goos != "windows" {
		a = append(a, "-fPIC")
	}
	a = append(a, b.gccArchArgs()...)
	// gcc-4.5 and beyond require explicit "-pthread" flag
	// for multithreading with pthread library.
	if buildContext.CgoEnabled {
		switch goos {
		case "windows":
			a = append(a, "-mthreads")
		default:
			a = append(a, "-pthread")
		}
	}

	if strings.Contains(a[0], "clang") {
		// disable ASCII art in clang errors, if possible
		a = append(a, "-fno-caret-diagnostics")
		// clang is too smart about command-line arguments
		a = append(a, "-Qunused-arguments")
	}

	// disable word wrapping in error messages
	a = append(a, "-fmessage-length=0")

	// On OS X, some of the compilers behave as if -fno-common
	// is always set, and the Mach-O linker in 6l/8l assumes this.
	// See https://golang.org/issue/3253.
	if goos == "darwin" {
		a = append(a, "-fno-common")
	}

	return a
}

// gccArchArgs returns arguments to pass to gcc based on the architecture.
func (b *builder) gccArchArgs() []string {
	switch goarch {
	case "386":
		return []string{"-m32"}
	case "amd64", "amd64p32":
		return []string{"-m64"}
	case "arm":
		return []string{"-marm"} // not thumb
	}
	return nil
}

// envList returns the value of the given environment variable broken
// into fields, using the default value when the variable is empty.
func envList(key, def string) []string {
	v := os.Getenv(key)
	if v == "" {
		v = def
	}
	return strings.Fields(v)
}

// Return the flags to use when invoking the C or C++ compilers, or cgo.
func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) { var defaults string if def { defaults = "-g -O2" } cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS) cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS) cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS) ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS) return } var cgoRe = regexp.MustCompile(`[/\\:]`) var ( cgoLibGccFile string cgoLibGccErr error cgoLibGccFileOnce sync.Once ) func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) { cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoLDFLAGS := b.cflags(p, true) _, cgoexeCFLAGS, _, _ := b.cflags(p, false) cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...) cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...) // If we are compiling Objective-C code, then we need to link against libobjc if len(mfiles) > 0 { cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc") } // Allows including _cgo_export.h from .[ch] files in the package. cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj) // cgo // TODO: CGOPKGPATH, CGO_FLAGS? gofiles := []string{obj + "_cgo_gotypes.go"} cfiles := []string{"_cgo_main.c", "_cgo_export.c"} for _, fn := range cgofiles { f := cgoRe.ReplaceAllString(fn[:len(fn)-2], "_") gofiles = append(gofiles, obj+f+"cgo1.go") cfiles = append(cfiles, f+"cgo2.c") } defunC := obj + "_cgo_defun.c" cgoflags := []string{} // TODO: make cgo not depend on $GOARCH? if p.Standard && p.ImportPath == "runtime/cgo" { cgoflags = append(cgoflags, "-import_runtime_cgo=false") } if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/cgo") { cgoflags = append(cgoflags, "-import_syscall=false") } // Update $CGO_LDFLAGS with p.CgoLDFLAGS. 
var cgoenv []string if len(cgoLDFLAGS) > 0 { flags := make([]string, len(cgoLDFLAGS)) for i, f := range cgoLDFLAGS { flags[i] = strconv.Quote(f) } cgoenv = []string{"CGO_LDFLAGS=" + strings.Join(flags, " ")} } if _, ok := buildToolchain.(gccgoToolchain); ok { switch goarch { case "386", "amd64": cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack") } cgoflags = append(cgoflags, "-gccgo") if pkgpath := gccgoPkgpath(p); pkgpath != "" { cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath) } } switch buildBuildmode { case "c-archive", "c-shared": // Tell cgo that if there are any exported functions // it should generate a header file that C code can // #include. cgoflags = append(cgoflags, "-exportheader="+obj+"_cgo_install.h") } if err := b.run(p.Dir, p.ImportPath, cgoenv, buildToolExec, cgoExe, "-objdir", obj, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoexeCFLAGS, cgofiles); err != nil { return nil, nil, err } outGo = append(outGo, gofiles...) // cc _cgo_defun.c _, gccgo := buildToolchain.(gccgoToolchain) if gccgo { defunObj := obj + "_cgo_defun.o" if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil { return nil, nil, err } outObj = append(outObj, defunObj) } // gcc var linkobj []string var bareLDFLAGS []string // filter out -lsomelib, -l somelib, *.{so,dll,dylib}, and (on Darwin) -framework X for i := 0; i < len(cgoLDFLAGS); i++ { f := cgoLDFLAGS[i] switch { // skip "-lc" or "-l somelib" case strings.HasPrefix(f, "-l"): if f == "-l" { i++ } // skip "-framework X" on Darwin case goos == "darwin" && f == "-framework": i++ // skip "*.{dylib,so,dll}" case strings.HasSuffix(f, ".dylib"), strings.HasSuffix(f, ".so"), strings.HasSuffix(f, ".dll"): continue // Remove any -fsanitize=foo flags. // Otherwise the compiler driver thinks that we are doing final link // and links sanitizer runtime into the object file. But we are not doing // the final link, we will link the resulting object file again. 
And // so the program ends up with two copies of sanitizer runtime. // See issue 8788 for details. case strings.HasPrefix(f, "-fsanitize="): continue default: bareLDFLAGS = append(bareLDFLAGS, f) } } cgoLibGccFileOnce.Do(func() { cgoLibGccFile, cgoLibGccErr = b.libgcc(p) }) if cgoLibGccFile == "" && cgoLibGccErr != nil { return nil, nil, err } var staticLibs []string if goos == "windows" { // libmingw32 and libmingwex might also use libgcc, so libgcc must come last, // and they also have some inter-dependencies, so must use linker groups. staticLibs = []string{"-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group"} } if cgoLibGccFile != "" { staticLibs = append(staticLibs, cgoLibGccFile) } cflags := stringList(cgoCPPFLAGS, cgoCFLAGS) for _, cfile := range cfiles { ofile := obj + cfile[:len(cfile)-1] + "o" if err := b.gcc(p, ofile, cflags, obj+cfile); err != nil { return nil, nil, err } linkobj = append(linkobj, ofile) if !strings.HasSuffix(ofile, "_cgo_main.o") { outObj = append(outObj, ofile) } } for _, file := range gccfiles { ofile := obj + cgoRe.ReplaceAllString(file[:len(file)-1], "_") + "o" if err := b.gcc(p, ofile, cflags, file); err != nil { return nil, nil, err } linkobj = append(linkobj, ofile) outObj = append(outObj, ofile) } cxxflags := stringList(cgoCPPFLAGS, cgoCXXFLAGS) for _, file := range gxxfiles { // Append .o to the file, just in case the pkg has file.c and file.cpp ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o" if err := b.gxx(p, ofile, cxxflags, file); err != nil { return nil, nil, err } linkobj = append(linkobj, ofile) outObj = append(outObj, ofile) } for _, file := range mfiles { // Append .o to the file, just in case the pkg has file.c and file.m ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o" if err := b.gcc(p, ofile, cflags, file); err != nil { return nil, nil, err } linkobj = append(linkobj, ofile) outObj = append(outObj, ofile) } linkobj = append(linkobj, p.SysoFiles...) 
dynobj := obj + "_cgo_.o" pie := goarch == "arm" && (goos == "linux" || goos == "android") if pie { // we need to use -pie for Linux/ARM to get accurate imported sym cgoLDFLAGS = append(cgoLDFLAGS, "-pie") } if err := b.gccld(p, dynobj, cgoLDFLAGS, linkobj); err != nil { return nil, nil, err } if pie { // but we don't need -pie for normal cgo programs cgoLDFLAGS = cgoLDFLAGS[0 : len(cgoLDFLAGS)-1] } if _, ok := buildToolchain.(gccgoToolchain); ok { // we don't use dynimport when using gccgo. return outGo, outObj, nil } // cgo -dynimport importGo := obj + "_cgo_import.go" cgoflags = []string{} if p.Standard && p.ImportPath == "runtime/cgo" { cgoflags = append(cgoflags, "-dynlinker") // record path to dynamic linker } if err := b.run(p.Dir, p.ImportPath, nil, buildToolExec, cgoExe, "-objdir", obj, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags); err != nil { return nil, nil, err } outGo = append(outGo, importGo) ofile := obj + "_all.o" var gccObjs, nonGccObjs []string for _, f := range outObj { if strings.HasSuffix(f, ".o") { gccObjs = append(gccObjs, f) } else { nonGccObjs = append(nonGccObjs, f) } } ldflags := stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs) // We are creating an object file, so we don't want a build ID. ldflags = b.disableBuildID(ldflags) if err := b.gccld(p, ofile, ldflags, gccObjs); err != nil { return nil, nil, err } // NOTE(rsc): The importObj is a 5c/6c/8c object and on Windows // must be processed before the gcc-generated objects. // Put it first. https://golang.org/issue/2601 outObj = stringList(nonGccObjs, ofile) return outGo, outObj, nil } // Run SWIG on all SWIG input files. // TODO: Don't build a shared library, once SWIG emits the necessary // pragmas for external linking. 
// swig runs SWIG over every .swig and .swigcxx file of p, returning the
// generated Go, C, and C++ source files.
func (b *builder) swig(p *Package, obj string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
	if err := b.swigVersionCheck(); err != nil {
		return nil, nil, nil, err
	}

	intgosize, err := b.swigIntSize(obj)
	if err != nil {
		return nil, nil, nil, err
	}

	for _, f := range p.SwigFiles {
		goFile, cFile, err := b.swigOne(p, f, obj, pcCFLAGS, false, intgosize)
		if err != nil {
			return nil, nil, nil, err
		}
		if goFile != "" {
			outGo = append(outGo, goFile)
		}
		if cFile != "" {
			outC = append(outC, cFile)
		}
	}
	for _, f := range p.SwigCXXFiles {
		goFile, cxxFile, err := b.swigOne(p, f, obj, pcCFLAGS, true, intgosize)
		if err != nil {
			return nil, nil, nil, err
		}
		if goFile != "" {
			outGo = append(outGo, goFile)
		}
		if cxxFile != "" {
			outCXX = append(outCXX, cxxFile)
		}
	}
	return outGo, outC, outCXX, nil
}

// Make sure SWIG is new enough.
var (
	swigCheckOnce sync.Once
	swigCheck     error
)

// swigDoVersionCheck runs "swig -version" and rejects versions before
// 3.0.6. If the version string cannot be parsed, the check passes
// ("hope for the best") rather than failing the build.
func (b *builder) swigDoVersionCheck() error {
	out, err := b.runOut("", "", nil, "swig", "-version")
	if err != nil {
		return err
	}
	re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`)
	matches := re.FindSubmatch(out)
	if matches == nil {
		// Can't find version number; hope for the best.
		return nil
	}

	major, err := strconv.Atoi(string(matches[1]))
	if err != nil {
		// Can't find version number; hope for the best.
		return nil
	}
	const errmsg = "must have SWIG version >= 3.0.6"
	if major < 3 {
		return errors.New(errmsg)
	}
	if major > 3 {
		// 4.0 or later
		return nil
	}

	// We have SWIG version 3.x.
	if len(matches[2]) > 0 {
		// matches[2] is ".<minor>"; strip the leading dot before parsing.
		minor, err := strconv.Atoi(string(matches[2][1:]))
		if err != nil {
			return nil
		}
		if minor > 0 {
			// 3.1 or later
			return nil
		}
	}

	// We have SWIG version 3.0.x.
	if len(matches[3]) > 0 {
		patch, err := strconv.Atoi(string(matches[3][1:]))
		if err != nil {
			return nil
		}
		if patch < 6 {
			// Before 3.0.6.
			return errors.New(errmsg)
		}
	}
	return nil
}

// swigVersionCheck memoizes swigDoVersionCheck for the whole process.
func (b *builder) swigVersionCheck() error {
	swigCheckOnce.Do(func() {
		swigCheck = b.swigDoVersionCheck()
	})
	return swigCheck
}

// This code fails to build if sizeof(int) <= 32
const swigIntSizeCode = `
package main
const i int = 1 << 32
`

// Determine the size of int on the target system for the -intgosize option
// of swig >= 2.0.9
func (b *builder) swigIntSize(obj string) (intsize string, err error) {
	if buildN {
		// In -n mode nothing is compiled; emit a shell placeholder instead.
		return "$INTBITS", nil
	}
	src := filepath.Join(b.work, "swig_intsize.go")
	if err = ioutil.WriteFile(src, []byte(swigIntSizeCode), 0644); err != nil {
		return
	}
	srcs := []string{src}

	p := goFilesPackage(srcs)

	// If the probe fails to compile, int must be 32 bits on the target.
	if _, _, e := buildToolchain.gc(b, p, "", obj, false, nil, srcs); e != nil {
		return "32", nil
	}
	return "64", nil
}

// Run SWIG on one SWIG input file.
func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _ := b.cflags(p, true)
	var cflags []string
	if cxx {
		cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
	} else {
		cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS)
	}

	n := 5 // length of ".swig"
	if cxx {
		n = 8 // length of ".swigcxx"
	}
	base := file[:len(file)-n]
	goFile := base + ".go"
	gccBase := base + "_wrap."
	gccExt := "c"
	if cxx {
		gccExt = "cxx"
	}

	_, gccgo := buildToolchain.(gccgoToolchain)

	// swig
	args := []string{
		"-go",
		"-cgo",
		"-intgosize", intgosize,
		"-module", base,
		"-o", obj + gccBase + gccExt,
		"-outdir", obj,
	}

	// Forward only include-path flags to swig.
	for _, f := range cflags {
		if len(f) > 3 && f[:2] == "-I" {
			args = append(args, f)
		}
	}

	if gccgo {
		args = append(args, "-gccgo")
		if pkgpath := gccgoPkgpath(p); pkgpath != "" {
			args = append(args, "-go-pkgpath", pkgpath)
		}
	}
	if cxx {
		args = append(args, "-c++")
	}

	out, err := b.runOut(p.Dir, p.ImportPath, nil, "swig", args, file)
	if err != nil {
		if len(out) > 0 {
			// An older swig rejects -intgosize/-cgo; translate that into
			// the version-requirement error rather than raw tool output.
			if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) {
				return "", "", errors.New("must have SWIG version >= 3.0.6")
			}
			b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig error
			return "", "", errPrintedOutput
		}
		return "", "", err
	}
	if len(out) > 0 {
		b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig warning
	}

	return obj + goFile, obj + gccBase + gccExt, nil
}

// disableBuildID adjusts a linker command line to avoid creating a
// build ID when creating an object file rather than an executable or
// shared library.  Some systems, such as Ubuntu, always add
// --build-id to every link, but we don't want a build ID when we are
// producing an object file.  On some of those system a plain -r (not
// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a
// plain -r.  I don't know how to turn off --build-id when using clang
// other than passing a trailing --build-id=none.  So that is what we
// do, but only on systems likely to support it, which is to say,
// systems that normally use gold or the GNU linker.
func (b *builder) disableBuildID(ldflags []string) []string {
	switch goos {
	case "android", "dragonfly", "linux", "netbsd":
		ldflags = append(ldflags, "-Wl,--build-id=none")
	}
	return ldflags
}

// An actionQueue is a priority queue of actions.
type actionQueue []*action

// Implement heap.Interface
func (q *actionQueue) Len() int           { return len(*q) }
func (q *actionQueue) Swap(i, j int)      { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] }
func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority }
func (q *actionQueue) Push(x interface{}) { *q = append(*q, x.(*action)) }
func (q *actionQueue) Pop() interface{} {
	n := len(*q) - 1
	x := (*q)[n]
	*q = (*q)[:n]
	return x
}

// push adds a to the queue, maintaining the heap invariant.
func (q *actionQueue) push(a *action) {
	heap.Push(q, a)
}

// pop removes and returns the lowest-priority action.
func (q *actionQueue) pop() *action {
	return heap.Pop(q).(*action)
}

// raceInit validates the -race platform and adjusts build flags,
// install suffix, and build tags for race-enabled builds.
func raceInit() {
	if !buildRace {
		return
	}
	if goarch != "amd64" || goos != "linux" && goos != "freebsd" && goos != "darwin" && goos != "windows" {
		fmt.Fprintf(os.Stderr, "go %s: -race is only supported on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0])
		os.Exit(2)
	}
	buildGcflags = append(buildGcflags, "-race")
	buildLdflags = append(buildLdflags, "-race")
	if buildContext.InstallSuffix != "" {
		buildContext.InstallSuffix += "_"
	}
	buildContext.InstallSuffix += "race"
	buildContext.BuildTags = append(buildContext.BuildTags, "race")
}

// defaultSuffix returns file extension used for command files in
// current os environment.
func defaultSuffix() string {
	switch runtime.GOOS {
	case "windows":
		return ".bat"
	case "plan9":
		return ".rc"
	default:
		return ".bash"
	}
}
goos == "windows" { // libmingw32 and libmingwex might also use libgcc, so libgcc must come last, // and they also have some inter-dependencies, so go1.5/src/cmd/link/ 775 0 0 0 12641202403 127725ustar00millermillergo1.5/src/cmd/link/internal/ 775 0 0 0 12641202403 146065ustar00millermillergo1.5/src/cmd/link/internal/arm/ 775 0 0 0 12641202403 153655ustar00millermillergo1.5/src/cmd/link/internal/arm/asm.go 640 0 0 37526 12562734676 16553ustar00millermiller// Inferno utils/5l/asm.c
// http://code.google.com/p/inferno-os/source/browse/utils/5l/asm.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) // Portions Copyright © 1997-1999 Vita Nuova Limited // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) // Portions Copyright © 2004,2006 Bruce Ellis // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others // Portions Copyright © 2009 The Go Authors. All rights reserved. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
package arm import ( "cmd/internal/obj" "cmd/link/internal/ld" "fmt" "log" ) func gentext() { } // Preserve highest 8 bits of a, and do addition to lower 24-bit // of a and b; used to adjust ARM branch intruction's target func braddoff(a int32, b int32) int32 { return int32((uint32(a))&0xff000000 | 0x00ffffff&uint32(a+b)) } func adddynrela(rel *ld.LSym, s *ld.LSym, r *ld.Reloc) { ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off)) ld.Adduint32(ld.Ctxt, rel, ld.R_ARM_RELATIVE) } func adddynrel(s *ld.LSym, r *ld.Reloc) { targ := r.Sym ld.Ctxt.Cursym = s switch r.Type { default: if r.Type >= 256 { ld.Diag("unexpected relocation type %d", r.Type) return } // Handle relocations found in ELF object files. case 256 + ld.R_ARM_PLT32: r.Type = obj.R_CALLARM if targ.Type == obj.SDYNIMPORT { addpltsym(ld.Ctxt, targ) r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0) r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) } return case 256 + ld.R_ARM_THM_PC22: // R_ARM_THM_CALL ld.Exitf("R_ARM_THM_CALL, are you using -marm?") return case 256 + ld.R_ARM_GOT32: // R_ARM_GOT_BREL if targ.Type != obj.SDYNIMPORT { addgotsyminternal(ld.Ctxt, targ) } else { addgotsym(ld.Ctxt, targ) } r.Type = obj.R_CONST // write r->add during relocsym r.Sym = nil r.Add += int64(targ.Got) return case 256 + ld.R_ARM_GOT_PREL: // GOT(nil) + A - nil if targ.Type != obj.SDYNIMPORT { addgotsyminternal(ld.Ctxt, targ) } else { addgotsym(ld.Ctxt, targ) } r.Type = obj.R_PCREL r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0) r.Add += int64(targ.Got) + 4 return case 256 + ld.R_ARM_GOTOFF: // R_ARM_GOTOFF32 r.Type = obj.R_GOTOFF return case 256 + ld.R_ARM_GOTPC: // R_ARM_BASE_PREL r.Type = obj.R_PCREL r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0) r.Add += 4 return case 256 + ld.R_ARM_CALL: r.Type = obj.R_CALLARM if targ.Type == obj.SDYNIMPORT { addpltsym(ld.Ctxt, targ) r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0) r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) } return case 256 + ld.R_ARM_REL32: // R_ARM_REL32 r.Type = obj.R_PCREL r.Add += 4 
return case 256 + ld.R_ARM_ABS32: if targ.Type == obj.SDYNIMPORT { ld.Diag("unexpected R_ARM_ABS32 relocation for dynamic symbol %s", targ.Name) } r.Type = obj.R_ADDR return // we can just ignore this, because we are targeting ARM V5+ anyway case 256 + ld.R_ARM_V4BX: if r.Sym != nil { // R_ARM_V4BX is ABS relocation, so this symbol is a dummy symbol, ignore it r.Sym.Type = 0 } r.Sym = nil return case 256 + ld.R_ARM_PC24, 256 + ld.R_ARM_JUMP24: r.Type = obj.R_CALLARM if targ.Type == obj.SDYNIMPORT { addpltsym(ld.Ctxt, targ) r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0) r.Add = int64(braddoff(int32(r.Add), targ.Plt/4)) } return } // Handle references to ELF symbols from our own object files. if targ.Type != obj.SDYNIMPORT { return } switch r.Type { case obj.R_CALLARM: addpltsym(ld.Ctxt, targ) r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0) r.Add = int64(targ.Plt) return case obj.R_ADDR: if s.Type != obj.SDATA { break } if ld.Iself { ld.Adddynsym(ld.Ctxt, targ) rel := ld.Linklookup(ld.Ctxt, ".rel", 0) ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off)) ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynamic reloc r.Type = obj.R_CONST // write r->add during relocsym r.Sym = nil return } } ld.Ctxt.Cursym = s ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type) } func elfreloc1(r *ld.Reloc, sectoff int64) int { ld.Thearch.Lput(uint32(sectoff)) elfsym := r.Xsym.Elfsym switch r.Type { default: return -1 case obj.R_ADDR: if r.Siz == 4 { ld.Thearch.Lput(ld.R_ARM_ABS32 | uint32(elfsym)<<8) } else { return -1 } case obj.R_PCREL: if r.Siz == 4 { ld.Thearch.Lput(ld.R_ARM_REL32 | uint32(elfsym)<<8) } else { return -1 } case obj.R_CALLARM: if r.Siz == 4 { if r.Add&0xff000000 == 0xeb000000 { // BL ld.Thearch.Lput(ld.R_ARM_CALL | uint32(elfsym)<<8) } else { ld.Thearch.Lput(ld.R_ARM_JUMP24 | uint32(elfsym)<<8) } } else { return -1 } case obj.R_TLS: if r.Siz == 4 { if ld.Buildmode == 
ld.BuildmodeCShared { ld.Thearch.Lput(ld.R_ARM_TLS_IE32 | uint32(elfsym)<<8) } else { ld.Thearch.Lput(ld.R_ARM_TLS_LE32 | uint32(elfsym)<<8) } } else { return -1 } } return 0 } func elfsetupplt() { plt := ld.Linklookup(ld.Ctxt, ".plt", 0) got := ld.Linklookup(ld.Ctxt, ".got.plt", 0) if plt.Size == 0 { // str lr, [sp, #-4]! ld.Adduint32(ld.Ctxt, plt, 0xe52de004) // ldr lr, [pc, #4] ld.Adduint32(ld.Ctxt, plt, 0xe59fe004) // add lr, pc, lr ld.Adduint32(ld.Ctxt, plt, 0xe08fe00e) // ldr pc, [lr, #8]! ld.Adduint32(ld.Ctxt, plt, 0xe5bef008) // .word &GLOBAL_OFFSET_TABLE[0] - . ld.Addpcrelplus(ld.Ctxt, plt, got, 4) // the first .plt entry requires 3 .plt.got entries ld.Adduint32(ld.Ctxt, got, 0) ld.Adduint32(ld.Ctxt, got, 0) ld.Adduint32(ld.Ctxt, got, 0) } } func machoreloc1(r *ld.Reloc, sectoff int64) int { var v uint32 rs := r.Xsym if rs.Type == obj.SHOSTOBJ || r.Type == obj.R_CALLARM { if rs.Dynid < 0 { ld.Diag("reloc %d to non-macho symbol %s type=%d", r.Type, rs.Name, rs.Type) return -1 } v = uint32(rs.Dynid) v |= 1 << 27 // external relocation } else { v = uint32(rs.Sect.Extnum) if v == 0 { ld.Diag("reloc %d to symbol %s in non-macho section %s type=%d", r.Type, rs.Name, rs.Sect.Name, rs.Type) return -1 } } switch r.Type { default: return -1 case obj.R_ADDR: v |= ld.MACHO_GENERIC_RELOC_VANILLA << 28 case obj.R_CALLARM: v |= 1 << 24 // pc-relative bit v |= ld.MACHO_ARM_RELOC_BR24 << 28 } switch r.Siz { default: return -1 case 1: v |= 0 << 25 case 2: v |= 1 << 25 case 4: v |= 2 << 25 case 8: v |= 3 << 25 } ld.Thearch.Lput(uint32(sectoff)) ld.Thearch.Lput(v) return 0 } func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int { if ld.Linkmode == ld.LinkExternal { switch r.Type { case obj.R_CALLARM: r.Done = 0 // set up addend for eventual relocation via outer symbol. 
rs := r.Sym r.Xadd = r.Add if r.Xadd&0x800000 != 0 { r.Xadd |= ^0xffffff } r.Xadd *= 4 for rs.Outer != nil { r.Xadd += ld.Symaddr(rs) - ld.Symaddr(rs.Outer) rs = rs.Outer } if rs.Type != obj.SHOSTOBJ && rs.Sect == nil { ld.Diag("missing section for %s", rs.Name) } r.Xsym = rs // ld64 for arm seems to want the symbol table to contain offset // into the section rather than pseudo virtual address that contains // the section load address. // we need to compensate that by removing the instruction's address // from addend. if ld.HEADTYPE == obj.Hdarwin { r.Xadd -= ld.Symaddr(s) + int64(r.Off) } *val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32(r.Xadd/4)))) return 0 } return -1 } switch r.Type { case obj.R_CONST: *val = r.Add return 0 case obj.R_GOTOFF: *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0)) return 0 // The following three arch specific relocations are only for generation of // Linux/ARM ELF's PLT entry (3 assembler instruction) case obj.R_PLT0: // add ip, pc, #0xXX00000 if ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got.plt", 0)) < ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0)) { ld.Diag(".got.plt should be placed after .plt section.") } *val = 0xe28fc600 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0))+int64(r.Off))+r.Add)) >> 20)) return 0 case obj.R_PLT1: // add ip, ip, #0xYY000 *val = 0xe28cca00 + (0xff & (int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0))+int64(r.Off))+r.Add+4)) >> 12)) return 0 case obj.R_PLT2: // ldr pc, [ip, #0xZZZ]! 
*val = 0xe5bcf000 + (0xfff & int64(uint32(ld.Symaddr(r.Sym)-(ld.Symaddr(ld.Linklookup(ld.Ctxt, ".plt", 0))+int64(r.Off))+r.Add+8))) return 0 case obj.R_CALLARM: // bl XXXXXX or b YYYYYY *val = int64(braddoff(int32(0xff000000&uint32(r.Add)), int32(0xffffff&uint32((ld.Symaddr(r.Sym)+int64((uint32(r.Add))*4)-(s.Value+int64(r.Off)))/4)))) return 0 } return -1 } func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 { log.Fatalf("unexpected relocation variant") return t } func addpltreloc(ctxt *ld.Link, plt *ld.LSym, got *ld.LSym, sym *ld.LSym, typ int) *ld.Reloc { r := ld.Addrel(plt) r.Sym = got r.Off = int32(plt.Size) r.Siz = 4 r.Type = int32(typ) r.Add = int64(sym.Got) - 8 plt.Reachable = true plt.Size += 4 ld.Symgrow(ctxt, plt, plt.Size) return r } func addpltsym(ctxt *ld.Link, s *ld.LSym) { if s.Plt >= 0 { return } ld.Adddynsym(ctxt, s) if ld.Iself { plt := ld.Linklookup(ctxt, ".plt", 0) got := ld.Linklookup(ctxt, ".got.plt", 0) rel := ld.Linklookup(ctxt, ".rel.plt", 0) if plt.Size == 0 { elfsetupplt() } // .got entry s.Got = int32(got.Size) // In theory, all GOT should point to the first PLT entry, // Linux/ARM's dynamic linker will do that for us, but FreeBSD/ARM's // dynamic linker won't, so we'd better do it ourselves. ld.Addaddrplus(ctxt, got, plt, 0) // .plt entry, this depends on the .got entry s.Plt = int32(plt.Size) addpltreloc(ctxt, plt, got, s, obj.R_PLT0) // add lr, pc, #0xXX00000 addpltreloc(ctxt, plt, got, s, obj.R_PLT1) // add lr, lr, #0xYY000 addpltreloc(ctxt, plt, got, s, obj.R_PLT2) // ldr pc, [lr, #0xZZZ]! 
// rel ld.Addaddrplus(ctxt, rel, got, int64(s.Got)) ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_JUMP_SLOT)) } else { ld.Diag("addpltsym: unsupported binary format") } } func addgotsyminternal(ctxt *ld.Link, s *ld.LSym) { if s.Got >= 0 { return } got := ld.Linklookup(ctxt, ".got", 0) s.Got = int32(got.Size) ld.Addaddrplus(ctxt, got, s, 0) if ld.Iself { } else { ld.Diag("addgotsyminternal: unsupported binary format") } } func addgotsym(ctxt *ld.Link, s *ld.LSym) { if s.Got >= 0 { return } ld.Adddynsym(ctxt, s) got := ld.Linklookup(ctxt, ".got", 0) s.Got = int32(got.Size) ld.Adduint32(ctxt, got, 0) if ld.Iself { rel := ld.Linklookup(ctxt, ".rel", 0) ld.Addaddrplus(ctxt, rel, got, int64(s.Got)) ld.Adduint32(ctxt, rel, ld.ELF32_R_INFO(uint32(s.Dynid), ld.R_ARM_GLOB_DAT)) } else { ld.Diag("addgotsym: unsupported binary format") } } func asmb() { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime()) } ld.Bso.Flush() if ld.Iself { ld.Asmbelfsetup() } sect := ld.Segtext.Sect ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Codeblk(int64(sect.Vaddr), int64(sect.Length)) for sect = sect.Next; sect != nil; sect = sect.Next { ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff)) ld.Datblk(int64(sect.Vaddr), int64(sect.Length)) } if ld.Segrodata.Filelen > 0 { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f rodatblk\n", obj.Cputime()) } ld.Bso.Flush() ld.Cseek(int64(ld.Segrodata.Fileoff)) ld.Datblk(int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen)) } if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f datblk\n", obj.Cputime()) } ld.Bso.Flush() ld.Cseek(int64(ld.Segdata.Fileoff)) ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen)) machlink := uint32(0) if ld.HEADTYPE == obj.Hdarwin { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime()) } dwarfoff := uint32(ld.Rnd(int64(uint64(ld.HEADR)+ld.Segtext.Length), int64(ld.INITRND)) + ld.Rnd(int64(ld.Segdata.Filelen), 
int64(ld.INITRND))) ld.Cseek(int64(dwarfoff)) ld.Segdwarf.Fileoff = uint64(ld.Cpos()) ld.Dwarfemitdebugsections() ld.Segdwarf.Filelen = uint64(ld.Cpos()) - ld.Segdwarf.Fileoff machlink = uint32(ld.Domacholink()) } /* output symbol table */ ld.Symsize = 0 ld.Lcsize = 0 symo := uint32(0) if ld.Debug['s'] == 0 { // TODO: rationalize if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime()) } ld.Bso.Flush() switch ld.HEADTYPE { default: if ld.Iself { symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) symo = uint32(ld.Rnd(int64(symo), int64(ld.INITRND))) } case obj.Hplan9: symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen) case obj.Hdarwin: symo = uint32(ld.Segdwarf.Fileoff + uint64(ld.Rnd(int64(ld.Segdwarf.Filelen), int64(ld.INITRND))) + uint64(machlink)) } ld.Cseek(int64(symo)) switch ld.HEADTYPE { default: if ld.Iself { if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f elfsym\n", obj.Cputime()) } ld.Asmelfsym() ld.Cflush() ld.Cwrite(ld.Elfstrdat) if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime()) } ld.Dwarfemitdebugsections() if ld.Linkmode == ld.LinkExternal { ld.Elfemitreloc() } } case obj.Hplan9: ld.Asmplan9sym() ld.Cflush() sym := ld.Linklookup(ld.Ctxt, "pclntab", 0) if sym != nil { ld.Lcsize = int32(len(sym.P)) for i := 0; int32(i) < ld.Lcsize; i++ { ld.Cput(uint8(sym.P[i])) } ld.Cflush() } case obj.Hdarwin: if ld.Linkmode == ld.LinkExternal { ld.Machoemitreloc() } } } ld.Ctxt.Cursym = nil if ld.Debug['v'] != 0 { fmt.Fprintf(&ld.Bso, "%5.2f header\n", obj.Cputime()) } ld.Bso.Flush() ld.Cseek(0) switch ld.HEADTYPE { default: case obj.Hplan9: /* plan 9 */ ld.Lputb(0x647) /* magic */ ld.Lputb(uint32(ld.Segtext.Filelen)) /* sizes */ ld.Lputb(uint32(ld.Segdata.Filelen)) ld.Lputb(uint32(ld.Segdata.Length - ld.Segdata.Filelen)) ld.Lputb(uint32(ld.Symsize)) /* nsyms */ ld.Lputb(uint32(ld.Entryvalue())) /* va of entry */ ld.Lputb(0) ld.Lputb(uint32(ld.Lcsize)) case obj.Hlinux, obj.Hfreebsd, obj.Hnetbsd, obj.Hopenbsd, 
obj.Hnacl: ld.Asmbelf(int64(symo)) case obj.Hdarwin: ld.Asmbmacho() } ld.Cflush() if ld.Debug['c'] != 0 { fmt.Printf("textsize=%d\n", ld.Segtext.Filelen) fmt.Printf("datsize=%d\n", ld.Segdata.Filelen) fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen) fmt.Printf("symsize=%d\n", ld.Symsize) fmt.Printf("lcsize=%d\n", ld.Lcsize) fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize)) } } MP24 | uint32(elfsym)<<8) } } else { return -1 } case obj.R_TLS: if r.Siz == 4 { if ld.Buildmode == ld.BuildmodeCShared { ld.Thearch.Lput(ld.R_ARM_TLgo1.5/src/runtime/ 775 0 0 0 12641202403 127555ustar00millermillergo1.5/src/runtime/asm_arm.s 640 0 0 64030 12567646242 14621ustar00millermiller// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "go_asm.h" #include "go_tls.h" #include "funcdata.h" #include "textflag.h" // using frame size $-4 means do not save LR on stack. 
TEXT runtime·rt0_go(SB),NOSPLIT,$-4 MOVW $0xcafebabe, R12 // copy arguments forward on an even stack // use R13 instead of SP to avoid linker rewriting the offsets MOVW 0(R13), R0 // argc MOVW 4(R13), R1 // argv SUB $64, R13 // plenty of scratch AND $~7, R13 MOVW R0, 60(R13) // save argc, argv away MOVW R1, 64(R13) // set up g register // g is R10 MOVW $runtime·g0(SB), g MOVW $runtime·m0(SB), R8 // save m->g0 = g0 MOVW g, m_g0(R8) // save g->m = m0 MOVW R8, g_m(g) // create istack out of the OS stack MOVW $(-8192+104)(R13), R0 MOVW R0, g_stackguard0(g) MOVW R0, g_stackguard1(g) MOVW R0, (g_stack+stack_lo)(g) MOVW R13, (g_stack+stack_hi)(g) BL runtime·emptyfunc(SB) // fault if stack check is wrong BL runtime·_initcgo(SB) // will clobber R0-R3 // update stackguard after _cgo_init MOVW (g_stack+stack_lo)(g), R0 ADD $const__StackGuard, R0 MOVW R0, g_stackguard0(g) MOVW R0, g_stackguard1(g) BL runtime·check(SB) // saved argc, argv MOVW 60(R13), R0 MOVW R0, 4(R13) MOVW 64(R13), R1 MOVW R1, 8(R13) BL runtime·args(SB) BL runtime·checkgoarm(SB) BL runtime·osinit(SB) BL runtime·schedinit(SB) // create a new goroutine to start program MOVW $runtime·mainPC(SB), R0 MOVW.W R0, -4(R13) MOVW $8, R0 MOVW.W R0, -4(R13) MOVW $0, R0 MOVW.W R0, -4(R13) // push $0 as guard BL runtime·newproc(SB) MOVW $12(R13), R13 // pop args and LR // start this M BL runtime·mstart(SB) MOVW $1234, R0 MOVW $1000, R1 MOVW R0, (R1) // fail hard DATA runtime·mainPC+0(SB)/4,$runtime·main(SB) GLOBL runtime·mainPC(SB),RODATA,$4 TEXT runtime·breakpoint(SB),NOSPLIT,$0-0 BL runtime·emptyfunc(SB) // force R14 save for traceback // gdb won't skip this breakpoint instruction automatically, // so you must manually "set $pc+=4" to skip it and continue. 
#ifdef GOOS_nacl WORD $0xe125be7f // BKPT 0x5bef, NACL_INSTR_ARM_BREAKPOINT #else #ifdef GOOS_plan9 WORD $0xD1200070 // undefined instruction used as armv5 breakpoint in Plan 9 #else WORD $0xe7f001f0 // undefined instruction that gdb understands is a software breakpoint #endif #endif RET TEXT runtime·asminit(SB),NOSPLIT,$0-0 // disable runfast (flush-to-zero) mode of vfp if runtime.goarm > 5 MOVB runtime·goarm(SB), R11 CMP $5, R11 BLE 4(PC) WORD $0xeef1ba10 // vmrs r11, fpscr BIC $(1<<24), R11 WORD $0xeee1ba10 // vmsr fpscr, r11 RET /* * go-routine */ // void gosave(Gobuf*) // save state in Gobuf; setjmp TEXT runtime·gosave(SB),NOSPLIT,$-4-4 MOVW buf+0(FP), R0 MOVW R13, gobuf_sp(R0) MOVW LR, gobuf_pc(R0) MOVW g, gobuf_g(R0) MOVW $0, R11 MOVW R11, gobuf_lr(R0) MOVW R11, gobuf_ret(R0) MOVW R11, gobuf_ctxt(R0) RET // void gogo(Gobuf*) // restore state from Gobuf; longjmp TEXT runtime·gogo(SB),NOSPLIT,$-4-4 MOVW buf+0(FP), R1 MOVW gobuf_g(R1), R0 BL setg<>(SB) // NOTE: We updated g above, and we are about to update SP. // Until LR and PC are also updated, the g/SP/LR/PC quadruple // are out of sync and must not be used as the basis of a traceback. // Sigprof skips the traceback when SP is not within g's bounds, // and when the PC is inside this function, runtime.gogo. // Since we are about to update SP, until we complete runtime.gogo // we must not leave this function. In particular, no calls // after this point: it must be straight-line code until the // final B instruction. // See large comment in sigprof for more details. MOVW gobuf_sp(R1), R13 // restore SP==R13 MOVW gobuf_lr(R1), LR MOVW gobuf_ret(R1), R0 MOVW gobuf_ctxt(R1), R7 MOVW $0, R11 MOVW R11, gobuf_sp(R1) // clear to help garbage collector MOVW R11, gobuf_ret(R1) MOVW R11, gobuf_lr(R1) MOVW R11, gobuf_ctxt(R1) MOVW gobuf_pc(R1), R11 CMP R11, R11 // set condition codes for == test, needed by stack split B (R11) // func mcall(fn func(*g)) // Switch to m->g0's stack, call fn(g). // Fn must never return. 
It should gogo(&g->sched) // to keep running g. TEXT runtime·mcall(SB),NOSPLIT,$-4-4 // Save caller state in g->sched. MOVW R13, (g_sched+gobuf_sp)(g) MOVW LR, (g_sched+gobuf_pc)(g) MOVW $0, R11 MOVW R11, (g_sched+gobuf_lr)(g) MOVW g, (g_sched+gobuf_g)(g) // Switch to m->g0 & its stack, call fn. MOVW g, R1 MOVW g_m(g), R8 MOVW m_g0(R8), R0 BL setg<>(SB) CMP g, R1 B.NE 2(PC) B runtime·badmcall(SB) MOVB runtime·iscgo(SB), R11 CMP $0, R11 BL.NE runtime·save_g(SB) MOVW fn+0(FP), R0 MOVW (g_sched+gobuf_sp)(g), R13 SUB $8, R13 MOVW R1, 4(R13) MOVW R0, R7 MOVW 0(R0), R0 BL (R0) B runtime·badmcall2(SB) RET // systemstack_switch is a dummy routine that systemstack leaves at the bottom // of the G stack. We need to distinguish the routine that // lives at the bottom of the G stack from the one that lives // at the top of the system stack because the one at the top of // the system stack terminates the stack walk (see topofstack()). TEXT runtime·systemstack_switch(SB),NOSPLIT,$0-0 MOVW $0, R0 BL (R0) // clobber lr to ensure push {lr} is kept RET // func systemstack(fn func()) TEXT runtime·systemstack(SB),NOSPLIT,$0-4 MOVW fn+0(FP), R0 // R0 = fn MOVW g_m(g), R1 // R1 = m MOVW m_gsignal(R1), R2 // R2 = gsignal CMP g, R2 B.EQ noswitch MOVW m_g0(R1), R2 // R2 = g0 CMP g, R2 B.EQ noswitch MOVW m_curg(R1), R3 CMP g, R3 B.EQ switch // Bad: g is not gsignal, not g0, not curg. What is it? // Hide call from linker nosplit analysis. MOVW $runtime·badsystemstack(SB), R0 BL (R0) switch: // save our state in g->sched. Pretend to // be systemstack_switch if the G stack is scanned. 
MOVW $runtime·systemstack_switch(SB), R3 #ifdef GOOS_nacl ADD $4, R3, R3 // get past nacl-insert bic instruction #endif ADD $4, R3, R3 // get past push {lr} MOVW R3, (g_sched+gobuf_pc)(g) MOVW R13, (g_sched+gobuf_sp)(g) MOVW LR, (g_sched+gobuf_lr)(g) MOVW g, (g_sched+gobuf_g)(g) // switch to g0 MOVW R0, R5 MOVW R2, R0 BL setg<>(SB) MOVW R5, R0 MOVW (g_sched+gobuf_sp)(R2), R3 // make it look like mstart called systemstack on g0, to stop traceback SUB $4, R3, R3 MOVW $runtime·mstart(SB), R4 MOVW R4, 0(R3) MOVW R3, R13 // call target function MOVW R0, R7 MOVW 0(R0), R0 BL (R0) // switch back to g MOVW g_m(g), R1 MOVW m_curg(R1), R0 BL setg<>(SB) MOVW (g_sched+gobuf_sp)(g), R13 MOVW $0, R3 MOVW R3, (g_sched+gobuf_sp)(g) RET noswitch: MOVW R0, R7 MOVW 0(R0), R0 BL (R0) RET /* * support for morestack */ // Called during function prolog when more stack is needed. // R1 frame size // R3 prolog's LR // NB. we do not save R0 because we've forced 5c to pass all arguments // on the stack. // using frame size $-4 means do not save LR on stack. // // The traceback routines see morestack on a g0 as being // the top of a stack (for example, morestack calling newstack // calling the scheduler calling newm calling gc), so we must // record an argument size. For that purpose, it has no arguments. TEXT runtime·morestack(SB),NOSPLIT,$-4-0 // Cannot grow scheduler stack (m->g0). MOVW g_m(g), R8 MOVW m_g0(R8), R4 CMP g, R4 BL.EQ runtime·abort(SB) // Cannot grow signal stack (m->gsignal). MOVW m_gsignal(R8), R4 CMP g, R4 BL.EQ runtime·abort(SB) // Called from f. // Set g->sched to context in f. MOVW R7, (g_sched+gobuf_ctxt)(g) MOVW R13, (g_sched+gobuf_sp)(g) MOVW LR, (g_sched+gobuf_pc)(g) MOVW R3, (g_sched+gobuf_lr)(g) // Called from f. // Set m->morebuf to f's caller. MOVW R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC MOVW R13, (m_morebuf+gobuf_sp)(R8) // f's caller's SP MOVW $4(R13), R3 // f's argument pointer MOVW g, (m_morebuf+gobuf_g)(R8) // Call newstack on m->g0's stack. 
MOVW m_g0(R8), R0 BL setg<>(SB) MOVW (g_sched+gobuf_sp)(g), R13 BL runtime·newstack(SB) // Not reached, but make sure the return PC from the call to newstack // is still in this function, and not the beginning of the next. RET TEXT runtime·morestack_noctxt(SB),NOSPLIT,$-4-0 MOVW $0, R7 B runtime·morestack(SB) TEXT runtime·stackBarrier(SB),NOSPLIT,$0 // We came here via a RET to an overwritten LR. // R0 may be live. Other registers are available. // Get the original return PC, g.stkbar[g.stkbarPos].savedLRVal. MOVW (g_stkbar+slice_array)(g), R4 MOVW g_stkbarPos(g), R5 MOVW $stkbar__size, R6 MUL R5, R6 ADD R4, R6 MOVW stkbar_savedLRVal(R6), R6 // Record that this stack barrier was hit. ADD $1, R5 MOVW R5, g_stkbarPos(g) // Jump to the original return PC. B (R6) // reflectcall: call a function with the given argument list // func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. // Caution: ugly multiline assembly macros in your future! 
#define DISPATCH(NAME,MAXSIZE) \ CMP $MAXSIZE, R0; \ B.HI 3(PC); \ MOVW $NAME(SB), R1; \ B (R1) TEXT reflect·call(SB), NOSPLIT, $0-0 B ·reflectcall(SB) TEXT ·reflectcall(SB),NOSPLIT,$-4-20 MOVW argsize+12(FP), R0 DISPATCH(runtime·call16, 16) DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) DISPATCH(runtime·call256, 256) DISPATCH(runtime·call512, 512) DISPATCH(runtime·call1024, 1024) DISPATCH(runtime·call2048, 2048) DISPATCH(runtime·call4096, 4096) DISPATCH(runtime·call8192, 8192) DISPATCH(runtime·call16384, 16384) DISPATCH(runtime·call32768, 32768) DISPATCH(runtime·call65536, 65536) DISPATCH(runtime·call131072, 131072) DISPATCH(runtime·call262144, 262144) DISPATCH(runtime·call524288, 524288) DISPATCH(runtime·call1048576, 1048576) DISPATCH(runtime·call2097152, 2097152) DISPATCH(runtime·call4194304, 4194304) DISPATCH(runtime·call8388608, 8388608) DISPATCH(runtime·call16777216, 16777216) DISPATCH(runtime·call33554432, 33554432) DISPATCH(runtime·call67108864, 67108864) DISPATCH(runtime·call134217728, 134217728) DISPATCH(runtime·call268435456, 268435456) DISPATCH(runtime·call536870912, 536870912) DISPATCH(runtime·call1073741824, 1073741824) MOVW $runtime·badreflectcall(SB), R1 B (R1) #define CALLFN(NAME,MAXSIZE) \ TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ MOVW argptr+8(FP), R0; \ MOVW argsize+12(FP), R2; \ ADD $4, R13, R1; \ CMP $0, R2; \ B.EQ 5(PC); \ MOVBU.P 1(R0), R5; \ MOVBU.P R5, 1(R1); \ SUB $1, R2, R2; \ B -5(PC); \ /* call function */ \ MOVW f+4(FP), R7; \ MOVW (R7), R0; \ PCDATA $PCDATA_StackMapIndex, $0; \ BL (R0); \ /* copy return values back */ \ MOVW argptr+8(FP), R0; \ MOVW argsize+12(FP), R2; \ MOVW retoffset+16(FP), R3; \ ADD $4, R13, R1; \ ADD R3, R1; \ ADD R3, R0; \ SUB R3, R2; \ loop: \ CMP $0, R2; \ B.EQ end; \ MOVBU.P 1(R1), R5; \ MOVBU.P R5, 1(R0); \ SUB $1, R2, R2; \ B loop; \ end: \ /* execute write barrier updates */ \ MOVW argtype+0(FP), R1; \ MOVW 
argptr+8(FP), R0; \ MOVW argsize+12(FP), R2; \ MOVW retoffset+16(FP), R3; \ MOVW R1, 4(R13); \ MOVW R0, 8(R13); \ MOVW R2, 12(R13); \ MOVW R3, 16(R13); \ BL runtime·callwritebarrier(SB); \ RET CALLFN(·call16, 16) CALLFN(·call32, 32) CALLFN(·call64, 64) CALLFN(·call128, 128) CALLFN(·call256, 256) CALLFN(·call512, 512) CALLFN(·call1024, 1024) CALLFN(·call2048, 2048) CALLFN(·call4096, 4096) CALLFN(·call8192, 8192) CALLFN(·call16384, 16384) CALLFN(·call32768, 32768) CALLFN(·call65536, 65536) CALLFN(·call131072, 131072) CALLFN(·call262144, 262144) CALLFN(·call524288, 524288) CALLFN(·call1048576, 1048576) CALLFN(·call2097152, 2097152) CALLFN(·call4194304, 4194304) CALLFN(·call8388608, 8388608) CALLFN(·call16777216, 16777216) CALLFN(·call33554432, 33554432) CALLFN(·call67108864, 67108864) CALLFN(·call134217728, 134217728) CALLFN(·call268435456, 268435456) CALLFN(·call536870912, 536870912) CALLFN(·call1073741824, 1073741824) // void jmpdefer(fn, sp); // called from deferreturn. // 1. grab stored LR for caller // 2. sub 4 bytes to get back to BL deferreturn // 3. B to fn // TODO(rsc): Push things on stack and then use pop // to load all registers simultaneously, so that a profiling // interrupt can never see mismatched SP/LR/PC. // (And double-check that pop is atomic in that way.) TEXT runtime·jmpdefer(SB),NOSPLIT,$0-8 MOVW 0(R13), LR MOVW $-4(LR), LR // BL deferreturn MOVW fv+0(FP), R7 MOVW argp+4(FP), R13 MOVW $-4(R13), R13 // SP is 4 below argp, due to saved LR MOVW 0(R7), R1 B (R1) // Save state of caller into g->sched. Smashes R11. TEXT gosave<>(SB),NOSPLIT,$0 MOVW LR, (g_sched+gobuf_pc)(g) MOVW R13, (g_sched+gobuf_sp)(g) MOVW $0, R11 MOVW R11, (g_sched+gobuf_lr)(g) MOVW R11, (g_sched+gobuf_ret)(g) MOVW R11, (g_sched+gobuf_ctxt)(g) RET // func asmcgocall(fn, arg unsafe.Pointer) int32 // Call fn(arg) on the scheduler stack, // aligned appropriately for the gcc ABI. // See cgocall.go for more details. 
TEXT ·asmcgocall(SB),NOSPLIT,$0-12 MOVW fn+0(FP), R1 MOVW arg+4(FP), R0 MOVW R13, R2 MOVW g, R4 // Figure out if we need to switch to m->g0 stack. // We get called to create new OS threads too, and those // come in on the m->g0 stack already. MOVW g_m(g), R8 MOVW m_g0(R8), R3 CMP R3, g BEQ g0 BL gosave<>(SB) MOVW R0, R5 MOVW R3, R0 BL setg<>(SB) MOVW R5, R0 MOVW (g_sched+gobuf_sp)(g), R13 // Now on a scheduling stack (a pthread-created stack). g0: SUB $24, R13 BIC $0x7, R13 // alignment for gcc ABI MOVW R4, 20(R13) // save old g MOVW (g_stack+stack_hi)(R4), R4 SUB R2, R4 MOVW R4, 16(R13) // save depth in stack (can't just save SP, as stack might be copied during a callback) BL (R1) // Restore registers, g, stack pointer. MOVW R0, R5 MOVW 20(R13), R0 BL setg<>(SB) MOVW (g_stack+stack_hi)(g), R1 MOVW 16(R13), R2 SUB R2, R1 MOVW R5, R0 MOVW R1, R13 MOVW R0, ret+8(FP) RET // cgocallback(void (*fn)(void*), void *frame, uintptr framesize) // Turn the fn into a Go func (by taking its address) and call // cgocallback_gofunc. TEXT runtime·cgocallback(SB),NOSPLIT,$12-12 MOVW $fn+0(FP), R0 MOVW R0, 4(R13) MOVW frame+4(FP), R0 MOVW R0, 8(R13) MOVW framesize+8(FP), R0 MOVW R0, 12(R13) MOVW $runtime·cgocallback_gofunc(SB), R0 BL (R0) RET // cgocallback_gofunc(void (*fn)(void*), void *frame, uintptr framesize) // See cgocall.go for more details. TEXT ·cgocallback_gofunc(SB),NOSPLIT,$8-12 NO_LOCAL_POINTERS // Load m and g from thread-local storage. MOVB runtime·iscgo(SB), R0 CMP $0, R0 BL.NE runtime·load_g(SB) // If g is nil, Go did not create the current thread. // Call needm to obtain one for temporary use. // In this case, we're running on the thread stack, so there's // lots of space, but the linker doesn't know. Hide the call from // the linker analysis by using an indirect call. CMP $0, g B.NE havem MOVW g, savedm-4(SP) // g is zero, so is m. 
MOVW $runtime·needm(SB), R0 BL (R0) // Set m->sched.sp = SP, so that if a panic happens // during the function we are about to execute, it will // have a valid SP to run on the g0 stack. // The next few lines (after the havem label) // will save this SP onto the stack and then write // the same SP back to m->sched.sp. That seems redundant, // but if an unrecovered panic happens, unwindm will // restore the g->sched.sp from the stack location // and then systemstack will try to use it. If we don't set it here, // that restored SP will be uninitialized (typically 0) and // will not be usable. MOVW g_m(g), R8 MOVW m_g0(R8), R3 MOVW R13, (g_sched+gobuf_sp)(R3) havem: MOVW g_m(g), R8 MOVW R8, savedm-4(SP) // Now there's a valid m, and we're running on its m->g0. // Save current m->g0->sched.sp on stack and then set it to SP. // Save current sp in m->g0->sched.sp in preparation for // switch back to m->curg stack. // NOTE: unwindm knows that the saved g->sched.sp is at 4(R13) aka savedsp-8(SP). MOVW m_g0(R8), R3 MOVW (g_sched+gobuf_sp)(R3), R4 MOVW R4, savedsp-8(SP) MOVW R13, (g_sched+gobuf_sp)(R3) // Switch to m->curg stack and call runtime.cgocallbackg. // Because we are taking over the execution of m->curg // but *not* resuming what had been running, we need to // save that information (m->curg->sched) so we can restore it. // We can restore m->curg->sched.sp easily, because calling // runtime.cgocallbackg leaves SP unchanged upon return. // To save m->curg->sched.pc, we push it onto the stack. // This has the added benefit that it looks to the traceback // routine like cgocallbackg is going to return to that // PC (because the frame we allocate below has the same // size as cgocallback_gofunc's frame declared above) // so that the traceback will seamlessly trace back into // the earlier calls. // // In the new goroutine, -8(SP) and -4(SP) are unused. 
MOVW m_curg(R8), R0 BL setg<>(SB) MOVW (g_sched+gobuf_sp)(g), R4 // prepare stack as R4 MOVW (g_sched+gobuf_pc)(g), R5 MOVW R5, -12(R4) MOVW $-12(R4), R13 BL runtime·cgocallbackg(SB) // Restore g->sched (== m->curg->sched) from saved values. MOVW 0(R13), R5 MOVW R5, (g_sched+gobuf_pc)(g) MOVW $12(R13), R4 MOVW R4, (g_sched+gobuf_sp)(g) // Switch back to m->g0's stack and restore m->g0->sched.sp. // (Unlike m->curg, the g0 goroutine never uses sched.pc, // so we do not have to restore it.) MOVW g_m(g), R8 MOVW m_g0(R8), R0 BL setg<>(SB) MOVW (g_sched+gobuf_sp)(g), R13 MOVW savedsp-8(SP), R4 MOVW R4, (g_sched+gobuf_sp)(g) // If the m on entry was nil, we called needm above to borrow an m // for the duration of the call. Since the call is over, return it with dropm. MOVW savedm-4(SP), R6 CMP $0, R6 B.NE 3(PC) MOVW $runtime·dropm(SB), R0 BL (R0) // Done! RET // void setg(G*); set g. for use by needm. TEXT runtime·setg(SB),NOSPLIT,$-4-4 MOVW gg+0(FP), R0 B setg<>(SB) TEXT setg<>(SB),NOSPLIT,$-4-0 MOVW R0, g // Save g to thread-local storage. MOVB runtime·iscgo(SB), R0 CMP $0, R0 B.EQ 2(PC) B runtime·save_g(SB) MOVW g, R0 RET TEXT runtime·getcallerpc(SB),NOSPLIT,$4-8 MOVW 8(R13), R0 // LR saved by caller MOVW runtime·stackBarrierPC(SB), R1 CMP R0, R1 BNE nobar // Get original return PC. BL runtime·nextBarrierPC(SB) MOVW 4(R13), R0 nobar: MOVW R0, ret+4(FP) RET TEXT runtime·setcallerpc(SB),NOSPLIT,$4-8 MOVW pc+4(FP), R0 MOVW 8(R13), R1 MOVW runtime·stackBarrierPC(SB), R2 CMP R1, R2 BEQ setbar MOVW R0, 8(R13) // set LR in caller RET setbar: // Set the stack barrier return PC. 
MOVW R0, 4(R13) BL runtime·setNextBarrierPC(SB) RET TEXT runtime·getcallersp(SB),NOSPLIT,$-4-8 MOVW argp+0(FP), R0 MOVW $-4(R0), R0 MOVW R0, ret+4(FP) RET TEXT runtime·emptyfunc(SB),0,$0-0 RET TEXT runtime·abort(SB),NOSPLIT,$-4-0 MOVW $0, R0 MOVW (R0), R1 // bool armcas(int32 *val, int32 old, int32 new) // Atomically: // if(*val == old){ // *val = new; // return 1; // }else // return 0; // // To implement runtime·cas in sys_$GOOS_arm.s // using the native instructions, use: // // TEXT runtime·cas(SB),NOSPLIT,$0 // B runtime·armcas(SB) // TEXT runtime·armcas(SB),NOSPLIT,$0-13 MOVW valptr+0(FP), R1 MOVW old+4(FP), R2 MOVW new+8(FP), R3 casl: LDREX (R1), R0 CMP R0, R2 BNE casfail MOVB runtime·goarm(SB), R11 CMP $7, R11 BLT 2(PC) WORD $0xf57ff05a // dmb ishst STREX R3, (R1), R0 CMP $0, R0 BNE casl MOVW $1, R0 MOVB runtime·goarm(SB), R11 CMP $7, R11 BLT 2(PC) WORD $0xf57ff05b // dmb ish MOVB R0, ret+12(FP) RET casfail: MOVW $0, R0 MOVB R0, ret+12(FP) RET TEXT runtime·casuintptr(SB),NOSPLIT,$0-13 B runtime·cas(SB) TEXT runtime·atomicloaduintptr(SB),NOSPLIT,$0-8 B runtime·atomicload(SB) TEXT runtime·atomicloaduint(SB),NOSPLIT,$0-8 B runtime·atomicload(SB) TEXT runtime·atomicstoreuintptr(SB),NOSPLIT,$0-8 B runtime·atomicstore(SB) // armPublicationBarrier is a native store/store barrier for ARMv7+. // On earlier ARM revisions, armPublicationBarrier is a no-op. // This will not work on SMP ARMv6 machines, if any are in use. 
// To implement publiationBarrier in sys_$GOOS_arm.s using the native // instructions, use: // // TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0 // B runtime·armPublicationBarrier(SB) // TEXT runtime·armPublicationBarrier(SB),NOSPLIT,$-4-0 MOVB runtime·goarm(SB), R11 CMP $7, R11 BLT 2(PC) WORD $0xf57ff05e // DMB ST RET // AES hashing not implemented for ARM TEXT runtime·aeshash(SB),NOSPLIT,$-4-0 MOVW $0, R0 MOVW (R0), R1 TEXT runtime·aeshash32(SB),NOSPLIT,$-4-0 MOVW $0, R0 MOVW (R0), R1 TEXT runtime·aeshash64(SB),NOSPLIT,$-4-0 MOVW $0, R0 MOVW (R0), R1 TEXT runtime·aeshashstr(SB),NOSPLIT,$-4-0 MOVW $0, R0 MOVW (R0), R1 // memhash_varlen(p unsafe.Pointer, h seed) uintptr // redirects to memhash(p, h, size) using the size // stored in the closure. TEXT runtime·memhash_varlen(SB),NOSPLIT,$16-12 GO_ARGS NO_LOCAL_POINTERS MOVW p+0(FP), R0 MOVW h+4(FP), R1 MOVW 4(R7), R2 MOVW R0, 4(R13) MOVW R1, 8(R13) MOVW R2, 12(R13) BL runtime·memhash(SB) MOVW 16(R13), R0 MOVW R0, ret+8(FP) RET TEXT runtime·memeq(SB),NOSPLIT,$-4-13 MOVW a+0(FP), R1 MOVW b+4(FP), R2 MOVW size+8(FP), R3 ADD R1, R3, R6 MOVW $1, R0 MOVB R0, ret+12(FP) loop: CMP R1, R6 RET.EQ MOVBU.P 1(R1), R4 MOVBU.P 1(R2), R5 CMP R4, R5 BEQ loop MOVW $0, R0 MOVB R0, ret+12(FP) RET // memequal_varlen(a, b unsafe.Pointer) bool TEXT runtime·memequal_varlen(SB),NOSPLIT,$16-9 MOVW a+0(FP), R0 MOVW b+4(FP), R1 CMP R0, R1 BEQ eq MOVW 4(R7), R2 // compiler stores size at offset 4 in the closure MOVW R0, 4(R13) MOVW R1, 8(R13) MOVW R2, 12(R13) BL runtime·memeq(SB) MOVB 16(R13), R0 MOVB R0, ret+8(FP) RET eq: MOVW $1, R0 MOVB R0, ret+8(FP) RET TEXT runtime·cmpstring(SB),NOSPLIT,$-4-20 MOVW s1_base+0(FP), R2 MOVW s1_len+4(FP), R0 MOVW s2_base+8(FP), R3 MOVW s2_len+12(FP), R1 ADD $20, R13, R7 B runtime·cmpbody(SB) TEXT bytes·Compare(SB),NOSPLIT,$-4-28 MOVW s1+0(FP), R2 MOVW s1+4(FP), R0 MOVW s2+12(FP), R3 MOVW s2+16(FP), R1 ADD $28, R13, R7 B runtime·cmpbody(SB) // On entry: // R0 is the length of s1 // R1 is the length of s2 // R2 
points to the start of s1 // R3 points to the start of s2 // R7 points to return value (-1/0/1 will be written here) // // On exit: // R4, R5, and R6 are clobbered TEXT runtime·cmpbody(SB),NOSPLIT,$-4-0 CMP R2, R3 BEQ samebytes CMP R0, R1 MOVW R0, R6 MOVW.LT R1, R6 // R6 is min(R0, R1) ADD R2, R6 // R2 is current byte in s1, R6 is last byte in s1 to compare loop: CMP R2, R6 BEQ samebytes // all compared bytes were the same; compare lengths MOVBU.P 1(R2), R4 MOVBU.P 1(R3), R5 CMP R4, R5 BEQ loop // bytes differed MOVW.LT $1, R0 MOVW.GT $-1, R0 MOVW R0, (R7) RET samebytes: CMP R0, R1 MOVW.LT $1, R0 MOVW.GT $-1, R0 MOVW.EQ $0, R0 MOVW R0, (R7) RET // eqstring tests whether two strings are equal. // The compiler guarantees that strings passed // to eqstring have equal length. // See runtime_test.go:eqstring_generic for // equivalent Go code. TEXT runtime·eqstring(SB),NOSPLIT,$-4-17 MOVW s1str+0(FP), R2 MOVW s2str+8(FP), R3 MOVW $1, R8 MOVB R8, v+16(FP) CMP R2, R3 RET.EQ MOVW s1len+4(FP), R0 ADD R2, R0, R6 loop: CMP R2, R6 RET.EQ MOVBU.P 1(R2), R4 MOVBU.P 1(R3), R5 CMP R4, R5 BEQ loop MOVW $0, R8 MOVB R8, v+16(FP) RET // TODO: share code with memeq? 
TEXT bytes·Equal(SB),NOSPLIT,$0-25 MOVW a_len+4(FP), R1 MOVW b_len+16(FP), R3 CMP R1, R3 // unequal lengths are not equal B.NE notequal MOVW a+0(FP), R0 MOVW b+12(FP), R2 ADD R0, R1 // end loop: CMP R0, R1 B.EQ equal // reached the end MOVBU.P 1(R0), R4 MOVBU.P 1(R2), R5 CMP R4, R5 B.EQ loop notequal: MOVW $0, R0 MOVBU R0, ret+24(FP) RET equal: MOVW $1, R0 MOVBU R0, ret+24(FP) RET TEXT bytes·IndexByte(SB),NOSPLIT,$0-20 MOVW s+0(FP), R0 MOVW s_len+4(FP), R1 MOVBU c+12(FP), R2 // byte to find MOVW R0, R4 // store base for later ADD R0, R1 // end _loop: CMP R0, R1 B.EQ _notfound MOVBU.P 1(R0), R3 CMP R2, R3 B.NE _loop SUB $1, R0 // R0 will be one beyond the position we want SUB R4, R0 // remove base MOVW R0, ret+16(FP) RET _notfound: MOVW $-1, R0 MOVW R0, ret+16(FP) RET TEXT strings·IndexByte(SB),NOSPLIT,$0-16 MOVW s+0(FP), R0 MOVW s_len+4(FP), R1 MOVBU c+8(FP), R2 // byte to find MOVW R0, R4 // store base for later ADD R0, R1 // end _sib_loop: CMP R0, R1 B.EQ _sib_notfound MOVBU.P 1(R0), R3 CMP R2, R3 B.NE _sib_loop SUB $1, R0 // R0 will be one beyond the position we want SUB R4, R0 // remove base MOVW R0, ret+12(FP) RET _sib_notfound: MOVW $-1, R0 MOVW R0, ret+12(FP) RET TEXT runtime·fastrand1(SB),NOSPLIT,$-4-4 MOVW g_m(g), R1 MOVW m_fastrand(R1), R0 ADD.S R0, R0 EOR.MI $0x88888eef, R0 MOVW R0, m_fastrand(R1) MOVW R0, ret+0(FP) RET TEXT runtime·return0(SB),NOSPLIT,$0 MOVW $0, R0 RET TEXT runtime·procyield(SB),NOSPLIT,$-4 MOVW cycles+0(FP), R1 MOVW $0, R0 yieldloop: CMP R0, R1 B.NE 2(PC) RET SUB $1, R1 B yieldloop // Called from cgo wrappers, this function returns g->m->curg.stack.hi. // Must obey the gcc calling convention. TEXT _cgo_topofstack(SB),NOSPLIT,$8 // R11 and g register are clobbered by load_g. They are // callee-save in the gcc calling convention, so save them here. 
MOVW R11, saveR11-4(SP) MOVW g, saveG-8(SP) BL runtime·load_g(SB) MOVW g_m(g), R0 MOVW m_curg(R0), R0 MOVW (g_stack+stack_hi)(R0), R0 MOVW saveG-8(SP), g MOVW saveR11-4(SP), R11 RET // The top-most function running on a goroutine // returns to goexit+PCQuantum. TEXT runtime·goexit(SB),NOSPLIT,$-4-0 MOVW R0, R0 // NOP BL runtime·goexit1(SB) // does not return // traceback from goexit1 must hit code range of goexit MOVW R0, R0 // NOP TEXT runtime·prefetcht0(SB),NOSPLIT,$0-4 RET TEXT runtime·prefetcht1(SB),NOSPLIT,$0-4 RET TEXT runtime·prefetcht2(SB),NOSPLIT,$0-4 RET TEXT runtime·prefetchnta(SB),NOSPLIT,$0-4 RET // x -> x/1000000, x%1000000, called from Go with args, results on stack. TEXT runtime·usplit(SB),NOSPLIT,$0-12 MOVW x+0(FP), R0 CALL runtime·usplitR0(SB) MOVW R0, q+4(FP) MOVW R1, r+8(FP) RET // R0, R1 = R0/1000000, R0%1000000 TEXT runtime·usplitR0(SB),NOSPLIT,$0 // magic multiply to avoid software divide without available m. // see output of go tool compile -S for x/1000000. MOVW R0, R3 MOVW $1125899907, R1 MULLU R1, R0, (R0, R1) MOVW R0>>18, R0 MOVW $1000000, R1 MULU R0, R1 SUB R1, R3, R1 RET but *not* resuming what had been running, we need to // save that information (m->curg->sched) so we can restore it. // We can restore m->curg->sched.sp easily, because calling // runtime.cgocallbackg leaves SP unchanged upon return. // To save m->curg->sched.pc, we push it onto the stack. // This has the added benefit that it looks to the traceback // routine like cgocallbackg is going to return to that // PC (because the frame we allocate below has the same // size as cgocago1.5/src/runtime/defs_plan9_386.go 640 0 0 2677 12563574733 15763ustar00millermillerpackage runtime const _PAGESIZE = 0x1000 type ureg struct { di uint32 /* general registers */ si uint32 /* ... */ bp uint32 /* ... */ nsp uint32 bx uint32 /* ... */ dx uint32 /* ... */ cx uint32 /* ... */ ax uint32 /* ... */ gs uint32 /* data segments */ fs uint32 /* ... */ es uint32 /* ... */ ds uint32 /* ... 
*/ trap uint32 /* trap _type */ ecode uint32 /* error code (or zero) */ pc uint32 /* pc */ cs uint32 /* old context */ flags uint32 /* old flags */ sp uint32 ss uint32 /* old stack segment */ } type sigctxt struct { u *ureg } func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) } func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } func (c *sigctxt) lr() uintptr { return uintptr(0) } func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) } func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) } func (c *sigctxt) setlr(x uintptr) {} func (c *sigctxt) savelr(x uintptr) {} func dumpregs(u *ureg) { print("ax ", hex(u.ax), "\n") print("bx ", hex(u.bx), "\n") print("cx ", hex(u.cx), "\n") print("dx ", hex(u.dx), "\n") print("di ", hex(u.di), "\n") print("si ", hex(u.si), "\n") print("bp ", hex(u.bp), "\n") print("sp ", hex(u.sp), "\n") print("pc ", hex(u.pc), "\n") print("flags ", hex(u.flags), "\n") print("cs ", hex(u.cs), "\n") print("fs ", hex(u.fs), "\n") print("gs ", hex(u.gs), "\n") } func sigpanictramp() {} 4(R0), R0 MOVW R0, ret+4(FP) RET TEXT runtime·emptyfunc(SB),0go1.5/src/runtime/defs_plan9_amd64.go 640 0 0 3201 12563575055 16334ustar00millermillerpackage runtime const _PAGESIZE = 0x1000 type ureg struct { ax uint64 bx uint64 cx uint64 dx uint64 si uint64 di uint64 bp uint64 r8 uint64 r9 uint64 r10 uint64 r11 uint64 r12 uint64 r13 uint64 r14 uint64 r15 uint64 ds uint16 es uint16 fs uint16 gs uint16 _type uint64 error uint64 /* error code (or zero) */ ip uint64 /* pc */ cs uint64 /* old context */ flags uint64 /* old flags */ sp uint64 /* sp */ ss uint64 /* old stack segment */ } type sigctxt struct { u *ureg } func (c *sigctxt) pc() uintptr { return uintptr(c.u.ip) } func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } func (c *sigctxt) lr() uintptr { return uintptr(0) } func (c *sigctxt) setpc(x uintptr) { c.u.ip = uint64(x) } func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint64(x) } func (c *sigctxt) setlr(x uintptr) {} func (c *sigctxt) 
savelr(x uintptr) {} func dumpregs(u *ureg) { print("ax ", hex(u.ax), "\n") print("bx ", hex(u.bx), "\n") print("cx ", hex(u.cx), "\n") print("dx ", hex(u.dx), "\n") print("di ", hex(u.di), "\n") print("si ", hex(u.si), "\n") print("bp ", hex(u.bp), "\n") print("sp ", hex(u.sp), "\n") print("r8 ", hex(u.r8), "\n") print("r9 ", hex(u.r9), "\n") print("r10 ", hex(u.r10), "\n") print("r11 ", hex(u.r11), "\n") print("r12 ", hex(u.r12), "\n") print("r13 ", hex(u.r13), "\n") print("r14 ", hex(u.r14), "\n") print("r15 ", hex(u.r15), "\n") print("ip ", hex(u.ip), "\n") print("flags ", hex(u.flags), "\n") print("cs ", hex(u.cs), "\n") print("fs ", hex(u.fs), "\n") print("gs ", hex(u.gs), "\n") } func sigpanictramp() {} FP), R0 MOVW h+4(FP), R1 MOVW 4(R7), R2 MOVW R0, 4(R13) MOVW R1, 8(R13) MOVW R2, 12(R13) BL runtime·memhash(SB) MOVW 16(R13), R0 MOVW R0, ret+8(FP) RET TEXT runtime·memeq(SB),NOSPLIT,$-4-13 MOVW a+0(FP), R1 MOVW b+4(FP), R2 MOVW size+8(FP), R3 ADD R1, R3, R6 MOVW $1, R0 MOVB R0, ret+12(FP) loop: CMP R1, R6 RET.EQ MOVBU.P 1(R1), R4 MOVBU.P 1(R2), R5 CMP R4, Rgo1.5/src/runtime/defs_plan9_arm.go 640 0 0 3066 12563574061 16205ustar00millermillerpackage runtime const _PAGESIZE = 0x1000 type ureg struct { r0 uint32 /* general registers */ r1 uint32 /* ... */ r2 uint32 /* ... */ r3 uint32 /* ... */ r4 uint32 /* ... */ r5 uint32 /* ... */ r6 uint32 /* ... */ r7 uint32 /* ... */ r8 uint32 /* ... */ r9 uint32 /* ... */ r10 uint32 /* ... */ r11 uint32 /* ... */ r12 uint32 /* ... */ sp uint32 link uint32 /* ... 
*/ trap uint32 /* trap type */ psr uint32 pc uint32 /* interrupted addr */ } type sigctxt struct { u *ureg } func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) } func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } func (c *sigctxt) lr() uintptr { return uintptr(c.u.link) } func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) } func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) } func (c *sigctxt) setlr(x uintptr) { c.u.link = uint32(x) } func (c *sigctxt) savelr(x uintptr) { c.u.r0 = uint32(x) } func dumpregs(u *ureg) { print("r0 ", hex(u.r0), "\n") print("r1 ", hex(u.r1), "\n") print("r2 ", hex(u.r2), "\n") print("r3 ", hex(u.r3), "\n") print("r4 ", hex(u.r4), "\n") print("r5 ", hex(u.r5), "\n") print("r6 ", hex(u.r6), "\n") print("r7 ", hex(u.r7), "\n") print("r8 ", hex(u.r8), "\n") print("r9 ", hex(u.r9), "\n") print("r10 ", hex(u.r10), "\n") print("r11 ", hex(u.r11), "\n") print("r12 ", hex(u.r12), "\n") print("sp ", hex(u.sp), "\n") print("link ", hex(u.link), "\n") print("pc ", hex(u.pc), "\n") print("psr ", hex(u.psr), "\n") } func sigpanictramp() +4(FP), R1 MOVW b_len+16(FP), R3 CMP R1, R3 // unequal lengths are not equal B.NE notequal MOVW a+0(FP), R0 MOVW b+12(FP), R2 ADD R0, R1 // end loop: CMP R0, R1 B.EQ equal // reached the end MOVBU.P 1(R0), R4 MOVBU.P 1(R2), R5 CMP R4, R5 B.EQ loop notequal: MOVW $0, R0 MOVBU R0, ret+24(FP) RET equal: MOVW $1, R0 MOVBU R0, ret+24(FP) RET TEXT bytes·IndexByte(SB),NOSPLIT,$0-20 MOVW s+0(FP), R0 MOVW s_len+4(FP), R1 MOVBU c+12go1.5/src/runtime/os3_plan9.go 640 0 0 6755 12563635470 15143ustar00millermiller// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime import "unsafe" // May run during STW, so write barriers are not allowed. 
//go:nowritebarrier func sighandler(_ureg *ureg, note *byte, gp *g) int { _g_ := getg() var t sigTabT var docrash bool var sig int var flags int c := &sigctxt{_ureg} notestr := gostringnocopy(note) // The kernel will never pass us a nil note or ureg so we probably // made a mistake somewhere in sigtramp. if _ureg == nil || note == nil { print("sighandler: ureg ", _ureg, " note ", note, "\n") goto Throw } // Check that the note is no more than ERRMAX bytes (including // the trailing NUL). We should never receive a longer note. if len(notestr) > _ERRMAX-1 { print("sighandler: note is longer than ERRMAX\n") goto Throw } // See if the note matches one of the patterns in sigtab. // Notes that do not match any pattern can be handled at a higher // level by the program but will otherwise be ignored. flags = _SigNotify for sig, t = range sigtable { if hasprefix(notestr, t.name) { flags = t.flags break } } if flags&_SigGoExit != 0 { exits((*byte)(add(unsafe.Pointer(note), 9))) // Strip "go: exit " prefix. } if flags&_SigPanic != 0 { // Copy the error string from sigtramp's stack into m->notesig so // we can reliably access it from the panic routines. memmove(unsafe.Pointer(_g_.m.notesig), unsafe.Pointer(note), uintptr(len(notestr)+1)) gp.sig = uint32(sig) gp.sigpc = c.pc() pc := uintptr(c.pc()) sp := uintptr(c.sp()) // If we don't recognize the PC as code // but we do recognize the top pointer on the stack as code, // then assume this was a call to non-code and treat like // pc == 0, to make unwinding show the context. if pc != 0 && findfunc(pc) == nil && findfunc(*(*uintptr)(unsafe.Pointer(sp))) != nil { pc = 0 } // IF LR exists, it must be saved to the stack before // entry to sigpanic so that panics in leaf // functions are correctly handled. This smashes // the stack frame but we're not going back there // anyway. if usesLR { c.savelr(c.lr()) } // If PC == 0, probably panicked because of a call to a nil func. 
// Not faking that as the return address will make the trace look like a call // to sigpanic instead. (Otherwise the trace will end at // sigpanic and we won't get to see who faulted). if pc != 0 { if usesLR { c.setlr(pc) } else { if regSize > ptrSize { sp -= ptrSize *(*uintptr)(unsafe.Pointer(sp)) = 0 } sp -= ptrSize *(*uintptr)(unsafe.Pointer(sp)) = pc c.setsp(sp) } } if usesLR { c.setpc(funcPC(sigpanictramp)) } else { c.setpc(funcPC(sigpanic)) } return _NCONT } if flags&_SigNotify != 0 { if sendNote(note) { return _NCONT } } if flags&_SigKill != 0 { goto Exit } if flags&_SigThrow == 0 { return _NCONT } Throw: _g_.m.throwing = 1 _g_.m.caughtsig.set(gp) startpanic() print(notestr, "\n") print("PC=", hex(c.pc()), "\n") print("\n") if gotraceback(&docrash) > 0 { goroutineheader(gp) tracebacktrap(c.pc(), c.sp(), 0, gp) tracebackothers(gp) print("\n") dumpregs(_ureg) } if docrash { crash() } Exit: goexitsall(note) exits(note) return _NDFLT // not reached } func sigenable(sig uint32) { } func sigdisable(sig uint32) { } func sigignore(sig uint32) { } func resetcpuprofiler(hz int32) { // TODO: Enable profiling interrupts. getg().m.profilehz = hz } lags uint32 /* old go1.5/src/runtime/os_plan9.go 640 0 0 5463 12562734656 15060ustar00millermiller// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package runtime import "unsafe" func closefd(fd int32) int32 //go:noescape func open(name *byte, mode, perm int32) int32 //go:noescape func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 //go:noescape func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 func seek(fd int32, offset int64, whence int32) int64 //go:noescape func exits(msg *byte) //go:noescape func brk_(addr unsafe.Pointer) int32 func sleep(ms int32) int32 func rfork(flags int32) int32 //go:noescape func plan9_semacquire(addr *uint32, block int32) int32 //go:noescape func plan9_tsemacquire(addr *uint32, ms int32) int32 //go:noescape func plan9_semrelease(addr *uint32, count int32) int32 //go:noescape func notify(fn unsafe.Pointer) int32 func noted(mode int32) int32 //go:noescape func nsec(*int64) int64 //go:noescape func sigtramp(ureg, msg unsafe.Pointer) func setfpmasks() //go:noescape func tstart_plan9(newm *m) func errstr() string type _Plink uintptr //go:linkname os_sigpipe os.sigpipe func os_sigpipe() { throw("too many writes on closed pipe") } func sigpanic() { g := getg() if !canpanic(g) { throw("unexpected signal during runtime execution") } note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig))) switch g.sig { case _SIGRFAULT, _SIGWFAULT: i := index(note, "addr=") if i >= 0 { i += 5 } else if i = index(note, "va="); i >= 0 { i += 3 } else { panicmem() } addr := note[i:] g.sigcode1 = uintptr(atolwhex(addr)) if g.sigcode1 < 0x1000 || g.paniconfault { panicmem() } print("unexpected fault address ", hex(g.sigcode1), "\n") throw("fault") case _SIGTRAP: if g.paniconfault { panicmem() } throw(note) case _SIGINTDIV: panicdivide() case _SIGFLOAT: panicfloat() default: panic(errorString(note)) } } func atolwhex(p string) int64 { for hasprefix(p, " ") || hasprefix(p, "\t") { p = p[1:] } neg := false if hasprefix(p, "-") || hasprefix(p, "+") { neg = p[0] == '-' p = p[1:] for hasprefix(p, " ") || hasprefix(p, "\t") { p = p[1:] } } var n int64 switch { case 
hasprefix(p, "0x"), hasprefix(p, "0X"): p = p[2:] for ; len(p) > 0; p = p[1:] { if '0' <= p[0] && p[0] <= '9' { n = n*16 + int64(p[0]-'0') } else if 'a' <= p[0] && p[0] <= 'f' { n = n*16 + int64(p[0]-'a'+10) } else if 'A' <= p[0] && p[0] <= 'F' { n = n*16 + int64(p[0]-'A'+10) } else { break } } case hasprefix(p, "0"): for ; len(p) > 0 && '0' <= p[0] && p[0] <= '7'; p = p[1:] { n = n*8 + int64(p[0]-'0') } default: for ; len(p) > 0 && '0' <= p[0] && p[0] <= '9'; p = p[1:] { n = n*10 + int64(p[0]-'0') } } if neg { n = -n } return n } emeq(SB),NOSPLIT,$-4-13 MOVW a+0(FP), R1 MOVW b+4(FP), R2 MOVW size+8(FP), R3 ADD R1, R3, R6 MOVW $1, R0 MOVB R0, ret+12(FP) loop: CMP R1, R6 RET.EQ MOVBU.P 1(R1), R4 MOVBU.P 1(R2), R5 CMP R4, Rgo1.5/src/runtime/os_plan9_arm.go 640 0 0 1005 12562734660 15676ustar00millermiller// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime func checkgoarm() { return // TODO(minux) } //go:nosplit func cputicks() int64 { // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. // TODO: need more entropy to better seed fastrand1. return nanotime() } gctxt struct { u *ureg } func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) } func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) } func (c *sigctxt) lr() uintptr { return uintptr(c.u.link) } func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) } func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) } func (c *sigctxt) setlr(x uintptr) { c.u.link = uint32(x) } func (c *sigctxt) savelr(x uintptr) { c.u.r0 = uint32(x) } func dumpregs(u *ureg) { print("r0 ", hex(u.r0), "\n") print("go1.5/src/runtime/rt0_plan9_arm.s 640 0 0 657 12562734657 15621ustar00millermiller// Copyright 2009 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" //in plan 9 argc is at top of stack followed by ptrs to arguments TEXT _rt0_arm_plan9(SB),NOSPLIT,$-4 MOVW R0, _tos(SB) MOVW 0(R13), R0 MOVW $4(R13), R1 MOVW.W R1, -4(R13) MOVW.W R0, -4(R13) B runtime·rt0_go(SB) GLOBL _tos(SB), NOPTR, $4 go1.5/src/runtime/signal_plan9.go 640 0 0 3515 12562734661 15704ustar00millermiller// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package runtime type sigTabT struct { flags int name string } // Incoming notes are compared against this table using strncmp, so the // order matters: longer patterns must appear before their prefixes. // There are _SIG constants in os2_plan9.go for the table index of some // of these. // // If you add entries to this table, you must respect the prefix ordering // and also update the constant values is os2_plan9.go. var sigtable = [...]sigTabT{ // Traps that we cannot be recovered. {_SigThrow, "sys: trap: debug exception"}, {_SigThrow, "sys: trap: invalid opcode"}, // We can recover from some memory errors in runtime·sigpanic. {_SigPanic, "sys: trap: fault read"}, // SIGRFAULT {_SigPanic, "sys: trap: fault write"}, // SIGWFAULT // We can also recover from math errors. {_SigPanic, "sys: trap: divide error"}, // SIGINTDIV {_SigPanic, "sys: fp:"}, // SIGFLOAT // All other traps are normally handled as if they were marked SigThrow. // We mark them SigPanic here so that debug.SetPanicOnFault will work. {_SigPanic, "sys: trap:"}, // SIGTRAP // Writes to a closed pipe can be handled if desired, otherwise they're ignored. {_SigNotify, "sys: write on closed pipe"}, // Other system notes are more serious and cannot be recovered. {_SigThrow, "sys:"}, // Issued to all other procs when calling runtime·exit. 
{_SigGoExit, "go: exit "}, // Kill is sent by external programs to cause an exit. {_SigKill, "kill"}, // Interrupts can be handled if desired, otherwise they cause an exit. {_SigNotify + _SigKill, "interrupt"}, {_SigNotify + _SigKill, "hangup"}, // Alarms can be handled if desired, otherwise they're ignored. {_SigNotify, "alarm"}, } 0 && findfunc(pc) == nil && findfunc(*(*uintptr)(unsafe.Pointer(sp))) != nil { pc = 0 } // IF LR exists, it must be saved to the stack before // entry to sigpanic so thgo1.5/src/runtime/sys_plan9_arm.s 640 0 0 16055 12567651370 15764ustar00millermiller// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "go_asm.h" #include "go_tls.h" #include "textflag.h" // from ../syscall/zsysnum_plan9.go #define SYS_SYSR1 0 #define SYS_BIND 2 #define SYS_CHDIR 3 #define SYS_CLOSE 4 #define SYS_DUP 5 #define SYS_ALARM 6 #define SYS_EXEC 7 #define SYS_EXITS 8 #define SYS_FAUTH 10 #define SYS_SEGBRK 12 #define SYS_OPEN 14 #define SYS_OSEEK 16 #define SYS_SLEEP 17 #define SYS_RFORK 19 #define SYS_PIPE 21 #define SYS_CREATE 22 #define SYS_FD2PATH 23 #define SYS_BRK_ 24 #define SYS_REMOVE 25 #define SYS_NOTIFY 28 #define SYS_NOTED 29 #define SYS_SEGATTACH 30 #define SYS_SEGDETACH 31 #define SYS_SEGFREE 32 #define SYS_SEGFLUSH 33 #define SYS_RENDEZVOUS 34 #define SYS_UNMOUNT 35 #define SYS_SEMACQUIRE 37 #define SYS_SEMRELEASE 38 #define SYS_SEEK 39 #define SYS_FVERSION 40 #define SYS_ERRSTR 41 #define SYS_STAT 42 #define SYS_FSTAT 43 #define SYS_WSTAT 44 #define SYS_FWSTAT 45 #define SYS_MOUNT 46 #define SYS_AWAIT 47 #define SYS_PREAD 50 #define SYS_PWRITE 51 #define SYS_TSEMACQUIRE 52 #define SYS_NSEC 53 //func open(name *byte, mode, perm int32) int32 TEXT runtime·open(SB),NOSPLIT,$0-16 MOVW $SYS_OPEN, R0 SWI 0 MOVW R0, ret+12(FP) RET //func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 TEXT 
runtime·pread(SB),NOSPLIT,$0-24 MOVW $SYS_PREAD, R0 SWI 0 MOVW R0, ret+20(FP) RET //func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32 TEXT runtime·pwrite(SB),NOSPLIT,$0-24 MOVW $SYS_PWRITE, R0 SWI 0 MOVW R0, ret+20(FP) RET //func seek(fd int32, offset int64, whence int32) int64 TEXT runtime·seek(SB),NOSPLIT,$4-24 MOVW $ret_lo+16(FP), R0 MOVW R0, newoff-4(SP) MOVW $SYS_SEEK, R0 SWI 0 CMP $-1, R0 MOVW.EQ R0, ret_lo+16(FP) MOVW.EQ R0, ret_hi+20(FP) RET //func closefd(fd int32) int32 TEXT runtime·closefd(SB),NOSPLIT,$0-8 MOVW $SYS_CLOSE, R0 SWI 0 MOVW R0, ret+4(FP) RET //func exits(msg *byte) TEXT runtime·exits(SB),NOSPLIT,$0-4 MOVW $SYS_EXITS, R0 SWI 0 RET //func brk_(addr unsafe.Pointer) int32 TEXT runtime·brk_(SB),NOSPLIT,$0-8 MOVW $SYS_BRK_, R0 SWI 0 MOVW R0, ret+4(FP) RET //func sleep(ms int32) int32 TEXT runtime·sleep(SB),NOSPLIT,$0-8 MOVW $SYS_SLEEP, R0 SWI 0 MOVW R0, ret+4(FP) RET //func plan9_semacquire(addr *uint32, block int32) int32 TEXT runtime·plan9_semacquire(SB),NOSPLIT,$0-12 MOVW $SYS_SEMACQUIRE, R0 SWI 0 MOVW R0, ret+8(FP) RET //func plan9_tsemacquire(addr *uint32, ms int32) int32 TEXT runtime·plan9_tsemacquire(SB),NOSPLIT,$0-12 MOVW $SYS_TSEMACQUIRE, R0 SWI 0 MOVW R0, ret+8(FP) RET //func nsec(*int64) int64 TEXT runtime·nsec(SB),NOSPLIT,$-4-12 MOVW $SYS_NSEC, R0 SWI 0 MOVW unnamed+0(FP), R1 MOVW 0(R1), R0 MOVW R0, ret_lo+4(FP) MOVW 4(R1), R0 MOVW R0, ret_hi+8(FP) RET // time.now() (sec int64, nsec int32) TEXT time·now(SB),NOSPLIT,$12-12 // use nsec system call to get current time in nanoseconds MOVW $sysnsec_lo-8(SP), R0 // destination addr MOVW R0,res-12(SP) MOVW $SYS_NSEC, R0 SWI 0 MOVW sysnsec_lo-8(SP), R1 // R1:R2 = nsec MOVW sysnsec_hi-4(SP), R2 // multiply nanoseconds by reciprocal of 10**9 (scaled by 2**61) // to get seconds (96 bit scaled result) MOVW $0x89705f41, R3 // 2**61 * 10**-9 MULLU R1,R3,(R6,R5) // R5:R6:R7 = R1:R2 * R3 MOVW $0,R7 MULALU R2,R3,(R7,R6) // unscale by discarding low 32 bits, shifting the rest 
by 29 MOVW R6>>29,R6 // R6:R7 = (R5:R6:R7 >> 61) ORR R7<<3,R6 MOVW R7>>29,R7 // subtract (10**9 * sec) from nsec to get nanosecond remainder MOVW $1000000000, R5 // 10**9 MULLU R6,R5,(R9,R8) // R8:R9 = R6:R7 * R5 MULA R7,R5,R9,R9 SUB.S R8,R1 // R1:R2 -= R8:R9 SBC R9,R2 // because reciprocal was a truncated repeating fraction, quotient // may be slightly too small -- adjust to make remainder < 10**9 CMP R5,R1 // if remainder > 10**9 SUB.HS R5,R1 // remainder -= 10**9 ADD.HS $1,R6 // sec += 1 MOVW R6,sec_lo+0(FP) MOVW R7,sec_hi+4(FP) MOVW R1,nsec+8(FP) RET //func notify(fn unsafe.Pointer) int32 TEXT runtime·notify(SB),NOSPLIT,$0-8 MOVW $SYS_NOTIFY, R0 SWI 0 MOVW R0, ret+4(FP) RET //func noted(mode int32) int32 TEXT runtime·noted(SB),NOSPLIT,$0-8 MOVW $SYS_NOTED, R0 SWI 0 MOVW R0, ret+4(FP) RET //func plan9_semrelease(addr *uint32, count int32) int32 TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0-12 MOVW $SYS_SEMRELEASE, R0 SWI 0 MOVW R0, ret+8(FP) RET //func rfork(flags int32) int32 TEXT runtime·rfork(SB),NOSPLIT,$0-8 MOVW $SYS_RFORK, R0 SWI 0 MOVW R0, ret+4(FP) RET //func tstart_plan9(newm *m) TEXT runtime·tstart_plan9(SB),NOSPLIT,$0-4 MOVW newm+0(FP), R1 MOVW m_g0(R1), g // Layout new m scheduler stack on os stack. MOVW R13, R0 MOVW R0, g_stack+stack_hi(g) SUB $(64*1024), R0 MOVW R0, (g_stack+stack_lo)(g) MOVW R0, g_stackguard0(g) MOVW R0, g_stackguard1(g) // Initialize procid from TOS struct. 
MOVW _tos(SB), R0 MOVW 48(R0), R0 MOVW R0, m_procid(R1) // save pid as m->procid BL runtime·mstart(SB) MOVW $0x1234, R0 MOVW R0, 0(R0) // not reached RET //func sigtramp(ureg, msg unsafe.Pointer) TEXT runtime·sigtramp(SB),NOSPLIT,$0-8 // check that g and m exist CMP $0, g BEQ 4(PC) MOVW g_m(g), R0 CMP $0, R0 BNE 2(PC) BL runtime·badsignal2(SB) // will exit // save args MOVW ureg+0(FP), R1 MOVW msg+4(FP), R2 // change stack MOVW m_gsignal(R0), R3 MOVW (g_stack+stack_hi)(R3), R13 // make room for args, retval and g SUB $24, R13 // save g MOVW g, R3 MOVW R3, 20(R13) // g = m->gsignal MOVW m_gsignal(R0), g // load args and call sighandler ADD $4,R13,R5 MOVM.IA [R1-R3], (R5) BL runtime·sighandler(SB) MOVW 16(R13), R0 // retval // restore g MOVW 20(R13), g // call noted(R0) MOVW R0, 4(R13) BL runtime·noted(SB) RET //func sigpanictramp() TEXT runtime·sigpanictramp(SB),NOSPLIT,$0-0 MOVW.W R0, -4(R13) B runtime·sigpanic(SB) //func setfpmasks() // Only used by the 64-bit runtime. TEXT runtime·setfpmasks(SB),NOSPLIT,$0 RET #define ERRMAX 128 /* from os_plan9.h */ // func errstr() string // Only used by package syscall. // Grab error string due to a syscall made // in entersyscall mode, without going // through the allocator (issue 4994). 
// See ../syscall/asm_plan9_arm.s:/·Syscall/ TEXT runtime·errstr(SB),NOSPLIT,$0-8 MOVW g_m(g), R0 MOVW m_errstr(R0), R1 MOVW R1, ret_base+0(FP) MOVW $ERRMAX, R2 MOVW R2, ret_len+4(FP) MOVW $SYS_ERRSTR, R0 SWI 0 MOVW R1, R2 MOVBU 0(R2), R0 CMP $0, R0 BEQ 3(PC) ADD $1, R2 B -4(PC) SUB R1, R2 MOVW R2, ret_len+4(FP) RET TEXT runtime·cas(SB),NOSPLIT,$0 B runtime·armcas(SB) TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0 B runtime·armPublicationBarrier(SB) TEXT runtime·casp1(SB),NOSPLIT,$0 B runtime·armcas(SB) // never called (cgo not supported) TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4 MOVW $0, R0 MOVW R0, (R0) RET 640 0 0 3515 12562734661 15704ustar00millermillergo1.5/src/sync/ 775 0 0 0 12641202403 122465ustar00millermillergo1.5/src/sync/atomic/ 775 0 0 0 12641202403 135225ustar00millermillergo1.5/src/sync/atomic/asm_plan9_arm.s 640 0 0 3771 12562734670 16454ustar00millermiller// Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. #include "textflag.h" // FreeBSD/ARM atomic operations. // TODO(minux): this only supports ARMv6K or higher. 
TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0 B ·CompareAndSwapUint32(SB) TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0 B ·armCompareAndSwapUint32(SB) TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0 B ·CompareAndSwapUint32(SB) TEXT ·AddInt32(SB),NOSPLIT,$0 B ·AddUint32(SB) TEXT ·AddUint32(SB),NOSPLIT,$0 B ·armAddUint32(SB) TEXT ·AddUintptr(SB),NOSPLIT,$0 B ·AddUint32(SB) TEXT ·SwapInt32(SB),NOSPLIT,$0 B ·SwapUint32(SB) TEXT ·SwapUint32(SB),NOSPLIT,$0 B ·armSwapUint32(SB) TEXT ·SwapUintptr(SB),NOSPLIT,$0 B ·SwapUint32(SB) TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0 B ·CompareAndSwapUint64(SB) TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$-4 B ·armCompareAndSwapUint64(SB) TEXT ·AddInt64(SB),NOSPLIT,$0 B ·addUint64(SB) TEXT ·AddUint64(SB),NOSPLIT,$0 B ·addUint64(SB) TEXT ·SwapInt64(SB),NOSPLIT,$0 B ·swapUint64(SB) TEXT ·SwapUint64(SB),NOSPLIT,$0 B ·swapUint64(SB) TEXT ·LoadInt32(SB),NOSPLIT,$0 B ·LoadUint32(SB) TEXT ·LoadUint32(SB),NOSPLIT,$0-8 MOVW addr+0(FP), R1 load32loop: LDREX (R1), R2 // loads R2 STREX R2, (R1), R0 // stores R2 CMP $0, R0 BNE load32loop MOVW R2, val+4(FP) RET TEXT ·LoadInt64(SB),NOSPLIT,$0 B ·loadUint64(SB) TEXT ·LoadUint64(SB),NOSPLIT,$0 B ·loadUint64(SB) TEXT ·LoadUintptr(SB),NOSPLIT,$0 B ·LoadUint32(SB) TEXT ·LoadPointer(SB),NOSPLIT,$0 B ·LoadUint32(SB) TEXT ·StoreInt32(SB),NOSPLIT,$0 B ·StoreUint32(SB) TEXT ·StoreUint32(SB),NOSPLIT,$0-8 MOVW addr+0(FP), R1 MOVW val+4(FP), R2 storeloop: LDREX (R1), R4 // loads R4 STREX R2, (R1), R0 // stores R2 CMP $0, R0 BNE storeloop RET TEXT ·StoreInt64(SB),NOSPLIT,$0 B ·storeUint64(SB) TEXT ·StoreUint64(SB),NOSPLIT,$0 B ·storeUint64(SB) TEXT ·StoreUintptr(SB),NOSPLIT,$0 B ·StoreUint32(SB) YS_SEMRgo1.5/src/syscall/ 775 0 0 0 12641202403 127445ustar00millermillergo1.5/src/syscall/asm_plan9_arm.s 640 0 0 5177 12567614072 15676ustar00millermiller// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
#include "textflag.h" #define SYS_SEEK 39 /* from zsysnum_plan9.go */ // System call support for plan9 on arm TEXT sysresult<>(SB),NOSPLIT,$12 MOVW $runtime·emptystring+0(SB), R2 CMP $-1, R0 B.NE ok MOVW R1, save-4(SP) BL runtime·errstr(SB) MOVW save-4(SP), R1 MOVW $err-12(SP), R2 ok: MOVM.IA (R2), [R3-R4] MOVM.IA [R3-R4], (R1) RET //func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err ErrorString) TEXT ·Syscall(SB),NOSPLIT,$0 BL runtime·entersyscall(SB) MOVW trap+0(FP), R0 // syscall num MOVM.IA.W (R13),[R1-R2] // pop LR and caller's LR SWI 0 MOVM.DB.W [R1-R2],(R13) // push LR and caller's LR MOVW $0, R2 MOVW $r1+16(FP), R1 MOVM.IA.W [R0,R2], (R1) BL sysresult<>(SB) BL runtime·exitsyscall(SB) RET //func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err ErrorString) // Actually Syscall5 but the rest of the code expects it to be named Syscall6. TEXT ·Syscall6(SB),NOSPLIT,$0 BL runtime·entersyscall(SB) MOVW trap+0(FP), R0 // syscall num MOVM.IA.W (R13),[R1-R2] // pop LR and caller's LR SWI 0 MOVM.DB.W [R1-R2],(R13) // push LR and caller's LR MOVW $0, R1 MOVW $r1+28(FP), R1 MOVM.IA.W [R0,R2], (R1) BL sysresult<>(SB) BL runtime·exitsyscall(SB) RET //func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr) TEXT ·RawSyscall(SB),NOSPLIT,$0 MOVW trap+0(FP), R0 // syscall num MOVM.IA.W (R13),[R1] // pop caller's LR SWI 0 MOVM.DB.W [R1],(R13) // push caller's LR MOVW R0, r1+16(FP) MOVW R0, r2+20(FP) MOVW R0, err+24(FP) RET //func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) // Actually RawSyscall5 but the rest of the code expects it to be named RawSyscall6. 
TEXT ·RawSyscall6(SB),NOSPLIT,$0 MOVW trap+0(FP), R0 // syscall num MOVM.IA.W (R13),[R1] // pop caller's LR SWI 0 MOVM.DB.W [R1],(R13) // push caller's LR MOVW R0, r1+28(FP) MOVW R0, r2+32(FP) MOVW R0, err+36(FP) RET //func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string) TEXT ·seek(SB),NOSPLIT,$0 MOVW $newoffset_lo+20(FP), R5 MOVW R5, placeholder+0(FP) //placeholder = dest for return value MOVW $SYS_SEEK, R0 // syscall num MOVM.IA.W (R13),[R1] // pop LR SWI 0 MOVM.DB.W [R1],(R13) // push LR CMP $-1, R0 MOVW.EQ R0, 0(R5) MOVW.EQ R0, 4(R5) MOVW $err+28(FP), R1 BL sysresult<>(SB) RET //func exit(code int) // Import runtime·exit for cleanly exiting. TEXT ·exit(SB),NOSPLIT,$4 MOVW code+0(FP), R0 MOVW R0, e-4(SP) BL runtime·exit(SB) RET go1.5/src/syscall/exec_plan9.go 640 0 0 36772 12562734667 15403ustar00millermiller// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Fork, exec, wait, etc. package syscall import ( "runtime" "sync" "unsafe" ) // Lock synchronizing creation of new file descriptors with fork. // // We want the child in a fork/exec sequence to inherit only the // file descriptors we intend. To do that, we mark all file // descriptors close-on-exec and then, in the child, explicitly // unmark the ones we want the exec'ed program to keep. // Unix doesn't make this easy: there is, in general, no way to // allocate a new file descriptor close-on-exec. Instead you // have to allocate the descriptor and then mark it close-on-exec. // If a fork happens between those two events, the child's exec // will inherit an unwanted file descriptor. // // This lock solves that race: the create new fd/mark close-on-exec // operation is done holding ForkLock for reading, and the fork itself // is done holding ForkLock for writing. At least, that's the idea. // There are some complications. 
// // Some system calls that create new file descriptors can block // for arbitrarily long times: open on a hung NFS server or named // pipe, accept on a socket, and so on. We can't reasonably grab // the lock across those operations. // // It is worse to inherit some file descriptors than others. // If a non-malicious child accidentally inherits an open ordinary file, // that's not a big deal. On the other hand, if a long-lived child // accidentally inherits the write end of a pipe, then the reader // of that pipe will not see EOF until that child exits, potentially // causing the parent program to hang. This is a common problem // in threaded C programs that use popen. // // Luckily, the file descriptors that are most important not to // inherit are not the ones that can take an arbitrarily long time // to create: pipe returns instantly, and the net package uses // non-blocking I/O to accept on a listening socket. // The rules for which file descriptor-creating operations use the // ForkLock are as follows: // // 1) Pipe. Does not block. Use the ForkLock. // 2) Socket. Does not block. Use the ForkLock. // 3) Accept. If using non-blocking mode, use the ForkLock. // Otherwise, live with the race. // 4) Open. Can block. Use O_CLOEXEC if available (Linux). // Otherwise, live with the race. // 5) Dup. Does not block. Use the ForkLock. // On Linux, could use fcntl F_DUPFD_CLOEXEC // instead of the ForkLock, but only for dup(fd, -1). var ForkLock sync.RWMutex // StringSlicePtr converts a slice of strings to a slice of pointers // to NUL-terminated byte arrays. If any string contains a NUL byte // this function panics instead of returning an error. // // Deprecated: Use SlicePtrFromStrings instead. func StringSlicePtr(ss []string) []*byte { bb := make([]*byte, len(ss)+1) for i := 0; i < len(ss); i++ { bb[i] = StringBytePtr(ss[i]) } bb[len(ss)] = nil return bb } // SlicePtrFromStrings converts a slice of strings to a slice of // pointers to NUL-terminated byte arrays. 
If any string contains // a NUL byte, it returns (nil, EINVAL). func SlicePtrFromStrings(ss []string) ([]*byte, error) { var err error bb := make([]*byte, len(ss)+1) for i := 0; i < len(ss); i++ { bb[i], err = BytePtrFromString(ss[i]) if err != nil { return nil, err } } bb[len(ss)] = nil return bb, nil } // readdirnames returns the names of files inside the directory represented by dirfd. func readdirnames(dirfd int) (names []string, err error) { names = make([]string, 0, 100) var buf [STATMAX]byte for { n, e := Read(dirfd, buf[:]) if e != nil { return nil, e } if n == 0 { break } for i := 0; i < n; { m, _ := gbit16(buf[i:]) m += 2 if m < STATFIXLEN { return nil, ErrBadStat } s, _, ok := gstring(buf[i+41:]) if !ok { return nil, ErrBadStat } names = append(names, s) i += int(m) } } return } // readdupdevice returns a list of currently opened fds (excluding stdin, stdout, stderr) from the dup device #d. // ForkLock should be write locked before calling, so that no new fds would be created while the fd list is being read. func readdupdevice() (fds []int, err error) { dupdevfd, err := Open("#d", O_RDONLY) if err != nil { return } defer Close(dupdevfd) names, err := readdirnames(dupdevfd) if err != nil { return } fds = make([]int, 0, len(names)/2) for _, name := range names { if n := len(name); n > 3 && name[n-3:n] == "ctl" { continue } fd := int(atoi([]byte(name))) switch fd { case 0, 1, 2, dupdevfd: continue } fds = append(fds, fd) } return } var startupFds []int // Plan 9 does not allow clearing the OCEXEC flag // from the underlying channel backing an open file descriptor, // therefore we store a list of already opened file descriptors // inside startupFds and skip them when manually closing descriptors // not meant to be passed to a child exec. func init() { startupFds, _ = readdupdevice() } // forkAndExecInChild forks the process, calling dup onto 0..len(fd) // and finally invoking exec(argv0, argvv, envv) in the child. 
// If a dup or exec fails, it writes the error string to pipe. // (The pipe write end is close-on-exec so if exec succeeds, it will be closed.) // // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // The calls to RawSyscall are okay because they are assembly // functions that do not grow the stack. func forkAndExecInChild(argv0 *byte, argv []*byte, envv []envItem, dir *byte, attr *ProcAttr, fdsToClose []int, pipe int, rflag int) (pid int, err error) { // Declare all variables at top in case any // declarations require heap allocation (e.g., errbuf). var ( r1 uintptr nextfd int i int clearenv int envfd int errbuf [ERRMAX]byte ) // Guard against side effects of shuffling fds below. // Make sure that nextfd is beyond any currently open files so // that we can't run the risk of overwriting any of them. fd := make([]int, len(attr.Files)) nextfd = len(attr.Files) for i, ufd := range attr.Files { if nextfd < int(ufd) { nextfd = int(ufd) } fd[i] = int(ufd) } nextfd++ if envv != nil { clearenv = RFCENVG } // About to call fork. // No more allocation or calls of non-assembly functions. r1, _, _ = RawSyscall(SYS_RFORK, uintptr(RFPROC|RFFDG|RFREND|clearenv|rflag), 0, 0) if r1 != 0 { if int32(r1) == -1 { return 0, NewError(errstr()) } // parent; return PID return int(r1), nil } // Fork succeeded, now in child. // Close fds we don't need. for i = 0; i < len(fdsToClose); i++ { RawSyscall(SYS_CLOSE, uintptr(fdsToClose[i]), 0, 0) } if envv != nil { // Write new environment variables. 
for i = 0; i < len(envv); i++ { r1, _, _ = RawSyscall(SYS_CREATE, uintptr(unsafe.Pointer(envv[i].name)), uintptr(O_WRONLY), uintptr(0666)) if int32(r1) == -1 { goto childerror } envfd = int(r1) r1, _, _ = RawSyscall6(SYS_PWRITE, uintptr(envfd), uintptr(unsafe.Pointer(envv[i].value)), uintptr(envv[i].nvalue), ^uintptr(0), ^uintptr(0), 0) if int32(r1) == -1 || int(r1) != envv[i].nvalue { goto childerror } r1, _, _ = RawSyscall(SYS_CLOSE, uintptr(envfd), 0, 0) if int32(r1) == -1 { goto childerror } } } // Chdir if dir != nil { r1, _, _ = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0) if int32(r1) == -1 { goto childerror } } // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. if pipe < nextfd { r1, _, _ = RawSyscall(SYS_DUP, uintptr(pipe), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } pipe = nextfd nextfd++ } for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] < int(i) { r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } fd[i] = nextfd nextfd++ if nextfd == pipe { // don't stomp on pipe nextfd++ } } } // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) continue } if fd[i] == int(i) { continue } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(i), 0) if int32(r1) == -1 { goto childerror } } // Pass 3: close fd[i] if it was moved in the previous pass. for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] != int(i) { RawSyscall(SYS_CLOSE, uintptr(fd[i]), 0, 0) } } // Time to exec. 
r1, _, _ = RawSyscall(SYS_EXEC, uintptr(unsafe.Pointer(argv0)), uintptr(unsafe.Pointer(&argv[0])), 0) childerror: // send error string on pipe RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&errbuf[0])), uintptr(len(errbuf)), 0) errbuf[len(errbuf)-1] = 0 i = 0 for i < len(errbuf) && errbuf[i] != 0 { i++ } RawSyscall6(SYS_PWRITE, uintptr(pipe), uintptr(unsafe.Pointer(&errbuf[0])), uintptr(i), ^uintptr(0), ^uintptr(0), 0) for { RawSyscall(SYS_EXITS, 0, 0, 0) } // Calling panic is not actually safe, // but the for loop above won't break // and this shuts up the compiler. panic("unreached") } func cexecPipe(p []int) error { e := Pipe(p) if e != nil { return e } fd, e := Open("#d/"+itoa(p[1]), O_CLOEXEC) if e != nil { Close(p[0]) Close(p[1]) return e } Close(fd) return nil } type envItem struct { name *byte value *byte nvalue int } type ProcAttr struct { Dir string // Current working directory. Env []string // Environment. Files []uintptr // File descriptors. Sys *SysProcAttr } type SysProcAttr struct { Rfork int // additional flags to pass to rfork } var zeroProcAttr ProcAttr var zeroSysProcAttr SysProcAttr func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { var ( p [2]int n int errbuf [ERRMAX]byte wmsg Waitmsg ) if attr == nil { attr = &zeroProcAttr } sys := attr.Sys if sys == nil { sys = &zeroSysProcAttr } p[0] = -1 p[1] = -1 // Convert args to C form. 
argv0p, err := BytePtrFromString(argv0) if err != nil { return 0, err } argvp, err := SlicePtrFromStrings(argv) if err != nil { return 0, err } destDir := attr.Dir if destDir == "" { wdmu.Lock() destDir = wdStr wdmu.Unlock() } var dir *byte if destDir != "" { dir, err = BytePtrFromString(destDir) if err != nil { return 0, err } } var envvParsed []envItem if attr.Env != nil { envvParsed = make([]envItem, 0, len(attr.Env)) for _, v := range attr.Env { i := 0 for i < len(v) && v[i] != '=' { i++ } envname, err := BytePtrFromString("/env/" + v[:i]) if err != nil { return 0, err } envvalue := make([]byte, len(v)-i) copy(envvalue, v[i+1:]) envvParsed = append(envvParsed, envItem{envname, &envvalue[0], len(v) - i}) } } // Acquire the fork lock to prevent other threads from creating new fds before we fork. ForkLock.Lock() // get a list of open fds, excluding stdin,stdout and stderr that need to be closed in the child. // no new fds can be created while we hold the ForkLock for writing. openFds, e := readdupdevice() if e != nil { ForkLock.Unlock() return 0, e } fdsToClose := make([]int, 0, len(openFds)) for _, fd := range openFds { doClose := true // exclude files opened at startup. for _, sfd := range startupFds { if fd == sfd { doClose = false break } } // exclude files explicitly requested by the caller. for _, rfd := range attr.Files { if fd == int(rfd) { doClose = false break } } if doClose { fdsToClose = append(fdsToClose, fd) } } // Allocate child status pipe close on exec. e = cexecPipe(p[:]) if e != nil { return 0, e } fdsToClose = append(fdsToClose, p[0]) // Kick off child. pid, err = forkAndExecInChild(argv0p, argvp, envvParsed, dir, attr, fdsToClose, p[1], sys.Rfork) if err != nil { if p[0] >= 0 { Close(p[0]) Close(p[1]) } ForkLock.Unlock() return 0, err } ForkLock.Unlock() // Read child error status from pipe. 
Close(p[1]) n, err = Read(p[0], errbuf[:]) Close(p[0]) if err != nil || n != 0 { if n != 0 { err = NewError(string(errbuf[:n])) } // Child failed; wait for it to exit, to make sure // the zombies don't accumulate. for wmsg.Pid != pid { Await(&wmsg) } return 0, err } // Read got EOF, so pipe closed on exec, so exec succeeded. return pid, nil } type waitErr struct { Waitmsg err error } var procs struct { sync.Mutex waits map[int]chan *waitErr } // startProcess starts a new goroutine, tied to the OS // thread, which runs the process and subsequently waits // for it to finish, communicating the process stats back // to any goroutines that may have been waiting on it. // // Such a dedicated goroutine is needed because on // Plan 9, only the parent thread can wait for a child, // whereas goroutines tend to jump OS threads (e.g., // between starting a process and running Wait(), the // goroutine may have been rescheduled). func startProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { type forkRet struct { pid int err error } forkc := make(chan forkRet, 1) go func() { runtime.LockOSThread() var ret forkRet ret.pid, ret.err = forkExec(argv0, argv, attr) // If fork fails there is nothing to wait for. if ret.err != nil || ret.pid == 0 { forkc <- ret return } waitc := make(chan *waitErr, 1) // Mark that the process is running. procs.Lock() if procs.waits == nil { procs.waits = make(map[int]chan *waitErr) } procs.waits[ret.pid] = waitc procs.Unlock() forkc <- ret var w waitErr for w.err == nil && w.Pid != ret.pid { w.err = Await(&w.Waitmsg) } waitc <- &w close(waitc) }() ret := <-forkc return ret.pid, ret.err } // Combination of fork and exec, careful to be thread safe. func ForkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { return startProcess(argv0, argv, attr) } // StartProcess wraps ForkExec for package os. 
func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) { pid, err = startProcess(argv0, argv, attr) return pid, 0, err } // Ordinary exec. func Exec(argv0 string, argv []string, envv []string) (err error) { if envv != nil { r1, _, _ := RawSyscall(SYS_RFORK, RFCENVG, 0, 0) if int32(r1) == -1 { return NewError(errstr()) } for _, v := range envv { i := 0 for i < len(v) && v[i] != '=' { i++ } fd, e := Create("/env/"+v[:i], O_WRONLY, 0666) if e != nil { return e } _, e = Write(fd, []byte(v[i+1:])) if e != nil { Close(fd) return e } Close(fd) } } argv0p, err := BytePtrFromString(argv0) if err != nil { return err } argvp, err := SlicePtrFromStrings(argv) if err != nil { return err } _, _, e1 := Syscall(SYS_EXEC, uintptr(unsafe.Pointer(argv0p)), uintptr(unsafe.Pointer(&argvp[0])), 0) return e1 } // WaitProcess waits until the pid of a // running process is found in the queue of // wait messages. It is used in conjunction // with ForkExec/StartProcess to wait for a // running process to exit. 
func WaitProcess(pid int, w *Waitmsg) (err error) { procs.Lock() ch := procs.waits[pid] procs.Unlock() var wmsg *waitErr if ch != nil { wmsg = <-ch procs.Lock() if procs.waits[pid] == ch { delete(procs.waits, pid) } procs.Unlock() } if wmsg == nil { // ch was missing or ch is closed return NewError("process not found") } if wmsg.err != nil { return wmsg.err } if w != nil { *w = wmsg.Waitmsg } return nil } l are go1.5/src/syscall/zsyscall_plan9_arm.go 640 0 0 14642 12562734667 17152ustar00millermiller// mksyscall.pl -l32 -plan9 syscall_plan9.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build arm,plan9 package syscall import "unsafe" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fd2path(fd int, buf []byte) (err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf))) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]int32) (err error) { r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func await(s []byte) (n int, err error) { var _p0 unsafe.Pointer if len(s) > 0 { _p0 = unsafe.Pointer(&s[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0) n = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func open(path string, mode int) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) fd = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func create(path string, mode int, perm 
uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) use(unsafe.Pointer(_p0)) fd = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func remove(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func stat(path string, edir []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } var _p1 unsafe.Pointer if len(edir) > 0 { _p1 = unsafe.Pointer(&edir[0]) } else { _p1 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) use(unsafe.Pointer(_p0)) n = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(name string, old string, flag int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(name) if err != nil { return } var _p1 *byte _p1, err = BytePtrFromString(old) if err != nil { return } r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag)) use(unsafe.Pointer(_p0)) use(unsafe.Pointer(_p1)) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mount(fd int, afd int, old string, flag int, aname string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(old) if err != nil { return } var _p1 *byte _p1, err = BytePtrFromString(aname) if err != nil { return } r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0) use(unsafe.Pointer(_p0)) 
use(unsafe.Pointer(_p1)) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wstat(path string, edir []byte) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } var _p1 unsafe.Pointer if len(edir) > 0 { _p1 = unsafe.Pointer(&edir[0]) } else { _p1 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir))) use(unsafe.Pointer(_p0)) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int, newfd int) (fd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0) fd = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0) n = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { r0, _, 
e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, edir []byte) (n int, err error) { var _p0 unsafe.Pointer if len(edir) > 0 { _p0 = unsafe.Pointer(&edir[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) n = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fwstat(fd int, edir []byte) (err error) { var _p0 unsafe.Pointer if len(edir) > 0 { _p0 = unsafe.Pointer(&edir[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir))) if int32(r0) == -1 { err = e1 } return } msg err error } var procs struct { sync.Mutex waits map[int]chan *waitErr } // startProcesys/ 775 0 0 0 12641202533 104745ustar00millermillersys/src/ 775 0 0 0 12641202533 112635ustar00millermillersys/src/9/ 775 0 0 0 12641203152 114315ustar00millermillersys/src/9/bcm/ 775 0 0 0 12641203052 121715ustar00millermillersys/src/9/bcm/mem.h 664 0 0 5167 12561610663 13144ustar00millermiller/* * Memory and machine-specific definitions. Used in C and assembler. */ #define KiB 1024u /* Kibi 0x0000000000000400 */ #define MiB 1048576u /* Mebi 0x0000000000100000 */ #define GiB 1073741824u /* Gibi 000000000040000000 */ /* * Sizes */ #define BY2PG (4*KiB) /* bytes per page */ #define PGSHIFT 12 /* log(BY2PG) */ #define MAXMACH 4 /* max # cpus system can run */ #define MACHSIZE BY2PG #define L1SIZE (4 * BY2PG) #define KSTKSIZE (8*KiB) #define STACKALIGN(sp) ((sp) & ~3) /* bug: assure with alloc */ /* * Magic registers */ #define USER 9 /* R9 is up-> */ #define MACH 10 /* R10 is m-> */ /* * Address spaces. * KTZERO is used by kprof and dumpstack (if any). * * KZERO is mapped to physical 0 (start of ram). * * vectors are at 0, plan9.ini is at KZERO+256 and is limited to 16K by * devenv. 
*/ #define KSEG0 0x80000000 /* kernel segment */ /* mask to check segment; good for 1GB dram */ #define KSEGM 0xC0000000 #define KZERO KSEG0 /* kernel address space */ #define CONFADDR (KZERO+0x100) /* unparsed plan9.ini */ #define MACHADDR (KZERO+0x2000) /* Mach structure */ #define L2 (KZERO+0x3000) /* L2 ptes for vectors etc */ #define VCBUFFER (KZERO+0x3400) /* videocore mailbox buffer */ #define FIQSTKTOP (KZERO+0x4000) /* FIQ stack */ #define L1 (KZERO+0x4000) /* tt ptes: 16KiB aligned */ #define KTZERO (KZERO+0x8000) /* kernel text start */ #define VIRTIO 0x7E000000 /* i/o registers */ #define FRAMEBUFFER 0xC0000000 /* video framebuffer */ #define UZERO 0 /* user segment */ #define UTZERO (UZERO+BY2PG) /* user text start */ #define UTROUND(t) ROUNDUP((t), BY2PG) #define USTKTOP 0x40000000 /* user segment end +1 */ #define USTKSIZE (8*1024*1024) /* user stack size */ #define TSTKTOP (USTKTOP-USTKSIZE) /* sysexec temporary stack */ #define TSTKSIZ 256 /* address at which to copy and execute rebootcode */ #define REBOOTADDR (KZERO+0x1800) /* * Legacy... */ #define BLOCKALIGN 64 /* only used in allocb.c */ #define KSTACK KSTKSIZE /* * Sizes */ #define BI2BY 8 /* bits per byte */ #define BY2SE 4 #define BY2WD 4 #define BY2V 8 /* only used in xalloc.c */ #define PTEMAPMEM (1024*1024) #define PTEPERTAB (PTEMAPMEM/BY2PG) #define SEGMAPSIZE 1984 #define SSEGMAPSIZE 16 #define PPN(x) ((x)&~(BY2PG-1)) /* * With a little work these move to port. */ #define PTEVALID (1<<0) #define PTERONLY 0 #define PTEWRITE (1<<1) #define PTEUNCACHED (1<<2) #define PTEKERNEL (1<<3) /* * Physical machine information from here on. * PHYS addresses as seen from the arm cpu. * BUS addresses as seen from the videocore gpu. 
*/ #define PHYSDRAM 0 #define IOSIZE (16*MiB) return } r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) use(unsafe.Pointer(_p0)) fd = int(r0) if int32(r0) == -1 { err = e1 } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func remove(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } r0, _, e1 := Sysys/src/9/bcm/vfp3.c 664 0 0 25023 12566040513 13244ustar00millermiller/* * VFPv2 or VFPv3 floating point unit */ #include "u.h" #include "../port/lib.h" #include "mem.h" #include "dat.h" #include "fns.h" #include "ureg.h" #include "arm.h" /* subarchitecture code in m->havefp */ enum { VFPv2 = 2, VFPv3 = 3, }; /* fp control regs. most are read-only */ enum { Fpsid = 0, Fpscr = 1, /* rw */ Mvfr1 = 6, Mvfr0 = 7, Fpexc = 8, /* rw */ Fpinst= 9, /* optional, for exceptions */ Fpinst2=10, }; enum { /* Fpexc bits */ Fpex = 1u << 31, Fpenabled = 1 << 30, Fpdex = 1 << 29, /* defined synch exception */ // Fp2v = 1 << 28, /* Fpinst2 reg is valid */ // Fpvv = 1 << 27, /* if Fpdex, vecitr is valid */ // Fptfv = 1 << 26, /* trapped fault is valid */ // Fpvecitr = MASK(3) << 8, /* FSR bits appear here */ Fpmbc = Fpdex, /* bits exception handler must clear */ /* Fpscr bits; see u.h for more */ Stride = MASK(2) << 20, Len = MASK(3) << 16, Dn= 1 << 25, Fz= 1 << 24, /* trap exception enables (not allowed in vfp3) */ FPIDNRM = 1 << 15, /* input denormal */ Alltraps = FPIDNRM | FPINEX | FPUNFL | FPOVFL | FPZDIV | FPINVAL, /* pending exceptions */ FPAIDNRM = 1 << 7, /* input denormal */ Allexc = FPAIDNRM | FPAINEX | FPAUNFL | FPAOVFL | FPAZDIV | FPAINVAL, /* condition codes */ Allcc = MASK(4) << 28, }; enum { /* CpCPaccess bits */ Cpaccnosimd = 1u << 31, Cpaccd16 = 1 << 30, }; static char * subarch(int impl, uint sa) { static char *armarchs[] = { "VFPv1 (unsupported)", "VFPv2", "VFPv3+ with common VFP subarch v2", "VFPv3+ with null subarch", "VFPv3+ with common VFP subarch 
v3", }; if (impl != 'A' || sa >= nelem(armarchs)) return "GOK"; else return armarchs[sa]; } static char * implement(uchar impl) { if (impl == 'A') return "arm"; else return "unknown"; } static int havefp(void) { int gotfp; ulong acc, sid; if (m->havefpvalid) return m->havefp; m->havefp = 0; gotfp = 1 << CpFP | 1 << CpDFP; cpwrsc(0, CpCONTROL, 0, CpCPaccess, MASK(28)); acc = cprdsc(0, CpCONTROL, 0, CpCPaccess); if ((acc & (MASK(2) << (2*CpFP))) == 0) { gotfp &= ~(1 << CpFP); print("fpon: no single FP coprocessor\n"); } if ((acc & (MASK(2) << (2*CpDFP))) == 0) { gotfp &= ~(1 << CpDFP); print("fpon: no double FP coprocessor\n"); } if (!gotfp) { print("fpon: no FP coprocessors\n"); m->havefpvalid = 1; return 0; } m->fpon = 1; /* don't panic */ sid = fprd(Fpsid); m->fpon = 0; switch((sid >> 16) & MASK(7)){ case 0: /* VFPv1 */ break; case 1: /* VFPv2 */ m->havefp = VFPv2; m->fpnregs = 16; break; default: /* VFPv3 or later */ m->havefp = VFPv3; m->fpnregs = (acc & Cpaccd16) ? 16 : 32; break; } if (m->machno == 0) print("fp: %d registers, %s simd\n", m->fpnregs, (acc & Cpaccnosimd? " no": "")); m->havefpvalid = 1; return 1; } /* * these can be called to turn the fpu on or off for user procs, * not just at system start up or shutdown. */ void fpoff(void) { if (m->fpon) { fpwr(Fpexc, 0); m->fpon = 0; } } void fpononly(void) { if (!m->fpon && havefp()) { /* enable fp. must be first operation on the FPUs. 
*/ fpwr(Fpexc, Fpenabled); m->fpon = 1; } } static void fpcfg(void) { int impl; ulong sid; static int printed; /* clear pending exceptions; no traps in vfp3; all v7 ops are scalar */ m->fpscr = Dn | FPRNR | (FPINVAL | FPZDIV | FPOVFL) & ~Alltraps; /* VFPv2 needs software support for underflows, so force them to zero */ if(m->havefp == VFPv2) m->fpscr |= Fz; fpwr(Fpscr, m->fpscr); m->fpconfiged = 1; if (printed) return; sid = fprd(Fpsid); impl = sid >> 24; print("fp: %s arch %s; rev %ld\n", implement(impl), subarch(impl, (sid >> 16) & MASK(7)), sid & MASK(4)); printed = 1; } void fpinit(void) { if (havefp()) { fpononly(); fpcfg(); } } void fpon(void) { if (havefp()) { fpononly(); if (m->fpconfiged) fpwr(Fpscr, (fprd(Fpscr) & Allcc) | m->fpscr); else fpcfg(); /* 1st time on this fpu; configure it */ } } void fpclear(void) { // ulong scr; fpon(); // scr = fprd(Fpscr); // m->fpscr = scr & ~Allexc; // fpwr(Fpscr, m->fpscr); fpwr(Fpexc, fprd(Fpexc) & ~Fpmbc); } /* * Called when a note is about to be delivered to a * user process, usually at the end of a system call. * Note handlers are not allowed to use the FPU so * the state is marked (after saving if necessary) and * checked in the Device Not Available handler. */ void fpunotify(Ureg*) { if(up->fpstate == FPactive){ fpsave(&up->fpsave); up->fpstate = FPinactive; } up->fpstate |= FPillegal; } /* * Called from sysnoted() via the machine-dependent * noted() routine. * Clear the flag set above in fpunotify(). */ void fpunoted(void) { up->fpstate &= ~FPillegal; } /* * Called early in the non-interruptible path of * sysrfork() via the machine-dependent syscall() routine. * Save the state so that it can be easily copied * to the child process later. */ void fpusysrfork(Ureg*) { if(up->fpstate == FPactive){ fpsave(&up->fpsave); up->fpstate = FPinactive; } } /* * Called later in sysrfork() via the machine-dependent * sysrforkchild() routine. * Copy the parent FPU state to the child. 
*/ void fpusysrforkchild(Proc *p, Ureg *, Proc *up) { /* don't penalize the child, it hasn't done FP in a note handler. */ p->fpstate = up->fpstate & ~FPillegal; } /* should only be called if p->fpstate == FPactive */ void fpsave(FPsave *fps) { int n; fpon(); fps->control = fps->status = fprd(Fpscr); assert(m->fpnregs); for (n = 0; n < m->fpnregs; n++) fpsavereg(n, (uvlong *)fps->regs[n]); fpoff(); } static void fprestore(Proc *p) { int n; fpon(); fpwr(Fpscr, p->fpsave.control); m->fpscr = fprd(Fpscr) & ~Allcc; assert(m->fpnregs); for (n = 0; n < m->fpnregs; n++) fprestreg(n, *(uvlong *)p->fpsave.regs[n]); } /* * Called from sched() and sleep() via the machine-dependent * procsave() routine. * About to go in to the scheduler. * If the process wasn't using the FPU * there's nothing to do. */ void fpuprocsave(Proc *p) { if(p->fpstate == FPactive){ if(p->state == Moribund) fpclear(); else{ /* * Fpsave() stores without handling pending * unmasked exeptions. Postnote() can't be called * here as sleep() already has up->rlock, so * the handling of pending exceptions is delayed * until the process runs again and generates an * emulation fault to activate the FPU. */ fpsave(&p->fpsave); } p->fpstate = FPinactive; } } /* * The process has been rescheduled and is about to run. * Nothing to do here right now. If the process tries to use * the FPU again it will cause a Device Not Available * exception and the state will then be restored. */ void fpuprocrestore(Proc *) { } /* * Disable the FPU. * Called from sysexec() via sysprocsetup() to * set the FPU for the new process. */ void fpusysprocsetup(Proc *p) { p->fpstate = FPinit; fpoff(); } static void mathnote(void) { ulong status; char *msg, note[ERRMAX]; status = up->fpsave.status; /* * Some attention should probably be paid here to the * exception masks and error summary. 
*/ if (status & FPAINEX) msg = "inexact"; else if (status & FPAOVFL) msg = "overflow"; else if (status & FPAUNFL) msg = "underflow"; else if (status & FPAZDIV) msg = "divide by zero"; else if (status & FPAINVAL) msg = "bad operation"; else msg = "spurious"; snprint(note, sizeof note, "sys: fp: %s fppc=%#p status=%#lux", msg, up->fpsave.pc, status); postnote(up, 1, note, NDebug); } static void mathemu(Ureg *) { switch(up->fpstate){ case FPemu: error("illegal instruction: VFP opcode in emulated mode"); case FPinit: fpinit(); up->fpstate = FPactive; break; case FPinactive: /* * Before restoring the state, check for any pending * exceptions. There's no way to restore the state without * generating an unmasked exception. * More attention should probably be paid here to the * exception masks and error summary. */ if(up->fpsave.status & (FPAINEX|FPAUNFL|FPAOVFL|FPAZDIV|FPAINVAL)){ mathnote(); break; } fprestore(up); up->fpstate = FPactive; break; case FPactive: error("illegal instruction: bad vfp fpu opcode"); break; } fpclear(); } void fpstuck(uintptr pc) { if (m->fppc == pc && m->fppid == up->pid) { m->fpcnt++; if (m->fpcnt > 4) panic("fpuemu: cpu%d stuck at pid %ld %s pc %#p " "instr %#8.8lux", m->machno, up->pid, up->text, pc, *(ulong *)pc); } else { m->fppid = up->pid; m->fppc = pc; m->fpcnt = 0; } } enum { N = 1<<31, Z = 1<<30, C = 1<<29, V = 1<<28, REGPC = 15, }; static int condok(int cc, int c) { switch(c){ case 0: /* Z set */ return cc&Z; case 1: /* Z clear */ return (cc&Z) == 0; case 2: /* C set */ return cc&C; case 3: /* C clear */ return (cc&C) == 0; case 4: /* N set */ return cc&N; case 5: /* N clear */ return (cc&N) == 0; case 6: /* V set */ return cc&V; case 7: /* V clear */ return (cc&V) == 0; case 8: /* C set and Z clear */ return cc&C && (cc&Z) == 0; case 9: /* C clear or Z set */ return (cc&C) == 0 || cc&Z; case 10: /* N set and V set, or N clear and V clear */ return (~cc&(N|V))==0 || (cc&(N|V)) == 0; case 11: /* N set and V clear, or N clear and V set 
*/ return (cc&(N|V))==N || (cc&(N|V))==V; case 12: /* Z clear, and either N set and V set or N clear and V clear */ return (cc&Z) == 0 && ((~cc&(N|V))==0 || (cc&(N|V))==0); case 13: /* Z set, or N set and V clear or N clear and V set */ return (cc&Z) || (cc&(N|V))==N || (cc&(N|V))==V; case 14: /* always */ return 1; case 15: /* never (reserved) */ return 0; } return 0; /* not reached */ } /* only called to deal with user-mode instruction faults */ int fpuemu(Ureg* ureg) { int s, nfp, cop, op; uintptr pc; static int already; if(waserror()){ postnote(up, 1, up->errstr, NDebug); return 1; } if(up->fpstate & FPillegal) error("floating point in note handler"); nfp = 0; pc = ureg->pc; validaddr(pc, 4, 0); if(!condok(ureg->psr, *(ulong*)pc >> 28)) iprint("fpuemu: conditional instr shouldn't have got here\n"); op = (*(ulong *)pc >> 24) & MASK(4); cop = (*(ulong *)pc >> 8) & MASK(4); if(m->fpon) fpstuck(pc); /* debugging; could move down 1 line */ if (ISFPAOP(cop, op)) { /* old arm 7500 fpa opcode? 
*/ s = spllo(); if(!already++) pprint("warning: emulated arm7500 fpa instr %#8.8lux at %#p\n", *(ulong *)pc, pc); if(waserror()){ splx(s); nexterror(); } nfp = fpiarm(ureg); /* advances pc past emulated instr(s) */ if (nfp > 1) /* could adjust this threshold */ m->fppc = m->fpcnt = 0; splx(s); poperror(); } else if (ISVFPOP(cop, op)) { /* if vfp, fpu must be off */ mathemu(ureg); /* enable fpu & retry */ nfp = 1; } poperror(); return nfp; } sys/src/9/port/ 775 0 0 0 12641203221 124125ustar00millermillersys/src/9/port/page.c 664 0 0 25605 12564702425 13540ustar00millermiller#include "u.h" #include "../port/lib.h" #include "mem.h" #include "dat.h" #include "fns.h" #include "../port/error.h" #define pghash(daddr) palloc.hash[(daddr>>PGSHIFT)&(PGHSIZE-1)] struct Palloc palloc; void pageinit(void) { int color, i, j; Page *p; Pallocmem *pm; ulong m, np, k, vkb, pkb; np = 0; for(i=0; inpage; } palloc.pages = xalloc(np*sizeof(Page)); if(palloc.pages == 0) panic("pageinit"); color = 0; palloc.head = palloc.pages; p = palloc.head; for(i=0; inpage; j++){ p->prev = p-1; p->next = p+1; p->pa = pm->base+j*BY2PG; p->color = color; palloc.freecount++; color = (color+1)%NCOLOR; p++; } } palloc.tail = p - 1; palloc.head->prev = 0; palloc.tail->next = 0; palloc.user = p - palloc.pages; pkb = palloc.user*BY2PG/1024; vkb = pkb + (conf.nswap*BY2PG)/1024; /* Paging numbers */ swapalloc.highwater = (palloc.user*5)/100; swapalloc.headroom = swapalloc.highwater + (swapalloc.highwater/4); m = 0; for(i=0; iprev) p->prev->next = p->next; else palloc.head = p->next; if(p->next) p->next->prev = p->prev; else palloc.tail = p->prev; p->prev = p->next = nil; palloc.freecount--; } void pagechaintail(Page *p) { if(canlock(&palloc)) panic("pagechaintail"); if(palloc.tail) { p->prev = palloc.tail; palloc.tail->next = p; } else { palloc.head = p; p->prev = 0; } palloc.tail = p; p->next = 0; palloc.freecount++; } void pagechainhead(Page *p) { if(canlock(&palloc)) panic("pagechainhead"); if(palloc.head) { 
p->next = palloc.head; palloc.head->prev = p; } else { palloc.tail = p; p->next = 0; } palloc.head = p; p->prev = 0; palloc.freecount++; } Page* newpage(int clear, Segment **s, ulong va) { Page *p; KMap *k; uchar ct; int i, hw, dontalloc, color; lock(&palloc); color = getpgcolor(va); hw = swapalloc.highwater; for(;;) { if(palloc.freecount > hw) break; if(up->kp && palloc.freecount > 0) break; unlock(&palloc); dontalloc = 0; if(s && *s) { qunlock(&((*s)->lk)); *s = 0; dontalloc = 1; } qlock(&palloc.pwait); /* Hold memory requesters here */ while(waserror()) /* Ignore interrupts */ ; kickpager(); tsleep(&palloc.r, ispages, 0, 1000); poperror(); qunlock(&palloc.pwait); /* * If called from fault and we lost the segment from * underneath don't waste time allocating and freeing * a page. Fault will call newpage again when it has * reacquired the segment locks */ if(dontalloc) return 0; lock(&palloc); } /* First try for our colour */ for(p = palloc.head; p; p = p->next) if(p->color == color) break; ct = PG_NOFLUSH; if(p == 0) { p = palloc.head; p->color = color; ct = PG_NEWCOL; } pageunchain(p); lock(p); if(p->ref != 0) panic("newpage: p->ref %d != 0", p->ref); uncachepage(p); p->ref++; p->va = va; p->modref = 0; for(i = 0; i < MAXMACH; i++) p->cachectl[i] = ct; unlock(p); unlock(&palloc); if(clear) { k = kmap(p); memset((void*)VA(k), 0, BY2PG); kunmap(k); } return p; } int ispages(void*) { return palloc.freecount >= swapalloc.highwater; } void putpage(Page *p) { if(onswap(p)) { putswap(p); return; } lock(&palloc); lock(p); if(p->ref == 0) panic("putpage"); if(--p->ref > 0) { unlock(p); unlock(&palloc); return; } if(p->image && p->image != &swapimage) pagechaintail(p); else pagechainhead(p); if(palloc.r.p != 0) wakeup(&palloc.r); unlock(p); unlock(&palloc); } Page* auxpage(void) { Page *p; lock(&palloc); p = palloc.head; if(palloc.freecount < swapalloc.highwater) { unlock(&palloc); return 0; } pageunchain(p); lock(p); if(p->ref != 0) panic("auxpage"); p->ref++; 
uncachepage(p); unlock(p); unlock(&palloc); return p; } static int dupretries = 15000; int duppage(Page *p) /* Always call with p locked */ { Page *np; int color; int retries; retries = 0; retry: if(retries++ > dupretries){ print("duppage %d, up %p\n", retries, up); dupretries += 100; if(dupretries > 100000) panic("duppage\n"); uncachepage(p); return 1; } /* don't dup pages with no image */ if(p->ref == 0 || p->image == nil || p->image->notext) return 0; /* * normal lock ordering is to call * lock(&palloc) before lock(p). * To avoid deadlock, we have to drop * our locks and try again. */ if(!canlock(&palloc)){ unlock(p); if(up) sched(); lock(p); goto retry; } /* No freelist cache when memory is very low */ if(palloc.freecount < swapalloc.highwater) { unlock(&palloc); uncachepage(p); return 1; } color = getpgcolor(p->va); for(np = palloc.head; np; np = np->next) if(np->color == color) break; /* No page of the correct color */ if(np == 0) { unlock(&palloc); uncachepage(p); return 1; } pageunchain(np); pagechaintail(np); /* * XXX - here's a bug? - np is on the freelist but it's not really free. * when we unlock palloc someone else can come in, decide to * use np, and then try to lock it. they succeed after we've * run copypage and cachepage and unlock(np). then what? * they call pageunchain before locking(np), so it's removed * from the freelist, but still in the cache because of * cachepage below. if someone else looks in the cache * before they remove it, the page will have a nonzero ref * once they finally lock(np). 
*/ lock(np); unlock(&palloc); /* Cache the new version */ uncachepage(np); np->va = p->va; np->daddr = p->daddr; copypage(p, np); cachepage(np, p->image); unlock(np); uncachepage(p); return 0; } void copypage(Page *f, Page *t) { KMap *ks, *kd; ks = kmap(f); kd = kmap(t); memmove((void*)VA(kd), (void*)VA(ks), BY2PG); kunmap(ks); kunmap(kd); } void uncachepage(Page *p) /* Always called with a locked page */ { Page **l, *f; if(p->image == 0) return; lock(&palloc.hashlock); l = &pghash(p->daddr); for(f = *l; f; f = f->hash) { if(f == p) { *l = p->hash; break; } l = &f->hash; } unlock(&palloc.hashlock); putimage(p->image); p->image = 0; p->daddr = 0; } void cachepage(Page *p, Image *i) { Page **l; /* If this ever happens it should be fixed by calling * uncachepage instead of panic. I think there is a race * with pio in which this can happen. Calling uncachepage is * correct - I just wanted to see if we got here. */ if(p->image) panic("cachepage"); incref(i); lock(&palloc.hashlock); p->image = i; l = &pghash(p->daddr); p->hash = *l; *l = p; unlock(&palloc.hashlock); } void cachedel(Image *i, ulong daddr) { Page *f, **l; lock(&palloc.hashlock); l = &pghash(daddr); for(f = *l; f; f = f->hash) { if(f->image == i && f->daddr == daddr) { lock(f); if(f->image == i && f->daddr == daddr){ *l = f->hash; putimage(f->image); f->image = 0; f->daddr = 0; } unlock(f); break; } l = &f->hash; } unlock(&palloc.hashlock); } Page * lookpage(Image *i, ulong daddr) { Page *f; lock(&palloc.hashlock); for(f = pghash(daddr); f; f = f->hash) { if(f->image == i && f->daddr == daddr) { unlock(&palloc.hashlock); lock(&palloc); lock(f); if(f->image != i || f->daddr != daddr) { unlock(f); unlock(&palloc); return 0; } if(++f->ref == 1) pageunchain(f); unlock(&palloc); unlock(f); return f; } } unlock(&palloc.hashlock); return 0; } Pte* ptecpy(Pte *old) { Pte *new; Page **src, **dst; new = ptealloc(); dst = &new->pages[old->first-old->pages]; new->first = dst; for(src = old->first; src <= old->last; 
src++, dst++) if(*src) { if(onswap(*src)) dupswap(*src); else { lock(*src); (*src)->ref++; unlock(*src); } new->last = dst; *dst = *src; } return new; } Pte* ptealloc(void) { Pte *new; new = smalloc(sizeof(Pte)); new->first = &new->pages[PTEPERTAB]; new->last = new->pages; return new; } void freepte(Segment *s, Pte *p) { int ref; void (*fn)(Page*); Page *pt, **pg, **ptop; switch(s->type&SG_TYPE) { case SG_PHYSICAL: fn = s->pseg->pgfree; ptop = &p->pages[PTEPERTAB]; if(fn) { for(pg = p->pages; pg < ptop; pg++) { if(*pg == 0) continue; (*fn)(*pg); *pg = 0; } break; } for(pg = p->pages; pg < ptop; pg++) { pt = *pg; if(pt == 0) continue; lock(pt); ref = --pt->ref; unlock(pt); if(ref == 0) free(pt); } break; default: for(pg = p->first; pg <= p->last; pg++) if(*pg) { putpage(*pg); *pg = 0; } } free(p); } ulong pagenumber(Page *p) { return p-palloc.pages; } void checkpagerefs(void) { int s; ulong i, np, nwrong; ulong *ref; np = palloc.user; ref = malloc(np*sizeof ref[0]); if(ref == nil){ print("checkpagerefs: out of memory\n"); return; } /* * This may not be exact if there are other processes * holding refs to pages on their stacks. The hope is * that if you run it on a quiescent system it will still * be useful. */ s = splhi(); lock(&palloc); countpagerefs(ref, 0); portcountpagerefs(ref, 0); nwrong = 0; for(i=0; imark avoids double-counting. 
*/ n = 0; ns = 0; for(i=0; iseg[j]; if(s) s->mark = 0; } } for(i=0; iseg[j]; if(s == nil || s->mark++) continue; ns++; for(k=0; kmapsize; k++){ pte = s->map[k]; if(pte == nil) continue; for(pg = pte->first; pg <= pte->last; pg++){ entry = *pg; if(pagedout(entry)) continue; if(print){ if(ref[pagenumber(entry)]) iprint("page %#.8lux in segment %#p\n", entry->pa, s); continue; } if(ref[pagenumber(entry)]++ == 0) n++; } } } } if(!print){ iprint("%lud pages in %lud segments\n", n, ns); for(i=0; iseg[j]; if(s == nil) continue; if(s->ref != s->mark){ iprint("segment %#p (used by proc %lud pid %lud) has bad ref count %lud actual %lud\n", s, i, p->pid, s->ref, s->mark); } } } } } r*BY2PG/1024; vkb = pkb + (conf.nswap*BY2PG)/1024; /* Paging numbers */ swapalloc.highwater = (palloc.user*5)/100; swasys/src/9/port/segment.c 664 0 0 34051 12564702531 14257ustar00millermiller#include "u.h" #include "../port/lib.h" #include "mem.h" #include "dat.h" #include "fns.h" #include "../port/error.h" static void imagereclaim(void); static void imagechanreclaim(void); #include "io.h" /* * Attachable segment types */ static Physseg physseg[10] = { { SG_SHARED, "shared", 0, SEGMAXSIZE, 0, 0 }, { SG_BSS, "memory", 0, SEGMAXSIZE, 0, 0 }, { 0, 0, 0, 0, 0, 0 }, }; static Lock physseglock; #define NFREECHAN 64 #define IHASHSIZE 64 #define ihash(s) imagealloc.hash[s%IHASHSIZE] static struct Imagealloc { Lock; Image *free; Image *hash[IHASHSIZE]; QLock ireclaim; /* mutex on reclaiming free images */ Chan **freechan; /* free image channels */ int nfreechan; /* number of free channels */ int szfreechan; /* size of freechan array */ QLock fcreclaim; /* mutex on reclaiming free channels */ }imagealloc; Segment* (*_globalsegattach)(Proc*, char*); void initseg(void) { Image *i, *ie; imagealloc.free = xalloc(conf.nimage*sizeof(Image)); if (imagealloc.free == nil) panic("initseg: no memory"); ie = &imagealloc.free[conf.nimage-1]; for(i = imagealloc.free; i < ie; i++) i->next = i+1; i->next = 0; 
imagealloc.freechan = malloc(NFREECHAN * sizeof(Chan*)); imagealloc.szfreechan = NFREECHAN; } Segment * newseg(int type, ulong base, ulong size) { Segment *s; int mapsize; if(size > (SEGMAPSIZE*PTEPERTAB)) error(Enovmem); s = smalloc(sizeof(Segment)); s->ref = 1; s->type = type; s->base = base; s->top = base+(size*BY2PG); s->size = size; s->sema.prev = &s->sema; s->sema.next = &s->sema; mapsize = ROUND(size, PTEPERTAB)/PTEPERTAB; if(mapsize > nelem(s->ssegmap)){ mapsize *= 2; if(mapsize > (SEGMAPSIZE*PTEPERTAB)) mapsize = (SEGMAPSIZE*PTEPERTAB); s->map = smalloc(mapsize*sizeof(Pte*)); s->mapsize = mapsize; } else{ s->map = s->ssegmap; s->mapsize = nelem(s->ssegmap); } return s; } void putseg(Segment *s) { Pte **pp, **emap; Image *i; if(s == 0) return; i = s->image; if(i != 0) { lock(i); lock(s); if(i->s == s && s->ref == 1) i->s = 0; unlock(i); } else lock(s); s->ref--; if(s->ref != 0) { unlock(s); return; } unlock(s); qlock(&s->lk); if(i) putimage(i); emap = &s->map[s->mapsize]; for(pp = s->map; pp < emap; pp++) if(*pp) freepte(s, *pp); qunlock(&s->lk); if(s->map != s->ssegmap) free(s->map); if(s->profile != 0) free(s->profile); free(s); } void relocateseg(Segment *s, ulong offset) { Page **pg, *x; Pte *pte, **p, **endpte; endpte = &s->map[s->mapsize]; for(p = s->map; p < endpte; p++) { if(*p == 0) continue; pte = *p; for(pg = pte->first; pg <= pte->last; pg++) { if(x = *pg) x->va += offset; } } } Segment* dupseg(Segment **seg, int segno, int share) { int i, size; Pte *pte; Segment *n, *s; SET(n); s = seg[segno]; qlock(&s->lk); if(waserror()){ qunlock(&s->lk); nexterror(); } switch(s->type&SG_TYPE) { case SG_TEXT: /* New segment shares pte set */ case SG_SHARED: case SG_PHYSICAL: goto sameseg; case SG_STACK: n = newseg(s->type, s->base, s->size); break; case SG_BSS: /* Just copy on write */ if(share) goto sameseg; n = newseg(s->type, s->base, s->size); break; case SG_DATA: /* Copy on write plus demand load info */ if(segno == TSEG){ poperror(); qunlock(&s->lk); 
return data2txt(s); } if(share) goto sameseg; n = newseg(s->type, s->base, s->size); incref(s->image); n->image = s->image; n->fstart = s->fstart; n->flen = s->flen; break; } size = s->mapsize; for(i = 0; i < size; i++) if(pte = s->map[i]) n->map[i] = ptecpy(pte); n->flushme = s->flushme; if(s->ref > 1) procflushseg(s); poperror(); qunlock(&s->lk); return n; sameseg: incref(s); poperror(); qunlock(&s->lk); return s; } void segpage(Segment *s, Page *p) { Pte **pte; ulong off; Page **pg; if(p->va < s->base || p->va >= s->top) panic("segpage"); off = p->va - s->base; pte = &s->map[off/PTEMAPMEM]; if(*pte == 0) *pte = ptealloc(); pg = &(*pte)->pages[(off&(PTEMAPMEM-1))/BY2PG]; *pg = p; if(pg < (*pte)->first) (*pte)->first = pg; if(pg > (*pte)->last) (*pte)->last = pg; } Image* attachimage(int type, Chan *c, ulong base, ulong len) { Image *i, **l; /* reclaim any free channels from reclaimed segments */ if(imagealloc.nfreechan) imagechanreclaim(); lock(&imagealloc); /* * Search the image cache for remains of the text from a previous * or currently running incarnation */ for(i = ihash(c->qid.path); i; i = i->hash) { if(c->qid.path == i->qid.path) { lock(i); if(eqqid(c->qid, i->qid) && eqqid(c->mqid, i->mqid) && c->mchan == i->mchan && c->type == i->type) { goto found; } unlock(i); } } /* * imagereclaim dumps pages from the free list which are cached by image * structures. This should free some image structures. 
*/ while(!(i = imagealloc.free)) { unlock(&imagealloc); imagereclaim(); sched(); lock(&imagealloc); } imagealloc.free = i->next; lock(i); incref(c); i->c = c; i->type = c->type; i->qid = c->qid; i->mqid = c->mqid; i->mchan = c->mchan; l = &ihash(c->qid.path); i->hash = *l; *l = i; found: unlock(&imagealloc); if(i->s == 0) { /* Disaster after commit in exec */ if(waserror()) { unlock(i); pexit(Enovmem, 1); } i->s = newseg(type, base, len); i->s->image = i; i->ref++; poperror(); } else incref(i->s); return i; } static struct { int calls; /* times imagereclaim was called */ int loops; /* times the main loop was run */ uvlong ticks; /* total time in the main loop */ uvlong maxt; /* longest time in main loop */ } irstats; void pageunchain(Page *p); static void imagereclaim(void) { int n; Page *p; uvlong ticks; irstats.calls++; /* Somebody is already cleaning the page cache */ if(!canqlock(&imagealloc.ireclaim)) return; lock(&palloc); ticks = fastticks(nil); n = 0; /* * All the pages with images backing them are at the * end of the list (see putpage) so start there and work * backward. */ for(p = palloc.tail; p && p->image && n<1000; p = p->prev) { if(p->ref == 0 && canlock(p)) { if(p->ref == 0) { n++; uncachepage(p); pageunchain(p); pagechainhead(p); } unlock(p); } } ticks = fastticks(nil) - ticks; unlock(&palloc); irstats.loops++; irstats.ticks += ticks; if(ticks > irstats.maxt) irstats.maxt = ticks; //print("T%llud+", ticks); qunlock(&imagealloc.ireclaim); } /* * since close can block, this has to be called outside of * spin locks. */ static void imagechanreclaim(void) { Chan *c; /* Somebody is already cleaning the image chans */ if(!canqlock(&imagealloc.fcreclaim)) return; /* * We don't have to recheck that nfreechan > 0 after we * acquire the lock, because we're the only ones who decrement * it (the other lock contender increments it), and there's only * one of us thanks to the qlock above. 
*/ while(imagealloc.nfreechan > 0){ lock(&imagealloc); imagealloc.nfreechan--; c = imagealloc.freechan[imagealloc.nfreechan]; unlock(&imagealloc); cclose(c); } qunlock(&imagealloc.fcreclaim); } void putimage(Image *i) { Chan *c, **cp; Image *f, **l; if(i->notext) return; lock(i); if(--i->ref == 0) { l = &ihash(i->qid.path); mkqid(&i->qid, ~0, ~0, QTFILE); unlock(i); c = i->c; lock(&imagealloc); for(f = *l; f; f = f->hash) { if(f == i) { *l = i->hash; break; } l = &f->hash; } i->next = imagealloc.free; imagealloc.free = i; /* defer freeing channel till we're out of spin lock's */ if(imagealloc.nfreechan == imagealloc.szfreechan){ imagealloc.szfreechan += NFREECHAN; cp = malloc(imagealloc.szfreechan*sizeof(Chan*)); if(cp == nil) panic("putimage"); memmove(cp, imagealloc.freechan, imagealloc.nfreechan*sizeof(Chan*)); free(imagealloc.freechan); imagealloc.freechan = cp; } imagealloc.freechan[imagealloc.nfreechan++] = c; unlock(&imagealloc); return; } unlock(i); } long ibrk(ulong addr, int seg) { Segment *s, *ns; ulong newtop, newsize; int i, mapsize; Pte **map; s = up->seg[seg]; if(s == 0) error(Ebadarg); if(addr == 0) return s->base; qlock(&s->lk); /* We may start with the bss overlapping the data */ if(addr < s->base) { if(seg != BSEG || up->seg[DSEG] == 0 || addr < up->seg[DSEG]->base) { qunlock(&s->lk); error(Enovmem); } addr = s->base; } newtop = PGROUND(addr); newsize = (newtop-s->base)/BY2PG; if(newtop < s->top) { /* * do not shrink a segment shared with other procs, as the * to-be-freed address space may have been passed to the kernel * already by another proc and is past the validaddr stage. 
*/ if(s->ref > 1){ qunlock(&s->lk); error(Einuse); } mfreeseg(s, newtop, (s->top-newtop)/BY2PG); s->top = newtop; s->size = newsize; qunlock(&s->lk); flushmmu(); return 0; } for(i = 0; i < NSEG; i++) { ns = up->seg[i]; if(ns == 0 || ns == s) continue; if(newtop >= ns->base && newtop < ns->top) { qunlock(&s->lk); error(Esoverlap); } } if(newsize > (SEGMAPSIZE*PTEPERTAB)) { qunlock(&s->lk); error(Enovmem); } mapsize = ROUND(newsize, PTEPERTAB)/PTEPERTAB; if(mapsize > s->mapsize){ map = smalloc(mapsize*sizeof(Pte*)); memmove(map, s->map, s->mapsize*sizeof(Pte*)); if(s->map != s->ssegmap) free(s->map); s->map = map; s->mapsize = mapsize; } s->top = newtop; s->size = newsize; qunlock(&s->lk); return 0; } /* * called with s->lk locked */ void mfreeseg(Segment *s, ulong start, int pages) { int i, j, size; ulong soff; Page *pg; Page *list; soff = start-s->base; j = (soff&(PTEMAPMEM-1))/BY2PG; size = s->mapsize; list = nil; for(i = soff/PTEMAPMEM; i < size; i++) { if(pages <= 0) break; if(s->map[i] == 0) { pages -= PTEPERTAB-j; j = 0; continue; } while(j < PTEPERTAB) { pg = s->map[i]->pages[j]; /* * We want to zero s->map[i]->page[j] and putpage(pg), * but we have to make sure other processors flush the * entry from their TLBs before the page is freed. * We construct a list of the pages to be freed, zero * the entries, then (below) call procflushseg, and call * putpage on the whole list. * * Swapped-out pages don't appear in TLBs, so it's okay * to putswap those pages before procflushseg. 
*/ if(pg){ if(onswap(pg)) putswap(pg); else{ pg->next = list; list = pg; } s->map[i]->pages[j] = 0; } if(--pages == 0) goto out; j++; } j = 0; } out: /* flush this seg in all other processes */ if(s->ref > 1) procflushseg(s); /* free the pages */ for(pg = list; pg != nil; pg = list){ list = list->next; putpage(pg); } } Segment* isoverlap(Proc *p, ulong va, int len) { int i; Segment *ns; ulong newtop; newtop = va+len; for(i = 0; i < NSEG; i++) { ns = p->seg[i]; if(ns == 0) continue; if((newtop > ns->base && newtop <= ns->top) || (va >= ns->base && va < ns->top)) return ns; } return nil; } int addphysseg(Physseg* new) { Physseg *ps; /* * Check not already entered and there is room * for a new entry and the terminating null entry. */ lock(&physseglock); for(ps = physseg; ps->name; ps++){ if(strcmp(ps->name, new->name) == 0){ unlock(&physseglock); return -1; } } if(ps-physseg >= nelem(physseg)-2){ unlock(&physseglock); return -1; } *ps = *new; unlock(&physseglock); return 0; } int isphysseg(char *name) { Physseg *ps; int rv = 0; lock(&physseglock); for(ps = physseg; ps->name; ps++){ if(strcmp(ps->name, name) == 0){ rv = 1; break; } } unlock(&physseglock); return rv; } ulong segattach(Proc *p, ulong attr, char *name, ulong va, ulong len) { int sno; Segment *s, *os; Physseg *ps; if(va != 0 && va >= USTKTOP) error(Ebadarg); validaddr((ulong)name, 1, 0); vmemchr(name, 0, ~0); for(sno = 0; sno < NSEG; sno++) if(p->seg[sno] == nil && sno != ESEG) break; if(sno == NSEG) error(Enovmem); /* * first look for a global segment with the * same name */ if(_globalsegattach != nil){ s = (*_globalsegattach)(p, name); if(s != nil){ p->seg[sno] = s; return s->base; } } len = PGROUND(len); if(len == 0) error(Ebadarg); /* * Find a hole in the address space. * Starting at the lowest possible stack address - len, * check for an overlapping segment, and repeat at the * base of that segment - len until either a hole is found * or the address space is exhausted. 
Ensure that we don't * map the zero page. */ if(va == 0) { for (os = p->seg[SSEG]; os != nil; os = isoverlap(p, va, len)) { va = os->base; if(len >= va) error(Enovmem); va -= len; } va &= ~(BY2PG-1); } else { va &= ~(BY2PG-1); if(va == 0 || va >= USTKTOP) error(Ebadarg); } if(isoverlap(p, va, len) != nil) error(Esoverlap); for(ps = physseg; ps->name; ps++) if(strcmp(name, ps->name) == 0) goto found; error(Ebadarg); found: if(len > ps->size) error(Enovmem); attr &= ~SG_TYPE; /* Turn off what is not allowed */ attr |= ps->attr; /* Copy in defaults */ s = newseg(attr, va, len/BY2PG); s->pseg = ps; p->seg[sno] = s; return va; } void pteflush(Pte *pte, int s, int e) { int i; Page *p; for(i = s; i < e; i++) { p = pte->pages[i]; if(pagedout(p) == 0) memset(p->cachectl, PG_TXTFLUSH, sizeof(p->cachectl)); } } long syssegflush(ulong *arg) { Segment *s; ulong addr, l; Pte *pte; int chunk, ps, pe, len; addr = arg[0]; len = arg[1]; while(len > 0) { s = seg(up, addr, 1); if(s == 0) error(Ebadarg); s->flushme = 1; more: l = len; if(addr+l > s->top) l = s->top - addr; ps = addr-s->base; pte = s->map[ps/PTEMAPMEM]; ps &= PTEMAPMEM-1; pe = PTEMAPMEM; if(pe-ps > l){ pe = ps + l; pe = (pe+BY2PG-1)&~(BY2PG-1); } if(pe == ps) { qunlock(&s->lk); error(Ebadarg); } if(pte) pteflush(pte, ps/BY2PG, pe/BY2PG); chunk = pe-ps; len -= chunk; addr += chunk; if(len > 0 && addr < s->top) goto more; qunlock(&s->lk); } flushmmu(); return 0; } void segclock(ulong pc) { Segment *s; s = up->seg[TSEG]; if(s == 0 || s->profile == 0) return; s->profile[0] += TK2MS(1); if(pc >= s->base && pc < s->top) { pc -= s->base; s->profile[pc>>LRESPROF] += TK2MS(1); } } ast) (*pte)->last = pg; } Image* attachimage(int type, Chan *c, ulong base, ulong len) { Image *i, **l; /* reclaim any free channels from reclaimed segments */ if(imagealloc.nfreechan) imagechanreclaim(); lock(&imagealloc); /* * Search the image cache for remains of the text from a previous * or currently running incarnation */ for(i = 
ihash(c->qid.path); i; i = i->hash) { if(c->qid.path == i->qid.path) { lock(i); if(eqqid(c->qid, i->qid)