diff --git a/src/cmd/link/internal/amd64/asm.go b/src/cmd/link/internal/amd64/asm.go
index ea99e7f259..9dd55727a0 100644
--- a/src/cmd/link/internal/amd64/asm.go
+++ b/src/cmd/link/internal/amd64/asm.go
@@ -789,3 +789,36 @@ func asmb(ctxt *ld.Link) {
 
 	ld.Cflush()
 }
+
+func tlsIEtoLE(s *ld.Symbol, off, size int) {
+	// Transform the PC-relative instruction into a constant load.
+	// That is,
+	//
+	//	MOVQ X(IP), REG  ->  MOVQ $Y, REG
+	//
+	// To determine the instruction and register, we study the op codes.
+	// Consult an AMD64 instruction encoding guide to decipher this.
+	op := s.P[off-3 : off]
+	reg := op[2] >> 3
+
+	if op[1] == 0x8b || reg == 4 {
+		// MOVQ
+		if op[0] == 0x4c {
+			op[0] = 0x49
+		} else if size == 4 && op[0] == 0x44 {
+			op[0] = 0x41
+		}
+		if op[1] == 0x8b {
+			op[1] = 0xc7
+		} else {
+			op[1] = 0x81 // special case for SP
+		}
+		op[2] = 0xc0 | reg
+	} else {
+		// An alternate op is ADDQ. This is handled by GNU gold,
+		// but right now is not generated by the Go compiler:
+		//	ADDQ X(IP), REG -> ADDQ $Y, REG
+		// Consider adding support for it here.
+		log.Fatalf("expected TLS IE op to be MOVQ, got %v", op)
+	}
+}
diff --git a/src/cmd/link/internal/amd64/obj.go b/src/cmd/link/internal/amd64/obj.go
index 4b815c771d..0494050d86 100644
--- a/src/cmd/link/internal/amd64/obj.go
+++ b/src/cmd/link/internal/amd64/obj.go
@@ -73,6 +73,7 @@ func linkarchinit() {
 	ld.Thearch.Append16 = ld.Append16l
 	ld.Thearch.Append32 = ld.Append32l
 	ld.Thearch.Append64 = ld.Append64l
+	ld.Thearch.TLSIEtoLE = tlsIEtoLE
 
 	ld.Thearch.Linuxdynld = "/lib64/ld-linux-x86-64.so.2"
 	ld.Thearch.Freebsddynld = "/libexec/ld-elf.so.1"
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index 73e2717ed9..fd536181d7 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -448,7 +448,17 @@ func relocsym(ctxt *Link, s *Symbol) {
 				}
 				break
 			}
-			log.Fatalf("cannot handle R_TLS_IE when linking internally")
+			if Buildmode == BuildmodePIE && Iself {
+				// We are linking the final executable, so we
+				// can optimize any TLS IE relocation to LE.
+				if Thearch.TLSIEtoLE == nil {
+					log.Fatalf("internal linking of TLS IE not supported on %s", SysArch.Family)
+				}
+				Thearch.TLSIEtoLE(s, int(off), int(r.Siz))
+				o = int64(ctxt.Tlsoffset)
+			} else {
+				log.Fatalf("cannot handle R_TLS_IE (sym %s) when linking internally", s.Name)
+			}
 
 		case obj.R_ADDR:
 			if Linkmode == LinkExternal && r.Sym.Type != obj.SCONST {
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 76047905be..7750f1dc68 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -111,6 +111,14 @@ type Arch struct {
 	Append16 func(b []byte, v uint16) []byte
 	Append32 func(b []byte, v uint32) []byte
 	Append64 func(b []byte, v uint64) []byte
+
+	// TLSIEtoLE converts a TLS Initial Executable relocation to
+	// a TLS Local Executable relocation.
+	//
+	// This is possible when a TLS IE relocation refers to a local
+	// symbol in an executable, which is typical when internally
+	// linking PIE binaries.
+	TLSIEtoLE func(s *Symbol, off, size int)
 }
 
 var (