Merge pull request #79 from tailscale/update-go1.21.4
Update go1.21.4
awly committed Nov 9, 2023
2 parents 56d25cd + ec6bea9 commit b6cb22c
Showing 54 changed files with 821 additions and 281 deletions.
4 changes: 2 additions & 2 deletions VERSION
@@ -1,2 +1,2 @@
go1.21.3
time 2023-10-09T17:04:35Z
go1.21.4
time 2023-11-01T20:46:39Z
32 changes: 32 additions & 0 deletions src/cmd/cgo/internal/testcarchive/carchive_test.go
@@ -1365,3 +1365,35 @@ func TestDeepStack(t *testing.T) {
t.Error(err)
}
}

func TestSharedObject(t *testing.T) {
// Test that we can put a Go c-archive into a C shared object.
globalSkip(t)
testenv.MustHaveGoBuild(t)
testenv.MustHaveCGO(t)
testenv.MustHaveBuildMode(t, "c-archive")

t.Parallel()

if !testWork {
defer func() {
os.Remove("libgo_s.a")
os.Remove("libgo_s.h")
os.Remove("libgo_s.so")
}()
}

cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo_s.a", "./libgo")
out, err := cmd.CombinedOutput()
t.Logf("%v\n%s", cmd.Args, out)
if err != nil {
t.Fatal(err)
}

ccArgs := append(cc, "-shared", "-o", "libgo_s.so", "libgo_s.a")
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
t.Logf("%v\n%s", ccArgs, out)
if err != nil {
t.Fatal(err)
}
}
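Note: the ./libgo package the test builds lives in the testcarchive testdata and is not shown in this diff. A minimal sketch of what a c-archive-able package could look like (the GoFunc export is a hypothetical stand-in, not the actual testdata):

```go
package main

import "C" // cgo import is required so the //export directive below is processed

//export GoFunc
func GoFunc() C.int { return 42 } // hypothetical export; the real libgo differs

func main() {} // -buildmode=c-archive needs package main, but main is never called from C
```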
14 changes: 7 additions & 7 deletions src/cmd/compile/internal/ssa/_gen/generic.rules
@@ -981,7 +981,7 @@
(ConstNil <typ.Uintptr>)
(ConstNil <typ.BytePtr>))

(NilCheck (GetG mem) mem) => mem
(NilCheck ptr:(GetG mem) mem) => ptr

(If (Not cond) yes no) => (If cond no yes)
(If (ConstBool [c]) yes no) && c => (First yes no)
@@ -2055,19 +2055,19 @@
&& isSameCall(call.Aux, "runtime.newobject")
=> mem

(NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _)
&& isSameCall(call.Aux, "runtime.newobject")
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> (Invalid)
=> ptr

(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
&& isSameCall(call.Aux, "runtime.newobject")
&& warnRule(fe.Debug_checknil(), v, "removed nil check")
=> (Invalid)
=> ptr

// Addresses of globals are always non-nil.
(NilCheck (Addr {_} (SB)) _) => (Invalid)
(NilCheck (Convert (Addr {_} (SB)) _) _) => (Invalid)
(NilCheck ptr:(Addr {_} (SB)) _) => ptr
(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr

// for late-expanded calls, recognize memequal applied to a single constant byte
// Support is limited to 1, 2, 4, and 8 byte sizes
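Note: a hedged source-level sketch of when the newobject rules above fire. Pointers returned by new are lowered to runtime.newobject calls and are never nil, so an implicit nil check on such a pointer can be replaced by the pointer itself (function g is illustrative only):

```go
func g() int {
	p := new(int) // lowered to SelectN [0] of a StaticLECall to runtime.newobject
	return *p     // any implicit nil check on p is provably redundant and rewrites to p
}
```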
2 changes: 1 addition & 1 deletion src/cmd/compile/internal/ssa/_gen/genericOps.go
@@ -471,7 +471,7 @@ var genericOps = []opData{
{name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
{name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
{name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
{name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
{name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified.

// Pseudo-ops
{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
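Note: as a rough analogy (not compiler code), the new op shape makes NilCheck behave like an identity function that panics on nil, which is why later values can consume the checked pointer rather than the raw one:

```go
// nilCheck mirrors the new OpNilCheck contract: panic if the pointer is
// nil, otherwise return the very same pointer. The real op additionally
// takes and orders against a memory argument, elided here.
func nilCheck[T any](p *T) *T {
	if p == nil {
		panic("runtime error: invalid memory address or nil pointer dereference")
	}
	return p
}
```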
23 changes: 22 additions & 1 deletion src/cmd/compile/internal/ssa/check.go
@@ -317,7 +317,28 @@ func checkFunc(f *Func) {
if !v.Aux.(*ir.Name).Type().HasPointers() {
f.Fatalf("vardef must have pointer type %s", v.Aux.(*ir.Name).Type().String())
}

case OpNilCheck:
// nil checks have pointer type before scheduling, and
// void type after scheduling.
if f.scheduled {
if v.Uses != 0 {
f.Fatalf("nilcheck must have 0 uses %s", v.Uses)
}
if !v.Type.IsVoid() {
f.Fatalf("nilcheck must have void type %s", v.Type.String())
}
} else {
if !v.Type.IsPtrShaped() && !v.Type.IsUintptr() {
f.Fatalf("nilcheck must have pointer type %s", v.Type.String())
}
}
if !v.Args[0].Type.IsPtrShaped() && !v.Args[0].Type.IsUintptr() {
f.Fatalf("nilcheck must have argument of pointer type %s", v.Args[0].Type.String())
}
if !v.Args[1].Type.IsMemory() {
f.Fatalf("bad arg 1 type to %s: want mem, have %s",
v.Op, v.Args[1].Type.String())
}
}

// TODO: check for cycles in values
7 changes: 3 additions & 4 deletions src/cmd/compile/internal/ssa/deadcode.go
@@ -110,16 +110,15 @@ func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value
}
}
for _, v := range b.Values {
if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects || opcodeTable[v.Op].nilCheck) && !live[v.ID] {
live[v.ID] = true
q = append(q, v)
if v.Pos.IsStmt() != src.PosNotStmt {
liveOrderStmts = append(liveOrderStmts, v)
}
}
if v.Type.IsVoid() && !live[v.ID] {
// The only Void ops are nil checks and inline marks. We must keep these.
if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
if v.Op == OpInlMark {
if !liveInlIdx[int(v.AuxInt)] {
// We don't need marks for bodies that
// have been completely optimized away.
// TODO: save marks only for bodies which
66 changes: 57 additions & 9 deletions src/cmd/compile/internal/ssa/deadstore.go
@@ -73,9 +73,9 @@ func dse(f *Func) {
}

// Walk backwards looking for dead stores. Keep track of shadowed addresses.
// A "shadowed address" is a pointer and a size describing a memory region that
// is known to be written. We keep track of shadowed addresses in the shadowed
// map, mapping the ID of the address to the size of the shadowed region.
// A "shadowed address" is a pointer, offset, and size describing a memory region that
// is known to be written. We keep track of shadowed addresses in the shadowed map,
// mapping the ID of the address to a shadowRange where future writes will happen.
// Since we're walking backwards, writes to a shadowed region are useless,
// as they will be immediately overwritten.
shadowed.clear()
@@ -88,13 +88,20 @@
shadowed.clear()
}
if v.Op == OpStore || v.Op == OpZero {
ptr := v.Args[0]
var off int64
for ptr.Op == OpOffPtr { // Walk to base pointer
off += ptr.AuxInt
ptr = ptr.Args[0]
}
var sz int64
if v.Op == OpStore {
sz = v.Aux.(*types.Type).Size()
} else { // OpZero
sz = v.AuxInt
}
if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
sr := shadowRange(shadowed.get(ptr.ID))
if sr.contains(off, off+sz) {
// Modify the store/zero into a copy of the memory state,
// effectively eliding the store operation.
if v.Op == OpStore {
@@ -108,10 +115,8 @@
v.AuxInt = 0
v.Op = OpCopy
} else {
if sz > 0x7fffffff { // work around sparseMap's int32 value type
sz = 0x7fffffff
}
shadowed.set(v.Args[0].ID, int32(sz))
// Extend shadowed region.
shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
}
}
// walk to previous store
@@ -131,6 +136,49 @@
}
}

// A shadowRange encodes a set of byte offsets [lo():hi()] from
// a given pointer that will be written to later in the block.
// A zero shadowRange encodes an empty shadowed range (and so
// does a -1 shadowRange, which is what sparsemap.get returns
// on a failed lookup).
type shadowRange int32

func (sr shadowRange) lo() int64 {
return int64(sr & 0xffff)
}
func (sr shadowRange) hi() int64 {
return int64((sr >> 16) & 0xffff)
}

// contains reports whether [lo:hi] is completely within sr.
func (sr shadowRange) contains(lo, hi int64) bool {
return lo >= sr.lo() && hi <= sr.hi()
}

// merge returns the union of sr and [lo:hi].
// merge is allowed to return something smaller than the union.
func (sr shadowRange) merge(lo, hi int64) shadowRange {
if lo < 0 || hi > 0xffff {
// Ignore offsets that are too large or small.
return sr
}
if sr.lo() == sr.hi() {
// Old range is empty - use new one.
return shadowRange(lo + hi<<16)
}
if hi < sr.lo() || lo > sr.hi() {
// The two regions don't overlap or abut, so we would
// have to keep track of multiple disjoint ranges.
// Because we can only keep one, keep the larger one.
if sr.hi()-sr.lo() >= hi-lo {
return sr
}
return shadowRange(lo + hi<<16)
}
// Regions overlap or abut - compute the union.
return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
}

// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
// we track the operations that the address of each auto reaches and if it only
// reaches stores then we delete all the stores. The other operations will then
@@ -201,7 +249,7 @@ func elimDeadAutosGeneric(f *Func) {
}

if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
// Nil check has no use, but we need to keep it.
// We need to keep nil checks even if they have no use.
// Also keep calls and values that have side effects.
return
}
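Note: a standalone sketch of the packed encoding used by shadowRange, assuming the same 16-bit layout (lo in bits 0-15, hi in bits 16-31); the sparse-map plumbing is elided:

```go
package main

import "fmt"

type shadowRange int32

func (sr shadowRange) lo() int64 { return int64(sr & 0xffff) }
func (sr shadowRange) hi() int64 { return int64((sr >> 16) & 0xffff) }

func main() {
	// A later store covers bytes [8, 24) past the base pointer.
	sr := shadowRange(8 + 24<<16)
	fmt.Println(sr.lo(), sr.hi()) // 8 24

	// Walking backwards, an earlier store of bytes [8, 16) falls
	// entirely inside the shadowed range, so dse may rewrite it
	// into a no-op copy of the memory state.
	lo, hi := int64(8), int64(16)
	fmt.Println(lo >= sr.lo() && hi <= sr.hi()) // true
}
```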
2 changes: 1 addition & 1 deletion src/cmd/compile/internal/ssa/fuse.go
@@ -169,7 +169,7 @@ func fuseBlockIf(b *Block) bool {
// There may be false positives.
func isEmpty(b *Block) bool {
for _, v := range b.Values {
if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() || opcodeTable[v.Op].nilCheck {
return false
}
}
2 changes: 1 addition & 1 deletion src/cmd/compile/internal/ssa/fuse_test.go
@@ -254,7 +254,7 @@ func TestFuseSideEffects(t *testing.T) {
Valu("p", OpArg, c.config.Types.IntPtr, 0, nil),
If("c1", "z0", "exit")),
Bloc("z0",
Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"),
Valu("nilcheck", OpNilCheck, c.config.Types.IntPtr, 0, nil, "p", "mem"),
Goto("exit")),
Bloc("exit",
Exit("mem"),
42 changes: 20 additions & 22 deletions src/cmd/compile/internal/ssa/nilcheck.go
@@ -38,11 +38,14 @@ func nilcheckelim(f *Func) {
work := make([]bp, 0, 256)
work = append(work, bp{block: f.Entry})

// map from value ID to bool indicating if value is known to be non-nil
// in the current dominator path being walked. This slice is updated by
// map from value ID to known non-nil version of that value ID
// (in the current dominator path being walked). This slice is updated by
// walkStates to maintain the known non-nil values.
nonNilValues := f.Cache.allocBoolSlice(f.NumValues())
defer f.Cache.freeBoolSlice(nonNilValues)
// If there is extrinsic information about non-nil-ness, this map
// points a value to itself. If a value is known non-nil because we
// already did a nil check on it, it points to the nil check operation.
nonNilValues := f.Cache.allocValueSlice(f.NumValues())
defer f.Cache.freeValueSlice(nonNilValues)

// make an initial pass identifying any non-nil values
for _, b := range f.Blocks {
@@ -54,7 +57,7 @@
// We assume that SlicePtr is non-nil because we do a bounds check
// before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
nonNilValues[v.ID] = true
nonNilValues[v.ID] = v
}
}
}
@@ -68,16 +71,16 @@
if v.Op == OpPhi {
argsNonNil := true
for _, a := range v.Args {
if !nonNilValues[a.ID] {
if nonNilValues[a.ID] == nil {
argsNonNil = false
break
}
}
if argsNonNil {
if !nonNilValues[v.ID] {
if nonNilValues[v.ID] == nil {
changed = true
}
nonNilValues[v.ID] = true
nonNilValues[v.ID] = v
}
}
}
@@ -103,8 +106,8 @@
if len(b.Preds) == 1 {
p := b.Preds[0].b
if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] {
nonNilValues[ptr.ID] = true
if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil {
nonNilValues[ptr.ID] = ptr
work = append(work, bp{op: ClearPtr, ptr: ptr})
}
}
@@ -117,14 +120,11 @@
pendingLines.clear()

// Next, process values in the block.
i := 0
for _, v := range b.Values {
b.Values[i] = v
i++
switch v.Op {
case OpIsNonNil:
ptr := v.Args[0]
if nonNilValues[ptr.ID] {
if nonNilValues[ptr.ID] != nil {
if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
pendingLines.add(v.Pos)
v.Pos = v.Pos.WithNotStmt()
@@ -135,7 +135,7 @@
}
case OpNilCheck:
ptr := v.Args[0]
if nonNilValues[ptr.ID] {
if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil {
// This is a redundant implicit nil check.
// Logging in the style of the former compiler -- and omit line 1,
// which is usually in generated code.
@@ -145,14 +145,13 @@
if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
pendingLines.add(v.Pos)
}
v.reset(OpUnknown)
f.freeValue(v)
i--
v.Op = OpCopy
v.SetArgs1(nilCheck)
continue
}
// Record the fact that we know ptr is non nil, and remember to
// undo that information when this dominator subtree is done.
nonNilValues[ptr.ID] = true
nonNilValues[ptr.ID] = v
work = append(work, bp{op: ClearPtr, ptr: ptr})
fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
default:
@@ -163,7 +162,7 @@
}
}
// This reduces the lost statement count in "go" by 5 (out of 500 total).
for j := 0; j < i; j++ { // is this an ordering problem?
for j := range b.Values { // is this an ordering problem?
v := b.Values[j]
if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
v.Pos = v.Pos.WithIsStmt()
@@ -174,15 +173,14 @@
b.Pos = b.Pos.WithIsStmt()
pendingLines.remove(b.Pos)
}
b.truncateValues(i)

// Add all dominated blocks to the work list.
for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
work = append(work, bp{op: Work, block: w})
}

case ClearPtr:
nonNilValues[node.ptr.ID] = false
nonNilValues[node.ptr.ID] = nil
continue
}
}
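Note: a hedged source-level picture of the new behavior. A dominated duplicate check is no longer reset to OpUnknown and freed; it becomes an OpCopy of the dominating check's pointer result (function sum is illustrative only):

```go
// Both dereferences require p to be non-nil. The first carries the real
// OpNilCheck, which now returns p; the second check is dominated by the
// first, so the pass rewrites it into a copy of that result.
func sum(p *[2]int) int {
	a := p[0] // implicit nil check on p happens here
	b := p[1] // redundant check: reuses the dominating one
	return a + b
}
```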
