// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ssa

import (
"cmd/compile/internal/ir"
"cmd/internal/src"
"internal/buildcfg"
)

// nilcheckelim eliminates unnecessary nil checks.
// It runs on machine-independent code.
func nilcheckelim(f *Func) {
// A nil check is redundant if the same nil check was successful in a
// dominating block. The efficacy of this pass depends heavily on the
// efficacy of the cse pass.
sdom := f.Sdom()
// TODO: Eliminate more nil checks.
// We can recursively remove any chain of fixed offset calculations,
// i.e. struct fields and array elements, even with non-constant
// indices: x is non-nil iff x.a.b[i].c is.
type walkState int
const (
Work walkState = iota // process nil checks and traverse to dominees
ClearPtr // forget the fact that ptr is nil
)
type bp struct {
block *Block // block, or nil in ClearPtr state
ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state
op walkState
}
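// The work stack interleaves Work entries (blocks to visit) with ClearPtr
// entries (facts to retract). A ClearPtr entry is pushed before a block's
// dominees, so it is popped only after the whole dominator subtree pushed
// above it has been processed, forgetting the fact exactly when the walk
// leaves its scope.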
work := make([]bp, 0, 256)
work = append(work, bp{block: f.Entry})
// map from value ID to the known non-nil version of that value ID
// (in the current dominator path being walked). The walk updates this
// slice as it enters and leaves dominator subtrees.
// If there is extrinsic information about non-nil-ness, this slice
// points a value to itself. If a value is known non-nil because we
// already did a nil check on it, it points to the nil check operation.
nonNilValues := f.Cache.allocValueSlice(f.NumValues())
defer f.Cache.freeValueSlice(nonNilValues)
// make an initial pass identifying any non-nil values
for _, b := range f.Blocks {
for _, v := range b.Values {
// a value resulting from taking the address of a
// value, or a value constructed from an offset of a
// non-nil ptr (OpAddPtr) implies it is non-nil
// We also assume unsafe pointer arithmetic generates non-nil pointers. See #27180.
// We assume that SlicePtr is non-nil because we do a bounds check
// before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
nonNilValues[v.ID] = v
}
}
}
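// Propagate non-nil-ness through phis to a fixed point: a phi is non-nil
// only if all of its arguments are, and phis can feed other phis, so
// iterate until no new values are marked.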
for changed := true; changed; {
changed = false
for _, b := range f.Blocks {
for _, v := range b.Values {
// phis whose arguments are all non-nil
// are non-nil
if v.Op == OpPhi {
argsNonNil := true
for _, a := range v.Args {
if nonNilValues[a.ID] == nil {
argsNonNil = false
break
}
}
if argsNonNil {
if nonNilValues[v.ID] == nil {
changed = true
}
nonNilValues[v.ID] = v
}
}
}
}
}
// allocate auxiliary data structures for computing store order
sset := f.newSparseSet(f.NumValues())
defer f.retSparseSet(sset)
storeNumber := f.Cache.allocInt32Slice(f.NumValues())
defer f.Cache.freeInt32Slice(storeNumber)
// perform a depth-first walk of the dominator tree
for len(work) > 0 {
node := work[len(work)-1]
work = work[:len(work)-1]
switch node.op {
case Work:
b := node.block
// First, see if we're dominated by an explicit nil check.
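// That is the case when our sole predecessor ends in "if ptr != nil"
// and we are its first successor: Succs[0] of a BlockIf is taken when
// the OpIsNonNil control value is true, i.e. when ptr is non-nil.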
if len(b.Preds) == 1 {
p := b.Preds[0].b
if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
if ptr := p.Controls[0].Args[0]; nonNilValues[ptr.ID] == nil {
nonNilValues[ptr.ID] = ptr
work = append(work, bp{op: ClearPtr, ptr: ptr})
}
}
}
// Next, order values in the current block w.r.t. stores.
b.Values = storeOrder(b.Values, sset, storeNumber)
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
pendingLines.clear()
// Next, process values in the block.
for _, v := range b.Values {
switch v.Op {
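// OpIsNonNil backs explicit nil checks (branches on ptr != nil), while
// OpNilCheck is the implicit check inserted before a dereference.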
case OpIsNonNil:
ptr := v.Args[0]
if nonNilValues[ptr.ID] != nil {
if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
pendingLines.add(v.Pos)
v.Pos = v.Pos.WithNotStmt()
}
// This is a redundant explicit nil check.
v.reset(OpConstBool)
v.AuxInt = 1 // true
}
case OpNilCheck:
ptr := v.Args[0]
if nilCheck := nonNilValues[ptr.ID]; nilCheck != nil {
// This is a redundant implicit nil check.
// Logging in the style of the former compiler -- and omit line 1,
// which is usually in generated code.
if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
f.Warnl(v.Pos, "removed nil check")
}
if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
pendingLines.add(v.Pos)
}
v.Op = OpCopy
v.SetArgs1(nilCheck)
continue
}
// Record the fact that we know ptr is non-nil, and remember to
// undo that information when this dominator subtree is done.
nonNilValues[ptr.ID] = v
work = append(work, bp{op: ClearPtr, ptr: ptr})
fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
default:
if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
v.Pos = v.Pos.WithIsStmt()
pendingLines.remove(v.Pos)
}
}
}
// Rescan the block for pending statement marks missed above.
// This reduces the lost statement count in "go" by 5 (out of 500 total).
for j := range b.Values { // is this an ordering problem?
v := b.Values[j]
if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
v.Pos = v.Pos.WithIsStmt()
pendingLines.remove(v.Pos)
}
}
if pendingLines.contains(b.Pos) {
b.Pos = b.Pos.WithIsStmt()
pendingLines.remove(b.Pos)
}
// Add all dominated blocks to the work list.
for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
work = append(work, bp{op: Work, block: w})
}
case ClearPtr:
nonNilValues[node.ptr.ID] = nil
continue
}
}
}

// All platforms are guaranteed to fault if we load/store to an address smaller than this.
//
// This should agree with minLegalPointer in the runtime.
const minZeroPage = 4096

// faultOnLoad is true if a load from an address below minZeroPage will trigger a SIGSEGV.
var faultOnLoad = buildcfg.GOOS != "aix"

// nilcheckelim2 eliminates unnecessary nil checks.
// Runs after lowering and scheduling.
func nilcheckelim2(f *Func) {
unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[]
defer f.retSparseMap(unnecessary)
pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
for _, b := range f.Blocks {
// Walk the block backwards. Find instructions that will fault if their
// input pointer is nil. Remove nil checks on those pointers, as the
// faulting instruction effectively does the nil check for free.
unnecessary.clear()
pendingLines.clear()
// Optimization: keep track of the removed nil check with the smallest index.
firstToRemove := len(b.Values)
for i := len(b.Values) - 1; i >= 0; i-- {
v := b.Values[i]
if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
f.Warnl(v.Pos, "removed nil check")
}
// For bug 33724, policy is that we might choose to bump an existing position
// off the faulting load/store in favor of the one from the nil check.
// Iteration order means that first nilcheck in the chain wins, others
// are bumped into the ordinary statement preservation algorithm.
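// u is the faulting load/store that makes this nil check redundant.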
u := b.Values[unnecessary.get(v.Args[0].ID)]
if !u.Pos.SameFileAndLine(v.Pos) {
if u.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(u.Pos)
}
u.Pos = v.Pos
} else if v.Pos.IsStmt() == src.PosIsStmt {
pendingLines.add(v.Pos)
}
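// Clobber the redundant nil check; OpUnknown values are compacted
// out of b.Values at the end of this block's walk.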
v.reset(OpUnknown)
firstToRemove = i
continue
}
if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
if v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) {
// These ops don't really change memory.
continue
// Note: OpVarDef requires that the defined variable not have pointers.
// We need to make sure that there's no possible faulting
// instruction between a VarDef and that variable being
// fully initialized. If there was, then anything scanning
// the stack during the handling of that fault will see
// a live but uninitialized pointer variable on the stack.
//
// If we have:
//
// NilCheck p
// VarDef x
// x = *p
//
// We can't rewrite that to
//
// VarDef x
// NilCheck p
// x = *p
//
// Particularly, even though *p faults on p==nil, we still
// have to do the explicit nil check before the VarDef.
// See issue #32288.
}
// This op changes memory. Any faulting instruction after v that
// we've recorded in the unnecessary map is now obsolete.
unnecessary.clear()
}
// Find any pointers that this op is guaranteed to fault on if nil.
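// At most two arguments (arg0 and arg1) can be such pointers.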
var ptrstore [2]*Value
ptrs := ptrstore[:0]
if opcodeTable[v.Op].faultOnNilArg0 && (faultOnLoad || v.Type.IsMemory()) {
// On AIX, only writing will fault.
ptrs = append(ptrs, v.Args[0])
}
if opcodeTable[v.Op].faultOnNilArg1 && (faultOnLoad || (v.Type.IsMemory() && v.Op != OpPPC64LoweredMove)) {
// On AIX, only writing will fault.
// LoweredMove is a special case: it is treated as a "mem" because it stores to arg0, but arg1 is accessed with a load and should still be checked.
ptrs = append(ptrs, v.Args[1])
}
for _, ptr := range ptrs {
// Check to make sure the offset is small.
switch opcodeTable[v.Op].auxType {
case auxSym:
if v.Aux != nil {
continue
}
case auxSymOff:
if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
continue
}
case auxSymValAndOff:
off := ValAndOff(v.AuxInt).Off()
if v.Aux != nil || off < 0 || off >= minZeroPage {
continue
}
case auxInt32:
// MIPS uses this auxType for the atomic add constant. It does not affect the effective address.
case auxInt64:
// ARM uses this auxType for duffcopy/duffzero/alignment info.
// It does not affect the effective address.
case auxNone:
// offset is zero.
default:
v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
}
// This instruction is guaranteed to fault if ptr is nil.
// Any previous nil check op is unnecessary.
unnecessary.set(ptr.ID, int32(i))
}
}
// Remove values we've clobbered with OpUnknown.
i := firstToRemove
for j := i; j < len(b.Values); j++ {
v := b.Values[j]
if v.Op != OpUnknown {
if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now.
v.Pos = v.Pos.WithIsStmt()
pendingLines.remove(v.Pos)
}
b.Values[i] = v
i++
}
}
if pendingLines.contains(b.Pos) {
b.Pos = b.Pos.WithIsStmt()
}
b.truncateValues(i)
// TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
// more unnecessary nil checks. Would fix test/nilptr3.go:159.
}
}