Skip to content

Commit

Permalink
runtime, syscall: add calls to asan functions
Browse files Browse the repository at this point in the history
Add explicit address sanitizer instrumentation to the runtime and
syscall packages. The compiler does not instrument the runtime
package. It does instrument the syscall package, but we need to add
a couple of cases that it can't see.

Following the implementation of the asan malloc runtime library,
this patch also allocates extra memory as a redzone around the
returned memory region and marks the redzone as unaddressable to
detect overflows and underflows.

Updates #44853.

Change-Id: I2753d1cc1296935a66bf521e31ce91e35fcdf798
Reviewed-on: https://go-review.googlesource.com/c/go/+/298614
Run-TryBot: Ian Lance Taylor <[email protected]>
Reviewed-by: Ian Lance Taylor <[email protected]>
Trust: fannie zhang <[email protected]>
  • Loading branch information
zhangfannie authored and ianlancetaylor committed Nov 2, 2021
1 parent 6f1e9a9 commit 6f327f7
Show file tree
Hide file tree
Showing 18 changed files with 200 additions and 3 deletions.
7 changes: 6 additions & 1 deletion src/runtime/cgo_sigaction.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,9 @@ func sigaction(sig uint32, new, old *sigactiont) {
if msanenabled && new != nil {
msanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
}

if asanenabled && new != nil {
asanwrite(unsafe.Pointer(new), unsafe.Sizeof(*new))
}
if _cgo_sigaction == nil || inForkedChild {
sysSigaction(sig, new, old)
} else {
Expand Down Expand Up @@ -79,6 +81,9 @@ func sigaction(sig uint32, new, old *sigactiont) {
if msanenabled && old != nil {
msanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
}
if asanenabled && old != nil {
asanread(unsafe.Pointer(old), unsafe.Sizeof(*old))
}
}

// callCgoSigaction calls the sigaction function in the runtime/cgo package
Expand Down
7 changes: 7 additions & 0 deletions src/runtime/iface.go
Original file line number Diff line number Diff line change
Expand Up @@ -325,6 +325,9 @@ func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
if msanenabled {
msanread(v, t.size)
}
if asanenabled {
asanread(v, t.size)
}
x := mallocgc(t.size, t, true)
typedmemmove(t, x, v)
return x
Expand All @@ -337,6 +340,10 @@ func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
if msanenabled {
msanread(v, t.size)
}
if asanenabled {
asanread(v, t.size)
}

x := mallocgc(t.size, t, false)
memmove(x, v, t.size)
return x
Expand Down
44 changes: 43 additions & 1 deletion src/runtime/malloc.go
Original file line number Diff line number Diff line change
Expand Up @@ -908,6 +908,14 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if size == 0 {
return unsafe.Pointer(&zerobase)
}
userSize := size
if asanenabled {
// In the ASAN runtime library, the malloc() function allocates extra memory,
// the redzone, around the user-requested memory region, and the redzones are
// marked as unaddressable. We perform the same operations in Go to detect
// overflows and underflows.
size += computeRZlog(size)
}

if debug.malloc {
if debug.sbrk != 0 {
Expand Down Expand Up @@ -971,7 +979,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
mp.mallocing = 1

shouldhelpgc := false
dataSize := size
dataSize := userSize
c := getMCache(mp)
if c == nil {
throw("mallocgc called without a P or outside bootstrapping")
Expand Down Expand Up @@ -1138,6 +1146,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
msanmalloc(x, size)
}

if asanenabled {
// We should only read/write the memory with the size asked by the user.
// The rest of the allocated memory should be poisoned, so that we can report
// errors when accessing poisoned memory.
// The allocated memory is larger than the requested userSize; it will also
// include the redzone and some other padding bytes.
rzBeg := unsafe.Add(x, userSize)
asanpoison(rzBeg, size-userSize)
asanunpoison(x, userSize)
}

if rate := MemProfileRate; rate > 0 {
// Note cache c only valid while m acquired; see #47302
if rate != 1 && size < c.nextSample {
Expand Down Expand Up @@ -1514,3 +1533,26 @@ type notInHeap struct{}
func (p *notInHeap) add(bytes uintptr) *notInHeap {
return (*notInHeap)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + bytes))
}

// computeRZlog computes the size of the redzone.
// Refer to the implementation of the compiler-rt.
func computeRZlog(userSize uintptr) uintptr {
switch {
case userSize <= (64 - 16):
return 16 << 0
case userSize <= (128 - 32):
return 16 << 1
case userSize <= (512 - 64):
return 16 << 2
case userSize <= (4096 - 128):
return 16 << 3
case userSize <= (1<<14)-256:
return 16 << 4
case userSize <= (1<<15)-512:
return 16 << 5
case userSize <= (1<<16)-1024:
return 16 << 6
default:
return 16 << 7
}
}
12 changes: 12 additions & 0 deletions src/runtime/map.go
Original file line number Diff line number Diff line change
Expand Up @@ -402,6 +402,9 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if asanenabled && h != nil {
asanread(key, t.key.size)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.hasher(key, 0) // see issue 23734
Expand Down Expand Up @@ -460,6 +463,9 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if asanenabled && h != nil {
asanread(key, t.key.size)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.hasher(key, 0) // see issue 23734
Expand Down Expand Up @@ -582,6 +588,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if msanenabled {
msanread(key, t.key.size)
}
if asanenabled {
asanread(key, t.key.size)
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
Expand Down Expand Up @@ -693,6 +702,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
if asanenabled && h != nil {
asanread(key, t.key.size)
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
t.hasher(key, 0) // see issue 23734
Expand Down
8 changes: 8 additions & 0 deletions src/runtime/mbarrier.go
Original file line number Diff line number Diff line change
Expand Up @@ -184,6 +184,10 @@ func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
msanwrite(dst, typ.size)
msanread(src, typ.size)
}
if asanenabled {
asanwrite(dst, typ.size)
asanread(src, typ.size)
}
typedmemmove(typ, dst, src)
}

Expand Down Expand Up @@ -262,6 +266,10 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe
msanwrite(dstPtr, uintptr(n)*typ.size)
msanread(srcPtr, uintptr(n)*typ.size)
}
if asanenabled {
asanwrite(dstPtr, uintptr(n)*typ.size)
asanread(srcPtr, uintptr(n)*typ.size)
}

if writeBarrier.cgo {
cgoCheckSliceCopy(typ, dstPtr, srcPtr, n)
Expand Down
5 changes: 4 additions & 1 deletion src/runtime/mgcsweep.go
Original file line number Diff line number Diff line change
Expand Up @@ -563,7 +563,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
spanHasNoSpecials(s)
}

if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled {
if debug.allocfreetrace != 0 || debug.clobberfree != 0 || raceenabled || msanenabled || asanenabled {
// Find all newly freed objects. This doesn't have to be
efficient; allocfreetrace has massive overhead.
mbits := s.markBitsForBase()
Expand All @@ -583,6 +583,9 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
if msanenabled {
msanfree(unsafe.Pointer(x), size)
}
if asanenabled {
asanpoison(unsafe.Pointer(x), size)
}
}
mbits.advance()
abits.advance()
Expand Down
6 changes: 6 additions & 0 deletions src/runtime/mheap.go
Original file line number Diff line number Diff line change
Expand Up @@ -1419,6 +1419,12 @@ func (h *mheap) freeSpan(s *mspan) {
bytes := s.npages << _PageShift
msanfree(base, bytes)
}
if asanenabled {
// Tell asan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())
bytes := s.npages << _PageShift
asanpoison(base, bytes)
}
h.freeSpanLocked(s, spanAllocHeap)
unlock(&h.lock)
})
Expand Down
6 changes: 6 additions & 0 deletions src/runtime/mprof.go
Original file line number Diff line number Diff line change
Expand Up @@ -627,6 +627,9 @@ func record(r *MemProfileRecord, b *bucket) {
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
}
if asanenabled {
asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
}
copy(r.Stack0[:], b.stk())
for i := int(b.nstk); i < len(r.Stack0); i++ {
r.Stack0[i] = 0
Expand Down Expand Up @@ -680,6 +683,9 @@ func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
if msanenabled {
msanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
}
if asanenabled {
asanwrite(unsafe.Pointer(&r.Stack0[0]), unsafe.Sizeof(r.Stack0))
}
i := copy(r.Stack0[:], b.stk())
for ; i < len(r.Stack0); i++ {
r.Stack0[i] = 0
Expand Down
6 changes: 6 additions & 0 deletions src/runtime/proc.go
Original file line number Diff line number Diff line change
Expand Up @@ -2233,6 +2233,9 @@ func newm1(mp *m) {
if msanenabled {
msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
}
if asanenabled {
asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
}
execLock.rlock() // Prevent process clone.
asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
execLock.runlock()
Expand Down Expand Up @@ -4435,6 +4438,9 @@ retry:
if msanenabled {
msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
}
if asanenabled {
asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
}
}
return gp
}
Expand Down
16 changes: 16 additions & 0 deletions src/runtime/select.go
Original file line number Diff line number Diff line change
Expand Up @@ -406,6 +406,13 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
msanwrite(cas.elem, c.elemtype.size)
}
}
if asanenabled {
if casi < nsends {
asanread(cas.elem, c.elemtype.size)
} else if cas.elem != nil {
asanwrite(cas.elem, c.elemtype.size)
}
}

selunlock(scases, lockorder)
goto retc
Expand All @@ -421,6 +428,9 @@ bufrecv:
if msanenabled && cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
}
if asanenabled && cas.elem != nil {
asanwrite(cas.elem, c.elemtype.size)
}
recvOK = true
qp = chanbuf(c, c.recvx)
if cas.elem != nil {
Expand All @@ -444,6 +454,9 @@ bufsend:
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
if asanenabled {
asanread(cas.elem, c.elemtype.size)
}
typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
c.sendx++
if c.sendx == c.dataqsiz {
Expand Down Expand Up @@ -482,6 +495,9 @@ send:
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
if asanenabled {
asanread(cas.elem, c.elemtype.size)
}
send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
if debugSelect {
print("syncsend: cas0=", cas0, " c=", c, "\n")
Expand Down
10 changes: 10 additions & 0 deletions src/runtime/slice.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,9 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf
if msanenabled {
msanread(from, copymem)
}
if asanenabled {
asanread(from, copymem)
}

memmove(to, from, copymem)

Expand Down Expand Up @@ -168,6 +171,9 @@ func growslice(et *_type, old slice, cap int) slice {
if msanenabled {
msanread(old.array, uintptr(old.len*int(et.size)))
}
if asanenabled {
asanread(old.array, uintptr(old.len*int(et.size)))
}

if cap < old.cap {
panic(errorString("growslice: cap out of range"))
Expand Down Expand Up @@ -311,6 +317,10 @@ func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen
msanread(fromPtr, size)
msanwrite(toPtr, size)
}
if asanenabled {
asanread(fromPtr, size)
asanwrite(toPtr, size)
}

if size == 1 { // common case worth about 2x to do here
// TODO: is this still worth it with new memmove impl?
Expand Down
6 changes: 6 additions & 0 deletions src/runtime/stack.go
Original file line number Diff line number Diff line change
Expand Up @@ -424,6 +424,9 @@ func stackalloc(n uint32) stack {
if msanenabled {
msanmalloc(v, uintptr(n))
}
if asanenabled {
asanunpoison(v, uintptr(n))
}
if stackDebug >= 1 {
print(" allocated ", v, "\n")
}
Expand Down Expand Up @@ -461,6 +464,9 @@ func stackfree(stk stack) {
if msanenabled {
msanfree(v, n)
}
if asanenabled {
asanpoison(v, n)
}
if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
order := uint8(0)
n2 := n
Expand Down
9 changes: 9 additions & 0 deletions src/runtime/string.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,9 @@ func slicebytetostring(buf *tmpBuf, ptr *byte, n int) (str string) {
if msanenabled {
msanread(unsafe.Pointer(ptr), uintptr(n))
}
if asanenabled {
asanread(unsafe.Pointer(ptr), uintptr(n))
}
if n == 1 {
p := unsafe.Pointer(&staticuint64s[*ptr])
if goarch.BigEndian {
Expand Down Expand Up @@ -158,6 +161,9 @@ func slicebytetostringtmp(ptr *byte, n int) (str string) {
if msanenabled && n > 0 {
msanread(unsafe.Pointer(ptr), uintptr(n))
}
if asanenabled && n > 0 {
asanread(unsafe.Pointer(ptr), uintptr(n))
}
stringStructOf(&str).str = unsafe.Pointer(ptr)
stringStructOf(&str).len = n
return
Expand Down Expand Up @@ -209,6 +215,9 @@ func slicerunetostring(buf *tmpBuf, a []rune) string {
if msanenabled && len(a) > 0 {
msanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
}
if asanenabled && len(a) > 0 {
asanread(unsafe.Pointer(&a[0]), uintptr(len(a))*unsafe.Sizeof(a[0]))
}
var dum [4]byte
size1 := 0
for _, r := range a {
Expand Down
6 changes: 6 additions & 0 deletions src/runtime/traceback.go
Original file line number Diff line number Diff line change
Expand Up @@ -1390,6 +1390,9 @@ func callCgoSymbolizer(arg *cgoSymbolizerArg) {
if msanenabled {
msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
}
if asanenabled {
asanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
}
call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
}

Expand All @@ -1412,5 +1415,8 @@ func cgoContextPCs(ctxt uintptr, buf []uintptr) {
if msanenabled {
msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
}
if asanenabled {
asanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
}
call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
}
Loading

0 comments on commit 6f327f7

Please sign in to comment.