// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris

package bio

import (
	"runtime"
	"sync/atomic"
	"syscall"
)
// mmapLimit is the maximum number of mmaped regions to create before
// falling back to reading into a heap-allocated slice. This exists
// because some operating systems place a limit on the number of
// distinct mapped regions per process. As of this writing:
//
//	Darwin    unlimited
//	DragonFly 1000000 (vm.max_proc_mmap)
//	FreeBSD   unlimited
//	Linux     65530 (vm.max_map_count) // TODO: query /proc/sys/vm/max_map_count?
//	NetBSD    unlimited
//	OpenBSD   unlimited
var mmapLimit int32 = 1<<31 - 1

func init() {
	// Linux is the only practically concerning OS.
	if runtime.GOOS == "linux" {
		mmapLimit = 30000
	}
}
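
// sliceOS returns the next length bytes of the underlying file as a
// read-only, mmap-backed byte slice, reporting whether the mapping
// succeeded. On failure the caller falls back to reading into a
// heap-allocated slice.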
func (r *Reader) sliceOS(length uint64) ([]byte, bool) {
	// For small slices, don't bother with the overhead of a
	// mapping, especially since we have no way to unmap it.
	const threshold = 16 << 10
	if length < threshold {
		return nil, false
	}

	// Have we reached the mmap limit? The decrement reserves one of the
	// remaining mapping slots; if the counter went negative, the limit
	// has been reached, so undo the decrement and report failure.
	if atomic.AddInt32(&mmapLimit, -1) < 0 {
		atomic.AddInt32(&mmapLimit, 1)
		return nil, false
	}

	// Page-align the offset.
	off := r.Offset()
	align := syscall.Getpagesize()
	aoff := off &^ int64(align-1)
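
	// For example, assuming a 4096-byte page size, an offset of 10000
	// rounds down to aoff = 8192, so the mapping covers 1808 extra
	// bytes that are sliced off after a successful mmap. (Illustrative
	// values only.)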
	data, err := syscall.Mmap(int(r.f.Fd()), aoff, int(length+uint64(off-aoff)), syscall.PROT_READ, syscall.MAP_SHARED|syscall.MAP_FILE)
	if err != nil {
		return nil, false
	}
	data = data[off-aoff:]
	// Advance the reader past the mapped bytes (whence 1 = io.SeekCurrent).
	r.MustSeek(int64(length), 1)
	return data, true
}