-
Notifications
You must be signed in to change notification settings - Fork 0
/
disk.go
376 lines (345 loc) · 11.7 KB
/
disk.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
package disk
import (
	"bufio"
	"container/heap"
	"fmt"
	"io/fs"
	"log"
	"os"
	"slices"
	"strconv"
	"strings"
)
// TODO Yes, this code/comment mix is fugly.. right now it's just easier to keep
// the docs from kernel next to the fields.
//
// diskStat is one parsed line of /proc/diskstats. Field comments are adapted
// from the kernel's iostats documentation; fields appear in the same order as
// the columns of the proc file.
type diskStat struct {
	major   int    // device major number (column 1)
	minor   int    // device minor number (column 2)
	devname string // device name, e.g. "sda" (column 3)
	// The values for reads and writes are in terms of `sectors` which can be
	// assumed to be 512 bytes. The kernel reports them this way regardless of
	// how the filesystem or underlying disk controller is accounting.
	// The most reliable way to check this would be `blockdev --getss <dev>`
	// which is just using IOCTL to get the sector size.
	num_reads_completed int // This is the total number of reads completed successfully.
	num_reads_merged    int // # of reads merged.
	// Reads and writes which are adjacent to each other may be merged for
	// efficiency. Thus two 4K reads may become one 8K read before it is
	// ultimately handed to the disk, and so it will be counted (and queued)
	// as only one I/O. This field lets you know how often this was done.
	num_sectors_read     int // This is the total number of sectors read successfully.
	ms_reading           int // This is the total number of milliseconds spent by all reads (as measured from blk_mq_alloc_request() to __blk_mq_end_request()).
	num_writes_completed int // This is the total number of writes completed successfully.
	num_writes_merged    int // # of writes merged; see the description of num_reads_merged.
	num_sectors_written  int // This is the total number of sectors written successfully.
	ms_writing           int // This is the total number of milliseconds spent by all writes (as measured from blk_mq_alloc_request() to __blk_mq_end_request()).
	num_io_in_progress   int // The only field that should go to zero. Incremented as requests are given to appropriate struct request_queue and decremented as they finish.
	ms_doing_io          int // This field increases so long as num_io_in_progress is nonzero.
	// Since 5.0 this field counts jiffies when at least one request was
	// started or completed. If request runs more than 2 jiffies then some
	// I/O time might be not accounted in case of concurrent requests.
	ms_doing_io_weighted   int // This field is incremented at each I/O start, I/O completion, I/O merge, or read of these stats by the number of I/Os in progress (num_io_in_progress) times the number of milliseconds spent doing I/O since the last update of this field. This can provide an easy measure of both I/O completion time and the backlog that may be accumulating.
	num_discards_completed int // This is the total number of discards completed successfully.
	num_discards_merged    int // See the description of num_reads_merged.
	num_sectors_discarded  int // This is the total number of sectors discarded successfully.
	ms_spent_discarding    int // This is the total number of milliseconds spent by
	// all discards (as measured from blk_mq_alloc_request() to
	// __blk_mq_end_request()).
	num_flush_requests_completed int // This is the total number of flush requests completed successfully.
	// Block layer combines flush requests and
	// executes at most one at a time. This
	// counts flush requests executed by disk.
	// Not tracked for partitions.
	ms_spent_flushing int // This is the total number of milliseconds spent by all flush requests.
}
type statValues struct {
major float32
minor float32
devname string
num_reads_completed float32
num_reads_merged float32
num_sectors_read float32
ms_reading float32
num_writes_completed float32
num_writes_merged float32
num_sectors_written float32
ms_writing float32
num_io_in_progress float32
ms_doing_io float32
ms_doing_io_weighted float32
num_discards_completed float32
num_discards_merged float32
num_sectors_discarded float32
ms_spent_discarding float32
num_flush_requests_completed float32
ms_spent_flushing float32
}
// diskHeap is a heap of per-interval device stats ordered by completed
// writes, so the busiest writers pop first. It implements heap.Interface.
type diskHeap []*statValues

func (h diskHeap) Len() int { return len(h) }

// Less inverts the usual ordering (">" instead of "<") so container/heap's
// min-heap behaves as a max-heap on num_writes_completed.
func (h diskHeap) Less(i, j int) bool {
	return h[i].num_writes_completed > h[j].num_writes_completed
}

func (h diskHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }

func (h *diskHeap) Push(x any) { *h = append(*h, x.(*statValues)) }

// Pop removes and returns the final element (container/heap has already
// swapped the root there). An empty heap yields nil as a defensive guard.
func (h *diskHeap) Pop() any {
	s := *h
	last := len(s) - 1
	if last < 0 {
		return nil
	}
	v := s[last]
	*h = s[:last]
	return v
}
// DiskInfo carries two consecutive /proc/diskstats samples plus the
// per-interval deltas derived from them.
type DiskInfo struct {
	old    []*diskStat // previous sample; baseline for delta computation
	new    []*diskStat // most recent sample
	values *diskHeap   // deltas ordered by write activity; rebuilt by estimate()
}
// dsfields indexes the whitespace-separated columns of one /proc/diskstats
// line, in the order the kernel emits them (zero-based, so DSFMAJOR is
// kernel "field 1").
type dsfields int

const (
	DSFMAJOR dsfields = iota // device major number
	DSFMINOR                 // device minor number
	DSFNAME                  // device name (the only non-numeric column)
	DSFNUM_READS_COMPLETED
	DSFNUM_READS_MERGED
	DSFNUM_SECTORS_READ
	DSFMS_READING
	DSFNUM_WRITES_COMPLETED
	DSFNUM_WRITES_MERGED
	DSFNUM_SECTORS_WRITTEN
	DSFMS_WRITING
	DSFNUM_IO_IN_PROGRESS
	DSFMS_DOING_IO
	DSFMS_DOING_IO_WEIGHTED
	DSFNUM_DISCARDS_COMPLETED // discard fields: kernel >= 4.18
	DSFNUM_DISCARDS_MERGED
	DSFNUM_SECTORS_DISCARDED
	DSFMS_SPENT_DISCARDING
	DSFNUM_FLUSH_REQUESTS_COMPLETED // flush fields: kernel >= 5.5
	DSFMS_SPENT_FLUSHING
)
// diskstats is the proc file the per-device statistics are read from.
const diskstats = "/proc/diskstats"

// getDiskStatPath returns the path of the stats source file. The indirection
// presumably exists as a seam for substituting a fixture path — confirm
// against tests elsewhere in the project.
func getDiskStatPath() string {
	return diskstats
}
// estimate computes per-device deltas between the previous and current
// samples and loads them into a fresh heap ordered by write activity.
// It is a no-op until two samples have been collected.
func (disks *DiskInfo) estimate() {
	// A single sample gives no interval to diff over.
	if len(disks.old) == 0 {
		return
	}
	disks.values = new(diskHeap)
	heap.Init(disks.values)
	// Index the previous sample by device name. The earlier implementation
	// paired samples positionally (prev[i] vs cur[i]), which panics with an
	// index-out-of-range — or silently diffs two different devices — whenever
	// the device list changes between samples (hotplug, removable media).
	prev := make(map[string]*diskStat, len(disks.old))
	for _, p := range disks.old {
		prev[p.devname] = p
	}
	for _, c := range disks.new {
		p, ok := prev[c.devname]
		if !ok {
			// Device appeared since the last sample: no baseline yet.
			continue
		}
		d := &statValues{
			devname:                      c.devname,
			major:                        float32(c.major),
			minor:                        float32(c.minor),
			num_reads_completed:          float32(c.num_reads_completed - p.num_reads_completed),
			num_reads_merged:             float32(c.num_reads_merged - p.num_reads_merged),
			num_sectors_read:             float32(c.num_sectors_read - p.num_sectors_read),
			ms_reading:                   float32(c.ms_reading - p.ms_reading),
			num_writes_completed:         float32(c.num_writes_completed - p.num_writes_completed),
			num_writes_merged:            float32(c.num_writes_merged - p.num_writes_merged),
			num_sectors_written:          float32(c.num_sectors_written - p.num_sectors_written),
			ms_writing:                   float32(c.ms_writing - p.ms_writing),
			num_io_in_progress:           float32(c.num_io_in_progress - p.num_io_in_progress),
			ms_doing_io:                  float32(c.ms_doing_io - p.ms_doing_io),
			ms_doing_io_weighted:         float32(c.ms_doing_io_weighted - p.ms_doing_io_weighted),
			num_discards_completed:       float32(c.num_discards_completed - p.num_discards_completed),
			num_discards_merged:          float32(c.num_discards_merged - p.num_discards_merged),
			num_sectors_discarded:        float32(c.num_sectors_discarded - p.num_sectors_discarded),
			ms_spent_discarding:          float32(c.ms_spent_discarding - p.ms_spent_discarding),
			num_flush_requests_completed: float32(c.num_flush_requests_completed - p.num_flush_requests_completed),
			ms_spent_flushing:            float32(c.ms_spent_flushing - p.ms_spent_flushing),
		}
		heap.Push(disks.values, d)
	}
}
// diskparse parses a single /proc/diskstats line into a diskStat.
//
// A modern (>= 5.5) kernel emits 20 whitespace-separated fields per line,
// but older kernels emit fewer: the discard fields appeared in 4.18 and the
// flush fields in 5.5. The previous implementation indexed all 20 columns
// unconditionally and so panicked (index out of range) on older kernels and
// on blank/truncated lines. Missing trailing fields are now left at zero;
// lines shorter than the 14-field baseline are rejected with an error.
func diskparse(s string) (*diskStat, error) {
	fields := strings.Fields(s)
	if len(fields) < int(DSFMS_DOING_IO_WEIGHTED)+1 {
		return nil, fmt.Errorf("diskstats line has %d fields, want at least %d",
			len(fields), int(DSFMS_DOING_IO_WEIGHTED)+1)
	}
	ds := new(diskStat)
	ds.devname = fields[DSFNAME]
	// Destination for each numeric column, in kernel column order; nil marks
	// the one non-numeric column (devname), handled above.
	dests := []*int{
		&ds.major,
		&ds.minor,
		nil, // devname
		&ds.num_reads_completed,
		&ds.num_reads_merged,
		&ds.num_sectors_read,
		&ds.ms_reading,
		&ds.num_writes_completed,
		&ds.num_writes_merged,
		&ds.num_sectors_written,
		&ds.ms_writing,
		&ds.num_io_in_progress,
		&ds.ms_doing_io,
		&ds.ms_doing_io_weighted,
		&ds.num_discards_completed,
		&ds.num_discards_merged,
		&ds.num_sectors_discarded,
		&ds.ms_spent_discarding,
		&ds.num_flush_requests_completed,
		&ds.ms_spent_flushing,
	}
	for i, dst := range dests {
		if i >= len(fields) {
			break // older kernel: trailing discard/flush columns absent
		}
		if dst == nil {
			continue
		}
		v, err := strconv.Atoi(fields[i])
		if err != nil {
			return nil, err
		}
		*dst = v
	}
	return ds, nil
}
// Store list of disks we want to report on, it should be populated the first
// time anything checks if a disk exists.
var reportableDisks []string

// TODO: Add some ability to configure the reportable disks

// setupReportableDisks records the name of every directory entry (one per
// whole block device when the entries come from /sys/block) as reportable.
func setupReportableDisks(disks []fs.DirEntry) {
	for _, entry := range disks {
		reportableDisks = append(reportableDisks, entry.Name())
	}
}

// isDisk reports whether s names a device we report on. By default it skips
// all partitions, because /sys/block lists only whole devices. The
// reportable set is populated lazily on first use.
func isDisk(s string) bool {
	if len(reportableDisks) == 0 {
		entries, err := os.ReadDir("/sys/block")
		if err != nil {
			// NOTE(review): Fatal kills the whole process on a read error;
			// consider surfacing the error to the caller instead.
			log.Fatal("Can't configure disks", err)
		}
		setupReportableDisks(entries)
	}
	// slices.Contains replaces the hand-rolled membership loop (the file
	// already relies on Go 1.21 builtins min/max).
	return slices.Contains(reportableDisks, s)
}
// DiskStats refreshes di from the current contents of the diskstats proc
// file, rotating the previous sample and recomputing per-interval deltas.
func DiskStats(di *DiskInfo) (*DiskInfo, error) {
	src, err := os.Open(getDiskStatPath())
	if err != nil {
		return nil, err
	}
	defer src.Close()
	return getDiskStats(di, src)
}
// getDiskStats reads diskstats-formatted lines from f, keeps the entries for
// reportable whole disks, rotates the previous sample into di.old, and
// recomputes the per-interval deltas. Returns di for chaining.
func getDiskStats(di *DiskInfo, f fs.File) (*DiskInfo, error) {
	// Rotate: the most recent sample becomes the baseline for the new one.
	di.old = di.new
	var ds []*diskStat
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		curdisk, err := diskparse(scanner.Text())
		if err != nil {
			return nil, err
		}
		if isDisk(curdisk.devname) {
			ds = append(ds, curdisk)
		}
	}
	// BUG fix: the previous version never checked scanner.Err(), so a read
	// error silently truncated the sample and skewed every delta.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	di.new = ds
	di.estimate()
	return di, nil
}
// InfoPrint renders up to num_disks lines, busiest writers first, showing
// completed writes (wc), KB written (sw), completed reads (rc), and KB read
// (sr) for the last interval. It returns "" until two samples exist or when
// num_disks <= 0.
//
// NOTE: heap.Pop is destructive — each call consumes entries from
// disks.values, so a second call before the next sample prints fewer disks.
func (disks *DiskInfo) InfoPrint(num_disks int) string {
	// Need two samples before any deltas exist.
	if len(disks.old) == 0 {
		return ""
	}
	// BUG fix: num_disks == 0 previously returned the literal string "zero"
	// (a debug artifact); all other early exits return the empty string.
	if num_disks <= 0 {
		return ""
	}
	limit := min(disks.values.Len(), num_disks)
	var sb strings.Builder
	for i := 0; i < limit; i++ {
		disk := heap.Pop(disks.values).(*statValues)
		sb.WriteString(
			fmt.Sprintf("%s wc: %.0f\t sw: %.0f\t rc: %.0f\tsr: %.0f\n",
				disk.devname,
				disk.num_writes_completed,
				// FIXME - 'magic' number here converting 512-byte sectors to
				// KB, only temporary; see the sector-size comments on diskStat
				disk.num_sectors_written/2,
				disk.num_reads_completed,
				// FIXME - 'magic' number here converting sectors to KB
				disk.num_sectors_read/2,
			))
	}
	return sb.String()
}