Skip to content

Commit

Permalink
disks - too much info from diskstats
Browse files Browse the repository at this point in the history
  • Loading branch information
bioe007 committed Feb 22, 2024
1 parent 5abc8b9 commit 0d7b5e2
Show file tree
Hide file tree
Showing 3 changed files with 129 additions and 2 deletions.
8 changes: 6 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,12 @@ These are the parameters I'd like to show
- _wonders_ any way to make mpstat type of info here?
- Number of processes: running/runnable, sleeping, uninterruptible sleep, zombies
- 'Errors' from dmesg and ~ dmesg | tail (or journalctl -b | tail)
- Memory: free/used
- Swap: free/used
- Memory: free/used (proc/meminfo)
- Swap: free/used (proc/meminfo)
- Disk activity: read/write in MB/s and queue size
- /proc/diskstats
- /proc/partitions
- [/proc/diskstats](https://www.kernel.org/doc/html/latest/admin-guide/iostats.html)
- Network In/Out (per device?)
- connections - active, passive, trans/retrans stats
- top 'few' processes consuming CPU | memory
Expand All @@ -54,3 +57,4 @@ though

- [/proc/loadavg](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/4/html/reference_guide/s2-proc-loadavg)


116 changes: 116 additions & 0 deletions disk/disk.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
package disk

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

type DiskStats struct {
major int
minor int
devname string
num_reads_completed int // This is the total number of reads completed successfully.
num_reads_merged int // , field 6 -- # of writes merged (unsigned long)
// Reads and writes which are adjacent to each other may be merged for efficiency. Thus two 4K reads may become one 8K read before it is ultimately handed to the disk, and so it will be counted (and queued) as only one I/O. This field lets you know how often this was done.
num_sectors_read int // This is the total number of sectors read successfully.

ms_reading int // This is the total number of milliseconds spent by all reads (as measured from blk_mq_alloc_request() to __blk_mq_end_request()).
num_writes_completed int // This is the total number of writes completed successfully.
num_writes_merged int // See the description of field 2.
num_sectors_written int // This is the total number of sectors written successfully.
ms_writing int // This is the total number of milliseconds spent by all writes (as measured from blk_mq_alloc_request() to __blk_mq_end_request()).
num_io_in_progress int // The only field that should go to zero. Incremented as requests are given to appropriate struct request_queue and decremented as they finish.
ms_doing_io int // This field increases so long as field 9 is nonzero.
// Since 5.0 this field counts jiffies when at least one request was started or completed. If request runs more than 2 jiffies then some I/O time might be not accounted in case of concurrent requests.
ms_doing_io_weighted int // This field is incremented at each I/O start, I/O completion, I/O merge, or read of these stats by the number of I/Os in progress (field 9) times the number of milliseconds spent doing I/O since the last update of this field. This can provide an easy measure of both I/O completion time and the backlog that may be accumulating.
num_discards_completed int // This is the total number of discards completed successfully.
num_discards_merged int // See the description of field 2
num_sectors_discarded int // This is the total number of sectors discarded successfully.
ms_spent_discarding int // This is the total number of milliseconds spent by
// all discards (as measured from blk_mq_alloc_request() to
// __blk_mq_end_request()).
num_flush_requests_completed int // This is the total number of flush requests completed successfully.
// Block layer combines flush requests and
// executes at most one at a time. This
// counts flush requests executed by disk.
// Not tracked for partitions.

ms_spent_flushing int // This is the total number of milliseconds spent by all flush requests.
}

type dsfields int

const (
DSFMAJOR dsfields = iota
DSFMINOR
DSFNAME
DSFNUM_READS_COMPLETED
DSFNUM_READS_MERGED
DSFNUM_SECTORS_READ
DSFMS_READING
DSFNUM_WRITES_COMPLETED
DSFNUM_WRITES_MERGED
DSFNUM_SECTORS_WRITTEN
DSFMS_WRITING
DSFNUM_IO_IN_PROGRESS
DSFMS_DOING_IO
DSFMS_DOING_IO_WEIGHTED
DSFNUM_DISCARDS_COMPLETED
DSFNUM_DISCARDS_MERGED
DSFNUM_SECTORS_DISCARDED
DSFMS_SPENT_DISCARDING
DSFNUM_FLUSH_REQUESTS_COMPLETED
DSFMS_SPENT_FLUSHING
)

func diskparse(s string) (*DiskStats, error) {
ds := new(DiskStats)

fields := strings.Fields(s)

var fieldnum dsfields
var err error
for fieldnum = DSFMAJOR; fieldnum < DSFMS_SPENT_FLUSHING+1; fieldnum++ {
switch fieldnum {
case DSFMAJOR:
ds.major, err = strconv.Atoi(fields[fieldnum])
if err != nil {
return nil, err
}
case DSFMINOR:
ds.minor, err = strconv.Atoi(fields[fieldnum])
if err != nil {
return nil, err
}
case DSFNAME:
ds.devname = fields[fieldnum]
}
}

return ds, nil
}

// GetDiskStats reads /proc/diskstats and returns one DiskStats entry per
// line (one per block device or partition). It returns an error if the
// file cannot be opened, a line fails to parse, or the scan itself fails.
func GetDiskStats() ([]*DiskStats, error) {
	f, err := os.Open("/proc/diskstats")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var ds []*DiskStats
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Check the parse error before appending so a nil entry is never
		// added to the result.
		cur, err := diskparse(scanner.Text())
		if err != nil {
			return nil, err
		}
		ds = append(ds, cur)
	}
	// Scanner.Err surfaces read errors that the loop condition swallows.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return ds, nil
}
7 changes: 7 additions & 0 deletions synopsys.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"log"

"github.com/bioe007/synopsys/cpu"
"github.com/bioe007/synopsys/disk"
"github.com/bioe007/synopsys/load"
"github.com/bioe007/synopsys/memory"
)
Expand All @@ -25,4 +26,10 @@ func main() {
}
fmt.Printf("LOAD %+v\n", load)

disks, err := disk.GetDiskStats()
if err != nil {
log.Fatal("disk average failure", err)
}
fmt.Println("Got disks: ", len(disks))

}

0 comments on commit 0d7b5e2

Please sign in to comment.