biosnoop.bpf.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "biosnoop.h"
#define MAX_ENTRIES 10240
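
/*
 * Runtime configuration, set by the userspace loader before the programs
 * are attached: when targ_dev is not -1, only that device is traced; when
 * targ_queued is set, time spent queued in the I/O scheduler is reported.
 */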
const volatile bool targ_queued = false;
const volatile dev_t targ_dev = -1;
struct piddata {
char comm[TASK_COMM_LEN];
u32 pid;
};
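
/* in-flight request -> pid/comm of the task that initiated it */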
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, struct request *);
__type(value, struct piddata);
__uint(map_flags, BPF_F_NO_PREALLOC);
} infobyreq SEC(".maps");
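
/* timestamps taken when the request is inserted and issued, plus its device */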
struct stage {
u64 insert;
u64 issue;
dev_t dev;
};
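
/* in-flight request -> its stage (insert/issue timestamps, device) */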
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, MAX_ENTRIES);
__type(key, struct request *);
__type(value, struct stage);
} start SEC(".maps");
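
/* perf event array used to stream completion events to userspace */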
struct {
__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
__uint(key_size, sizeof(u32));
__uint(value_size, sizeof(u32));
} events SEC(".maps");
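
/* remember which task (pid/comm) is responsible for this request */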
static __always_inline
int trace_pid(struct request *rq)
{
u64 id = bpf_get_current_pid_tgid();
struct piddata piddata = {};

	/* the upper 32 bits of pid_tgid hold the tgid (userspace PID) */
	piddata.pid = id >> 32;
	bpf_get_current_comm(&piddata.comm, sizeof(piddata.comm));
bpf_map_update_elem(&infobyreq, &rq, &piddata, 0);
return 0;
}
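
/*
 * Attribute the request when block I/O accounting starts and when a bio
 * is merged into an already-accounted request.
 */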
SEC("fentry/blk_account_io_start")
int BPF_PROG(blk_account_io_start, struct request *rq)
{
return trace_pid(rq);
}
SEC("kprobe/blk_account_io_merge_bio")
int BPF_KPROBE(blk_account_io_merge_bio, struct request *rq)
{
return trace_pid(rq);
}
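
/*
 * Stamp the request at insert or issue time; on first sight, resolve its
 * device and apply the targ_dev filter.
 */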
static __always_inline
int trace_rq_start(struct request *rq, bool insert)
{
struct stage *stagep, stage = {};
u64 ts = bpf_ktime_get_ns();
stagep = bpf_map_lookup_elem(&start, &rq);
if (!stagep) {
struct gendisk *disk = BPF_CORE_READ(rq, rq_disk);
stage.dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
BPF_CORE_READ(disk, first_minor)) : 0;
if (targ_dev != -1 && targ_dev != stage.dev)
return 0;
stagep = &stage;
}
if (insert)
stagep->insert = ts;
else
stagep->issue = ts;
if (stagep == &stage)
bpf_map_update_elem(&start, &rq, stagep, 0);
return 0;
}
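
/* request added to an I/O scheduler queue */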
SEC("tp_btf/block_rq_insert")
int BPF_PROG(block_rq_insert, struct request_queue *q, struct request *rq)
{
return trace_rq_start(rq, true);
}
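
/* request issued to the device */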
SEC("tp_btf/block_rq_issue")
int BPF_PROG(block_rq_issue, struct request_queue *q, struct request *rq)
{
return trace_rq_start(rq, false);
}
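
/*
 * On completion: compute the device latency (issue -> complete), optionally
 * the queued time (insert -> issue), and emit one event per request.
 */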
SEC("tp_btf/block_rq_complete")
int BPF_PROG(block_rq_complete, struct request *rq, int error,
unsigned int nr_bytes)
{
	u64 ts = bpf_ktime_get_ns();
struct piddata *piddatap;
struct event event = {};
struct stage *stagep;
s64 delta;
stagep = bpf_map_lookup_elem(&start, &rq);
if (!stagep)
return 0;
delta = (s64)(ts - stagep->issue);
if (delta < 0)
goto cleanup;
piddatap = bpf_map_lookup_elem(&infobyreq, &rq);
if (!piddatap) {
event.comm[0] = '?';
} else {
__builtin_memcpy(&event.comm, piddatap->comm,
sizeof(event.comm));
event.pid = piddatap->pid;
}
event.delta = delta;
if (targ_queued && BPF_CORE_READ(rq, q, elevator)) {
if (!stagep->insert)
			event.qdelta = -1; /* insert missed or request never queued */
else
event.qdelta = stagep->issue - stagep->insert;
}
event.ts = ts;
event.sector = rq->__sector;
event.len = rq->__data_len;
event.cmd_flags = rq->cmd_flags;
event.dev = stagep->dev;
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event,
sizeof(event));
cleanup:
bpf_map_delete_elem(&start, &rq);
bpf_map_delete_elem(&infobyreq, &rq);
return 0;
}
char LICENSE[] SEC("license") = "GPL";