diff --git a/docs/reference_guide.md b/docs/reference_guide.md index 2b352c131409..e1292bf83491 100644 --- a/docs/reference_guide.md +++ b/docs/reference_guide.md @@ -1374,7 +1374,7 @@ BPF_PERF_OUTPUT(events); [...] ``` -In Python, you can either let bcc generate the data structure from C declaration automatically (recommanded): +In Python, you can either let bcc generate the data structure from C declaration automatically (recommended): ```Python def print_event(cpu, data, size): diff --git a/examples/cpp/TCPSendStack.cc b/examples/cpp/TCPSendStack.cc index 42aac3448b44..f7f150d5a234 100644 --- a/examples/cpp/TCPSendStack.cc +++ b/examples/cpp/TCPSendStack.cc @@ -101,7 +101,7 @@ int main(int argc, char** argv) { for (auto sym : syms) std::cout << " " << sym << std::endl; } else { - // -EFAULT normally means the stack is not availiable and not an error + // -EFAULT normally means the stack is not available and not an error if (it.first.kernel_stack != -EFAULT) { lost_stacks++; std::cout << " [Lost Kernel Stack" << it.first.kernel_stack << "]" @@ -114,7 +114,7 @@ int main(int argc, char** argv) { for (auto sym : syms) std::cout << " " << sym << std::endl; } else { - // -EFAULT normally means the stack is not availiable and not an error + // -EFAULT normally means the stack is not available and not an error if (it.first.user_stack != -EFAULT) { lost_stacks++; std::cout << " [Lost User Stack " << it.first.user_stack << "]" diff --git a/examples/networking/http_filter/http-parse-complete.c b/examples/networking/http_filter/http-parse-complete.c index 117cd45045fb..01a234ea2cbe 100644 --- a/examples/networking/http_filter/http-parse-complete.c +++ b/examples/networking/http_filter/http-parse-complete.c @@ -27,7 +27,7 @@ BPF_HASH(sessions, struct Key, struct Leaf, 1024); AND ALL the other packets having same (src_ip,dst_ip,src_port,dst_port) this means belonging to the same "session" this additional check avoids url truncation, if url is too long - userspace script, if 
necessary, reassembles urls splitted in 2 or more packets. + userspace script, if necessary, reassembles urls split in 2 or more packets. if the program is loaded as PROG_TYPE_SOCKET_FILTER and attached to a socket return 0 -> DROP the packet diff --git a/examples/networking/http_filter/http-parse-complete.py b/examples/networking/http_filter/http-parse-complete.py index f1e5e0a26be4..0f9951051047 100644 --- a/examples/networking/http_filter/http-parse-complete.py +++ b/examples/networking/http_filter/http-parse-complete.py @@ -254,13 +254,13 @@ def help(): #check if the packet belong to a session saved in bpf_sessions if (current_Key in bpf_sessions): #check id the packet belong to a session saved in local_dictionary - #(local_dictionary mantains HTTP GET/POST url not printed yet because splitted in N packets) + #(local_dictionary maintains HTTP GET/POST url not printed yet because split in N packets) if (binascii.hexlify(current_Key) in local_dictionary): #first part of the HTTP GET/POST url is already present in local dictionary (prev_payload_string) prev_payload_string = local_dictionary[binascii.hexlify(current_Key)] #looking for CR+LF in current packet. if (crlf in payload_string): - #last packet. containing last part of HTTP GET/POST url splitted in N packets. + #last packet. containing last part of HTTP GET/POST url split in N packets. #append current payload prev_payload_string += payload_string #print HTTP GET/POST url @@ -272,7 +272,7 @@ def help(): except: print ("error deleting from map or dictionary") else: - #NOT last packet. containing part of HTTP GET/POST url splitted in N packets. + #NOT last packet. containing part of HTTP GET/POST url split in N packets. 
#append current payload prev_payload_string += payload_string #check if not size exceeding (usually HTTP GET/POST url < 8K ) diff --git a/examples/tracing/dddos.py b/examples/tracing/dddos.py index 5b544241c372..fe1afd7260d2 100755 --- a/examples/tracing/dddos.py +++ b/examples/tracing/dddos.py @@ -43,7 +43,7 @@ * timestamp between 2 successive packets is so small * (which is not like regular applications behaviour). * This script looks for this difference in time and if it sees - * more than MAX_NB_PACKETS succesive packets with a difference + * more than MAX_NB_PACKETS successive packets with a difference * of timestamp between each one of them less than * LEGAL_DIFF_TIMESTAMP_PACKETS ns, * ------------------ It Triggers an ALERT ----------------- diff --git a/man/man8/compactsnoop.8 b/man/man8/compactsnoop.8 index d110a2b1d683..8479e809567b 100644 --- a/man/man8/compactsnoop.8 +++ b/man/man8/compactsnoop.8 @@ -145,11 +145,11 @@ output - internal to compaction .PP .in +8n "complete" (COMPACT_COMPLETE): The full zone was compacted scanned but wasn't -successfull to compact suitable pages. +successful to compact suitable pages. .PP .in +8n "partial_skipped" (COMPACT_PARTIAL_SKIPPED): direct compaction has scanned part -of the zone but wasn't successfull to compact suitable pages. +of the zone but wasn't successful to compact suitable pages. .PP .in +8n "contended" (COMPACT_CONTENDED): compaction terminated prematurely due to lock diff --git a/man/man8/criticalstat.8 b/man/man8/criticalstat.8 index 52baf1d8cc53..cdd5e8ed9949 100644 --- a/man/man8/criticalstat.8 +++ b/man/man8/criticalstat.8 @@ -5,10 +5,10 @@ criticalstat \- A tracer to find and report long atomic critical sections in ker .B criticalstat [\-h] [\-p] [\-i] [\-d DURATION] .SH DESCRIPTION -criticalstat traces and reports occurences of atomic critical sections in the +criticalstat traces and reports occurrences of atomic critical sections in the kernel with useful stacktraces showing the origin of them. 
Such critical sections frequently occur due to use of spinlocks, or if interrupts or -preemption were explicity disabled by a driver. IRQ routines in Linux are also +preemption were explicitly disabled by a driver. IRQ routines in Linux are also executed with interrupts disabled. There are many reasons. Such critical sections are a source of long latency/responsive issues for real-time systems. diff --git a/man/man8/dbslower.8 b/man/man8/dbslower.8 index c21e6fae6960..e39b8bd0065f 100644 --- a/man/man8/dbslower.8 +++ b/man/man8/dbslower.8 @@ -11,7 +11,7 @@ those that exceed a latency (query time) threshold. By default a threshold of This uses User Statically-Defined Tracing (USDT) probes, a feature added to MySQL and PostgreSQL for DTrace support, but which may not be enabled on a given installation. See requirements. -Alternativly, MySQL queries can be traced without the USDT support using the +Alternatively, MySQL queries can be traced without the USDT support using the -x option. Since this uses BPF, only the root user can use this tool. diff --git a/man/man8/filetop.8 b/man/man8/filetop.8 index 68cb2042c777..ba0cbd6e5907 100644 --- a/man/man8/filetop.8 +++ b/man/man8/filetop.8 @@ -9,7 +9,7 @@ This is top for files. This traces file reads and writes, and prints a per-file summary every interval (by default, 1 second). By default the summary is sorted on the highest read throughput (Kbytes). Sorting order can be changed via -s option. By default only -IO on regular files is shown. The -a option will list all file types (sokets, +IO on regular files is shown. The -a option will list all file types (sockets, FIFOs, etc). This uses in-kernel eBPF maps to store per process summaries for efficiency. 
diff --git a/man/man8/runqlen.8 b/man/man8/runqlen.8 index 27a649dac8aa..b36a5a18cf82 100644 --- a/man/man8/runqlen.8 +++ b/man/man8/runqlen.8 @@ -51,7 +51,7 @@ Print run queue occupancy every second: # .B runqlen \-O 1 .TP -Print run queue occupancy, with timetamps, for each CPU: +Print run queue occupancy, with timestamps, for each CPU: # .B runqlen \-COT 1 .SH FIELDS diff --git a/src/cc/api/BPF.cc b/src/cc/api/BPF.cc index 56d52de19ab3..67f9872a3372 100644 --- a/src/cc/api/BPF.cc +++ b/src/cc/api/BPF.cc @@ -854,7 +854,7 @@ StatusTuple USDT::init() { for (auto& p : ctx->probes_) { if (p->provider_ == provider_ && p->name_ == name_) { // Take ownership of the probe that we are interested in, and avoid it - // being destrcuted when we destruct the USDT::Context instance + // being destructed when we destruct the USDT::Context instance probe_ = std::unique_ptr>(p.release(), deleter); p.swap(ctx->probes_.back()); diff --git a/src/cc/bcc_elf.h b/src/cc/bcc_elf.h index b6f5c4313c85..f948162e2c1f 100644 --- a/src/cc/bcc_elf.h +++ b/src/cc/bcc_elf.h @@ -56,7 +56,7 @@ int bcc_elf_foreach_load_section(const char *path, bcc_elf_load_sectioncb callback, void *payload); // Iterate over symbol table of a binary module -// Parameter "option" points to a bcc_symbol_option struct to indicate wheather +// Parameter "option" points to a bcc_symbol_option struct to indicate whether // and how to use debuginfo file, and what types of symbols to load. 
// Returns -1 on error, and 0 on success or stopped by callback int bcc_elf_foreach_sym(const char *path, bcc_elf_symcb callback, void *option, diff --git a/src/cc/export/helpers.h b/src/cc/export/helpers.h index 2c3e9eb9a73d..8f85225e1d0a 100644 --- a/src/cc/export/helpers.h +++ b/src/cc/export/helpers.h @@ -104,7 +104,7 @@ BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \ __attribute__((section("maps/export"))) \ struct _name##_table_t __##_name -// define a table that is shared accross the programs in the same namespace +// define a table that is shared across the programs in the same namespace #define BPF_TABLE_SHARED(_table_type, _key_type, _leaf_type, _name, _max_entries) \ BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries); \ __attribute__((section("maps/shared"))) \ diff --git a/src/cc/frontends/b/codegen_llvm.cc b/src/cc/frontends/b/codegen_llvm.cc index a69fdba92e2a..0983c49a26c0 100644 --- a/src/cc/frontends/b/codegen_llvm.cc +++ b/src/cc/frontends/b/codegen_llvm.cc @@ -399,7 +399,7 @@ StatusTuple CodegenLLVM::visit_packet_expr_node(PacketExprNode *n) { expr_ = B.CreateCall(load_fn, vector({skb_ptr8, skb_hdr_offset, B.getInt64(bit_offset & 0x7), B.getInt64(bit_width)})); // this generates extra trunc insns whereas the bpf.load fns already - // trunc the values internally in the bpf interpeter + // trunc the values internally in the bpf interpreter //expr_ = B.CreateTrunc(pop_expr(), B.getIntNTy(bit_width)); } } else { diff --git a/src/cc/frontends/b/parser.yy b/src/cc/frontends/b/parser.yy index 527e84f4dd56..e6d1592c8855 100644 --- a/src/cc/frontends/b/parser.yy +++ b/src/cc/frontends/b/parser.yy @@ -213,9 +213,9 @@ block ; enter_varscope : /* empty */ { $$ = parser.scopes_->enter_var_scope(); } ; -exit_varscope : /* emtpy */ { $$ = parser.scopes_->exit_var_scope(); } ; +exit_varscope : /* empty */ { $$ = parser.scopes_->exit_var_scope(); } ; enter_statescope : /* empty */ { $$ = parser.scopes_->enter_state_scope(); } 
; -exit_statescope : /* emtpy */ { $$ = parser.scopes_->exit_state_scope(); } ; +exit_statescope : /* empty */ { $$ = parser.scopes_->exit_state_scope(); } ; struct_decl : TSTRUCT ident TLBRACE struct_decl_stmts TRBRACE diff --git a/src/cc/frontends/p4/compiler/ebpfTable.py b/src/cc/frontends/p4/compiler/ebpfTable.py index 4b7e0232d2d0..5325028bbd90 100644 --- a/src/cc/frontends/p4/compiler/ebpfTable.py +++ b/src/cc/frontends/p4/compiler/ebpfTable.py @@ -136,7 +136,7 @@ def serializeType(self, serializer, keyTypeName): # Sort fields in decreasing size; this will ensure that # there is no padding. # Padding may cause the ebpf verification to fail, - # since padding fields are not initalized + # since padding fields are not initialized fieldOrder = sorted( self.fields, key=EbpfTableKey.fieldRank, reverse=True) for f in fieldOrder: diff --git a/src/cc/libbpf.c b/src/cc/libbpf.c index 138d9ee68db3..cfd7fde5f6ef 100644 --- a/src/cc/libbpf.c +++ b/src/cc/libbpf.c @@ -516,7 +516,7 @@ int bcc_prog_load_xattr(struct bpf_load_program_attr *attr, int prog_len, if (attr->log_level > 0) { if (log_buf_size > 0) { - // Use user-provided log buffer if availiable. + // Use user-provided log buffer if available. log_buf[0] = 0; attr_log_buf = log_buf; attr_log_buf_size = log_buf_size; diff --git a/src/cc/libbpf.h b/src/cc/libbpf.h index 4a2215746869..77bb728c5fdb 100644 --- a/src/cc/libbpf.h +++ b/src/cc/libbpf.h @@ -59,7 +59,7 @@ int bpf_get_next_key(int fd, void *key, void *next_key); * it will not to any additional memory allocation. * - Otherwise, it will allocate an internal temporary buffer for log message * printing, and continue to attempt increase that allocated buffer size if - * initial attemp was insufficient in size. + * initial attempt was insufficient in size. 
*/ int bcc_prog_load(enum bpf_prog_type prog_type, const char *name, const struct bpf_insn *insns, int prog_len, diff --git a/tests/cc/test_perf_event.cc b/tests/cc/test_perf_event.cc index 76d2d17ef721..b3b26031f0c6 100644 --- a/tests/cc/test_perf_event.cc +++ b/tests/cc/test_perf_event.cc @@ -25,7 +25,7 @@ TEST_CASE("test read perf event", "[bpf_perf_event]") { // The basic bpf_perf_event_read is supported since Kernel 4.3. However in that // version it only supported HARDWARE and RAW events. On the other hand, our -// tests running on Jenkins won't have availiable HARDWARE counters since they +// tests running on Jenkins won't have available HARDWARE counters since they // are running on VMs. The support of other types of events such as SOFTWARE are // only added since Kernel 4.13, hence we can only run the test since that. #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) diff --git a/tests/lua/luaunit.lua b/tests/lua/luaunit.lua index 03316d3a570b..6ce56f0e7873 100644 --- a/tests/lua/luaunit.lua +++ b/tests/lua/luaunit.lua @@ -36,7 +36,7 @@ M.VERBOSITY_VERBOSE = 20 -- set EXPORT_ASSERT_TO_GLOBALS to have all asserts visible as global values -- EXPORT_ASSERT_TO_GLOBALS = true --- we need to keep a copy of the script args before it is overriden +-- we need to keep a copy of the script args before it is overridden local cmdline_argv = rawget(_G, "arg") M.FAILURE_PREFIX = 'LuaUnit test FAILURE: ' -- prefix string for failed tests @@ -2136,7 +2136,7 @@ end end -- class LuaUnit --- For compatbility with LuaUnit v2 +-- For compatibility with LuaUnit v2 M.run = M.LuaUnit.run M.Run = M.LuaUnit.run diff --git a/tests/python/include/folly/tracing/StaticTracepoint-ELF.h b/tests/python/include/folly/tracing/StaticTracepoint-ELF.h index a8a74c3ede3f..4deb18a7166f 100644 --- a/tests/python/include/folly/tracing/StaticTracepoint-ELF.h +++ b/tests/python/include/folly/tracing/StaticTracepoint-ELF.h @@ -52,7 +52,7 @@ #define FOLLY_SDT_ARGSIZE(x) (FOLLY_SDT_ISARRAY(x) ? 
sizeof(void*) : sizeof(x)) // Format of each probe arguments as operand. -// Size of the arugment tagged with FOLLY_SDT_Sn, with "n" constraint. +// Size of the argument tagged with FOLLY_SDT_Sn, with "n" constraint. // Value of the argument tagged with FOLLY_SDT_An, with configured constraint. #define FOLLY_SDT_ARG(n, x) \ [FOLLY_SDT_S##n] "n" ((size_t)FOLLY_SDT_ARGSIZE(x)), \ diff --git a/tools/cachetop.py b/tools/cachetop.py index 59de3912dd94..00b11a8c8fc5 100755 --- a/tools/cachetop.py +++ b/tools/cachetop.py @@ -107,7 +107,7 @@ def get_processes_stats( misses = (apcl + apd) # rtaccess is the read hit % during the sample period. - # wtaccess is the write hit % during the smaple period. + # wtaccess is the write hit % during the sample period. if mpa > 0: rtaccess = float(mpa) / (access + misses) if apcl > 0: diff --git a/tools/capable.py b/tools/capable.py index bb4a843558ff..69fef3de4c17 100755 --- a/tools/capable.py +++ b/tools/capable.py @@ -189,7 +189,7 @@ def __getattr__(self, name): "TIME", "UID", "PID", "COMM", "CAP", "NAME", "AUDIT")) def stack_id_err(stack_id): - # -EFAULT in get_stackid normally means the stack-trace is not availible, + # -EFAULT in get_stackid normally means the stack-trace is not available, # Such as getting kernel stack trace in userspace code return (stack_id < 0) and (stack_id != -errno.EFAULT) diff --git a/tools/compactsnoop.py b/tools/compactsnoop.py index 7bdf33c18131..7f9ce7ee3628 100755 --- a/tools/compactsnoop.py +++ b/tools/compactsnoop.py @@ -329,10 +329,10 @@ def compact_result_to_str(status): # COMPACT_CONTINUE: compaction should continue to another pageblock 4: "continue", # COMPACT_COMPLETE: The full zone was compacted scanned but wasn't - # successfull to compact suitable pages. + # successful to compact suitable pages. 5: "complete", # COMPACT_PARTIAL_SKIPPED: direct compaction has scanned part of the - # zone but wasn't successfull to compact suitable pages. 
+ # zone but wasn't successful to compact suitable pages. 6: "partial_skipped", # COMPACT_CONTENDED: compaction terminated prematurely due to lock # contentions diff --git a/tools/compactsnoop_example.txt b/tools/compactsnoop_example.txt index 563ee80f6de2..47fc73078294 100644 --- a/tools/compactsnoop_example.txt +++ b/tools/compactsnoop_example.txt @@ -65,9 +65,9 @@ or (kernel 4.7 and above) 3: "no_suitable_page", # COMPACT_CONTINUE: compaction should continue to another pageblock 4: "continue", - # COMPACT_COMPLETE: The full zone was compacted scanned but wasn't successfull to compact suitable pages. + # COMPACT_COMPLETE: The full zone was compacted scanned but wasn't successful to compact suitable pages. 5: "complete", - # COMPACT_PARTIAL_SKIPPED: direct compaction has scanned part of the zone but wasn't successfull to compact suitable pages. + # COMPACT_PARTIAL_SKIPPED: direct compaction has scanned part of the zone but wasn't successful to compact suitable pages. 6: "partial_skipped", # COMPACT_CONTENDED: compaction terminated prematurely due to lock contentions 7: "contended", diff --git a/tools/criticalstat_example.txt b/tools/criticalstat_example.txt index 1f5376913840..10e25a920548 100644 --- a/tools/criticalstat_example.txt +++ b/tools/criticalstat_example.txt @@ -1,9 +1,9 @@ Demonstrations of criticalstat: Find long atomic critical sections in the kernel. -criticalstat traces and reports occurences of atomic critical sections in the +criticalstat traces and reports occurrences of atomic critical sections in the kernel with useful stacktraces showing the origin of them. Such critical sections frequently occur due to use of spinlocks, or if interrupts or -preemption were explicity disabled by a driver. IRQ routines in Linux are also +preemption were explicitly disabled by a driver. IRQ routines in Linux are also executed with interrupts disabled. There are many reasons. 
Such critical sections are a source of long latency/responsive issues for real-time systems. diff --git a/tools/inject.py b/tools/inject.py index fa2d3887544d..9d6b85f8cc16 100755 --- a/tools/inject.py +++ b/tools/inject.py @@ -75,7 +75,7 @@ def _get_if_top(self): else: early_pred = "bpf_get_prandom_u32() > %s" % str(int((1<<32)*Probe.probability)) # init the map - # dont do an early exit here so the singular case works automatically + # don't do an early exit here so the singular case works automatically # have an early exit for probability option enter = """ /* @@ -112,7 +112,7 @@ def _get_heading(self): self.func_name = self.event + ("_entry" if self.is_entry else "_exit") func_sig = "struct pt_regs *ctx" - # assume theres something in there, no guarantee its well formed + # assume there's something in there, no guarantee it's well formed if right > left + 1 and self.is_entry: func_sig += ", " + self.func[left + 1:right] @@ -209,13 +209,13 @@ def _generate_bottom(self): pred = self.preds[0][0] text = self._get_heading() + """ { - u32 overriden = 0; + u32 overridden = 0; int zero = 0; u32* val; val = count.lookup(&zero); if (val) - overriden = *val; + overridden = *val; /* * preparation for predicate, if necessary @@ -224,7 +224,7 @@ def _generate_bottom(self): /* * If this is the only call in the chain and predicate passes */ - if (%s == 1 && %s && overriden < %s) { + if (%s == 1 && %s && overridden < %s) { count.increment(zero); bpf_override_return(ctx, %s); return 0; @@ -239,7 +239,7 @@ def _generate_bottom(self): /* * If all conds have been met and predicate passes */ - if (p->conds_met == %s && %s && overriden < %s) { + if (p->conds_met == %s && %s && overridden < %s) { count.increment(zero); bpf_override_return(ctx, %s); } diff --git a/tools/klockstat.py b/tools/klockstat.py index aa9e7c0ab6f0..4b4661068443 100755 --- a/tools/klockstat.py +++ b/tools/klockstat.py @@ -47,7 +47,7 @@ def positive_nonzero_int(val): return ival def stack_id_err(stack_id): - # 
-EFAULT in get_stackid normally means the stack-trace is not availible, + # -EFAULT in get_stackid normally means the stack-trace is not available, # Such as getting kernel stack trace in userspace code return (stack_id < 0) and (stack_id != -errno.EFAULT) diff --git a/tools/nfsslower.py b/tools/nfsslower.py index 36918ca00faa..5e344b9b400a 100755 --- a/tools/nfsslower.py +++ b/tools/nfsslower.py @@ -22,7 +22,7 @@ # # This tool uses kprobes to instrument the kernel for entry and exit # information, in the future a preferred way would be to use tracepoints. -# Currently there are'nt any tracepoints available for nfs_read_file, +# Currently there aren't any tracepoints available for nfs_read_file, # nfs_write_file and nfs_open_file, nfs_getattr does have entry and exit # tracepoints but we chose to use kprobes for consistency # diff --git a/tools/offcputime.py b/tools/offcputime.py index 39f36953f1c0..50ce1cc1ef61 100755 --- a/tools/offcputime.py +++ b/tools/offcputime.py @@ -36,7 +36,7 @@ def positive_nonzero_int(val): return ival def stack_id_err(stack_id): - # -EFAULT in get_stackid normally means the stack-trace is not availible, + # -EFAULT in get_stackid normally means the stack-trace is not available, # Such as getting kernel stack trace in userspace code return (stack_id < 0) and (stack_id != -errno.EFAULT) diff --git a/tools/offwaketime.py b/tools/offwaketime.py index 4809a3b65e1d..da9dbdbc7ed7 100755 --- a/tools/offwaketime.py +++ b/tools/offwaketime.py @@ -36,7 +36,7 @@ def positive_nonzero_int(val): return ival def stack_id_err(stack_id): - # -EFAULT in get_stackid normally means the stack-trace is not availible, + # -EFAULT in get_stackid normally means the stack-trace is not available, # Such as getting kernel stack trace in userspace code return (stack_id < 0) and (stack_id != -errno.EFAULT) diff --git a/tools/old/profile.py b/tools/old/profile.py index 7c768f436452..0abcf576374a 100755 --- a/tools/old/profile.py +++ b/tools/old/profile.py @@ -19,7 
+19,7 @@ # wrong, note that the first line is the IP, and then the (skipped) stack. # # Note: if another perf-based sampling session is active, the output may become -# polluted with their events. On older kernels, the ouptut may also become +# polluted with their events. On older kernels, the output may also become # polluted with tracing sessions (when the kprobe is used instead of the # tracepoint). If this becomes a problem, logic can be added to filter events. # @@ -300,7 +300,7 @@ def aksym(addr): counts = b.get_table("counts") stack_traces = b.get_table("stack_traces") for k, v in sorted(counts.items(), key=lambda counts: counts[1].value): - # handle get_stackid erorrs + # handle get_stackid errors if (not args.user_stacks_only and k.kernel_stack_id < 0 and k.kernel_stack_id != -errno.EFAULT) or \ (not args.kernel_stacks_only and k.user_stack_id < 0 and diff --git a/tools/profile.py b/tools/profile.py index dfbced6aa841..97ffd8fb3f49 100755 --- a/tools/profile.py +++ b/tools/profile.py @@ -57,7 +57,7 @@ def positive_nonzero_int(val): return ival def stack_id_err(stack_id): - # -EFAULT in get_stackid normally means the stack-trace is not availible, + # -EFAULT in get_stackid normally means the stack-trace is not available, # Such as getting kernel stack trace in userspace code return (stack_id < 0) and (stack_id != -errno.EFAULT) diff --git a/tools/reset-trace.sh b/tools/reset-trace.sh index fb891a7384d7..34565b795d11 100755 --- a/tools/reset-trace.sh +++ b/tools/reset-trace.sh @@ -93,7 +93,7 @@ function checkfile { contents=$(grep -v '^#' $file) if [[ "$contents" != "$expected" ]]; then echo "Noticed unrelated tracing file $PWD/$file isn't set as" \ - "expected. Not reseting (-F to force, -v for verbose)." + "expected. Not resetting (-F to force, -v for verbose)." vecho "Contents of $file is (line enumerated):" (( opt_verbose )) && cat -nv $file vecho "Expected \"$expected\"." 
@@ -113,7 +113,7 @@ done shift $(( $OPTIND - 1 )) ### reset tracing state -vecho "Reseting tracing state..." +vecho "Resetting tracing state..." vecho cd $tracing || die "ERROR: accessing tracing. Root user? /sys/kernel/debug?" diff --git a/tools/reset-trace_example.txt b/tools/reset-trace_example.txt index d0f6777f68d7..37b2232ac325 100644 --- a/tools/reset-trace_example.txt +++ b/tools/reset-trace_example.txt @@ -27,7 +27,7 @@ That's it. You can use -v to see what it does: # ./reset-trace.sh -v -Reseting tracing state... +Resetting tracing state... Checking /sys/kernel/debug/tracing/kprobe_events Checking /sys/kernel/debug/tracing/uprobe_events @@ -132,7 +132,7 @@ Ok, so the process is now gone, but it did leave tracing in a semi-enabled state. Using reset-trace: # ./reset-trace.sh -v -Reseting tracing state... +Resetting tracing state... Checking /sys/kernel/debug/tracing/kprobe_events Checking /sys/kernel/debug/tracing/uprobe_events @@ -186,19 +186,19 @@ And again with quiet: Here is an example of reset-trace detecting an unrelated tracing session: # ./reset-trace.sh -Noticed unrelated tracing file /sys/kernel/debug/tracing/set_ftrace_filter isn't set as expected. Not reseting (-F to force, -v for verbose). +Noticed unrelated tracing file /sys/kernel/debug/tracing/set_ftrace_filter isn't set as expected. Not resetting (-F to force, -v for verbose). And verbose: # ./reset-trace.sh -v -Reseting tracing state... +Resetting tracing state... Checking /sys/kernel/debug/tracing/kprobe_events Checking /sys/kernel/debug/tracing/uprobe_events Checking /sys/kernel/debug/tracing/trace Checking /sys/kernel/debug/tracing/current_tracer Checking /sys/kernel/debug/tracing/set_ftrace_filter -Noticed unrelated tracing file /sys/kernel/debug/tracing/set_ftrace_filter isn't set as expected. Not reseting (-F to force, -v for verbose). +Noticed unrelated tracing file /sys/kernel/debug/tracing/set_ftrace_filter isn't set as expected. Not resetting (-F to force, -v for verbose). 
Contents of set_ftrace_filter is (line enumerated): 1 tcp_send_mss 2 tcp_sendpage diff --git a/tools/runqlen_example.txt b/tools/runqlen_example.txt index 4c10ed42ed77..60c76feca81a 100644 --- a/tools/runqlen_example.txt +++ b/tools/runqlen_example.txt @@ -36,7 +36,7 @@ of 8. This will cause run queue latency, which can be measured by the bcc runqlat tool. -Here's an example of an issue that runqlen can indentify. Starting with the +Here's an example of an issue that runqlen can identify. Starting with the system-wide summary: # ./runqlen.py diff --git a/tools/solisten_example.txt b/tools/solisten_example.txt index f7ace8c56af4..2e5d7613cd7d 100644 --- a/tools/solisten_example.txt +++ b/tools/solisten_example.txt @@ -21,7 +21,7 @@ PID COMM NETNS PROTO BACKLOG ADDR 6069 nginx 4026531957 TCPv6 128 :: 80 This output show the listen event from 3 programs. Netcat was started twice as -shown by the 2 different PIDs. The first time on the wilcard IPv4, the second +shown by the 2 different PIDs. The first time on the wildcard IPv4, the second time on an IPv6. Netcat being a "one shot" program. It can accept a single connection, hence the backlog of "1". diff --git a/tools/tcpaccept.py b/tools/tcpaccept.py index 914d51837150..7c1042084793 100755 --- a/tools/tcpaccept.py +++ b/tools/tcpaccept.py @@ -82,7 +82,7 @@ # # The following code uses kprobes to instrument inet_csk_accept(). # On Linux 4.16 and later, we could use sock:inet_sock_set_state -# tracepoint for efficency, but it may output wrong PIDs. This is +# tracepoint for efficiency, but it may output wrong PIDs. This is # because sock:inet_sock_set_state may run outside of process context. # Hence, we stick to kprobes until we find a proper solution. 
# diff --git a/tools/tcplife.lua b/tools/tcplife.lua index 60fb51f08392..3f4f6afddf6b 100755 --- a/tools/tcplife.lua +++ b/tools/tcplife.lua @@ -203,7 +203,7 @@ local arg_time = false local examples = [[examples: ./tcplife # trace all TCP connect()s ./tcplife -t # include time column (HH:MM:SS) - ./tcplife -w # wider colums (fit IPv6) + ./tcplife -w # wider columns (fit IPv6) ./tcplife -stT # csv output, with times & timestamps ./tcplife -p 181 # only trace PID 181 ./tcplife -L 80 # only trace local port 80 diff --git a/tools/tcplife.py b/tools/tcplife.py index ed2515539625..d4e679dd7e20 100755 --- a/tools/tcplife.py +++ b/tools/tcplife.py @@ -33,7 +33,7 @@ examples = """examples: ./tcplife # trace all TCP connect()s ./tcplife -t # include time column (HH:MM:SS) - ./tcplife -w # wider colums (fit IPv6) + ./tcplife -w # wider columns (fit IPv6) ./tcplife -stT # csv output, with times & timestamps ./tcplife -p 181 # only trace PID 181 ./tcplife -L 80 # only trace local port 80 diff --git a/tools/tcplife_example.txt b/tools/tcplife_example.txt index fe4e52b102e6..cbf44796f22e 100644 --- a/tools/tcplife_example.txt +++ b/tools/tcplife_example.txt @@ -127,7 +127,7 @@ optional arguments: examples: ./tcplife # trace all TCP connect()s ./tcplife -t # include time column (HH:MM:SS) - ./tcplife -w # wider colums (fit IPv6) + ./tcplife -w # wider columns (fit IPv6) ./tcplife -stT # csv output, with times & timestamps ./tcplife -p 181 # only trace PID 181 ./tcplife -L 80 # only trace local port 80 diff --git a/tools/tcpstates.py b/tools/tcpstates.py index 3c78b1c4c0e6..b9a643873ff4 100755 --- a/tools/tcpstates.py +++ b/tools/tcpstates.py @@ -28,7 +28,7 @@ ./tcpstates # trace all TCP state changes ./tcpstates -t # include timestamp column ./tcpstates -T # include time column (HH:MM:SS) - ./tcpstates -w # wider colums (fit IPv6) + ./tcpstates -w # wider columns (fit IPv6) ./tcpstates -stT # csv output, with times & timestamps ./tcpstates -Y # log events to the systemd journal 
./tcpstates -L 80 # only trace local port 80 diff --git a/tools/tcpstates_example.txt b/tools/tcpstates_example.txt index 05df8b616614..e50012e7d8a4 100644 --- a/tools/tcpstates_example.txt +++ b/tools/tcpstates_example.txt @@ -47,7 +47,7 @@ examples: ./tcpstates # trace all TCP state changes ./tcpstates -t # include timestamp column ./tcpstates -T # include time column (HH:MM:SS) - ./tcpstates -w # wider colums (fit IPv6) + ./tcpstates -w # wider columns (fit IPv6) ./tcpstates -stT # csv output, with times & timestamps ./tcpstates -Y # log events to the systemd journal ./tcpstates -L 80 # only trace local port 80 diff --git a/tools/tcpsubnet_example.txt b/tools/tcpsubnet_example.txt index 72a61722c90e..49576d6784d5 100644 --- a/tools/tcpsubnet_example.txt +++ b/tools/tcpsubnet_example.txt @@ -42,7 +42,7 @@ By default, tcpsubnet will categorize traffic in the following subnets: The last subnet is a catch-all. In other words, anything that doesn't match the first 4 defaults will be categorized under 0.0.0.0/0 -You can change this default behavoir by passing a comma separated list +You can change this default behavior by passing a comma separated list of subnets. Let's say we would like to know how much traffic we are sending to github.com. We first find out what IPs github.com resolves to, Eg: @@ -96,7 +96,7 @@ the first element of the list and 192.130.253.112/32 as the second, all the traffic going to 192.130.253.112/32 will have been categorized in 0.0.0.0/0 as 192.130.253.112/32 is contained in 0.0.0.0/0. -The default ouput unit is bytes. You can change it by using the +The default output unit is bytes. You can change it by using the -f [--format] flag. tcpsubnet uses the same flags as iperf for the unit format and adds mM. When using kmKM, the output will be rounded to floor. 
Eg: diff --git a/tools/tcptracer_example.txt b/tools/tcptracer_example.txt index f782d91d2ae9..2873d036a999 100644 --- a/tools/tcptracer_example.txt +++ b/tools/tcptracer_example.txt @@ -19,7 +19,7 @@ A 28978 nc 4 10.202.210.1 10.202.109.12 8080 59160 X 28978 nc 4 10.202.210.1 10.202.109.12 8080 59160 ``` -This output shows three conections, one outgoing from a "telnet" process, one +This output shows three connections, one outgoing from a "telnet" process, one outgoing from "curl" to a local netcat, and one incoming received by the "nc" process. The output details show the kind of event (C for connection, X for close and A for accept), PID, IP version, source address, destination address,