Skip to content

Commit

Permalink
apply select pedantic clippy recommendations
Browse files Browse the repository at this point in the history
  • Loading branch information
jqnatividad committed Jun 24, 2022
1 parent 992878b commit 4d16724
Show file tree
Hide file tree
Showing 12 changed files with 34 additions and 26 deletions.
4 changes: 2 additions & 2 deletions src/cmd/apply.rs
Original file line number Diff line number Diff line change
Expand Up @@ -426,7 +426,7 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
// we prep it so we only populate the lookup vec with the index of these columns
// so SimpleCurlyFormat is performant
let mut dynfmt_fields = Vec::with_capacity(10); // 10 is a reasonable default to save allocs
let mut dynfmt_template = args.flag_formatstr.to_owned();
let mut dynfmt_template = args.flag_formatstr.clone();
if args.cmd_dynfmt {
if args.flag_no_headers {
return fail!("dynfmt operation requires headers.");
Expand Down Expand Up @@ -503,7 +503,7 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
match rdr.read_record(&mut batch_record) {
Ok(has_data) => {
if has_data {
batch.push(batch_record.to_owned());
batch.push(batch_record.clone());
} else {
// nothing else to add to batch
break;
Expand Down
4 changes: 2 additions & 2 deletions src/cmd/excel.rs
Original file line number Diff line number Diff line change
Expand Up @@ -100,15 +100,15 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
.writer()?;
let mut record = csv::StringRecord::new();
let mut trimmed_record = csv::StringRecord::new();
let mut count = 0_u32; // Excel can only hold 1m rows anyways, ODS - only 32k
let mut count = 0_u32; // use u32 as Excel can only hold 1m rows anyways, ODS - only 32k
for row in range.rows() {
record.clear();
for cell in row {
match *cell {
DataType::Empty => record.push_field(""),
DataType::String(ref s) => record.push_field(s),
DataType::Float(ref f) | DataType::DateTime(ref f) => {
record.push_field(&f.to_string())
record.push_field(&f.to_string());
}
DataType::Int(ref i) => record.push_field(&i.to_string()),
DataType::Error(ref e) => record.push_field(&format!("{e:?}")),
Expand Down
4 changes: 2 additions & 2 deletions src/cmd/fetch.rs
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
let jql_selector: Option<String> = if let Some(jql_file) = args.flag_jqlfile {
Some(fs::read_to_string(jql_file)?)
} else {
args.flag_jql.as_ref().map(|jql| jql.to_string())
args.flag_jql.as_ref().map(std::string::ToString::to_string)
};

// amortize memory allocation by reusing record
Expand All @@ -341,7 +341,7 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
match rdr.read_byte_record(&mut batch_record) {
Ok(has_data) => {
if has_data {
batch.push(batch_record.to_owned());
batch.push(batch_record.clone());
} else {
// nothing else to add to batch
break;
Expand Down
4 changes: 3 additions & 1 deletion src/cmd/jsonl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ struct Args {
flag_output: Option<String>,
}

#[allow(clippy::needless_pass_by_value)]
fn recurse_to_infer_headers(value: &Value, headers: &mut Vec<Vec<String>>, path: Vec<String>) {
match value {
Value::Object(map) => {
Expand Down Expand Up @@ -65,6 +66,7 @@ fn recurse_to_infer_headers(value: &Value, headers: &mut Vec<Vec<String>>, path:
}
}

#[allow(clippy::unnecessary_wraps)]
fn infer_headers(value: &Value) -> Option<Vec<Vec<String>>> {
let mut headers: Vec<Vec<String>> = Vec::new();

Expand All @@ -87,7 +89,7 @@ fn get_value_at_path(value: &Value, path: &[String]) -> Option<Value> {
}
}

Some(current.to_owned())
Some(current.clone())
}

fn json_line_to_csv_record(value: &Value, headers: &[Vec<String>]) -> csv::StringRecord {
Expand Down
8 changes: 4 additions & 4 deletions src/cmd/partition.rs
Original file line number Diff line number Diff line change
Expand Up @@ -181,10 +181,7 @@ impl WriterGenerator {
};

// Now check for collisions.
if !self.used.contains(&base) {
self.used.insert(base.clone());
base
} else {
if self.used.contains(&base) {
loop {
let candidate = format!("{}_{}", &base, self.counter);
self.counter = self.counter.checked_add(1).unwrap_or_else(|| {
Expand All @@ -198,6 +195,9 @@ impl WriterGenerator {
return candidate;
}
}
} else {
self.used.insert(base.clone());
base
}
}
}
8 changes: 4 additions & 4 deletions src/cmd/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -360,8 +360,8 @@ fn infer_schema_from_stats(args: &Args, input_filename: &str) -> CliResult<Map<S
Ok(properties_map)
}

/// get stats records from cmd::stats
/// returns tuple (csv_fields, csv_stats, stats_col_index_map)
/// get stats records from `cmd::stats`
/// returns tuple (`csv_fields`, `csv_stats`, `stats_col_index_map`)
fn get_stats_records(args: &Args) -> CliResult<(ByteRecord, Vec<Stats>, AHashMap<String, usize>)> {
let stats_args = crate::cmd::stats::Args {
arg_input: args.arg_input.clone(),
Expand Down Expand Up @@ -451,7 +451,7 @@ fn build_low_cardinality_column_selector_arg(
column_select_arg
}

/// get frequency tables from cmd::stats
/// get frequency tables from `cmd::stats`
/// returns map of unique values keyed by header
fn get_unique_values(
args: &Args,
Expand Down Expand Up @@ -613,7 +613,7 @@ fn generate_string_patterns(
.with_minimum_repetitions(2)
.build();

pattern_map.insert(header.to_owned(), regexp);
pattern_map.insert(header.clone(), regexp);
}

debug!("pattern map: {pattern_map:?}");
Expand Down
1 change: 1 addition & 0 deletions src/cmd/split.rs
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,7 @@ impl Args {
Ok(())
}

#[allow(clippy::unnecessary_wraps)]
fn parallel_split(&self, idx: &Indexed<fs::File, fs::File>) -> CliResult<()> {
let nchunks = util::num_of_chunks(idx.count() as usize, self.flag_size);
let pool = ThreadPool::new(util::njobs(self.flag_jobs));
Expand Down
6 changes: 4 additions & 2 deletions src/cmd/stats.rs
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,7 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
}

impl Args {
#[allow(clippy::unnecessary_wraps)]
pub fn sequential_stats(&self, whitelist: &str) -> CliResult<(csv::ByteRecord, Vec<Stats>)> {
let mut rdr = self.rconfig().reader()?;
let (headers, sel) = self.sel_headers(&mut rdr)?;
Expand All @@ -165,6 +166,7 @@ impl Args {
Ok((headers, stats))
}

#[allow(clippy::unnecessary_wraps)]
pub fn parallel_stats(
&self,
whitelist: &str,
Expand Down Expand Up @@ -743,7 +745,7 @@ impl fmt::Debug for FieldType {
}
}

/// TypedSum keeps a rolling sum of the data seen.
/// `TypedSum` keeps a rolling sum of the data seen.
/// It sums integers until it sees a float, at which point it sums floats.
#[derive(Clone, Default)]
struct TypedSum {
Expand Down Expand Up @@ -811,7 +813,7 @@ impl Commute for TypedSum {
}
}

/// TypedMinMax keeps track of minimum/maximum values for each possible type
/// `TypedMinMax` keeps track of minimum/maximum values for each possible type
/// where min/max makes sense.
#[derive(Clone, Default)]
struct TypedMinMax {
Expand Down
8 changes: 4 additions & 4 deletions src/cmd/validate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ pub fn run(argv: &[&str]) -> CliResult<()> {
// non-allocating trimming in place is much faster on the record level
// with our csv fork than doing field-by-field std::str::trim which is allocating
record.trim();
batch.push(record.to_owned());
batch.push(record.clone());
} else {
// nothing else to add to batch
break;
Expand Down Expand Up @@ -711,7 +711,9 @@ fn validate_json_instance(
let validation_output = schema_compiled.apply(instance);

// If validation output is Invalid, then grab field names and errors
if !validation_output.flag() {
if validation_output.flag() {
None
} else {
// get validation errors as String
let validation_errors: Vec<(String, String)> = match validation_output.basic() {
BasicOutput::Invalid(errors) => errors
Expand All @@ -734,8 +736,6 @@ fn validate_json_instance(
};

Some(validation_errors)
} else {
None
}
}

Expand Down
3 changes: 1 addition & 2 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ use std::borrow::ToOwned;
use std::env;
use std::fs;
use std::io::{self, Read};
use std::ops::Deref;
use std::path::PathBuf;

use log::{debug, info, warn};
Expand Down Expand Up @@ -100,7 +99,7 @@ impl Config {
};
let (path, mut delim) = match *path {
None => (None, default_delim),
Some(ref s) if s.deref() == "-" => (None, default_delim),
Some(ref s) if &**s == "-" => (None, default_delim),
Some(ref s) => {
let path = PathBuf::from(s);
let file_extension = path
Expand Down
9 changes: 6 additions & 3 deletions src/select.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ impl SelectorParser {
self.bump();
self.parse_quoted_name()?
} else {
self.parse_name()?
self.parse_name()
};
Ok(if self.cur() == Some('[') {
let idx = self.parse_index()?;
Expand All @@ -168,7 +168,7 @@ impl SelectorParser {
})
}

fn parse_name(&mut self) -> Result<String, String> {
fn parse_name(&mut self) -> String {
let mut name = String::new();
loop {
if self.is_end_of_field() || self.cur() == Some('[') {
Expand All @@ -177,7 +177,7 @@ impl SelectorParser {
name.push(self.cur().unwrap());
self.bump();
}
Ok(name)
name
}

fn parse_quoted_name(&mut self) -> Result<String, String> {
Expand Down Expand Up @@ -399,6 +399,7 @@ impl Selection {
row: &'b csv::ByteRecord,
) -> iter::Scan<slice::Iter<'a, usize>, &'b csv::ByteRecord, _GetField> {
// This is horrifying.
#[allow(clippy::unnecessary_wraps)]
fn get_field<'c>(row: &mut &'c csv::ByteRecord, idx: &usize) -> Option<&'c [u8]> {
Some(&row[*idx])
}
Expand Down Expand Up @@ -454,6 +455,8 @@ impl NormalSelection {
fn filmap<T>(v: Option<T>) -> Option<T> {
v
}
#[allow(clippy::option_option)]
#[allow(clippy::unnecessary_wraps)]
fn get_field<T>(set: &mut &[bool], t: (usize, T)) -> Option<Option<T>> {
let (i, v) = t;
if i < set.len() && set[i] {
Expand Down
1 change: 1 addition & 0 deletions src/util.rs
Original file line number Diff line number Diff line change
Expand Up @@ -290,6 +290,7 @@ pub const fn num_of_chunks(nitems: usize, chunk_size: usize) -> usize {
n
}

#[allow(clippy::cast_sign_loss)]
pub fn last_modified(md: &fs::Metadata) -> u64 {
use filetime::FileTime;
FileTime::from_last_modification_time(md).unix_seconds() as u64
Expand Down

0 comments on commit 4d16724

Please sign in to comment.